Diffstat (limited to 'collections-debian-merged/ansible_collections/community/general/plugins')
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/action/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/action/iptables_state.py  198
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/action/shutdown.py  211
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/action/system/iptables_state.py  198
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/action/system/shutdown.py  211
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/doas.py  126
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/dzdo.py  95
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/ksu.py  120
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/machinectl.py  88
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/pbrun.py  104
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/pfexec.py  104
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/pmrun.py  77
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/become/sesu.py  91
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/cache/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/cache/memcached.py  248
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/cache/pickle.py  67
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/cache/redis.py  233
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/cache/yaml.py  64
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/actionable.py  61
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py  117
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/context_demo.py  53
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/counter_enabled.py  248
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/dense.py  499
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/diy.py  1420
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/full_skip.py  76
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/hipchat.py  228
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/jabber.py  118
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/log_plays.py  123
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/logdna.py  208
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/logentries.py  330
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/logstash.py  248
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/mail.py  227
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/nrdp.py  188
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/null.py  30
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/osx_say.py  114
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/say.py  114
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/selective.py  276
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/slack.py  251
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/splunk.py  253
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/stderr.py  71
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/sumologic.py  201
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/syslog_json.py  107
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/unixy.py  246
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/callback/yaml.py  130
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/chroot.py  206
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/docker.py  366
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/funcd.py  102
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/iocage.py  82
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/jail.py  201
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/lxc.py  228
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/lxd.py  125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/oc.py  173
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/qubes.py  159
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/saltstack.py  106
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/connection/zone.py  200
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_gcp.py  62
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_netapp.py  138
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/alicloud.py  108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py  31
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py  50
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py  36
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/docker.py  136
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/emc.py  45
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hetzner.py  23
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py  35
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hwc.py  65
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py  37
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/influxdb.py  82
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ipa.py  75
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/keycloak.py  61
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_common_options.py  133
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_vm_options.py  103
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ldap.py  47
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py  43
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/manageiq.py  52
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nios.py  85
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nomad.py  51
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oneview.py  59
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/online.py  44
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/opennebula.py  44
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/openswitch.py  84
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle.py  82
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py  23
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py  15
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py  21
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py  25
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ovirt_facts.py  59
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/postgres.py  62
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/proxmox.py  45
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/purestorage.py  62
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/rackspace.py  117
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/scaleway.py  50
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/utm.py  54
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/vexata.py  52
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/xenserver.py  40
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/filter/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/filter/dict_kv.py  70
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/filter/jc.py  94
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/filter/json_query.py  56
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/filter/random_mac.py  73
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/filter/time.py  143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/cobbler.py  279
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_machine.py  272
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_swarm.py  255
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/gitlab_runners.py  140
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/kubevirt.py  256
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/linode.py  211
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/nmap.py  168
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/online.py  260
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/proxmox.py  350
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/scaleway.py  281
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/stackpath_compute.py  281
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/inventory/virtualbox.py  283
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/cartesian.py  77
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/chef_databag.py  104
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/consul_kv.py  191
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/credstash.py  125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py  182
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/dig.py  356
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/dnstxt.py  96
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/dsv.py  140
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd.py  180
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd3.py  229
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/filetree.py  218
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/flattened.py  84
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/gcp_storage_file.py  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/hashi_vault.py  650
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/hiera.py  90
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/keyring.py  67
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/lastpass.py  99
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/lmdb_kv.py  120
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/manifold.py  278
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios.py  121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_ip.py  100
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_network.py  113
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword.py  277
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword_raw.py  92
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/passwordstore.py  302
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/redis.py  117
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/shelvefile.py  90
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/lookup/tss.py  139
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_mount.py  90
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_netapp.py  747
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_ovirt.py  871
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py  286
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/cloud.py  208
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/ipaddress.py  2580
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/database.py  189
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/dimensiondata.py  330
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/common.py  1022
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/swarm.py  280
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcdns.py  39
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gce.py  39
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcp.py  799
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gitlab.py  106
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/heroku.py  41
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hetzner.py  171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hwc_utils.py  441
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py  94
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py  482
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/influxdb.py  92
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ipa.py  213
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/known_hosts.py  180
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/kubevirt.py  465
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ldap.py  78
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/linode.py  21
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/lxd.py  129
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/manageiq.py  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/memset.py  137
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/module_helper.py  302
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/api.py  590
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneandone.py  263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneview.py  485
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/online.py  121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/opennebula.py  310
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py  1962
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/postgres.py  314
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/proxmox.py  86
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/pure.py  112
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/rax.py  315
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redfish_utils.py  2694
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redhat.py  270
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py  56
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/ome.py  163
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py  78
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/saslprep.py  178
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/scaleway.py  195
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py  92
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py  20
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py  94
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/univention_umc.py  278
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/utm_utils.py  216
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/vexata.py  97
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/module_utils/xenserver.py  862
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/aerospike_migrations.py  521
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/airbrake_deployment.py  204
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_devices.py  369
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_filesystem.py  567
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_inittab.py  247
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvg.py  363
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvol.py  337
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance.py  1013
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_facts.py  440
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_info.py  440
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/alternatives.py  159
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py  450
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_module.py  266
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/apk.py  357
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_repo.py  146
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_rpm.py  183
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/archive.py  572
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_container.py  208
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_host.py  101
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_image.py  169
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/awall.py  153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/beadm.py  431
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bearychat.py  182
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bigpanda.py  222
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py  278
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py  206
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py  303
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py  277
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bower.py  228
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bundler.py  202
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/bzr.py  190
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/campfire.py  154
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/capabilities.py  180
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/catapult.py  154
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/circonus_annotation.py  234
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_spark.py  191
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_webex.py  191
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_aa_policy.py  349
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_alert_policy.py  526
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py  299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py  584
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_group.py  512
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py  935
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_modify_server.py  965
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_publicip.py  357
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server.py  1557
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py  409
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py  1013
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_facts.py  440
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py  440
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py  208
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py  101
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py  169
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py  349
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py  526
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py  299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py  584
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py  512
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py  935
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py  965
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py  357
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py  1557
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py  409
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py  296
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py  568
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_compose.py  1155
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_config.py  299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container.py  3563
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container_info.py  145
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_host_info.py  343
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image.py  1021
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_facts.py  270
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_info.py  270
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_login.py  485
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py  717
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network_info.py  141
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node.py  294
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node_info.py  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_prune.py  265
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_secret.py  302
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_service.py  1155
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack.py  308
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_info.py  84
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_task_info.py  95
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm.py  675
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_info.py  384
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service.py  3004
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service_info.py  115
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume.py  332
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume_info.py  128
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gc_storage.py  497
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_record.py  780
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_zone.py  372
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce.py  753
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_eip.py  247
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_img.py  211
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_instance_template.py  605
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_labels.py  350
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_lb.py  310
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_mig.py  904
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_net.py  511
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_pd.py  293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_snapshot.py  225
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_tag.py  218
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_backend_service.py  420
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_forwarding_rule.py  385
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_healthcheck.py  457
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_target_proxy.py  320
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_url_map.py  535
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub.py  349
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_facts.py  164
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_info.py  164
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcspanner.py  304
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py  128
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py  2135
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py  1210
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py  493
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py  338
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py  877
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py  691
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py  1160
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py  354
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py  437
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py  645
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py  570
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py  734
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py  184
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_preset.py  154
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_pvc.py  457
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_rs.py  211
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_template.py  385
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_vm.py  469
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py  690
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py  309
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py  1760
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py  710
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py  404
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py  183
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_facts.py  172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py  172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_facts.py  297
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py  297
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py  311
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py  266
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py  380
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py  129
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/helm.py  216
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/ovirt.py  503
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py  735
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py  133
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py  143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py  1449
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py  306
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py  256
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py  1516
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py  232
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py  413
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py  202
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py  573
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py  677
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py  1026
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py  454
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py  341
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py  705
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_facts.py  175
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py  175
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_facts.py  76
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py  76
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py  283
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py  426
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_facts.py  293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py  293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py  768
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py  1599
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py  221
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py  261
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py  311
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py  157
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py  196
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_api_facts.py  98
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py  125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py  108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_disk_facts.py  125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_event_facts.py  170
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py  165
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_group_facts.py  123
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_facts.py  149
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py  187
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_network_facts.py  125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_nic_facts.py  143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_permission_facts.py  166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_quota_facts.py  143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py  140
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py  137
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py  126
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py  142
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py  142
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_tag_facts.py  176
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_template_facts.py  124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_user_facts.py  123
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vm_facts.py  166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py  123
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py  651
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py  326
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py  244
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py  261
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py  321
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py  299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py  654
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py  257
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py  288
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py  425
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py  258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py  626
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py  897
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py  226
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py  218
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py  258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py  171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py  218
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py  311
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py  282
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py  281
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py  172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py  352
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py  142
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py  392
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py  608
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py  102
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py  171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py  173
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py  227
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py  319
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py  191
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py  174
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py  180
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py  138
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py  139
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py  438
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py  286
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py  671
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py  371
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_facts.py  125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py  126
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py  261
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_facts.py  108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py  108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py  356
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_facts.py  104
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py  104
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py  238
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py  112
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py  112
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py  263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_facts.py  195
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py  195
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py  113
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py  113
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py  172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py  171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py  176
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_facts.py  108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py  108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py  311
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py  234
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_facts.py  124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py  124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py  796
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py  428
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py  1543
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py  185
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py  231
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py  177
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py  576
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py  542
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py  190
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py  188
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py  162
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py  130
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py  198
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py  1933
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py  226
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py  226
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py  270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py129
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py878
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py603
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py657
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py326
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py276
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py345
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py222
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py251
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py140
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py339
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py267
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py603
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py657
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py326
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py276
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py214
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py423
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py354
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py521
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py168
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py324
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py221
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py420
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py667
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py443
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py589
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py1030
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py363
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py453
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py745
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py1171
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py682
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py452
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py627
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py447
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py611
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py541
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py993
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py335
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py308
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py373
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py167
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py404
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py380
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py524
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py296
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py568
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py347
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py1155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py3563
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py145
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py343
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py1021
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py485
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py294
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py265
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py302
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py1155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py308
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py95
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py675
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py384
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py3004
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py332
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py370
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py72
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py572
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py334
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py958
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py496
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py312
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py497
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py780
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py372
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py753
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py605
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py350
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py310
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py904
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py511
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py420
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py385
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py457
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py320
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py535
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py349
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py164
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py164
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py273
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py330
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py193
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py213
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py280
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py169
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py324
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py387
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py374
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py348
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py563
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py199
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py604
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py116
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py230
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py479
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py216
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py509
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py212
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py971
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py875
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py256
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py203
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py274
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py2135
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py1210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py493
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py338
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py877
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py691
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py1160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py354
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py437
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py645
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py570
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py734
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py118
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py107
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py136
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py319
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py259
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py355
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py305
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py302
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py179
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py400
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py249
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py879
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py431
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py364
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py207
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py327
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py301
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py427
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py565
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py334
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py292
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py399
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py144
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py319
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py259
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py355
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py305
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py302
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py179
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py400
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py249
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py105
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py194
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py649
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py342
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py303
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py401
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py315
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py178
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py367
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py780
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py531
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py615
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py879
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py431
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py364
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py184
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py457
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py385
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py469
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py514
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py284
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py318
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py690
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py309
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py243
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py79
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py99
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py328
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py566
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py1760
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py710
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py404
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py307
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py386
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py173
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py349
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py648
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py344
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py928
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py289
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py557
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py712
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py266
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py380
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py340
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py204
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py222
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py167
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py404
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py340
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py1304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py146
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py279
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py192
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py370
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py152
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py332
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py215
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py465
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py149
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py248
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py296
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py373
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py246
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py301
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py227
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py437
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py610
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py1304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py878
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py479
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py509
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py565
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py144
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py105
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py342
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py284
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py318
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py79
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py283
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py519
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py361
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py1115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py469
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py310
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py459
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py146
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py135
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py283
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py519
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py361
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py1115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py345
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py537
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py182
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py191
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py | 191
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py | 197
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py | 116
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py | 212
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py | 303
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py | 99
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py | 386
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py | 139
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py | 151
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py | 248
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py | 135
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py | 299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py | 185
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py | 241
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py | 91
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py | 268
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py | 487
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py | 189
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py | 114
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py | 173
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py | 128
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py | 308
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py | 469
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py | 221
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py | 168
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py | 299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py | 47
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py | 310
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py | 413
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py | 283
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py | 426
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py | 293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py | 293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py | 768
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py | 1599
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py | 573
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py | 677
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py | 1026
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py | 454
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py | 341
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py | 705
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py | 392
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py | 392
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py | 225
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py | 225
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py | 247
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py | 165
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py | 165
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py | 121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py | 117
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py | 164
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py | 122
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py | 122
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py | 150
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py | 215
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py | 121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py | 121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py | 175
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py | 175
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py | 76
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py | 76
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py | 375
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py | 653
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py | 198
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py | 198
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py | 197
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py | 395
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py | 261
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py | 311
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py | 157
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py | 503
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py | 196
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py | 98
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py | 125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py | 108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py | 125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py | 170
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py | 165
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py | 123
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py | 149
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py | 187
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py | 125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py | 143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py | 143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py | 140
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py | 137
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py | 126
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py | 142
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py | 142
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py | 176
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py | 124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py | 123
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py | 123
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py | 222
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py | 228
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py | 202
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py | 267
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py | 214
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py | 198
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py | 311
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py | 712
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py | 308
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py | 319
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py | 147
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py | 394
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py | 357
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py | 146
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py | 183
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py | 312
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py | 234
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py | 971
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py | 875
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py | 256
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py | 292
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py | 268
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py | 307
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py | 295
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py | 653
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py | 197
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py | 481
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py | 178
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py | 202
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py | 388
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py | 485
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py | 293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py | 539
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py | 210
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py | 754
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py | 930
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py | 192
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py | 431
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py | 124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py | 245
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py | 205
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py | 256
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py | 644
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py | 206
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py | 313
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py | 219
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py | 336
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py | 561
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py | 402
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py | 651
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py | 326
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py | 244
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py | 261
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py | 321
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py | 299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py | 481
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py | 279
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py | 255
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py | 192
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py | 317
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py | 866
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py | 797
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py | 319
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py | 84
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py | 141
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py | 147
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py | 178
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py | 202
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py | 388
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py | 485
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py | 293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py | 539
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py | 210
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py | 420
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py | 667
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py | 443
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py | 589
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py | 1030
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py | 363
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py | 228
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py | 453
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py | 745
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py | 170
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py | 1171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py | 682
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py | 452
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py | 293
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py | 627
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py | 447
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py | 304
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py | 717
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py | 611
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py | 541
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py | 993
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py | 335
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py | 654
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py | 257
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py | 288
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py | 425
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py | 258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py | 735
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py | 133
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py | 143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py | 1449
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py | 306
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py | 256
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py | 626
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py | 754
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py | 330
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py | 858
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py | 652
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py | 185
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py | 172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py | 172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py | 897
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py | 226
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py | 218
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py | 258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py | 171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py | 218
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py | 311
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py | 282
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py | 281
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py | 172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py | 352
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py | 142
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py | 392
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py | 608
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py | 102
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py | 171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py | 173
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py | 227
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py | 319
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py | 191
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py | 174
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py | 180
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py | 138
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py | 139
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py | 438
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py | 286
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py | 241
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py | 756
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py | 335
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py | 466
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py | 466
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py | 930
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py | 324
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py | 236
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py | 140
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py | 339
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py | 207
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py | 301
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py | 413
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py | 157
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py | 615
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py | 203
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py | 258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py | 258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py | 111
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py | 427
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py | 194
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py | 131
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py | 170
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py | 200
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py | 304
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py | 349
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py | 648
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py | 344
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py | 928
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py | 289
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py | 557
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py | 331
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py | 225
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py | 225
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py | 247
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py | 165
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py | 165
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py | 121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py | 117
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py | 110
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py | 164
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py | 122
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py | 122
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py | 150
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py | 166
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py | 215
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py | 121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py | 121
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py | 200
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py | 327
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py | 236
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py | 236
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py | 756
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py | 335
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py | 466
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py | 466
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py | 277
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py | 131
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py | 1516
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py | 192
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py | 431
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py | 124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py | 245
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py | 221
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py | 241
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py | 143
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py | 255
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py | 207
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py | 278
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py | 91
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py | 671
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py | 371
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py | 125
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py | 126
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py | 261
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py | 108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py | 108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py | 356
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py | 104
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py | 104
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py | 238
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py | 112
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py | 112
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py | 195
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py | 195
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py | 113
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py | 113
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py | 172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py | 171
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py | 176
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py | 108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py | 108
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py | 292
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py | 127
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py | 258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py | 268
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py | 370
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py | 258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py | 270
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py | 297
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py | 152
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py | 306
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py | 232
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py | 179
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py | 384
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py | 244
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py | 315
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py | 68
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py | 428
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py | 487
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py | 205
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py | 124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py | 124
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py | 256
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py | 459
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py | 485
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py | 644
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py | 278
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py | 206
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py | 303
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py | 277
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py | 190
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py | 273
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py | 330
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py | 193
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py | 111
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py | 237
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py | 213
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py | 280
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py | 169
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py | 295
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py | 324
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py | 304
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py | 387
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py | 374
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py | 299
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py | 348
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py | 563
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py | 291
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py | 332
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py | 1543
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py | 295
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py | 215
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py | 277
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py | 465
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py | 170
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py | 199
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py | 172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py | 604
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py | 295
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py | 156
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py | 118
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py | 127
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py | 115
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py | 107
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py | 136
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py | 228
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py | 296
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py | 373
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py | 234
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py | 246
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py | 301
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py | 227
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py | 437
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py | 610
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py | 179
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py | 384
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py | 244
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py | 315
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py | 858
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py | 652
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py | 209
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py | 196
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py | 262
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py | 259
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py | 210
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py | 257
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py | 297
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py | 263
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py | 206
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py | 313
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py | 189
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py | 175
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py | 369
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py | 567
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py | 247
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py | 363
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py | 337
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py | 159
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py | 431
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py | 180
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py | 423
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py | 354
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py | 380
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py | 370
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py | 72
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py | 496
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py | 233
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py | 399
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py | 649
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py | 401
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py | 315
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py | 153
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py | 514
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py | 127
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py | 243
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py | 234
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py | 328
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py | 566
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py | 173
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py | 202
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py | 127
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py | 537
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py | 47
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py | 375
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py | 198
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py | 395
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py | 317
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py | 866
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py | 797
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py | 84
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py | 330
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py | 172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py | 172
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py | 278
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py | 292
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py | 127
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py | 258
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py | 306
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py | 68
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py | 485
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py | 297
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py | 175
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py | 152
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py | 905
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py | 594
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py | 866
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py | 279
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py | 426
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py | 152
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py | 313
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py | 114
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py | 413
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py | 905
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py | 173
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py | 128
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py | 185
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py | 231
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py | 177
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py | 576
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py | 542
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py | 594
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py | 149
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py | 219
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py | 225
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py | 122
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py | 160
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py | 101
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py | 157
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py | 134
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py | 96
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py | 362
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py | 241
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py | 278
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py | 141
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py | 214
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py | 122
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py | 866
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py | 196
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py | 291
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py | 291
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py | 237
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py | 308
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py | 373
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py | 209
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py | 196
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py | 796
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py | 131
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py | 450
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py | 266
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py | 524
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py | 347
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py | 218
-rw-r--r--  collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py | 230
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py274
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py178
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py367
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py780
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py531
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py207
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py134
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py362
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py278
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py214
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py313
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py190
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py188
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py130
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py1933
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py279
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py426
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py958
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py394
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py262
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py259
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py251
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py561
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py402
1541 files changed, 494731 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/iptables_state.py
new file mode 100644
index 00000000..92fb079a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/iptables_state.py
@@ -0,0 +1,198 @@
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import time
+
+from ansible.plugins.action import ActionBase
+from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
+from ansible.utils.vars import merge_hash
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ # Keep internal params away from user interactions
+ _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
+ DEFAULT_SUDOABLE = True
+
+ MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
+ "This module doesn't support async>0 and poll>0 when its 'state' param "
+ "is set to 'restored'. To enable its rollback feature (that needs the "
+ "module to run asynchronously on the remote), please set task attribute "
+ "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+ MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
+ "Attempts to restore iptables state without rollback in case of mistake "
+ "may lead the ansible controller to loose access to the hosts and never "
+ "regain it before fixing firewall rules through a serial console, or any "
+ "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
+ "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
+ "(recommended).")
+ MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
+ "You attempt to restore iptables state with rollback in case of mistake, "
+ "but with settings that will lead this rollback to happen AFTER that the "
+ "controller will reach its own timeout. Please set task attribute 'poll' "
+ "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+
+ def _async_result(self, module_args, task_vars, timeout):
+ '''
+ Retrieve results of the asynchonous task, and display them in place of
+ the async wrapper results (those with the ansible_job_id key).
+ '''
+ # At least one iteration is required, even if timeout is 0.
+ for i in range(max(1, timeout)):
+ async_result = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=module_args,
+ task_vars=task_vars,
+ wrap_async=False)
+ if async_result['finished'] == 1:
+ break
+ time.sleep(min(1, timeout))
+
+ return async_result
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # Set short names for values we'll have to compare or reuse
+ task_poll = self._task.poll
+ task_async = self._task.async_val
+ check_mode = self._play_context.check_mode
+ max_timeout = self._connection._play_context.timeout
+ module_name = self._task.action
+ module_args = self._task.args
+
+ if module_args.get('state', None) == 'restored':
+ if not wrap_async:
+ if not check_mode:
+ display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
+ task_poll,
+ task_async,
+ max_timeout))
+ elif task_poll:
+ raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
+ task_poll,
+ task_async,
+ max_timeout))
+ else:
+ if task_async > max_timeout and not check_mode:
+ display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
+ task_poll,
+ task_async,
+ max_timeout))
+
+ # BEGIN snippet from async_status action plugin
+ env_async_dir = [e for e in self._task.environment if
+ "ANSIBLE_ASYNC_DIR" in e]
+ if len(env_async_dir) > 0:
+ # for backwards compatibility we need to get the dir from
+ # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
+ # deprecated and will be removed in favour of shell options
+ async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
+
+ msg = "Setting the async dir from the environment keyword " \
+ "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
+ "shell option instead"
+ display.deprecated(msg, version='2.0.0',
+ collection_name='community.general') # was Ansible 2.12
+ else:
+ # inject the async directory based on the shell option into the
+ # module args
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+ # END snippet from async_status action plugin
+
+ # Bind the loop max duration to consistent values on both
+ # remote and local sides (if not the same, make the loop
+ # longer on the controller); and set a backup file path.
+ module_args['_timeout'] = task_async
+ module_args['_back'] = '%s/iptables.state' % async_dir
+ async_status_args = dict(_async_dir=async_dir)
+ confirm_cmd = 'rm -f %s' % module_args['_back']
+ starter_cmd = 'touch %s.starter' % module_args['_back']
+ remaining_time = max(task_async, max_timeout)
+
+ # do work!
+ result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
+
+            # Then the 3-step "go ahead or rollback":
+ # 1. Catch early errors of the module (in asynchronous task) if any.
+ # Touch a file on the target to signal the module to process now.
+ # 2. Reset connection to ensure a persistent one will not be reused.
+ # 3. Confirm the restored state by removing the backup on the remote.
+ # Retrieve the results of the asynchronous task to return them.
+ if '_back' in module_args:
+ async_status_args['jid'] = result.get('ansible_job_id', None)
+ if async_status_args['jid'] is None:
+ raise AnsibleActionFail("Unable to get 'ansible_job_id'.")
+
+ # Catch early errors due to missing mandatory option, bad
+ # option type/value, missing required system command, etc.
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))
+
+                # The module knows not to process the main iptables-restore
+                # command until it finds (and deletes) the 'starter' cookie on
+                # the host, so the previous query will not reach the ssh timeout.
+ garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
+
+ # As the main command is not yet executed on the target, here
+                # 'finished' means 'failed before the main command was executed'.
+ if not result['finished']:
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+
+ for x in range(max_timeout):
+ time.sleep(1)
+ remaining_time -= 1
+ # - AnsibleConnectionFailure covers rejected requests (i.e.
+ # by rules with '--jump REJECT')
+ # - ansible_timeout is able to cover dropped requests (due
+ # to a rule or policy DROP) if not lower than async_val.
+ try:
+ garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
+ break
+ except AnsibleConnectionFailure:
+ continue
+
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))
+
+ # Cleanup async related stuff and internal params
+ for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
+ if result.get(key):
+ del result[key]
+
+ if result.get('invocation', {}).get('module_args'):
+ if '_timeout' in result['invocation']['module_args']:
+ del result['invocation']['module_args']['_back']
+ del result['invocation']['module_args']['_timeout']
+
+ async_status_args['mode'] = 'cleanup'
+ garbage = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=async_status_args,
+ task_vars=task_vars,
+ wrap_async=False)
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/shutdown.py
new file mode 100644
index 00000000..e36397ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/shutdown.py
@@ -0,0 +1,211 @@
+# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
+# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
+# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.collections import is_string
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'msg',
+ 'delay',
+ 'search_paths'
+ ))
+
+ DEFAULT_CONNECT_TIMEOUT = None
+ DEFAULT_PRE_SHUTDOWN_DELAY = 0
+ DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible'
+ DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
+ DEFAULT_SUDOABLE = True
+
+ SHUTDOWN_COMMANDS = {
+ 'alpine': 'poweroff',
+ 'vmkernel': 'halt',
+ }
+
+ SHUTDOWN_COMMAND_ARGS = {
+ 'alpine': '',
+ 'void': '-h +{delay_min} "{message}"',
+ 'freebsd': '-h +{delay_sec}s "{message}"',
+ 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
+ 'macosx': '-h +{delay_min} "{message}"',
+ 'openbsd': '-h +{delay_min} "{message}"',
+ 'solaris': '-y -g {delay_sec} -i 5 "{message}"',
+ 'sunos': '-y -g {delay_sec} -i 5 "{message}"',
+ 'vmkernel': '-d {delay_sec}',
+ 'aix': '-Fh',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ @property
+ def delay(self):
+ return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
+
+ def _check_delay(self, key, default):
+ """Ensure that the value is positive or zero"""
+ value = int(self._task.args.get(key, default))
+ if value < 0:
+ value = 0
+ return value
+
+ def _get_value_from_facts(self, variable_name, distribution, default_value):
+ """Get dist+version specific args first, then distribution, then family, lastly use default"""
+ attr = getattr(self, variable_name)
+ value = attr.get(
+ distribution['name'] + distribution['version'],
+ attr.get(
+ distribution['name'],
+ attr.get(
+ distribution['family'],
+ getattr(self, default_value))))
+ return value
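+
+    # Worked example of the lookup order above (facts values assumed): for
+    # distribution {'name': 'ubuntu', 'version': '20', 'family': 'debian'},
+    # the keys tried against SHUTDOWN_COMMAND_ARGS are 'ubuntu20', then
+    # 'ubuntu', then 'debian', before falling back to
+    # DEFAULT_SHUTDOWN_COMMAND_ARGS.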
+
+ def get_shutdown_command_args(self, distribution):
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+        # Convert seconds to minutes. If less than 60, it becomes 0.
+ delay_sec = self.delay
+ shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
+ return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
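+
+    # For example (illustrative values): with delay=90 on a Linux host, the
+    # template '-h {delay_min} "{message}"' renders as
+    #   -h 1 "Shut down initiated by Ansible"
+    # while on FreeBSD '-h +{delay_sec}s "{message}"' renders as
+    #   -h +90s "Shut down initiated by Ansible"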
+
+ def get_distribution(self, task_vars):
+ # FIXME: only execute the module if we don't already have the facts we need
+ distribution = {}
+ display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ module_output = self._execute_module(
+ task_vars=task_vars,
+ module_name='ansible.legacy.setup',
+ module_args={'gather_subset': 'min'})
+ try:
+ if module_output.get('failed', False):
+ raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
+ to_native(module_output['module_stdout']).strip(),
+ to_native(module_output['module_stderr']).strip()))
+ distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
+ distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ return distribution
+ except KeyError as ke:
+ raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+
+ def get_shutdown_command(self, task_vars, distribution):
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
+ default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
+
+ # FIXME: switch all this to user arg spec validation methods when they are available
+ # Convert bare strings to a list
+ if is_string(search_paths):
+ search_paths = [search_paths]
+
+ # Error if we didn't get a list
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ try:
+ incorrect_type = any(not is_string(x) for x in search_paths)
+ if not isinstance(search_paths, list) or incorrect_type:
+ raise TypeError
+ except TypeError:
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ self._shutdown_command = full_path[0]
+ return self._shutdown_command
+
+ def perform_shutdown(self, task_vars, distribution):
+ result = {}
+ shutdown_result = {}
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+
+ self.cleanup(force=True)
+ try:
+ display.vvv("{action}: shutting down server...".format(action=self._task.action))
+ display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
+ if self._play_context.check_mode:
+ shutdown_result['rc'] = 0
+ else:
+ shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
+ except AnsibleConnectionFailure as e:
+            # If the connection is closed too quickly due to the system being shut down, carry on
+ display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ shutdown_result['rc'] = 0
+
+ if shutdown_result['rc'] != 0:
+ result['failed'] = True
+ result['shutdown'] = False
+ result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
+ stdout=to_native(shutdown_result['stdout'].strip()),
+ stderr=to_native(shutdown_result['stderr'].strip()))
+ return result
+
+ result['failed'] = False
+ result['shutdown_command'] = shutdown_command_exec
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+        # If running with local connection, fail so we don't shut down ourselves
+        if self._connection.transport == 'local' and (not self._play_context.check_mode):
+            msg = 'Running {0} with local connection would shut down the control node.'.format(self._task.action)
+ return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
+
+ if task_vars is None:
+ task_vars = {}
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False) or result.get('failed', False):
+ return result
+
+ distribution = self.get_distribution(task_vars)
+
+ # Initiate shutdown
+ shutdown_result = self.perform_shutdown(task_vars, distribution)
+
+ if shutdown_result['failed']:
+ result = shutdown_result
+ return result
+
+ result['shutdown'] = True
+ result['changed'] = True
+ result['shutdown_command'] = shutdown_result['shutdown_command']
+
+ return result
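+
+# Illustrative task shape for this action plugin (values are assumptions, not
+# defaults enforced here):
+#   - name: Shut down the host after a 60 second delay
+#     community.general.shutdown:
+#       delay: 60
+#       msg: Going down for maintenance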
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/system/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/iptables_state.py
new file mode 100644
index 00000000..92fb079a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/iptables_state.py
@@ -0,0 +1,198 @@
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import time
+
+from ansible.plugins.action import ActionBase
+from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
+from ansible.utils.vars import merge_hash
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ # Keep internal params away from user interactions
+ _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
+ DEFAULT_SUDOABLE = True
+
+ MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
+ "This module doesn't support async>0 and poll>0 when its 'state' param "
+ "is set to 'restored'. To enable its rollback feature (that needs the "
+ "module to run asynchronously on the remote), please set task attribute "
+ "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+ MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
+ "Attempts to restore iptables state without rollback in case of mistake "
+ "may lead the ansible controller to loose access to the hosts and never "
+ "regain it before fixing firewall rules through a serial console, or any "
+ "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
+ "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
+ "(recommended).")
+ MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
+ "You attempt to restore iptables state with rollback in case of mistake, "
+ "but with settings that will lead this rollback to happen AFTER that the "
+ "controller will reach its own timeout. Please set task attribute 'poll' "
+ "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+
+ def _async_result(self, module_args, task_vars, timeout):
+ '''
+ Retrieve results of the asynchonous task, and display them in place of
+ the async wrapper results (those with the ansible_job_id key).
+ '''
+ # At least one iteration is required, even if timeout is 0.
+ for i in range(max(1, timeout)):
+ async_result = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=module_args,
+ task_vars=task_vars,
+ wrap_async=False)
+ if async_result['finished'] == 1:
+ break
+ time.sleep(min(1, timeout))
+
+ return async_result
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # Set short names for values we'll have to compare or reuse
+ task_poll = self._task.poll
+ task_async = self._task.async_val
+ check_mode = self._play_context.check_mode
+ max_timeout = self._connection._play_context.timeout
+ module_name = self._task.action
+ module_args = self._task.args
+
+ if module_args.get('state', None) == 'restored':
+ if not wrap_async:
+ if not check_mode:
+ display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
+ task_poll,
+ task_async,
+ max_timeout))
+ elif task_poll:
+ raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
+ task_poll,
+ task_async,
+ max_timeout))
+ else:
+ if task_async > max_timeout and not check_mode:
+ display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
+ task_poll,
+ task_async,
+ max_timeout))
+
+ # BEGIN snippet from async_status action plugin
+ env_async_dir = [e for e in self._task.environment if
+ "ANSIBLE_ASYNC_DIR" in e]
+ if len(env_async_dir) > 0:
+ # for backwards compatibility we need to get the dir from
+ # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
+ # deprecated and will be removed in favour of shell options
+ async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
+
+ msg = "Setting the async dir from the environment keyword " \
+ "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
+ "shell option instead"
+ display.deprecated(msg, version='2.0.0',
+ collection_name='community.general') # was Ansible 2.12
+ else:
+ # inject the async directory based on the shell option into the
+ # module args
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+ # END snippet from async_status action plugin
+
+ # Bind the loop max duration to consistent values on both
+ # remote and local sides (if not the same, make the loop
+ # longer on the controller); and set a backup file path.
+ module_args['_timeout'] = task_async
+ module_args['_back'] = '%s/iptables.state' % async_dir
+ async_status_args = dict(_async_dir=async_dir)
+ confirm_cmd = 'rm -f %s' % module_args['_back']
+ starter_cmd = 'touch %s.starter' % module_args['_back']
+ remaining_time = max(task_async, max_timeout)
+
+ # do work!
+ result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
+
+            # Then the 3-step "go ahead or rollback":
+ # 1. Catch early errors of the module (in asynchronous task) if any.
+ # Touch a file on the target to signal the module to process now.
+ # 2. Reset connection to ensure a persistent one will not be reused.
+ # 3. Confirm the restored state by removing the backup on the remote.
+ # Retrieve the results of the asynchronous task to return them.
+ if '_back' in module_args:
+ async_status_args['jid'] = result.get('ansible_job_id', None)
+ if async_status_args['jid'] is None:
+ raise AnsibleActionFail("Unable to get 'ansible_job_id'.")
+
+ # Catch early errors due to missing mandatory option, bad
+ # option type/value, missing required system command, etc.
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))
+
+                # The module knows not to process the main iptables-restore
+                # command until it finds (and deletes) the 'starter' cookie on
+                # the host, so the previous query will not reach the ssh timeout.
+ garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
+
+ # As the main command is not yet executed on the target, here
+                # 'finished' means 'failed before the main command was executed'.
+ if not result['finished']:
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+
+ for x in range(max_timeout):
+ time.sleep(1)
+ remaining_time -= 1
+ # - AnsibleConnectionFailure covers rejected requests (i.e.
+ # by rules with '--jump REJECT')
+ # - ansible_timeout is able to cover dropped requests (due
+ # to a rule or policy DROP) if not lower than async_val.
+ try:
+ garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
+ break
+ except AnsibleConnectionFailure:
+ continue
+
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))
+
+ # Cleanup async related stuff and internal params
+ for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
+ if result.get(key):
+ del result[key]
+
+ if result.get('invocation', {}).get('module_args'):
+ if '_timeout' in result['invocation']['module_args']:
+ del result['invocation']['module_args']['_back']
+ del result['invocation']['module_args']['_timeout']
+
+ async_status_args['mode'] = 'cleanup'
+ garbage = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=async_status_args,
+ task_vars=task_vars,
+ wrap_async=False)
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/system/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/shutdown.py
new file mode 100644
index 00000000..e36397ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/shutdown.py
@@ -0,0 +1,211 @@
+# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
+# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
+# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.collections import is_string
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'msg',
+ 'delay',
+ 'search_paths'
+ ))
+
+ DEFAULT_CONNECT_TIMEOUT = None
+ DEFAULT_PRE_SHUTDOWN_DELAY = 0
+ DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible'
+ DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
+ DEFAULT_SUDOABLE = True
+
+ SHUTDOWN_COMMANDS = {
+ 'alpine': 'poweroff',
+ 'vmkernel': 'halt',
+ }
+
+ SHUTDOWN_COMMAND_ARGS = {
+ 'alpine': '',
+ 'void': '-h +{delay_min} "{message}"',
+ 'freebsd': '-h +{delay_sec}s "{message}"',
+ 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
+ 'macosx': '-h +{delay_min} "{message}"',
+ 'openbsd': '-h +{delay_min} "{message}"',
+ 'solaris': '-y -g {delay_sec} -i 5 "{message}"',
+ 'sunos': '-y -g {delay_sec} -i 5 "{message}"',
+ 'vmkernel': '-d {delay_sec}',
+ 'aix': '-Fh',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ @property
+ def delay(self):
+ return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
+
+ def _check_delay(self, key, default):
+ """Ensure that the value is positive or zero"""
+ value = int(self._task.args.get(key, default))
+ if value < 0:
+ value = 0
+ return value
+
+ def _get_value_from_facts(self, variable_name, distribution, default_value):
+ """Get dist+version specific args first, then distribution, then family, lastly use default"""
+ attr = getattr(self, variable_name)
+ value = attr.get(
+ distribution['name'] + distribution['version'],
+ attr.get(
+ distribution['name'],
+ attr.get(
+ distribution['family'],
+ getattr(self, default_value))))
+ return value
+
+ def get_shutdown_command_args(self, distribution):
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+        # Convert seconds to minutes. If less than 60, it becomes 0.
+ delay_sec = self.delay
+ shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
+ return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
+
+ def get_distribution(self, task_vars):
+ # FIXME: only execute the module if we don't already have the facts we need
+ distribution = {}
+ display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ module_output = self._execute_module(
+ task_vars=task_vars,
+ module_name='ansible.legacy.setup',
+ module_args={'gather_subset': 'min'})
+ try:
+ if module_output.get('failed', False):
+ raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
+ to_native(module_output['module_stdout']).strip(),
+ to_native(module_output['module_stderr']).strip()))
+ distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
+ distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ return distribution
+ except KeyError as ke:
+ raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+
+ def get_shutdown_command(self, task_vars, distribution):
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
+ default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
+
+ # FIXME: switch all this to user arg spec validation methods when they are available
+ # Convert bare strings to a list
+ if is_string(search_paths):
+ search_paths = [search_paths]
+
+ # Error if we didn't get a list
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ try:
+ incorrect_type = any(not is_string(x) for x in search_paths)
+ if not isinstance(search_paths, list) or incorrect_type:
+ raise TypeError
+ except TypeError:
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ self._shutdown_command = full_path[0]
+ return self._shutdown_command
+
+ def perform_shutdown(self, task_vars, distribution):
+ result = {}
+ shutdown_result = {}
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+
+ self.cleanup(force=True)
+ try:
+ display.vvv("{action}: shutting down server...".format(action=self._task.action))
+ display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
+ if self._play_context.check_mode:
+ shutdown_result['rc'] = 0
+ else:
+ shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
+ except AnsibleConnectionFailure as e:
+            # If the connection is closed too quickly due to the system being shut down, carry on
+ display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ shutdown_result['rc'] = 0
+
+ if shutdown_result['rc'] != 0:
+ result['failed'] = True
+ result['shutdown'] = False
+ result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
+ stdout=to_native(shutdown_result['stdout'].strip()),
+ stderr=to_native(shutdown_result['stderr'].strip()))
+ return result
+
+ result['failed'] = False
+ result['shutdown_command'] = shutdown_command_exec
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+        # If running with local connection, fail so we don't shut down ourselves
+        if self._connection.transport == 'local' and (not self._play_context.check_mode):
+            msg = 'Running {0} with local connection would shut down the control node.'.format(self._task.action)
+ return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
+
+ if task_vars is None:
+ task_vars = {}
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False) or result.get('failed', False):
+ return result
+
+ distribution = self.get_distribution(task_vars)
+
+ # Initiate shutdown
+ shutdown_result = self.perform_shutdown(task_vars, distribution)
+
+ if shutdown_result['failed']:
+ result = shutdown_result
+ return result
+
+ result['shutdown'] = True
+ result['changed'] = True
+ result['shutdown_command'] = shutdown_result['shutdown_command']
+
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/doas.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/doas.py
new file mode 100644
index 00000000..d7f4ad81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/doas.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: doas
+ short_description: Do As user
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the doas utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: doas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_doas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DOAS_USER
+ become_exe:
+ description: Doas executable
+ default: doas
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: doas_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_doas_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DOAS_EXE
+ become_flags:
+ description: Options to pass to doas
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: doas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_doas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DOAS_FLAGS
+ become_pass:
+ description: password for doas prompt
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_doas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DOAS_PASS
+ ini:
+ - section: doas_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+            - If empty, we will use the built-in one
+ default: []
+ ini:
+ - section: doas_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_doas_prompt_l10n
+ env:
+ - name: ANSIBLE_DOAS_PROMPT_L10N
+'''
+
+import re
+
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.doas'
+
+ # messages for detecting prompted password issues
+ fail = ('Permission denied',)
+ missing = ('Authorization required',)
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ # FIXME: more accurate would be: 'doas (%s@' % remote_user
+ # however become plugins don't have that information currently
+ b_prompts = [to_bytes(p) for p in self.get_option('prompt_l10n')] or [br'doas \(', br'Password:']
+ b_prompt = b"|".join(b_prompts)
+
+ return bool(re.match(b_prompt, b_output))
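+
+    # Illustrative outputs the default prompts above would match (output
+    # shapes are assumptions, not captured from doas itself):
+    #   b'doas (alice@web1) password: '  matches  br'doas \('
+    #   b'Password: '                    matches  br'Password:'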
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ self.prompt = True
+
+ become_exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ if not self.get_option('become_pass') and '-n' not in flags:
+ flags += ' -n'
+
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+
+ success_cmd = self._build_success_command(cmd, shell, noexe=True)
+ executable = getattr(shell, 'executable', shell.SHELL_FAMILY)
+
+ return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd)
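+
+# Illustrative result (assumed values; the key is generated per run): for
+# become_user=root, no become_pass and cmd='/bin/ls', the command built above
+# is shaped like:
+#   doas  -n -u root /bin/sh -c 'echo BECOME-SUCCESS-xyz; /bin/ls'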
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/dzdo.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/dzdo.py
new file mode 100644
index 00000000..a0ff4c05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/dzdo.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: dzdo
+ short_description: Centrify's Direct Authorize
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the dzdo utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: dzdo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_dzdo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DZDO_USER
+ become_exe:
+ description: Dzdo executable
+ default: dzdo
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: dzdo_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_dzdo_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DZDO_EXE
+ become_flags:
+ description: Options to pass to dzdo
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: dzdo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_dzdo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DZDO_FLAGS
+ become_pass:
+        description: Password for dzdo
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_dzdo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DZDO_PASS
+ ini:
+ - section: dzdo_become_plugin
+ key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.dzdo'
+
+ # messages for detecting prompted password issues
+ fail = ('Sorry, try again.',)
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ becomecmd = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ if self.get_option('become_pass'):
+ self.prompt = '[dzdo via ansible, key=%s] password:' % self._id
+ flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt)
+
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+
+ return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
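+
+# Illustrative result (assumed values; the key is generated per run): with
+# become_pass set, '-n' is stripped from the default flags and a matching
+# prompt is injected, giving a command shaped like:
+#   dzdo -H -S  -p "[dzdo via ansible, key=abc123] password:" -u root /bin/sh -c 'echo BECOME-SUCCESS-xyz; /bin/ls'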
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/ksu.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/ksu.py
new file mode 100644
index 00000000..d81b7a11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/ksu.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: ksu
+ short_description: Kerberos substitute user
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the ksu utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: ksu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_ksu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_KSU_USER
+ required: True
+ become_exe:
+        description: Ksu executable
+ default: ksu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: ksu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_ksu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_KSU_EXE
+ become_flags:
+ description: Options to pass to ksu
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: ksu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_ksu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_KSU_FLAGS
+ become_pass:
+ description: ksu password
+ required: False
+ vars:
+ - name: ansible_ksu_pass
+ - name: ansible_become_pass
+ - name: ansible_become_password
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_KSU_PASS
+ ini:
+ - section: ksu_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+            - If empty, we will use the built-in one
+ default: []
+ ini:
+ - section: ksu_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_ksu_prompt_l10n
+ env:
+ - name: ANSIBLE_KSU_PROMPT_L10N
+'''
+
+import re
+
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.ksu'
+
+ # messages for detecting prompted password issues
+ fail = ('Password incorrect',)
+ missing = ('No password given',)
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ prompts = self.get_option('prompt_l10n') or ["Kerberos password for .*@.*:"]
+ b_prompt = b"|".join(to_bytes(p) for p in prompts)
+
+ return bool(re.match(b_prompt, b_output))
+
+ def build_become_command(self, cmd, shell):
+
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+        # Prompt handling for ``ksu`` is more complicated; this
+        # is used to satisfy the connection plugin.
+ self.prompt = True
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell))
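+
+# Illustrative result (assumed values; the key is generated per run): for
+# become_user=root and empty flags, the command built above is shaped like:
+#   ksu root  -e /bin/sh -c 'echo BECOME-SUCCESS-xyz; /bin/ls'
+# Note that ksu takes the user before the flags, and '-e' introduces the
+# command to execute.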
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/machinectl.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/machinectl.py
new file mode 100644
index 00000000..6751f9b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/machinectl.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: machinectl
+ short_description: Systemd's machinectl privilege escalation
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the machinectl utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: machinectl_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_machinectl_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_MACHINECTL_USER
+ become_exe:
+ description: Machinectl executable
+ default: machinectl
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: machinectl_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_machinectl_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_MACHINECTL_EXE
+ become_flags:
+ description: Options to pass to machinectl
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: machinectl_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_machinectl_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_MACHINECTL_FLAGS
+ become_pass:
+ description: Password for machinectl
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_machinectl_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_MACHINECTL_PASS
+ ini:
+ - section: machinectl_become_plugin
+ key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.machinectl'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s -q shell %s %s@ %s' % (become, flags, user, cmd)
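+
+# Illustrative result (assumed values): for become_user=root, empty flags and
+# cmd='/bin/ls', the command built above is shaped like:
+#   machinectl -q shell  root@ /bin/ls
+# Note that machinectl is handed the raw command here, not a BECOME-SUCCESS
+# wrapped one as in the other become plugins above.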
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/pbrun.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/pbrun.py
new file mode 100644
index 00000000..9d64ff6a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/pbrun.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: pbrun
+ short_description: PowerBroker run
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the pbrun utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pbrun_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pbrun_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PBRUN_USER
+ become_exe:
+            description: pbrun executable
+ default: pbrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pbrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pbrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PBRUN_EXE
+ become_flags:
+ description: Options to pass to pbrun
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pbrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pbrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PBRUN_FLAGS
+ become_pass:
+ description: Password for pbrun
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pbrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PBRUN_PASS
+ ini:
+ - section: pbrun_become_plugin
+ key: password
+ wrap_exe:
+            description: Toggle to control whether the command passed to pbrun is wrapped in 'shell -c'
+ default: False
+ type: bool
+ ini:
+ - section: pbrun_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pbrun_wrap_execution
+ env:
+ - name: ANSIBLE_PBRUN_WRAP_EXECUTION
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pbrun'
+
+ prompt = 'Password:'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become_exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+ noexe = not self.get_option('wrap_exe')
+
+ return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)])
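+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): the '-u'
+# argument is only added when become_user is set, and wrap_exe controls whether
+# the wrapped command gets a 'shell -c' prefix. Values below are assumptions.
+if __name__ == '__main__':
+    become_exe, flags, become_user = 'pbrun', '', 'root'  # hypothetical values
+    user = '-u %s' % become_user if become_user else ''
+    print(' '.join([become_exe, flags, user, '/bin/echo hi']))
+    # -> pbrun  -u root /bin/echo hi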
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/pfexec.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/pfexec.py
new file mode 100644
index 00000000..d86af6e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/pfexec.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: pfexec
+ short_description: profile based execution
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the pfexec utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description:
+ - User you 'become' to execute the task
+            - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out,
+              but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pfexec_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pfexec_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PFEXEC_USER
+ become_exe:
+            description: pfexec executable
+ default: pfexec
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pfexec_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pfexec_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PFEXEC_EXE
+ become_flags:
+ description: Options to pass to pfexec
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pfexec_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pfexec_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PFEXEC_FLAGS
+ become_pass:
+ description: pfexec password
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pfexec_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PFEXEC_PASS
+ ini:
+ - section: pfexec_become_plugin
+ key: password
+ wrap_exe:
+            description: Toggle to control whether the command passed to pfexec is wrapped in 'shell -c'
+ default: False
+ type: bool
+ ini:
+ - section: pfexec_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pfexec_wrap_execution
+ env:
+ - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
+ notes:
+        - This plugin ignores I(become_user) as pfexec uses its own C(exec_attr) to figure this out.
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pfexec'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ noexe = not self.get_option('wrap_exe')
+ return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/pmrun.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/pmrun.py
new file mode 100644
index 00000000..52fc3360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/pmrun.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: pmrun
+ short_description: Privilege Manager run
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the pmrun utility.
+ author: ansible (@core)
+ options:
+ become_exe:
+            description: pmrun executable
+ default: pmrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pmrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pmrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PMRUN_EXE
+ become_flags:
+ description: Options to pass to pmrun
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pmrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pmrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PMRUN_FLAGS
+ become_pass:
+ description: pmrun password
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pmrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PMRUN_PASS
+ ini:
+ - section: pmrun_become_plugin
+ key: password
+ notes:
+ - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user.
+'''
+
+from ansible.plugins.become import BecomeBase
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pmrun'
+ prompt = 'Enter UPM user password:'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell)))
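+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): unlike most
+# of the become plugins here, pmrun shell-quotes the whole wrapped command, so
+# embedded quotes and spaces survive. The command below is a stand-in.
+if __name__ == '__main__':
+    print('%s %s %s' % ('pmrun', '', shlex_quote('echo "hi there"')))
+    # -> pmrun  'echo "hi there"'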
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/sesu.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/sesu.py
new file mode 100644
index 00000000..b56e6ee2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/sesu.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: sesu
+ short_description: CA Privileged Access Manager
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the sesu utility.
+ author: ansible (@nekonyuu)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sesu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sesu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SESU_USER
+ become_exe:
+ description: sesu executable
+ default: sesu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: sesu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_sesu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SESU_EXE
+ become_flags:
+ description: Options to pass to sesu
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sesu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sesu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SESU_FLAGS
+ become_pass:
+ description: Password to pass to sesu
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sesu_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SESU_PASS
+ ini:
+ - section: sesu_become_plugin
+ key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.sesu'
+
+ prompt = 'Please enter your password:'
+ fail = missing = ('Sorry, try again with sesu.',)
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell))
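+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): sesu takes
+# the target user positionally and the command after '-c'. The flag values are
+# the plugin defaults; the user and command are assumptions.
+if __name__ == '__main__':
+    become, flags, user = 'sesu', '-H -S -n', 'root'
+    print('%s %s %s -c %s' % (become, flags, user, '/bin/echo hi'))
+    # -> sesu -H -S -n root -c /bin/echo hi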
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/memcached.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/memcached.py
new file mode 100644
index 00000000..3cf670d7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/memcached.py
@@ -0,0 +1,248 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ cache: memcached
+ short_description: Use memcached DB for cache
+ description:
+        - This cache uses JSON-formatted, per-host records saved in memcached.
+ requirements:
+ - memcache (python lib)
+ options:
+ _uri:
+ description:
+ - List of connection information for the memcached DBs
+ default: ['127.0.0.1:11211']
+ type: list
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import collections
+import os
+import time
+from multiprocessing import Lock
+from itertools import chain
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.common._collections_compat import MutableSet
+from ansible.plugins.cache import BaseCacheModule
+from ansible.utils.display import Display
+
+try:
+ import memcache
+ HAS_MEMCACHE = True
+except ImportError:
+ HAS_MEMCACHE = False
+
+display = Display()
+
+
+class ProxyClientPool(object):
+ """
+ Memcached connection pooling for thread/fork safety. Inspired by py-redis
+ connection pool.
+
+ Available connections are maintained in a deque and released in a FIFO manner.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.max_connections = kwargs.pop('max_connections', 1024)
+ self.connection_args = args
+ self.connection_kwargs = kwargs
+ self.reset()
+
+ def reset(self):
+ self.pid = os.getpid()
+ self._num_connections = 0
+ self._available_connections = collections.deque(maxlen=self.max_connections)
+ self._locked_connections = set()
+ self._lock = Lock()
+
+ def _check_safe(self):
+ if self.pid != os.getpid():
+ with self._lock:
+ if self.pid == os.getpid():
+ # bail out - another thread already acquired the lock
+ return
+ self.disconnect_all()
+ self.reset()
+
+ def get_connection(self):
+ self._check_safe()
+ try:
+ connection = self._available_connections.popleft()
+ except IndexError:
+ connection = self.create_connection()
+ self._locked_connections.add(connection)
+ return connection
+
+ def create_connection(self):
+ if self._num_connections >= self.max_connections:
+ raise RuntimeError("Too many memcached connections")
+ self._num_connections += 1
+ return memcache.Client(*self.connection_args, **self.connection_kwargs)
+
+ def release_connection(self, connection):
+ self._check_safe()
+ self._locked_connections.remove(connection)
+ self._available_connections.append(connection)
+
+ def disconnect_all(self):
+ for conn in chain(self._available_connections, self._locked_connections):
+ conn.disconnect_all()
+
+ def __getattr__(self, name):
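+        # Editorial note: proxy any unknown attribute access as a
+        # memcache.Client method call, checking a connection out of the
+        # pool for the duration of each call.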
+ def wrapped(*args, **kwargs):
+ return self._proxy_client(name, *args, **kwargs)
+ return wrapped
+
+ def _proxy_client(self, name, *args, **kwargs):
+ conn = self.get_connection()
+
+ try:
+ return getattr(conn, name)(*args, **kwargs)
+ finally:
+ self.release_connection(conn)
+
+
+class CacheModuleKeys(MutableSet):
+ """
+ A set subclass that keeps track of insertion time and persists
+ the set in memcached.
+ """
+ PREFIX = 'ansible_cache_keys'
+
+ def __init__(self, cache, *args, **kwargs):
+ self._cache = cache
+ self._keyset = dict(*args, **kwargs)
+
+ def __contains__(self, key):
+ return key in self._keyset
+
+ def __iter__(self):
+ return iter(self._keyset)
+
+ def __len__(self):
+ return len(self._keyset)
+
+ def add(self, key):
+ self._keyset[key] = time.time()
+ self._cache.set(self.PREFIX, self._keyset)
+
+ def discard(self, key):
+ del self._keyset[key]
+ self._cache.set(self.PREFIX, self._keyset)
+
+ def remove_by_timerange(self, s_min, s_max):
+        for k in list(self._keyset.keys()):  # copy the keys: we may delete while iterating
+ t = self._keyset[k]
+ if s_min < t < s_max:
+ del self._keyset[k]
+ self._cache.set(self.PREFIX, self._keyset)
+
+
+class CacheModule(BaseCacheModule):
+
+ def __init__(self, *args, **kwargs):
+ connection = ['127.0.0.1:11211']
+
+ try:
+ super(CacheModule, self).__init__(*args, **kwargs)
+ if self.get_option('_uri'):
+ connection = self.get_option('_uri')
+ self._timeout = self.get_option('_timeout')
+ self._prefix = self.get_option('_prefix')
+ except KeyError:
+ display.deprecated('Rather than importing CacheModules directly, '
+ 'use ansible.plugins.loader.cache_loader',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if C.CACHE_PLUGIN_CONNECTION:
+ connection = C.CACHE_PLUGIN_CONNECTION.split(',')
+ self._timeout = C.CACHE_PLUGIN_TIMEOUT
+ self._prefix = C.CACHE_PLUGIN_PREFIX
+
+ if not HAS_MEMCACHE:
+ raise AnsibleError("python-memcached is required for the memcached fact cache")
+
+ self._cache = {}
+ self._db = ProxyClientPool(connection, debug=0)
+ self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])
+
+ def _make_key(self, key):
+ return "{0}{1}".format(self._prefix, key)
+
+ def _expire_keys(self):
+ if self._timeout > 0:
+ expiry_age = time.time() - self._timeout
+ self._keys.remove_by_timerange(0, expiry_age)
+
+ def get(self, key):
+ if key not in self._cache:
+ value = self._db.get(self._make_key(key))
+ # guard against the key not being removed from the keyset;
+ # this could happen in cases where the timeout value is changed
+ # between invocations
+ if value is None:
+ self.delete(key)
+ raise KeyError
+ self._cache[key] = value
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+ self._db.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
+ self._cache[key] = value
+ self._keys.add(key)
+
+ def keys(self):
+ self._expire_keys()
+ return list(iter(self._keys))
+
+ def contains(self, key):
+ self._expire_keys()
+ return key in self._keys
+
+ def delete(self, key):
+ del self._cache[key]
+ self._db.delete(self._make_key(key))
+ self._keys.discard(key)
+
+ def flush(self):
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ return self._keys.copy()
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
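+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): loading the
+# plugin via the loader, as the deprecation message above recommends. Assumes
+# ansible and python-memcached are installed and a memcached is reachable on
+# the default 127.0.0.1:11211.
+if __name__ == '__main__':
+    from ansible.plugins.loader import cache_loader
+    cache = cache_loader.get('community.general.memcached')
+    cache.set('example_host', {'ansible_facts': {'os': 'Linux'}})
+    print(cache.get('example_host'))
+    cache.delete('example_host')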
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/pickle.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/pickle.py
new file mode 100644
index 00000000..80b00b4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/pickle.py
@@ -0,0 +1,67 @@
+# (c) 2017, Brian Coca
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: pickle
+ short_description: Pickle formatted files.
+ description:
+        - This cache uses Python's pickle serialization format, in per-host files, saved to the filesystem.
+ author: Brian Coca (@bcoca)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+'''
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from ansible.module_utils.six import PY3
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by pickle files.
+ """
+
+ def _load(self, filepath):
+ # Pickle is a binary format
+ with open(filepath, 'rb') as f:
+ if PY3:
+ return pickle.load(f, encoding='bytes')
+ else:
+ return pickle.load(f)
+
+ def _dump(self, value, filepath):
+ with open(filepath, 'wb') as f:
+ # Use pickle protocol 2 which is compatible with Python 2.3+.
+ pickle.dump(value, f, protocol=2)
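+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): a _load and
+# _dump round-trip, bypassing BaseFileCacheModule's option plumbing since
+# neither method touches instance state.
+if __name__ == '__main__':
+    import tempfile
+    mod = CacheModule.__new__(CacheModule)
+    path = tempfile.mktemp()
+    mod._dump({'ansible_facts': {'answer': 42}}, path)
+    print(mod._load(path))  # -> {'ansible_facts': {'answer': 42}}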
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/redis.py
new file mode 100644
index 00000000..fe41c4c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/redis.py
@@ -0,0 +1,233 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ cache: redis
+ short_description: Use Redis DB for cache
+ description:
+        - This cache uses JSON-formatted, per-host records saved in Redis.
+ requirements:
+ - redis>=2.4.5 (python lib)
+ options:
+ _uri:
+ description:
+ - A colon separated string of connection information for Redis.
+ - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
+ - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
+ - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
+ required: True
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _keyset_name:
+            description: User-defined name for the cache keyset.
+ default: ansible_cache_keys
+ env:
+ - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
+ ini:
+ - key: fact_caching_redis_keyset_name
+ section: defaults
+ version_added: 1.3.0
+ _sentinel_service_name:
+ description: The redis sentinel service name (or referenced as cluster name).
+ env:
+ - name: ANSIBLE_CACHE_REDIS_SENTINEL
+ ini:
+ - key: fact_caching_redis_sentinel
+ section: defaults
+ version_added: 1.3.0
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import time
+import json
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseCacheModule
+from ansible.utils.display import Display
+
+try:
+ from redis import StrictRedis, VERSION
+ HAS_REDIS = True
+except ImportError:
+ HAS_REDIS = False
+
+display = Display()
+
+
+class CacheModule(BaseCacheModule):
+ """
+ A caching module backed by redis.
+
+ Keys are maintained in a zset with their score being the timestamp
+ when they are inserted. This allows for the usage of 'zremrangebyscore'
+    to expire keys. This mechanism is used instead of a pattern-matched 'scan'
+    for performance.
+ """
+ _sentinel_service_name = None
+
+ def __init__(self, *args, **kwargs):
+ uri = ''
+
+ try:
+ super(CacheModule, self).__init__(*args, **kwargs)
+ if self.get_option('_uri'):
+ uri = self.get_option('_uri')
+ self._timeout = float(self.get_option('_timeout'))
+ self._prefix = self.get_option('_prefix')
+ self._keys_set = self.get_option('_keyset_name')
+ self._sentinel_service_name = self.get_option('_sentinel_service_name')
+ except KeyError:
+ display.deprecated('Rather than importing CacheModules directly, '
+ 'use ansible.plugins.loader.cache_loader',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if C.CACHE_PLUGIN_CONNECTION:
+ uri = C.CACHE_PLUGIN_CONNECTION
+ self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
+ self._prefix = C.CACHE_PLUGIN_PREFIX
+ self._keys_set = 'ansible_cache_keys'
+
+ if not HAS_REDIS:
+ raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
+
+ self._cache = {}
+ kw = {}
+
+ # tls connection
+ tlsprefix = 'tls://'
+ if uri.startswith(tlsprefix):
+ kw['ssl'] = True
+ uri = uri[len(tlsprefix):]
+
+ # redis sentinel connection
+ if self._sentinel_service_name:
+ self._db = self._get_sentinel_connection(uri, kw)
+ # normal connection
+ else:
+ connection = uri.split(':')
+ self._db = StrictRedis(*connection, **kw)
+
+ display.vv('Redis connection: %s' % self._db)
+
+ def _get_sentinel_connection(self, uri, kw):
+ """
+ get sentinel connection details from _uri
+ """
+ try:
+ from redis.sentinel import Sentinel
+ except ImportError:
+ raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.")
+
+ if ';' not in uri:
+ raise AnsibleError('_uri does not have sentinel syntax.')
+
+ # format: "localhost:26379;localhost2:26379;0:changeme"
+ connections = uri.split(';')
+ connection_args = connections.pop(-1)
+        if len(connection_args) > 0:  # handle the case where no db number is given
+ connection_args = connection_args.split(':')
+ kw['db'] = connection_args.pop(0)
+ try:
+ kw['password'] = connection_args.pop(0)
+ except IndexError:
+ pass # password is optional
+
+ sentinels = [tuple(shost.split(':')) for shost in connections]
+ display.vv('\nUsing redis sentinels: %s' % sentinels)
+ scon = Sentinel(sentinels, **kw)
+ try:
+ return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
+ except Exception as exc:
+ raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
+
+ def _make_key(self, key):
+ return self._prefix + key
+
+ def get(self, key):
+
+ if key not in self._cache:
+ value = self._db.get(self._make_key(key))
+ # guard against the key not being removed from the zset;
+ # this could happen in cases where the timeout value is changed
+ # between invocations
+ if value is None:
+ self.delete(key)
+ raise KeyError
+ self._cache[key] = json.loads(value, cls=AnsibleJSONDecoder)
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+
+ value2 = json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
+ if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
+ self._db.setex(self._make_key(key), int(self._timeout), value2)
+ else:
+ self._db.set(self._make_key(key), value2)
+
+ if VERSION[0] == 2:
+ self._db.zadd(self._keys_set, time.time(), key)
+ else:
+ self._db.zadd(self._keys_set, {key: time.time()})
+ self._cache[key] = value
+
+ def _expire_keys(self):
+ if self._timeout > 0:
+ expiry_age = time.time() - self._timeout
+ self._db.zremrangebyscore(self._keys_set, 0, expiry_age)
+
+ def keys(self):
+ self._expire_keys()
+ return self._db.zrange(self._keys_set, 0, -1)
+
+ def contains(self, key):
+ self._expire_keys()
+ return (self._db.zrank(self._keys_set, key) is not None)
+
+ def delete(self, key):
+ if key in self._cache:
+ del self._cache[key]
+ self._db.delete(self._make_key(key))
+ self._db.zrem(self._keys_set, key)
+
+ def flush(self):
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ # TODO: there is probably a better way to do this in redis
+ ret = dict()
+ for key in self.keys():
+ ret[key] = self.get(key)
+ return ret
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
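+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): how a plain
+# (non-sentinel) _uri is consumed; 'changeme' is a placeholder password.
+if __name__ == '__main__':
+    uri = 'localhost:6379:0:changeme'
+    connection = uri.split(':')  # host, port, db, password
+    print(connection)  # these are passed positionally: StrictRedis(*connection)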
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/yaml.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/yaml.py
new file mode 100644
index 00000000..e4c495be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/yaml.py
@@ -0,0 +1,64 @@
+# (c) 2017, Brian Coca
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: yaml
+ short_description: YAML formatted files.
+ description:
+        - This cache uses YAML-formatted, per-host files saved to the filesystem.
+ author: Brian Coca (@bcoca)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+
+import codecs
+
+import yaml
+
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by yaml files.
+ """
+
+ def _load(self, filepath):
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return AnsibleLoader(f).get_single_data()
+
+ def _dump(self, value, filepath):
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
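+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): unlike the
+# pickle backend, the files written here are human-readable YAML.
+if __name__ == '__main__':
+    import tempfile
+    mod = CacheModule.__new__(CacheModule)  # _load/_dump don't use instance state
+    path = tempfile.mktemp()
+    mod._dump({'ansible_facts': {'distro': 'Debian'}}, path)
+    with codecs.open(path, 'r', encoding='utf-8') as f:
+        print(f.read())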
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/actionable.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/actionable.py
new file mode 100644
index 00000000..8309a846
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/actionable.py
@@ -0,0 +1,61 @@
+# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: actionable
+ type: stdout
+ short_description: shows only items that need attention
+ description:
+      - Use this callback when you don't care about OK or Skipped results.
+      - This callback suppresses any status that is not Failed or Changed.
+ deprecated:
+ why: The 'default' callback plugin now supports this functionality
+ removed_in: '2.0.0' # was Ansible 2.11
+ alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout callback in configuration
+ # Override defaults from 'default' callback plugin
+ options:
+ display_skipped_hosts:
+ name: Show skipped hosts
+ description: "Toggle to control displaying skipped task/host results in a task"
+ type: bool
+ default: no
+ env:
+ - name: DISPLAY_SKIPPED_HOSTS
+ deprecated:
+ why: environment variables without "ANSIBLE_" prefix are deprecated
+ version: "2.0.0" # was Ansible 2.12
+ alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
+ - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
+ ini:
+ - key: display_skipped_hosts
+ section: defaults
+ display_ok_hosts:
+ name: Show 'ok' hosts
+ description: "Toggle to control displaying 'ok' task/host results in a task"
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_DISPLAY_OK_HOSTS
+ ini:
+ - key: display_ok_hosts
+ section: defaults
+'''
+
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.actionable'
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
new file mode 100644
index 00000000..a6dace8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: cgroup_memory_recap
+ type: aggregate
+ requirements:
+ - whitelist in configuration
+ - cgroups
+ short_description: Profiles maximum memory usage of tasks and full execution using cgroups
+ description:
+ - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups
+ notes:
+ - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...)
+ - This cgroup should only be used by ansible to get accurate results
+ - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile)
+ options:
+ max_mem_file:
+ required: True
+ description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes)
+ env:
+ - name: CGROUP_MAX_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: max_mem_file
+ cur_mem_file:
+ required: True
+ description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes)
+ env:
+ - name: CGROUP_CUR_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: cur_mem_file
+'''
+
+import time
+import threading
+
+from ansible.plugins.callback import CallbackBase
+
+
+class MemProf(threading.Thread):
+ """Python thread for recording memory usage"""
+ def __init__(self, path, obj=None):
+ threading.Thread.__init__(self)
+ self.obj = obj
+ self.path = path
+ self.results = []
+ self.running = True
+
+ def run(self):
+ while self.running:
+ with open(self.path) as f:
+ val = f.read()
+ self.results.append(int(val.strip()) / 1024 / 1024)
+ time.sleep(0.001)
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.cgroup_memory_recap'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display)
+
+ self._task_memprof = None
+
+ self.task_results = []
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.cgroup_max_file = self.get_option('max_mem_file')
+ self.cgroup_current_file = self.get_option('cur_mem_file')
+
+ with open(self.cgroup_max_file, 'w+') as f:
+ f.write('0')
+
+ def _profile_memory(self, obj=None):
+ prev_task = None
+ results = None
+ try:
+ self._task_memprof.running = False
+ results = self._task_memprof.results
+ prev_task = self._task_memprof.obj
+ except AttributeError:
+ pass
+
+ if obj is not None:
+ self._task_memprof = MemProf(self.cgroup_current_file, obj=obj)
+ self._task_memprof.start()
+
+ if results is not None:
+ self.task_results.append((prev_task, max(results)))
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._profile_memory(task)
+
+ def v2_playbook_on_stats(self, stats):
+ self._profile_memory()
+
+ with open(self.cgroup_max_file) as f:
+ max_results = int(f.read().strip()) / 1024 / 1024
+
+ self._display.banner('CGROUP MEMORY RECAP')
+ self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results)
+
+ for task, memory in self.task_results:
+ self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory))
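+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): MemProf just
+# polls a file of byte counts, so it can be exercised against any file; the temp
+# file below stands in for the cgroup's memory.usage_in_bytes.
+if __name__ == '__main__':
+    import os
+    import tempfile
+    with tempfile.NamedTemporaryFile(delete=False) as tmp:
+        tmp.write(b'1048576')  # pretend the cgroup reports 1 MiB
+    prof = MemProf(tmp.name)
+    prof.start()
+    time.sleep(0.05)
+    prof.running = False
+    prof.join()
+    os.unlink(tmp.name)
+    print('max: %0.2fMB' % max(prof.results))  # -> max: 1.00MB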
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/context_demo.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/context_demo.py
new file mode 100644
index 00000000..d134f616
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/context_demo.py
@@ -0,0 +1,53 @@
+# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: context_demo
+ type: aggregate
+ short_description: demo callback that adds play/task context
+ description:
+ - Displays some play and task context along with normal output
+ - This is mostly for demo purposes
+ requirements:
+ - whitelist in configuration
+'''
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ This is a very trivial example of how any callback function can get at play and task objects.
+ play will be 'None' for runner invocations, and task will be None for 'setup' invocations.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.context_demo'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, *args, **kwargs):
+ super(CallbackModule, self).__init__(*args, **kwargs)
+ self.task = None
+ self.play = None
+
+ def v2_on_any(self, *args, **kwargs):
+ self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task))
+
+ self._display.display(" --- ARGS ")
+ for i, a in enumerate(args):
+ self._display.display(' %s: %s' % (i, a))
+
+ self._display.display(" --- KWARGS ")
+ for k in kwargs:
+ self._display.display(' %s: %s' % (k, kwargs[k]))
+
+ def v2_playbook_on_play_start(self, play):
+ self.play = play
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.task = task
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/counter_enabled.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/counter_enabled.py
new file mode 100644
index 00000000..89c8b0f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/counter_enabled.py
@@ -0,0 +1,248 @@
+# (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+ Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: counter_enabled
+ type: stdout
+ short_description: adds counters to the output items (tasks and hosts/task)
+ description:
+      - Use this callback when you need a kind of progress bar in a large environment.
+      - You will know how many tasks the playbook has to run, and which one is actually running.
+      - You will know how many hosts may run a task, and which of them is actually running.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
+'''
+
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+from ansible.template import Templar
+from ansible.playbook.task_include import TaskInclude
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+    This is the counter_enabled callback interface, which prints messages
+    to stdout with task and host counters when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.counter_enabled'
+
+ _task_counter = 1
+ _task_total = 0
+ _host_counter = 1
+ _host_total = 0
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self._playbook = ""
+ self._play = ""
+
+ def _all_vars(self, host=None, task=None):
+ # host and task need to be specified in case 'magic variables' (host vars, group vars, etc)
+ # need to be loaded as well
+ return self._play.get_variable_manager().get_vars(
+ play=self._play,
+ host=host,
+ task=task
+ )
+
+ def v2_playbook_on_start(self, playbook):
+ self._playbook = playbook
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if not name:
+ msg = u"play"
+ else:
+ msg = u"PLAY [%s]" % name
+
+        self._play = play
+
+        self._display.banner(msg)
+
+ self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
+ self._task_total = len(self._play.get_tasks()[0])
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for host in hosts:
+ stat = stats.summarize(host)
+
+ self._display.display(u"%s : %s %s %s %s %s %s" % (
+ hostcolor(host, stat),
+ colorize(u'ok', stat['ok'], C.COLOR_OK),
+ colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', stat['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', stat['rescued'], C.COLOR_OK),
+ colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
+ screen_only=True
+ )
+
+ self._display.display(u"%s : %s %s %s %s %s %s" % (
+ hostcolor(host, stat, False),
+ colorize(u'ok', stat['ok'], None),
+ colorize(u'changed', stat['changed'], None),
+ colorize(u'unreachable', stat['unreachable'], None),
+ colorize(u'failed', stat['failures'], None),
+ colorize(u'rescued', stat['rescued'], None),
+ colorize(u'ignored', stat['ignored'], None)),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+
+ # print custom stats
+ if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom:
+ # fallback on constants for inherited plugins missing docs
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ args = ''
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be because that is only run on the target
+ # machine and we haven't run it there yet at this time.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ args = ' %s' % args
+ self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
+ self._host_counter = 0
+ self._task_counter += 1
+
+ def v2_runner_on_ok(self, result):
+
+ self._host_counter += 1
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if isinstance(result._task, TaskInclude):
+ return
+ elif result._result.get('changed', False):
+ if delegated_vars:
+ msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ color = C.COLOR_CHANGED
+ else:
+ if delegated_vars:
+ msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ color = C.COLOR_OK
+
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ self._clean_results(result._result, result._task.action)
+
+ if self._run_is_verbose(result):
+ msg += " => %s" % (self._dump_results(result._result),)
+ self._display.display(msg, color=color)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ self._host_counter += 1
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._handle_exception(result._result)
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_ERROR)
+ else:
+ self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_ERROR)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_runner_on_skipped(self, result):
+ self._host_counter += 1
+
+ if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
+
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_runner_on_unreachable(self, result):
+ self._host_counter += 1
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_UNREACHABLE)
+ else:
+ self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_UNREACHABLE)
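+
+
+# Illustrative sketch (editorial, not part of the upstream plugin): what the
+# counters look like in the output; the numbers and names are made up.
+if __name__ == '__main__':
+    print('TASK %d/%d [%s]' % (3, 12, 'Install packages'))  # task 3 of 12
+    print('ok: %d/%d [%s]' % (2, 5, 'web01'))  # host 2 of 5 for this task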
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/dense.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/dense.py
new file mode 100644
index 00000000..bec62279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/dense.py
@@ -0,0 +1,499 @@
+# (c) 2016, Dag Wieers <dag@wieers.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+callback: dense
+type: stdout
+short_description: minimal stdout output
+extends_documentation_fragment:
+- default_callback
+description:
+- When in verbose mode it will act the same as the default callback
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- set as stdout in configuration
+'''
+
+HAS_OD = False
+try:
+ from collections import OrderedDict
+ HAS_OD = True
+except ImportError:
+ pass
+
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+from ansible.utils.color import colorize, hostcolor
+from ansible.utils.display import Display
+
+import sys
+
+display = Display()
+
+
+# Design goals:
+#
+# + On screen there should only be relevant stuff
+# - How far are we? (during run, last line)
+# - What issues occurred
+# - What changes occurred
+# - Diff output (in diff-mode)
+#
+# + If verbosity increases, act as default output
+# So that users can easily switch to default for troubleshooting
+#
+# + Rewrite the output during processing
+# - We use the cursor to indicate where in the task we are.
+# Output after the prompt is the output of the previous task.
+# - If we would clear the line at the start of a task, there would often
+# be no information at all, so we leave it until it gets updated
+#
+# + Use the same color-conventions of Ansible
+#
+# + Ensure the verbose output (-v) is also dense.
+# Remove information that is not essential (eg. timestamps, status)
+
+
+# TODO:
+#
+# + Properly test for terminal capabilities, and fall back to default
+# + Modify Ansible mechanism so we don't need to use sys.stdout directly
+# + Find an elegant solution for progress bar line wrapping
+
+
+# FIXME: Importing constants as C simply does not work, beats me :-/
+# from ansible import constants as C
+class C:
+ COLOR_HIGHLIGHT = 'white'
+ COLOR_VERBOSE = 'blue'
+ COLOR_WARN = 'bright purple'
+ COLOR_ERROR = 'red'
+ COLOR_DEBUG = 'dark gray'
+ COLOR_DEPRECATE = 'purple'
+ COLOR_SKIP = 'cyan'
+ COLOR_UNREACHABLE = 'bright red'
+ COLOR_OK = 'green'
+ COLOR_CHANGED = 'yellow'
+
+
+# Taken from Dstat
+class vt100:
+ black = '\033[0;30m'
+ darkred = '\033[0;31m'
+ darkgreen = '\033[0;32m'
+ darkyellow = '\033[0;33m'
+ darkblue = '\033[0;34m'
+ darkmagenta = '\033[0;35m'
+ darkcyan = '\033[0;36m'
+ gray = '\033[0;37m'
+
+ darkgray = '\033[1;30m'
+ red = '\033[1;31m'
+ green = '\033[1;32m'
+ yellow = '\033[1;33m'
+ blue = '\033[1;34m'
+ magenta = '\033[1;35m'
+ cyan = '\033[1;36m'
+ white = '\033[1;37m'
+
+ blackbg = '\033[40m'
+ redbg = '\033[41m'
+ greenbg = '\033[42m'
+ yellowbg = '\033[43m'
+ bluebg = '\033[44m'
+ magentabg = '\033[45m'
+ cyanbg = '\033[46m'
+ whitebg = '\033[47m'
+
+ reset = '\033[0;0m'
+ bold = '\033[1m'
+ reverse = '\033[2m'
+ underline = '\033[4m'
+
+ clear = '\033[2J'
+# clearline = '\033[K'
+ clearline = '\033[2K'
+ save = '\033[s'
+ restore = '\033[u'
+ save_all = '\0337'
+ restore_all = '\0338'
+ linewrap = '\033[7h'
+ nolinewrap = '\033[7l'
+
+ up = '\033[1A'
+ down = '\033[1B'
+ right = '\033[1C'
+ left = '\033[1D'
+
+
+colors = dict(
+ ok=vt100.darkgreen,
+ changed=vt100.darkyellow,
+ skipped=vt100.darkcyan,
+ ignored=vt100.cyanbg + vt100.red,
+ failed=vt100.darkred,
+ unreachable=vt100.red,
+)
+
+states = ('skipped', 'ok', 'changed', 'failed', 'unreachable')
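+
+# Editorial note (not upstream): the order of `states` encodes severity;
+# _add_host below only ever moves a host's state to a higher index, so a host
+# that failed on one loop item stays 'failed' even if later items report 'ok'.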
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+    This is the dense callback interface, where screen real estate is still valued.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'dense'
+
+ def __init__(self):
+
+ # From CallbackModule
+ self._display = display
+
+ if HAS_OD:
+
+ self.disabled = False
+ self.super_ref = super(CallbackModule, self)
+ self.super_ref.__init__()
+
+ # Attributes to remove from results for more density
+ self.removed_attributes = (
+ # 'changed',
+ 'delta',
+ # 'diff',
+ 'end',
+ 'failed',
+ 'failed_when_result',
+ 'invocation',
+ 'start',
+ 'stdout_lines',
+ )
+
+ # Initiate data structures
+ self.hosts = OrderedDict()
+ self.keep = False
+ self.shown_title = False
+ self.count = dict(play=0, handler=0, task=0)
+ self.type = 'foo'
+
+ # Start immediately on the first line
+ sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+ else:
+ display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
+ self.disabled = True
+
+ def __del__(self):
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+
+ def _add_host(self, result, status):
+ name = result._host.get_name()
+
+ # Add a new status in case a failed task is ignored
+ if status == 'failed' and result._task.ignore_errors:
+ status = 'ignored'
+
+ # Check if we have to update an existing state (when looping over items)
+ if name not in self.hosts:
+ self.hosts[name] = dict(state=status)
+ elif states.index(self.hosts[name]['state']) < states.index(status):
+ self.hosts[name]['state'] = status
+
+ # Store delegated hostname, if needed
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ self.hosts[name]['delegate'] = delegated_vars['ansible_host']
+
+ # Print progress bar
+ self._display_progress(result)
+
+# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
+# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)):
+ # Ensure that tasks with changes/failures stay on-screen
+ if status in ['changed', 'failed', 'unreachable']:
+ self.keep = True
+
+ if self._display.verbosity == 1:
+ # Print task title, if needed
+ self._display_task_banner()
+ self._display_results(result, status)
+
+ def _clean_results(self, result):
+ # Remove non-essential attributes
+ for attr in self.removed_attributes:
+ if attr in result:
+ del(result[attr])
+
+ # Remove empty attributes (list, dict, str)
+ for attr in result.copy():
+ if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
+ if not result[attr]:
+ del(result[attr])
+
+ def _handle_exceptions(self, result):
+ if 'exception' in result:
+ # Remove the exception from the result so it's not shown every time
+ del result['exception']
+
+ if self._display.verbosity == 1:
+ return "An exception occurred during task execution. To see the full traceback, use -vvv."
+
+ def _display_progress(self, result=None):
+ # Always rewrite the complete line
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
+ sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ # Print out each host in its own status-color
+ for name in self.hosts:
+ sys.stdout.write(' ')
+ if self.hosts[name].get('delegate', None):
+ sys.stdout.write(self.hosts[name]['delegate'] + '>')
+ sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
+ sys.stdout.flush()
+
+# if result._result.get('diff', False):
+# sys.stdout.write('\n' + vt100.linewrap)
+ sys.stdout.write(vt100.linewrap)
+
+# self.keep = True
+
+ def _display_task_banner(self):
+ if not self.shown_title:
+ self.shown_title = True
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
+ sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ def _display_results(self, result, status):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ self._clean_results(result._result)
+
+ dump = ''
+ if result._task.action == 'include':
+ return
+ elif status == 'ok':
+ return
+ elif status == 'ignored':
+ dump = self._handle_exceptions(result._result)
+ elif status == 'failed':
+ dump = self._handle_exceptions(result._result)
+ elif status == 'unreachable':
+ dump = result._result['msg']
+
+ if not dump:
+ dump = self._dump_results(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ sys.stdout.write(colors[status] + status + ': ')
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
+ else:
+ sys.stdout.write(result._host.get_name())
+
+ sys.stdout.write(': ' + dump + '\n')
+ sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ if status == 'changed':
+ self._handle_warnings(result._result)
+
+ def v2_playbook_on_play_start(self, play):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
+
+ # Reset at the start of each play
+ self.keep = False
+ self.count.update(dict(handler=0, task=0))
+ self.count['play'] += 1
+ self.play = play
+
+ # Write the next play on screen IN UPPERCASE, and make it permanent
+ name = play.get_name().strip()
+ if not name:
+ name = 'unnamed'
+ sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ else:
+ # Do not clear line, since we want to retain the previous output
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
+
+ # Reset at the start of each task
+ self.keep = False
+ self.shown_title = False
+ self.hosts = OrderedDict()
+ self.task = task
+ self.type = 'task'
+
+ # Enumerate task if not setup (task names are too long for dense output)
+ if task.get_name() != 'setup':
+ self.count['task'] += 1
+
+ # Write the next task on screen (behind the prompt is the previous output)
+ sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
+
+ # Reset at the start of each handler
+ self.keep = False
+ self.shown_title = False
+ self.hosts = OrderedDict()
+ self.task = task
+ self.type = 'handler'
+
+ # Enumerate handler if not setup (handler names may be too long for dense output)
+ if task.get_name() != 'setup':
+ self.count[self.type] += 1
+
+ # Write the next task on screen (behind the prompt is the previous output)
+ sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ # TBD
+ sys.stdout.write('cleanup.')
+ sys.stdout.flush()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._add_host(result, 'failed')
+
+ def v2_runner_on_ok(self, result):
+ if result._result.get('changed', False):
+ self._add_host(result, 'changed')
+ else:
+ self._add_host(result, 'ok')
+
+ def v2_runner_on_skipped(self, result):
+ self._add_host(result, 'skipped')
+
+ def v2_runner_on_unreachable(self, result):
+ self._add_host(result, 'unreachable')
+
+ def v2_runner_on_include(self, included_file):
+ pass
+
+ def v2_runner_on_file_diff(self, result, diff):
+ sys.stdout.write(vt100.bold)
+ self.super_ref.v2_runner_on_file_diff(result, diff)
+ sys.stdout.write(vt100.reset)
+
+ def v2_on_file_diff(self, result):
+ sys.stdout.write(vt100.bold)
+ self.super_ref.v2_on_file_diff(result)
+ sys.stdout.write(vt100.reset)
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_ok(self, result):
+ self.v2_runner_item_on_ok(result)
+
+ def v2_runner_item_on_ok(self, result):
+ if result._result.get('changed', False):
+ self._add_host(result, 'changed')
+ else:
+ self._add_host(result, 'ok')
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_failed(self, result):
+ self.v2_runner_item_on_failed(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self._add_host(result, 'failed')
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_skipped(self, result):
+ self.v2_runner_item_on_skipped(result)
+
+ def v2_runner_item_on_skipped(self, result):
+ self._add_host(result, 'skipped')
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ def v2_playbook_on_include(self, included_file):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+
+ # In normal mode screen output should be sufficient, summary is redundant
+ if self._display.verbosity == 0:
+ return
+
+ sys.stdout.write(vt100.bold + vt100.underline)
+ sys.stdout.write('SUMMARY')
+
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+ self._display.display(
+ u"%s : %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN),
+ ),
+ screen_only=True
+ )
+
+
+# When using -vv or higher, simply do the default action
+if display.verbosity >= 2 or not HAS_OD:
+ CallbackModule = CallbackModule_default
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/diy.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/diy.py
new file mode 100644
index 00000000..d24c9145
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/diy.py
@@ -0,0 +1,1420 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Trevor Highfill <trevor.highfill@outlook.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ callback: diy
+ type: stdout
+ short_description: Customize the output
+ version_added: 0.2.0
+ description:
+ - Callback plugin that allows you to supply your own custom callback templates to be output.
+ author: Trevor Highfill (@theque5t)
+ extends_documentation_fragment:
+ - default_callback
+ notes:
+ - Uses the C(default) callback plugin output when a custom callback message (C(msg)) is not provided.
+ - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options.
+ The dictionary is only available in that templating context; it is not a variable available in the other
+ execution contexts, such as playbook, play, or task.
+ - An option can be set via its variable input only if that variable was set in a context that is available to the
+ respective callback.
+ Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output
+ the top-level variable names available to the callback.
+ - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For example,
+ C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}")
+ - "**Condition** for all C(msg) options:
+ if value C(is None or omit),
+ then the option is not being used.
+ **Effect**: use of the C(default) callback plugin for output"
+ - "**Condition** for all C(msg) options:
+ if value C(is not None and not omit and length is not greater than 0),
+ then the option is being used without output.
+ **Effect**: suppress output"
+ - "**Condition** for all C(msg) options:
+ if value C(is not None and not omit and length is greater than 0),
+ then the option is being used with output.
+ **Effect**: render value as template and output"
+ - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan),
+ C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)"
+ seealso:
+ - name: default – default Ansible screen output
+ description: The official documentation on the B(default) callback plugin.
+ link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html
+ requirements:
+ - set as stdout_callback in configuration
+ options:
+ on_any_msg:
+ description: Output to be used for callback on_any.
+ ini:
+ - section: callback_diy
+ key: on_any_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG
+ vars:
+ - name: ansible_callback_diy_on_any_msg
+ type: str
+
+ on_any_msg_color:
+ description:
+ - Output color to be used for I(on_any_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_any_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_any_msg_color
+ type: str
+
+ runner_on_failed_msg:
+ description: Output to be used for callback runner_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg
+ type: str
+
+ runner_on_failed_msg_color:
+ description:
+ - Output color to be used for I(runner_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg_color
+ type: str
+
+ runner_on_ok_msg:
+ description: Output to be used for callback runner_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg
+ type: str
+
+ runner_on_ok_msg_color:
+ description:
+ - Output color to be used for I(runner_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg_color
+ type: str
+
+ runner_on_skipped_msg:
+ description: Output to be used for callback runner_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg
+ type: str
+
+ runner_on_skipped_msg_color:
+ description:
+ - Output color to be used for I(runner_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg_color
+ type: str
+
+ runner_on_unreachable_msg:
+ description: Output to be used for callback runner_on_unreachable.
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg
+ type: str
+
+ runner_on_unreachable_msg_color:
+ description:
+ - Output color to be used for I(runner_on_unreachable_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg_color
+ type: str
+
+ playbook_on_start_msg:
+ description: Output to be used for callback playbook_on_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg
+ type: str
+
+ playbook_on_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg_color
+ type: str
+
+ playbook_on_notify_msg:
+ description: Output to be used for callback playbook_on_notify.
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg
+ type: str
+
+ playbook_on_notify_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_notify_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg_color
+ type: str
+
+ playbook_on_no_hosts_matched_msg:
+ description: Output to be used for callback playbook_on_no_hosts_matched.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg
+ type: str
+
+ playbook_on_no_hosts_matched_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_no_hosts_matched_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color
+ type: str
+
+ playbook_on_no_hosts_remaining_msg:
+ description: Output to be used for callback playbook_on_no_hosts_remaining.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg
+ type: str
+
+ playbook_on_no_hosts_remaining_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_no_hosts_remaining_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color
+ type: str
+
+ playbook_on_task_start_msg:
+ description: Output to be used for callback playbook_on_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg
+ type: str
+
+ playbook_on_task_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg_color
+ type: str
+
+ playbook_on_handler_task_start_msg:
+ description: Output to be used for callback playbook_on_handler_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg
+ type: str
+
+ playbook_on_handler_task_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_handler_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color
+ type: str
+
+ playbook_on_vars_prompt_msg:
+ description: Output to be used for callback playbook_on_vars_prompt.
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg
+ type: str
+
+ playbook_on_vars_prompt_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_vars_prompt_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color
+ type: str
+
+ playbook_on_play_start_msg:
+ description: Output to be used for callback playbook_on_play_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg
+ type: str
+
+ playbook_on_play_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_play_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg_color
+ type: str
+
+ playbook_on_stats_msg:
+ description: Output to be used for callback playbook_on_stats.
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg
+ type: str
+
+ playbook_on_stats_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_stats_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg_color
+ type: str
+
+ on_file_diff_msg:
+ description: Output to be used for callback on_file_diff.
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg
+ type: str
+
+ on_file_diff_msg_color:
+ description:
+ - Output color to be used for I(on_file_diff_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg_color
+ type: str
+
+ playbook_on_include_msg:
+ description: Output to be used for callback playbook_on_include.
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg
+ type: str
+
+ playbook_on_include_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_include_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg_color
+ type: str
+
+ runner_item_on_ok_msg:
+ description: Output to be used for callback runner_item_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg
+ type: str
+
+ runner_item_on_ok_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg_color
+ type: str
+
+ runner_item_on_failed_msg:
+ description: Output to be used for callback runner_item_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg
+ type: str
+
+ runner_item_on_failed_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg_color
+ type: str
+
+ runner_item_on_skipped_msg:
+ description: Output to be used for callback runner_item_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg
+ type: str
+
+ runner_item_on_skipped_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg_color
+ type: str
+
+ runner_retry_msg:
+ description: Output to be used for callback runner_retry.
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg
+ type: str
+
+ runner_retry_msg_color:
+ description:
+ - Output color to be used for I(runner_retry_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg_color
+ type: str
+
+ runner_on_start_msg:
+ description: Output to be used for callback runner_on_start.
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg
+ type: str
+
+ runner_on_start_msg_color:
+ description:
+ - Output color to be used for I(runner_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg_color
+ type: str
+
+ runner_on_no_hosts_msg:
+ description: Output to be used for callback runner_on_no_hosts.
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg
+ type: str
+
+ runner_on_no_hosts_msg_color:
+ description:
+ - Output color to be used for I(runner_on_no_hosts_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg_color
+ type: str
+
+ playbook_on_setup_msg:
+ description: Output to be used for callback playbook_on_setup.
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg
+ type: str
+
+ playbook_on_setup_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_setup_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg_color
+ type: str
+'''
+
+EXAMPLES = r'''
+ansible.cfg: >
+ # Enable plugin
+ [defaults]
+ stdout_callback=community.general.diy
+
+ [callback_diy]
+ # Output when playbook starts
+ playbook_on_start_msg="DIY output(via ansible.cfg): playbook example: {{ ansible_callback_diy.playbook.file_name }}"
+ playbook_on_start_msg_color=yellow
+
+ # Comment out to allow default plugin output
+ # playbook_on_play_start_msg="PLAY: starting play {{ ansible_callback_diy.play.name }}"
+
+ # Accept on_skipped_msg or ansible_callback_diy_runner_on_skipped_msg as input vars
+ # If neither are supplied, omit the option
+ runner_on_skipped_msg="{{ on_skipped_msg | default(ansible_callback_diy_runner_on_skipped_msg) | default(omit) }}"
+
+ # Newline after every callback
+ # on_any_msg='{{ " " | join("\n") }}'
+
+playbook.yml: >
+ ---
+ - name: "Default plugin output: play example"
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Default plugin output
+ ansible.builtin.debug:
+ msg: default plugin output
+
+ - name: Override from play vars
+ hosts: localhost
+ gather_facts: no
+ vars:
+ ansible_connection: local
+ green: "\e[0m\e[38;5;82m"
+ yellow: "\e[0m\e[38;5;11m"
+ bright_purple: "\e[0m\e[38;5;105m"
+ cyan: "\e[0m\e[38;5;51m"
+ green_bg_black_fg: "\e[0m\e[48;5;40m\e[38;5;232m"
+ yellow_bg_black_fg: "\e[0m\e[48;5;226m\e[38;5;232m"
+ purple_bg_white_fg: "\e[0m\e[48;5;57m\e[38;5;255m"
+ cyan_bg_black_fg: "\e[0m\e[48;5;87m\e[38;5;232m"
+ magenta: "\e[38;5;198m"
+ white: "\e[0m\e[38;5;255m"
+ ansible_callback_diy_playbook_on_play_start_msg: "\n{{green}}DIY output(via play vars): play example: {{magenta}}{{ansible_callback_diy.play.name}}\n\n"
+ ansible_callback_diy_playbook_on_task_start_msg: "DIY output(via play vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_playbook_on_task_start_msg_color: cyan
+ ansible_callback_diy_playbook_on_stats_msg: |+2
+ CUSTOM STATS
+ ==============================
+ {% for key in ansible_callback_diy.stats | sort %}
+ {% if ansible_callback_diy.stats[key] %}
+ {% if key == 'ok' %}
+ {% set color_one = lookup('vars','green_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','green') %}
+ {% elif key == 'changed' %}
+ {% set color_one = lookup('vars','yellow_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','yellow') %}
+ {% elif key == 'processed' %}
+ {% set color_one = lookup('vars','purple_bg_white_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','bright_purple') %}
+ {% elif key == 'skipped' %}
+ {% set color_one = lookup('vars','cyan_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','cyan') %}
+ {% else %}
+ {% set color_one = "" %}
+ {% set prefix = "" %}
+ {% set suffix = "" %}
+ {% set color_two = "" %}
+ {% endif %}
+ {{ color_one }}{{ "%s%s%s" | format(prefix,key,suffix) }}{{ color_two }}: {{ ansible_callback_diy.stats[key] | to_nice_yaml }}
+ {% endif %}
+ {% endfor %}
+
+ tasks:
+ - name: Custom banner with default plugin result output
+ ansible.builtin.debug:
+ msg: "default plugin output: result example"
+
+ - name: Override from task vars
+ ansible.builtin.debug:
+ msg: "example {{ two }}"
+ changed_when: true
+ vars:
+ white_fg_red_bg: "\e[0m\e[48;5;1m"
+ two: "{{ white_fg_red_bg }} 2 "
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_playbook_on_task_start_msg_color: bright magenta
+ ansible_callback_diy_runner_on_ok_msg: "DIY output(via task vars): result example: \n{{ ansible_callback_diy.result.output.msg }}\n"
+ ansible_callback_diy_runner_on_ok_msg_color: "{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"
+
+ - name: Suppress output
+ ansible.builtin.debug:
+ msg: i should not be displayed
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ""
+ ansible_callback_diy_runner_on_ok_msg: ""
+
+ - name: Using alias vars (see ansible.cfg)
+ ansible.builtin.debug:
+ msg:
+ when: False
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ""
+ on_skipped_msg: "DIY output(via task vars): skipped example:\n\e[0m\e[38;5;4m\u25b6\u25b6 {{ ansible_callback_diy.result.task.name }}\n"
+ on_skipped_msg_color: white
+
+ - name: Just stdout
+ ansible.builtin.command: echo some stdout
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\n"
+ ansible_callback_diy_runner_on_ok_msg: "{{ ansible_callback_diy.result.output.stdout }}\n"
+
+ - name: Multiline output
+ ansible.builtin.debug:
+ msg: "{{ multiline }}"
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ multiline: "line\nline\nline"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ some
+ {{ ansible_callback_diy.result.output.msg }}
+ output
+
+ ansible_callback_diy_playbook_on_task_start_msg_color: bright blue
+
+ - name: Indentation
+ ansible.builtin.debug:
+ msg: "{{ item.msg }}"
+ with_items:
+ - { indent: 1, msg: one., color: red }
+ - { indent: 2, msg: two.., color: yellow }
+ - { indent: 3, msg: three..., color: bright yellow }
+ vars:
+ ansible_callback_diy_runner_item_on_ok_msg: "{{ ansible_callback_diy.result.output.msg | indent(item.indent, True) }}"
+ ansible_callback_diy_runner_item_on_ok_msg_color: "{{ item.color }}"
+ ansible_callback_diy_runner_on_ok_msg: "GO!!!"
+ ansible_callback_diy_runner_on_ok_msg_color: bright green
+
+ - name: Using lookup and template as file
+ ansible.builtin.shell: "echo {% raw %}'output from {{ file_name }}'{% endraw %} > {{ file_name }}"
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ file_name: diy_file_template_example
+ ansible_callback_diy_runner_on_ok_msg: "{{ lookup('template', file_name) }}"
+
+ - name: 'Look at top level vars available to the "runner_on_ok" callback'
+ ansible.builtin.debug:
+ msg: ''
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ {% for var in (ansible_callback_diy.top_level_var_names|reject('match','vars|ansible_callback_diy.*')) | sort %}
+ {{ green }}{{ var }}:
+ {{ white }}{{ lookup('vars', var) }}
+
+ {% endfor %}
+ ansible_callback_diy_runner_on_ok_msg_color: white
+
+ - name: 'Look at event data available to the "runner_on_ok" callback'
+ ansible.builtin.debug:
+ msg: ''
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ {% for key in ansible_callback_diy | sort %}
+ {{ green }}{{ key }}:
+ {{ white }}{{ ansible_callback_diy[key] }}
+
+ {% endfor %}
+'''
+
+import sys
+from contextlib import contextmanager
+from ansible import constants as C
+from ansible.playbook.task_include import TaskInclude
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+from ansible.template import Templar
+from ansible.vars.manager import VariableManager
+from ansible.plugins.callback.default import CallbackModule as Default
+from ansible.module_utils._text import to_text
+
+
+class DummyStdout(object):
+ def flush(self):
+ pass
+
+ def write(self, b):
+ pass
+
+ def writelines(self, lines):
+ pass
+
+
+class CallbackModule(Default):
+ """
+ Callback plugin that allows you to supply your own custom callback templates to be output.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.diy'
+
+ DIY_NS = 'ansible_callback_diy'
+
+ @contextmanager
+ def _suppress_stdout(self, enabled):
+ saved_stdout = sys.stdout
+ if enabled:
+ sys.stdout = DummyStdout()
+ try:
+ yield
+ finally:
+ # restore stdout even if the wrapped callback raises
+ sys.stdout = saved_stdout
+
+ def _get_output_specification(self, loader, variables):
+ _ret = {}
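+ # Derive the callback type from the name of the calling method, e.g.
+ # "v2_runner_on_ok" -> "runner_on_ok"; it is then used to look up the
+ # matching "<callback>_msg" and "<callback>_msg_color" options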
+ _calling_method = sys._getframe(1).f_code.co_name
+ _callback_type = (_calling_method[3:] if _calling_method[:3] == "v2_" else _calling_method)
+ _callback_options = ['msg', 'msg_color']
+
+ for option in _callback_options:
+ _option_name = '%s_%s' % (_callback_type, option)
+ _option_template = variables.get(
+ self.DIY_NS + "_" + _option_name,
+ self.get_option(_option_name)
+ )
+ _ret.update({option: self._template(
+ loader=loader,
+ template=_option_template,
+ variables=variables
+ )})
+
+ _ret.update({'vars': variables})
+
+ return _ret
+
+ def _using_diy(self, spec):
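+ # DIY output is in use when a msg template was supplied and it is not the special "omit" placeholder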
+ return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit'])
+
+ def _parent_has_callback(self):
+ return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name)
+
+ def _template(self, loader, template, variables):
+ _templar = Templar(loader=loader, variables=variables)
+ return _templar.template(
+ template,
+ preserve_trailing_newlines=True,
+ convert_data=False,
+ escape_backslashes=True
+ )
+
+ def _output(self, spec, stderr=False):
+ _msg = to_text(spec['msg'])
+ if len(_msg) > 0:
+ self._display.display(msg=_msg, color=spec['msg_color'], stderr=stderr)
+
+ def _get_vars(self, playbook, play=None, host=None, task=None, included_file=None,
+ handler=None, result=None, stats=None, remove_attr_ref_loop=True):
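+ # Build the template variables: all vars for the current context plus the
+ # ansible_callback_diy namespace describing the event (playbook, play, host,
+ # task, handler, result, stats)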
+ def _get_value(obj, attr=None, method=None):
+ if attr:
+ return getattr(obj, attr, getattr(obj, "_" + attr, None))
+
+ if method:
+ _method = getattr(obj, method)
+ return _method()
+
+ def _remove_attr_ref_loop(obj, attributes):
+ _loop_var = getattr(obj, 'loop_control', None)
+ _loop_var = (_loop_var or 'item')
+
+ # iterate over a copy: removing from the list while iterating it would skip elements
+ for attr in list(attributes):
+ if str(_loop_var) in str(_get_value(obj=obj, attr=attr)):
+ attributes.remove(attr)
+
+ return attributes
+
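+ # deepcopy returns the same object, so the DIY namespace is shared rather
+ # than copied when Ansible deep-copies variables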
+ class CallbackDIYDict(dict):
+ def __deepcopy__(self, memo):
+ return self
+
+ _ret = {}
+
+ _variable_manager = VariableManager(loader=playbook.get_loader())
+
+ _all = _variable_manager.get_vars()
+ if play:
+ _all = play.get_variable_manager().get_vars(
+ play=play,
+ host=(host if host else getattr(result, '_host', None)),
+ task=(handler if handler else task)
+ )
+ _ret.update(_all)
+
+ _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()}))
+
+ _ret[self.DIY_NS].update({'playbook': {}})
+ _playbook_attributes = ['entries', 'file_name', 'basedir']
+
+ for attr in _playbook_attributes:
+ _ret[self.DIY_NS]['playbook'].update({attr: _get_value(obj=playbook, attr=attr)})
+
+ if play:
+ _ret[self.DIY_NS].update({'play': {}})
+ _play_attributes = ['any_errors_fatal', 'become', 'become_flags', 'become_method',
+ 'become_user', 'check_mode', 'collections', 'connection',
+ 'debugger', 'diff', 'environment', 'fact_path', 'finalized',
+ 'force_handlers', 'gather_facts', 'gather_subset',
+ 'gather_timeout', 'handlers', 'hosts', 'ignore_errors',
+ 'ignore_unreachable', 'included_conditional', 'included_path',
+ 'max_fail_percentage', 'module_defaults', 'name', 'no_log',
+ 'only_tags', 'order', 'port', 'post_tasks', 'pre_tasks',
+ 'remote_user', 'removed_hosts', 'roles', 'run_once', 'serial',
+ 'skip_tags', 'squashed', 'strategy', 'tags', 'tasks', 'uuid',
+ 'validated', 'vars_files', 'vars_prompt']
+
+ for attr in _play_attributes:
+ _ret[self.DIY_NS]['play'].update({attr: _get_value(obj=play, attr=attr)})
+
+ if host:
+ _ret[self.DIY_NS].update({'host': {}})
+ _host_attributes = ['name', 'uuid', 'address', 'implicit']
+
+ for attr in _host_attributes:
+ _ret[self.DIY_NS]['host'].update({attr: _get_value(obj=host, attr=attr)})
+
+ if task:
+ _ret[self.DIY_NS].update({'task': {}})
+ _task_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val',
+ 'become', 'become_flags', 'become_method', 'become_user',
+ 'changed_when', 'check_mode', 'collections', 'connection',
+ 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff',
+ 'environment', 'failed_when', 'finalized', 'ignore_errors',
+ 'ignore_unreachable', 'loop', 'loop_control', 'loop_with',
+ 'module_defaults', 'name', 'no_log', 'notify', 'parent', 'poll',
+ 'port', 'register', 'remote_user', 'retries', 'role', 'run_once',
+ 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated',
+ 'when']
+
+ # remove arguments that reference a loop var because they cause templating issues in
+ # callbacks that do not have the loop context (e.g. playbook_on_task_start)
+ if task.loop and remove_attr_ref_loop:
+ _task_attributes = _remove_attr_ref_loop(obj=task, attributes=_task_attributes)
+
+ for attr in _task_attributes:
+ _ret[self.DIY_NS]['task'].update({attr: _get_value(obj=task, attr=attr)})
+
+ if included_file:
+ _ret[self.DIY_NS].update({'included_file': {}})
+ _included_file_attributes = ['args', 'filename', 'hosts', 'is_role', 'task']
+
+ for attr in _included_file_attributes:
+ _ret[self.DIY_NS]['included_file'].update({attr: _get_value(
+ obj=included_file,
+ attr=attr
+ )})
+
+ if handler:
+ _ret[self.DIY_NS].update({'handler': {}})
+ _handler_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val',
+ 'become', 'become_flags', 'become_method', 'become_user',
+ 'changed_when', 'check_mode', 'collections', 'connection',
+ 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff',
+ 'environment', 'failed_when', 'finalized', 'ignore_errors',
+ 'ignore_unreachable', 'listen', 'loop', 'loop_control',
+ 'loop_with', 'module_defaults', 'name', 'no_log',
+ 'notified_hosts', 'notify', 'parent', 'poll', 'port',
+ 'register', 'remote_user', 'retries', 'role', 'run_once',
+ 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated',
+ 'when']
+
+ if handler.loop and remove_attr_ref_loop:
+ _handler_attributes = _remove_attr_ref_loop(obj=handler,
+ attributes=_handler_attributes)
+
+ for attr in _handler_attributes:
+ _ret[self.DIY_NS]['handler'].update({attr: _get_value(obj=handler, attr=attr)})
+
+ _ret[self.DIY_NS]['handler'].update({'is_host_notified': handler.is_host_notified(host)})
+
+ if result:
+ _ret[self.DIY_NS].update({'result': {}})
+ _result_attributes = ['host', 'task', 'task_name']
+
+ for attr in _result_attributes:
+ _ret[self.DIY_NS]['result'].update({attr: _get_value(obj=result, attr=attr)})
+
+ _result_methods = ['is_changed', 'is_failed', 'is_skipped', 'is_unreachable']
+
+ for method in _result_methods:
+ _ret[self.DIY_NS]['result'].update({method: _get_value(obj=result, method=method)})
+
+ _ret[self.DIY_NS]['result'].update({'output': getattr(result, '_result', None)})
+
+ _ret.update(result._result)
+
+ if stats:
+ _ret[self.DIY_NS].update({'stats': {}})
+ _stats_attributes = ['changed', 'custom', 'dark', 'failures', 'ignored',
+ 'ok', 'processed', 'rescued', 'skipped']
+
+ for attr in _stats_attributes:
+ _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
+
+ _ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})
+
+ return _ret
+
+ def v2_on_any(self, *args, **kwargs):
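+ # Every v2_* callback below follows the same three steps: build the output
+ # spec from the current variables, emit the DIY output if a msg template is
+ # in use, then run the parent (default) callback with its stdout suppressed
+ # whenever DIY output replaced it. on_any has no event data of its own, so
+ # it reuses the previous spec's variables.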
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_on_any(*args, **kwargs)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec, stderr=(not ignore_errors))
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+
+ def v2_runner_on_ok(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_ok(result)
+
+ def v2_runner_on_skipped(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_skipped(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_unreachable(result)
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_poll(self, result):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_ok(self, result):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_failed(self, result):
+ pass
+
+ def v2_runner_item_on_ok(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_ok(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_failed(result)
+
+ def v2_runner_item_on_skipped(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_skipped(result)
+
+ def v2_runner_retry(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_retry(result)
+
+ def v2_runner_on_start(self, host, task):
+ self._diy_host = host
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ host=self._diy_host,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_start(host, task)
+
+ def v2_playbook_on_start(self, playbook):
+ self._diy_playbook = playbook
+ self._diy_loader = self._diy_playbook.get_loader()
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_start(playbook)
+
+ def v2_playbook_on_notify(self, handler, host):
+ self._diy_handler = handler
+ self._diy_host = host
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ host=self._diy_host,
+ handler=self._diy_handler
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_notify(handler, host)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_no_hosts_matched()
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_no_hosts_remaining()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_cleanup_task_start(self, task):
+ pass
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_handler_task_start(task)
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None,
+ confirm=False, salt_size=None, salt=None, default=None,
+ unsafe=None):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_vars_prompt(
+ varname, private, prompt, encrypt,
+ confirm, salt_size, salt, default,
+ unsafe
+ )
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ pass
+
+ def v2_playbook_on_play_start(self, play):
+ self._diy_play = play
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_play_start(play)
+
+ def v2_playbook_on_stats(self, stats):
+ self._diy_stats = stats
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ stats=self._diy_stats
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_stats(stats)
+
+ def v2_playbook_on_include(self, included_file):
+ self._diy_included_file = included_file
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_included_file._task,
+ included_file=self._diy_included_file
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_include(included_file)
+
+ def v2_on_file_diff(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_on_file_diff(result)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/full_skip.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/full_skip.py
new file mode 100644
index 00000000..9fce6970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/full_skip.py
@@ -0,0 +1,76 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: full_skip
+ type: stdout
+ short_description: suppresses tasks if all hosts skipped
+ description:
+ - Use this plugin when you do not care about any output for tasks that were completely skipped
+ deprecated:
+ why: The 'default' callback plugin now supports this functionality
+ removed_in: '2.0.0' # was Ansible 2.11
+ alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
+
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.full_skip'
+
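+ # Clearing the buffered banner means a task whose results were all skipped
+ # prints nothing at all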
+ def v2_runner_on_skipped(self, result):
+ self.outlines = []
+
+ def v2_playbook_item_on_skipped(self, result):
+ self.outlines = []
+
+ def v2_runner_item_on_skipped(self, result):
+ self.outlines = []
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.display()
+ super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.outlines = []
+ self.outlines.append("TASK [%s]" % task.get_name().strip())
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self.outlines.append("task path: %s" % path)
+
+ def v2_playbook_item_on_ok(self, result):
+ self.display()
+ super(CallbackModule, self).v2_playbook_item_on_ok(result)
+
+ def v2_runner_on_ok(self, result):
+ self.display()
+ super(CallbackModule, self).v2_runner_on_ok(result)
+
+ def display(self):
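+ # First buffered line is the task banner; any remaining lines (e.g. the task path) follow as plain output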
+ if not self.outlines:
+ return
+ (first, rest) = self.outlines[0], self.outlines[1:]
+ self._display.banner(first)
+ for line in rest:
+ self._display.display(line)
+ self.outlines = []
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/hipchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/hipchat.py
new file mode 100644
index 00000000..efe4e94e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/hipchat.py
@@ -0,0 +1,228 @@
+# (C) 2014, Matt Martz <matt@sivel.net>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: hipchat
+ type: notification
+ requirements:
+ - whitelist in configuration.
+ - prettytable (python lib)
+ short_description: post task events to hipchat
+ description:
+ - This callback plugin sends status updates to a HipChat channel during playbook execution.
+ - Before 2.4 only environment variables were available for configuring this plugin.
+ options:
+ token:
+ description: HipChat API token for v1 or v2 API.
+ required: True
+ env:
+ - name: HIPCHAT_TOKEN
+ ini:
+ - section: callback_hipchat
+ key: token
+ api_version:
+ description: HipChat API version, v1 or v2.
+ required: False
+ default: v1
+ env:
+ - name: HIPCHAT_API_VERSION
+ ini:
+ - section: callback_hipchat
+ key: api_version
+ room:
+ description: HipChat room to post in.
+ default: ansible
+ env:
+ - name: HIPCHAT_ROOM
+ ini:
+ - section: callback_hipchat
+ key: room
+ from:
+ description: Name to post as.
+ default: ansible
+ env:
+ - name: HIPCHAT_FROM
+ ini:
+ - section: callback_hipchat
+ key: from
+ notify:
+ description: Add notify flag to important messages.
+ type: bool
+ default: True
+ env:
+ - name: HIPCHAT_NOTIFY
+ ini:
+ - section: callback_hipchat
+ key: notify
+
+'''
+
+import os
+import json
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+
+class CallbackModule(CallbackBase):
+ """This is an example ansible callback plugin that sends status
+ updates to a HipChat channel during playbook execution.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.hipchat'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ API_V1_URL = 'https://api.hipchat.com/v1/rooms/message'
+ API_V2_URL = 'https://api.hipchat.com/v2/'
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ if not HAS_PRETTYTABLE:
+ self.disabled = True
+ self._display.warning('The `prettytable` python module is not installed. '
+ 'Disabling the HipChat callback plugin.')
+ self.printed_playbook = False
+ self.playbook_name = None
+ self.play = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.token = self.get_option('token')
+ self.api_version = self.get_option('api_version')
+ self.from_name = self.get_option('from')
+ self.allow_notify = self.get_option('notify')
+ self.room = self.get_option('room')
+
+ if self.token is None:
+ self.disabled = True
+ self._display.warning('HipChat token could not be loaded. The HipChat '
+ 'token can be provided using the `HIPCHAT_TOKEN` '
+ 'environment variable.')
+
+ # Pick the request handler.
+ if self.api_version == 'v2':
+ self.send_msg = self.send_msg_v2
+ else:
+ self.send_msg = self.send_msg_v1
+
+ def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False):
+ """Method for sending a message to HipChat"""
+
+ headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'}
+
+ body = {}
+ body['room_id'] = self.room
+ body['from'] = self.from_name[:15] # max length is 15
+ body['message'] = msg
+ body['message_format'] = msg_format
+ body['color'] = color
+ body['notify'] = self.allow_notify and notify
+
+ data = json.dumps(body)
+ url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room)
+ try:
+ response = open_url(url, data=data, headers=headers, method='POST')
+ return response.read()
+ except Exception as ex:
+ self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
+
+ def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False):
+ """Method for sending a message to HipChat"""
+
+ params = {}
+ params['room_id'] = self.room
+ params['from'] = self.from_name[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['notify'] = int(self.allow_notify and notify)
+
+ url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token))
+ try:
+ response = open_url(url, data=urlencode(params))
+ return response.read()
+ except Exception as ex:
+ self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Playbook and play start messages"""
+
+ self.play = play
+ name = play.name
+ # This block sends information about a playbook when it starts
+ # The playbook object is not immediately available at
+ # playbook_on_start so we grab it via the play
+ #
+ # Displays info about playbook being started by a person on an
+ # inventory, as well as Tags, Skip Tags and Limits
+ if not self.printed_playbook:
+ self.playbook_name, _ = os.path.splitext(
+ os.path.basename(self.play.playbook.filename))
+ host_list = self.play.playbook.inventory.host_list
+ inventory = os.path.basename(os.path.realpath(host_list))
+ self.send_msg("%s: Playbook initiated by %s against %s" %
+ (self.playbook_name,
+ self.play.playbook.remote_user,
+ inventory), notify=True)
+ self.printed_playbook = True
+ subset = self.play.playbook.inventory._subset
+ skip_tags = self.play.playbook.skip_tags
+ self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
+ (self.playbook_name,
+ ', '.join(self.play.playbook.only_tags),
+ ', '.join(skip_tags) if skip_tags else None,
+ ', '.join(subset) if subset else subset))
+
+ # This is where we actually say we are starting a play
+ self.send_msg("%s: Starting play: %s" %
+ (self.playbook_name, name))
+
+ def playbook_on_stats(self, stats):
+ """Display info about playbook statistics"""
+ hosts = sorted(stats.processed.keys())
+
+ t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
+ 'Failures'])
+
+ failures = False
+ unreachable = False
+
+ for h in hosts:
+ s = stats.summarize(h)
+
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
+ 'failures']])
+
+ self.send_msg("%s: Playbook complete" % self.playbook_name,
+ notify=True)
+
+ if failures or unreachable:
+ color = 'red'
+ self.send_msg("%s: Failures detected" % self.playbook_name,
+ color=color, notify=True)
+ else:
+ color = 'green'
+
+ self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
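A minimal configuration sketch for the hipchat callback above, using the ini section and keys declared in its DOCUMENTATION block (the token value is a placeholder):

    [defaults]
    callback_whitelist = community.general.hipchat

    [callback_hipchat]
    token = <your-hipchat-api-token>
    api_version = v2
    room = ansible
    from = ansible
    notify = true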
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/jabber.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/jabber.py
new file mode 100644
index 00000000..01abde17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/jabber.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: jabber
+ type: notification
+ short_description: post task events to a jabber server
+ description:
+      - The chatty part of ChatOps, with a Jabber (XMPP) server as a target.
+      - This callback plugin sends status updates to a Jabber/XMPP recipient during playbook execution.
+ requirements:
+ - xmpp (python lib https://github.com/ArchipelProject/xmpppy)
+ options:
+ server:
+ description: connection info to jabber server
+ required: True
+ env:
+ - name: JABBER_SERV
+ user:
+ description: Jabber user to authenticate as
+ required: True
+ env:
+ - name: JABBER_USER
+ password:
+ description: Password for the user to the jabber server
+ required: True
+ env:
+ - name: JABBER_PASS
+ to:
+ description: chat identifier that will receive the message
+ required: True
+ env:
+ - name: JABBER_TO
+'''
+
+import os
+
+HAS_XMPP = True
+try:
+ import xmpp
+except ImportError:
+ HAS_XMPP = False
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.jabber'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_XMPP:
+ self._display.warning("The required python xmpp library (xmpppy) is not installed. "
+ "pip install git+https://github.com/ArchipelProject/xmpppy")
+ self.disabled = True
+
+ self.serv = os.getenv('JABBER_SERV')
+ self.j_user = os.getenv('JABBER_USER')
+ self.j_pass = os.getenv('JABBER_PASS')
+ self.j_to = os.getenv('JABBER_TO')
+
+        # os.getenv() returns None for unset variables, so check each one explicitly
+        if None in (self.serv, self.j_user, self.j_pass, self.j_to):
+ self.disabled = True
+ self._display.warning('Jabber CallBack wants the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables')
+
+ def send_msg(self, msg):
+ """Send message"""
+ jid = xmpp.JID(self.j_user)
+ client = xmpp.Client(self.serv, debug=[])
+ client.connect(server=(self.serv, 5222))
+ client.auth(jid.getNode(), self.j_pass, resource=jid.getResource())
+ message = xmpp.Message(self.j_to, msg)
+ message.setAttr('type', 'chat')
+ client.send(message)
+ client.disconnect()
+
+ def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
+ self.debug = self._dump_results(result._result)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.task = task
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Playbook and play start messages"""
+ self.play = play
+ name = play.name
+ self.send_msg("Ansible starting play: %s" % (name))
+
+ def playbook_on_stats(self, stats):
+ name = self.play
+ hosts = sorted(stats.processed.keys())
+ failures = False
+ unreachable = False
+ for h in hosts:
+ s = stats.summarize(h)
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+        out = self.debug
+        if failures or unreachable:
+            self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out))
+        else:
+            self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out))
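The jabber callback above reads its settings from environment variables only; a minimal invocation sketch, with placeholder server and account values:

    export JABBER_SERV=jabber.example.com
    export JABBER_USER=ansible@jabber.example.com
    export JABBER_PASS=<password>
    export JABBER_TO=ops@jabber.example.com
    ANSIBLE_CALLBACK_WHITELIST=community.general.jabber ansible-playbook site.yml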
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/log_plays.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/log_plays.py
new file mode 100644
index 00000000..d184b9a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/log_plays.py
@@ -0,0 +1,123 @@
+# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: log_plays
+ type: notification
+ short_description: write playbook output to log file
+ description:
+ - This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory
+ requirements:
+ - Whitelist in configuration
+      - A /var/log/ansible/hosts directory writeable by the user executing Ansible on the controller
+ options:
+ log_folder:
+ default: /var/log/ansible/hosts
+ description: The folder where log files will be created.
+ env:
+ - name: ANSIBLE_LOG_FOLDER
+ ini:
+ - section: callback_log_plays
+ key: log_folder
+'''
+
+import os
+import time
+import json
+
+from ansible.utils.path import makedirs_safe
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+# NOTE: in Ansible 1.2 or later general logging is available without
+# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
+# or log_path in the DEFAULTS section of your ansible configuration
+# file. This callback is an example of per hosts logging for those
+# that want it.
+
+
+class CallbackModule(CallbackBase):
+ """
+ logs playbook results, per host, in /var/log/ansible/hosts
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.log_plays'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ TIME_FORMAT = "%b %d %Y %H:%M:%S"
+ MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n"
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.log_folder = self.get_option("log_folder")
+
+ if not os.path.exists(self.log_folder):
+ makedirs_safe(self.log_folder)
+
+ def log(self, result, category):
+ data = result._result
+ if isinstance(data, MutableMapping):
+ if '_ansible_verbose_override' in data:
+ # avoid logging extraneous data
+ data = 'omitted'
+ else:
+ data = data.copy()
+ invocation = data.pop('invocation', None)
+ data = json.dumps(data, cls=AnsibleJSONEncoder)
+ if invocation is not None:
+ data = json.dumps(invocation) + " => %s " % data
+
+ path = os.path.join(self.log_folder, result._host.get_name())
+ now = time.strftime(self.TIME_FORMAT, time.localtime())
+
+ msg = to_bytes(
+ self.MSG_FORMAT
+ % dict(
+ now=now,
+ playbook=self.playbook,
+ task_name=result._task.name,
+ task_action=result._task.action,
+ category=category,
+ data=data,
+ )
+ )
+ with open(path, "ab") as fd:
+ fd.write(msg)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.log(result, 'FAILED')
+
+ def v2_runner_on_ok(self, result):
+ self.log(result, 'OK')
+
+ def v2_runner_on_skipped(self, result):
+ self.log(result, 'SKIPPED')
+
+ def v2_runner_on_unreachable(self, result):
+ self.log(result, 'UNREACHABLE')
+
+ def v2_runner_on_async_failed(self, result):
+ self.log(result, 'ASYNC_FAILED')
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook._file_name
+
+    def v2_playbook_on_import_for_host(self, result, imported_file):
+        # log() accepts only (result, category); fold the file name into the category
+        self.log(result, 'IMPORTED [%s]' % imported_file)
+
+    def v2_playbook_on_not_import_for_host(self, result, missing_file):
+        self.log(result, 'NOTIMPORTED [%s]' % missing_file)
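A minimal enablement sketch for the per-host logger above, using the option names from its DOCUMENTATION block:

    [defaults]
    callback_whitelist = community.general.log_plays

    [callback_log_plays]
    log_folder = /var/log/ansible/hosts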
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/logdna.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logdna.py
new file mode 100644
index 00000000..53bc7114
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logdna.py
@@ -0,0 +1,208 @@
+# (c) 2018, Samir Musali <samir.musali@logdna.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: logdna
+ type: aggregate
+ short_description: Sends playbook logs to LogDNA
+ description:
+ - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com)
+ requirements:
+ - LogDNA Python Library (https://github.com/logdna/python)
+ - whitelisting in configuration
+ options:
+ conf_key:
+ required: True
+ description: LogDNA Ingestion Key
+ type: string
+ env:
+ - name: LOGDNA_INGESTION_KEY
+ ini:
+ - section: callback_logdna
+ key: conf_key
+ plugin_ignore_errors:
+ required: False
+      description: Whether to ignore errors when a task fails
+ type: boolean
+ env:
+ - name: ANSIBLE_IGNORE_ERRORS
+ ini:
+ - section: callback_logdna
+ key: plugin_ignore_errors
+ default: False
+ conf_hostname:
+ required: False
+ description: Alternative Host Name; the current host name by default
+ type: string
+ env:
+ - name: LOGDNA_HOSTNAME
+ ini:
+ - section: callback_logdna
+ key: conf_hostname
+ conf_tags:
+ required: False
+ description: Tags
+ type: string
+ env:
+ - name: LOGDNA_TAGS
+ ini:
+ - section: callback_logdna
+ key: conf_tags
+ default: ansible
+'''
+
+import logging
+import json
+import socket
+from uuid import getnode
+from ansible.plugins.callback import CallbackBase
+from ansible.parsing.ajson import AnsibleJSONEncoder
+
+try:
+ from logdna import LogDNAHandler
+ HAS_LOGDNA = True
+except ImportError:
+ HAS_LOGDNA = False
+
+
+# Getting MAC Address of system:
+def get_mac():
+ mac = "%012x" % getnode()
+    # step through the twelve hex digits two at a time: "aabbccddeeff" -> "aa:bb:cc:dd:ee:ff"
+    return ":".join(mac[index:index + 2] for index in range(0, len(mac), 2))
+
+
+# Getting hostname of system:
+def get_hostname():
+ return str(socket.gethostname()).split('.local')[0]
+
+
+# Getting IP of system:
+def get_ip():
+ try:
+ return socket.gethostbyname(get_hostname())
+ except Exception:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ try:
+ s.connect(('10.255.255.255', 1))
+ IP = s.getsockname()[0]
+ except Exception:
+ IP = '127.0.0.1'
+ finally:
+ s.close()
+ return IP
+
+
+# Is it JSON?
+def isJSONable(obj):
+ try:
+ json.dumps(obj, sort_keys=True, cls=AnsibleJSONEncoder)
+ return True
+ except Exception:
+ return False
+
+
+# LogDNA Callback Module:
+class CallbackModule(CallbackBase):
+
+ CALLBACK_VERSION = 0.1
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.logdna'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+
+ self.disabled = True
+ self.playbook_name = None
+ self.playbook = None
+ self.conf_key = None
+ self.plugin_ignore_errors = None
+ self.conf_hostname = None
+ self.conf_tags = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.conf_key = self.get_option('conf_key')
+ self.plugin_ignore_errors = self.get_option('plugin_ignore_errors')
+ self.conf_hostname = self.get_option('conf_hostname')
+ self.conf_tags = self.get_option('conf_tags')
+ self.mac = get_mac()
+ self.ip = get_ip()
+
+ if self.conf_hostname is None:
+ self.conf_hostname = get_hostname()
+
+ self.conf_tags = self.conf_tags.split(',')
+
+ if HAS_LOGDNA:
+ self.log = logging.getLogger('logdna')
+ self.log.setLevel(logging.INFO)
+ self.options = {'hostname': self.conf_hostname, 'mac': self.mac, 'index_meta': True}
+ self.log.addHandler(LogDNAHandler(self.conf_key, self.options))
+ self.disabled = False
+ else:
+ self.disabled = True
+            self._display.warning('Please install the LogDNA Python package: `pip install logdna`')
+
+ def metaIndexing(self, meta):
+ invalidKeys = []
+ ninvalidKeys = 0
+ for key, value in meta.items():
+ if not isJSONable(value):
+ invalidKeys.append(key)
+ ninvalidKeys += 1
+ if ninvalidKeys > 0:
+ for key in invalidKeys:
+ del meta[key]
+ meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys)
+ return meta
+
+ def sanitizeJSON(self, data):
+ try:
+ return json.loads(json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder))
+ except Exception:
+ return {'warnings': ['JSON Formatting Issue', json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)]}
+
+ def flush(self, log, options):
+ if HAS_LOGDNA:
+ self.log.info(json.dumps(log), options)
+
+ def sendLog(self, host, category, logdata):
+ options = {'app': 'ansible', 'meta': {'playbook': self.playbook_name, 'host': host, 'category': category}}
+ logdata['info'].pop('invocation', None)
+ warnings = logdata['info'].pop('warnings', None)
+ if warnings is not None:
+ self.flush({'warn': warnings}, options)
+ self.flush(logdata, options)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook
+ self.playbook_name = playbook._file_name
+
+ def v2_playbook_on_stats(self, stats):
+ result = dict()
+ for host in stats.processed.keys():
+ result[host] = stats.summarize(host)
+ self.sendLog(self.conf_hostname, 'STATS', {'info': self.sanitizeJSON(result)})
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ if self.plugin_ignore_errors:
+ ignore_errors = self.plugin_ignore_errors
+ self.sendLog(host, 'FAILED', {'info': self.sanitizeJSON(res), 'ignore_errors': ignore_errors})
+
+ def runner_on_ok(self, host, res):
+ self.sendLog(host, 'OK', {'info': self.sanitizeJSON(res)})
+
+ def runner_on_unreachable(self, host, res):
+ self.sendLog(host, 'UNREACHABLE', {'info': self.sanitizeJSON(res)})
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.sendLog(host, 'ASYNC_FAILED', {'info': self.sanitizeJSON(res), 'job_id': jid})
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.sendLog(host, 'ASYNC_OK', {'info': self.sanitizeJSON(res), 'job_id': jid})
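A minimal configuration sketch for the logdna callback above (the ingestion key and tag values are placeholders):

    [defaults]
    callback_whitelist = community.general.logdna

    [callback_logdna]
    conf_key = <your-ingestion-key>
    conf_hostname = ansible-controller
    conf_tags = ansible,production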
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/logentries.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logentries.py
new file mode 100644
index 00000000..c6bc9935
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logentries.py
@@ -0,0 +1,330 @@
+# (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: logentries
+ type: notification
+ short_description: Sends events to Logentries
+ description:
+ - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes.
+      - Before 2.4, if you wanted to use an ini configuration, the file had to be placed in the same directory as this plugin and named logentries.ini
+ - In 2.4 and above you can just put it in the main Ansible configuration file.
+ requirements:
+ - whitelisting in configuration
+ - certifi (python library)
+ - flatdict (python library), if you want to use the 'flatten' option
+ options:
+ api:
+ description: URI to the Logentries API
+ env:
+ - name: LOGENTRIES_API
+ default: data.logentries.com
+ ini:
+ - section: callback_logentries
+ key: api
+ port:
+ description: HTTP port to use when connecting to the API
+ env:
+ - name: LOGENTRIES_PORT
+ default: 80
+ ini:
+ - section: callback_logentries
+ key: port
+ tls_port:
+ description: Port to use when connecting to the API when TLS is enabled
+ env:
+ - name: LOGENTRIES_TLS_PORT
+ default: 443
+ ini:
+ - section: callback_logentries
+ key: tls_port
+ token:
+ description: The logentries "TCP token"
+ env:
+ - name: LOGENTRIES_ANSIBLE_TOKEN
+ required: True
+ ini:
+ - section: callback_logentries
+ key: token
+ use_tls:
+ description:
+ - Toggle to decide whether to use TLS to encrypt the communications with the API server
+ env:
+ - name: LOGENTRIES_USE_TLS
+ default: False
+ type: boolean
+ ini:
+ - section: callback_logentries
+ key: use_tls
+ flatten:
+ description: flatten complex data structures into a single dictionary with complex keys
+ type: boolean
+ default: False
+ env:
+ - name: LOGENTRIES_FLATTEN
+ ini:
+ - section: callback_logentries
+ key: flatten
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+
+ [defaults]
+ callback_whitelist = community.general.logentries
+
+ Either set the environment variables
+ export LOGENTRIES_API=data.logentries.com
+ export LOGENTRIES_PORT=10000
+ export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af
+
+ Or in the main Ansible config file
+ [callback_logentries]
+ api = data.logentries.com
+ port = 10000
+ tls_port = 20000
+ use_tls = no
+ token = dd21fc88-f00a-43ff-b977-e3a4233c53af
+ flatten = False
+'''
+
+import os
+import socket
+import random
+import time
+import uuid
+
+try:
+ import certifi
+ HAS_CERTIFI = True
+except ImportError:
+ HAS_CERTIFI = False
+
+try:
+ import flatdict
+ HAS_FLATDICT = True
+except ImportError:
+ HAS_FLATDICT = False
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.callback import CallbackBase
+
+# Todo:
+# * Better formatting of output before sending out to logentries data/api nodes.
+
+
+class PlainTextSocketAppender(object):
+ def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443):
+
+ self.LE_API = LE_API
+ self.LE_PORT = LE_PORT
+ self.LE_TLS_PORT = LE_TLS_PORT
+ self.MIN_DELAY = 0.1
+ self.MAX_DELAY = 10
+ # Error message displayed when an incorrect Token has been detected
+        self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_ANSIBLE_TOKEN parameter you entered is incorrect!\n\n"
+ # Unicode Line separator character \u2028
+ self.LINE_SEP = u'\u2028'
+
+ self._display = display
+ self._conn = None
+
+ def open_connection(self):
+ self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._conn.connect((self.LE_API, self.LE_PORT))
+
+ def reopen_connection(self):
+ self.close_connection()
+
+ root_delay = self.MIN_DELAY
+ while True:
+ try:
+ self.open_connection()
+ return
+ except Exception as e:
+ self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e))
+
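+                # capped exponential backoff: double the delay on every failed
+                # attempt, then add random jitter so clients do not retry in lockstep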
+ root_delay *= 2
+ if root_delay > self.MAX_DELAY:
+ root_delay = self.MAX_DELAY
+
+ wait_for = root_delay + random.uniform(0, root_delay)
+
+ try:
+ self._display.vvvv("sleeping %s before retry" % wait_for)
+ time.sleep(wait_for)
+ except KeyboardInterrupt:
+ raise
+
+ def close_connection(self):
+ if self._conn is not None:
+ self._conn.close()
+
+ def put(self, data):
+ # Replace newlines with Unicode line separator
+ # for multi-line events
+ data = to_text(data, errors='surrogate_or_strict')
+ multiline = data.replace(u'\n', self.LINE_SEP)
+ multiline += u"\n"
+ # Send data, reconnect if needed
+ while True:
+ try:
+ self._conn.send(to_bytes(multiline, errors='surrogate_or_strict'))
+ except socket.error:
+ self.reopen_connection()
+ continue
+ break
+
+ self.close_connection()
+
+
+try:
+ import ssl
+ HAS_SSL = True
+except ImportError: # for systems without TLS support.
+ SocketAppender = PlainTextSocketAppender
+ HAS_SSL = False
+else:
+
+ class TLSSocketAppender(PlainTextSocketAppender):
+ def open_connection(self):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock = ssl.wrap_socket(
+ sock=sock,
+ keyfile=None,
+ certfile=None,
+ server_side=False,
+ cert_reqs=ssl.CERT_REQUIRED,
+ ssl_version=getattr(
+ ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
+ ca_certs=certifi.where(),
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True, )
+ sock.connect((self.LE_API, self.LE_TLS_PORT))
+ self._conn = sock
+
+ SocketAppender = TLSSocketAppender
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.logentries'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ # TODO: allow for alternate posting methods (REST/UDP/agent/etc)
+ super(CallbackModule, self).__init__()
+
+ # verify dependencies
+ if not HAS_SSL:
+ self._display.warning("Unable to import ssl module. Will send over port 80.")
+
+ if not HAS_CERTIFI:
+ self.disabled = True
+ self._display.warning('The `certifi` python module is not installed.\nDisabling the Logentries callback plugin.')
+
+ self.le_jobid = str(uuid.uuid4())
+
+ # FIXME: make configurable, move to options
+ self.timeout = 10
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ # get options
+ try:
+ self.api_url = self.get_option('api')
+ self.api_port = self.get_option('port')
+ self.api_tls_port = self.get_option('tls_port')
+ self.use_tls = self.get_option('use_tls')
+ self.flatten = self.get_option('flatten')
+ except KeyError as e:
+ self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e))
+ self.disabled = True
+
+ try:
+ self.token = self.get_option('token')
+ except KeyError as e:
+            self._display.warning('The Logentries token was not provided. It is required for this callback to operate; disabling.')
+ self.disabled = True
+
+ if self.flatten and not HAS_FLATDICT:
+ self.disabled = True
+ self._display.warning('You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin.')
+
+ self._initialize_connections()
+
+ def _initialize_connections(self):
+
+ if not self.disabled:
+ if self.use_tls:
+ self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port))
+ self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port)
+ else:
+ self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port))
+ self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port)
+ self._appender.reopen_connection()
+
+ def emit_formatted(self, record):
+ if self.flatten:
+ results = flatdict.FlatDict(record)
+ self.emit(self._dump_results(results))
+ else:
+ self.emit(self._dump_results(record))
+
+ def emit(self, record):
+ msg = record.rstrip('\n')
+ msg = "{0} {1}".format(self.token, msg)
+ self._appender.put(msg)
+ self._display.vvvv("Sent event to logentries")
+
+ def _set_info(self, host, res):
+ return {'le_jobid': self.le_jobid, 'hostname': host, 'results': res}
+
+ def runner_on_ok(self, host, res):
+ results = self._set_info(host, res)
+ results['status'] = 'OK'
+ self.emit_formatted(results)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ results = self._set_info(host, res)
+ results['status'] = 'FAILED'
+ self.emit_formatted(results)
+
+ def runner_on_skipped(self, host, item=None):
+ results = self._set_info(host, item)
+ del results['results']
+ results['status'] = 'SKIPPED'
+ self.emit_formatted(results)
+
+ def runner_on_unreachable(self, host, res):
+ results = self._set_info(host, res)
+ results['status'] = 'UNREACHABLE'
+ self.emit_formatted(results)
+
+ def runner_on_async_failed(self, host, res, jid):
+ results = self._set_info(host, res)
+ results['jid'] = jid
+ results['status'] = 'ASYNC_FAILED'
+ self.emit_formatted(results)
+
+ def v2_playbook_on_play_start(self, play):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['started_by'] = os.getlogin()
+ if play.name:
+ results['play'] = play.name
+ results['hosts'] = play.hosts
+ self.emit_formatted(results)
+
+ def playbook_on_stats(self, stats):
+ """ close connection """
+ self._appender.close_connection()
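The reopen_connection() method above implements its retry loop inline; as a standalone illustration, here is a minimal sketch of the same capped-exponential-backoff-with-jitter pattern (connect is a hypothetical stand-in for open_connection()):

    import random
    import time

    def retry_with_backoff(connect, min_delay=0.1, max_delay=10):
        # keep trying until connect() succeeds, doubling the delay each time
        delay = min_delay
        while True:
            try:
                connect()
                return
            except Exception:
                delay = min(delay * 2, max_delay)
                # jitter spreads retries out so many clients do not reconnect at once
                time.sleep(delay + random.uniform(0, delay))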
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/logstash.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logstash.py
new file mode 100644
index 00000000..ad1b2b0f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logstash.py
@@ -0,0 +1,248 @@
+# (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
+# (C) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: logstash
+ type: notification
+ short_description: Sends events to Logstash
+ description:
+      - This callback will report facts and task events to Logstash (https://www.elastic.co/products/logstash)
+ requirements:
+ - whitelisting in configuration
+ - logstash (python library)
+ options:
+ server:
+ description: Address of the Logstash server
+ env:
+ - name: LOGSTASH_SERVER
+ ini:
+ - section: callback_logstash
+ key: server
+ version_added: 1.0.0
+ default: localhost
+ port:
+ description: Port on which logstash is listening
+ env:
+ - name: LOGSTASH_PORT
+ ini:
+ - section: callback_logstash
+ key: port
+ version_added: 1.0.0
+ default: 5000
+ type:
+ description: Message type
+ env:
+ - name: LOGSTASH_TYPE
+ ini:
+ - section: callback_logstash
+ key: type
+ version_added: 1.0.0
+ default: ansible
+'''
+
+import os
+import json
+import socket
+import uuid
+from datetime import datetime
+
+import logging
+
+try:
+ import logstash
+ HAS_LOGSTASH = True
+except ImportError:
+ HAS_LOGSTASH = False
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ ansible logstash callback plugin
+ ansible.cfg:
+ callback_plugins = <path_to_callback_plugins_folder>
+ callback_whitelist = logstash
+ and put the plugin in <path_to_callback_plugins_folder>
+
+ logstash config:
+ input {
+ tcp {
+ port => 5000
+ codec => json
+ }
+ }
+
+ Requires:
+ python-logstash
+
+ This plugin makes use of the following environment variables or ini config:
+ LOGSTASH_SERVER (optional): defaults to localhost
+ LOGSTASH_PORT (optional): defaults to 5000
+ LOGSTASH_TYPE (optional): defaults to ansible
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.logstash'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_LOGSTASH:
+ self.disabled = True
+            self._display.warning("The required python-logstash library is not installed. "
+                                  "pip install python-logstash")
+
+ self.start_time = datetime.utcnow()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.logger = logging.getLogger('python-logstash-logger')
+ self.logger.setLevel(logging.DEBUG)
+
+ self.logstash_server = self.get_option('server')
+ self.logstash_port = self.get_option('port')
+ self.logstash_type = self.get_option('type')
+ self.handler = logstash.TCPLogstashHandler(
+ self.logstash_server,
+ int(self.logstash_port),
+ version=1,
+ message_type=self.logstash_type
+ )
+ self.logger.addHandler(self.handler)
+ self.hostname = socket.gethostname()
+ self.session = str(uuid.uuid1())
+ self.errors = 0
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook._file_name
+ data = {
+ 'status': "OK",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "start",
+ 'ansible_playbook': self.playbook,
+ }
+ self.logger.info("ansible start", extra=data)
+
+ def v2_playbook_on_stats(self, stats):
+ end_time = datetime.utcnow()
+ runtime = end_time - self.start_time
+ summarize_stat = {}
+ for host in stats.processed.keys():
+ summarize_stat[host] = stats.summarize(host)
+
+ if self.errors == 0:
+ status = "OK"
+ else:
+ status = "FAILED"
+
+ data = {
+ 'status': status,
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "finish",
+ 'ansible_playbook': self.playbook,
+ 'ansible_playbook_duration': runtime.total_seconds(),
+ 'ansible_result': json.dumps(summarize_stat),
+ }
+ self.logger.info("ansible stats", extra=data)
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ data = {
+ 'status': "OK",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.logger.info("ansible ok", extra=data)
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ data = {
+ 'status': "SKIPPED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_task': result._task,
+ 'ansible_host': result._host.name
+ }
+ self.logger.info("ansible skipped", extra=data)
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ data = {
+ 'status': "IMPORTED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "import",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'imported_file': imported_file
+ }
+ self.logger.info("ansible import", extra=data)
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ data = {
+ 'status': "NOT IMPORTED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "import",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'missing_file': missing_file
+ }
+ self.logger.info("ansible import", extra=data)
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ data = {
+ 'status': "FAILED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.errors += 1
+ self.logger.error("ansible failed", extra=data)
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ data = {
+ 'status': "UNREACHABLE",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.logger.error("ansible unreachable", extra=data)
+
+ def v2_runner_on_async_failed(self, result, **kwargs):
+ data = {
+ 'status': "FAILED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.errors += 1
+ self.logger.error("ansible async", extra=data)
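A minimal configuration sketch for the logstash callback above, matching the options declared in its DOCUMENTATION block (server name is a placeholder):

    [defaults]
    callback_whitelist = community.general.logstash

    [callback_logstash]
    server = logstash.example.com
    port = 5000
    type = ansible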
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/mail.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/mail.py
new file mode 100644
index 00000000..2172f45c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/mail.py
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+callback: mail
+type: notification
+short_description: Sends failure events via email
+description:
+- This callback will report failures via email
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- whitelisting in configuration
+options:
+ mta:
+ description: Mail Transfer Agent, server that accepts SMTP
+ env:
+ - name: SMTPHOST
+ ini:
+ - section: callback_mail
+ key: smtphost
+ default: localhost
+ mtaport:
+    description: Mail Transfer Agent port, the port on which the MTA accepts SMTP connections
+ ini:
+ - section: callback_mail
+ key: smtpport
+ default: 25
+ to:
+ description: Mail recipient
+ ini:
+ - section: callback_mail
+ key: to
+ default: root
+ sender:
+ description: Mail sender
+ ini:
+ - section: callback_mail
+ key: sender
+ cc:
+ description: CC'd recipient
+ ini:
+ - section: callback_mail
+ key: cc
+ bcc:
+ description: BCC'd recipient
+ ini:
+ - section: callback_mail
+ key: bcc
+notes:
+- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
+'''
+
+import json
+import os
+import re
+import smtplib
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ ''' This Ansible callback plugin mails errors to interested parties. '''
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.mail'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.sender = None
+ self.to = 'root'
+ self.smtphost = os.getenv('SMTPHOST', 'localhost')
+ self.smtpport = 25
+ self.cc = None
+ self.bcc = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.sender = self.get_option('sender')
+ self.to = self.get_option('to')
+ self.smtphost = self.get_option('mta')
+ self.smtpport = int(self.get_option('mtaport'))
+ self.cc = self.get_option('cc')
+ self.bcc = self.get_option('bcc')
+
+ def mail(self, subject='Ansible error mail', body=None):
+ if body is None:
+ body = subject
+
+ smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
+
+ b_sender = to_bytes(self.sender)
+ b_to = to_bytes(self.to)
+ b_cc = to_bytes(self.cc)
+ b_bcc = to_bytes(self.bcc)
+ b_subject = to_bytes(subject)
+ b_body = to_bytes(body)
+
+ b_content = b'From: %s\n' % b_sender
+ b_content += b'To: %s\n' % b_to
+ if self.cc:
+ b_content += b'Cc: %s\n' % b_cc
+ b_content += b'Subject: %s\n\n' % b_subject
+ b_content += b_body
+
+ b_addresses = b_to.split(b',')
+ if self.cc:
+ b_addresses += b_cc.split(b',')
+ if self.bcc:
+ b_addresses += b_bcc.split(b',')
+
+ for b_address in b_addresses:
+ smtp.sendmail(b_sender, b_address, b_content)
+
+ smtp.quit()
+
+ def subject_msg(self, multiline, failtype, linenr):
+ return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
+
+ def indent(self, multiline, indent=8):
+ return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
+
+ def body_blob(self, multiline, texttype):
+ ''' Turn some text output in a well-indented block for sending in a mail body '''
+ intro = 'with the following %s:\n\n' % texttype
+ blob = ''
+ for line in multiline.strip('\r\n').splitlines():
+ blob += '%s\n' % line
+ return intro + self.indent(blob) + '\n'
+
+ def mail_result(self, result, failtype):
+ host = result._host.get_name()
+ if not self.sender:
+ self.sender = '"Ansible: %s" <root>' % host
+
+ # Add subject
+ if self.itembody:
+ subject = self.itemsubject
+ elif result._result.get('failed_when_result') is True:
+ subject = "Failed due to 'failed_when' condition"
+ elif result._result.get('msg'):
+ subject = self.subject_msg(result._result['msg'], failtype, 0)
+ elif result._result.get('stderr'):
+ subject = self.subject_msg(result._result['stderr'], failtype, -1)
+ elif result._result.get('stdout'):
+ subject = self.subject_msg(result._result['stdout'], failtype, -1)
+ elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
+ subject = self.subject_msg(result._result['exception'], failtype, -1)
+ else:
+ subject = '%s: %s' % (failtype, result._task.name or result._task.action)
+
+ # Make playbook name visible (e.g. in Outlook/Gmail condensed view)
+ body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
+ if result._task.name:
+ body += 'Task: %s\n' % result._task.name
+ body += 'Module: %s\n' % result._task.action
+ body += 'Host: %s\n' % host
+ body += '\n'
+
+ # Add task information (as much as possible)
+ body += 'The following task failed:\n\n'
+ if 'invocation' in result._result:
+ body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
+ elif result._task.name:
+ body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
+ else:
+ body += self.indent('%s\n' % result._task.action)
+ body += '\n'
+
+ # Add item / message
+ if self.itembody:
+ body += self.itembody
+ elif result._result.get('failed_when_result') is True:
+ body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
+ elif result._result.get('msg'):
+ body += self.body_blob(result._result['msg'], 'message')
+
+ # Add stdout / stderr / exception / warnings / deprecations
+ if result._result.get('stdout'):
+ body += self.body_blob(result._result['stdout'], 'standard output')
+ if result._result.get('stderr'):
+ body += self.body_blob(result._result['stderr'], 'error output')
+ if result._result.get('exception'): # Unrelated exceptions are added to output :-/
+ body += self.body_blob(result._result['exception'], 'exception')
+        if result._result.get('warnings'):
+            for i in range(len(result._result.get('warnings'))):
+                body += self.body_blob(result._result['warnings'][i], 'warning %d' % (i + 1))
+        if result._result.get('deprecations'):
+            for i in range(len(result._result.get('deprecations'))):
+                body += self.body_blob(result._result['deprecations'][i], 'deprecation %d' % (i + 1))
+
+ body += 'and a complete dump of the error:\n\n'
+ body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
+
+ self.mail(subject=subject, body=body)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook
+ self.itembody = ''
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if ignore_errors:
+ return
+
+ self.mail_result(result, 'Failed')
+
+ def v2_runner_on_unreachable(self, result):
+ self.mail_result(result, 'Unreachable')
+
+ def v2_runner_on_async_failed(self, result):
+ self.mail_result(result, 'Async failure')
+
+ def v2_runner_item_on_failed(self, result):
+ # Pass item information to task failure
+ self.itemsubject = result._result['msg']
+ self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
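A minimal configuration sketch for the mail callback above (host and addresses are placeholders):

    [defaults]
    callback_whitelist = community.general.mail

    [callback_mail]
    smtphost = smtp.example.com
    smtpport = 25
    sender = ansible@example.com
    to = ops@example.com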
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/nrdp.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/nrdp.py
new file mode 100644
index 00000000..a814a41c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/nrdp.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Remi Verchere <remi@verchere.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: nrdp
+ type: notification
+ author: "Remi VERCHERE (@rverchere)"
+ short_description: post task result to a nagios server through nrdp
+ description:
+      - This callback sends playbook results to Nagios.
+      - Nagios must use NRDP to receive passive events.
+      - The passive check is sent to a dedicated host/service for Ansible.
+ options:
+ url:
+ description: url of the nrdp server
+ required: True
+ env:
+ - name : NRDP_URL
+ ini:
+ - section: callback_nrdp
+ key: url
+ validate_certs:
+      description: (bool) Validate the SSL certificate of the NRDP server (for HTTPS URLs).
+ env:
+ - name: NRDP_VALIDATE_CERTS
+ ini:
+ - section: callback_nrdp
+ key: validate_nrdp_certs
+ - section: callback_nrdp
+ key: validate_certs
+ default: False
+ aliases: [ validate_nrdp_certs ]
+ token:
+ description: token to be allowed to push nrdp events
+ required: True
+ env:
+ - name: NRDP_TOKEN
+ ini:
+ - section: callback_nrdp
+ key: token
+ hostname:
+ description: hostname where the passive check is linked to
+ required: True
+ env:
+ - name : NRDP_HOSTNAME
+ ini:
+ - section: callback_nrdp
+ key: hostname
+ servicename:
+ description: service where the passive check is linked to
+ required: True
+ env:
+ - name : NRDP_SERVICENAME
+ ini:
+ - section: callback_nrdp
+ key: servicename
+'''
+
+import os
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ '''
+    Sends ansible-playbook results to a Nagios server using the NRDP protocol.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.nrdp'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ # Nagios states
+ OK = 0
+ WARNING = 1
+ CRITICAL = 2
+ UNKNOWN = 3
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self.printed_playbook = False
+ self.playbook_name = None
+ self.play = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.url = self.get_option('url')
+ if not self.url.endswith('/'):
+ self.url += '/'
+ self.token = self.get_option('token')
+ self.hostname = self.get_option('hostname')
+ self.servicename = self.get_option('servicename')
+ self.validate_nrdp_certs = self.get_option('validate_certs')
+
+        # check each option explicitly; an or-chain compared to None would miss partially unset options
+        if None in (self.url, self.token, self.hostname, self.servicename):
+ self._display.warning("NRDP callback wants the NRDP_URL,"
+ " NRDP_TOKEN, NRDP_HOSTNAME,"
+ " NRDP_SERVICENAME"
+ " environment variables'."
+ " The NRDP callback plugin is disabled.")
+ self.disabled = True
+
+ def _send_nrdp(self, state, msg):
+ '''
+        An NRDP service check sends XMLDATA like this:
+ <?xml version='1.0'?>
+ <checkresults>
+ <checkresult type='service'>
+ <hostname>somehost</hostname>
+ <servicename>someservice</servicename>
+ <state>1</state>
+ <output>WARNING: Danger Will Robinson!|perfdata</output>
+ </checkresult>
+ </checkresults>
+ '''
+ xmldata = "<?xml version='1.0'?>\n"
+ xmldata += "<checkresults>\n"
+ xmldata += "<checkresult type='service'>\n"
+ xmldata += "<hostname>%s</hostname>\n" % self.hostname
+ xmldata += "<servicename>%s</servicename>\n" % self.servicename
+ xmldata += "<state>%d</state>\n" % state
+ xmldata += "<output>%s</output>\n" % msg
+ xmldata += "</checkresult>\n"
+ xmldata += "</checkresults>\n"
+
+ body = {
+ 'cmd': 'submitcheck',
+ 'token': self.token,
+            # urlencode() handles the encoding, so pass the XML payload as a plain string
+            'XMLDATA': xmldata
+ }
+
+ try:
+ response = open_url(self.url,
+ data=urlencode(body),
+ method='POST',
+ validate_certs=self.validate_nrdp_certs)
+ return response.read()
+ except Exception as ex:
+ self._display.warning("NRDP callback cannot send result {0}".format(ex))
+
+ def v2_playbook_on_play_start(self, play):
+ '''
+ Display Playbook and play start messages
+ '''
+ self.play = play
+
+ def v2_playbook_on_stats(self, stats):
+ '''
+ Display info about playbook statistics
+ '''
+ name = self.play
+ gstats = ""
+ hosts = sorted(stats.processed.keys())
+ critical = warning = 0
+ for host in hosts:
+ stat = stats.summarize(host)
+            gstats += ("'%s_ok'=%d '%s_changed'=%d '%s_unreachable'=%d '%s_failed'=%d " %
+                       (host, stat['ok'], host, stat['changed'],
+                        host, stat['unreachable'], host, stat['failures']))
+ # Critical when failed tasks or unreachable host
+ critical += stat['failures']
+ critical += stat['unreachable']
+ # Warning when changed tasks
+ warning += stat['changed']
+
+ msg = "%s | %s" % (name, gstats)
+ if critical:
+ # Send Critical
+ self._send_nrdp(self.CRITICAL, msg)
+ elif warning:
+ # Send Warning
+ self._send_nrdp(self.WARNING, msg)
+ else:
+ # Send OK
+ self._send_nrdp(self.OK, msg)
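The nrdp callback above can also be driven entirely from environment variables; a sketch with placeholder values:

    export NRDP_URL=https://nagios.example.com/nrdp/
    export NRDP_TOKEN=<token>
    export NRDP_HOSTNAME=ansible
    export NRDP_SERVICENAME=ansible-playbooks
    ANSIBLE_CALLBACK_WHITELIST=community.general.nrdp ansible-playbook site.yml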
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/null.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/null.py
new file mode 100644
index 00000000..e4ef684b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/null.py
@@ -0,0 +1,30 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: 'null'
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: Don't display stuff to screen
+ description:
+      - This callback prevents outputting events to screen
+'''
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+    This callback won't print messages to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.null'
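Because null is a stdout callback, enabling it silences normal playbook output entirely; a minimal ansible.cfg sketch:

    [defaults]
    stdout_callback = community.general.null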
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/osx_say.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/osx_say.py
new file mode 100644
index 00000000..fe1a917e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/osx_say.py
@@ -0,0 +1,114 @@
+# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: say
+ type: notification
+ requirements:
+ - whitelisting in configuration
+ - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program
+ short_description: notify using software speech synthesizer
+ description:
+ - This plugin will use the 'say' or 'espeak' program to "speak" about play events.
+ notes:
+ - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
+'''
+
+import distutils.spawn
+import platform
+import subprocess
+import os
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ makes Ansible much more exciting.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.say'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ self.FAILED_VOICE = None
+ self.REGULAR_VOICE = None
+ self.HAPPY_VOICE = None
+ self.LASER_VOICE = None
+
+ self.synthesizer = distutils.spawn.find_executable('say')
+ if not self.synthesizer:
+ self.synthesizer = distutils.spawn.find_executable('espeak')
+ if self.synthesizer:
+ self.FAILED_VOICE = 'klatt'
+ self.HAPPY_VOICE = 'f5'
+ self.LASER_VOICE = 'whisper'
+ elif platform.system() != 'Darwin':
+            # a 'say' binary is available, but it may be the GNUstep tool, which doesn't support the 'voice' parameter
+ self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
+ else:
+ self.FAILED_VOICE = 'Zarvox'
+ self.REGULAR_VOICE = 'Trinoids'
+ self.HAPPY_VOICE = 'Cellos'
+ self.LASER_VOICE = 'Princess'
+
+        # the plugin disables itself if neither 'say' nor 'espeak' is present;
+        # ansible will not call any callback if disabled is set to True
+ if not self.synthesizer:
+ self.disabled = True
+ self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
+
+ def say(self, msg, voice):
+ cmd = [self.synthesizer, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ subprocess.call(cmd)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_ok(self, host, res):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_skipped(self, host, item=None):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_unreachable(self, host, res):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def playbook_on_start(self):
+ self.say("Running Playbook", self.REGULAR_VOICE)
+
+ def playbook_on_notify(self, host, handler):
+ self.say("pew", self.LASER_VOICE)
+
+ def playbook_on_task_start(self, name, is_conditional):
+ if not is_conditional:
+ self.say("Starting task: %s" % name, self.REGULAR_VOICE)
+ else:
+ self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
+
+ def playbook_on_setup(self):
+ self.say("Gathering facts", self.REGULAR_VOICE)
+
+ def playbook_on_play_start(self, name):
+ self.say("Starting play: %s" % name, self.HAPPY_VOICE)
+
+ def playbook_on_stats(self, stats):
+ self.say("Play complete", self.HAPPY_VOICE)
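A minimal enablement sketch for the say callback above (it additionally requires a working 'say' or 'espeak' binary on the controller):

    [defaults]
    callback_whitelist = community.general.say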
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/say.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/say.py
new file mode 100644
index 00000000..fe1a917e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/say.py
@@ -0,0 +1,114 @@
+# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: say
+ type: notification
+ requirements:
+ - whitelisting in configuration
+ - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program
+ short_description: notify using software speech synthesizer
+ description:
+ - This plugin will use the 'say' or 'espeak' program to "speak" about play events.
+ notes:
+ - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
+'''
+
+import distutils.spawn
+import platform
+import subprocess
+import os
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ makes Ansible much more exciting.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.say'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ self.FAILED_VOICE = None
+ self.REGULAR_VOICE = None
+ self.HAPPY_VOICE = None
+ self.LASER_VOICE = None
+
+ self.synthesizer = distutils.spawn.find_executable('say')
+ if not self.synthesizer:
+ self.synthesizer = distutils.spawn.find_executable('espeak')
+ if self.synthesizer:
+ self.FAILED_VOICE = 'klatt'
+ self.HAPPY_VOICE = 'f5'
+ self.LASER_VOICE = 'whisper'
+ elif platform.system() != 'Darwin':
+            # a 'say' binary is available, but it may be the GNUstep tool, which doesn't support the 'voice' parameter
+ self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
+ else:
+ self.FAILED_VOICE = 'Zarvox'
+ self.REGULAR_VOICE = 'Trinoids'
+ self.HAPPY_VOICE = 'Cellos'
+ self.LASER_VOICE = 'Princess'
+
+        # the plugin disables itself if neither 'say' nor 'espeak' is present;
+        # ansible will not call any callback if disabled is set to True
+ if not self.synthesizer:
+ self.disabled = True
+ self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
+
+ def say(self, msg, voice):
+ cmd = [self.synthesizer, msg]
+ if voice:
+ cmd.extend(('-v', voice))
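+ # e.g. cmd == ['/usr/bin/say', 'Running Playbook', '-v', 'Trinoids'] on macOS
+ # (illustrative values); a voice of None simply omits the '-v' flag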
+ subprocess.call(cmd)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_ok(self, host, res):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_skipped(self, host, item=None):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_unreachable(self, host, res):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def playbook_on_start(self):
+ self.say("Running Playbook", self.REGULAR_VOICE)
+
+ def playbook_on_notify(self, host, handler):
+ self.say("pew", self.LASER_VOICE)
+
+ def playbook_on_task_start(self, name, is_conditional):
+ if not is_conditional:
+ self.say("Starting task: %s" % name, self.REGULAR_VOICE)
+ else:
+ self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
+
+ def playbook_on_setup(self):
+ self.say("Gathering facts", self.REGULAR_VOICE)
+
+ def playbook_on_play_start(self, name):
+ self.say("Starting play: %s" % name, self.HAPPY_VOICE)
+
+ def playbook_on_stats(self, stats):
+ self.say("Play complete", self.HAPPY_VOICE)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/selective.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/selective.py
new file mode 100644
index 00000000..9521081e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/selective.py
@@ -0,0 +1,276 @@
+# (c) Fastly, inc 2016
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: selective
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: only print certain tasks
+ description:
+ - This callback only prints tasks that have been tagged with `print_action` or that have failed.
+ This allows operators to focus only on the tasks that provide value.
+ - Tasks that are not printed are replaced with a '.'.
+ - If you increase verbosity all tasks are printed.
+ options:
+ nocolor:
+ default: False
+ description: This setting suppresses colorized output
+ env:
+ - name: ANSIBLE_NOCOLOR
+ - name: ANSIBLE_SELECTIVE_DONT_COLORIZE
+ ini:
+ - section: defaults
+ key: nocolor
+ type: boolean
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug: msg="This will not be printed"
+ - ansible.builtin.debug: msg="But this will"
+ tags: [print_action]
+"""
+
+import difflib
+
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils._text import to_text
+from ansible.utils.color import codeCodes
+
+DONT_COLORIZE = False
+COLORS = {
+ 'normal': '\033[0m',
+ 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]),
+ 'bold': '\033[1m',
+ 'not_so_bold': '\033[1m\033[34m',
+ 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]),
+ 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]),
+ 'endc': '\033[0m',
+ 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]),
+}
+
+
+def dict_diff(prv, nxt):
+ """Return a dict of keys that differ with another config object."""
+ keys = set(prv.keys()) | set(nxt.keys())  # union of keys; dict views don't support '+' on Python 3
+ result = {}
+ for k in keys:
+ if prv.get(k) != nxt.get(k):
+ result[k] = (prv.get(k), nxt.get(k))
+ return result
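+ # illustrative example:
+ # dict_diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4})
+ # -> {'b': (2, 3), 'c': (None, 4)}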
+
+
+def colorize(msg, color):
+ """Given a string add necessary codes to format the string."""
+ if DONT_COLORIZE:
+ return msg
+ else:
+ return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc'])
+
+
+class CallbackModule(CallbackBase):
+ """selective.py callback plugin."""
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.selective'
+
+ def __init__(self, display=None):
+ """selective.py callback plugin."""
+ super(CallbackModule, self).__init__(display)
+ self.last_skipped = False
+ self.last_task_name = None
+ self.printed_last_task = False
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ global DONT_COLORIZE
+ DONT_COLORIZE = self.get_option('nocolor')
+
+ def _print_task(self, task_name=None):
+ if task_name is None:
+ task_name = self.last_task_name
+
+ if not self.printed_last_task:
+ self.printed_last_task = True
+ line_length = 120
+ if self.last_skipped:
+ print()
+ msg = colorize("# {0} {1}".format(task_name,
+ '*' * (line_length - len(task_name))), 'bold')
+ print(msg)
+
+ def _indent_text(self, text, indent_level):
+ return '\n'.join("{0}{1}".format(' ' * indent_level, line)
+ for line in text.splitlines())
+
+ def _print_diff(self, diff, indent_level):
+ if isinstance(diff, dict):
+ try:
+ diff = '\n'.join(difflib.unified_diff(diff['before'].splitlines(),
+ diff['after'].splitlines(),
+ fromfile=diff.get('before_header',
+ 'new_file'),
+ tofile=diff['after_header']))
+ except AttributeError:
+ diff = dict_diff(diff['before'], diff['after'])
+ if diff:
+ diff = colorize(str(diff), 'changed')
+ print(self._indent_text(diff, indent_level + 4))
+
+ def _print_host_or_item(self, host_or_item, changed, msg, diff, is_host, error, stdout, stderr):
+ if is_host:
+ indent_level = 0
+ name = colorize(host_or_item.name, 'not_so_bold')
+ else:
+ indent_level = 4
+ if isinstance(host_or_item, dict):
+ if 'key' in host_or_item:
+ host_or_item = host_or_item['key']
+ name = colorize(to_text(host_or_item), 'bold')
+
+ if error:
+ color = 'failed'
+ change_string = colorize('FAILED!!!', color)
+ else:
+ color = 'changed' if changed else 'ok'
+ change_string = colorize("changed={0}".format(changed), color)
+
+ msg = colorize(msg, color)
+
+ line_length = 120
+ spaces = ' ' * (40 - len(name) - indent_level)
+ line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string)
+
+ if len(msg) < 50:
+ line += ' -- {0}'.format(msg)
+ print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ else:
+ print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(self._indent_text(msg, indent_level + 4))
+
+ if diff:
+ self._print_diff(diff, indent_level)
+ if stdout:
+ stdout = colorize(stdout, 'failed')
+ print(self._indent_text(stdout, indent_level + 4))
+ if stderr:
+ stderr = colorize(stderr, 'failed')
+ print(self._indent_text(stderr, indent_level + 4))
+
+ def v2_playbook_on_play_start(self, play):
+ """Run on start of the play."""
+ pass
+
+ def v2_playbook_on_task_start(self, task, **kwargs):
+ """Run when a task starts."""
+ self.last_task_name = task.get_name()
+ self.printed_last_task = False
+
+ def _print_task_result(self, result, error=False, **kwargs):
+ """Run when a task finishes correctly."""
+
+ if 'print_action' in result._task.tags or error or self._display.verbosity > 1:
+ self._print_task()
+ self.last_skipped = False
+ msg = to_text(result._result.get('msg', '')) or\
+ to_text(result._result.get('reason', ''))
+
+ stderr = [result._result.get('exception', None),
+ result._result.get('module_stderr', None)]
+ stderr = "\n".join([e for e in stderr if e]).strip()
+
+ self._print_host_or_item(result._host,
+ result._result.get('changed', False),
+ msg,
+ result._result.get('diff', None),
+ is_host=True,
+ error=error,
+ stdout=result._result.get('module_stdout', None),
+ stderr=stderr.strip(),
+ )
+ if 'results' in result._result:
+ for r in result._result['results']:
+ failed = 'failed' in r and r['failed']
+
+ stderr = [r.get('exception', None), r.get('module_stderr', None)]
+ stderr = "\n".join([e for e in stderr if e]).strip()
+
+ self._print_host_or_item(r['item'],
+ r.get('changed', False),
+ to_text(r.get('msg', '')),
+ r.get('diff', None),
+ is_host=False,
+ error=failed,
+ stdout=r.get('module_stdout', None),
+ stderr=stderr.strip(),
+ )
+ else:
+ self.last_skipped = True
+ print('.', end="")
+
+ def v2_playbook_on_stats(self, stats):
+ """Display info about playbook statistics."""
+ print()
+ self.printed_last_task = False
+ self._print_task('STATS')
+
+ hosts = sorted(stats.processed.keys())
+ for host in hosts:
+ s = stats.summarize(host)
+
+ if s['failures'] or s['unreachable']:
+ color = 'failed'
+ elif s['changed']:
+ color = 'changed'
+ else:
+ color = 'ok'
+
+ msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format(
+ host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored'])
+ print(colorize(msg, color))
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ """Run when a task is skipped."""
+ if self._display.verbosity > 1:
+ self._print_task()
+ self.last_skipped = False
+
+ line_length = 120
+ spaces = ' ' * (31 - len(result._host.name) - 4)
+
+ line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'),
+ spaces,
+ colorize("skipped", 'skipped'),)
+
+ reason = result._result.get('skipped_reason', '') or \
+ result._result.get('skip_reason', '')
+ if len(reason) < 50:
+ line += ' -- {0}'.format(reason)
+ print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ else:
+ print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(self._indent_text(reason, 8))
+ print(reason)
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self._print_task_result(result, error=False, **kwargs)
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self._print_task_result(result, error=True, **kwargs)
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self._print_task_result(result, error=True, **kwargs)
+
+ v2_playbook_on_handler_task_start = v2_playbook_on_task_start
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/slack.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/slack.py
new file mode 100644
index 00000000..33cee0ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/slack.py
@@ -0,0 +1,251 @@
+# (C) 2014-2015, Matt Martz <matt@sivel.net>
+# (C) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: slack
+ type: notification
+ requirements:
+ - whitelist in configuration
+ - prettytable (python library)
+ short_description: Sends play events to a Slack channel
+ description:
+ - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
+ - Before Ansible 2.4, only environment variables were available for configuring this plugin.
+ options:
+ webhook_url:
+ required: True
+ description: Slack Webhook URL
+ env:
+ - name: SLACK_WEBHOOK_URL
+ ini:
+ - section: callback_slack
+ key: webhook_url
+ channel:
+ default: "#ansible"
+ description: Slack room to post in.
+ env:
+ - name: SLACK_CHANNEL
+ ini:
+ - section: callback_slack
+ key: channel
+ username:
+ description: Username to post as.
+ env:
+ - name: SLACK_USERNAME
+ default: ansible
+ ini:
+ - section: callback_slack
+ key: username
+ validate_certs:
+ description: Validate the SSL certificate of the Slack server (for HTTPS URLs).
+ env:
+ - name: SLACK_VALIDATE_CERTS
+ ini:
+ - section: callback_slack
+ key: validate_certs
+ default: True
+ type: bool
+'''
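+
+# A hedged configuration sketch assembled from the options above (the webhook
+# URL shown is a placeholder, not a real endpoint):
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.slack
+ Set the environment variable
+ export SLACK_WEBHOOK_URL=https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXX
+ Or set the ansible.cfg variables in the callback_slack block
+ [callback_slack]
+ webhook_url = https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXX
+ channel = #ansible
+'''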
+
+import json
+import os
+import uuid
+
+from ansible import context
+from ansible.module_utils._text import to_text
+from ansible.module_utils.urls import open_url
+from ansible.plugins.callback import CallbackBase
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+
+class CallbackModule(CallbackBase):
+ """This is an ansible callback plugin that sends status
+ updates to a Slack channel during playbook execution.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.slack'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_PRETTYTABLE:
+ self.disabled = True
+ self._display.warning('The `prettytable` python module is not '
+ 'installed. Disabling the Slack callback '
+ 'plugin.')
+
+ self.playbook_name = None
+
+ # This is a 6 character identifier provided with each message.
+ # It makes it easier to correlate messages when more than one
+ # playbook is running simultaneously.
+ self.guid = uuid.uuid4().hex[:6]
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.webhook_url = self.get_option('webhook_url')
+ self.channel = self.get_option('channel')
+ self.username = self.get_option('username')
+ self.show_invocation = (self._display.verbosity > 1)
+ self.validate_certs = self.get_option('validate_certs')
+
+ if self.webhook_url is None:
+ self.disabled = True
+ self._display.warning('Slack Webhook URL was not provided. The '
+ 'Slack Webhook URL can be provided using '
+ 'the `SLACK_WEBHOOK_URL` environment '
+ 'variable.')
+
+ def send_msg(self, attachments):
+ headers = {
+ 'Content-type': 'application/json',
+ }
+
+ payload = {
+ 'channel': self.channel,
+ 'username': self.username,
+ 'attachments': attachments,
+ 'parse': 'none',
+ 'icon_url': ('https://cdn2.hubspot.net/hub/330046/'
+ 'file-449187601-png/ansible_badge.png'),
+ }
+
+ data = json.dumps(payload)
+ self._display.debug(data)
+ self._display.debug(self.webhook_url)
+ try:
+ response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
+ headers=headers)
+ return response.read()
+ except Exception as e:
+ self._display.warning(u'Could not submit message to Slack: %s' %
+ to_text(e))
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook_name = os.path.basename(playbook._file_name)
+
+ title = [
+ '*Playbook initiated* (_%s_)' % self.guid
+ ]
+
+ invocation_items = []
+ if context.CLIARGS and self.show_invocation:
+ tags = context.CLIARGS['tags']
+ skip_tags = context.CLIARGS['skip_tags']
+ extra_vars = context.CLIARGS['extra_vars']
+ subset = context.CLIARGS['subset']
+ inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
+
+ invocation_items.append('Inventory: %s' % ', '.join(inventory))
+ if tags and tags != ['all']:
+ invocation_items.append('Tags: %s' % ', '.join(tags))
+ if skip_tags:
+ invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))
+ if subset:
+ invocation_items.append('Limit: %s' % subset)
+ if extra_vars:
+ invocation_items.append('Extra Vars: %s' %
+ ' '.join(extra_vars))
+
+ title.append('by *%s*' % context.CLIARGS['remote_user'])
+
+ title.append('\n\n*%s*' % self.playbook_name)
+ msg_items = [' '.join(title)]
+ if invocation_items:
+ msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
+
+ msg = '\n'.join(msg_items)
+
+ attachments = [{
+ 'fallback': msg,
+ 'fields': [
+ {
+ 'value': msg
+ }
+ ],
+ 'color': 'warning',
+ 'mrkdwn_in': ['text', 'fallback', 'fields'],
+ }]
+
+ self.send_msg(attachments=attachments)
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Play start messages"""
+
+ name = play.name or 'Play name not specified (%s)' % play._uuid
+ msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
+ attachments = [
+ {
+ 'fallback': msg,
+ 'text': msg,
+ 'color': 'warning',
+ 'mrkdwn_in': ['text', 'fallback', 'fields'],
+ }
+ ]
+ self.send_msg(attachments=attachments)
+
+ def v2_playbook_on_stats(self, stats):
+ """Display info about playbook statistics"""
+
+ hosts = sorted(stats.processed.keys())
+
+ t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
+ 'Failures', 'Rescued', 'Ignored'])
+
+ failures = False
+ unreachable = False
+
+ for h in hosts:
+ s = stats.summarize(h)
+
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
+ 'failures', 'rescued', 'ignored']])
+
+ attachments = []
+ msg_items = [
+ '*Playbook Complete* (_%s_)' % self.guid
+ ]
+ if failures or unreachable:
+ color = 'danger'
+ msg_items.append('\n*Failed!*')
+ else:
+ color = 'good'
+ msg_items.append('\n*Success!*')
+
+ msg_items.append('```\n%s\n```' % t)
+
+ msg = '\n'.join(msg_items)
+
+ attachments.append({
+ 'fallback': msg,
+ 'fields': [
+ {
+ 'value': msg
+ }
+ ],
+ 'color': color,
+ 'mrkdwn_in': ['text', 'fallback', 'fields']
+ })
+
+ self.send_msg(attachments=attachments)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/splunk.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/splunk.py
new file mode 100644
index 00000000..68480752
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/splunk.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: splunk
+ type: aggregate
+ short_description: Sends task result events to Splunk HTTP Event Collector
+ author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
+ description:
+ - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
+ - The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
+ - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
+ requirements:
+ - Whitelisting this callback plugin
+ - 'Create an HTTP Event Collector in Splunk'
+ - 'Define the url and token in ansible.cfg'
+ options:
+ url:
+ description: URL to the Splunk HTTP collector source
+ env:
+ - name: SPLUNK_URL
+ ini:
+ - section: callback_splunk
+ key: url
+ authtoken:
+ description: Token to authenticate the connection to the Splunk HTTP collector
+ env:
+ - name: SPLUNK_AUTHTOKEN
+ ini:
+ - section: callback_splunk
+ key: authtoken
+ validate_certs:
+ description: Whether to validate certificates for connections to HEC. It is not recommended to set this to
+ C(false) except when you are sure that nobody can intercept the connection
+ between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks!
+ env:
+ - name: SPLUNK_VALIDATE_CERTS
+ ini:
+ - section: callback_splunk
+ key: validate_certs
+ type: bool
+ default: true
+ version_added: '1.0.0'
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.splunk
+ Set the environment variable
+ export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
+ export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
+ Set the ansible.cfg variable in the callback_splunk block
+ [callback_splunk]
+ url = http://mysplunkinstance.datapaas.io:8088/services/collector/event
+ authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88
+'''
+
+import json
+import uuid
+import socket
+import getpass
+
+from datetime import datetime
+from os.path import basename
+
+from ansible.module_utils.urls import open_url
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class SplunkHTTPCollectorSource(object):
+ def __init__(self):
+ self.ansible_check_mode = False
+ self.ansible_playbook = ""
+ self.ansible_version = ""
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+ self.user = getpass.getuser()
+
+ def send_event(self, url, authtoken, validate_certs, state, result, runtime):
+ if result._task_fields['args'].get('_ansible_check_mode') is True:
+ self.ansible_check_mode = True
+
+ if result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = \
+ result._task_fields['args'].get('_ansible_version')
+
+ if result._task._role:
+ ansible_role = str(result._task._role)
+ else:
+ ansible_role = None
+
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
+ data = {}
+ data['uuid'] = result._task._uuid
+ data['session'] = self.session
+ data['status'] = state
+ data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
+ '+0000')
+ data['host'] = self.host
+ data['ip_address'] = self.ip_address
+ data['user'] = self.user
+ data['runtime'] = runtime
+ data['ansible_version'] = self.ansible_version
+ data['ansible_check_mode'] = self.ansible_check_mode
+ data['ansible_host'] = result._host.name
+ data['ansible_playbook'] = self.ansible_playbook
+ data['ansible_role'] = ansible_role
+ data['ansible_task'] = result._task_fields
+ data['ansible_result'] = result._result
+
+ # This wraps the JSON payload in an outer JSON event, as required by Splunk
+ jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True)
+ jsondata = '{"event":' + jsondata + "}"
+
+ open_url(
+ url,
+ jsondata,
+ headers={
+ 'Content-type': 'application/json',
+ 'Authorization': 'Splunk ' + authtoken
+ },
+ method='POST',
+ validate_certs=validate_certs
+ )
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.splunk'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.start_datetimes = {} # Collect task start times
+ self.url = None
+ self.authtoken = None
+ self.validate_certs = None
+ self.splunk = SplunkHTTPCollectorSource()
+
+ def _runtime(self, result):
+ return (
+ datetime.utcnow() -
+ self.start_datetimes[result._task._uuid]
+ ).total_seconds()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys,
+ var_options=var_options,
+ direct=direct)
+
+ self.url = self.get_option('url')
+
+ if self.url is None:
+ self.disabled = True
+ self._display.warning('Splunk HTTP collector source URL was '
+ 'not provided. The Splunk HTTP collector '
+ 'source URL can be provided using the '
+ '`SPLUNK_URL` environment variable or '
+ 'in the ansible.cfg file.')
+
+ self.authtoken = self.get_option('authtoken')
+
+ if self.authtoken is None:
+ self.disabled = True
+ self._display.warning('Splunk HTTP collector requires an '
+ 'authentication token. The Splunk HTTP collector '
+ 'authentication token can be provided using the '
+ '`SPLUNK_AUTHTOKEN` environment variable or '
+ 'in the ansible.cfg file.')
+
+ self.validate_certs = self.get_option('validate_certs')
+
+ def v2_playbook_on_start(self, playbook):
+ self.splunk.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'OK',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'SKIPPED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def runner_on_async_failed(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'UNREACHABLE',
+ result,
+ self._runtime(result)
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/stderr.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/stderr.py
new file mode 100644
index 00000000..9aa0e3fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/stderr.py
@@ -0,0 +1,71 @@
+# (c) 2017, Frederic Van Espen <github@freh.be>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: stderr
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: Splits output, sending failed tasks to stderr
+ deprecated:
+ why: The 'default' callback plugin now supports this functionality
+ removed_in: '2.0.0' # was Ansible 2.11
+ alternative: "'default' callback plugin with 'display_failed_stderr = yes' option"
+ extends_documentation_fragment:
+ - default_callback
+ description:
+ - This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr.
+ - It also does not output skipped host/task/item status.
+'''
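+
+# A sketch of the documented alternative (assumed ansible.cfg keys for the
+# 'default' callback; verify against your Ansible version):
+EXAMPLES = '''
+examples: >
+ Instead of this deprecated plugin, use the default callback
+ [defaults]
+ stdout_callback = default
+ display_failed_stderr = yes
+'''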
+
+from ansible import constants as C
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ This is the stderr callback plugin, which reuses the default
+ callback plugin but sends error output to stderr.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.stderr'
+
+ def __init__(self):
+
+ self.super_ref = super(CallbackModule, self)
+ self.super_ref.__init__()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._handle_exception(result._result, use_stderr=True)
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)), color=C.COLOR_ERROR,
+ stderr=True)
+ else:
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_ERROR, stderr=True)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/sumologic.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/sumologic.py
new file mode 100644
index 00000000..bfb8d586
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/sumologic.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+callback: sumologic
+type: aggregate
+short_description: Sends task result events to Sumologic
+author: "Ryan Currah (@ryancurrah)"
+description:
+ - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source
+requirements:
+ - Whitelisting this callback plugin
+ - 'Create an HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
+ of C("timestamp": "(.*)")'
+options:
+ url:
+ description: URL to the Sumologic HTTP collector source
+ env:
+ - name: SUMOLOGIC_URL
+ ini:
+ - section: callback_sumologic
+ key: url
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.sumologic
+
+ Set the environment variable
+ export SUMOLOGIC_URL=https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
+
+ Set the ansible.cfg variable in the callback_sumologic block
+ [callback_sumologic]
+ url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
+'''
+
+import json
+import uuid
+import socket
+import getpass
+
+from datetime import datetime
+from os.path import basename
+
+from ansible.module_utils.urls import open_url
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class SumologicHTTPCollectorSource(object):
+ def __init__(self):
+ self.ansible_check_mode = False
+ self.ansible_playbook = ""
+ self.ansible_version = ""
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+ self.user = getpass.getuser()
+
+ def send_event(self, url, state, result, runtime):
+ if result._task_fields['args'].get('_ansible_check_mode') is True:
+ self.ansible_check_mode = True
+
+ if result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = \
+ result._task_fields['args'].get('_ansible_version')
+
+ if result._task._role:
+ ansible_role = str(result._task._role)
+ else:
+ ansible_role = None
+
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
+ data = {}
+ data['uuid'] = result._task._uuid
+ data['session'] = self.session
+ data['status'] = state
+ data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
+ '+0000')
+ data['host'] = self.host
+ data['ip_address'] = self.ip_address
+ data['user'] = self.user
+ data['runtime'] = runtime
+ data['ansible_version'] = self.ansible_version
+ data['ansible_check_mode'] = self.ansible_check_mode
+ data['ansible_host'] = result._host.name
+ data['ansible_playbook'] = self.ansible_playbook
+ data['ansible_role'] = ansible_role
+ data['ansible_task'] = result._task_fields
+ data['ansible_result'] = result._result
+
+ open_url(
+ url,
+ data=json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True),
+ headers={
+ 'Content-type': 'application/json',
+ 'X-Sumo-Host': data['ansible_host']
+ },
+ method='POST'
+ )
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.sumologic'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.start_datetimes = {} # Collect task start times
+ self.url = None
+ self.sumologic = SumologicHTTPCollectorSource()
+
+ def _runtime(self, result):
+ return (
+ datetime.utcnow() -
+ self.start_datetimes[result._task._uuid]
+ ).total_seconds()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.url = self.get_option('url')
+
+ if self.url is None:
+ self.disabled = True
+ self._display.warning('Sumologic HTTP collector source URL was '
+ 'not provided. The Sumologic HTTP collector '
+ 'source URL can be provided using the '
+ '`SUMOLOGIC_URL` environment variable or '
+ 'in the ansible.cfg file.')
+
+ def v2_playbook_on_start(self, playbook):
+ self.sumologic.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'OK',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'SKIPPED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def runner_on_async_failed(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'UNREACHABLE',
+ result,
+ self._runtime(result)
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/syslog_json.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/syslog_json.py
new file mode 100644
index 00000000..dad34c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/syslog_json.py
@@ -0,0 +1,107 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: syslog_json
+ type: notification
+ requirements:
+ - whitelist in configuration
+ short_description: sends JSON events to syslog
+ description:
+ - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format.
+ - Before Ansible 2.9, only environment variables were available for configuration.
+ options:
+ server:
+ description: syslog server that will receive the event
+ env:
+ - name: SYSLOG_SERVER
+ default: localhost
+ ini:
+ - section: callback_syslog_json
+ key: syslog_server
+ port:
+ description: port on which the syslog server is listening
+ env:
+ - name: SYSLOG_PORT
+ default: 514
+ ini:
+ - section: callback_syslog_json
+ key: syslog_port
+ facility:
+ description: syslog facility to log as
+ env:
+ - name: SYSLOG_FACILITY
+ default: user
+ ini:
+ - section: callback_syslog_json
+ key: syslog_facility
+'''
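+
+# A hedged configuration sketch derived from the options above (server name is
+# a placeholder):
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.syslog_json
+ Then point the plugin at your syslog server
+ [callback_syslog_json]
+ syslog_server = logs.example.com
+ syslog_port = 514
+ syslog_facility = user
+'''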
+
+import os
+import json
+
+import logging
+import logging.handlers
+
+import socket
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ logs ansible-playbook and ansible runs to a syslog server in json format
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.syslog_json'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ syslog_host = self.get_option("server")
+ syslog_port = int(self.get_option("port"))
+ syslog_facility = self.get_option("facility")
+
+ self.logger = logging.getLogger('ansible logger')
+ self.logger.setLevel(logging.DEBUG)
+
+ self.handler = logging.handlers.SysLogHandler(
+ address=(syslog_host, syslog_port),
+ facility=syslog_facility
+ )
+ self.logger.addHandler(self.handler)
+ self.hostname = socket.gethostname()
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def runner_on_ok(self, host, res):
+ self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def runner_on_skipped(self, host, item=None):
+ self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')
+
+ def runner_on_unreachable(self, host, res):
+ self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/unixy.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/unixy.py
new file mode 100644
index 00000000..fa3e6d25
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/unixy.py
@@ -0,0 +1,246 @@
+# Copyright: (c) 2017, Allyson Bowles <@akatch>
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: unixy
+ type: stdout
+ author: Allyson Bowles (@akatch)
+ short_description: condensed Ansible output
+ description:
+ - Consolidated Ansible output in the style of LINUX/UNIX startup logs.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
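+
+# A minimal enablement sketch (assumed ansible.cfg layout); unixy is a stdout
+# callback, so it replaces the default display rather than being whitelisted:
+EXAMPLES = '''
+examples: >
+ To enable, set it as the stdout callback in ansible.cfg
+ [defaults]
+ stdout_callback = community.general.unixy
+'''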
+
+from os.path import basename
+from ansible import constants as C
+from ansible import context
+from ansible.module_utils._text import to_text
+from ansible.utils.color import colorize, hostcolor
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ Design goals:
+ - Print consolidated output that looks like a *NIX startup log
+ - Defaults should avoid displaying unnecessary information wherever possible
+
+ TODOs:
+ - Only display task names if the task runs on at least one host
+ - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line)
+ - Consolidate stats display
+ - Display whether run is in --check mode
+ - Don't show play name if no hosts found
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.unixy'
+
+ def _run_is_verbose(self, result):
+ return ((self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result)
+
+ def _get_task_display_name(self, task):
+ self.task_display_name = None
+ display_name = task.get_name().strip().split(" : ")
+
+ task_display_name = display_name[-1]
+ if task_display_name.startswith("include"):
+ return
+ else:
+ self.task_display_name = task_display_name
+
+ def _preprocess_result(self, result):
+ self.delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
+ self._handle_warnings(result._result)
+
+ def _process_result_output(self, result, msg):
+ task_host = result._host.get_name()
+ task_result = "%s %s" % (task_host, msg)
+
+ if self._run_is_verbose(result):
+ task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4))
+ return task_result
+
+ if self.delegated_vars:
+ task_delegate_host = self.delegated_vars['ansible_host']
+ task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg)
+
+ if result._result.get('msg') and result._result.get('msg') != "All items completed":
+ task_result += " | msg: " + to_text(result._result.get('msg'))
+
+ if result._result.get('stdout'):
+ task_result += " | stdout: " + result._result.get('stdout')
+
+ if result._result.get('stderr'):
+ task_result += " | stderr: " + result._result.get('stderr')
+
+ return task_result
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._get_task_display_name(task)
+ if self.task_display_name is not None:
+ self._display.display("%s..." % self.task_display_name)
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._get_task_display_name(task)
+ if self.task_display_name is not None:
+ self._display.display("%s (via handler)... " % self.task_display_name)
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if name and play.hosts:
+ msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
+ else:
+ msg = u"---"
+
+ self._display.display(msg)
+
+ def v2_runner_on_skipped(self, result, ignore_errors=False):
+ if self.display_skipped_hosts:
+ self._preprocess_result(result)
+ display_color = C.COLOR_SKIP
+ msg = "skipped"
+
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+ else:
+ return
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._preprocess_result(result)
+ display_color = C.COLOR_ERROR
+ msg = "failed"
+ item_value = self._get_item_label(result._result)
+ if item_value:
+ msg += " | item: %s" % (item_value,)
+
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
+
+ def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK):
+ self._preprocess_result(result)
+
+ result_was_changed = ('changed' in result._result and result._result['changed'])
+ if result_was_changed:
+ msg = "done"
+ item_value = self._get_item_label(result._result)
+ if item_value:
+ msg += " | item: %s" % (item_value,)
+ display_color = C.COLOR_CHANGED
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+ elif self.display_ok_hosts:
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+
+ def v2_runner_item_on_skipped(self, result):
+ self.v2_runner_on_skipped(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self.v2_runner_on_failed(result)
+
+ def v2_runner_item_on_ok(self, result):
+ self.v2_runner_on_ok(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self._preprocess_result(result)
+
+ msg = "unreachable"
+ display_color = C.COLOR_UNREACHABLE
+ task_result = self._process_result_output(result, msg)
+
+ self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
+
+ def v2_on_file_diff(self, result):
+ if result._task.loop and 'results' in result._result:
+ for res in result._result['results']:
+ if 'diff' in res and res['diff'] and res.get('changed', False):
+ diff = self._get_diff(res['diff'])
+ if diff:
+ self._display.display(diff)
+ elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
+ diff = self._get_diff(result._result['diff'])
+ if diff:
+ self._display.display(diff)
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.display("\n- Play recap -", screen_only=True)
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ # TODO how else can we display these?
+ t = stats.summarize(h)
+
+ self._display.display(u" %s : %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN)),
+ screen_only=True
+ )
+
+ self._display.display(u" %s : %s %s %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None),
+ colorize(u'rescued', t['rescued'], None),
+ colorize(u'ignored', t['ignored'], None)),
+ log_only=True
+ )
+ if stats.custom and self.show_custom_stats:
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display(" No hosts found!", color=C.COLOR_DEBUG)
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR)
+
+ def v2_playbook_on_start(self, playbook):
+ # TODO display whether this run is happening in check mode
+ self._display.display("Executing playbook %s" % basename(playbook._file_name))
+
+ # show CLI arguments
+ if self._display.verbosity > 3:
+ if context.CLIARGS.get('args'):
+ self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
+ color=C.COLOR_VERBOSE, screen_only=True)
+
+ for argument in (a for a in context.CLIARGS if a != 'args'):
+ val = context.CLIARGS[argument]
+ if val:
+ self._display.vvvv('%s: %s' % (argument, val))
+
+ def v2_runner_retry(self, result):
+ msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries'])
+ if self._run_is_verbose(result):
+ msg += "Result was: %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_DEBUG)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/yaml.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/yaml.py
new file mode 100644
index 00000000..2fbb2f48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/yaml.py
@@ -0,0 +1,130 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: yaml
+ type: stdout
+ short_description: yaml-ized Ansible screen output
+ description:
+ - Ansible output that can be quite a bit easier to read than the
+ default JSON formatting.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
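+
+# A minimal enablement sketch (assumed ansible.cfg layout), analogous to the
+# other stdout callbacks in this collection:
+EXAMPLES = '''
+examples: >
+ To enable, set it as the stdout callback in ansible.cfg
+ [defaults]
+ stdout_callback = community.general.yaml
+'''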
+
+import yaml
+import json
+import re
+import string
+import sys
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.six import string_types
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+# from http://stackoverflow.com/a/15423007/115478
+def should_use_block(value):
+ """Returns true if string should be in block format"""
+ for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
+ if c in value:
+ return True
+ return False
+
+
+def my_represent_scalar(self, tag, value, style=None):
+ """Uses block style for multi-line strings"""
+ if style is None:
+ if should_use_block(value):
+ style = '|'
+ # we care more about readability than accuracy, so...
+ # ...no trailing space
+ value = value.rstrip()
+ # ...and non-printable characters
+ value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
+ # ...tabs prevent blocks from expanding
+ value = value.expandtabs()
+ # ...and odd bits of whitespace
+ value = re.sub(r'[\x0b\x0c\r]', '', value)
+ # ...as does trailing space
+ value = re.sub(r' +\n', '\n', value)
+ else:
+ style = self.default_style
+ node = yaml.representer.ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
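+# Illustrative effect (assumed input): a multi-line value such as
+# "line one\nline two" is rendered as a block scalar
+# stdout: |-
+# line one
+# line two
+# instead of one quoted string with embedded "\n" escapes.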
+
+class CallbackModule(Default):
+
+ """
+ Variation of the Default output which uses nicely readable YAML instead
+ of JSON for printing results.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.yaml'
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+ yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
+
+ def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
+ if result.get('_ansible_no_log', False):
+ return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
+
+ # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
+ abridged_result = strip_internal_keys(module_response_deepcopy(result))
+
+ # remove invocation unless specifically wanting it
+ if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
+ del abridged_result['invocation']
+
+ # remove diff information from screen output
+ if self._display.verbosity < 3 and 'diff' in result:
+ del abridged_result['diff']
+
+ # remove exception from screen output
+ if 'exception' in abridged_result:
+ del abridged_result['exception']
+
+ dumped = ''
+
+ # put changed and skipped into a header line
+ if 'changed' in abridged_result:
+ dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
+ del abridged_result['changed']
+
+ if 'skipped' in abridged_result:
+ dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
+ del abridged_result['skipped']
+
+ # if we already have stdout, we don't need stdout_lines
+ if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
+ abridged_result['stdout_lines'] = '<omitted>'
+
+ # if we already have stderr, we don't need stderr_lines
+ if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
+ abridged_result['stderr_lines'] = '<omitted>'
+
+ if abridged_result:
+ dumped += '\n'
+ dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
+
+ # indent by a couple of spaces
+ dumped = '\n '.join(dumped.split('\n')).rstrip()
+ return dumped
+
+ def _serialize_diff(self, diff):
+ return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/chroot.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/chroot.py
new file mode 100644
index 00000000..7c9aed10
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/chroot.py
@@ -0,0 +1,206 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
+ connection: chroot
+ short_description: Interact with local chroot
+ description:
+ - Run commands or put/fetch files to an existing chroot on the Ansible controller.
+ options:
+ remote_addr:
+ description:
+ - The path of the chroot you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ executable:
+ description:
+ - User specified executable shell
+ ini:
+ - section: defaults
+ key: executable
+ env:
+ - name: ANSIBLE_EXECUTABLE
+ vars:
+ - name: ansible_executable
+ default: /bin/sh
+ chroot_exe:
+ description:
+ - User specified chroot binary
+ ini:
+ - section: chroot_connection
+ key: exe
+ env:
+ - name: ANSIBLE_CHROOT_EXE
+ vars:
+ - name: ansible_chroot_exe
+ default: chroot
+'''
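+
+# A hedged inventory sketch (path is a placeholder): the inventory hostname is
+# the chroot directory itself, per the remote_addr default above.
+EXAMPLES = '''
+examples: >
+ Add the chroot path as a host and select this connection plugin
+ [chroots]
+ /srv/buster-chroot ansible_connection=community.general.chroot
+'''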
+
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.basic import is_executable
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local chroot based connections '''
+
+ transport = 'community.general.chroot'
+ has_pipelining = True
+ # su currently has an undiagnosed issue with calculating the file
+ # checksums (so copy, for instance, doesn't work right)
+ # Have to look into that before re-enabling this
+ has_tty = False
+
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.chroot = self._play_context.remote_addr
+
+ if os.geteuid() != 0:
+ raise AnsibleError("chroot connection requires running as root")
+
+ # we're running as root on the local system so do some
+ # trivial checks for ensuring 'host' is actually a chroot'able dir
+ if not os.path.isdir(self.chroot):
+ raise AnsibleError("%s is not a directory" % self.chroot)
+
+ chrootsh = os.path.join(self.chroot, 'bin/sh')
+ # Want to check for a usable bourne shell inside the chroot.
+ # is_executable() == True is sufficient. For symlinks it
+ # gets really complicated really fast. So we punt on finding that
+ # out. As long as it's a symlink we assume that it will work
+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
+ raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+
+ def _connect(self):
+ ''' connect to the chroot '''
+ if os.path.isabs(self.get_option('chroot_exe')):
+ self.chroot_cmd = self.get_option('chroot_exe')
+ else:
+ try:
+ self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ ''' run a command on the chroot. This is only needed for implementing
+ put_file() and fetch_file() so that we don't have to read the whole file
+ into memory.
+
+ Compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
+ executable = self.get_option('executable')
+ local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
+
+ display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the chroot '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to chroot '''
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from chroot to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
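A minimal inventory sketch for the plugin above (host alias and chroot path are hypothetical); the chroot path travels in ansible_host, and the controller process must run as root for __init__ to succeed:

    all:
      hosts:
        buildroot:
          ansible_connection: community.general.chroot
          ansible_host: /srv/chroots/buildroot    # directory containing bin/sh
          ansible_chroot_exe: chroot              # optional; resolved via PATH if not absolute
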
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/docker.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/docker.py
new file mode 100644
index 00000000..732620b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/docker.py
@@ -0,0 +1,366 @@
+# Based on the chroot connection plugin by Maykel Moya
+#
+# (c) 2014, Lorin Hochstein
+# (c) 2015, Leendert Brouwer (https://github.com/objectified)
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Lorin Hochstein (!UNKNOWN)
+ - Leendert Brouwer (!UNKNOWN)
+ connection: docker
+ short_description: Run tasks in docker containers
+ description:
+ - Run commands or put/fetch files to an existing docker container.
+ options:
+ remote_user:
+ description:
+ - The user to execute as inside the container
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ docker_extra_args:
+ description:
+ - Extra arguments to pass to the docker command line
+ default: ''
+ remote_addr:
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_docker_host
+'''
+
+import distutils.spawn
+import fcntl
+import os
+import os.path
+import subprocess
+import re
+
+from distutils.version import LooseVersion
+
+import ansible.constants as C
+from ansible.compat import selectors
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.general.docker'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ # Note: docker supports running as non-root in some configurations.
+ # (For instance, setting the UNIX socket file to be readable and
+ # writable by a specific UNIX group and then putting users into that
+ # group). Therefore we don't check that the user is root when using
+ # this connection. But if the user is getting a permission denied
+ # error it probably means that docker on their system is only
+ # configured to be connected to by root and they are not running as
+ # root.
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ if 'docker_command' in kwargs:
+ self.docker_cmd = kwargs['docker_command']
+ else:
+ self.docker_cmd = distutils.spawn.find_executable('docker')
+ if not self.docker_cmd:
+ raise AnsibleError("docker command not found in PATH")
+
+ docker_version = self._get_docker_version()
+ if docker_version == u'dev':
+ display.warning(u'Docker version number is "dev". Will assume latest version.')
+ if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
+ raise AnsibleError('docker connection type requires docker 1.3 or higher')
+
+ # The remote user we will request from docker (if supported)
+ self.remote_user = None
+ # The actual user which will execute commands in docker (if known)
+ self.actual_user = None
+
+ if self._play_context.remote_user is not None:
+ if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
+ # Support for specifying the exec user was added in docker 1.7
+ self.remote_user = self._play_context.remote_user
+ self.actual_user = self.remote_user
+ else:
+ self.actual_user = self._get_docker_remote_user()
+
+ if self.actual_user != self._play_context.remote_user:
+ display.warning(u'docker {0} does not support remote_user, using container default: {1}'
+ .format(docker_version, self.actual_user or u'?'))
+ elif self._display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to
+ self.actual_user = self._get_docker_remote_user()
+
+ @staticmethod
+ def _sanitize_version(version):
+ version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
+ version = re.sub(u'^v', u'', version)
+ return version
+
+ def _old_docker_version(self):
+ cmd_args = []
+ if self._play_context.docker_extra_args:
+ cmd_args += self._play_context.docker_extra_args.split(' ')
+
+ old_version_subcommand = ['version']
+
+ old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
+ p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+
+ return old_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _new_docker_version(self):
+ # no result yet, must be newer Docker version
+ cmd_args = []
+ if self._play_context.docker_extra_args:
+ cmd_args += self._play_context.docker_extra_args.split(' ')
+
+ new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
+
+ new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
+ p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+ return new_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _get_docker_version(self):
+
+ cmd, cmd_output, err, returncode = self._old_docker_version()
+ if returncode == 0:
+ for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
+ if line.startswith(u'Server version:'): # old docker versions
+ return self._sanitize_version(line.split()[2])
+
+ cmd, cmd_output, err, returncode = self._new_docker_version()
+ if returncode:
+ raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
+
+ return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
+
+ def _get_docker_remote_user(self):
+ """ Get the default user configured in the docker container """
+ p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ out, err = p.communicate()
+ out = to_text(out, errors='surrogate_or_strict')
+
+ if p.returncode != 0:
+ display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
+ return None
+
+ # The default exec user is root, unless it was changed in the Dockerfile with USER
+ return out.strip() or u'root'
+
+ def _build_exec_cmd(self, cmd):
+ """ Build the local docker exec command to run cmd on remote_host
+
+ If remote_user is available and is supported by the docker
+ version we are using, it will be provided to docker exec.
+ """
+
+ local_cmd = [self.docker_cmd]
+
+ if self._play_context.docker_extra_args:
+ local_cmd += self._play_context.docker_extra_args.split(' ')
+
+ local_cmd += [b'exec']
+
+ if self.remote_user is not None:
+ local_cmd += [b'-u', self.remote_user]
+
+ # -i is needed to keep stdin open which allows pipelining to work
+ local_cmd += [b'-i', self._play_context.remote_addr] + cmd
+
+ return local_cmd
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ self.actual_user or u'?'), host=self._play_context.remote_addr
+ )
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
+
+ display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
+ display.debug("opening command with Popen()")
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ p = subprocess.Popen(
+ local_cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self._play_context.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ display.debug("done with docker.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ out_path = self._prefix_login_path(out_path)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(
+ "file or module does not exist: %s" % to_native(in_path))
+
+ out_path = shlex_quote(out_path)
+ # Older docker doesn't have native support for copying files into
+ # running containers, so we use docker exec to implement this
+ # Although docker version 1.8 and later provide support, the
+ # owner and group of the files are always set to root
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ try:
+ p = subprocess.Popen(args, stdin=in_file,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ in_path = self._prefix_login_path(in_path)
+ # out_path is the final file path, but docker takes a directory, not a
+ # file path
+ out_dir = os.path.dirname(out_path)
+
+ args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
+ else:
+ actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
+
+ if p.returncode != 0:
+ # Older docker versions don't support fetching files with the `cp`
+ # command; if `cp` fails, fall back to `dd` instead
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ try:
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=out_file, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ # Rename if needed
+ if actual_out_path != out_path:
+ os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
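A hedged inventory sketch for the docker plugin (container and user names are made up); ansible_docker_user only takes effect when the client supports docker exec -u, per the version check in __init__:

    all:
      hosts:
        webapp:
          ansible_connection: community.general.docker
          ansible_host: my_app_container     # docker container name
          ansible_docker_user: appuser       # requires docker >= 1.7
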
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/funcd.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/funcd.py
new file mode 100644
index 00000000..83f4a9e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/funcd.py
@@ -0,0 +1,102 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Michael Scherer (@mscherer) <misc@zarb.org>
+ connection: funcd
+ short_description: Use funcd to connect to target
+ description:
+ - This transport permits you to use Ansible over Func.
+ - For people who have already set up func and wish to play with Ansible,
+ this permits moving to Ansible gradually without having to completely redo the network setup.
+ options:
+ remote_addr:
+ description:
+ - The name of the minion you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_func_host
+'''
+
+HAVE_FUNC = False
+try:
+ import func.overlord.client as fc
+ HAVE_FUNC = True
+except ImportError:
+ pass
+
+import os
+import tempfile
+import shutil
+
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(object):
+ ''' Func-based connections '''
+
+ has_pipelining = False
+
+ def __init__(self, runner, host, port, *args, **kwargs):
+ self.runner = runner
+ self.host = host
+ # port is unused; the connection goes over func
+ self.port = port
+
+ def connect(self, port=None):
+ if not HAVE_FUNC:
+ raise AnsibleError("func is not installed")
+
+ self.client = fc.Client(self.host)
+ return self
+
+ def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
+ ''' run a command on the remote minion '''
+
+ if in_data:
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ # totally ignores privilege escalation
+ display.vvv("EXEC %s" % (cmd), host=self.host)
+ p = self.client.command.run(cmd)[self.host]
+ return (p[0], p[1], p[2])
+
+ def _normalize_path(self, path, prefix):
+ if not path.startswith(os.path.sep):
+ path = os.path.join(os.path.sep, path)
+ normpath = os.path.normpath(path)
+ return os.path.join(prefix, normpath[1:])
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ out_path = self._normalize_path(out_path, '/')
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ self.client.local.copyfile.send(in_path, out_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ in_path = self._normalize_path(in_path, '/')
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ # need to use a tmp dir due to the difference in semantics between
+ # getfile (which takes a directory as destination) and fetch_file,
+ # which takes a file directly
+ tmpdir = tempfile.mkdtemp(prefix="func_ansible")
+ self.client.local.getfile.get(in_path, tmpdir)
+ shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), out_path)
+ shutil.rmtree(tmpdir)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ pass
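An assumed inventory entry for the funcd transport; the inventory hostname must match the func minion name, since it is passed straight to fc.Client:

    all:
      hosts:
        minion01.example.com:
          ansible_connection: community.general.funcd
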
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/iocage.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/iocage.py
new file mode 100644
index 00000000..aafd3a1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/iocage.py
@@ -0,0 +1,82 @@
+# Based on jail.py
+# (c) 2013, Michael Scherer <misc@zarb.org>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2016, Stephan Lohse <dev-github@ploek.org>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
+ connection: iocage
+ short_description: Run tasks in iocage jails
+ description:
+ - Run commands or put/fetch files to an existing iocage jail
+ options:
+ remote_addr:
+ description:
+ - Name of the iocage jail
+ vars:
+ - name: ansible_host
+ - name: ansible_iocage_host
+ remote_user:
+ description:
+ - User to execute as inside the jail
+ vars:
+ - name: ansible_user
+ - name: ansible_iocage_user
+'''
+
+import subprocess
+
+from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
+from ansible.module_utils._text import to_native
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(Jail):
+ ''' Local iocage based connections '''
+
+ transport = 'community.general.iocage'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ self.ioc_jail = play_context.remote_addr
+
+ self.iocage_cmd = Jail._search_executable('iocage')
+
+ jail_uuid = self.get_jail_uuid()
+
+ kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid)
+
+ display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format(
+ iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]),
+ host=kwargs[Jail.modified_jailname_key])
+
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ def get_jail_uuid(self):
+ p = subprocess.Popen([self.iocage_cmd, 'get', 'host_hostuuid', self.ioc_jail],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+ stdout, stderr = p.communicate()
+
+ if stdout is not None:
+ stdout = to_native(stdout)
+
+ if stderr is not None:
+ stderr = to_native(stderr)
+
+ # ensure the process has exited and returncode is populated
+ p.wait()
+
+ if p.returncode != 0:
+ raise AnsibleError(u"iocage returned an error: {0}".format(stdout))
+
+ return stdout.strip('\n')
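An illustrative inventory entry (the jail tag is hypothetical); get_jail_uuid() resolves the tag through iocage get host_hostuuid, and the jail plugin then operates on the raw ioc-<uuid> name:

    all:
      hosts:
        testjail:
          ansible_connection: community.general.iocage
          ansible_iocage_host: testjail      # iocage tag, translated to ioc-<uuid>
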
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/jail.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/jail.py
new file mode 100644
index 00000000..7b44b9cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/jail.py
@@ -0,0 +1,201 @@
+# Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
+# and chroot.py by Maykel Moya <mmoya@speedyrails.com>
+# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
+# Copyright (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Ansible Core Team
+ connection: jail
+ short_description: Run tasks in jails
+ description:
+ - Run commands or put/fetch files to an existing jail
+ options:
+ remote_addr:
+ description:
+ - Name of the jail
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_jail_host
+ remote_user:
+ description:
+ - User to execute as inside the jail
+ vars:
+ - name: ansible_user
+ - name: ansible_jail_user
+'''
+
+import distutils.spawn
+import os
+import os.path
+import subprocess
+import traceback
+import ansible.constants as C
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local BSD Jail based connections '''
+
+ modified_jailname_key = 'conn_jail_name'
+
+ transport = 'community.general.jail'
+ # Pipelining is enabled here; the global pipelining setting in
+ # ansible.cfg must also be enabled for it to take effect
+ has_pipelining = True
+ has_tty = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.jail = self._play_context.remote_addr
+ if self.modified_jailname_key in kwargs:
+ self.jail = kwargs[self.modified_jailname_key]
+
+ if os.geteuid() != 0:
+ raise AnsibleError("jail connection requires running as root")
+
+ self.jls_cmd = self._search_executable('jls')
+ self.jexec_cmd = self._search_executable('jexec')
+
+ if self.jail not in self.list_jails():
+ raise AnsibleError("incorrect jail name %s" % self.jail)
+
+ @staticmethod
+ def _search_executable(executable):
+ cmd = distutils.spawn.find_executable(executable)
+ if not cmd:
+ raise AnsibleError("%s command not found in PATH" % executable)
+ return cmd
+
+ def list_jails(self):
+ p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout, stderr = p.communicate()
+
+ return to_text(stdout, errors='surrogate_or_strict').split()
+
+ def _connect(self):
+ ''' connect to the jail; nothing to do here '''
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ ''' run a command on the jail. This is only needed for implementing
+ put_file() and get_file() so that we don't have to read the whole file
+ into memory.
+
+ Compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
+
+ local_cmd = [self.jexec_cmd]
+ set_env = ''
+
+ if self._play_context.remote_user is not None:
+ local_cmd += ['-U', self._play_context.remote_user]
+ # update HOME since -U does not update the jail environment
+ set_env = 'HOME=~' + self._play_context.remote_user + ' '
+
+ local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
+
+ display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the jail '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to jail '''
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("jail connection requires dd command in the jail")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from jail to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("jail connection requires dd command in the jail")
+
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
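A hedged inventory sketch for the jail transport (jail and user names are assumptions); the name must appear in the output of jls -q name, and the controller must run as root:

    all:
      hosts:
        www1:
          ansible_connection: community.general.jail
          ansible_jail_host: www1       # must match a name from 'jls -q name'
          ansible_jail_user: deploy     # passed to 'jexec -U'
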
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxc.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxc.py
new file mode 100644
index 00000000..b3b68a51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxc.py
@@ -0,0 +1,228 @@
+# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
+ connection: lxc
+ short_description: Run tasks in lxc containers via lxc python library
+ description:
+ - Run commands or put/fetch files to an existing lxc container using lxc python library
+ options:
+ remote_addr:
+ description:
+ - Container identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_lxc_host
+ executable:
+ default: /bin/sh
+ description:
+ - Shell executable
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxc_executable
+'''
+
+import os
+import shutil
+import traceback
+import select
+import fcntl
+import errno
+
+HAS_LIBLXC = False
+try:
+ import lxc as _lxc
+ HAS_LIBLXC = True
+except ImportError:
+ pass
+
+from ansible import constants as C
+from ansible import errors
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ ''' Local lxc based connections '''
+
+ transport = 'community.general.lxc'
+ has_pipelining = True
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.container_name = self._play_context.remote_addr
+ self.container = None
+
+ def _connect(self):
+ ''' connect to the lxc container '''
+ super(Connection, self)._connect()
+
+ if not HAS_LIBLXC:
+ msg = "lxc bindings for python2 are not installed"
+ raise errors.AnsibleError(msg)
+
+ if self.container:
+ return
+
+ self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
+ self.container = _lxc.Container(self.container_name)
+ if self.container.state == "STOPPED":
+ raise errors.AnsibleError("%s is not running" % self.container_name)
+
+ def _communicate(self, pid, in_data, stdin, stdout, stderr):
+ buf = {stdout: [], stderr: []}
+ read_fds = [stdout, stderr]
+ if in_data:
+ write_fds = [stdin]
+ else:
+ write_fds = []
+ while len(read_fds) > 0 or len(write_fds) > 0:
+ try:
+ ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
+ except select.error as e:
+ if e.args[0] == errno.EINTR:
+ continue
+ raise
+ for fd in ready_writes:
+ in_data = in_data[os.write(fd, in_data):]
+ if len(in_data) == 0:
+ write_fds.remove(fd)
+ for fd in ready_reads:
+ data = os.read(fd, 32768)
+ if not data:
+ read_fds.remove(fd)
+ buf[fd].append(data)
+
+ (pid, returncode) = os.waitpid(pid, 0)
+
+ return returncode, b"".join(buf[stdout]), b"".join(buf[stderr])
+
+ def _set_nonblocking(self, fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+ return fd
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the chroot '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ # python2-lxc needs bytes. python3-lxc needs text.
+ executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
+ local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
+
+ read_stdout, write_stdout = None, None
+ read_stderr, write_stderr = None, None
+ read_stdin, write_stdin = None, None
+
+ try:
+ read_stdout, write_stdout = os.pipe()
+ read_stderr, write_stderr = os.pipe()
+
+ kwargs = {
+ 'stdout': self._set_nonblocking(write_stdout),
+ 'stderr': self._set_nonblocking(write_stderr),
+ 'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV
+ }
+
+ if in_data:
+ read_stdin, write_stdin = os.pipe()
+ kwargs['stdin'] = self._set_nonblocking(read_stdin)
+
+ self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
+ pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
+ if pid == -1:
+ msg = "failed to attach to container %s" % self.container_name
+ raise errors.AnsibleError(msg)
+
+ write_stdout = os.close(write_stdout)
+ write_stderr = os.close(write_stderr)
+ if read_stdin:
+ read_stdin = os.close(read_stdin)
+
+ return self._communicate(pid,
+ in_data,
+ write_stdin,
+ read_stdout,
+ read_stderr)
+ finally:
+ fds = [read_stdout,
+ write_stdout,
+ read_stderr,
+ write_stderr,
+ read_stdin,
+ write_stdin]
+ for fd in fds:
+ if fd:
+ os.close(fd)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to lxc '''
+ super(Connection, self).put_file(in_path, out_path)
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
+ in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ if not os.path.exists(in_path):
+ msg = "file or module does not exist: %s" % in_path
+ raise errors.AnsibleFileNotFound(msg)
+ try:
+ src_file = open(in_path, "rb")
+ except IOError:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to open input file to %s" % in_path)
+ try:
+ def write_file(args):
+ with open(out_path, 'wb+') as dst_file:
+ shutil.copyfileobj(src_file, dst_file)
+ try:
+ self.container.attach_wait(write_file, None)
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to transfer file to %s" % out_path
+ raise errors.AnsibleError(msg)
+ finally:
+ src_file.close()
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from lxc to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
+ in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ try:
+ dst_file = open(out_path, "wb")
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to open output file %s" % out_path
+ raise errors.AnsibleError(msg)
+ try:
+ def write_file(args):
+ try:
+ with open(in_path, 'rb') as src_file:
+ shutil.copyfileobj(src_file, dst_file)
+ finally:
+ # this is needed in the lxc child process
+ # to flush internal python buffers
+ dst_file.close()
+ try:
+ self.container.attach_wait(write_file, None)
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to transfer file from %s to %s" % (in_path, out_path)
+ raise errors.AnsibleError(msg)
+ finally:
+ dst_file.close()
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
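A sketch of a matching inventory entry (container name assumed); this plugin drives liblxc directly, so the lxc python bindings must be importable on the controller and the container must already be running:

    all:
      hosts:
        ct1:
          ansible_connection: community.general.lxc
          ansible_lxc_host: ct1         # container name as known to liblxc
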
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxd.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxd.py
new file mode 100644
index 00000000..754b4f9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxd.py
@@ -0,0 +1,125 @@
+# (c) 2016 Matt Clay <matt@mystile.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Matt Clay (@mattclay) <matt@mystile.com>
+ connection: lxd
+ short_description: Run tasks in lxc containers via lxc CLI
+ description:
+ - Run commands or put/fetch files to an existing lxc container using lxc CLI
+ options:
+ remote_addr:
+ description:
+ - Container identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_lxd_host
+ executable:
+ description:
+ - shell to use for execution inside container
+ default: /bin/sh
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxd_executable
+'''
+
+import os
+from distutils.spawn import find_executable
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ """ lxd based connections """
+
+ transport = 'community.general.lxd'
+ has_pipelining = True
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self._host = self._play_context.remote_addr
+ self._lxc_cmd = find_executable("lxc")
+
+ if not self._lxc_cmd:
+ raise AnsibleError("lxc command not found in PATH")
+
+ if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
+ self._display.warning('lxd does not support remote_user, using container default: root')
+
+ def _connect(self):
+ """connect to lxd (nothing to do here) """
+ super(Connection, self)._connect()
+
+ if not self._connected:
+ self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """ execute a command on the lxd host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
+
+ local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = process.communicate(in_data)
+
+ stdout = to_text(stdout)
+ stderr = to_text(stderr)
+
+ if stderr == "error: Container is not running.\n":
+ raise AnsibleConnectionFailure("container not running: %s" % self._host)
+
+ if stderr == "error: not found\n":
+ raise AnsibleConnectionFailure("container not found: %s" % self._host)
+
+ return process.returncode, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ """ put a file from local to lxd """
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)
+
+ if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
+
+ local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ process.communicate()
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from lxd to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
+
+ local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ process.communicate()
+
+ def close(self):
+ """ close the connection (nothing to do here) """
+ super(Connection, self).close()
+
+ self._connected = False
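Unlike the liblxc-based plugin above, lxd shells out to the lxc client binary; a hypothetical inventory entry:

    all:
      hosts:
        ct2:
          ansible_connection: community.general.lxd
          ansible_lxd_host: ct2                 # container name as shown by 'lxc list'
          ansible_lxd_executable: /bin/bash     # optional; defaults to /bin/sh
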
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/oc.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/oc.py
new file mode 100644
index 00000000..4b035397
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/oc.py
@@ -0,0 +1,173 @@
+# Based on the docker connection plugin
+#
+# Connection plugin for configuring kubernetes containers with kubectl
+# (c) 2017, XuXinkun <xuxinkun@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - xuxinkun (!UNKNOWN)
+
+ connection: oc
+
+ short_description: Execute tasks in pods running on OpenShift.
+
+ description:
+ - Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
+ container platform.
+
+
+ requirements:
+ - oc (go binary)
+
+ options:
+ oc_pod:
+ description:
+ - Pod name. Required when the host name does not match pod name.
+ default: ''
+ vars:
+ - name: ansible_oc_pod
+ env:
+ - name: K8S_AUTH_POD
+ oc_container:
+ description:
+ - Container name. Required when a pod contains more than one container.
+ default: ''
+ vars:
+ - name: ansible_oc_container
+ env:
+ - name: K8S_AUTH_CONTAINER
+ oc_namespace:
+ description:
+ - The namespace of the pod
+ default: ''
+ vars:
+ - name: ansible_oc_namespace
+ env:
+ - name: K8S_AUTH_NAMESPACE
+ oc_extra_args:
+ description:
+ - Extra arguments to pass to the oc command line.
+ default: ''
+ vars:
+ - name: ansible_oc_extra_args
+ env:
+ - name: K8S_AUTH_EXTRA_ARGS
+ oc_kubeconfig:
+ description:
+ - Path to an oc config file. Defaults to I(~/.kube/config)
+ default: ''
+ vars:
+ - name: ansible_oc_kubeconfig
+ - name: ansible_oc_config
+ env:
+ - name: K8S_AUTH_KUBECONFIG
+ oc_context:
+ description:
+ - The name of a context found in the K8s config file.
+ default: ''
+ vars:
+ - name: ansible_oc_context
+ env:
+ - name: K8S_AUTH_CONTEXT
+ oc_host:
+ description:
+ - URL for accessing the API.
+ default: ''
+ vars:
+ - name: ansible_oc_host
+ - name: ansible_oc_server
+ env:
+ - name: K8S_AUTH_HOST
+ - name: K8S_AUTH_SERVER
+ oc_token:
+ description:
+ - API authentication bearer token.
+ vars:
+ - name: ansible_oc_token
+ - name: ansible_oc_api_key
+ env:
+ - name: K8S_AUTH_TOKEN
+ - name: K8S_AUTH_API_KEY
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_cert_file
+ - name: ansible_oc_client_cert
+ env:
+ - name: K8S_AUTH_CERT_FILE
+ aliases: [ oc_cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_key_file
+ - name: ansible_oc_client_key
+ env:
+ - name: K8S_AUTH_KEY_FILE
+ aliases: [ oc_key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_ssl_ca_cert
+ - name: ansible_oc_ca_cert
+ env:
+ - name: K8S_AUTH_SSL_CA_CERT
+ aliases: [ oc_ssl_ca_cert ]
+ validate_certs:
+ description:
+ - Whether or not to verify the API server's SSL certificate. Defaults to I(true).
+ default: ''
+ vars:
+ - name: ansible_oc_verify_ssl
+ - name: ansible_oc_validate_certs
+ env:
+ - name: K8S_AUTH_VERIFY_SSL
+ aliases: [ oc_verify_ssl ]
+'''
+
+from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection
+
+
+CONNECTION_TRANSPORT = 'community.general.oc'
+
+CONNECTION_OPTIONS = {
+ 'oc_container': '-c',
+ 'oc_namespace': '-n',
+ 'oc_kubeconfig': '--config',
+ 'oc_context': '--context',
+ 'oc_host': '--server',
+ 'client_cert': '--client-certificate',
+ 'client_key': '--client-key',
+ 'ca_cert': '--certificate-authority',
+ 'validate_certs': '--insecure-skip-tls-verify',
+ 'oc_token': '--token'
+}
+
+
+class Connection(KubectlConnection):
+ ''' Local oc based connections '''
+ transport = CONNECTION_TRANSPORT
+ connection_options = CONNECTION_OPTIONS
+ documentation = DOCUMENTATION
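Since each option above maps onto one oc flag via CONNECTION_OPTIONS, a hedged inventory sketch (namespace, token, and server URL are placeholders):

    all:
      hosts:
        my-pod:
          ansible_connection: community.general.oc
          ansible_oc_namespace: my-project                      # -> oc -n
          ansible_oc_token: <bearer-token-placeholder>          # -> oc --token
          ansible_oc_host: https://openshift.example.com:6443   # -> oc --server
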
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/qubes.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/qubes.py
new file mode 100644
index 00000000..ed03b3d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/qubes.py
@@ -0,0 +1,159 @@
+# Based on the buildah connection plugin
+# Copyright (c) 2017 Ansible Project
+# 2018 Kushal Das
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+#
+# Written by: Kushal Das (https://github.com/kushaldas)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+ connection: qubes
+ short_description: Interact with an existing QubesOS AppVM
+
+ description:
+ - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools.
+
+ author: Kushal Das (@kushaldas)
+
+
+ options:
+ remote_addr:
+ description:
+ - vm name
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ remote_user:
+ description:
+ - The user to execute as inside the vm.
+ default: The *user* account, the default in Qubes OS.
+ vars:
+ - name: ansible_user
+# keyword:
+# - name: hosts
+'''
+
+import shlex
+import shutil
+
+import os
+import base64
+import subprocess
+
+import ansible.constants as C
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase, ensure_connect
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# this _has to be_ named Connection
+class Connection(ConnectionBase):
+ """This is a connection plugin for qubes: it uses qubes-run-vm binary to interact with the containers."""
+
+ # String used to identify this Connection class from other classes
+ transport = 'community.general.qubes'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self._remote_vmname = self._play_context.remote_addr
+ self._connected = False
+ # Default username in Qubes
+ self.user = "user"
+ if self._play_context.remote_user:
+ self.user = self._play_context.remote_user
+
+ def _qubes(self, cmd=None, in_data=None, shell="qubes.VMShell"):
+ """run qvm-run executable
+
+ :param cmd: cmd string for remote system
+ :param in_data: data passed to qvm-run's stdin
+ :return: return code, stdout, stderr
+ """
+ display.vvvv("CMD: ", cmd)
+ if not cmd.endswith("\n"):
+ cmd = cmd + "\n"
+ local_cmd = []
+
+ # For dom0
+ local_cmd.extend(["qvm-run", "--pass-io", "--service"])
+ if self.user != "user":
+ # Means we have a remote_user value
+ local_cmd.extend(["-u", self.user])
+
+ local_cmd.append(self._remote_vmname)
+
+ local_cmd.append(shell)
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ display.vvvv("Local cmd: ", local_cmd)
+
+ display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
+ p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ # Here we are writing the actual command to the remote bash
+ p.stdin.write(to_bytes(cmd, errors='surrogate_or_strict'))
+ stdout, stderr = p.communicate(input=in_data)
+ return p.returncode, stdout, stderr
+
+ def _connect(self):
+ """No persistent connection is being maintained."""
+ super(Connection, self)._connect()
+ self._connected = True
+
+ @ensure_connect
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """Run specified command in a running QubesVM """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.vvvv("CMD IS: %s" % cmd)
+
+ rc, stdout, stderr = self._qubes(cmd)
+
+ display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr))
+ return rc, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ """ Place a local file located in 'in_path' inside VM at 'out_path' """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+
+ with open(in_path, "rb") as fobj:
+ source_data = fobj.read()
+
+ retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
+ # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
+ # hope it will have appropriate permissions
+ if retcode == 127:
+ retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
+
+ if retcode != 0:
+ raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))
+
+ def fetch_file(self, in_path, out_path):
+ """Obtain file specified via 'in_path' from the container and place it at 'out_path' """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+
+ # We are running in dom0
+ cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)]
+ with open(out_path, "wb") as fobj:
+ p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj)
+ p.communicate()
+ if p.returncode != 0:
+ raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path))
+
+ def close(self):
+ """ Closing the connection """
+ super(Connection, self).close()
+ self._connected = False
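An assumed inventory stanza for a Qubes AppVM; commands travel over qvm-run --pass-io through the qubes.VMShell service, with put_file falling back from qubes.VMRootShell as shown above:

    all:
      hosts:
        work-vm:
          ansible_connection: community.general.qubes
          ansible_host: work-vm     # AppVM name
          ansible_user: user        # Qubes' default account
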
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/saltstack.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/saltstack.py
new file mode 100644
index 00000000..ac521e11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/saltstack.py
@@ -0,0 +1,106 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# Based on func.py
+# (c) 2014, Michael Scherer <misc@zarb.org>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Michael Scherer (@mscherer) <misc@zarb.org>
+ connection: saltstack
+ short_description: Allow ansible to piggyback on salt minions
+ description:
+ - This allows you to use existing Saltstack infrastructure to connect to targets.
+'''
+
+import re
+import os
+import pty
+import codecs
+import subprocess
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.six.moves import cPickle
+
+HAVE_SALTSTACK = False
+try:
+ import salt.client as sc
+ HAVE_SALTSTACK = True
+except ImportError:
+ pass
+
+from ansible import errors
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ ''' Salt-based connections '''
+
+ has_pipelining = False
+ # while the product is named salt, naming this module "salt" causes
+ # trouble with module imports
+ transport = 'community.general.saltstack'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+ self.host = self._play_context.remote_addr
+
+ def _connect(self):
+ if not HAVE_SALTSTACK:
+ raise errors.AnsibleError("saltstack is not installed")
+
+ self.client = sc.LocalClient()
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, sudoable=False, in_data=None):
+ ''' run a command on the remote minion '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ if in_data:
+ raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ self._display.vvv("EXEC %s" % (cmd), host=self.host)
+ # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
+ res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
+ if self.host not in res:
+ raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
+
+ p = res[self.host]
+ return (p['retcode'], p['stdout'], p['stderr'])
+
+ def _normalize_path(self, path, prefix):
+ if not path.startswith(os.path.sep):
+ path = os.path.join(os.path.sep, path)
+ normpath = os.path.normpath(path)
+ return os.path.join(prefix, normpath[1:])
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ out_path = self._normalize_path(out_path, '/')
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ with open(in_path, 'rb') as in_fh:
+ content = in_fh.read()
+ self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path])
+
+ # TODO test it
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ in_path = self._normalize_path(in_path, '/')
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
+ with open(out_path, 'wb') as out_fh:
+ out_fh.write(content)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ pass
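A minimal assumed inventory for piggybacking on salt; the inventory hostname must be a responding salt minion id, since exec_command indexes the salt return dictionary by it:

    all:
      hosts:
        minion01:
          ansible_connection: community.general.saltstack
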
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/zone.py
new file mode 100644
index 00000000..755081a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/zone.py
@@ -0,0 +1,200 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
+# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Ansible Core Team
+ connection: zone
+ short_description: Run tasks in a zone instance
+ description:
+ - Run commands or put/fetch files to an existing zone
+ options:
+ remote_addr:
+ description:
+ - Zone identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_zone_host
+'''
+
+import distutils.spawn
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local zone based connections '''
+
+ transport = 'community.general.zone'
+ has_pipelining = True
+ has_tty = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.zone = self._play_context.remote_addr
+
+ if os.geteuid() != 0:
+ raise AnsibleError("zone connection requires running as root")
+
+ self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm'))
+ self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
+
+ if self.zone not in self.list_zones():
+ raise AnsibleError("incorrect zone name %s" % self.zone)
+
+ @staticmethod
+ def _search_executable(executable):
+ cmd = distutils.spawn.find_executable(executable)
+ if not cmd:
+ raise AnsibleError("%s command not found in PATH" % executable)
+ return cmd
+
+ def list_zones(self):
+ process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ zones = []
+        for line in process.stdout.readlines():
+            # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
+            fields = to_text(line).split(':')
+            if fields[1] != 'global':
+                zones.append(fields[1])
+
+ return zones
+
+ def get_zone_path(self):
+ # solaris10vm# zoneadm -z cswbuild list -p
+ # -:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
+ process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        path = to_text(process.stdout.readlines()[0]).split(':')[3]
+        return path + '/root'
+
+ def _connect(self):
+ ''' connect to the zone; nothing to do here '''
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ ''' run a command on the zone. This is only needed for implementing
+        put_file() and fetch_file() so that we don't have to read the whole file
+ into memory.
+
+        Compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
+ # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass
+ # this through /bin/sh -c here. Instead it goes through the shell
+ # that zlogin selects.
+ local_cmd = [self.zlogin_cmd, self.zone, cmd]
+        local_cmd = [to_bytes(i) for i in local_cmd]
+
+ display.vvv("EXEC %s" % (local_cmd), host=self.zone)
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the zone '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+        exist in any given zone. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to zone '''
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(in_path, 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+                    raise AnsibleError("zone connection requires dd command in the zone")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from zone to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("zone connection requires dd command in the zone")
+
+ with open(out_path, 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
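
A minimal usage sketch, assuming a non-global zone named 'work' and a play executed as root on the Solaris global zone that hosts it (the inventory hostname is used as the zone name):

    # playbook.yml -- runs commands inside the 'work' zone via zlogin
    - hosts: work
      connection: community.general.zone
      tasks:
        - name: Print the zone's node name
          command: uname -n
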
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_gcp.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_gcp.py
new file mode 100644
index 00000000..06872543
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_gcp.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # GCP doc fragment.
+ DOCUMENTATION = r'''
+options:
+ project:
+ description:
+ - The Google Cloud Platform project to use.
+ type: str
+ auth_kind:
+ description:
+ - The type of credential used.
+ type: str
+ required: true
+ choices: [ application, machineaccount, serviceaccount ]
+ service_account_contents:
+ description:
+ - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
+ type: jsonarg
+ service_account_file:
+ description:
+ - The path of a Service Account JSON file if serviceaccount is selected as type.
+ type: path
+ service_account_email:
+ description:
+ - An optional service account email address if machineaccount is selected
+ and the user does not wish to use the default email.
+ type: str
+ scopes:
+ description:
+ - Array of scopes to be used.
+ type: list
+ elements: str
+ env_type:
+ description:
+ - Specifies which Ansible environment you're running this module within.
+ - This should not be set unless you know what you're doing.
+ - This only alters the User Agent string for any API requests.
+ type: str
+notes:
+  - For authentication, you can set service_account_file using the
+    C(GCP_SERVICE_ACCOUNT_FILE) env variable.
+  - For authentication, you can set service_account_contents using the
+    C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
+ - For authentication, you can set service_account_email using the
+ C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
+ - For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
+ variable.
+ - For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+ - Environment variables values will only be used if the playbook values are
+ not set.
+ - The I(service_account_email) and I(service_account_file) options are
+ mutually exclusive.
+'''
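
A sketch of how a module consuming this fragment might be invoked; some_gcp_module is a placeholder, not a real module, and the project name and file path are assumptions:

    - name: Authenticate with a service account file
      community.general.some_gcp_module:   # hypothetical module name
        project: my-project
        auth_kind: serviceaccount
        service_account_file: /etc/gcp/sa.json
        scopes:
          - https://www.googleapis.com/auth/cloud-platform
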
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_netapp.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_netapp.py
new file mode 100644
index 00000000..c3d0d3ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_netapp.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+  - "Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire"
+'''
+
+ # Documentation fragment for ONTAP (na_cdot)
+ ONTAP = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ username:
+ required: true
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ aliases: ['user']
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
+ - Ansible 2.2
+ - netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
+
+notes:
+ - The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.
+
+'''
+
+ # Documentation fragment for SolidFire
+ SOLIDFIRE = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the SolidFire cluster.
+ username:
+ required: true
+ description:
+ - Please ensure that the user has the adequate permissions. For more information, please read the official documentation
+ U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
+ aliases: ['user']
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+
+requirements:
+ - The modules were developed with SolidFire 10.1
+ - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
+
+notes:
+ - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
+
+'''
+
+ # Documentation fragment for ONTAP (na_ontap)
+ NA_ONTAP = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ type: str
+ required: true
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ required: true
+ aliases: [ pass ]
+ https:
+ description:
+    - Enable or disable HTTPS.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ http_port:
+ description:
+    - Override the default port (80 or 443) with this port.
+ type: int
+ ontapi:
+ description:
+    - The ONTAP API version to use.
+ type: int
+ use_rest:
+ description:
+    - Whether to use the REST API if it is supported by the target system for all the resources and attributes the module requires. Otherwise the module will revert to ZAPI.
+    - Always -- will always use the REST API.
+    - Never -- will always use the ZAPI.
+    - Auto -- will try to use the REST API.
+ default: Auto
+ choices: ['Never', 'Always', 'Auto']
+ type: str
+
+
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
+ - Ansible 2.6
+ - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
+ - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
+  - To enable HTTP on the cluster you must run the following commands 'set -privilege advanced;' and 'system services web modify -http-enabled true;'
+
+notes:
+ - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
+
+'''
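
A sketch of the NA_ONTAP options in use; na_ontap_example is a hypothetical module name and the credential values are placeholders:

    - name: Shared ONTAP connection options
      na_ontap_example:                  # hypothetical module name
        hostname: cluster.example.com
        username: admin
        password: "{{ vault_ontap_password }}"
        https: true
        validate_certs: true
        use_rest: Auto
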
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/alicloud.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
new file mode 100644
index 00000000..f9c9640b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Alicloud only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ alicloud_access_key:
+ description:
+ - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY),
+ C(ALICLOUD_ACCESS_KEY_ID) will be used instead.
+ aliases: ['access_key_id', 'access_key']
+ type: str
+ alicloud_secret_key:
+ description:
+ - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY),
+ C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
+ aliases: ['secret_access_key', 'secret_key']
+ type: str
+ alicloud_region:
+ description:
+ - The Alibaba Cloud region to use. If not specified then the value of environment variable
+ C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead.
+ aliases: ['region', 'region_id']
+ required: true
+ type: str
+ alicloud_security_token:
+ description:
+ - The Alibaba Cloud security token. If not specified then the value of environment variable
+ C(ALICLOUD_SECURITY_TOKEN) will be used instead.
+ aliases: ['security_token']
+ type: str
+ alicloud_assume_role:
+ description:
+ - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
+ - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name),
+ I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy)
+ type: dict
+ aliases: ['assume_role']
+ alicloud_assume_role_arn:
+ description:
+      - The Alibaba Cloud role_arn. The ARN of the role to assume. If ARN is set to an empty string,
+        it does not perform role switching, and Ansible will execute with the provided credentials.
+        It supports the environment variable C(ALICLOUD_ASSUME_ROLE_ARN).
+ aliases: ['assume_role_arn']
+ type: str
+ alicloud_assume_role_session_name:
+ description:
+      - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted,
+        'ansible' is passed to the AssumeRole call as the session name. It supports the environment
+        variable C(ALICLOUD_ASSUME_ROLE_SESSION_NAME).
+ aliases: ['assume_role_session_name']
+ type: str
+ alicloud_assume_role_session_expiration:
+ description:
+      - The Alibaba Cloud session_expiration. The time after which the established session for assuming
+        the role expires. Valid value range is 900-3600 seconds. Defaults to 3600 (in this case Alicloud
+        uses its own default value). It supports the environment variable C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
+ aliases: ['assume_role_session_expiration']
+ type: int
+ ecs_role_name:
+ description:
+ - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control'
+ section of the Alibaba Cloud console.
+ - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the
+ metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/<ecs_role_name>) to obtain the STS
+ credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding
+ credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage.
+ aliases: ['role_name']
+ type: str
+ profile:
+ description:
+ - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the
+ ALICLOUD_PROFILE environment variable.
+ type: str
+ shared_credentials_file:
+ description:
+ - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE
+ environment variable.
+ - If this is not set and a profile is specified, ~/.aliyun/config.json will be used.
+ type: str
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+notes:
+ - If parameters are not set within the module, the following
+ environment variables can be used in decreasing order of precedence
+ C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID),
+ C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY),
+ C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID),
+ C(ALICLOUD_SECURITY_TOKEN),
+ C(ALICLOUD_ECS_ROLE_NAME),
+ C(ALICLOUD_SHARED_CREDENTIALS_FILE),
+ C(ALICLOUD_PROFILE),
+ C(ALICLOUD_ASSUME_ROLE_ARN),
+ C(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
+ C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION),
+  - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can typically be used to specify the
+    Alicloud region, when required, but this can also be configured in the footmark config file.
+'''
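
A sketch of the credential options in a task; alicloud_example is a hypothetical module name, and explicit task values take precedence over the environment variables listed above:

    - name: Explicit Alicloud credentials with role assumption
      alicloud_example:                  # hypothetical module name
        alicloud_access_key: "{{ vault_access_key }}"
        alicloud_secret_key: "{{ vault_secret_key }}"
        alicloud_region: cn-beijing
        alicloud_assume_role:
          alicloud_assume_role_arn: "acs:ram::123456789012:role/ansible"
          alicloud_assume_role_session_name: ansible
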
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
new file mode 100644
index 00000000..6f590611
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Standard basic-auth documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_url:
+ description:
+ - The resolvable endpoint for the API
+ type: str
+ api_username:
+ description:
+ - The username to use for authentication against the API
+ type: str
+ api_password:
+ description:
+ - The password to use for authentication against the API
+ type: str
+ validate_certs:
+ description:
+ - Whether or not to validate SSL certs when supplying a https endpoint.
+ type: bool
+ default: yes
+'''
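
A sketch of a task using these shared options; example_module and the endpoint are placeholders:

    - name: Call an API with basic authentication
      example_module:                    # hypothetical module name
        api_url: https://api.example.com
        api_username: admin
        api_password: "{{ vault_api_password }}"
        validate_certs: true
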
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
new file mode 100644
index 00000000..02435e25
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+
+
+class ModuleDocFragment(object):
+
+ # Dimension Data doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ region:
+ description:
+ - The target region.
+    - Regions are defined in the Apache libcloud project, in file C(libcloud/common/dimensiondata.py).
+    - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html).
+ - Note that the default value "na" stands for "North America".
+ - The module prepends 'dd-' to the region choice.
+ type: str
+ default: na
+ mcp_user:
+ description:
+ - The username used to authenticate to the CloudControl API.
+ - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata).
+ type: str
+ mcp_password:
+ description:
+ - The password used to authenticate to the CloudControl API.
+ - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
+ - Required if I(mcp_user) is specified.
+ type: str
+ location:
+ description:
+ - The target datacenter.
+ type: str
+ required: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+ - This should only be used on private instances of the CloudControl API that use self-signed certificates.
+ type: bool
+ default: yes
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
new file mode 100644
index 00000000..ac3deab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+
+
+class ModuleDocFragment(object):
+
+ # Dimension Data ("wait-for-completion" parameters) doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ wait:
+ description:
+      - Should we wait for the task to complete before moving on to the next.
+ type: bool
+ default: no
+ wait_time:
+ description:
+ - The maximum amount of time (in seconds) to wait for the task to complete.
+ - Only applicable if I(wait=true).
+ type: int
+ default: 600
+ wait_poll_interval:
+ description:
+ - The amount of time (in seconds) to wait between checks for task completion.
+ - Only applicable if I(wait=true).
+ type: int
+ default: 2
+ '''
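
A sketch combining this wait fragment with the base Dimension Data options above; dimensiondata_example is a hypothetical module name:

    - name: CloudControl operation with completion polling
      dimensiondata_example:             # hypothetical module name
        region: na                       # the module prepends 'dd-'
        location: NA9
        mcp_user: admin
        mcp_password: "{{ vault_mcp_password }}"
        wait: true
        wait_time: 900
        wait_poll_interval: 5
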
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/docker.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/docker.py
new file mode 100644
index 00000000..ad3efb1f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/docker.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Docker doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ docker_host:
+ description:
+ - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
+ TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
+ the module will automatically replace C(tcp) in the connection URL with C(https).
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: unix://var/run/docker.sock
+ aliases: [ docker_url ]
+ tls_hostname:
+ description:
+ - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
+ be used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: localhost
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: auto
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ ca_cert:
+ description:
+ - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description:
+ - Path to the client's TLS certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ client_key:
+ description:
+ - Path to the client's TLS key file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ssl_version:
+ description:
+ - Provide a valid SSL version number. Default value determined by ssl.py module.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+ used instead.
+ type: str
+ tls:
+ description:
+ - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+ server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ debug:
+ description:
+ - Debug mode
+ type: bool
+ default: no
+
+notes:
+ - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+ You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+ C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
+ with the product that sets up the environment. It will set these variables for you. See
+ U(https://docs.docker.com/machine/reference/env/) for more details.
+ - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+ For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+  - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
+ In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+ and use C($DOCKER_CONFIG/config.json) otherwise.
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
+
+ DOCKER_PY_1_DOCUMENTATION = r'''
+options: {}
+requirements:
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
+ install the C(docker) Python module. Note that both modules should *not*
+ be installed at the same time. Also note that when both modules are installed
+ and one of them is uninstalled, the other might no longer function and a
+ reinstall of it is required."
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
+ # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
+
+ DOCKER_PY_2_DOCUMENTATION = r'''
+options: {}
+requirements:
+ - "Python >= 2.7"
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ This module does *not* work with docker-py."
+'''
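
A sketch of the TLS-related options from this fragment; docker_example is a hypothetical module name and the certificate paths are assumptions. Task parameters such as these override the corresponding DOCKER_* environment variables:

    - name: Talk to a remote Docker daemon over verified TLS
      docker_example:                    # hypothetical module name
        docker_host: tcp://192.0.2.23:2376
        validate_certs: true
        ca_cert: /etc/docker/certs/ca.pem
        client_cert: /etc/docker/certs/cert.pem
        client_key: /etc/docker/certs/key.pem
        timeout: 120
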
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/emc.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/emc.py
new file mode 100644
index 00000000..cce76823
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/emc.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Luca Lorenzetto (@remix_tj) <lorenzetto.luca@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for EMC VNX.
+'''
+
+ # Documentation fragment for VNX (emc_vnx)
+ EMC_VNX = r'''
+options:
+ sp_address:
+ description:
+ - Address of the SP of target/secondary storage.
+ type: str
+ required: true
+ sp_user:
+ description:
+ - Username for accessing SP.
+ type: str
+ default: sysadmin
+ sp_password:
+ description:
+    - Password for accessing SP.
+ type: str
+ default: sysadmin
+requirements:
+ - An EMC VNX Storage device.
+ - Ansible 2.7.
+ - storops (0.5.10 or greater). Install using 'pip install storops'.
+notes:
+ - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hetzner.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hetzner.py
new file mode 100644
index 00000000..32a595f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hetzner.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Standard Hetzner Robot documentation fragment
+ DOCUMENTATION = r'''
+options:
+ hetzner_user:
+ description: The username for the Robot webservice user.
+ type: str
+ required: yes
+ hetzner_password:
+ description: The password for the Robot webservice user.
+ type: str
+ required: yes
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
new file mode 100644
index 00000000..fa51ccdb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
@@ -0,0 +1,35 @@
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # HPE 3PAR doc fragment
+ DOCUMENTATION = '''
+options:
+ storage_system_ip:
+ description:
+ - The storage system IP address.
+ type: str
+ required: true
+ storage_system_password:
+ description:
+ - The storage system password.
+ type: str
+ required: true
+ storage_system_username:
+ description:
+ - The storage system user name.
+ type: str
+ required: true
+
+requirements:
+ - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
+ - WSAPI service should be enabled on the 3PAR storage array.
+notes:
+  - check_mode is not supported.
+ '''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hwc.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hwc.py
new file mode 100644
index 00000000..80cd0465
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hwc.py
@@ -0,0 +1,65 @@
+# Copyright: (c) 2018, Huawei Inc.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # HWC doc fragment.
+ DOCUMENTATION = '''
+options:
+ identity_endpoint:
+ description:
+ - The Identity authentication URL.
+ type: str
+ required: true
+ user:
+ description:
+    - The user name to log in with (currently only user names are
+      supported, and not user IDs).
+ type: str
+ required: true
+ password:
+ description:
+    - The password to log in with.
+ type: str
+ required: true
+ domain:
+ description:
+ - The name of the Domain to scope to (Identity v3).
+ (currently only domain names are supported, and not domain IDs).
+ type: str
+ required: true
+ project:
+ description:
+ - The name of the Tenant (Identity v2) or Project (Identity v3).
+ (currently only project names are supported, and not
+ project IDs).
+ type: str
+ required: true
+ region:
+ description:
+ - The region to which the project belongs.
+ type: str
+ id:
+ description:
+    - The ID of the resource to be managed.
+ type: str
+notes:
+ - For authentication, you can set identity_endpoint using the
+ C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable.
+ - For authentication, you can set user using the
+ C(ANSIBLE_HWC_USER) env variable.
+ - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env
+ variable.
+ - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env
+ variable.
+ - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env
+ variable.
+ - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable.
+ - Environment variables values will only be used if the playbook values are
+ not set.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
new file mode 100644
index 00000000..0d8eb5fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # ibm_storage documentation fragment
+ DOCUMENTATION = r'''
+options:
+ username:
+ description:
+    - Management user on the Spectrum Accelerate storage system.
+ type: str
+ required: True
+ password:
+ description:
+    - Password for username on the Spectrum Accelerate storage system.
+ type: str
+ required: True
+ endpoints:
+ description:
+    - The hostname or management IP of the Spectrum Accelerate storage system.
+ type: str
+ required: True
+notes:
+ - This module requires pyxcli python library.
+ Use 'pip install pyxcli' in order to get pyxcli.
+requirements:
+ - python >= 2.7
+ - pyxcli
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/influxdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
new file mode 100644
index 00000000..a31c84cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for influxdb modules
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description:
+    - The hostname or IP address on which the InfluxDB server is listening.
+    - Since Ansible 2.5, defaults to localhost.
+ type: str
+ default: localhost
+ username:
+ description:
+ - Username that will be used to authenticate against InfluxDB server.
+ - Alias C(login_username) added in Ansible 2.5.
+ type: str
+ default: root
+ aliases: [ login_username ]
+ password:
+ description:
+ - Password that will be used to authenticate against InfluxDB server.
+ - Alias C(login_password) added in Ansible 2.5.
+ type: str
+ default: root
+ aliases: [ login_password ]
+ port:
+ description:
+    - The port on which the InfluxDB server is listening.
+ type: int
+ default: 8086
+ path:
+ description:
+    - The path on which the InfluxDB server is accessible.
+    - Only available when using python-influxdb >= 5.1.0.
+ type: str
+ version_added: '0.2.0'
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ ssl:
+ description:
+ - Use https instead of http to connect to InfluxDB server.
+ type: bool
+ default: false
+ timeout:
+ description:
+    - Number of seconds Requests will wait for the client to establish a connection.
+ type: int
+ retries:
+ description:
+ - Number of retries client will try before aborting.
+ - C(0) indicates try until success.
+    - Only available when using python-influxdb >= 4.1.0.
+ type: int
+ default: 3
+ use_udp:
+ description:
+ - Use UDP to connect to InfluxDB server.
+ type: bool
+ default: false
+ udp_port:
+ description:
+ - UDP port to connect to InfluxDB server.
+ type: int
+ default: 4444
+ proxies:
+ description:
+ - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
+ type: dict
+'''
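
A sketch of the shared connection options; influxdb_example is a hypothetical module name and the proxy URL is an assumption:

    - name: Connect to InfluxDB over HTTPS through a proxy
      influxdb_example:                  # hypothetical module name
        hostname: influx.example.com
        port: 8086
        username: admin
        password: "{{ vault_influx_password }}"
        ssl: true
        validate_certs: true
        retries: 5
        proxies:
          https: http://proxy.example.com:3128
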
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ipa.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ipa.py
new file mode 100644
index 00000000..47bcee60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ipa.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-18, Ansible Project
+# Copyright: (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for FreeIPA/IPA modules
+ DOCUMENTATION = r'''
+options:
+ ipa_port:
+ description:
+ - Port of FreeIPA / IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead.
+    - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then the default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: int
+ default: 443
+ ipa_host:
+ description:
+ - IP or hostname of IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead.
+ - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
+ - The relevant entry needed in FreeIPA is the 'ipa-ca' entry.
+ - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ default: ipa.example.com
+ ipa_user:
+ description:
+ - Administrative account used on IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead.
+    - If both the environment variable C(IPA_USER) and the value are not specified in the task, then the default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ default: admin
+ ipa_pass:
+ description:
+ - Password of administrative user.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead.
+ - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
+ - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server.
+    - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not, the module will use this kerberos keytab to authenticate.
+ - If GSSAPI is not available, the usage of 'ipa_pass' is required.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ ipa_prot:
+ description:
+ - Protocol used by IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead.
+    - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then the default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ choices: [ http, https ]
+ default: https
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ ipa_timeout:
+ description:
+ - Specifies idle timeout (in seconds) for the connection.
+ - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead.
+    - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then the default value is set.
+ type: int
+ default: 10
+'''
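
A sketch of the shared options; ipa_example is a hypothetical module name, and each value could instead come from the corresponding IPA_* environment variable:

    - name: Talk to a FreeIPA server with an admin account
      ipa_example:                       # hypothetical module name
        ipa_host: ipa.example.com
        ipa_port: 443
        ipa_prot: https
        ipa_user: admin
        ipa_pass: "{{ vault_ipa_password }}"
        ipa_timeout: 30
        validate_certs: true
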
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/keycloak.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
new file mode 100644
index 00000000..e664d7ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ auth_keycloak_url:
+ description:
+ - URL to the Keycloak instance.
+ type: str
+ required: true
+ aliases:
+ - url
+
+ auth_client_id:
+ description:
+ - OpenID Connect I(client_id) to authenticate to the API with.
+ type: str
+ default: admin-cli
+
+ auth_realm:
+ description:
+ - Keycloak realm name to authenticate to for API access.
+ type: str
+ required: true
+
+ auth_client_secret:
+ description:
+ - Client Secret to use in conjunction with I(auth_client_id) (if required).
+ type: str
+
+ auth_username:
+ description:
+ - Username to authenticate for API access with.
+ type: str
+ required: true
+ aliases:
+ - username
+
+ auth_password:
+ description:
+ - Password to authenticate for API access with.
+ type: str
+ required: true
+ aliases:
+ - password
+
+ validate_certs:
+ description:
+ - Verify TLS certificates (do not disable this in production).
+ type: bool
+ default: yes
+'''
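
A sketch of the authentication options; keycloak_example is a hypothetical module name:

    - name: Authenticate against the Keycloak admin API
      keycloak_example:                  # hypothetical module name
        auth_keycloak_url: https://keycloak.example.com/auth
        auth_realm: master
        auth_client_id: admin-cli
        auth_username: admin
        auth_password: "{{ vault_keycloak_password }}"
        validate_certs: true
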
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_common_options.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_common_options.py
new file mode 100644
index 00000000..1d3c98fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_common_options.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r'''
+options:
+ resource_definition:
+ description:
+ - "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
+ resource parameters not covered by this module's parameters."
+ - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+ I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+ aliases:
+ - definition
+ - inline
+ type: dict
+ wait:
+ description:
+ - "I(True) if the module should wait for the resource to get into desired state."
+ type: bool
+ default: yes
+ force:
+ description:
+ - If set to C(no), and I(state) is C(present), an existing object will be replaced.
+ type: bool
+ default: no
+ wait_timeout:
+ description:
+ - The amount of time in seconds the module should wait for the resource to get into desired state.
+ type: int
+ default: 120
+ wait_sleep:
+ description:
+ - Number of seconds to sleep between checks.
+    type: int
+    default: 5
+ version_added: '0.2.0'
+ memory:
+ description:
+ - The amount of memory to be requested by virtual machine.
+ - For example 1024Mi.
+ type: str
+ memory_limit:
+ description:
+ - The maximum memory to be used by virtual machine.
+ - For example 1024Mi.
+ type: str
+ machine_type:
+ description:
+ - QEMU machine type is the actual chipset of the virtual machine.
+ type: str
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type.
+ - If more than one merge type is given, the merge types will be tried in order.
+ - "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources, as
+ Custom Resource Definitions typically aren't updatable by the usual strategic merge."
+ - "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
+ type: list
+ choices: [ json, merge, strategic-merge ]
+ cpu_shares:
+ description:
+ - "Specify CPU shares."
+ type: int
+ cpu_limit:
+ description:
+ - "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
+ every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
+ type: int
+ cpu_cores:
+ description:
+ - "Number of CPU cores."
+ type: int
+ cpu_model:
+ description:
+ - "CPU model."
+ - "You can check list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
+    - "I(Note:) User can define the default CPU model as I(default-cpu-model) in the I(kubevirt-config) I(ConfigMap); if not set, I(host-model) is used."
+ - "I(Note:) Be sure that node CPU model where you run a VM, has the same or higher CPU family."
+ - "I(Note:) If CPU model wasn't defined, the VM will have CPU model closest to one that used on the node where the VM is running."
+ type: str
+ bootloader:
+ description:
+ - "Specify the bootloader of the virtual machine."
+ - "All virtual machines use BIOS by default for booting."
+ type: str
+ smbios_uuid:
+ description:
+ - "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
+ type: str
+ cpu_features:
+ description:
+ - "List of dictionary to fine-tune features provided by the selected CPU model."
+ - "I(Note): Policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
+ - "I(Note): In case a policy is omitted for a feature, it will default to require."
+ - "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
+ type: list
+ headless:
+ description:
+ - "Specify if the virtual machine should have attached a minimal Video and Graphics device configuration."
+ - "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is vga
+ compatible and comes with a memory size of 16 MB."
+ hugepage_size:
+ description:
+ - "Specify huge page size."
+ type: str
+ tablets:
+ description:
+    - "Specify tablets to be used as input devices."
+ type: list
+ hostname:
+ description:
+    - "Specifies the hostname of the virtual machine. The hostname will be set by DHCP or by cloud-init if
+       configured; otherwise the virtual machine name will be used."
+ subdomain:
+ description:
+ - "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
+ the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
+ itself can pick up a hostname."
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+notes:
+  - "In order to use this module you have to install the OpenShift Python SDK.
+     To ensure it is installed with the correct version you can create the following task:
+     I(pip: name=openshift>=0.8.2)"
+'''
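
A sketch of how I(resource_definition) interacts with explicit parameters; kubevirt_example is a hypothetical module name, and per the note above the explicit I(namespace) takes priority over the one nested in the definition:

    - name: Merge a partial definition with module parameters
      kubevirt_example:                  # hypothetical module name
        name: testvm
        namespace: vms                   # overrides metadata.namespace below
        memory: 1024Mi
        cpu_cores: 2
        wait: true
        wait_timeout: 180
        resource_definition:
          metadata:
            namespace: ignored           # loses to the namespace parameter
            labels:
              app: demo
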
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_vm_options.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_vm_options.py
new file mode 100644
index 00000000..ba5dc332
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_vm_options.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # Standard KubeVirt documentation fragment
+ DOCUMENTATION = r'''
+options:
+ disks:
+ description:
+ - List of dictionaries which specify disks of the virtual machine.
+ - "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
+ - "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)"
+    - Each disk must have a I(volume) specified that declares which volume type backs the disk.
+      All possible configuration options of volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
+ type: list
+ labels:
+ description:
+ - Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
+ specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
+ imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
+ Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
+ - More on labels that are used for internal implementation U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)
+ type: dict
+ interfaces:
+ description:
+ - An interface defines a virtual network interface of a virtual machine (also called a frontend).
+ - All possible configuration options interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface)
+    - Each interface must have a I(network) specified that declares which logical or physical device it is connected to (also called the backend).
+ All possible configuration options of network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
+ type: list
+ cloud_init_nocloud:
+ description:
+ - "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
+ as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
+ More information U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
+ type: dict
+ affinity:
+ description:
+ - "Describes node affinity scheduling rules for the vm."
+ type: dict
+ suboptions:
+ soft:
+ description:
+ - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
+ node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
+ each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
+ a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches the corresponding
+ C(term); the nodes with the highest sum are the most preferred."
+ type: dict
+ hard:
+ description:
+ - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
+ the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the
+ system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to
+ each C(term) are intersected, i.e. all terms must be satisfied."
+ type: dict
+ node_affinity:
+ description:
+ - "Describes vm affinity scheduling rules e.g. co-locate this vm in the same node, zone, etc. as some other vms"
+ type: dict
+ suboptions:
+ soft:
+ description:
+ - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
+ match_expressions; the nodes with the highest sum are the most preferred."
+ type: dict
+ hard:
+ description:
+ - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
+ the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system
+ may or may not try to eventually evict the vm from its node."
+ type: dict
+ anti_affinity:
+ description:
+ - "Describes vm anti-affinity scheduling rules e.g. avoid putting this vm in the same node, zone, etc. as some other vms."
+ type: dict
+ suboptions:
+ soft:
+ description:
+ - "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may
+ choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
+ etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches
+ the corresponding C(term); the nodes with the highest sum are the most preferred."
+ type: dict
+ hard:
+ description:
+ - "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label
+ update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes
+ corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
+ type: dict
+'''
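+
+ # Usage sketch: the task below assumes the community.general.kubevirt_vm
+ # module, one consumer of this fragment; all names and values are placeholders.
+ #
+ # - name: Start a VM with a containerDisk volume and default settings
+ #   community.general.kubevirt_vm:
+ #     state: running
+ #     name: myvm
+ #     namespace: default
+ #     memory: 64Mi
+ #     disks:
+ #       - name: containerdisk
+ #         volume:
+ #           containerDisk:
+ #             image: kubevirt/cirros-container-disk-demo:latest
+ #         disk:
+ #           bus: virtio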
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ldap.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ldap.py
new file mode 100644
index 00000000..890c22ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ldap.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard LDAP documentation fragment
+ DOCUMENTATION = r'''
+options:
+ bind_dn:
+ description:
+ - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
+ - If this is blank, we'll use an anonymous bind.
+ type: str
+ bind_pw:
+ description:
+ - The password to use with I(bind_dn).
+ type: str
+ dn:
+ required: true
+ description:
+ - The DN of the entry to add or remove.
+ type: str
+ server_uri:
+ description:
+ - A URI to the LDAP server.
+ - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
+ type: str
+ default: ldapi:///
+ start_tls:
+ description:
+ - If true, we'll use the START_TLS LDAP extension.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), SSL certificates will not be validated.
+ - This should only be used on sites using self-signed certificates.
+ type: bool
+ default: yes
+'''
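+
+ # Usage sketch: assuming the community.general.ldap_entry module, one consumer
+ # of this fragment; all DNs and values are placeholders.
+ #
+ # - name: Ensure a container OU exists
+ #   community.general.ldap_entry:
+ #     dn: ou=users,dc=example,dc=com
+ #     objectClass: organizationalUnit
+ #     server_uri: ldap://localhost/
+ #     bind_dn: cn=admin,dc=example,dc=com
+ #     bind_pw: password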
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
new file mode 100644
index 00000000..c55eca16
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2017 Lenovo, Inc.
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pylxca documentation fragment
+ DOCUMENTATION = r'''
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+
+options:
+ login_user:
+ description:
+ - The username for use in HTTP basic authentication.
+ type: str
+ required: true
+
+ login_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ type: str
+ required: true
+
+ auth_url:
+ description:
+ - The full HTTPS web address of the LXCA instance.
+ type: str
+ required: true
+
+requirements:
+ - pylxca
+
+notes:
+ - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca)
+ - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca)
+ - Check mode is not supported.
+'''
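+
+ # Usage sketch: assuming the community.general.lxca_nodes module, one consumer
+ # of this fragment; credentials and URL are placeholders.
+ #
+ # - name: Get nodes data from LXCA
+ #   community.general.lxca_nodes:
+ #     login_user: USERID
+ #     login_password: Password
+ #     auth_url: https://10.243.15.168
+ #     command_options: nodes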
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/manageiq.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
new file mode 100644
index 00000000..b610b512
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard ManageIQ documentation fragment
+ DOCUMENTATION = r'''
+options:
+ manageiq_connection:
+ description:
+ - ManageIQ connection configuration information.
+ required: false
+ type: dict
+ suboptions:
+ url:
+ description:
+ - ManageIQ environment URL. Defaults to the value of the C(MIQ_URL) environment variable if set; otherwise it is required to pass it.
+ type: str
+ required: false
+ username:
+ description:
+ - ManageIQ username. Defaults to the value of the C(MIQ_USERNAME) environment variable if set; otherwise required if no token is passed in.
+ type: str
+ password:
+ description:
+ - ManageIQ password. Defaults to the value of the C(MIQ_PASSWORD) environment variable if set; otherwise required if no token is passed in.
+ type: str
+ token:
+ description:
+ - ManageIQ token. Defaults to the value of the C(MIQ_TOKEN) environment variable if set; otherwise required if no username or password is passed in.
+ type: str
+ validate_certs:
+ description:
+ - Whether SSL certificates should be verified for HTTPS requests. Defaults to C(yes).
+ type: bool
+ default: yes
+ aliases: [ verify_ssl ]
+ ca_cert:
+ description:
+ - The path to a CA bundle file or directory with certificates. Defaults to C(None).
+ type: str
+ aliases: [ ca_bundle_path ]
+
+requirements:
+ - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)'
+'''
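+
+ # Usage sketch: assuming the community.general.manageiq_user module, one
+ # consumer of this fragment; all values are placeholders.
+ #
+ # - name: Create a user in ManageIQ
+ #   community.general.manageiq_user:
+ #     userid: jdoe
+ #     name: Jane Doe
+ #     password: changeme
+ #     manageiq_connection:
+ #       url: https://manageiq.example.com
+ #       username: admin
+ #       password: smartvm
+ #       validate_certs: no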
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nios.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nios.py
new file mode 100644
index 00000000..cf3130bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nios.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard NIOS documentation fragment
+ DOCUMENTATION = r'''
+options:
+ provider:
+ description:
+ - A dict object containing connection details.
+ type: dict
+ suboptions:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ instance of NIOS WAPI over REST.
+ - Value can also be specified using C(INFOBLOX_HOST) environment
+ variable.
+ type: str
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote instance of NIOS.
+ - Value can also be specified using C(INFOBLOX_USERNAME) environment
+ variable.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to
+ the remote instance of NIOS.
+ - Value can also be specified using C(INFOBLOX_PASSWORD) environment
+ variable.
+ type: str
+ validate_certs:
+ description:
+ - Boolean value to enable or disable verifying SSL certificates.
+ - Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
+ variable.
+ type: bool
+ default: no
+ aliases: [ ssl_verify ]
+ http_request_timeout:
+ description:
+ - The amount of time to wait before receiving a response.
+ - Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
+ variable.
+ type: int
+ default: 10
+ max_retries:
+ description:
+ - Configures the number of attempted retries before the connection
+ is declared unusable.
+ - Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
+ variable.
+ type: int
+ default: 3
+ wapi_version:
+ description:
+ - Specifies the version of WAPI to use.
+ - Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
+ variable.
+ - Until Ansible 2.8 the default WAPI version was 1.4.
+ type: str
+ default: '2.1'
+ max_results:
+ description:
+ - Specifies the maximum number of objects to be returned.
+ If set to a negative number, the appliance will return an error when the
+ number of returned objects would exceed the setting.
+ - Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
+ variable.
+ type: int
+ default: 1000
+notes:
+ - "This module must be run locally, which can be achieved by specifying C(connection: local)."
+ - Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
+
+'''
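+
+ # Usage sketch: assuming the community.general.nios_zone module, one consumer
+ # of this fragment; host and credentials are placeholders. Note the
+ # C(connection: local) requirement from the notes above.
+ #
+ # - name: Configure a DNS zone on the appliance
+ #   community.general.nios_zone:
+ #     name: ansible.com
+ #     state: present
+ #     provider:
+ #       host: "{{ inventory_hostname_short }}"
+ #       username: admin
+ #       password: admin
+ #   connection: local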
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nomad.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nomad.py
new file mode 100644
index 00000000..3845c541
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nomad.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020 FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Nomad documentation fragment
+ DOCUMENTATION = r'''
+options:
+ host:
+ description:
+ - FQDN of Nomad server.
+ required: true
+ type: str
+ use_ssl:
+ description:
+ - Use TLS/SSL connection.
+ type: bool
+ default: true
+ timeout:
+ description:
+ - Timeout (in seconds) for the request to Nomad.
+ type: int
+ default: 5
+ validate_certs:
+ description:
+ - Enable TLS/SSL certificate validation.
+ type: bool
+ default: true
+ client_cert:
+ description:
+ - Path of certificate for TLS/SSL.
+ type: path
+ client_key:
+ description:
+ - Path of certificate's private key for TLS/SSL.
+ type: path
+ namespace:
+ description:
+ - Namespace for Nomad.
+ type: str
+ token:
+ description:
+ - ACL token for authentication.
+ type: str
+'''
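+
+ # Usage sketch: assuming the community.general.nomad_job module, one consumer
+ # of this fragment; host and job file are placeholders.
+ #
+ # - name: Deploy a job from a local HCL file
+ #   community.general.nomad_job:
+ #     host: nomad.example.com
+ #     state: present
+ #     content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+ #     timeout: 120
+ #     use_ssl: true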
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oneview.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oneview.py
new file mode 100644
index 00000000..bbbcbeea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oneview.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # OneView doc fragment
+ DOCUMENTATION = r'''
+options:
+ config:
+ description:
+ - Path to a .json configuration file containing the OneView client configuration.
+ The configuration file is optional and, when used, should be present on the host running the ansible commands.
+ If the file path is not provided, the configuration will be loaded from environment variables.
+ For links to example configuration files and details on using the environment variables, see the notes section.
+ type: path
+
+requirements:
+ - python >= 2.7.9
+
+notes:
+ - "A sample configuration file for the config parameter can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
+ - "Check how to use environment variables for configuration at:
+ U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
+ - "Additional Playbooks for the HPE OneView Ansible modules can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
+ - "The OneView API version used will directly affect returned and expected fields in resources.
+ Information on setting the desired API version can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)"
+ '''
+
+ VALIDATEETAG = r'''
+options:
+ validate_etag:
+ description:
+ - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
+ for the resource matches the ETag provided in the data.
+ type: bool
+ default: yes
+'''
+
+ FACTSPARAMS = r'''
+options:
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(filter): A general filter/query string to narrow the list of items returned.
+ - C(sort): The sort order of the returned data set."
+ type: dict
+'''
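+
+ # Usage sketch: assuming the community.general.oneview_fc_network_info module,
+ # one consumer of this fragment and of FACTSPARAMS; the config path is a
+ # placeholder.
+ #
+ # - name: Gather paginated, filtered and sorted info about Fibre Channel networks
+ #   community.general.oneview_fc_network_info:
+ #     config: /etc/oneview/oneview_config.json
+ #     params:
+ #       start: 0
+ #       count: 3
+ #       sort: name:descending
+ #       filter: fabricType=FabricAttach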
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/online.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/online.py
new file mode 100644
index 00000000..4ad35bab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/online.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_token:
+ description:
+ - Online OAuth token.
+ type: str
+ required: true
+ aliases: [ oauth_token ]
+ api_url:
+ description:
+ - Online API URL.
+ type: str
+ default: 'https://api.online.net'
+ aliases: [ base_url ]
+ api_timeout:
+ description:
+ - HTTP timeout to Online API in seconds.
+ type: int
+ default: 30
+ aliases: [ timeout ]
+ validate_certs:
+ description:
+ - Validate SSL certs of the Online API.
+ type: bool
+ default: yes
+notes:
+ - Also see the API documentation on U(https://console.online.net/en/api/).
+ - If C(api_token) is not set within the module, the following
+ environment variables can be used in decreasing order of precedence:
+ C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN).
+ - If one wants to use a different C(api_url), one can also set the C(ONLINE_API_URL)
+ environment variable.
+'''
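+
+ # Usage sketch: assuming the community.general.online_server_info module, one
+ # consumer of this fragment; the token is a placeholder.
+ #
+ # - name: Gather Online server information
+ #   community.general.online_server_info:
+ #     api_token: 00000000-0000-0000-0000-000000000000
+ #   register: result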
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/opennebula.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
new file mode 100644
index 00000000..08b614a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # OpenNebula common documentation
+ DOCUMENTATION = r'''
+options:
+ api_url:
+ description:
+ - The endpoint URL of the XMLRPC server.
+ - If not specified then the value of the ONE_URL environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_endpoint
+ api_username:
+ description:
+ - The name of the user for XMLRPC authentication.
+ - If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
+ type: str
+ api_password:
+ description:
+ - The password or token for XMLRPC authentication.
+ - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_token
+ validate_certs:
+ description:
+ - Whether to validate the SSL certificates or not.
+ - This parameter is ignored if the C(PYTHONHTTPSVERIFY) environment variable is used.
+ type: bool
+ default: yes
+ wait_timeout:
+ description:
+ - Time to wait for the desired state to be reached before timeout, in seconds.
+ type: int
+ default: 300
+'''
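+
+ # Usage sketch: assuming the community.general.one_vm module, one consumer of
+ # this fragment; endpoint, credentials and template ID are placeholders.
+ #
+ # - name: Create a new instance from a template
+ #   community.general.one_vm:
+ #     api_url: https://one.example.com/RPC2
+ #     api_username: oneadmin
+ #     api_password: onepass
+ #     template_id: 90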
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/openswitch.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
new file mode 100644
index 00000000..7ab7c155
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard OpenSwitch documentation fragment
+ DOCUMENTATION = r'''
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport. Note this argument
+ does not affect the SSH transport.
+ type: str
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. This value applies to either I(cli) or I(rest). The port
+ value will default to the appropriate transport common port if
+ none is provided in the task. (cli=22, http=80, https=443). Note
+ this argument does not affect the SSH transport.
+ type: int
+ default: 0 (use common port)
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. This value is used to authenticate
+ either the CLI login or the REST authentication depending on which
+ transport is used. Note this argument does not affect the SSH
+ transport. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to
+ the remote device. This is a common argument used for either I(cli)
+ or I(rest) transports. Note this argument does not affect the SSH
+ transport. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+ type: str
+ timeout:
+ description:
+ - Specifies the timeout in seconds for communicating with the network device
+ for either connecting or sending commands. If the timeout is
+ exceeded before the operation is completed, the module will error.
+ type: int
+ default: 10
+ ssh_keyfile:
+ description:
+ - Specifies the SSH key to use to authenticate the connection to
+ the remote device. This argument is only used for the I(cli)
+ transports. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+ type: path
+ transport:
+ description:
+ - Configures the transport connection to use when connecting to the
+ remote device. The transport argument supports connectivity to the
+ device over ssh, cli or REST.
+ required: true
+ type: str
+ choices: [ cli, rest, ssh ]
+ default: ssh
+ use_ssl:
+ description:
+ - Configures the I(transport) to use SSL if set to C(yes) only when the
+ I(transport) argument is configured as rest. If the transport
+ argument is not I(rest), this value is ignored.
+ type: bool
+ default: yes
+ provider:
+ description:
+ - Convenience method that allows all I(openswitch) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ type: dict
+'''
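+
+ # Shape of the I(provider) dictionary described above (a sketch; all values
+ # are placeholders):
+ #
+ # provider:
+ #   host: "{{ inventory_hostname }}"
+ #   transport: rest
+ #   username: admin
+ #   password: admin
+ #   use_ssl: yes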
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle.py
new file mode 100644
index 00000000..776c8f52
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ requirements:
+ - "python >= 2.7"
+ - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
+ notes:
+ - For OCI python sdk configuration, please refer to
+ U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html)
+ options:
+ config_file_location:
+ description:
+ - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable,
+ if any, is used. Otherwise, defaults to ~/.oci/config.
+ type: str
+ config_profile_name:
+ description:
+ - The profile to load from the config file referenced by C(config_file_location). If not set, then the
+ value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the
+ "DEFAULT" profile in C(config_file_location).
+ default: "DEFAULT"
+ type: str
+ api_user:
+ description:
+ - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the
+ value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user
+ is not specified through a configuration file (See C(config_file_location)). To get the user's OCID,
+ please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_fingerprint:
+ description:
+ - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
+ environment variable, if any, is used. This option is required if the key fingerprint is not
+ specified through a configuration file (See C(config_file_location)). To get the key pair's
+ fingerprint value please refer
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_key_file:
+ description:
+ - Full path and filename of the private key (in PEM format). If not set, then the value of the
+ OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
+ not specified through a configuration file (See C(config_file_location)). If the key is encrypted
+ with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
+ type: str
+ api_user_key_pass_phrase:
+ description:
+ - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
+ the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
+ key passphrase is not specified through a configuration file (See C(config_file_location)).
+ type: str
+ auth_type:
+ description:
+ - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
+ authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
+ used. If this I(auth_type) module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE
+ environment variable, if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication
+ when running ansible playbooks within an OCI compute instance.
+ choices: ['api_key', 'instance_principal']
+ default: 'api_key'
+ type: str
+ tenancy:
+ description:
+ - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
+ used. This option is required if the tenancy OCID is not specified through a configuration file
+ (See C(config_file_location)). To get the tenancy OCID, please refer
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
+ type: str
+ region:
+ description:
+ - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
+ value of the OCI_REGION variable, if any, is used. This option is required if the region is
+ not specified through a configuration file (See C(config_file_location)). Please refer to
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
+ on OCI regions.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
new file mode 100644
index 00000000..d8f22101
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ force_create:
+ description: Whether to attempt non-idempotent creation of a resource. By default, creating a resource is an
+ idempotent operation that doesn't create the resource if it already exists. Setting this option
+ to true forcefully creates a copy of the resource, even if it already exists. This option is
+ mutually exclusive with I(key_by).
+ default: False
+ type: bool
+ key_by:
+ description: The list of comma-separated attributes of this resource which should be used to uniquely
+ identify an instance of the resource. By default, all the attributes of a resource except
+ I(freeform_tags) are used to uniquely identify a resource.
+ type: list
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
new file mode 100644
index 00000000..01f92f18
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ display_name:
+ description: Use I(display_name) along with the other options to return only resources that match the given
+ display name exactly.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
new file mode 100644
index 00000000..9a7b0226
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ name:
+ description: Use I(name) along with the other options to return only resources that match the given name
+ exactly.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py
new file mode 100644
index 00000000..1d9cae0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ defined_tags:
+ description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more
+ information, see
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+ freeform_tags:
+ description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name,
+ type, or namespace. For more information, see
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
new file mode 100644
index 00000000..248319c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ wait:
+ description: Whether to wait for create or delete operation to complete.
+ default: yes
+ type: bool
+ wait_timeout:
+ description: Time, in seconds, to wait when I(wait=yes).
+ default: 1200
+ type: int
+ wait_until:
+ description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default,
+ when I(wait=yes), we wait for the resource to reach the applicable ACTIVE/ATTACHED/AVAILABLE/
+ PROVISIONED/RUNNING lifecycle state during a create operation, and the DELETED/DETACHED/
+ TERMINATED lifecycle state during a delete operation.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ovirt_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ovirt_facts.py
new file mode 100644
index 00000000..43b9b37b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ovirt_facts.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard oVirt info documentation fragment
+ DOCUMENTATION = r'''
+options:
+ fetch_nested:
+ description:
+ - If I(yes) the module will fetch additional data from the API.
+ - It will fetch only the IDs of nested entities; it doesn't fetch multiple levels of nested attributes,
+ only the attributes of the current entity. Users can configure fetching other
+ attributes of the nested entities by specifying C(nested_attributes).
+ type: bool
+ default: false
+ nested_attributes:
+ description:
+ - Specifies list of the attributes which should be fetched from the API.
+ - This parameter applies only when C(fetch_nested) is I(true).
+ type: list
+ auth:
+ description:
+ - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
+ - C(username)[I(required)] - The name of the user, something like I(admin@internal).
+ Default value is set by I(OVIRT_USERNAME) environment variable.
+ - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
+ - "C(url)- A string containing the API URL of the server, usually
+ something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable.
+ Either C(url) or C(hostname) is required."
+ - "C(hostname) - A string containing the hostname of the server, usually
+ something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable.
+ Either C(url) or C(hostname) is required."
+ - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
+ - "C(insecure) - A boolean flag that indicates if the server TLS
+ certificate and host name should be checked."
+ - "C(ca_file) - A PEM file containing the trusted CA certificates. The
+ certificate presented by the server will be verified using these CA
+ certificates. If `C(ca_file)` parameter is not set, system wide
+ CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
+ - "C(kerberos) - A boolean flag indicating if Kerberos authentication
+ should be used instead of the default basic authentication."
+ - "C(headers) - Dictionary of HTTP headers to be added to each API call."
+ type: dict
+ required: true
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.3.0
+notes:
+ - "In order to use this module you have to install oVirt Python SDK.
+ To ensure it's installed with correct version you can create the following task:
+ ansible.builtin.pip: name=ovirt-engine-sdk-python version=4.3.0"
+'''
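+
+ # Shape of the I(auth) dictionary described above (a sketch; all values are
+ # placeholders):
+ #
+ # auth:
+ #   url: https://server.example.com/ovirt-engine/api
+ #   username: admin@internal
+ #   password: secret
+ #   insecure: true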
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/postgres.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/postgres.py
new file mode 100644
index 00000000..a207bc35
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/postgres.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Postgres documentation fragment
+ DOCUMENTATION = r'''
+options:
+ login_user:
+ description:
+ - The username used to authenticate with.
+ type: str
+ default: postgres
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+ login_host:
+ description:
+ - Host running the database.
+ type: str
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases: [ login_port ]
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+notes:
+- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
+- To avoid "Peer authentication failed for user postgres" error,
+ use postgres user as a I(become_user).
+- This module uses psycopg2, a Python PostgreSQL database adapter. You must
+ ensure that psycopg2 is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case), then
+ PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
+ on the remote host before using this module.
+- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
+requirements: [ psycopg2 ]
+'''
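+
+ # Usage sketch: assuming the community.general.postgresql_db module, one
+ # consumer of this fragment; host and credentials are placeholders.
+ #
+ # - name: Create a database, connecting over TCP with SSL
+ #   community.general.postgresql_db:
+ #     name: acme
+ #     login_host: db.example.com
+ #     login_user: postgres
+ #     login_password: secret
+ #     ssl_mode: require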
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
new file mode 100644
index 00000000..1d0490aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Common parameters for Proxmox VE modules
+ DOCUMENTATION = r'''
+options:
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ type: str
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ type: str
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can also use the C(PROXMOX_PASSWORD) environment variable.
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: no
+requirements: [ "proxmoxer", "requests" ]
+'''
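+
+ # Usage sketch: assuming the community.general.proxmox_kvm module, one
+ # consumer of this fragment; host, credentials and names are placeholders.
+ #
+ # - name: Create a new VM with minimal options
+ #   community.general.proxmox_kvm:
+ #     api_host: proxmox.example.com
+ #     api_user: root@pam
+ #     api_password: secret
+ #     node: pve01
+ #     name: testvm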
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/purestorage.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
new file mode 100644
index 00000000..f35f0267
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r'''
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+'''
+
+ # Documentation fragment for FlashBlade
+ FB = r'''
+options:
+ fb_url:
+ description:
+ - FlashBlade management IP address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashBlade API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purity_fb) Python library.
+ - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
+ if I(fb_url) and I(api_token) arguments are not passed to the module directly.
+requirements:
+ - python >= 2.7
+ - purity_fb >= 1.1
+'''
+
+ # Documentation fragment for FlashArray
+ FA = r'''
+options:
+ fa_url:
+ description:
+ - FlashArray management IPv4 address or Hostname.
+ type: str
+ required: true
+ api_token:
+ description:
+ - FlashArray API token for admin privileged user.
+ type: str
+ required: true
+notes:
+ - This module requires the C(purestorage) Python library.
+ - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
+ if I(fa_url) and I(api_token) arguments are not passed to the module directly.
+requirements:
+ - python >= 2.7
+ - purestorage
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/rackspace.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
new file mode 100644
index 00000000..0f57dd88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Rackspace only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_key:
+ description:
+ - Rackspace API key, overrides I(credentials).
+ type: str
+ aliases: [ password ]
+ credentials:
+ description:
+ - File to find the Rackspace credentials in. Ignored if I(api_key) and
+ I(username) are provided.
+ type: path
+ aliases: [ creds_file ]
+ env:
+ description:
+ - Environment as configured in I(~/.pyrax.cfg),
+ see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
+ type: str
+ region:
+ description:
+ - Region to create an instance in.
+ type: str
+ username:
+ description:
+ - Rackspace username, overrides I(credentials).
+ type: str
+ validate_certs:
+ description:
+ - Whether or not to require SSL validation of API endpoints.
+ type: bool
+ aliases: [ verify_ssl ]
+requirements:
+ - python >= 2.6
+ - pyrax
+notes:
+ - The following environment variables can be used, C(RAX_USERNAME),
+ C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
+ - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
+ - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
+ - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+'''
+
+ # Documentation fragment including attributes to enable communication
+ # of other OpenStack clouds. Not all rax modules support this.
+ OPENSTACK = r'''
+options:
+ api_key:
+ type: str
+ description:
+ - Rackspace API key, overrides I(credentials).
+ aliases: [ password ]
+ auth_endpoint:
+ type: str
+ description:
+ - The URI of the authentication service.
+ - If not specified, it will be set to U(https://identity.api.rackspacecloud.com/v2.0/).
+ credentials:
+ type: path
+ description:
+ - File to find the Rackspace credentials in. Ignored if I(api_key) and
+ I(username) are provided.
+ aliases: [ creds_file ]
+ env:
+ type: str
+ description:
+ - Environment as configured in I(~/.pyrax.cfg),
+ see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
+ identity_type:
+ type: str
+ description:
+ - Authentication mechanism to use, such as rackspace or keystone.
+ default: rackspace
+ region:
+ type: str
+ description:
+ - Region to create an instance in.
+ tenant_id:
+ type: str
+ description:
+ - The tenant ID used for authentication.
+ tenant_name:
+ type: str
+ description:
+ - The tenant name used for authentication.
+ username:
+ type: str
+ description:
+ - Rackspace username, overrides I(credentials).
+ validate_certs:
+ description:
+ - Whether or not to require SSL validation of API endpoints.
+ type: bool
+ aliases: [ verify_ssl ]
+requirements:
+ - python >= 2.6
+ - pyrax
+notes:
+ - The following environment variables can be used, C(RAX_USERNAME),
+ C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
+ - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
+ - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
+ - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+'''
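+
+ # Usage sketch: assuming the community.general.rax module, one consumer of
+ # this fragment; flavor, image and region are placeholders.
+ #
+ # - name: Build a Cloud Server
+ #   community.general.rax:
+ #     name: rax-test1
+ #     flavor: 5
+ #     image: ubuntu-1204-lts-precise-pangolin
+ #     region: DFW
+ #     credentials: ~/.raxpub
+ #     wait: yes
+ #   delegate_to: localhost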
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/scaleway.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
new file mode 100644
index 00000000..c1e1b13d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_token:
+ description:
+ - Scaleway OAuth token.
+ type: str
+ required: true
+ aliases: [ oauth_token ]
+ api_url:
+ description:
+ - Scaleway API URL.
+ type: str
+ default: https://api.scaleway.com
+ aliases: [ base_url ]
+ api_timeout:
+ description:
+ - HTTP timeout to Scaleway API in seconds.
+ type: int
+ default: 30
+ aliases: [ timeout ]
+ query_parameters:
+ description:
+ - List of parameters passed to the query string.
+ type: dict
+ default: {}
+ validate_certs:
+ description:
+ - Validate SSL certs of the Scaleway API.
+ type: bool
+ default: yes
+notes:
+ - Also see the API documentation on U(https://developer.scaleway.com/).
+ - If C(api_token) is not set within the module, the following
+ environment variables can be used in decreasing order of precedence:
+ C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN).
+ - If one wants to use a different C(api_url), one can also set the C(SCW_API_URL)
+ environment variable.
+'''
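+
+ # Usage sketch: assuming the community.general.scaleway_compute module, one
+ # consumer of this fragment; image, organization and region are placeholders.
+ #
+ # - name: Create a server
+ #   community.general.scaleway_compute:
+ #     name: foobar
+ #     state: present
+ #     image: 00000000-0000-0000-0000-000000000000
+ #     organization: 00000000-0000-0000-0000-000000000000
+ #     region: ams1
+ #     commercial_type: VC1S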
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/utm.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/utm.py
new file mode 100644
index 00000000..413fb496
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/utm.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r'''
+options:
+ headers:
+ description:
+ - A dictionary of additional headers to be sent to POST and PUT requests.
+ - It is needed by some modules.
+ type: dict
+ required: false
+ utm_host:
+ description:
+ - The REST Endpoint of the Sophos UTM.
+ type: str
+ required: true
+ utm_port:
+ description:
+ - The port of the REST interface.
+ type: int
+ default: 4444
+ utm_token:
+ description:
+ - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\
+ PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2."
+ type: str
+ required: true
+ utm_protocol:
+ description:
+ - The protocol of the REST Endpoint.
+ choices: [ http, https ]
+ type: str
+ default: https
+ validate_certs:
+ description:
+ - Whether the REST interface's SSL certificate should be verified or not.
+ type: bool
+ default: yes
+ state:
+ description:
+ - The desired state of the object.
+ - C(present) will create or update an object.
+ - C(absent) will delete an object if it was present.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
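+
+ # Usage sketch: assuming the community.general.utm_dns_host module, one
+ # consumer of this fragment; host, token and the module-specific options are
+ # placeholders.
+ #
+ # - name: Create a DNS host entry on the UTM
+ #   community.general.utm_dns_host:
+ #     utm_host: sophos.example.com
+ #     utm_token: abcdefabcdef
+ #     state: present
+ #     # ... module-specific options ...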
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/vexata.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/vexata.py
new file mode 100644
index 00000000..9f756cc8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/vexata.py
@@ -0,0 +1,52 @@
+#
+# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for Vexata VX100 arrays.
+'''
+
+ # Documentation fragment for Vexata VX100 series
+ VX100 = r'''
+options:
+ array:
+ description:
+ - Vexata VX100 array hostname or IPv4 Address.
+ required: true
+ type: str
+ user:
+ description:
+ - Vexata API user with administrative privileges.
+ required: false
+ type: str
+ password:
+ description:
+ - Vexata API user password.
+ required: false
+ type: str
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine.
+ required: false
+ type: bool
+ default: 'no'
+
+requirements:
+ - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array
+ - vexatapi >= 0.0.1
+ - python >= 2.7
+ - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if
+ user and password arguments are not passed to the module directly.
+'''
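+
+ # Usage sketch: assuming the community.general.vexata_volume module, one
+ # consumer of the VX100 fragment; array and credentials are placeholders.
+ #
+ # - name: Create a 16 GiB volume
+ #   community.general.vexata_volume:
+ #     name: vol01
+ #     size: 16
+ #     state: present
+ #     array: vx100.example.com
+ #     user: admin
+ #     password: secret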
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/xenserver.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
new file mode 100644
index 00000000..747bf02f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Common parameters for XenServer modules
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the XenServer host or XenServer pool master.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead.
+ type: str
+ default: localhost
+ aliases: [ host, pool ]
+ username:
+ description:
+ - The username to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead.
+ type: str
+ default: root
+ aliases: [ admin, user ]
+ password:
+ description:
+ - The password to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead.
+ type: str
+ aliases: [ pass, pwd ]
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead.
+ type: bool
+ default: yes
+'''
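+
+ # Usage sketch: assuming the community.general.xenserver_guest_info module,
+ # one consumer of this fragment; host, credentials and VM name are
+ # placeholders.
+ #
+ # - name: Gather info about a VM
+ #   community.general.xenserver_guest_info:
+ #     hostname: 10.0.0.1
+ #     username: root
+ #     password: xenserver
+ #     name: testvm
+ #   delegate_to: localhost
+ #   register: facts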
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/dict_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/dict_kv.py
new file mode 100644
index 00000000..b2124ed7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/dict_kv.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def dict_kv(value, key):
+ '''Return a dictionary with a single key-value pair
+
+ Example:
+
+ - hosts: localhost
+ gather_facts: false
+ vars:
+ myvar: myvalue
+ tasks:
+ - debug:
+ msg: "{{ myvar | dict_kv('thatsmyvar') }}"
+
+ produces:
+
+ ok: [localhost] => {
+ "msg": {
+ "thatsmyvar": "myvalue"
+ }
+ }
+
+ Example 2:
+
+ - hosts: localhost
+ gather_facts: false
+ vars:
+ common_config:
+ type: host
+ database: all
+ myservers:
+ - server1
+ - server2
+ tasks:
+ - debug:
+ msg: "{{ myservers | map('dict_kv', 'server') | map('combine', common_config) }}"
+
+ produces:
+
+ ok: [localhost] => {
+ "msg": [
+ {
+ "database": "all",
+ "server": "server1",
+ "type": "host"
+ },
+ {
+ "database": "all",
+ "server": "server2",
+ "type": "host"
+ }
+ ]
+ }
+ '''
+ return {key: value}
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'dict_kv': dict_kv
+ }
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/jc.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/jc.py
new file mode 100644
index 00000000..e854128f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/jc.py
@@ -0,0 +1,94 @@
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# contributed by Kelly Brazil <kellyjonbrazil@gmail.com>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+import importlib
+
+try:
+ import jc
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def jc(data, parser, quiet=True, raw=False):
+ """Convert returned command output to JSON using the JC library
+
+ Arguments:
+
+ parser required (string) the correct parser for the input data (e.g. 'ifconfig')
+ see https://github.com/kellyjonbrazil/jc#parsers for latest list of parsers.
+ quiet optional (bool) True to suppress warning messages (default is True)
+ raw optional (bool) True to return pre-processed JSON (default is False)
+
+ Returns:
+
+ dictionary or list of dictionaries
+
+ Example:
+
+ - name: run date command
+ hosts: ubuntu
+ tasks:
+ - shell: date
+ register: result
+ - set_fact:
+ myvar: "{{ result.stdout | community.general.jc('date') }}"
+ - debug:
+ msg: "{{ myvar }}"
+
+ produces:
+
+ ok: [192.168.1.239] => {
+ "msg": {
+ "day": 9,
+ "hour": 22,
+ "minute": 6,
+ "month": "Aug",
+ "month_num": 8,
+ "second": 22,
+ "timezone": "UTC",
+ "weekday": "Sun",
+ "weekday_num": 1,
+ "year": 2020
+ }
+ }
+ """
+
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jc" prior to running jc filter')
+
+ try:
+ jc_parser = importlib.import_module('jc.parsers.' + parser)
+ return jc_parser.parse(data, quiet=quiet, raw=raw)
+
+ except Exception as e:
+ raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
+
+
+class FilterModule(object):
+ ''' jc filter '''
+
+ def filters(self):
+ return {
+ 'jc': jc
+ }
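The filter body boils down to a dynamic import of a jc parser module followed by its parse() call. A minimal sketch of that flow, assuming the third-party jc library is installed (pip install jc); the sample date string is illustrative:

    import importlib

    def parse_with_jc(data, parser, quiet=True, raw=False):
        # Same lookup the filter performs: jc.parsers.<parser>.parse(...)
        jc_parser = importlib.import_module('jc.parsers.' + parser)
        return jc_parser.parse(data, quiet=quiet, raw=raw)

    print(parse_with_jc('Sun Aug  9 22:06:22 UTC 2020', 'date'))
    # -> {'year': 2020, 'month': 'Aug', ...} as in the docstring above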
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/json_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/json_query.py
new file mode 100644
index 00000000..972109a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/json_query.py
@@ -0,0 +1,56 @@
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+ import jmespath
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def json_query(data, expr):
+ '''Query data using jmespath query language ( http://jmespath.org ). Example:
+ - ansible.builtin.debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+ '''
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jmespath" prior to running '
+ 'json_query filter')
+
+ # Hack to handle Ansible String Types
+ # See issue: https://github.com/ansible-collections/community.general/issues/320
+ jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
+ try:
+ return jmespath.search(expr, data)
+ except jmespath.exceptions.JMESPathError as e:
+ raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+ except Exception as e:
+ # For older jmespath, we can get ValueError and TypeError without much info.
+ raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'json_query': json_query
+ }
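The heavy lifting is done by jmespath.search(); the filter only widens jmespath's string type map for Ansible's string subclasses and wraps errors. A quick standalone check of the docstring query, assuming jmespath is installed:

    import jmespath

    data = {'tagged_instances': [
        {'block_device_mapping': {'/dev/sda1': {'volume_id': 'vol-1'}}},
        {'block_device_mapping': {'/dev/sdb1': {'volume_id': 'vol-2'}}},
    ]}
    print(jmespath.search('tagged_instances[*].block_device_mapping.*.volume_id', data))
    # -> [['vol-1'], ['vol-2']]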
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/random_mac.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/random_mac.py
new file mode 100644
index 00000000..aa9f59be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/random_mac.py
@@ -0,0 +1,73 @@
+# (c) 2020 Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from random import Random, SystemRandom
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.six import string_types
+
+
+def random_mac(value, seed=None):
+ ''' takes a string prefix and returns it completed with random bytes
+ to form a complete 6-byte MAC address '''
+
+ if not isinstance(value, string_types):
+ raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' %
+ (type(value), value))
+
+ value = value.lower()
+ mac_items = value.split(':')
+
+ if len(mac_items) > 5:
+ raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated'
+ ' items max' % value)
+
+ err = ""
+ for mac in mac_items:
+ if not mac:
+ err += ",empty item"
+ continue
+ if not re.match('[a-f0-9]{2}$', mac):
+ err += ",%s not a hex byte" % mac
+ err = err.strip(',')
+
+ if err:
+ raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err))
+
+ if seed is None:
+ r = SystemRandom()
+ else:
+ r = Random(seed)
+ # Generate a random int between 0x1000000000 and 0xFFFFFFFFFF
+ v = r.randint(68719476736, 1099511627775)
+ # Select first n chars to complement input prefix
+ remain = 2 * (6 - len(mac_items))
+ rnd = ('%x' % v)[:remain]
+ return value + re.sub(r'(..)', r':\1', rnd)
+
+
+class FilterModule:
+ ''' Ansible jinja2 filters '''
+ def filters(self):
+ return {
+ 'random_mac': random_mac,
+ }
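A prefix with three items leaves three bytes (six hex digits) to fill, and passing a seed makes the output reproducible, which is useful for idempotent playbooks. A small sketch, again assuming the module path from this patch is importable:

    from ansible_collections.community.general.plugins.filter.random_mac import random_mac

    print(random_mac('52:54:00'))  # e.g. '52:54:00:3f:a1:9c' (random each call)

    # Seeded calls are deterministic:
    assert random_mac('52:54:00', seed='myhost') == random_mac('52:54:00', seed='myhost')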
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/time.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/time.py
new file mode 100644
index 00000000..3b44ad0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/time.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.errors import AnsibleFilterError
+
+
+UNIT_FACTORS = {
+ 'ms': [],
+ 's': [1000],
+ 'm': [1000, 60],
+ 'h': [1000, 60, 60],
+ 'd': [1000, 60, 60, 24],
+ 'w': [1000, 60, 60, 24, 7],
+ 'mo': [1000, 60, 60, 24, 30],
+ 'y': [1000, 60, 60, 24, 365],
+}
+
+
+UNIT_TO_SHORT_FORM = {
+ 'millisecond': 'ms',
+ 'msec': 'ms',
+ 'msecond': 'ms',
+ 'sec': 's',
+ 'second': 's',
+ 'hour': 'h',
+ 'min': 'm',
+ 'minute': 'm',
+ 'day': 'd',
+ 'week': 'w',
+ 'month': 'mo',
+ 'year': 'y',
+}
+
+
+def multiply(factors):
+ result = 1
+ for factor in factors:
+ result = result * factor
+ return result
+
+
+def to_time_unit(human_time, unit='ms', **kwargs):
+ ''' Return a time unit from a human readable string '''
+ unit_to_short_form = UNIT_TO_SHORT_FORM
+ # Use a copy so per-call year/month overrides do not mutate the module-level table
+ unit_factors = dict(UNIT_FACTORS)
+
+ unit = unit_to_short_form.get(unit.rstrip('s'), unit)
+ if unit not in unit_factors:
+ raise AnsibleFilterError("to_time_unit() can not convert to the following unit: %s. "
+ "Available units (singular or plural): %s. "
+ "Available short units: %s"
+ % (unit, ', '.join(unit_to_short_form.keys()), ', '.join(unit_factors.keys())))
+
+ if 'year' in kwargs:
+ unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')]
+ if 'month' in kwargs:
+ unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')]
+
+ if kwargs:
+ raise AnsibleFilterError('to_time_unit() got unknown keyword arguments: %s' % ', '.join(kwargs.keys()))
+
+ result = 0
+ for h_time_string in human_time.split():
+ res = re.match(r'(-?\d+)(\w+)', h_time_string)
+ if not res:
+ raise AnsibleFilterError(
+ "to_time_unit() can not interpret following string: %s" % human_time)
+
+ h_time_int = int(res.group(1))
+ h_time_unit = res.group(2)
+
+ h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit)
+ if h_time_unit not in unit_factors:
+ raise AnsibleFilterError(
+ "to_time_unit() can not interpret following string: %s" % human_time)
+
+ time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit])
+ result += time_in_milliseconds
+ return round(result / multiply(unit_factors[unit]), 12)
+
+
+def to_milliseconds(human_time, **kwargs):
+ ''' Return milliseconds from a human readable string '''
+ return to_time_unit(human_time, 'ms', **kwargs)
+
+
+def to_seconds(human_time, **kwargs):
+ ''' Return seconds from a human readable string '''
+ return to_time_unit(human_time, 's', **kwargs)
+
+
+def to_minutes(human_time, **kwargs):
+ ''' Return minutes from a human readable string '''
+ return to_time_unit(human_time, 'm', **kwargs)
+
+
+def to_hours(human_time, **kwargs):
+ ''' Return hours from a human readable string '''
+ return to_time_unit(human_time, 'h', **kwargs)
+
+
+def to_days(human_time, **kwargs):
+ ''' Return days from a human readable string '''
+ return to_time_unit(human_time, 'd', **kwargs)
+
+
+def to_weeks(human_time, **kwargs):
+ ''' Return weeks from a human readable string '''
+ return to_time_unit(human_time, 'w', **kwargs)
+
+
+def to_months(human_time, **kwargs):
+ ''' Return months from a human readable string '''
+ return to_time_unit(human_time, 'mo', **kwargs)
+
+
+def to_years(human_time, **kwargs):
+ ''' Return years from a human readable string '''
+ return to_time_unit(human_time, 'y', **kwargs)
+
+
+class FilterModule(object):
+ ''' Ansible time jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ 'to_time_unit': to_time_unit,
+ 'to_milliseconds': to_milliseconds,
+ 'to_seconds': to_seconds,
+ 'to_minutes': to_minutes,
+ 'to_hours': to_hours,
+ 'to_days': to_days,
+ 'to_weeks': to_weeks,
+ 'to_months': to_months,
+ 'to_years': to_years,
+ }
+
+ return filters
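Everything is normalized to milliseconds via the factor table and then divided by the factor of the requested unit; year and month lengths can be overridden per call. A few spot checks as a sketch (importable collection path assumed, as above):

    from ansible_collections.community.general.plugins.filter.time import to_seconds, to_time_unit

    assert to_seconds('1h 30m') == 5400.0       # 3600000 ms + 1800000 ms, divided by 1000
    assert to_time_unit('1mo', 'd') == 30.0     # default month length is 30 days
    assert to_time_unit('1y', 'd', year=360) == 360.0  # per-call override of the year length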
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/cobbler.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/cobbler.py
new file mode 100644
index 00000000..0178c2ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/cobbler.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2020 Orion Poplawski <orion@nwra.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Orion Poplawski (@opoplawski)
+ name: cobbler
+ plugin_type: inventory
+ short_description: Cobbler inventory source
+ version_added: 1.0.0
+ description:
+ - Get inventory hosts from the cobbler service.
+ - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry."
+ extends_documentation_fragment:
+ - inventory_cache
+ options:
+ plugin:
+ description: The name of this plugin; it should always be set to C(community.general.cobbler) for this plugin to recognize it as its own.
+ required: yes
+ choices: [ 'cobbler', 'community.general.cobbler' ]
+ url:
+ description: URL to cobbler.
+ default: 'http://cobbler/cobbler_api'
+ env:
+ - name: COBBLER_SERVER
+ user:
+ description: Cobbler authentication user.
+ required: no
+ env:
+ - name: COBBLER_USER
+ password:
+ description: Cobbler authentication password
+ required: no
+ env:
+ - name: COBBLER_PASSWORD
+ cache_fallback:
+ description: Fallback to cached results if connection to cobbler fails
+ type: boolean
+ default: no
+ exclude_profiles:
+ description: Profiles to exclude from inventory
+ type: list
+ default: []
+ elements: str
+ group_by:
+ description: Keys to group hosts by
+ type: list
+ default: [ 'mgmt_classes', 'owners', 'status' ]
+ group:
+ description: Group to place all hosts into
+ default: cobbler
+ group_prefix:
+ description: Prefix to apply to cobbler groups
+ default: cobbler_
+ want_facts:
+ description: Toggle; if C(true), the plugin will retrieve host facts from the server
+ type: boolean
+ default: yes
+'''
+
+EXAMPLES = '''
+# my.cobbler.yml
+plugin: community.general.cobbler
+url: http://cobbler/cobbler_api
+user: ansible-tester
+password: secure
+'''
+
+from distutils.version import LooseVersion
+import socket
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.module_utils.six import iteritems
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
+
+# xmlrpc
+try:
+ import xmlrpclib as xmlrpc_client
+ HAS_XMLRPC_CLIENT = True
+except ImportError:
+ try:
+ import xmlrpc.client as xmlrpc_client
+ HAS_XMLRPC_CLIENT = True
+ except ImportError:
+ HAS_XMLRPC_CLIENT = False
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+ ''' Host inventory parser for ansible using cobbler as source. '''
+
+ NAME = 'community.general.cobbler'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.cobbler_url = None
+ self.exclude_profiles = [] # A list of profiles to exclude
+
+ self.connection = None
+ self.token = None
+
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('cobbler.yaml', 'cobbler.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" or "cobbler.yml"')
+ return valid
+
+ def _get_connection(self):
+ if not HAS_XMLRPC_CLIENT:
+ raise AnsibleError('Could not import xmlrpc client library')
+
+ if self.connection is None:
+ self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
+ self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
+ self.token = None
+ if self.get_option('user') is not None:
+ self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
+ return self.connection
+
+ def _init_cache(self):
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {}
+
+ def _reload_cache(self):
+ if self.get_option('cache_fallback'):
+ self.display.vvv('Cannot connect to server, loading cache\n')
+ self._options['cache_timeout'] = 0
+ self.load_cache_plugin()
+ self._cache.get(self.cache_key, {})
+
+ def _get_profiles(self):
+ if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
+ c = self._get_connection()
+ try:
+ if self.token is not None:
+ data = c.get_profiles(self.token)
+ else:
+ data = c.get_profiles()
+ except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
+ self._reload_cache()
+ else:
+ self._init_cache()
+ self._cache[self.cache_key]['profiles'] = data
+
+ return self._cache[self.cache_key]['profiles']
+
+ def _get_systems(self):
+ if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
+ c = self._get_connection()
+ try:
+ if self.token is not None:
+ data = c.get_systems(self.token)
+ else:
+ data = c.get_systems()
+ except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
+ self._reload_cache()
+ else:
+ self._init_cache()
+ self._cache[self.cache_key]['systems'] = data
+
+ return self._cache[self.cache_key]['systems']
+
+ def _add_safe_group_name(self, group, child=None):
+ group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
+ if child is not None:
+ self.inventory.add_child(group_name, child)
+ return group_name
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.cobbler_url = self.get_option('url')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ self.exclude_profiles = self.get_option('exclude_profiles')
+ self.group_by = self.get_option('group_by')
+
+ for profile in self._get_profiles():
+ if profile['parent']:
+ self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
+ if profile['parent'] not in self.exclude_profiles:
+ parent_group_name = self._add_safe_group_name(profile['parent'])
+ self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
+ if profile['name'] not in self.exclude_profiles:
+ group_name = self._add_safe_group_name(profile['name'])
+ self.display.vvvv('Added profile group %s\n' % group_name)
+ self.inventory.add_child(parent_group_name, group_name)
+ else:
+ self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
+ # Create a hierarchy of profile names
+ profile_elements = profile['name'].split('-')
+ i = 0
+ while i < len(profile_elements) - 1:
+ profile_group = '-'.join(profile_elements[0:i + 1])
+ profile_group_child = '-'.join(profile_elements[0:i + 2])
+ if profile_group in self.exclude_profiles:
+ self.display.vvvv('Excluding profile %s\n' % profile_group)
+ break
+ group_name = self._add_safe_group_name(profile_group)
+ self.display.vvvv('Added profile group %s\n' % group_name)
+ child_group_name = self._add_safe_group_name(profile_group_child)
+ self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
+ self.inventory.add_child(group_name, child_group_name)
+ i = i + 1
+
+ # Add default group for this inventory if specified
+ self.group = to_safe_group_name(self.get_option('group'))
+ if self.group is not None and self.group != '':
+ self.inventory.add_group(self.group)
+ self.display.vvvv('Added site group %s\n' % self.group)
+
+ for host in self._get_systems():
+ # Get the FQDN for the host and add it to the right groups
+ hostname = host['hostname'] # often empty for non-static IP hosts
+ interfaces = host['interfaces']
+
+ if host['profile'] in self.exclude_profiles:
+ self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
+ continue
+
+ # hostname is often empty for non-static IP hosts
+ if hostname == '':
+ for (iname, ivalue) in iteritems(interfaces):
+ if ivalue['management'] or not ivalue['static']:
+ this_dns_name = ivalue.get('dns_name', None)
+ if this_dns_name is not None and this_dns_name != "":
+ hostname = this_dns_name
+ self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
+
+ if hostname == '':
+ self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
+ continue
+
+ self.inventory.add_host(hostname)
+ self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
+
+ # Add host to profile group
+ group_name = self._add_safe_group_name(host['profile'], child=hostname)
+ self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+
+ # Add host to groups specified by group_by fields
+ for group_by in self.group_by:
+ if host[group_by] == '<<inherit>>':
+ groups = []
+ else:
+ groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
+ for group in groups:
+ group_name = self._add_safe_group_name(group, child=hostname)
+ self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
+
+ # Add to group for this inventory
+ if self.group is not None:
+ self.inventory.add_child(self.group, hostname)
+
+ # Add host variables
+ if self.get_option('want_facts'):
+ try:
+ self.inventory.set_variable(hostname, 'cobbler', host)
+ except ValueError as e:
+ self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
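Under the hood the plugin speaks Cobbler's XML-RPC API using only the standard library. A sketch of the calls it issues (URL and credentials are placeholders matching the EXAMPLES block above):

    import xmlrpc.client

    conn = xmlrpc.client.Server('http://cobbler/cobbler_api', allow_none=True)
    token = conn.login('ansible-tester', 'secure')  # skipped when no user is configured
    profiles = conn.get_profiles(token)
    systems = conn.get_systems(token)
    print([p['name'] for p in profiles], [s['hostname'] for s in systems])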
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_machine.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_machine.py
new file mode 100644
index 00000000..7d92184b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_machine.py
@@ -0,0 +1,272 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_machine
+ plugin_type: inventory
+ author: Ximon Eighteen (@ximon18)
+ short_description: Docker Machine inventory source
+ requirements:
+ - L(Docker Machine,https://docs.docker.com/machine/)
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Get inventory hosts from Docker Machine.
+ - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
+ - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
+ - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the C(docker_machine) plugin.
+ required: yes
+ choices: ['docker_machine', 'community.general.docker_machine']
+ daemon_env:
+ description:
+ - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
+ - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
+ A warning will be issued for any skipped host if the choice is C(require).
+ - With C(optional) and C(optional-silently), fetch them but do not skip hosts for which they cannot be fetched.
+ A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
+ - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
+ - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
+ type: str
+ choices:
+ - require
+ - require-silently
+ - optional
+ - optional-silently
+ - skip
+ default: require
+ running_required:
+ description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
+ type: bool
+ default: yes
+ verbose_output:
+ description: when true, include all available node metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+# Minimal example
+plugin: community.general.docker_machine
+
+# Example using constructed features to create a group per Docker Machine driver
+# (https://docs.docker.com/machine/drivers/), e.g.:
+# $ docker-machine create --driver digitalocean ... mymachine
+# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
+# {
+# ...
+# "digitalocean": {
+# "hosts": [
+# "mymachine"
+# ]
+# ...
+# }
+strict: no
+keyed_groups:
+ - separator: ''
+ key: docker_machine_node_attributes.DriverName
+
+# Example grouping hosts by Docker Machine tag
+strict: no
+keyed_groups:
+ - prefix: tag
+ key: 'dm_tags'
+
+# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
+compose:
+ ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.process import get_bin_path
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+import json
+import re
+import subprocess
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using Docker machine as source. '''
+
+ NAME = 'community.general.docker_machine'
+
+ DOCKER_MACHINE_PATH = None
+
+ def _run_command(self, args):
+ if not self.DOCKER_MACHINE_PATH:
+ try:
+ self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ command = [self.DOCKER_MACHINE_PATH]
+ command.extend(args)
+ display.debug('Executing command {0}'.format(command))
+ try:
+ result = subprocess.check_output(command)
+ except subprocess.CalledProcessError as e:
+ display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
+ raise e
+
+ return to_text(result).strip()
+
+ def _get_docker_daemon_variables(self, machine_name):
+ '''
+ Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
+ the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
+ '''
+ try:
+ env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
+ except subprocess.CalledProcessError:
+ # This can happen when the machine is created but provisioning is incomplete
+ return []
+
+ # example output of docker-machine env --shell=sh:
+ # export DOCKER_TLS_VERIFY="1"
+ # export DOCKER_HOST="tcp://134.209.204.160:2376"
+ # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+ # export DOCKER_MACHINE_NAME="routinator"
+ # # Run this command to configure your shell:
+ # # eval $(docker-machine env --shell=bash routinator)
+
+ # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
+ # with the same name and value but with a dm_ name prefix.
+ vars = []
+ for line in env_lines:
+ match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
+ if match:
+ env_var_name = match.group(1)
+ env_var_value = match.group(2)
+ vars.append((env_var_name, env_var_value))
+
+ return vars
+
+ def _get_machine_names(self):
+ # Filter out machines that are not in the Running state, as we probably can't perform any useful
+ # actions on them.
+ ls_command = ['ls', '-q']
+ if self.get_option('running_required'):
+ ls_command.extend(['--filter', 'state=Running'])
+
+ try:
+ ls_lines = self._run_command(ls_command)
+ except subprocess.CalledProcessError:
+ return []
+
+ return ls_lines.splitlines()
+
+ def _inspect_docker_machine_host(self, node):
+ try:
+ inspect_lines = self._run_command(['inspect', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return json.loads(inspect_lines)
+
+ def _ip_addr_docker_machine_host(self, node):
+ try:
+ ip_addr = self._run_command(['ip', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return ip_addr
+
+ def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
+ if not env_var_tuples:
+ warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
+ if daemon_env in ('require', 'require-silently'):
+ if daemon_env == 'require':
+ display.warning('{0}: host will be skipped'.format(warning_prefix))
+ return True
+ else: # 'optional', 'optional-silently'
+ if daemon_env == 'optional':
+ display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
+ return False
+
+ def _populate(self):
+ daemon_env = self.get_option('daemon_env')
+ try:
+ for self.node in self._get_machine_names():
+ self.node_attrs = self._inspect_docker_machine_host(self.node)
+ if not self.node_attrs:
+ continue
+
+ machine_name = self.node_attrs['Driver']['MachineName']
+
+ # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
+ # that could be used to set environment variables to influence a local Docker client:
+ if daemon_env == 'skip':
+ env_var_tuples = []
+ else:
+ env_var_tuples = self._get_docker_daemon_variables(machine_name)
+ if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
+ continue
+
+ # add an entry in the inventory for this host
+ self.inventory.add_host(machine_name)
+
+ # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
+ # this works around an issue seen with Google Cloud Platform where the IP address was not available
+ # via the 'inspect' subcommand but was via the 'ip' subcommand.
+ if self.node_attrs['Driver']['IPAddress']:
+ ip_addr = self.node_attrs['Driver']['IPAddress']
+ else:
+ ip_addr = self._ip_addr_docker_machine_host(self.node)
+
+ # set standard Ansible remote host connection settings to details captured from `docker-machine`
+ # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
+ self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
+ self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
+ self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
+ self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
+
+ # set variables based on Docker Machine tags
+ tags = self.node_attrs['Driver'].get('Tags') or ''
+ self.inventory.set_variable(machine_name, 'dm_tags', tags)
+
+ # set variables based on Docker Machine env variables
+ for kv in env_var_tuples:
+ self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
+
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
+
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
+ to_native(e), orig_exc=e)
+
+ def verify_file(self, path):
+ """Return the possibility of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
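The dm_-prefixed host variables come from scraping `docker-machine env --shell=sh` output with a regular expression. A standalone sketch of that parsing step, run against the canned output shown in the comments above:

    import re

    env_lines = [
        'export DOCKER_TLS_VERIFY="1"',
        'export DOCKER_HOST="tcp://134.209.204.160:2376"',
        '# Run this command to configure your shell:',
    ]
    matches = (re.search('(DOCKER_[^=]+)="([^"]+)"', line) for line in env_lines)
    dm_vars = [(m.group(1), m.group(2)) for m in matches if m]
    # -> [('DOCKER_TLS_VERIFY', '1'), ('DOCKER_HOST', 'tcp://134.209.204.160:2376')]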
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_swarm.py
new file mode 100644
index 00000000..e730bd0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_swarm.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_swarm
+ plugin_type: inventory
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+ requirements:
+ - python >= 2.7
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the Docker swarm API.
+ - Uses a YAML configuration file docker_swarm.[yml|yaml].
+ - "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
+ I(managers) - all manager nodes; I(leader) - the swarm leader node;
+ I(nonleaders) - all nodes except the swarm leader."
+ options:
+ plugin:
+ description: The name of this plugin, it should always be set to C(community.general.docker_swarm)
+ for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ docker_swarm, community.general.docker_swarm ]
+ docker_host:
+ description:
+ - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+ - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str
+ required: true
+ aliases: [ docker_url ]
+ verbose_output:
+ description: Toggle to (not) include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
+ C(EngineVersion))
+ type: bool
+ default: yes
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ type: bool
+ default: no
+ validate_certs:
+ description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
+ host server.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ client_key:
+ description: Path to the client's TLS key file.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ca_cert:
+ description: Use a CA certificate when performing server verification by providing the path to a CA
+ certificate file.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description: Path to the client's TLS certificate file.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ tls_hostname:
+ description: When verifying the authenticity of the Docker host server, provide the expected name of
+ the server.
+ type: str
+ ssl_version:
+ description: Provide a valid SSL version number. Default value determined by ssl.py module.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by docker-py.
+ type: str
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
+ will be used instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ aliases: [ time_out ]
+ include_host_uri:
+ description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+ swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without further
+ modification as the value of option I(docker_host) in Docker Swarm modules when connecting via the API.
+ The port defaults to C(2376) when TLS is used and C(2375) otherwise; it can be overridden with I(include_host_uri_port).
+ type: bool
+ default: no
+ include_host_uri_port:
+ description: Override the detected port number included in I(ansible_host_uri)
+ type: int
+'''
+
+EXAMPLES = '''
+# Minimal example using local docker
+plugin: community.general.docker_swarm
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote docker
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote docker with unverified TLS
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: yes
+
+# Example using remote docker with verified TLS and client certificate verification
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2376
+validate_certs: yes
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2375
+strict: False
+keyed_groups:
+ # add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'Description.Platform.Architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'Description.Platform.OS'
+ # create a group per node label
+ # e.g. a node labeled w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'Spec.Labels'
+ prefix: label
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker swarm as source. '''
+
+ NAME = 'community.general.docker_swarm'
+
+ def _fail(self, msg):
+ raise AnsibleError(msg)
+
+ def _populate(self):
+ raw_params = dict(
+ docker_host=self.get_option('docker_host'),
+ tls=self.get_option('tls'),
+ tls_verify=self.get_option('validate_certs'),
+ key_path=self.get_option('client_key'),
+ cacert_path=self.get_option('ca_cert'),
+ cert_path=self.get_option('client_cert'),
+ tls_hostname=self.get_option('tls_hostname'),
+ api_version=self.get_option('api_version'),
+ timeout=self.get_option('timeout'),
+ ssl_version=self.get_option('ssl_version'),
+ debug=None,
+ )
+ update_tls_hostname(raw_params)
+ connect_params = get_connect_params(raw_params, fail_function=self._fail)
+ self.client = docker.DockerClient(**connect_params)
+ self.inventory.add_group('all')
+ self.inventory.add_group('manager')
+ self.inventory.add_group('worker')
+ self.inventory.add_group('leader')
+ self.inventory.add_group('nonleaders')
+
+ if self.get_option('include_host_uri'):
+ if self.get_option('include_host_uri_port'):
+ host_uri_port = str(self.get_option('include_host_uri_port'))
+ elif self.get_option('tls') or self.get_option('validate_certs'):
+ host_uri_port = '2376'
+ else:
+ host_uri_port = '2375'
+
+ try:
+ self.nodes = self.client.nodes.list()
+ for self.node in self.nodes:
+ self.node_attrs = self.client.nodes.get(self.node.id).attrs
+ self.inventory.add_host(self.node_attrs['ID'])
+ self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
+ self.node_attrs['Status']['Addr'])
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
+ if 'ManagerStatus' in self.node_attrs:
+ if self.node_attrs['ManagerStatus'].get('Leader'):
+ # This is a workaround for a bug in Docker where in some cases the leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
+ self.node_attrs['Status']['Addr']
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + swarm_leader_ip + ':' + host_uri_port)
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
+ self.inventory.add_host(self.node_attrs['ID'], group='leader')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
+ to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_DOCKER:
+ raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+ 'https://github.com/docker/docker-py.')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
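The plugin's node loop maps directly onto the Docker SDK for Python. A minimal sketch of the same enumeration, assuming the SDK is installed and a swarm manager is reachable on the local socket:

    import docker

    client = docker.DockerClient(base_url='unix://var/run/docker.sock')
    for node in client.nodes.list():
        attrs = client.nodes.get(node.id).attrs  # same attrs dict the plugin stores
        print(attrs['ID'], attrs['Spec']['Role'], attrs['Status']['Addr'])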
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/gitlab_runners.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
new file mode 100644
index 00000000..ce487f2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: gitlab_runners
+ plugin_type: inventory
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for GitLab runners.
+ requirements:
+ - python >= 2.7
+ - python-gitlab > 1.8.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the GitLab API.
+ - Uses a YAML configuration file gitlab_runners.[yml|yaml].
+ options:
+ plugin:
+ description: The name of this plugin; it should always be set to 'gitlab_runners' for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices:
+ - gitlab_runners
+ - community.general.gitlab_runners
+ server_url:
+ description: The URL of the GitLab server, with protocol (i.e. http or https).
+ env:
+ - name: GITLAB_SERVER_URL
+ version_added: 1.0.0
+ type: str
+ required: true
+ default: https://gitlab.com
+ api_token:
+ description: GitLab token for logging in.
+ env:
+ - name: GITLAB_API_TOKEN
+ version_added: 1.0.0
+ type: str
+ aliases:
+ - private_token
+ - access_token
+ filter:
+ description: Filter runners returned by the GitLab API.
+ env:
+ - name: GITLAB_FILTER
+ version_added: 1.0.0
+ type: str
+ choices: ['active', 'paused', 'online', 'specific', 'shared']
+ verbose_output:
+ description: Toggle to (not) include all available node metadata
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+# gitlab_runners.yml
+plugin: community.general.gitlab_runners
+server_url: https://gitlab.com
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.general.gitlab_runners
+server_url: https://gitlab.com
+strict: False
+keyed_groups:
+ # add e.g. amd64 hosts to an arch_amd64 group
+ - prefix: arch
+ key: 'architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'platform'
+ # create a group per runner tag
+ # e.g. a runner tagged w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'tag_list'
+ prefix: tag
+'''
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_native
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+
+try:
+ import gitlab
+ HAS_GITLAB = True
+except ImportError:
+ HAS_GITLAB = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using GitLab API as source. '''
+
+ NAME = 'community.general.gitlab_runners'
+
+ def _populate(self):
+ gl = gitlab.Gitlab(self.get_option('server_url'), private_token=self.get_option('api_token'))
+ self.inventory.add_group('gitlab_runners')
+ try:
+ if self.get_option('filter'):
+ runners = gl.runners.all(scope=self.get_option('filter'))
+ else:
+ runners = gl.runners.all()
+ for runner in runners:
+ host = str(runner['id'])
+ ip_address = runner['ip_address']
+ host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
+ self.inventory.add_host(host, group='gitlab_runners')
+ self.inventory.set_variable(host, 'ansible_host', ip_address)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host_attrs, host, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host_attrs, host, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
+ except Exception as e:
+ raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(("gitlab_runners.yaml", "gitlab_runners.yml")))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_GITLAB:
+ raise AnsibleError('The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
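The inventory is a thin wrapper over two python-gitlab calls: runners.all() for the listing and runners.get() for per-runner detail. A sketch mirroring the plugin's own accessors (the token is a placeholder; the dict-style access matches the python-gitlab versions this plugin targets):

    import gitlab

    gl = gitlab.Gitlab('https://gitlab.com', private_token='<token>')
    for runner in gl.runners.all(scope='active'):
        host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
        print(runner['id'], runner['ip_address'], host_attrs.get('tag_list'))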
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/kubevirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/kubevirt.py
new file mode 100644
index 00000000..14ba9df2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/kubevirt.py
@@ -0,0 +1,256 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: kubevirt
+ plugin_type: inventory
+ author:
+ - KubeVirt Team (@kubevirt)
+
+ short_description: KubeVirt inventory source
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Fetch running VirtualMachines for one or more namespaces.
+ - Groups by namespace, namespace_vms and labels.
+ - Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'kubevirt' plugin.
+ required: True
+ choices: ['kubevirt', 'community.general.kubevirt']
+ type: str
+ host_format:
+ description:
+ - Specify the format of the host in the inventory group.
+ default: "{namespace}-{name}-{uid}"
+ connections:
+ type: list
+ description:
+ - Optional list of cluster connection settings. If no connections are provided, the default
+ I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
+ the active user is authorized to access.
+ suboptions:
+ name:
+ description:
+ - Optional name to assign to the cluster. If not provided, a name is constructed from the server
+ and port.
+ type: str
+ kubeconfig:
+ description:
+ - Path to an existing Kubernetes config file. If not provided, and no other connection
+ options are provided, the OpenShift client will attempt to load the default
+ configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG
+ environment variable.
+ type: str
+ context:
+ description:
+ - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
+ variable.
+ type: str
+ host:
+ description:
+ - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ type: str
+ api_key:
+ description:
+ - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
+ variable.
+ type: str
+ username:
+ description:
+ - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
+ environment variable.
+ type: str
+ password:
+ description:
+ - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
+ environment variable.
+ type: str
+ cert_file:
+ description:
+ - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
+ environment variable.
+ type: str
+ key_file:
+ description:
+ - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
+ environment variable.
+ type: str
+ ssl_ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API. Can also be specified via
+ K8S_AUTH_SSL_CA_CERT environment variable.
+ type: str
+ verify_ssl:
+ description:
+ - "Whether or not to verify the API server's SSL certificates. Can also be specified via
+ K8S_AUTH_VERIFY_SSL environment variable."
+ type: bool
+ namespaces:
+ description:
+ - List of namespaces. If not specified, will fetch all virtual machines for all namespaces the user is authorized
+ to access.
+ type: list
+ network_name:
+ description:
+ - In case multiple networks are attached to the virtual machine, define which interface should be returned as the primary IP
+ address.
+ type: str
+ aliases: [ interface_name ]
+ api_version:
+ description:
+ - "Specify the KubeVirt API version."
+ type: str
+ annotation_variable:
+ description:
+ - "Specify the name of the annotation which provides data, which should be used as inventory host variables."
+ - "Note, that the value in ansible annotations should be json."
+ type: str
+ default: 'ansible'
+ requirements:
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# File must be named kubevirt.yaml or kubevirt.yml
+
+# Authenticate with token, and return all virtual machines for all namespaces
+plugin: community.general.kubevirt
+connections:
+ - host: https://kubevirt.io
+ token: xxxxxxxxxxxxxxxx
+ verify_ssl: false
+
+# Use default config (~/.kube/config) file and active context, and return vms with interfaces
+# connected to network myovsnetwork and from namespace vms
+plugin: community.general.kubevirt
+connections:
+ - namespaces:
+ - vms
+ network_name: myovsnetwork
+'''
+
+import json
+
+from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
+
+try:
+ from openshift.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+
+API_VERSION = 'kubevirt.io/v1alpha3'
+
+
+class InventoryModule(K8sInventoryModule):
+ NAME = 'community.general.kubevirt'
+
+ def setup(self, config_data, cache, cache_key):
+ self.config_data = config_data
+ super(InventoryModule, self).setup(config_data, cache, cache_key)
+
+ def fetch_objects(self, connections):
+ client = self.get_api_client()
+ vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
+
+ if connections:
+ for connection in connections:
+ client = self.get_api_client(**connection)
+ name = connection.get('name', self.get_default_host_name(client.configuration.host))
+ if connection.get('namespaces'):
+ namespaces = connection['namespaces']
+ else:
+ namespaces = self.get_available_namespaces(client)
+ interface_name = connection.get('network_name')
+ api_version = connection.get('api_version', API_VERSION)
+ annotation_variable = connection.get('annotation_variable', 'ansible')
+ for namespace in namespaces:
+ self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
+ else:
+ name = self.get_default_host_name(client.configuration.host)
+ namespaces = self.get_available_namespaces(client)
+ for namespace in namespaces:
+ # Fall back to defaults; per-connection api_version/annotation_variable are unset here
+ self.get_vms_for_namespace(client, name, namespace, vm_format, None, API_VERSION, 'ansible')
+
+ def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
+ v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
+ try:
+ obj = v1_vm.get(namespace=namespace)
+ except DynamicApiError as exc:
+ self.display.debug(exc)
+ raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
+
+ namespace_group = 'namespace_{0}'.format(namespace)
+ namespace_vms_group = '{0}_vms'.format(namespace_group)
+
+ name = self._sanitize_group_name(name)
+ namespace_group = self._sanitize_group_name(namespace_group)
+ namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
+ self.inventory.add_group(name)
+ self.inventory.add_group(namespace_group)
+ self.inventory.add_child(name, namespace_group)
+ self.inventory.add_group(namespace_vms_group)
+ self.inventory.add_child(namespace_group, namespace_vms_group)
+ for vm in obj.items:
+ if not (vm.status and vm.status.interfaces):
+ continue
+
+ # Find interface by its name:
+ if interface_name is None:
+ interface = vm.status.interfaces[0]
+ else:
+ interface = next(
+ (i for i in vm.status.interfaces if i.name == interface_name),
+ None
+ )
+
+ # If interface is not found or IP address is not reported skip this VM:
+ if interface is None or interface.ipAddress is None:
+ continue
+
+ vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
+ vm_ip = interface.ipAddress
+ vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
+
+ self.inventory.add_host(vm_name)
+
+ if vm.metadata.labels:
+ # create a group for each label_value
+ for key, value in vm.metadata.labels:
+ group_name = 'label_{0}_{1}'.format(key, value)
+ group_name = self._sanitize_group_name(group_name)
+ self.inventory.add_group(group_name)
+ self.inventory.add_child(group_name, vm_name)
+ vm_labels = dict(vm.metadata.labels)
+ else:
+ vm_labels = {}
+
+ self.inventory.add_child(namespace_vms_group, vm_name)
+
+ # add hostvars
+ self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
+ self.inventory.set_variable(vm_name, 'labels', vm_labels)
+ self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
+ self.inventory.set_variable(vm_name, 'object_type', 'vm')
+ self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
+ self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
+
+ # Add all variables which are listed in 'ansible' annotation:
+ annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
+ for k, v in annotations_data.items():
+ self.inventory.set_variable(vm_name, k, v)
+
+ def verify_file(self, path):
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
+ return True
+ return False
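Host names are produced from the host_format template, and extra host variables are decoded from the JSON payload of the configured annotation (default 'ansible'). A self-contained sketch of both steps with placeholder values:

    import json

    host_format = '{namespace}-{name}-{uid}'
    vm_name = host_format.format(namespace='vms', name='testvm', uid='f8abae7c')
    # -> 'vms-testvm-f8abae7c'

    annotations = {'ansible': '{"ansible_user": "cloud-user"}'}
    extra_vars = json.loads(annotations.get('ansible', '{}'))
    # -> {'ansible_user': 'cloud-user'}  (each key becomes a host variable)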
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/linode.py
new file mode 100644
index 00000000..c308fb82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/linode.py
@@ -0,0 +1,211 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: linode
+ plugin_type: inventory
+ author:
+ - Luke Murphy (@decentral1se)
+ short_description: Ansible dynamic inventory plugin for Linode.
+ requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+ description:
+ - Reads inventories from the Linode API v4.
+ - Uses a YAML configuration file that ends with linode.(yml|yaml).
+ - Linode labels are used by default as the hostnames.
+        - The inventory groups are built from the Linode instance group property, not tags.
+ options:
+ plugin:
+ description: marks this as an instance of the 'linode' plugin
+ required: true
+ choices: ['linode', 'community.general.linode']
+ access_token:
+ description: The Linode account personal access token.
+ required: true
+ env:
+ - name: LINODE_ACCESS_TOKEN
+ regions:
+          description: Populate inventory with instances in these regions.
+ default: []
+ type: list
+ required: false
+ types:
+          description: Populate inventory with instances of these types.
+ default: []
+ type: list
+ required: false
+'''
+
+EXAMPLES = r'''
+# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
+plugin: community.general.linode
+
+# Example with regions, types, groups and access token
+plugin: community.general.linode
+access_token: foobar
+regions:
+ - eu-west
+types:
+ - g5-standard-2
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+try:
+ from linode_api4 import LinodeClient
+ from linode_api4.errors import ApiError as LinodeApiError
+ HAS_LINODE = True
+except ImportError:
+ HAS_LINODE = False
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'community.general.linode'
+
+ def _build_client(self):
+ """Build the Linode client."""
+
+ access_token = self.get_option('access_token')
+
+ if access_token is None:
+ try:
+ access_token = os.environ['LINODE_ACCESS_TOKEN']
+ except KeyError:
+ pass
+
+ if access_token is None:
+ raise AnsibleError((
+ 'Could not retrieve Linode access token '
+ 'from plugin configuration or environment'
+ ))
+
+ self.client = LinodeClient(access_token)
+
+ def _get_instances_inventory(self):
+ """Retrieve Linode instance information from cloud inventory."""
+ try:
+ self.instances = self.client.linode.instances()
+ except LinodeApiError as exception:
+ raise AnsibleError('Linode client raised: %s' % exception)
+
+ def _add_groups(self):
+ """Add Linode instance groups to the dynamic inventory."""
+ self.linode_groups = set(
+ filter(None, [
+ instance.group
+ for instance
+ in self.instances
+ ])
+ )
+
+ for linode_group in self.linode_groups:
+ self.inventory.add_group(linode_group)
+
+ def _filter_by_config(self, regions, types):
+ """Filter instances by user specified configuration."""
+ if regions:
+ self.instances = [
+ instance for instance in self.instances
+ if instance.region.id in regions
+ ]
+
+ if types:
+ self.instances = [
+ instance for instance in self.instances
+ if instance.type.id in types
+ ]
+
+ def _add_instances_to_groups(self):
+ """Add instance names to their dynamic inventory groups."""
+ for instance in self.instances:
+ self.inventory.add_host(instance.label, group=instance.group)
+
+ def _add_hostvars_for_instances(self):
+ """Add hostvars for instances in the dynamic inventory."""
+ for instance in self.instances:
+ hostvars = instance._raw_json
+ for hostvar_key in hostvars:
+ self.inventory.set_variable(
+ instance.label,
+ hostvar_key,
+ hostvars[hostvar_key]
+ )
+
+ def _validate_option(self, name, desired_type, option_value):
+ """Validate user specified configuration data against types."""
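+        # a bare string is coerced into a single-item list, so users may write
+        # 'regions: eu-west' as well as a proper YAML list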
+ if isinstance(option_value, string_types) and desired_type == list:
+ option_value = [option_value]
+
+ if option_value is None:
+ option_value = desired_type()
+
+ if not isinstance(option_value, desired_type):
+ raise AnsibleParserError(
+ 'The option %s (%s) must be a %s' % (
+ name, option_value, desired_type
+ )
+ )
+
+ return option_value
+
+ def _get_query_options(self, config_data):
+ """Get user specified query options from the configuration."""
+ options = {
+ 'regions': {
+ 'type_to_be': list,
+ 'value': config_data.get('regions', [])
+ },
+ 'types': {
+ 'type_to_be': list,
+ 'value': config_data.get('types', [])
+ },
+ }
+
+ for name in options:
+ options[name]['value'] = self._validate_option(
+ name,
+ options[name]['type_to_be'],
+ options[name]['value']
+ )
+
+ regions = options['regions']['value']
+ types = options['types']['value']
+
+ return regions, types
+
+ def verify_file(self, path):
+ """Verify the Linode configuration file."""
+ if super(InventoryModule, self).verify_file(path):
+ endings = ('linode.yaml', 'linode.yml')
+ if any((path.endswith(ending) for ending in endings)):
+ return True
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+        """Dynamically parse the Linode cloud inventory."""
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ if not HAS_LINODE:
+ raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
+
+ config_data = self._read_config_data(path)
+ self._build_client()
+
+ self._get_instances_inventory()
+
+ regions, types = self._get_query_options(config_data)
+ self._filter_by_config(regions, types)
+
+ self._add_groups()
+ self._add_instances_to_groups()
+ self._add_hostvars_for_instances()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/nmap.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/nmap.py
new file mode 100644
index 00000000..6e2efae3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/nmap.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: nmap
+ plugin_type: inventory
+ short_description: Uses nmap to find hosts to target
+ description:
+ - Uses a YAML configuration file with a valid YAML extension.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ requirements:
+ - nmap CLI installed
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'nmap' plugin.
+ required: True
+ choices: ['nmap', 'community.general.nmap']
+ address:
+            description: Network IP or range of IPs to scan. You can use a simple range (10.2.2.15-25) or CIDR notation.
+ required: True
+ exclude:
+ description: list of addresses to exclude
+ type: list
+ ports:
+ description: Enable/disable scanning for open ports
+ type: boolean
+ default: True
+ ipv4:
+ description: use IPv4 type addresses
+ type: boolean
+ default: True
+ ipv6:
+ description: use IPv6 type addresses
+ type: boolean
+ default: True
+ notes:
+        - At least one of ipv4 or ipv6 must be True; both can be True, but they cannot both be False.
+ - 'TODO: add OS fingerprinting'
+'''
+EXAMPLES = '''
+# inventory.config file in YAML format
+plugin: community.general.nmap
+strict: False
+address: 192.168.0.0/24
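+
+# a more selective scan using only documented options (values are illustrative)
+plugin: community.general.nmap
+address: 10.2.2.15-25
+exclude:
+  - 10.2.2.20
+ports: False
+ipv6: False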
+'''
+
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.process import get_bin_path
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'community.general.nmap'
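+    # output parsing helpers: find_host matches lines such as
+    # 'Nmap scan report for <name> (<address>)' (the address part is optional);
+    # find_port matches port table rows such as '22/tcp open ssh'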
+ find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?')
+ find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)')
+
+ def __init__(self):
+ self._nmap = None
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+
+ if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
+ valid = True
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=False):
+
+ try:
+ self._nmap = get_bin_path('nmap')
+ except ValueError as e:
+ raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ self._read_config_data(path)
+
+ # setup command
+ cmd = [self._nmap]
+ if not self._options['ports']:
+ cmd.append('-sP')
+
+ if self._options['ipv4'] and not self._options['ipv6']:
+ cmd.append('-4')
+ elif self._options['ipv6'] and not self._options['ipv4']:
+ cmd.append('-6')
+ elif not self._options['ipv6'] and not self._options['ipv4']:
+ raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
+
+ if self._options['exclude']:
+ cmd.append('--exclude')
+ cmd.append(','.join(self._options['exclude']))
+
+ cmd.append(self._options['address'])
+ try:
+ # execute
+ p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
+
+ # parse results
+ host = None
+ ip = None
+ ports = []
+
+ try:
+ t_stdout = to_text(stdout, errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
+
+ for line in t_stdout.splitlines():
+ hits = self.find_host.match(line)
+ if hits:
+ if host is not None:
+ self.inventory.set_variable(host, 'ports', ports)
+
+ # if dns only shows arpa, just use ip instead as hostname
+ if hits.group(1).endswith('.in-addr.arpa'):
+ host = hits.group(2)
+ else:
+ host = hits.group(1)
+
+ # if no reverse dns exists, just use ip instead as hostname
+ if hits.group(2) is not None:
+ ip = hits.group(2)
+ else:
+ ip = hits.group(1)
+
+ if host is not None:
+ # update inventory
+ self.inventory.add_host(host)
+ self.inventory.set_variable(host, 'ip', ip)
+ ports = []
+ continue
+
+ host_ports = self.find_port.match(line)
+ if host is not None and host_ports:
+ ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)})
+ continue
+
+ # TODO: parse more data, OS?
+
+ # if any leftovers
+ if host and ports:
+ self.inventory.set_variable(host, 'ports', ports)
+
+ except Exception as e:
+ raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/online.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/online.py
new file mode 100644
index 00000000..d976633a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/online.py
@@ -0,0 +1,260 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: online
+ plugin_type: inventory
+ author:
+ - Remy Leone (@sieben)
+ short_description: Scaleway (previously Online SAS or Online.net) inventory source
+ description:
+ - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'online' plugin.
+ required: True
+ choices: ['online', 'community.general.online']
+ oauth_token:
+ required: True
+ description: Online OAuth token.
+ env:
+ # in order of precedence
+ - name: ONLINE_TOKEN
+ - name: ONLINE_API_KEY
+ - name: ONLINE_OAUTH_TOKEN
+ hostnames:
+            description: List of preferences about what to use as a hostname.
+ type: list
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - hostname
+ groups:
+ description: List of groups.
+ type: list
+ choices:
+ - location
+ - offer
+ - rpn
+'''
+
+EXAMPLES = r'''
+# online_inventory.yml file in YAML format
+# Example command line: ansible-inventory --list -i online_inventory.yml
+
+plugin: community.general.online
+hostnames:
+ - public_ipv4
+groups:
+ - location
+ - offer
+ - rpn
+'''
+
+import json
+from sys import version as python_version
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+
+class InventoryModule(BaseInventoryPlugin):
+ NAME = 'community.general.online'
+ API_ENDPOINT = "https://api.online.net"
+
+ def extract_public_ipv4(self, host_infos):
+ try:
+ return host_infos["network"]["ip"][0]
+ except (KeyError, TypeError, IndexError):
+ self.display.warning("An error happened while extracting public IPv4 address. Information skipped.")
+ return None
+
+ def extract_private_ipv4(self, host_infos):
+ try:
+ return host_infos["network"]["private"][0]
+ except (KeyError, TypeError, IndexError):
+ self.display.warning("An error happened while extracting private IPv4 address. Information skipped.")
+ return None
+
+ def extract_os_name(self, host_infos):
+ try:
+ return host_infos["os"]["name"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting OS name. Information skipped.")
+ return None
+
+ def extract_os_version(self, host_infos):
+ try:
+ return host_infos["os"]["version"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting OS version. Information skipped.")
+ return None
+
+ def extract_hostname(self, host_infos):
+ try:
+ return host_infos["hostname"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting hostname. Information skipped.")
+ return None
+
+ def extract_location(self, host_infos):
+ try:
+ return host_infos["location"]["datacenter"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting datacenter location. Information skipped.")
+ return None
+
+ def extract_offer(self, host_infos):
+ try:
+ return host_infos["offer"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting commercial offer. Information skipped.")
+ return None
+
+ def extract_rpn(self, host_infos):
+ try:
+ return self.rpn_lookup_cache[host_infos["id"]]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting RPN information. Information skipped.")
+ return None
+
+ def _fetch_information(self, url):
+ try:
+ response = open_url(url, headers=self.headers)
+        except Exception as e:
+            self.display.warning("An error happened while fetching %s: %s" % (url, to_native(e)))
+ return None
+
+ try:
+ raw_data = to_text(response.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleError("Incorrect encoding of fetched payload from Online servers")
+
+ try:
+ return json.loads(raw_data)
+ except ValueError:
+ raise AnsibleError("Incorrect JSON payload")
+
+ @staticmethod
+ def extract_rpn_lookup_cache(rpn_list):
+ lookup = {}
+ for rpn in rpn_list:
+ for member in rpn["members"]:
+ lookup[member["id"]] = rpn["name"]
+ return lookup
+
+ def _fill_host_variables(self, hostname, host_infos):
+ targeted_attributes = (
+ "offer",
+ "id",
+ "hostname",
+ "location",
+ "boot_mode",
+ "power",
+ "last_reboot",
+ "anti_ddos",
+ "hardware_watch",
+ "support"
+ )
+ for attribute in targeted_attributes:
+ self.inventory.set_variable(hostname, attribute, host_infos[attribute])
+
+ if self.extract_public_ipv4(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
+
+ if self.extract_private_ipv4(host_infos=host_infos):
+            self.inventory.set_variable(hostname, "private_ipv4", self.extract_private_ipv4(host_infos=host_infos))
+
+ if self.extract_os_name(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
+
+ if self.extract_os_version(host_infos=host_infos):
+            self.inventory.set_variable(hostname, "os_version", self.extract_os_version(host_infos=host_infos))
+
+ def _filter_host(self, host_infos, hostname_preferences):
+
+ for pref in hostname_preferences:
+ if self.extractors[pref](host_infos):
+ return self.extractors[pref](host_infos)
+
+ return None
+
+ def do_server_inventory(self, host_infos, hostname_preferences, group_preferences):
+
+ hostname = self._filter_host(host_infos=host_infos,
+ hostname_preferences=hostname_preferences)
+
+        # No suitable hostname was found in the attributes, so the host won't be in the inventory
+ if not hostname:
+ return
+
+ self.inventory.add_host(host=hostname)
+ self._fill_host_variables(hostname=hostname, host_infos=host_infos)
+
+ for g in group_preferences:
+ group = self.group_extractors[g](host_infos)
+
+            # a missing attribute skips only this group, not the remaining ones
+            if not group:
+                continue
+
+ self.inventory.add_group(group=group)
+ self.inventory.add_host(group=group, host=hostname)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self._read_config_data(path=path)
+
+ token = self.get_option("oauth_token")
+ hostname_preferences = self.get_option("hostnames")
+
+ group_preferences = self.get_option("groups")
+ if group_preferences is None:
+ group_preferences = []
+
+ self.extractors = {
+ "public_ipv4": self.extract_public_ipv4,
+ "private_ipv4": self.extract_private_ipv4,
+ "hostname": self.extract_hostname,
+ }
+
+ self.group_extractors = {
+ "location": self.extract_location,
+ "offer": self.extract_offer,
+ "rpn": self.extract_rpn
+ }
+
+ self.headers = {
+ 'Authorization': "Bearer %s" % token,
+ 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]),
+ 'Content-type': 'application/json'
+ }
+
+ servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server")
+ servers_api_path = self._fetch_information(url=servers_url)
+
+ if "rpn" in group_preferences:
+ rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group")
+ rpn_list = self._fetch_information(url=rpn_groups_url)
+ self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list)
+
+ for server_api_path in servers_api_path:
+
+ server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path)
+ raw_server_info = self._fetch_information(url=server_url)
+
+ if raw_server_info is None:
+ continue
+
+ self.do_server_inventory(host_infos=raw_server_info,
+ hostname_preferences=hostname_preferences,
+ group_preferences=group_preferences)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/proxmox.py
new file mode 100644
index 00000000..aa9a757a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/proxmox.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: proxmox
+ plugin_type: inventory
+ short_description: Proxmox inventory source
+ version_added: "1.2.0"
+ author:
+ - Jeffrey van Pelt (@Thulium-Drake) <jeff@vanpelt.one>
+ requirements:
+ - requests >= 1.1
+ description:
+ - Get inventory hosts from a Proxmox PVE cluster.
+        - "Uses a configuration file as an inventory source; it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
+ - Will retrieve the first network interface with an IP for Proxmox nodes.
+ - Can retrieve LXC/QEMU configuration as facts.
+ extends_documentation_fragment:
+ - inventory_cache
+ options:
+ plugin:
+        description: The name of this plugin. It should always be set to C(community.general.proxmox) for this plugin to recognize it as its own.
+ required: yes
+ choices: ['community.general.proxmox']
+ type: str
+ url:
+ description: URL to Proxmox cluster.
+ default: 'http://localhost:8006'
+ type: str
+ user:
+ description: Proxmox authentication user.
+ required: yes
+ type: str
+ password:
+ description: Proxmox authentication password.
+ required: yes
+ type: str
+ validate_certs:
+ description: Verify SSL certificate if using HTTPS.
+ type: boolean
+ default: yes
+ group_prefix:
+ description: Prefix to apply to Proxmox groups.
+ default: proxmox_
+ type: str
+ facts_prefix:
+ description: Prefix to apply to LXC/QEMU config facts.
+ default: proxmox_
+ type: str
+ want_facts:
+ description: Gather LXC/QEMU configuration facts.
+ default: no
+ type: bool
+'''
+
+EXAMPLES = '''
+# my.proxmox.yml
+plugin: community.general.proxmox
+url: http://localhost:8006
+user: ansible@pve
+password: secure
+validate_certs: no
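+
+# my.proxmox.yml, gathering LXC/QEMU config facts with a custom group prefix
+# (values are illustrative)
+plugin: community.general.proxmox
+url: https://pve.example.invalid:8006
+user: ansible@pve
+password: secure
+want_facts: yes
+group_prefix: pve_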
+'''
+
+import re
+
+from ansible.module_utils.common._collections_compat import MutableMapping
+from distutils.version import LooseVersion
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# 3rd party imports
+try:
+ import requests
+ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
+ raise ImportError
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+ ''' Host inventory parser for ansible using Proxmox as source. '''
+
+ NAME = 'community.general.proxmox'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.proxmox_url = None
+
+ self.session = None
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('proxmox.yaml', 'proxmox.yml')):
+ valid = True
+ else:
+                self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" or "proxmox.yml"')
+ return valid
+
+ def _get_session(self):
+ if not self.session:
+ self.session = requests.session()
+ self.session.verify = self.get_option('validate_certs')
+ return self.session
+
+ def _get_auth(self):
+ credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
+
+ a = self._get_session()
+ ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
+
+ json = ret.json()
+
+ self.credentials = {
+ 'ticket': json['data']['ticket'],
+ 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
+ }
+
+ def _get_json(self, url, ignore_errors=None):
+
+ if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
+
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {'url': ''}
+
+ data = []
+ s = self._get_session()
+ while True:
+ headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
+ ret = s.get(url, headers=headers)
+ if ignore_errors and ret.status_code in ignore_errors:
+ break
+ ret.raise_for_status()
+ json = ret.json()
+
+ # process results
+ # FIXME: This assumes 'return type' matches a specific query,
+ # it will break if we expand the queries and they dont have different types
+ if 'data' not in json:
+ # /hosts/:id does not have a 'data' key
+ data = json
+ break
+ elif isinstance(json['data'], MutableMapping):
+ # /facts are returned as dict in 'data'
+ data = json['data']
+ break
+ else:
+                    # /hosts returns the list of all hosts in 'data'; responses are paginated
+ data = data + json['data']
+ break
+
+ self._cache[self.cache_key][url] = data
+
+ return self._cache[self.cache_key][url]
+
+ def _get_nodes(self):
+ return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
+
+ def _get_pools(self):
+ return self._get_json("%s/api2/json/pools" % self.proxmox_url)
+
+ def _get_lxc_per_node(self, node):
+ return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node))
+
+ def _get_qemu_per_node(self, node):
+ return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node))
+
+ def _get_members_per_pool(self, pool):
+ ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool))
+ return ret['members']
+
+ def _get_node_ip(self, node):
+ ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))
+
+        # return the address of the first interface that actually reports one
+        for iface in ret:
+            try:
+                return iface['address']
+            except (KeyError, TypeError):
+                continue
+        return None
+
+ def _get_vm_config(self, node, vmid, vmtype, name):
+ ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
+
+ vmid_key = 'vmid'
+ vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
+ self.inventory.set_variable(name, vmid_key, vmid)
+
+ vmtype_key = 'vmtype'
+ vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
+ self.inventory.set_variable(name, vmtype_key, vmtype)
+
+ for config in ret:
+ key = config
+ key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
+ value = ret[config]
+ try:
+ # fixup disk images as they have no key
+ if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
+ value = ('disk_image=' + value)
+
+                # values containing commas are split into a dict; everything else is kept as-is
+                if not isinstance(value, int) and ',' in value:
+                    # skip over any keys that cannot be processed
+                    try:
+                        value = dict(key.split("=") for key in value.split(","))
+                    except Exception:
+                        continue
+
+ self.inventory.set_variable(name, key, value)
+ except NameError:
+ return None
+
+ def _get_vm_status(self, node, vmid, vmtype, name):
+ ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
+
+ status = ret['status']
+ status_key = 'status'
+ status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower()))
+ self.inventory.set_variable(name, status_key, status)
+
+ def to_safe(self, word):
+ '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
+ #> ProxmoxInventory.to_safe("foo-bar baz")
+ 'foo_barbaz'
+ '''
+ regex = r"[^A-Za-z0-9\_]"
+ return re.sub(regex, "_", word.replace(" ", ""))
+
+ def _populate(self):
+
+ self._get_auth()
+
+ # gather vm's on nodes
+ for node in self._get_nodes():
+ # FIXME: this can probably be cleaner
+ # create groups
+ lxc_group = 'all_lxc'
+ lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), lxc_group.lower()))
+ self.inventory.add_group(lxc_group)
+ qemu_group = 'all_qemu'
+ qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), qemu_group.lower()))
+ self.inventory.add_group(qemu_group)
+ nodes_group = 'nodes'
+ nodes_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), nodes_group.lower()))
+ self.inventory.add_group(nodes_group)
+ running_group = 'all_running'
+ running_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), running_group.lower()))
+ self.inventory.add_group(running_group)
+ stopped_group = 'all_stopped'
+ stopped_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), stopped_group.lower()))
+ self.inventory.add_group(stopped_group)
+
+ if node.get('node'):
+ self.inventory.add_host(node['node'])
+
+ if node['type'] == 'node':
+ self.inventory.add_child(nodes_group, node['node'])
+
+ # get node IP address
+ ip = self._get_node_ip(node['node'])
+ self.inventory.set_variable(node['node'], 'ansible_host', ip)
+
+ # get LXC containers for this node
+ node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower()))
+ self.inventory.add_group(node_lxc_group)
+ for lxc in self._get_lxc_per_node(node['node']):
+ self.inventory.add_host(lxc['name'])
+ self.inventory.add_child(lxc_group, lxc['name'])
+ self.inventory.add_child(node_lxc_group, lxc['name'])
+
+ # get LXC status when want_facts == True
+ if self.get_option('want_facts'):
+ self._get_vm_status(node['node'], lxc['vmid'], 'lxc', lxc['name'])
+ if lxc['status'] == 'stopped':
+ self.inventory.add_child(stopped_group, lxc['name'])
+ elif lxc['status'] == 'running':
+ self.inventory.add_child(running_group, lxc['name'])
+
+ # get LXC config for facts
+ if self.get_option('want_facts'):
+ self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])
+
+ # get QEMU vm's for this node
+ node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
+ self.inventory.add_group(node_qemu_group)
+ for qemu in self._get_qemu_per_node(node['node']):
+ if qemu['template']:
+ continue
+
+ self.inventory.add_host(qemu['name'])
+ self.inventory.add_child(qemu_group, qemu['name'])
+ self.inventory.add_child(node_qemu_group, qemu['name'])
+
+ # get QEMU status
+ self._get_vm_status(node['node'], qemu['vmid'], 'qemu', qemu['name'])
+ if qemu['status'] == 'stopped':
+ self.inventory.add_child(stopped_group, qemu['name'])
+ elif qemu['status'] == 'running':
+ self.inventory.add_child(running_group, qemu['name'])
+
+ # get QEMU config for facts
+ if self.get_option('want_facts'):
+ self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])
+
+ # gather vm's in pools
+ for pool in self._get_pools():
+ if pool.get('poolid'):
+ pool_group = 'pool_' + pool['poolid']
+ pool_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), pool_group.lower()))
+ self.inventory.add_group(pool_group)
+
+ for member in self._get_members_per_pool(pool['poolid']):
+ if member.get('name'):
+ self.inventory.add_child(pool_group, member['name'])
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_REQUESTS:
+ raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
+ 'https://github.com/psf/requests.')
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.proxmox_url = self.get_option('url')
+ self.proxmox_user = self.get_option('user')
+ self.proxmox_password = self.get_option('password')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ # actually populate inventory
+ self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/scaleway.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/scaleway.py
new file mode 100644
index 00000000..4cc16956
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/scaleway.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: scaleway
+ plugin_type: inventory
+ author:
+ - Remy Leone (@sieben)
+ short_description: Scaleway inventory source
+ description:
+ - Get inventory hosts from Scaleway
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'scaleway' plugin.
+ required: True
+ choices: ['scaleway', 'community.general.scaleway']
+ regions:
+ description: Filter results on a specific Scaleway region
+ type: list
+ default:
+ - ams1
+ - par1
+ - par2
+ - waw1
+ tags:
+ description: Filter results on a specific tag
+ type: list
+ oauth_token:
+ required: True
+ description: Scaleway OAuth token.
+ env:
+ # in order of precedence
+ - name: SCW_TOKEN
+ - name: SCW_API_KEY
+ - name: SCW_OAUTH_TOKEN
+ hostnames:
+            description: List of preferences about what to use as a hostname.
+ type: list
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - public_ipv6
+ - hostname
+ - id
+ variables:
+ description: 'set individual variables: keys are variable names and
+ values are templates. Any value returned by the
+ L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
+ can be used.'
+ type: dict
+'''
+
+EXAMPLES = '''
+# scaleway_inventory.yml file in YAML format
+# Example command line: ansible-inventory --list -i scaleway_inventory.yml
+
+# use hostname as inventory_hostname
+# use the private IP address to connect to the host
+plugin: community.general.scaleway
+regions:
+ - ams1
+ - par1
+tags:
+ - foobar
+hostnames:
+ - hostname
+variables:
+ ansible_host: private_ip
+ state: state
+
+# use hostname as inventory_hostname and public IP address to connect to the host
+plugin: community.general.scaleway
+hostnames:
+ - hostname
+regions:
+ - par1
+variables:
+ ansible_host: public_ip.address
+'''
+
+import json
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
+from ansible.module_utils.urls import open_url
+from ansible.module_utils._text import to_native
+
+import ansible.module_utils.six.moves.urllib.parse as urllib_parse
+
+
+def _fetch_information(token, url):
+ results = []
+ paginated_url = url
+ while True:
+ try:
+ response = open_url(paginated_url,
+ headers={'X-Auth-Token': token,
+ 'Content-type': 'application/json'})
+ except Exception as e:
+ raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
+ try:
+ raw_json = json.loads(response.read())
+ except ValueError:
+ raise AnsibleError("Incorrect JSON payload")
+
+ try:
+ results.extend(raw_json["servers"])
+ except KeyError:
+ raise AnsibleError("Incorrect format from the Scaleway API response")
+
+ link = response.headers['Link']
+ if not link:
+ return results
+ relations = parse_pagination_link(link)
+ if 'next' not in relations:
+ return results
+ paginated_url = urllib_parse.urljoin(paginated_url, relations['next'])
+
+
+def _build_server_url(api_endpoint):
+ return "/".join([api_endpoint, "servers"])
+
+
+def extract_public_ipv4(server_info):
+ try:
+ return server_info["public_ip"]["address"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_private_ipv4(server_info):
+ try:
+ return server_info["private_ip"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_hostname(server_info):
+ try:
+ return server_info["hostname"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_server_id(server_info):
+ try:
+ return server_info["id"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_public_ipv6(server_info):
+ try:
+ return server_info["ipv6"]["address"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_tags(server_info):
+ try:
+ return server_info["tags"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_zone(server_info):
+ try:
+ return server_info["location"]["zone_id"]
+ except (KeyError, TypeError):
+ return None
+
+
+extractors = {
+ "public_ipv4": extract_public_ipv4,
+ "private_ipv4": extract_private_ipv4,
+ "public_ipv6": extract_public_ipv6,
+ "hostname": extract_hostname,
+ "id": extract_server_id
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ NAME = 'community.general.scaleway'
+
+ def _fill_host_variables(self, host, server_info):
+ targeted_attributes = (
+ "arch",
+ "commercial_type",
+ "id",
+ "organization",
+ "state",
+ "hostname",
+ )
+ for attribute in targeted_attributes:
+ self.inventory.set_variable(host, attribute, server_info[attribute])
+
+ self.inventory.set_variable(host, "tags", server_info["tags"])
+
+ if extract_public_ipv6(server_info=server_info):
+ self.inventory.set_variable(host, "public_ipv6", extract_public_ipv6(server_info=server_info))
+
+ if extract_public_ipv4(server_info=server_info):
+ self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info))
+
+ if extract_private_ipv4(server_info=server_info):
+ self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info))
+
+ def _get_zones(self, config_zones):
+ return set(SCALEWAY_LOCATION.keys()).intersection(config_zones)
+
+ def match_groups(self, server_info, tags):
+ server_zone = extract_zone(server_info=server_info)
+ server_tags = extract_tags(server_info=server_info)
+
+ # If a server does not have a zone, it means it is archived
+ if server_zone is None:
+ return set()
+
+ # If no filtering is defined, all tags are valid groups
+ if tags is None:
+ return set(server_tags).union((server_zone,))
+
+ matching_tags = set(server_tags).intersection(tags)
+
+ if not matching_tags:
+ return set()
+ else:
+ return matching_tags.union((server_zone,))
+
+ def _filter_host(self, host_infos, hostname_preferences):
+
+ for pref in hostname_preferences:
+ if extractors[pref](host_infos):
+ return extractors[pref](host_infos)
+
+ return None
+
+ def do_zone_inventory(self, zone, token, tags, hostname_preferences):
+ self.inventory.add_group(zone)
+ zone_info = SCALEWAY_LOCATION[zone]
+
+ url = _build_server_url(zone_info["api_endpoint"])
+ raw_zone_hosts_infos = _fetch_information(url=url, token=token)
+
+ for host_infos in raw_zone_hosts_infos:
+
+ hostname = self._filter_host(host_infos=host_infos,
+ hostname_preferences=hostname_preferences)
+
+            # No suitable hostname was found in the attributes, so the host won't be in the inventory
+ if not hostname:
+ continue
+
+ groups = self.match_groups(host_infos, tags)
+
+ for group in groups:
+ self.inventory.add_group(group=group)
+ self.inventory.add_host(group=group, host=hostname)
+ self._fill_host_variables(host=hostname, server_info=host_infos)
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self._read_config_data(path=path)
+
+ config_zones = self.get_option("regions")
+ tags = self.get_option("tags")
+ token = self.get_option("oauth_token")
+ hostname_preference = self.get_option("hostnames")
+
+ for zone in self._get_zones(config_zones):
+ self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/stackpath_compute.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
new file mode 100644
index 00000000..21e1b085
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: stackpath_compute
+ plugin_type: inventory
+ short_description: StackPath Edge Computing inventory source
+ version_added: 1.2.0
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from StackPath Edge Computing.
+ - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
+ options:
+ plugin:
+ description:
+ - A token that ensures this is a source file for the plugin.
+ required: true
+ choices: ['community.general.stackpath_compute']
+ client_id:
+ description:
+ - An OAuth client ID generated from the API Management section of the StackPath customer portal
+ U(https://control.stackpath.net/api-management).
+ required: true
+ type: str
+ client_secret:
+ description:
+ - An OAuth client secret generated from the API Management section of the StackPath customer portal
+ U(https://control.stackpath.net/api-management).
+ required: true
+ type: str
+ stack_slugs:
+ description:
+                - A list of stack slugs to query instances in. If not set, instances in all stacks on the account are returned.
+ type: list
+ elements: str
+ use_internal_ip:
+ description:
+                - Whether or not to use internal IP addresses. If false, external IP addresses are used.
+ - If an instance doesn't have an external IP it will not be returned when this option is set to false.
+ type: bool
+'''
+
+EXAMPLES = '''
+# Example using credentials to fetch all workload instances in a stack.
+---
+plugin: community.general.stackpath_compute
+client_id: my_client_id
+client_secret: my_client_secret
+stack_slugs:
+- my_first_stack_slug
+- my_other_stack_slug
+use_internal_ip: false
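+
+# Minimal example querying every stack on the account, using external IPs
+# (credential values are placeholders).
+plugin: community.general.stackpath_compute
+client_id: my_client_id
+client_secret: my_client_secret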
+'''
+
+import traceback
+import json
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+from ansible.plugins.inventory import (
+ BaseInventoryPlugin,
+ Constructable,
+ Cacheable
+)
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'community.general.stackpath_compute'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ # credentials
+ self.client_id = None
+ self.client_secret = None
+ self.stack_slug = None
+ self.api_host = "https://gateway.stackpath.com"
+ self.group_keys = [
+ "stackSlug",
+ "workloadId",
+ "cityCode",
+ "countryCode",
+ "continent",
+ "target",
+ "name",
+ "workloadSlug"
+ ]
+
+ def _validate_config(self, config):
+ if config['plugin'] != 'community.general.stackpath_compute':
+ raise AnsibleError("plugin doesn't match this plugin")
+ try:
+            client_id = config['client_id']
+            if len(client_id) != 32:
+                raise AnsibleError("client_id must be 32 characters long")
+        except KeyError:
+            raise AnsibleError("config missing client_id, a required option")
+        try:
+            client_secret = config['client_secret']
+            if len(client_secret) != 64:
+                raise AnsibleError("client_secret must be 64 characters long")
+        except KeyError:
+            raise AnsibleError("config missing client_secret, a required option")
+ return True
+
+ def _set_credentials(self):
+ '''
+ :param config_data: contents of the inventory config file
+ '''
+ self.client_id = self.get_option('client_id')
+ self.client_secret = self.get_option('client_secret')
+
+ def _authenticate(self):
+ payload = json.dumps(
+ {
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "grant_type": "client_credentials",
+ }
+ )
+ headers = {
+ "Content-Type": "application/json",
+ }
+ resp = open_url(
+ self.api_host + '/identity/v1/oauth2/token',
+ headers=headers,
+ data=payload,
+ method="POST"
+ )
+ status_code = resp.code
+ if status_code == 200:
+ body = resp.read()
+ self.auth_token = json.loads(body)["access_token"]
+
+ def _query(self):
+ results = []
+ workloads = []
+ self._authenticate()
+ for stack_slug in self.stack_slugs:
+ try:
+ workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads')
+ except Exception:
+ raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc())
+ for workload in workloads:
+ try:
+ workload_instances = self._stackpath_query_get_list(
+ self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances'
+ )
+ except Exception:
+ raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc())
+ for instance in workload_instances:
+ if instance["phase"] == "RUNNING":
+ instance["stackSlug"] = stack_slug
+ instance["workloadId"] = workload["id"]
+ instance["workloadSlug"] = workload["slug"]
+ instance["cityCode"] = instance["location"]["cityCode"]
+ instance["countryCode"] = instance["location"]["countryCode"]
+ instance["continent"] = instance["location"]["continent"]
+ instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
+ try:
+ if instance[self.hostname_key]:
+ results.append(instance)
+ except KeyError:
+ pass
+ return results
+
+ def _populate(self, instances):
+ for instance in instances:
+ for group_key in self.group_keys:
+ group = group_key + "_" + instance[group_key]
+ group = group.lower().replace(" ", "_").replace("-", "_")
+ self.inventory.add_group(group)
+ self.inventory.add_host(instance[self.hostname_key],
+ group=group)
+
+ def _stackpath_query_get_list(self, url):
+ self._authenticate()
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer " + self.auth_token,
+ }
+ next_page = True
+ result = []
+ cursor = '-1'
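+        # page through the results using the API's cursor-based pagination;
+        # a cursor of '-1' requests the first page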
+ while next_page:
+ resp = open_url(
+ url + '?page_request.first=10&page_request.after=%s' % cursor,
+ headers=headers,
+ method="GET"
+ )
+ status_code = resp.code
+ if status_code == 200:
+ body = resp.read()
+ body_json = json.loads(body)
+ result.extend(body_json["results"])
+ next_page = body_json["pageInfo"]["hasNextPage"]
+ if next_page:
+ cursor = body_json["pageInfo"]["endCursor"]
+ return result
+
+ def _get_stack_slugs(self, stacks):
+ self.stack_slugs = [stack["slug"] for stack in stacks]
+
+ def verify_file(self, path):
+ '''
+        :param path: the path to the inventory config file
+        :return: True if the file name matches the expected pattern, False otherwise
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
+ return True
+ display.debug(
+ "stackpath_compute inventory filename must end with \
+ 'stackpath_compute.yml' or 'stackpath_compute.yaml'"
+ )
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ config = self._read_config_data(path)
+ self._validate_config(config)
+ self._set_credentials()
+
+ # get user specifications
+ self.use_internal_ip = self.get_option('use_internal_ip')
+ if self.use_internal_ip:
+ self.hostname_key = "ipAddress"
+ else:
+ self.hostname_key = "externalIpAddress"
+
+ self.stack_slugs = self.get_option('stack_slugs')
+ if not self.stack_slugs:
+ try:
+ stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks')
+ self._get_stack_slugs(stacks)
+ except Exception:
+ raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc())
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query()
+
+ self._populate(results)
+
+ # If the cache has expired/doesn't exist or
+ # if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ try:
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+ except Exception:
+ raise AnsibleError("Failed to populate data: %s" % traceback.format_exc())
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/virtualbox.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/virtualbox.py
new file mode 100644
index 00000000..391a83c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/virtualbox.py
@@ -0,0 +1,283 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: virtualbox
+ plugin_type: inventory
+ short_description: virtualbox inventory source
+ description:
+ - Get inventory hosts from the local virtualbox installation.
+ - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
+ - The inventory_hostname is always the 'Name' of the virtualbox instance.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'virtualbox' plugin
+ required: True
+ choices: ['virtualbox', 'community.general.virtualbox']
+ running_only:
+ description: toggles showing all vms vs only those currently running
+ type: boolean
+ default: False
+ settings_password_file:
+ description: provide a file containing the settings password (equivalent to --settingspwfile)
+ network_info_path:
+ description: property path to query for network information (ansible_host)
+ default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
+ query:
+ description: create vars from virtualbox properties
+ type: dictionary
+ default: {}
+'''
+
+EXAMPLES = '''
+# file must be named vbox.yaml or vbox.yml
+simple_config_file:
+ plugin: community.general.virtualbox
+    settings_password_file: /etc/virtualbox/secrets
+ query:
+ logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
+ compose:
+ ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
+
+# add hosts to the group 'container' if 'minis' appears in the inventory hostname (all minishift vms match)
+plugin: community.general.virtualbox
+groups:
+ container: "'minis' in (inventory_hostname)"
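+
+# use a different guest property for ansible_host and pull extra guest
+# properties into hostvars (property paths are illustrative)
+plugin: community.general.virtualbox
+network_info_path: "/VirtualBox/GuestInfo/Net/1/V4/IP"
+query:
+  os_product: /VirtualBox/GuestInfo/OS/Product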
+'''
+
+import os
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.process import get_bin_path
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using local virtualbox. '''
+
+ NAME = 'community.general.virtualbox'
+ VBOX = "VBoxManage"
+
+ def __init__(self):
+ self._vbox_path = None
+ super(InventoryModule, self).__init__()
+
+ def _query_vbox_data(self, host, property_path):
+ ret = None
+ try:
+ cmd = [self._vbox_path, b'guestproperty', b'get',
+ to_bytes(host, errors='surrogate_or_strict'),
+ to_bytes(property_path, errors='surrogate_or_strict')]
+ x = Popen(cmd, stdout=PIPE)
+ ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict')
+ if 'Value' in ipinfo:
+ a, ip = ipinfo.split(':', 1)
+ ret = ip.strip()
+ except Exception:
+ pass
+ return ret
+
+ def _set_variables(self, hostvars):
+
+ # set vars in inventory from hostvars
+ for host in hostvars:
+
+ query = self.get_option('query')
+ # create vars from vbox properties
+ if query and isinstance(query, MutableMapping):
+ for varname in query:
+ hostvars[host][varname] = self._query_vbox_data(host, query[varname])
+
+ strict = self.get_option('strict')
+
+ # create composite vars
+ self._set_composite_vars(self.get_option('compose'), hostvars[host], host, strict=strict)
+
+ # actually update inventory
+ for key in hostvars[host]:
+ self.inventory.set_variable(host, key, hostvars[host][key])
+
+ # constructed groups based on conditionals
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars[host], host, strict=strict)
+
+ # constructed keyed_groups
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict)
+
+ def _populate_from_cache(self, source_data):
+ hostvars = source_data.pop('_meta', {}).get('hostvars', {})
+ for group in source_data:
+ if group == 'all':
+ continue
+ else:
+ group = self.inventory.add_group(group)
+ hosts = source_data[group].get('hosts', [])
+ for host in hosts:
+ self._populate_host_vars([host], hostvars.get(host, {}), group)
+ self.inventory.add_child('all', group)
+ if not source_data:
+ for host in hostvars:
+ self.inventory.add_host(host)
+ self._populate_host_vars([host], hostvars.get(host, {}))
+
+ def _populate_from_source(self, source_data, using_current_cache=False):
+ if using_current_cache:
+ self._populate_from_cache(source_data)
+ return source_data
+
+ cacheable_results = {'_meta': {'hostvars': {}}}
+
+ hostvars = {}
+ prevkey = pref_k = ''
+ current_host = None
+
+ # needed to possibly set ansible_host
+ netinfo = self.get_option('network_info_path')
+
+ for line in source_data:
+ line = to_text(line)
+ if ':' not in line:
+ continue
+ try:
+ k, v = line.split(':', 1)
+ except Exception:
+                # skip lines that cannot be split
+ continue
+
+ if k.strip() == '':
+ # skip empty
+ continue
+
+ v = v.strip()
+ # found host
+ if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
+ current_host = v
+ if current_host not in hostvars:
+ hostvars[current_host] = {}
+ self.inventory.add_host(current_host)
+
+ # try to get network info
+ netdata = self._query_vbox_data(current_host, netinfo)
+ if netdata:
+ self.inventory.set_variable(current_host, 'ansible_host', netdata)
+
+ # found groups
+ elif k == 'Groups':
+ for group in v.split('/'):
+ if group:
+ group = self.inventory.add_group(group)
+ self.inventory.add_child(group, current_host)
+ if group not in cacheable_results:
+ cacheable_results[group] = {'hosts': []}
+ cacheable_results[group]['hosts'].append(current_host)
+ continue
+
+ else:
+ # found vars, accumulate in hostvars for clean inventory set
+ pref_k = 'vbox_' + k.strip().replace(' ', '_')
+ if k.startswith(' '):
+ if prevkey not in hostvars[current_host]:
+ hostvars[current_host][prevkey] = {}
+ hostvars[current_host][prevkey][pref_k] = v
+ else:
+ if v != '':
+ hostvars[current_host][pref_k] = v
+ if self._ungrouped_host(current_host, cacheable_results):
+ if 'ungrouped' not in cacheable_results:
+ cacheable_results['ungrouped'] = {'hosts': []}
+ cacheable_results['ungrouped']['hosts'].append(current_host)
+
+ prevkey = pref_k
+
+ self._set_variables(hostvars)
+ for host in hostvars:
+ h = self.inventory.get_host(host)
+ cacheable_results['_meta']['hostvars'][h.name] = h.vars
+
+ return cacheable_results
+
+ def _ungrouped_host(self, host, inventory):
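+        # walk the (possibly nested) inventory structure and report True only
+        # if the host appears in no group's host list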
+ def find_host(host, inventory):
+ for k, v in inventory.items():
+ if k == '_meta':
+ continue
+ if isinstance(v, dict):
+ yield self._ungrouped_host(host, v)
+ elif isinstance(v, list):
+ yield host not in v
+ yield True
+
+ return all([found_host for found_host in find_host(host, inventory)])
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ try:
+ self._vbox_path = get_bin_path(self.VBOX)
+ except ValueError as e:
+ raise AnsibleParserError(e)
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ cache_key = self.get_cache_key(path)
+
+ config_data = self._read_config_data(path)
+
+ # set _options from config data
+ self._consume_options(config_data)
+
+ source_data = None
+ if cache:
+ cache = self.get_option('cache')
+
+ update_cache = False
+ if cache:
+ try:
+ source_data = self._cache[cache_key]
+ except KeyError:
+ update_cache = True
+
+ if not source_data:
+ b_pwfile = to_bytes(self.get_option('settings_password_file'), errors='surrogate_or_strict', nonstring='passthru')
+ running = self.get_option('running_only')
+
+ # start getting data
+ cmd = [self._vbox_path, b'list', b'-l']
+ if running:
+ cmd.append(b'runningvms')
+ else:
+ cmd.append(b'vms')
+
+ if b_pwfile and os.path.exists(b_pwfile):
+ cmd.append(b'--settingspwfile')
+ cmd.append(b_pwfile)
+
+ try:
+ p = Popen(cmd, stdout=PIPE)
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+
+ source_data = p.stdout.read().splitlines()
+
+ using_current_cache = cache and not update_cache
+ cacheable_results = self._populate_from_source(source_data, using_current_cache)
+
+ if update_cache:
+ self._cache[cache_key] = cacheable_results
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cartesian.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cartesian.py
new file mode 100644
index 00000000..46f192b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cartesian.py
@@ -0,0 +1,77 @@
+# (c) 2013, Bradley Young <young.bradley@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: cartesian
+ short_description: returns the cartesian product of lists
+ description:
+ - Takes the input lists and returns a list that represents the product of the input lists.
+    - It is clearer with an example; it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
+ You can see the exact syntax in the examples section.
+ options:
+ _raw:
+ description:
+ - a set of lists
+ required: True
+'''
+
+EXAMPLES = """
+- name: Example of the transformation described above
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}"
+
+- name: loops over the cartesian product of the supplied lists
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cartesian:
+ - "{{list1}}"
+ - "{{list2}}"
+ - [1,2,3,4,5,6]
+"""
+
+RETURN = """
+ _list:
+ description:
+ - list of lists composed of elements of the input lists
+ type: list
+ elements: list
+"""
+
+from itertools import product
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+ """
+ Create the cartesian product of lists
+ """
+
+ def _lookup_variables(self, terms):
+ """
+ Turn this:
+ terms == ["1,2,3", "a,b"]
+ into this:
+ terms == [[1,2,3], [a, b]]
+ """
+ results = []
+ for x in terms:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+
+ terms = self._lookup_variables(terms)
+
+ my_list = terms[:]
+        if len(my_list) == 0:
+            raise AnsibleError("with_cartesian requires at least one list")
+
+ return [self._flatten(x) for x in product(*my_list)]
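+
+# A rough pure-Python equivalent of what run() returns, for illustration
+# (plain lists here rather than templated Ansible terms):
+#
+#   from itertools import product
+#   [list(t) for t in product([1, 2, 3], ['a', 'b'])]
+#   # -> [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b'], [3, 'a'], [3, 'b']]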
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/chef_databag.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/chef_databag.py
new file mode 100644
index 00000000..c3263e88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/chef_databag.py
@@ -0,0 +1,104 @@
+# (c) 2016, Josh Bradley <jbradley(at)digitalocean.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: chef_databag
+ short_description: fetches data from a Chef Databag
+ description:
+    - "This is a lookup plugin that provides access to Chef data bags using the pychef package.
+      It interfaces with the Chef server API and uses the same methods as knife or chef-client to find a configuration file to load parameters from,
+      starting from either the given base path or the current working directory.
+      The lookup order mirrors the one from Chef; all folders in the base path are walked back looking for the following configuration
+      files in order: .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb"
+ requirements:
+ - "pychef (python library https://pychef.readthedocs.io `pip install pychef`)"
+ options:
+ name:
+ description:
+ - Name of the databag
+ required: True
+ item:
+ description:
+ - Item to fetch
+ required: True
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug:
+ msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - The value from the databag.
+ type: list
+ elements: dict
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.parsing.splitter import parse_kv
+
+try:
+ import chef
+ HAS_CHEF = True
+except ImportError as missing_module:
+ HAS_CHEF = False
+
+
+class LookupModule(LookupBase):
+ """
+ Chef data bag lookup module
+ """
+ def __init__(self, loader=None, templar=None, **kwargs):
+
+ super(LookupModule, self).__init__(loader, templar, **kwargs)
+
+ # setup vars for data bag name and data bag item
+ self.name = None
+ self.item = None
+
+ def parse_kv_args(self, args):
+ """
+ parse key-value style arguments
+ """
+
+ for arg in ["name", "item"]:
+ try:
+ arg_raw = args.pop(arg, None)
+ if arg_raw is None:
+ continue
+ parsed = str(arg_raw)
+ setattr(self, arg, parsed)
+ except ValueError:
+ raise AnsibleError(
+ "can't parse arg {0}={1} as string".format(arg, arg_raw)
+ )
+ if args:
+            raise AnsibleError(
+                "unrecognized arguments to chef_databag lookup: %r" % args.keys()
+            )
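+
+    # For illustration, a sketch of the expected flow (bag and item names
+    # invented): parse_kv("name=users item=admin") returns
+    # {'name': 'users', 'item': 'admin'}, after which parse_kv_args() sets
+    # self.name = 'users' and self.item = 'admin'.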
+
+ def run(self, terms, variables=None, **kwargs):
+ # Ensure pychef has been loaded
+ if not HAS_CHEF:
+ raise AnsibleError('PyChef needed for lookup plugin, try `pip install pychef`')
+
+ for term in terms:
+ self.parse_kv_args(parse_kv(term))
+
+ api_object = chef.autoconfigure()
+
+ if not isinstance(api_object, chef.api.ChefAPI):
+ raise AnsibleError('Unable to connect to Chef Server API.')
+
+ data_bag_object = chef.DataBag(self.name)
+
+ data_bag_item = data_bag_object[self.item]
+
+ return [dict(data_bag_item)]
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/consul_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/consul_kv.py
new file mode 100644
index 00000000..91c50595
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/consul_kv.py
@@ -0,0 +1,191 @@
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: consul_kv
+ short_description: Fetch metadata from a Consul key value store.
+ description:
+      - Look up metadata for a playbook from the key-value store in a Consul cluster.
+        Values can easily be set in the KV store with simple REST commands, for example
+      - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
+ requirements:
+ - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
+ options:
+ _raw:
+ description: List of key(s) to retrieve.
+ type: list
+ recurse:
+ type: boolean
+ description: If true, will retrieve all the values that have the given key as prefix.
+ default: False
+ index:
+ description:
+        - If the key has a value with the specified index, then this is returned, allowing access to historical values.
+ datacenter:
+ description:
+ - Retrieve the key from a consul datacenter other than the default for the consul host.
+ token:
+      description: The ACL token to allow access to restricted values.
+ host:
+ default: localhost
+ description:
+ - The target to connect to, must be a resolvable address.
+ Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
+ - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: host
+ port:
+ description:
+ - The port of the target host to connect to.
+ - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ default: 8500
+ scheme:
+ default: http
+ description:
+ - Whether to use http or https.
+ - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ validate_certs:
+ default: True
+ description: Whether to verify the ssl connection or not.
+ env:
+ - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+ ini:
+ - section: lookup_consul
+ key: validate_certs
+ client_cert:
+ description: The client cert to verify the ssl connection.
+ env:
+ - name: ANSIBLE_CONSUL_CLIENT_CERT
+ ini:
+ - section: lookup_consul
+ key: client_cert
+ url:
+ description: "The target to connect to, should look like this: C(https://my.consul.server:8500)."
+ type: str
+ version_added: 1.0.0
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: url
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to/retrieve'
+
+  - name: Parameters can be provided after the key to be more specific about what to retrieve
+ ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+  - name: Retrieving a KV from a remote cluster on a non-default port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - Value(s) stored in consul.
+ type: dict
+"""
+
+import os
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+
+try:
+ import consul
+
+ HAS_CONSUL = True
+except ImportError as e:
+ HAS_CONSUL = False
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if not HAS_CONSUL:
+ raise AnsibleError(
+ 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
+
+ # get options
+ self.set_options(direct=kwargs)
+
+ scheme = self.get_option('scheme')
+ host = self.get_option('host')
+ port = self.get_option('port')
+ url = self.get_option('url')
+ if url is not None:
+ u = urlparse(url)
+ if u.scheme:
+ scheme = u.scheme
+ host = u.hostname
+ if u.port is not None:
+ port = u.port
+
+ validate_certs = self.get_option('validate_certs')
+ client_cert = self.get_option('client_cert')
+
+ values = []
+ try:
+ for term in terms:
+ params = self.parse_params(term)
+ consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs, cert=client_cert)
+
+ results = consul_api.kv.get(params['key'],
+ token=params['token'],
+ index=params['index'],
+ recurse=params['recurse'],
+ dc=params['datacenter'])
+ if results[1]:
+ # responds with a single or list of result maps
+ if isinstance(results[1], list):
+ for r in results[1]:
+ values.append(to_text(r['Value']))
+ else:
+ values.append(to_text(results[1]['Value']))
+ except Exception as e:
+ raise AnsibleError(
+ "Error locating '%s' in kv store. Error was %s" % (term, e))
+
+ return values
+
+ def parse_params(self, term):
+ params = term.split(' ')
+
+ paramvals = {
+ 'key': params[0],
+ 'token': None,
+ 'recurse': False,
+ 'index': None,
+ 'datacenter': None
+ }
+
+ # parameters specified?
+ try:
+ for param in params[1:]:
+ if param and len(param) > 0:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
+ paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+
+ return paramvals
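+
+    # For illustration (token value invented), a term such as
+    # 'key/to recurse=true token=E6C060A9' parses to:
+    #   {'key': 'key/to', 'token': 'E6C060A9', 'recurse': 'true',
+    #    'index': None, 'datacenter': None}
+    # Values remain strings; 'recurse' is not coerced to a boolean here.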
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/credstash.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/credstash.py
new file mode 100644
index 00000000..6ab4d3bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/credstash.py
@@ -0,0 +1,125 @@
+# (c) 2015, Ensighten <infra@ensighten.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: credstash
+ short_description: retrieve secrets from Credstash on AWS
+ requirements:
+ - credstash (python library)
+ description:
+ - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
+ options:
+ _terms:
+      description: term or list of terms to look up in the credential store
+ type: list
+ required: True
+    table:
+      description: name of the credstash table to query
+      default: 'credential-store'
+ version:
+ description: Credstash version
+ region:
+ description: AWS region
+ profile_name:
+ description: AWS profile to use for authentication
+ env:
+ - name: AWS_PROFILE
+ aws_access_key_id:
+ description: AWS access key ID
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_access_key:
+ description: AWS access key
+ env:
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_session_token:
+ description: AWS session token
+ env:
+ - name: AWS_SESSION_TOKEN
+'''
+
+EXAMPLES = """
+- name: first use credstash to store your secrets
+ ansible.builtin.shell: credstash put my-github-password secure123
+
+- name: "Test credstash lookup plugin -- get my github password"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-github-password') }}"
+
+- name: "Test credstash lookup plugin -- get my other password from us-west-1"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-other-password', region='us-west-1') }}"
+
+- name: "Test credstash lookup plugin -- get the company's github password"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'company-github-password', table='company-passwords') }}"
+
+- name: Example play using the 'context' feature
+ hosts: localhost
+ vars:
+ context:
+ app: my_app
+ environment: production
+ tasks:
+
+ - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
+
+ - name: "Test credstash lookup plugin -- get the password with a context defined here"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - Value(s) stored in Credstash.
+ type: str
+"""
+
+import os
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+CREDSTASH_INSTALLED = False
+
+try:
+ import credstash
+ CREDSTASH_INSTALLED = True
+except ImportError:
+ CREDSTASH_INSTALLED = False
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+
+ if not CREDSTASH_INSTALLED:
+ raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
+
+ ret = []
+ for term in terms:
+ try:
+ version = kwargs.pop('version', '')
+ region = kwargs.pop('region', None)
+ table = kwargs.pop('table', 'credential-store')
+ profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
+ aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
+ aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
+ aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
+ kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
+ val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
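+                # Note: whatever remains in kwargs after the named options
+                # are popped above is passed as the encryption context
+                # (context=kwargs).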
+ except credstash.ItemNotFound:
+ raise AnsibleError('Key {0} not found'.format(term))
+ except Exception as e:
+ raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+ ret.append(val)
+
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py
new file mode 100644
index 00000000..449cb916
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py
@@ -0,0 +1,182 @@
+# (c) 2017, Edward Nunez <edward.nunez@cyberark.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: cyberarkpassword
+ short_description: get secrets from CyberArk AIM
+ requirements:
+ - CyberArk AIM tool installed
+ description:
+ - Get secrets from CyberArk AIM.
+  options:
+ _command:
+ description: Cyberark CLI utility.
+ env:
+ - name: AIM_CLIPASSWORDSDK_CMD
+ default: '/opt/CARKaim/sdk/clipasswordsdk'
+ appid:
+ description: Defines the unique ID of the application that is issuing the password request.
+ required: True
+ query:
+ description: Describes the filter criteria for the password retrieval.
+ required: True
+ output:
+ description:
+ - Specifies the desired output fields separated by commas.
+ - "They could be: Password, PassProps.<property>, PasswordChangeInProcess"
+ default: 'password'
+ _extra:
+      description: For extra_params values, please check the parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide".
+ notes:
+ - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe.
+'''
+
+EXAMPLES = """
+ - name: passing options to the lookup
+ ansible.builtin.debug:
+ msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
+ vars:
+ cyquery:
+ appid: "app_ansible"
+ query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
+ output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
+
+
+ - name: used in a loop
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cyberarkpassword:
+ appid: 'app_ansible'
+ query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
+ output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
+"""
+
+RETURN = """
+ password:
+    description:
+      - The actual value stored.
+  passprops:
+    description: Properties assigned to the entry.
+    type: dict
+  passwordchangeinprocess:
+    description: Whether a password change is in process.
+"""
+
+import os
+import subprocess
+from subprocess import PIPE
+from subprocess import Popen
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.parsing.splitter import parse_kv
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.utils.display import Display
+
+display = Display()
+
+CLIPASSWORDSDK_CMD = os.getenv('AIM_CLIPASSWORDSDK_CMD', '/opt/CARKaim/sdk/clipasswordsdk')
+
+
+class CyberarkPassword:
+
+ def __init__(self, appid=None, query=None, output=None, **kwargs):
+
+ self.appid = appid
+ self.query = query
+ self.output = output
+
+ # Support for Generic parameters to be able to specify
+ # FailRequestOnPasswordChange, Queryformat, Reason, etc.
+ self.extra_parms = []
+ for key, value in kwargs.items():
+ self.extra_parms.append('-p')
+ self.extra_parms.append("%s=%s" % (key, value))
+
+ if self.appid is None:
+ raise AnsibleError("CyberArk Error: No Application ID specified")
+ if self.query is None:
+ raise AnsibleError("CyberArk Error: No Vault query specified")
+
+ if self.output is None:
+ # If no output is specified, return at least the password
+ self.output = "password"
+ else:
+ # To avoid reference issues/confusion to values, all
+ # output 'keys' will be in lowercase.
+ self.output = self.output.lower()
+
+ self.b_delimiter = b"@#@" # Known delimiter to split output results
+
+ def get(self):
+
+ result_dict = {}
+
+ try:
+ all_parms = [
+ CLIPASSWORDSDK_CMD,
+ 'GetPassword',
+ '-p', 'AppDescs.AppID=%s' % self.appid,
+ '-p', 'Query=%s' % self.query,
+ '-o', self.output,
+ '-d', self.b_delimiter]
+ all_parms.extend(self.extra_parms)
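+
+            # For illustration, the assembled command line looks roughly
+            # like this (using the values from the EXAMPLES above):
+            #   /opt/CARKaim/sdk/clipasswordsdk GetPassword \
+            #       -p AppDescs.AppID=app_ansible \
+            #       -p 'Query=safe=CyberArk_Passwords;folder=root;object=AdminPass' \
+            #       -o password -d '@#@'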
+
+ b_credential = b""
+ b_all_params = [to_bytes(v) for v in all_parms]
+ tmp_output, tmp_error = Popen(b_all_params, stdout=PIPE, stderr=PIPE, stdin=PIPE).communicate()
+
+ if tmp_output:
+ b_credential = to_bytes(tmp_output)
+
+ if tmp_error:
+ raise AnsibleError("ERROR => %s " % (tmp_error))
+
+ if b_credential and b_credential.endswith(b'\n'):
+ b_credential = b_credential[:-1]
+
+ output_names = self.output.split(",")
+ output_values = b_credential.split(self.b_delimiter)
+
+ for i in range(len(output_names)):
+ if output_names[i].startswith("passprops."):
+ if "passprops" not in result_dict:
+ result_dict["passprops"] = {}
+ output_prop_name = output_names[i][10:]
+ result_dict["passprops"][output_prop_name] = to_native(output_values[i])
+ else:
+ result_dict[output_names[i]] = to_native(output_values[i])
+
+ except subprocess.CalledProcessError as e:
+ raise AnsibleError(e.output)
+ except OSError as e:
+ raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror))
+
+ return [result_dict]
+
+
+class LookupModule(LookupBase):
+
+    """
+    Look up secrets from CyberArk AIM via the clipasswordsdk command-line tool.
+    """
+
+ def run(self, terms, variables=None, **kwargs):
+
+ display.vvvv("%s" % terms)
+ if isinstance(terms, list):
+ return_values = []
+ for term in terms:
+ display.vvvv("Term: %s" % term)
+ cyberark_conn = CyberarkPassword(**term)
+ return_values.append(cyberark_conn.get())
+ return return_values
+ else:
+ cyberark_conn = CyberarkPassword(**terms)
+ result = cyberark_conn.get()
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dig.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dig.py
new file mode 100644
index 00000000..6dc8fc6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dig.py
@@ -0,0 +1,356 @@
+# (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: dig
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ short_description: query DNS using the dnspython library
+ requirements:
+ - dnspython (python library, http://www.dnspython.org/)
+ description:
+    - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name).
+      It is possible to look up any DNS record in this manner.
+    - There are a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
+      It is also possible to explicitly specify the DNS server(s) to use for lookups.
+    - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with an FQDN.
+    - In addition to the (default) A record, it is also possible to specify a different record type that should be queried.
+      This can be done by either passing in an additional parameter of the form qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
+    - If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
+      In such cases you may want to pass the option wantlist=True to the plugin, which will result in the record values being returned as a list
+      over which you can iterate later on.
+    - By default, the lookup will rely on system-wide configured DNS servers for performing the query.
+      It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
+      This needs to be passed in as an additional parameter to the lookup.
+ options:
+ _terms:
+ description: domain(s) to query
+ qtype:
+ description: record type to query
+ default: 'A'
+ choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
+ flat:
+      description: If 0, each record is returned as a dictionary; otherwise, each record is returned as a string.
+ default: 1
+ notes:
+    - ALL is not a record type per se; it merely means that the listed fields are available for any record results you retrieve in the form of a dictionary.
+ - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
+ - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
+ Syntax for specifying the record type is shown in the examples below.
+    - The trailing dot in most of the examples listed is purely optional, but is specified for the sake of completeness/correctness.
+'''
+
+EXAMPLES = """
+- name: Simple A record (IPV4 address) lookup for example.com
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.com.')}}"
+
+- name: "The TXT record for example.org."
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}"
+
+- name: "The TXT record for example.org, alternative syntax."
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.org./TXT') }}"
+
+- name: use in a loop
+ ansible.builtin.debug:
+ msg: "MX record for gmail.com {{ item }}"
+ with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}"
+
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}"
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}"
+- ansible.builtin.debug:
+    msg: "Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('community.general.dig', 'example.com', '@198.51.100.23') }}"
+
+- ansible.builtin.debug:
+ msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
+ with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
+"""
+
+RETURN = """
+ _list:
+ description:
+      - List of composed strings or dictionaries with key and value.
+      - If a dictionary, the fields show the keys returned depending on the query type.
+ type: list
+ elements: raw
+ contains:
+ ALL:
+ description:
+ - owner, ttl, type
+ A:
+ description:
+ - address
+ AAAA:
+ description:
+ - address
+ CNAME:
+ description:
+ - target
+ DNAME:
+ description:
+ - target
+ DLV:
+ description:
+ - algorithm, digest_type, key_tag, digest
+ DNSKEY:
+ description:
+ - flags, algorithm, protocol, key
+ DS:
+ description:
+ - algorithm, digest_type, key_tag, digest
+ HINFO:
+ description:
+ - cpu, os
+ LOC:
+ description:
+ - latitude, longitude, altitude, size, horizontal_precision, vertical_precision
+ MX:
+ description:
+ - preference, exchange
+ NAPTR:
+ description:
+ - order, preference, flags, service, regexp, replacement
+ NS:
+ description:
+ - target
+ NSEC3PARAM:
+ description:
+ - algorithm, flags, iterations, salt
+ PTR:
+ description:
+ - target
+ RP:
+ description:
+ - mbox, txt
+ SOA:
+ description:
+ - mname, rname, serial, refresh, retry, expire, minimum
+ SPF:
+ description:
+ - strings
+ SRV:
+ description:
+ - priority, weight, port, target
+ SSHFP:
+ description:
+ - algorithm, fp_type, fingerprint
+ TLSA:
+ description:
+ - usage, selector, mtype, cert
+ TXT:
+ description:
+ - strings
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native
+import socket
+
+try:
+ import dns.exception
+ import dns.name
+ import dns.resolver
+ import dns.reversename
+ import dns.rdataclass
+ from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC,
+ MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
+ HAVE_DNS = True
+except ImportError:
+ HAVE_DNS = False
+
+
+def make_rdata_dict(rdata):
+ ''' While the 'dig' lookup plugin supports anything which dnspython supports
+ out of the box, the following supported_types list describes which
+ DNS query types we can convert to a dict.
+
+ Note: adding support for RRSIG is hard work. :)
+ '''
+ supported_types = {
+ A: ['address'],
+ AAAA: ['address'],
+ CNAME: ['target'],
+ DNAME: ['target'],
+ DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'],
+ DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
+ DS: ['algorithm', 'digest_type', 'key_tag', 'digest'],
+ HINFO: ['cpu', 'os'],
+ LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
+ MX: ['preference', 'exchange'],
+ NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
+ NS: ['target'],
+ NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'],
+ PTR: ['target'],
+ RP: ['mbox', 'txt'],
+ # RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
+ SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
+ SPF: ['strings'],
+ SRV: ['priority', 'weight', 'port', 'target'],
+ SSHFP: ['algorithm', 'fp_type', 'fingerprint'],
+ TLSA: ['usage', 'selector', 'mtype', 'cert'],
+ TXT: ['strings'],
+ }
+
+ rd = {}
+
+ if rdata.rdtype in supported_types:
+ fields = supported_types[rdata.rdtype]
+ for f in fields:
+ val = rdata.__getattribute__(f)
+
+ if isinstance(val, dns.name.Name):
+ val = dns.name.Name.to_text(val)
+
+ if rdata.rdtype == DLV and f == 'digest':
+ val = dns.rdata._hexify(rdata.digest).replace(' ', '')
+ if rdata.rdtype == DS and f == 'digest':
+ val = dns.rdata._hexify(rdata.digest).replace(' ', '')
+ if rdata.rdtype == DNSKEY and f == 'key':
+ val = dns.rdata._base64ify(rdata.key).replace(' ', '')
+ if rdata.rdtype == NSEC3PARAM and f == 'salt':
+ val = dns.rdata._hexify(rdata.salt).replace(' ', '')
+ if rdata.rdtype == SSHFP and f == 'fingerprint':
+ val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
+ if rdata.rdtype == TLSA and f == 'cert':
+ val = dns.rdata._hexify(rdata.cert).replace(' ', '')
+
+ rd[f] = val
+
+ return rd
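+
+# For illustration (values invented), an MX rdata maps to something like
+# {'preference': 10, 'exchange': 'mail.example.com.'}; run() below adds the
+# common 'owner', 'type', 'ttl' and 'class' keys when flat=0.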
+
+
+# ==============================================================
+# dig: Lookup DNS records
+#
+# --------------------------------------------------------------
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ '''
+ terms contains a string with things to `dig' for. We support the
+ following formats:
+ example.com # A record
+ example.com qtype=A # same
+ example.com/TXT # specific qtype
+ example.com qtype=txt # same
+ 192.0.2.23/PTR # reverse PTR
+ ^^ shortcut for 23.2.0.192.in-addr.arpa/PTR
+ example.net/AAAA @nameserver # query specified server
+ ^^^ can be comma-sep list of names/addresses
+
+ ... flat=0 # returns a dict; default is 1 == string
+ '''
+
+ if HAVE_DNS is False:
+ raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed")
+
+ # Create Resolver object so that we can set NS if necessary
+ myres = dns.resolver.Resolver(configure=True)
+ edns_size = 4096
+ myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
+
+ domain = None
+ qtype = 'A'
+ flat = True
+ rdclass = dns.rdataclass.from_text('IN')
+
+ for t in terms:
+ if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok.
+                nsset = t[1:].split(',')
+                nameservers = []
+                for ns in nsset:
+ # Check if we have a valid IP address. If so, use that, otherwise
+ # try to resolve name to address using system's resolver. If that
+ # fails we bail out.
+ try:
+ socket.inet_aton(ns)
+ nameservers.append(ns)
+ except Exception:
+ try:
+ nsaddr = dns.resolver.query(ns)[0].address
+ nameservers.append(nsaddr)
+ except Exception as e:
+ raise AnsibleError("dns lookup NS: %s" % to_native(e))
+ myres.nameservers = nameservers
+ continue
+            if '=' in t:
+                try:
+                    opt, arg = t.split('=', 1)
+                except Exception:
+                    continue
+
+ if opt == 'qtype':
+ qtype = arg.upper()
+ elif opt == 'flat':
+ flat = int(arg)
+ elif opt == 'class':
+ try:
+ rdclass = dns.rdataclass.from_text(arg)
+ except Exception as e:
+ raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
+
+ continue
+
+ if '/' in t:
+ try:
+ domain, qtype = t.split('/')
+ except Exception:
+ domain = t
+ else:
+ domain = t
+
+ # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)
+
+ ret = []
+
+ if qtype.upper() == 'PTR':
+ try:
+ n = dns.reversename.from_address(domain)
+ domain = n.to_text()
+ except dns.exception.SyntaxError:
+ pass
+ except Exception as e:
+ raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
+
+ try:
+ answers = myres.query(domain, qtype, rdclass=rdclass)
+ for rdata in answers:
+ s = rdata.to_text()
+ if qtype.upper() == 'TXT':
+ s = s[1:-1] # Strip outside quotes on TXT rdata
+
+ if flat:
+ ret.append(s)
+ else:
+ try:
+ rd = make_rdata_dict(rdata)
+ rd['owner'] = answers.canonical_name.to_text()
+ rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
+ rd['ttl'] = answers.rrset.ttl
+ rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
+
+ ret.append(rd)
+ except Exception as e:
+ ret.append(str(e))
+
+ except dns.resolver.NXDOMAIN:
+ ret.append('NXDOMAIN')
+ except dns.resolver.NoAnswer:
+ ret.append("")
+ except dns.resolver.Timeout:
+ ret.append('')
+ except dns.exception.DNSException as e:
+ raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
+
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dnstxt.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dnstxt.py
new file mode 100644
index 00000000..19e28e1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dnstxt.py
@@ -0,0 +1,96 @@
+# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: dnstxt
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+    short_description: query the DNS TXT records of one or more domains
+ requirements:
+ - dns/dns.resolver (python library)
+ description:
+ - Uses a python library to return the DNS TXT record for a domain.
+ options:
+ _terms:
+ description: domain or list of domains to query TXT records from
+ required: True
+ type: list
+'''
+
+EXAMPLES = """
+- name: show txt entry
+ ansible.builtin.debug:
+ msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}"
+
+- name: iterate over txt entries
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.dnstxt:
+ - 'test.example.com'
+ - 'other.example.com'
+ - 'last.example.com'
+
+- name: iterate over a comma-delimited DNS TXT entry
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - values returned by the DNS TXT record.
+ type: list
+"""
+
+HAVE_DNS = False
+try:
+ import dns.resolver
+ from dns.exception import DNSException
+ HAVE_DNS = True
+except ImportError:
+ pass
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+
+# ==============================================================
+# DNSTXT: DNS TXT records
+#
+# key=domainname
+# TODO: configurable resolver IPs
+# --------------------------------------------------------------
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if HAVE_DNS is False:
+ raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
+
+ ret = []
+ for term in terms:
+ domain = term.split()[0]
+ string = []
+ try:
+ answers = dns.resolver.query(domain, 'TXT')
+ for rdata in answers:
+ s = rdata.to_text()
+ string.append(s[1:-1]) # Strip outside quotes on TXT rdata
+
+ except dns.resolver.NXDOMAIN:
+ string = 'NXDOMAIN'
+ except dns.resolver.Timeout:
+ string = ''
+ except dns.resolver.NoAnswer:
+ string = ''
+ except DNSException as e:
+ raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
+
+ ret.append(''.join(string))
+
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dsv.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dsv.py
new file mode 100644
index 00000000..18165f9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dsv.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+lookup: dsv
+author: Adam Migus (@amigus) <adam@migus.org>
+short_description: Get secrets from Thycotic DevOps Secrets Vault
+version_added: 1.0.0
+description:
+ - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a
+ DSV I(tenant) using a I(client_id) and I(client_secret).
+requirements:
+ - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
+options:
+ _terms:
+ description: The path to the secret, e.g. C(/staging/servers/web1).
+ required: true
+ tenant:
+ description: The first format parameter in the default I(url_template).
+ env:
+ - name: DSV_TENANT
+ ini:
+ - section: dsv_lookup
+ key: tenant
+ required: true
+ tld:
+ default: com
+ description: The top-level domain of the tenant; the second format
+ parameter in the default I(url_template).
+ env:
+ - name: DSV_TLD
+ ini:
+ - section: dsv_lookup
+ key: tld
+ required: false
+ client_id:
+ description: The client_id with which to request the Access Grant.
+ env:
+ - name: DSV_CLIENT_ID
+ ini:
+ - section: dsv_lookup
+ key: client_id
+ required: true
+ client_secret:
+ description: The client secret associated with the specific I(client_id).
+ env:
+ - name: DSV_CLIENT_SECRET
+ ini:
+ - section: dsv_lookup
+ key: client_secret
+ required: true
+ url_template:
+ default: https://{}.secretsvaultcloud.{}/v1
+ description: The path to prepend to the base URL to form a valid REST
+ API request.
+ env:
+ - name: DSV_URL_TEMPLATE
+ ini:
+ - section: dsv_lookup
+ key: url_template
+ required: false
+"""
+
+RETURN = r"""
+_list:
+ description:
+ - One or more JSON responses to C(GET /secrets/{path}).
+ - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
+ type: list
+ elements: dict
+"""
+
+EXAMPLES = r"""
+- hosts: localhost
+ vars:
+ secret: "{{ lookup('community.general.dsv', '/test/secret') }}"
+ tasks:
+ - ansible.builtin.debug:
+ msg: 'the password is {{ secret["data"]["password"] }}'
+"""
+
+from ansible.errors import AnsibleError, AnsibleOptionsError
+
+sdk_is_missing = False
+
+try:
+ from thycotic.secrets.vault import (
+ SecretsVault,
+ SecretsVaultError,
+ )
+except ImportError:
+ sdk_is_missing = True
+
+from ansible.utils.display import Display
+from ansible.plugins.lookup import LookupBase
+
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+ @staticmethod
+ def Client(vault_parameters):
+ return SecretsVault(**vault_parameters)
+
+ def run(self, terms, variables, **kwargs):
+ if sdk_is_missing:
+ raise AnsibleError("python-dsv-sdk must be installed to use this plugin")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ vault = LookupModule.Client(
+ {
+ "tenant": self.get_option("tenant"),
+ "client_id": self.get_option("client_id"),
+ "client_secret": self.get_option("client_secret"),
+ "url_template": self.get_option("url_template"),
+ }
+ )
+ result = []
+
+ for term in terms:
+ display.debug("dsv_lookup term: %s" % term)
+ try:
+ path = term.lstrip("[/:]")
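+                # Note: str.lstrip() takes a set of characters, not a regex,
+                # so this strips any leading '[', '/', ':' or ']' characters.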
+
+ if path == "":
+ raise AnsibleOptionsError("Invalid secret path: %s" % term)
+
+ display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path)
+ result.append(vault.get_secret_json(path))
+ except SecretsVaultError as error:
+ raise AnsibleError(
+ "DevOps Secrets Vault lookup failure: %s" % error.message
+ )
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd.py
new file mode 100644
index 00000000..91724df7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd.py
@@ -0,0 +1,180 @@
+# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
+# (m) 2016, Mihai Moldovanu <mihaim@tfm.ro>
+# (m) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Jan-Piet Mens (@jpmens)
+ lookup: etcd
+ short_description: get info from an etcd server
+ description:
+ - Retrieves data from an etcd server
+ options:
+ _terms:
+ description:
+        - the list of keys to look up on the etcd server
+ type: list
+ elements: string
+ required: True
+ url:
+ description:
+        - Environment variable with the URL for the etcd server
+ default: 'http://127.0.0.1:4001'
+ env:
+ - name: ANSIBLE_ETCD_URL
+ version:
+ description:
+ - Environment variable with the etcd protocol version
+ default: 'v1'
+ env:
+ - name: ANSIBLE_ETCD_VERSION
+ validate_certs:
+ description:
+        - toggle checking that the ssl certificates are valid; you normally only want to turn this off with self-signed certs.
+ default: True
+ type: boolean
+'''
+
+EXAMPLES = '''
+- name: "a value from a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo/bar') }}"
+
+- name: "values from multiple folders on a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}"
+
+- name: "since Ansible 2.5 you can set server options inline"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - list of values associated with input keys
+ type: list
+ elements: string
+'''
+
+import json
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url
+
+# this can be made configurable, but should not use ansible.cfg
+#
+# Made module configurable from playbooks:
+# If etcd v2 is running on host 192.168.1.21 on port 2379,
+# we can use the following in a playbook to retrieve the /tfm/network/config key
+#
+# - ansible.builtin.debug: msg={{lookup('etcd','/tfm/network/config', url='http://192.168.1.21:2379' , version='v2')}}
+#
+# Example Output:
+#
+# TASK [debug] *******************************************************************
+# ok: [localhost] => {
+# "msg": {
+# "Backend": {
+# "Type": "vxlan"
+# },
+# "Network": "172.30.0.0/16",
+# "SubnetLen": 24
+# }
+# }
+#
+#
+#
+#
+
+
+class Etcd:
+ def __init__(self, url, version, validate_certs):
+ self.url = url
+ self.version = version
+ self.baseurl = '%s/%s/keys' % (self.url, self.version)
+ self.validate_certs = validate_certs
+
+ def _parse_node(self, node):
+        # This function receives the whole etcd tree. If the requested
+        # level contains child nodes, it recurses into each of them and
+        # builds a nested dict; when it reaches a leaf, it returns the
+        # value itself, so the recursion unwinds into a key-value entry
+        # at each level.
+ path = {}
+ if node.get('dir', False):
+ for n in node.get('nodes', []):
+ path[n['key'].split('/')[-1]] = self._parse_node(n)
+
+ else:
+ path = node['value']
+
+ return path
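+
+    # For illustration (shape assumed from the etcd v2 API), a node like
+    #   {'dir': True, 'nodes': [{'key': '/a/b', 'value': '1'}]}
+    # parses to {'b': '1'}, nesting one dict per directory level.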
+
+ def get(self, key):
+ url = "%s/%s?recursive=true" % (self.baseurl, key)
+ data = None
+ value = {}
+ try:
+ r = open_url(url, validate_certs=self.validate_certs)
+ data = r.read()
+ except Exception:
+ return None
+
+        # Note: folder parsing is not supported for etcd v1.
+        item = json.loads(data)
+        if self.version == 'v1':
+            # etcd v1 returns the value directly
+            if 'value' in item:
+                value = item['value']
+        else:
+            if 'node' in item:
+                # the usual etcd v2 result: parse the node tree
+                value = self._parse_node(item['node'])
+
+        if 'errorCode' in item:
+            # etcd reported an unknown entry; return an error marker
+            value = "ENOENT"
+
+ return value
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ validate_certs = self.get_option('validate_certs')
+ url = self.get_option('url')
+ version = self.get_option('version')
+
+ etcd = Etcd(url=url, version=version, validate_certs=validate_certs)
+
+ ret = []
+ for term in terms:
+ key = term.split()[0]
+ value = etcd.get(key)
+ ret.append(value)
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd3.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd3.py
new file mode 100644
index 00000000..55bbed2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd3.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
+ version_added: '0.2.0'
+ lookup: etcd3
+ short_description: Get key values from etcd3 server
+ description:
+        - Retrieves key values and/or key prefixes from an etcd3 server using its native gRPC API.
+        - Tries to reuse M(community.general.etcd3) options for connection parameters, but adds support for some C(ETCDCTL_*) environment variables.
+ - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
+
+ options:
+ _terms:
+ description:
+ - The list of keys (or key prefixes) to look up on the etcd3 server.
+ type: list
+ elements: str
+ required: True
+ prefix:
+ description:
+ - Look for key or prefix key.
+ type: bool
+ default: False
+ endpoints:
+ description:
+ - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable.
+            Specify the etcd3 connection in URL form, e.g. C(https://hostname:2379), or in C(<host>:<port>) form.
+ - The C(host) part is overwritten by I(host) option, if defined.
+ - The C(port) part is overwritten by I(port) option, if defined.
+ env:
+ - name: ETCDCTL_ENDPOINTS
+ default: '127.0.0.1:2379'
+ type: str
+ host:
+ description:
+ - etcd3 listening client host.
+ - Takes precedence over I(endpoints).
+ type: str
+ port:
+ description:
+ - etcd3 listening client port.
+ - Takes precedence over I(endpoints).
+ type: int
+ ca_cert:
+ description:
+ - etcd3 CA authority.
+ env:
+ - name: ETCDCTL_CACERT
+ type: str
+ cert_cert:
+ description:
+ - etcd3 client certificate.
+ env:
+ - name: ETCDCTL_CERT
+ type: str
+ cert_key:
+ description:
+ - etcd3 client private key.
+ env:
+ - name: ETCDCTL_KEY
+ type: str
+ timeout:
+ description:
+ - Client timeout.
+ default: 60
+ env:
+ - name: ETCDCTL_DIAL_TIMEOUT
+ type: int
+ user:
+ description:
+ - Authenticated user name.
+ env:
+ - name: ETCDCTL_USER
+ type: str
+ password:
+ description:
+ - Authenticated user password.
+ env:
+ - name: ETCDCTL_PASSWORD
+ type: str
+
+ notes:
+        - I(host) and I(port) options take precedence over the I(endpoints) option.
+        - The recommended way to connect to the etcd3 server is using the C(ETCDCTL_ENDPOINTS)
+          environment variable and keeping I(endpoints), I(host), and I(port) unused.
+ seealso:
+ - module: community.general.etcd3
+ - ref: etcd_lookup
+ description: The etcd v2 lookup.
+
+ requirements:
+ - "etcd3 >= 0.10"
+'''
+
+EXAMPLES = '''
+- name: "a value from a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
+
+- name: "values from multiple folders on a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo', 'bar', 'baz') }}"
+
+- name: "look for a key prefix"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', '/foo/bar', prefix=True) }}"
+
+- name: "connect to etcd3 with a client certificate"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - List of keys and associated values.
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The element's key.
+ type: str
+ value:
+ description: The element's value.
+ type: str
+'''
+
+import re
+
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.errors import AnsibleError, AnsibleLookupError
+
+try:
+ import etcd3
+ HAS_ETCD = True
+except ImportError:
+ HAS_ETCD = False
+
+display = Display()
+
+etcd3_cnx_opts = (
+ 'host',
+ 'port',
+ 'ca_cert',
+ 'cert_key',
+ 'cert_cert',
+ 'timeout',
+ 'user',
+ 'password',
+    # 'grpc_options' Etcd3Client() option is currently not supported by this lookup module (maybe in the future?)
+)
+
+
+def etcd3_client(client_params):
+ try:
+ etcd = etcd3.client(**client_params)
+ etcd.status()
+ except Exception as exp:
+ raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp)))
+ return etcd
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ if not HAS_ETCD:
+ display.error(missing_required_lib('etcd3'))
+ return None
+
+ # create the etcd3 connection parameters dict to pass to etcd3 class
+ client_params = {}
+
+ # etcd3 class expects host and port as connection parameters, so endpoints
+ # must be mangled a bit to fit in this scheme.
+ # so here we use a regex to extract server and port
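+        # For example (hostname invented), 'https://etcd.example.com:2379'
+        # yields host 'etcd.example.com' and port '2379' (note: a string),
+        # and a bare '127.0.0.1:2379' parses the same way.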
+ match = re.compile(
+ r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
+ ).match(self.get_option('endpoints'))
+ if match:
+ if match.group('host'):
+ client_params['host'] = match.group('host')
+ if match.group('port'):
+ client_params['port'] = match.group('port')
+
+ for opt in etcd3_cnx_opts:
+ if self.get_option(opt):
+ client_params[opt] = self.get_option(opt)
+
+ cnx_log = dict(client_params)
+ if 'password' in cnx_log:
+ cnx_log['password'] = '<redacted>'
+ display.verbose("etcd3 connection parameters: %s" % cnx_log)
+
+ # connect to etcd3 server
+ etcd = etcd3_client(client_params)
+
+ ret = []
+ # we can pass many keys to lookup
+ for term in terms:
+ if self.get_option('prefix'):
+ try:
+ for val, meta in etcd.get_prefix(term):
+ if val and meta:
+ ret.append({'key': to_native(meta.key), 'value': to_native(val)})
+ except Exception as exp:
+                    display.warning('Caught exception during etcd3.get_prefix: %s' % (to_native(exp)))
+ else:
+ try:
+ val, meta = etcd.get(term)
+ if val and meta:
+ ret.append({'key': to_native(meta.key), 'value': to_native(val)})
+ except Exception as exp:
+                    display.warning('Caught exception during etcd3.get: %s' % (to_native(exp)))
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/filetree.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/filetree.py
new file mode 100644
index 00000000..4b050968
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/filetree.py
@@ -0,0 +1,218 @@
+# (c) 2016 Dag Wieers <dag@wieers.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+lookup: filetree
+author: Dag Wieers (@dagwieers) <dag@wieers.com>
+short_description: recursively match all files in a directory tree
+description:
+- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership.
+- Supports directories, files and symlinks, including SELinux and other file properties.
+- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths.
+  This enables merging different trees in order of importance, or adding role_vars to specific paths to influence different instances of the same role.
+options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+'''
+
+EXAMPLES = r"""
+- name: Create directories
+ ansible.builtin.file:
+ path: /web/{{ item.path }}
+ state: directory
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'directory'
+
+- name: Template files (explicitly skip directories in order to use the 'src' attribute)
+ ansible.builtin.template:
+ src: '{{ item.src }}'
+ dest: /web/{{ item.path }}
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'file'
+
+- name: Recreate symlinks
+ ansible.builtin.file:
+ src: '{{ item.src }}'
+ dest: /web/{{ item.path }}
+ state: link
+ force: yes
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'link'
+
+- name: list all files under web/
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.filetree', 'web/') }}"
+"""
+
+RETURN = r"""
+ _raw:
+ description: List of dictionaries with file information.
+ type: list
+ elements: dict
+ contains:
+ src:
+ description:
+ - Full path to file.
+ - Not returned when I(item.state) is set to C(directory).
+ type: path
+ root:
+ description: Allows filtering by original location.
+ type: path
+ path:
+ description: Contains the relative path to root.
+ type: path
+ mode:
+      description: The permissions of the resulting file or directory.
+ type: str
+ state:
+ description: TODO
+ type: str
+ owner:
+ description: Name of the user that owns the file/directory.
+ type: raw
+ group:
+ description: Name of the group that owns the file/directory.
+ type: raw
+ seuser:
+ description: The user part of the SELinux file context.
+ type: raw
+ serole:
+ description: The role part of the SELinux file context.
+ type: raw
+ setype:
+ description: The type part of the SELinux file context.
+ type: raw
+ selevel:
+ description: The level part of the SELinux file context.
+ type: raw
+ uid:
+ description: Owner ID of the file/directory.
+ type: int
+ gid:
+ description: Group ID of the file/directory.
+ type: int
+ size:
+ description: Size of the target.
+ type: int
+ mtime:
+ description: Time of last modification.
+ type: float
+ ctime:
+ description: Time of last metadata update or creation (depends on OS).
+ type: float
+"""
+import os
+import pwd
+import grp
+import stat
+
+HAVE_SELINUX = False
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ pass
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# If selinux fails to find a default, return an array of None
+def selinux_context(path):
+ context = [None, None, None, None]
+ if HAVE_SELINUX and selinux.is_selinux_enabled():
+ try:
+ # note: the selinux module uses byte strings on python2 and text
+ # strings on python3
+ ret = selinux.lgetfilecon_raw(to_native(path))
+ except OSError:
+ return context
+ if ret[0] != -1:
+ # Limit split to 4 because the selevel, the last in the list,
+ # may contain ':' characters
+ context = ret[1].split(':', 3)
+ return context
+
+
+def file_props(root, path):
+ ''' Returns dictionary with file properties, or return None on failure '''
+ abspath = os.path.join(root, path)
+
+ try:
+ st = os.lstat(abspath)
+ except OSError as e:
+ display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e))
+ return None
+
+ ret = dict(root=root, path=path)
+
+ if stat.S_ISLNK(st.st_mode):
+ ret['state'] = 'link'
+ ret['src'] = os.readlink(abspath)
+ elif stat.S_ISDIR(st.st_mode):
+ ret['state'] = 'directory'
+ elif stat.S_ISREG(st.st_mode):
+ ret['state'] = 'file'
+ ret['src'] = abspath
+ else:
+ display.warning('filetree: file type of %s is not supported' % abspath)
+ return None
+
+ ret['uid'] = st.st_uid
+ ret['gid'] = st.st_gid
+ try:
+ ret['owner'] = pwd.getpwuid(st.st_uid).pw_name
+ except KeyError:
+ ret['owner'] = st.st_uid
+ try:
+ ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name)
+ except KeyError:
+ ret['group'] = st.st_gid
+ ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode))
+ ret['size'] = st.st_size
+ ret['mtime'] = st.st_mtime
+ ret['ctime'] = st.st_ctime
+
+ if HAVE_SELINUX and selinux.is_selinux_enabled() == 1:
+ context = selinux_context(abspath)
+ ret['seuser'] = context[0]
+ ret['serole'] = context[1]
+ ret['setype'] = context[2]
+ ret['selevel'] = context[3]
+
+ return ret
+
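+# Sketch of a result for a regular file (hypothetical values):
+# {'root': '/play/files/web', 'path': 'index.html', 'state': 'file',
+#  'src': '/play/files/web/index.html', 'owner': 'root', 'group': 'root',
+#  'mode': '0644', 'uid': 0, 'gid': 0, 'size': 42, 'mtime': ..., 'ctime': ...}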
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ basedir = self.get_basedir(variables)
+
+ ret = []
+ for term in terms:
+ term_file = os.path.basename(term)
+ dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term))
+ path = os.path.join(dwimmed_path, term_file)
+ display.debug("Walking '{0}'".format(path))
+ for root, dirs, files in os.walk(path, topdown=True):
+ for entry in dirs + files:
+ relpath = os.path.relpath(os.path.join(root, entry), path)
+
+ # Skip if relpath was already processed (from another root)
+ if relpath not in [existing['path'] for existing in ret]:
+ props = file_props(path, relpath)
+ if props is not None:
+ display.debug(" found '{0}'".format(os.path.join(path, relpath)))
+ ret.append(props)
+
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/flattened.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/flattened.py
new file mode 100644
index 00000000..d5616670
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/flattened.py
@@ -0,0 +1,84 @@
+# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: flattened
+ author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
+ short_description: return single list completely flattened
+ description:
+ - given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
+ options:
+ _terms:
+ description: lists to flatten
+ required: True
+ notes:
+ - unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore.
+ - aka highlander plugin, there can only be one (list).
+'''
+
+EXAMPLES = """
+- name: "'unnest' all elements into single list"
+ ansible.builtin.debug:
+ msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - flattened list
+ type: list
+"""
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+
+ def _check_list_of_one_list(self, term):
+ # make sure term is not a list of one (list of one..) item
+ # return the final non list item if so
+
+ if isinstance(term, list) and len(term) == 1:
+ term = term[0]
+ if isinstance(term, list):
+ term = self._check_list_of_one_list(term)
+
+ return term
+
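+ # e.g. _check_list_of_one_list([[['a']]]) unwraps to 'a', while
+ # _check_list_of_one_list(['a', 'b']) is returned unchanged.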
+ def _do_flatten(self, terms, variables):
+
+ ret = []
+ for term in terms:
+ term = self._check_list_of_one_list(term)
+
+ if term == 'None' or term == 'null':
+ # skip undefined items instead of aborting the whole loop
+ continue
+
+ if isinstance(term, string_types):
+ # convert a variable to a list
+ term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
+ # but avoid converting a plain string to a list of one string
+ if term2 != [term]:
+ term = term2
+
+ if isinstance(term, list):
+ # if it's a list, check recursively for items that are a list
+ term = self._do_flatten(term, variables)
+ ret.extend(term)
+ else:
+ ret.append(term)
+
+ return ret
+
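+ # Illustration: _do_flatten([1, [2, [3]], 'a'], {}) returns [1, 2, 3, 'a'];
+ # nesting of any depth collapses into one flat list.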
+ def run(self, terms, variables, **kwargs):
+
+ if not isinstance(terms, list):
+ raise AnsibleError("with_flattened expects a list")
+
+ return self._do_flatten(terms, variables)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/gcp_storage_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/gcp_storage_file.py
new file mode 100644
index 00000000..206788c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/gcp_storage_file.py
@@ -0,0 +1,156 @@
+# (c) 2019, Eric Anderson <eric.sysmin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+lookup: gcp_storage_file
+description:
+ - This lookup returns the contents of a file residing on Google Cloud Storage.
+short_description: Return GC Storage content
+author: Eric Anderson (!UNKNOWN) <eanderson@avinetworks.com>
+requirements:
+ - python >= 2.6
+ - requests >= 2.18.4
+ - google-auth >= 1.3.0
+options:
+ src:
+ description:
+ - Source location of file (may be local machine or cloud depending on action).
+ required: false
+ bucket:
+ description:
+ - The name of the bucket.
+ required: false
+extends_documentation_fragment:
+- community.general._gcp
+
+'''
+
+EXAMPLES = '''
+- ansible.builtin.debug:
+ msg: |
+ the value of foo.txt is {{ lookup('community.general.gcp_storage_file',
+ bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
+ auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}
+'''
+
+RETURN = '''
+_raw:
+ description:
+ - base64 encoded file content
+ type: list
+ elements: str
+'''
+
+import base64
+import json
+import mimetypes
+import os
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession
+ HAS_GOOGLE_CLOUD_COLLECTION = True
+except ImportError:
+ HAS_GOOGLE_CLOUD_COLLECTION = False
+
+
+display = Display()
+
+
+class GcpMockModule(object):
+ def __init__(self, params):
+ self.params = params
+
+ def fail_json(self, *args, **kwargs):
+ raise AnsibleError(kwargs['msg'])
+
+ def raise_for_status(self, response):
+ try:
+ response.raise_for_status()
+ except requests.exceptions.RequestException:
+ self.fail_json(msg="GCP returned error: %s" % response.json())
+
+
+class GcpFileLookup():
+ def get_file_contents(self, module):
+ auth = GcpSession(module, 'storage')
+ data = auth.get(self.media_link(module))
+ return base64.b64encode(data.content.rstrip())
+
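+ # The lookup returns the content base64-encoded, so a consumer would
+ # typically decode it, e.g. "{{ lookup('community.general.gcp_storage_file',
+ # ...) | b64decode }}" (illustrative use of the standard b64decode filter,
+ # not taken from the documentation above).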
+ def fetch_resource(self, module, link, allow_not_found=True):
+ auth = GcpSession(module, 'storage')
+ return self.return_if_object(module, auth.get(link), allow_not_found)
+
+ def self_link(self, module):
+ return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params)
+
+ def media_link(self, module):
+ return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params)
+
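+ # e.g. with bucket='gcp-bucket' and src='mydir/foo.txt' (the values used in
+ # EXAMPLES), media_link() expands to
+ # https://www.googleapis.com/storage/v1/b/gcp-bucket/o/mydir/foo.txt?alt=media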
+ def return_if_object(self, module, response, allow_not_found=False):
+ # If not found, return nothing.
+ if allow_not_found and response.status_code == 404:
+ return None
+ # If no content, return nothing.
+ if response.status_code == 204:
+ return None
+ try:
+ module.raise_for_status(response)
+ result = response.json()
+ except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+ raise AnsibleError("Invalid JSON response with error: %s" % inst)
+ if navigate_hash(result, ['error', 'errors']):
+ raise AnsibleError(navigate_hash(result, ['error', 'errors']))
+ return result
+
+ def object_headers(self, module):
+ return {
+ "name": module.params['src'],
+ "Content-Type": mimetypes.guess_type(module.params['src'])[0],
+ "Content-Length": str(os.path.getsize(module.params['src'])),
+ }
+
+ def run(self, terms, variables=None, **kwargs):
+ params = {
+ 'bucket': kwargs.get('bucket', None),
+ 'src': kwargs.get('src', None),
+ 'projects': kwargs.get('projects', None),
+ 'scopes': kwargs.get('scopes', None),
+ 'zones': kwargs.get('zones', None),
+ 'auth_kind': kwargs.get('auth_kind', None),
+ 'service_account_file': kwargs.get('service_account_file', None),
+ 'service_account_email': kwargs.get('service_account_email', None),
+ }
+
+ if not params['scopes']:
+ params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
+
+ fake_module = GcpMockModule(params)
+
+ # Check if files exist.
+ remote_object = self.fetch_resource(fake_module, self.self_link(fake_module))
+ if not remote_object:
+ raise AnsibleError("File does not exist in bucket")
+
+ result = self.get_file_contents(fake_module)
+ return [result]
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_GOOGLE_CLOUD_COLLECTION:
+ raise AnsibleError("community.general.gcp_storage_file needs a supported version of the google.cloud collection installed")
+ if not HAS_REQUESTS:
+ raise AnsibleError("community.general.gcp_storage_file needs requests installed. Use `pip install requests` to install it")
+ return GcpFileLookup().run(terms, variables=variables, **kwargs)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hashi_vault.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hashi_vault.py
new file mode 100644
index 00000000..a4da243a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hashi_vault.py
@@ -0,0 +1,650 @@
+# (c) 2020, Brian Scholer (@briantist)
+# (c) 2015, Jonathan Davila <jonathan(at)davila.io>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: hashi_vault
+ author:
+ - Jonathan Davila (!UNKNOWN) <jdavila(at)ansible.com>
+ - Brian Scholer (@briantist)
+ short_description: Retrieve secrets from HashiCorp's Vault
+ requirements:
+ - hvac (python library)
+ - hvac 0.7.0+ (for namespace support)
+ - hvac 0.9.6+ (to avoid all deprecation warnings)
+ - botocore (only if inferring aws params from boto)
+ - boto3 (only if using a boto profile)
+ description:
+ - Retrieve secrets from HashiCorp's Vault.
+ notes:
+ - Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
+ - As of community.general 0.2.0, only the latest version of a secret is returned when specifying a KV v2 path.
+ - As of community.general 0.2.0, all options can be supplied via term string (space delimited key=value pairs) or by parameters (see examples).
+ - As of community.general 0.2.0, when C(secret) is the first option in the term string, C(secret=) is not required (see examples).
+ options:
+ secret:
+ description: Vault path to the secret being requested in the format C(path[:field]).
+ required: True
+ token:
+ description:
+ - Vault token. If using token auth and no token is supplied, explicitly or through env, then the plugin will check
+ for a token file, as determined by C(token_path) and C(token_file).
+ env:
+ - name: VAULT_TOKEN
+ token_path:
+ description: If no token is specified, will try to read the token file from this path.
+ env:
+ - name: VAULT_TOKEN_PATH
+ version_added: 1.2.0
+ ini:
+ - section: lookup_hashi_vault
+ key: token_path
+ version_added: '0.2.0'
+ token_file:
+ description: If no token is specified, will try to read the token from this file in C(token_path).
+ env:
+ - name: VAULT_TOKEN_FILE
+ version_added: 1.2.0
+ ini:
+ - section: lookup_hashi_vault
+ key: token_file
+ default: '.vault-token'
+ version_added: '0.2.0'
+ url:
+ description: URL to the Vault service.
+ env:
+ - name: VAULT_ADDR
+ ini:
+ - section: lookup_hashi_vault
+ key: url
+ version_added: '0.2.0'
+ default: 'http://127.0.0.1:8200'
+ username:
+ description: Authentication user name.
+ password:
+ description: Authentication password.
+ role_id:
+ description: Vault Role ID. Used in approle and aws_iam_login auth methods.
+ env:
+ - name: VAULT_ROLE_ID
+ ini:
+ - section: lookup_hashi_vault
+ key: role_id
+ version_added: '0.2.0'
+ secret_id:
+ description: Secret ID to be used for Vault AppRole authentication.
+ env:
+ - name: VAULT_SECRET_ID
+ auth_method:
+ description:
+ - Authentication method to be used.
+ - C(userpass) is added in Ansible 2.8.
+ - C(aws_iam_login) is added in community.general 0.2.0.
+ - C(jwt) is added in community.general 1.3.0.
+ env:
+ - name: VAULT_AUTH_METHOD
+ ini:
+ - section: lookup_hashi_vault
+ key: auth_method
+ version_added: '0.2.0'
+ choices:
+ - token
+ - userpass
+ - ldap
+ - approle
+ - aws_iam_login
+ - jwt
+ default: token
+ return_format:
+ description:
+ - Controls how multiple key/value pairs in a path are treated on return.
+ - C(dict) returns a single dict containing the key/value pairs (same behavior as before community.general 0.2.0).
+ - C(values) returns a list of all the values only. Use when you don't care about the keys.
+ - C(raw) returns the actual API result, which includes metadata and may have the data nested in other keys.
+ choices:
+ - dict
+ - values
+ - raw
+ default: dict
+ aliases: [ as ]
+ version_added: '0.2.0'
+ mount_point:
+ description: Vault mount point; only required if you have a custom mount point. Does not apply to token authentication.
+ jwt:
+ description: The JSON Web Token (JWT) to use for JWT authentication to Vault.
+ env:
+ - name: ANSIBLE_HASHI_VAULT_JWT
+ version_added: 1.3.0
+ ca_cert:
+ description: Path to certificate to use for authentication.
+ aliases: [ cacert ]
+ validate_certs:
+ description:
+ - Controls verification and validation of SSL certificates; usually you only want to turn this off when using self-signed certificates.
+ - Will be populated with the inverse of C(VAULT_SKIP_VERIFY) if that is set and I(validate_certs) is not explicitly
+ provided (added in community.general 1.3.0).
+ - Will default to C(true) if neither I(validate_certs) nor C(VAULT_SKIP_VERIFY) is set.
+ type: boolean
+ namespace:
+ description:
+ - Vault namespace where secrets reside. This option requires HVAC 0.7.0+ and Vault 0.11+.
+ - Optionally, this may be achieved by prefixing the authentication mount point and/or secret path with the namespace
+ (e.g. C(mynamespace/secret/mysecret)).
+ env:
+ - name: VAULT_NAMESPACE
+ version_added: 1.2.0
+ aws_profile:
+ description: The AWS profile to use.
+ type: str
+ aliases: [ boto_profile ]
+ env:
+ - name: AWS_DEFAULT_PROFILE
+ - name: AWS_PROFILE
+ version_added: '0.2.0'
+ aws_access_key:
+ description: The AWS access key to use.
+ type: str
+ aliases: [ aws_access_key_id ]
+ env:
+ - name: EC2_ACCESS_KEY
+ - name: AWS_ACCESS_KEY
+ - name: AWS_ACCESS_KEY_ID
+ version_added: '0.2.0'
+ aws_secret_key:
+ description: The AWS secret key that corresponds to the access key.
+ type: str
+ aliases: [ aws_secret_access_key ]
+ env:
+ - name: EC2_SECRET_KEY
+ - name: AWS_SECRET_KEY
+ - name: AWS_SECRET_ACCESS_KEY
+ version_added: '0.2.0'
+ aws_security_token:
+ description: The AWS security token if using temporary access and secret keys.
+ type: str
+ env:
+ - name: EC2_SECURITY_TOKEN
+ - name: AWS_SESSION_TOKEN
+ - name: AWS_SECURITY_TOKEN
+ version_added: '0.2.0'
+ region:
+ description: The AWS region for which to create the connection.
+ type: str
+ env:
+ - name: EC2_REGION
+ - name: AWS_REGION
+ version_added: '0.2.0'
+"""
+
+EXAMPLES = """
+- ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
+
+- name: Return all secrets from a path
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
+
+- name: Vault that requires authentication via LDAP
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas') }}"
+
+- name: Vault that requires authentication via username and password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=psw url=http://myvault:8200') }}"
+
+- name: Connect to Vault using TLS
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 validate_certs=False') }}"
+
+- name: using certificate auth
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hi:value token=xxxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem') }}"
+
+- name: Authenticate with a Vault app role
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid') }}"
+
+- name: Return all secrets from a path in a namespace
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 namespace=teama/admins') }}"
+
+# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path")
+# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
+- name: Return latest KV v2 secret from path
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}"
+
+# The following examples work in collection releases after community.general 0.2.0
+
+- name: secret= is not required if secret is first
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello token=<token> url=http://myvault_url:8200') }}"
+
+- name: options can be specified as parameters rather than put in term string
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello', token=my_token_var, url='http://myvault_url:8200') }}"
+
+# return_format (or its alias 'as') can control how secrets are returned to you
+- name: return secrets as a dict (default)
+ ansible.builtin.set_fact:
+ my_secrets: "{{ lookup('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200') }}"
+- ansible.builtin.debug:
+ msg: "{{ my_secrets['secret_key'] }}"
+- ansible.builtin.debug:
+ msg: "Secret '{{ item.key }}' has value '{{ item.value }}'"
+ loop: "{{ my_secrets | dict2items }}"
+
+- name: return secrets as values only
+ ansible.builtin.debug:
+ msg: "A secret value: {{ item }}"
+ loop: "{{ query('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200', return_format='values') }}"
+
+- name: return raw secret from API, including metadata
+ ansible.builtin.set_fact:
+ my_secret: "{{ lookup('community.general.hashi_vault', 'secret/data/hello:value', token=my_token_var, url='http://myvault_url:8200', as='raw') }}"
+- ansible.builtin.debug:
+ msg: "This is version {{ my_secret['metadata']['version'] }} of hello:value. The secret data is {{ my_secret['data']['data']['value'] }}"
+
+# AWS IAM authentication method
+# uses Ansible standard AWS options
+
+- name: authenticate with aws_iam_login
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='aws_iam_login', role_id='myroleid', profile=my_boto_profile) }}"
+
+# The following examples work in collection releases after community.general 1.3.0
+
+- name: Authenticate with a JWT
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='jwt', role_id='myroleid', jwt='myjwt', url='https://myvault:8200')}}"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - secret(s) requested
+ type: list
+ elements: dict
+"""
+
+import os
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+from ansible.module_utils.parsing.convert_bool import boolean
+
+HAS_HVAC = False
+try:
+ import hvac
+ HAS_HVAC = True
+except ImportError:
+ HAS_HVAC = False
+
+HAS_BOTOCORE = False
+try:
+ # import boto3
+ import botocore
+ HAS_BOTOCORE = True
+except ImportError:
+ HAS_BOTOCORE = False
+
+HAS_BOTO3 = False
+try:
+ import boto3
+ # import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+class HashiVault:
+ def get_options(self, *option_names, **kwargs):
+ ret = {}
+ include_falsey = kwargs.get('include_falsey', False)
+ for option in option_names:
+ val = self.options.get(option)
+ if val or include_falsey:
+ ret[option] = val
+ return ret
+
+ def __init__(self, **kwargs):
+ self.options = kwargs
+
+ # check early that auth method is actually available
+ self.auth_function = 'auth_' + self.options['auth_method']
+ if not (hasattr(self, self.auth_function) and callable(getattr(self, self.auth_function))):
+ raise AnsibleError(
+ "Authentication method '%s' is not implemented. ('%s' member function not found)" % (self.options['auth_method'], self.auth_function)
+ )
+
+ client_args = {
+ 'url': self.options['url'],
+ 'verify': self.options['ca_cert']
+ }
+
+ if self.options.get('namespace'):
+ client_args['namespace'] = self.options['namespace']
+
+ # this is the only auth_method-specific thing here, because if we're using a token, we need it now
+ if self.options['auth_method'] == 'token':
+ client_args['token'] = self.options.get('token')
+
+ self.client = hvac.Client(**client_args)
+
+ # Check for old version, before auth_methods class (added in 0.7.0):
+ # https://github.com/hvac/hvac/releases/tag/v0.7.0
+ #
+ # hvac is moving auth methods into the auth_methods class
+ # which lives in the client.auth member.
+ #
+ # Attempting to find which backends were moved into the class when (this is primarily for warnings):
+ # 0.7.0 -- github, ldap, mfa, azure?, gcp
+ # 0.7.1 -- okta
+ # 0.8.0 -- kubernetes
+ # 0.9.0 -- azure?, radius
+ # 0.9.3 -- aws
+ # 0.9.6 -- userpass
+ self.hvac_has_auth_methods = hasattr(self.client, 'auth')
+
+ # We've already checked to ensure a method exists for a particular auth_method, of the form:
+ #
+ # auth_<method_name>
+ #
+ def authenticate(self):
+ getattr(self, self.auth_function)()
+
+ def get(self):
+ '''gets a secret. should always return a list'''
+ secret = self.options['secret']
+ field = self.options['secret_field']
+ return_as = self.options['return_format']
+
+ try:
+ data = self.client.read(secret)
+ except hvac.exceptions.Forbidden:
+ raise AnsibleError("Forbidden: Permission Denied to secret '%s'." % secret)
+
+ if data is None:
+ raise AnsibleError("The secret '%s' doesn't seem to exist." % secret)
+
+ if return_as == 'raw':
+ return [data]
+
+ # Check response for KV v2 fields and flatten nested secret data.
+ # https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1
+ try:
+ # sentinel field checks
+ check_dd = data['data']['data']
+ check_md = data['data']['metadata']
+ # unwrap nested data
+ data = data['data']
+ except KeyError:
+ pass
+
+ if return_as == 'values':
+ return list(data['data'].values())
+
+ # everything after here implements return_as == 'dict'
+ if not field:
+ return [data['data']]
+
+ if field not in data['data']:
+ raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (secret, field))
+
+ return [data['data'][field]]
+
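+ # Illustration of the KV v2 unwrapping above (response shape per the Vault
+ # docs): {'data': {'data': {'value': 's3cr3t'}, 'metadata': {...}}} becomes
+ # {'data': {'value': 's3cr3t'}, 'metadata': {...}}, so field lookups and the
+ # 'values'/'dict' return formats behave the same for KV v1 and KV v2.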
+ # begin auth implementation methods
+ #
+ # To add new backends, 3 things should be added:
+ #
+ # 1. Add a new validate_auth_<method_name> method to the LookupModule, which is responsible for validating
+ # that it has the necessary options and whatever else it needs.
+ #
+ # 2. Add a new auth_<method_name> method to this class. These implementations are fairly minimal as they should
+ # already have everything they need. This is also the place to check for deprecated auth methods as hvac
+ # continues to move backends into the auth_methods class.
+ #
+ # 3. Update the avail_auth_methods list in the LookupModule's auth_methods() method (for now this is static).
+ #
+ def auth_token(self):
+ if not self.client.is_authenticated():
+ raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup.")
+
+ def auth_userpass(self):
+ params = self.get_options('username', 'password', 'mount_point')
+ if self.hvac_has_auth_methods and hasattr(self.client.auth.userpass, 'login'):
+ self.client.auth.userpass.login(**params)
+ else:
+ Display().warning("HVAC should be updated to version 0.9.6 or higher. Deprecated method 'auth_userpass' will be used.")
+ self.client.auth_userpass(**params)
+
+ def auth_ldap(self):
+ params = self.get_options('username', 'password', 'mount_point')
+ if self.hvac_has_auth_methods and hasattr(self.client.auth.ldap, 'login'):
+ self.client.auth.ldap.login(**params)
+ else:
+ Display().warning("HVAC should be updated to version 0.7.0 or higher. Deprecated method 'auth_ldap' will be used.")
+ self.client.auth_ldap(**params)
+
+ def auth_approle(self):
+ params = self.get_options('role_id', 'secret_id', 'mount_point')
+ self.client.auth_approle(**params)
+
+ def auth_aws_iam_login(self):
+ params = self.options['iam_login_credentials']
+ if self.hvac_has_auth_methods and hasattr(self.client.auth.aws, 'iam_login'):
+ self.client.auth.aws.iam_login(**params)
+ else:
+ Display().warning("HVAC should be updated to version 0.9.3 or higher. Deprecated method 'auth_aws_iam' will be used.")
+ self.client.auth_aws_iam(**params)
+
+ def auth_jwt(self):
+ params = self.get_options('role_id', 'jwt', 'mount_point')
+ params['role'] = params.pop('role_id')
+ if self.hvac_has_auth_methods and hasattr(self.client.auth, 'jwt') and hasattr(self.client.auth.jwt, 'jwt_login'):
+ response = self.client.auth.jwt.jwt_login(**params)
+ # must manually set the client token with JWT login
+ # see https://github.com/hvac/hvac/issues/644
+ self.client.token = response['auth']['client_token']
+ else:
+ raise AnsibleError("JWT authentication requires HVAC version 0.10.5 or higher.")
+
+ # end auth implementation methods
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_HVAC:
+ raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")
+
+ ret = []
+
+ for term in terms:
+ opts = kwargs.copy()
+ opts.update(self.parse_term(term))
+ self.set_options(direct=opts)
+ self.process_options()
+ # FUTURE: Create one object, authenticate once, and re-use it,
+ # for gets, for better use during with_ loops.
+ client = HashiVault(**self._options)
+ client.authenticate()
+ ret.extend(client.get())
+
+ return ret
+
+ def parse_term(self, term):
+ '''parses a term string into options'''
+ param_dict = {}
+
+ for i, param in enumerate(term.split()):
+ try:
+ key, value = param.split('=', 1)
+ except ValueError:
+ if (i == 0):
+ # allow secret to be specified as value only if it's first
+ key = 'secret'
+ value = param
+ else:
+ raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % term)
+ param_dict[key] = value
+ return param_dict
+
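+ # e.g. parse_term("secret/data/hello:value url=http://myvault:8200") returns
+ # {'secret': 'secret/data/hello:value', 'url': 'http://myvault:8200'}; the
+ # trailing ':value' is split off later by field_ops().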
+ def process_options(self):
+ '''performs deep validation and value loading for options'''
+
+ # ca_cert to verify
+ self.boolean_or_cacert()
+
+ # auth methods
+ self.auth_methods()
+
+ # secret field splitter
+ self.field_ops()
+
+ # begin options processing methods
+
+ def boolean_or_cacert(self):
+ # This is needed because of this (https://hvac.readthedocs.io/en/stable/source/hvac_v1.html):
+ #
+ # # verify (Union[bool,str]) - Either a boolean to indicate whether TLS verification should
+ # # be performed when sending requests to Vault, or a string pointing at the CA bundle to use for verification.
+ #
+ ''' return a bool or cacert '''
+ ca_cert = self.get_option('ca_cert')
+
+ validate_certs = self.get_option('validate_certs')
+
+ if validate_certs is None:
+ # Validate certs option was not explicitly set
+
+ # Check if VAULT_SKIP_VERIFY is set
+ vault_skip_verify = os.environ.get('VAULT_SKIP_VERIFY')
+
+ if vault_skip_verify is not None:
+ # VAULT_SKIP_VERIFY is set
+ try:
+ # Check that we have a boolean value
+ vault_skip_verify = boolean(vault_skip_verify)
+ # Use the inverse of VAULT_SKIP_VERIFY
+ validate_certs = not vault_skip_verify
+ except TypeError:
+ # Not a boolean value fallback to default value (True)
+ validate_certs = True
+ else:
+ validate_certs = True
+
+ if not (validate_certs and ca_cert):
+ self.set_option('ca_cert', validate_certs)
+
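+ # Net effect (sketch): validate_certs=False makes ca_cert False (verification
+ # off); validate_certs=True without a ca_cert path makes ca_cert True (verify
+ # against system CAs); only validate_certs=True plus an explicit ca_cert path
+ # keeps the path, matching hvac's Union[bool, str] 'verify' argument.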
+ def field_ops(self):
+ # split secret and field
+ secret = self.get_option('secret')
+
+ s_f = secret.rsplit(':', 1)
+ self.set_option('secret', s_f[0])
+ if len(s_f) >= 2:
+ field = s_f[1]
+ else:
+ field = None
+ self.set_option('secret_field', field)
+
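+ # e.g. 'secret/data/app:password' becomes secret='secret/data/app' with
+ # secret_field='password'; a plain 'secret/data/app' leaves secret_field=None.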
+ def auth_methods(self):
+ # enforce and set the list of available auth methods
+ # TODO: can this be read from the choices: field in documentation?
+ avail_auth_methods = ['token', 'approle', 'userpass', 'ldap', 'aws_iam_login', 'jwt']
+ self.set_option('avail_auth_methods', avail_auth_methods)
+ auth_method = self.get_option('auth_method')
+
+ if auth_method not in avail_auth_methods:
+ raise AnsibleError(
+ "Authentication method '%s' not supported. Available options are %r" % (auth_method, avail_auth_methods)
+ )
+
+ # run validator if available
+ auth_validator = 'validate_auth_' + auth_method
+ if hasattr(self, auth_validator) and callable(getattr(self, auth_validator)):
+ getattr(self, auth_validator)(auth_method)
+
+ # end options processing methods
+
+ # begin auth method validators
+
+ def validate_by_required_fields(self, auth_method, *field_names):
+ missing = [field for field in field_names if not self.get_option(field)]
+
+ if missing:
+ raise AnsibleError("Authentication method %s requires options %r to be set, but these are missing: %r" % (auth_method, field_names, missing))
+
+ def validate_auth_userpass(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'username', 'password')
+
+ def validate_auth_ldap(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'username', 'password')
+
+ def validate_auth_approle(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'role_id')
+
+ def validate_auth_token(self, auth_method):
+ if auth_method == 'token':
+ if not self.get_option('token_path'):
+ # generally we want env vars defined in the spec, but in this case we want
+ # the env var HOME to have lower precedence than any other value source,
+ # including ini, so we're doing it here after all other processing has taken place
+ self.set_option('token_path', os.environ.get('HOME'))
+ if not self.get_option('token') and self.get_option('token_path'):
+ token_filename = os.path.join(
+ self.get_option('token_path'),
+ self.get_option('token_file')
+ )
+ if os.path.exists(token_filename):
+ with open(token_filename) as token_file:
+ self.set_option('token', token_file.read().strip())
+
+ if not self.get_option('token'):
+ raise AnsibleError("No Vault Token specified or discovered.")
+
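+ # Token discovery order implemented above (summary): an explicit token
+ # (parameter or VAULT_TOKEN) wins; otherwise the plugin reads
+ # <token_path>/<token_file>, which defaults to $HOME/.vault-token.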
+ def validate_auth_aws_iam_login(self, auth_method):
+ params = {
+ 'access_key': self.get_option('aws_access_key'),
+ 'secret_key': self.get_option('aws_secret_key')
+ }
+
+ if self.get_option('role_id'):
+ params['role'] = self.get_option('role_id')
+
+ if self.get_option('region'):
+ params['region'] = self.get_option('region')
+
+ if not (params['access_key'] and params['secret_key']):
+ profile = self.get_option('aws_profile')
+ if profile:
+ # try to load boto profile
+ if not HAS_BOTO3:
+ raise AnsibleError("boto3 is required for loading a boto profile.")
+ session_credentials = boto3.session.Session(profile_name=profile).get_credentials()
+ else:
+ # try to load from IAM credentials
+ if not HAS_BOTOCORE:
+ raise AnsibleError("botocore is required for loading IAM role credentials.")
+ session_credentials = botocore.session.get_session().get_credentials()
+
+ if not session_credentials:
+ raise AnsibleError("No AWS credentials supplied or available.")
+
+ params['access_key'] = session_credentials.access_key
+ params['secret_key'] = session_credentials.secret_key
+ if session_credentials.token:
+ params['session_token'] = session_credentials.token
+
+ self.set_option('iam_login_credentials', params)
+
+ def validate_auth_jwt(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'role_id', 'jwt')
+
+ # end auth method validators
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hiera.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hiera.py
new file mode 100644
index 00000000..09b7c0a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hiera.py
@@ -0,0 +1,90 @@
+# (c) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Juan Manuel Parrilla (@jparrill)
+ lookup: hiera
+ short_description: get info from hiera data
+ requirements:
+ - hiera (command line utility)
+ description:
+ - Retrieves data from a Puppetmaster node using Hiera as ENC.
+ options:
+ _hiera_key:
+ description:
+ - The list of keys to lookup on the Puppetmaster
+ type: list
+ elements: string
+ required: True
+ _bin_file:
+ description:
+ - Binary file to execute Hiera
+ default: '/usr/bin/hiera'
+ env:
+ - name: ANSIBLE_HIERA_BIN
+ _hierarchy_file:
+ description:
+ - File that describes the hierarchy of Hiera
+ default: '/etc/hiera.yaml'
+ env:
+ - name: ANSIBLE_HIERA_CFG
+# FIXME: incomplete options .. _terms? environment/fqdn?
+'''
+
+EXAMPLES = """
+# All these examples depend on a hiera.yml that describes the hierarchy
+
+- name: "a value from Hiera 'DB'"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo') }}"
+
+- name: "a value from a Hiera 'DB' on other environment"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}"
+
+- name: "a value from a Hiera 'DB' for a concrete node"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - a value associated with input key
+ type: list
+ elements: str
+"""
+
+import os
+
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.cmd_functions import run_cmd
+
+ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
+ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
+
+
+class Hiera(object):
+ def get(self, hiera_key):
+ rc, output, err = run_cmd("{0} -c {1} {2}".format(
+ ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))
+
+ return output.strip()
+
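+ # Roughly equivalent shell invocation (illustrative): for terms ['foo'] this
+ # runs "/usr/bin/hiera -c /etc/hiera.yaml foo" and returns its stripped stdout.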
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ hiera = Hiera()
+ ret = []
+
+ ret.append(hiera.get(terms))
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/keyring.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/keyring.py
new file mode 100644
index 00000000..0472dfbb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/keyring.py
@@ -0,0 +1,67 @@
+# (c) 2016, Samuel Boucher <boucher.samuel.c@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: keyring
+ author:
+ - Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
+ requirements:
+ - keyring (python library)
+ short_description: grab secrets from the OS keyring
+ description:
+ - Allows you to access data stored in the OS provided keyring/keychain.
+'''
+
+EXAMPLES = """
+- name: Output secrets to screen (BAD IDEA)
+ ansible.builtin.debug:
+ msg: "Password: {{item}}"
+ with_community.general.keyring:
+ - 'servicename username'
+
+- name: access mysql with password from keyring
+ mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe
+"""
+
+RETURN = """
+ _raw:
+ description: Secrets stored.
+ type: list
+ elements: str
+"""
+
+HAS_KEYRING = True
+
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+try:
+ import keyring
+except ImportError:
+ HAS_KEYRING = False
+
+from ansible.plugins.lookup import LookupBase
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+ if not HAS_KEYRING:
+ raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")
+
+ display.vvvv(u"keyring: %s" % keyring.get_keyring())
+ ret = []
+ for term in terms:
+ parts = term.split()
+ servicename, username = parts[0], parts[1]
+ display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
+ password = keyring.get_password(servicename, username)
+ if password is None:
+ raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
+ ret.append(password.rstrip())
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lastpass.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lastpass.py
new file mode 100644
index 00000000..43e99986
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lastpass.py
@@ -0,0 +1,99 @@
+# (c) 2016, Andrew Zenk <azenk@umn.edu>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: lastpass
+ author:
+ - Andrew Zenk (!UNKNOWN) <azenk@umn.edu>
+ requirements:
+ - lpass (command line utility)
+ - must have already logged into lastpass
+ short_description: fetch data from lastpass
+ description:
+ - use the lpass command line utility to fetch specific fields from lastpass
+ options:
+ _terms:
+ description: key from which you want to retrieve the field
+ required: True
+ field:
+ description: field to return from lastpass
+ default: 'password'
+'''
+
+EXAMPLES = """
+- name: get 'custom_field' from lastpass entry 'entry-name'
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}"
+"""
+
+RETURN = """
+ _raw:
+ description: secrets stored
+ type: list
+ elements: str
+"""
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.lookup import LookupBase
+
+
+class LPassException(AnsibleError):
+ pass
+
+
+class LPass(object):
+
+ def __init__(self, path='lpass'):
+ self._cli_path = path
+
+ @property
+ def cli_path(self):
+ return self._cli_path
+
+ @property
+ def logged_in(self):
+ out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1)
+ return err.startswith("Are you sure you would like to log out?")
+
+ def _run(self, args, stdin=None, expected_rc=0):
+ p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(to_bytes(stdin))
+ rc = p.wait()
+ if rc != expected_rc:
+ raise LPassException(err)
+ return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
+
+ def _build_args(self, command, args=None):
+ if args is None:
+ args = []
+ args = [command] + args
+ args += ["--color=never"]
+ return args
+
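+ # e.g. _build_args("show", ["--password", "entry-name"]) yields
+ # ['show', '--password', 'entry-name', '--color=never'].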
+ def get_field(self, key, field):
+ if field in ['username', 'password', 'url', 'notes', 'id', 'name']:
+ out, err = self._run(self._build_args("show", ["--{0}".format(field), key]))
+ else:
+ out, err = self._run(self._build_args("show", ["--field={0}".format(field), key]))
+ return out.strip()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ lp = LPass()
+
+ if not lp.logged_in:
+ raise AnsibleError("Not logged into lastpass: please run 'lpass login' first")
+
+ field = kwargs.get('field', 'password')
+ values = []
+ for term in terms:
+ values.append(lp.get_field(term, field))
+ return values
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lmdb_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
new file mode 100644
index 00000000..3764a43c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
@@ -0,0 +1,120 @@
+# (c) 2017-2018, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: lmdb_kv
+ author:
+ - Jan-Piet Mens (@jpmens)
+ version_added: '0.2.0'
+ short_description: fetch data from LMDB
+ description:
+ - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
+ requirements:
+ - lmdb (python library https://lmdb.readthedocs.io/en/release/)
+ options:
+ _terms:
+ description: list of keys to query
+ db:
+ description: path to LMDB database
+ default: 'ansible.mdb'
+'''
+
+EXAMPLES = """
+- name: query LMDB for a list of country codes
+ ansible.builtin.debug:
+ msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}"
+
+- name: use list of values in a loop by key wildcard
+ ansible.builtin.debug:
+ msg: "Hello from {{ item.0 }} a.k.a. {{ item.1 }}"
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - "n*"
+
+- name: get an item by key
+ ansible.builtin.assert:
+ that:
+ - item == 'Belgium'
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - be
+"""
+
+RETURN = """
+_raw:
+ description: value(s) stored in LMDB
+ type: list
+ elements: raw
+"""
+
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+HAVE_LMDB = True
+try:
+ import lmdb
+except ImportError:
+ HAVE_LMDB = False
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ '''
+ terms contain any number of keys to be retrieved.
+ If terms is None, all keys from the database are returned
+ with their values, and if term ends in an asterisk, we
+ start searching there
+
+ The LMDB database defaults to 'ansible.mdb' if Ansible's
+ variable 'lmdb_kv_db' is not set:
+
+ vars:
+ - lmdb_kv_db: "jp.mdb"
+ '''
+
+ if HAVE_LMDB is False:
+ raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed")
+
+ db = variables.get('lmdb_kv_db', None)
+ if db is None:
+ db = kwargs.get('db', 'ansible.mdb')
+ db = str(db)
+
+ try:
+ env = lmdb.open(db, readonly=True)
+ except Exception as e:
+ raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e)))
+
+ ret = []
+ if len(terms) == 0:
+ with env.begin() as txn:
+ cursor = txn.cursor()
+ cursor.first()
+ for key, value in cursor:
+ ret.append((to_text(key), to_native(value)))
+
+ else:
+ for term in terms:
+ with env.begin() as txn:
+ if term.endswith('*'):
+ cursor = txn.cursor()
+ prefix = to_text(term[:-1]).encode() # strip asterisk
+ # position the cursor at the first key >= the prefix, then
+ # walk forward while keys still match it
+ if cursor.set_range(prefix):
+ while cursor.key().startswith(prefix):
+ ret.append((to_text(cursor.key()), to_native(cursor.value())))
+ if not cursor.next():
+ break
+ else:
+ value = txn.get(to_text(term).encode())
+ if value is not None:
+ ret.append(to_native(value))
+
+ return ret
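+
+ # Illustration (hypothetical database): with keys 'be', 'nl' and 'no' stored
+ # in jp.mdb, the wildcard term 'n*' yields [('nl', ...), ('no', ...)], while
+ # the plain term 'be' yields just the stored value.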
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/manifold.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/manifold.py
new file mode 100644
index 00000000..f7f843a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/manifold.py
@@ -0,0 +1,278 @@
+# (c) 2018, Arigato Machine Inc.
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
+ lookup: manifold
+ short_description: get credentials from Manifold.co
+ description:
+ - Retrieves resources' credentials from Manifold.co
+ options:
+ _terms:
+ description:
+ - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all
+ matched resources will be returned.
+ type: list
+ elements: string
+ required: False
+ api_token:
+ description:
+ - manifold API token
+ type: string
+ required: True
+ env:
+ - name: MANIFOLD_API_TOKEN
+ project:
+ description:
+ - The project label you want to get the resource for.
+ type: string
+ required: False
+ team:
+ description:
+ - The team label you want to get the resource for.
+ type: string
+ required: False
+'''
+
+EXAMPLES = '''
+ - name: all available resources
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
+ - name: all available resources for a specific project in specific team
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
+ - name: two specific resources
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - dictionary of credentials ready to be consumed as environment variables. If multiple resources define
+ the same environment variable(s), the last one returned by the Manifold API will take precedence.
+ type: dict
+'''
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils import six
+from ansible.utils.display import Display
+from traceback import format_exception
+import json
+import sys
+import os
+
+display = Display()
+
+
+class ApiError(Exception):
+ pass
+
+
+class ManifoldApiClient(object):
+ base_url = 'https://api.{api}.manifold.co/v1/{endpoint}'
+ http_agent = 'python-manifold-ansible-1.0.0'
+
+ def __init__(self, token):
+ self._token = token
+
+ def request(self, api, endpoint, *args, **kwargs):
+ """
+ Send a request to API backend and pre-process a response.
+ :param api: API to send a request to
+ :type api: str
+ :param endpoint: API endpoint to fetch data from
+ :type endpoint: str
+ :param args: other args for open_url
+ :param kwargs: other kwargs for open_url
+ :return: server response. JSON response is automatically deserialized.
+ :rtype: dict | list | str
+ """
+
+ default_headers = {
+ 'Authorization': "Bearer {0}".format(self._token),
+ 'Accept': "*/*" # Otherwise server doesn't set content-type header
+ }
+
+ url = self.base_url.format(api=api, endpoint=endpoint)
+
+ headers = default_headers
+ arg_headers = kwargs.pop('headers', None)
+ if arg_headers:
+ headers.update(arg_headers)
+
+ try:
+ display.vvvv('manifold lookup connecting to {0}'.format(url))
+ response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs)
+ data = response.read()
+ if response.headers.get('content-type') == 'application/json':
+ data = json.loads(data)
+ return data
+ except ValueError:
+ raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url))
+ except HTTPError as e:
+ raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format(
+ err=str(e), url=url, response=e.read()))
+ except URLError as e:
+ raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e)))
+ except SSLValidationError as e:
+ raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e)))
+ except ConnectionError as e:
+ raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e)))
+
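+ # e.g. request('marketplace', 'resources') fetches
+ # https://api.marketplace.manifold.co/v1/resources with the bearer token set.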
+ def get_resources(self, team_id=None, project_id=None, label=None):
+ """
+ Get resources list
+ :param team_id: ID of the Team to filter resources by
+ :type team_id: str
+ :param project_id: ID of the project to filter resources by
+ :type project_id: str
+ :param label: filter resources by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of resources
+ :rtype: list
+ """
+ api = 'marketplace'
+ endpoint = 'resources'
+ query_params = {}
+
+ if team_id:
+ query_params['team_id'] = team_id
+ if project_id:
+ query_params['project_id'] = project_id
+ if label:
+ query_params['label'] = label
+
+ if query_params:
+ endpoint += '?' + urlencode(query_params)
+
+ return self.request(api, endpoint)
+
+ def get_teams(self, label=None):
+ """
+ Get teams list
+ :param label: filter teams by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of teams
+ :rtype: list
+ """
+ api = 'identity'
+ endpoint = 'teams'
+ data = self.request(api, endpoint)
+ # Label filtering is not supported by API, however this function provides uniform interface
+ if label:
+ data = list(filter(lambda x: x['body']['label'] == label, data))
+ return data
+
+ def get_projects(self, label=None):
+ """
+ Get projects list
+ :param label: filter projects by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of projects
+ :rtype: list
+ """
+ api = 'marketplace'
+ endpoint = 'projects'
+ query_params = {}
+
+ if label:
+ query_params['label'] = label
+
+ if query_params:
+ endpoint += '?' + urlencode(query_params)
+
+ return self.request(api, endpoint)
+
+ def get_credentials(self, resource_id):
+ """
+ Get resource credentials
+ :param resource_id: ID of the resource to filter credentials by
+ :type resource_id: str
+ :return:
+ """
+ api = 'marketplace'
+ endpoint = 'credentials?' + urlencode({'resource_id': resource_id})
+ return self.request(api, endpoint)
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, api_token=None, project=None, team=None):
+ """
+ :param terms: a list of resources lookups to run.
+ :param variables: ansible variables active at the time of the lookup
+ :param api_token: API token
+ :param project: optional project label
+ :param team: optional team label
+ :return: a dictionary of resources credentials
+ """
+
+ if not api_token:
+ api_token = os.getenv('MANIFOLD_API_TOKEN')
+ if not api_token:
+ raise AnsibleError('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var')
+
+ try:
+ labels = terms
+ client = ManifoldApiClient(api_token)
+
+ if team:
+ team_data = client.get_teams(team)
+ if len(team_data) == 0:
+ raise AnsibleError("Team '{0}' does not exist".format(team))
+ team_id = team_data[0]['id']
+ else:
+ team_id = None
+
+ if project:
+ project_data = client.get_projects(project)
+ if len(project_data) == 0:
+ raise AnsibleError("Project '{0}' does not exist".format(project))
+ project_id = project_data[0]['id']
+ else:
+ project_id = None
+
+ if len(labels) == 1: # Use server-side filtering if one resource is requested
+ resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0])
+ else: # Get all resources and optionally filter labels
+ resources_data = client.get_resources(team_id=team_id, project_id=project_id)
+ if labels:
+ resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data))
+
+ if labels and len(resources_data) < len(labels):
+ fetched_labels = [r['body']['label'] for r in resources_data]
+ not_found_labels = [label for label in labels if label not in fetched_labels]
+ raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels)))
+
+ credentials = {}
+ cred_map = {}
+ for resource in resources_data:
+ resource_credentials = client.get_credentials(resource['id'])
+ if len(resource_credentials) and resource_credentials[0]['body']['values']:
+ for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']):
+ label = resource['body']['label']
+ if cred_key in credentials:
+ display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data "
+ "with label '{new_label}'".format(cred_key=cred_key,
+ old_label=cred_map[cred_key],
+ new_label=label))
+ credentials[cred_key] = cred_val
+ cred_map[cred_key] = label
+
+ ret = [credentials]
+ return ret
+ except ApiError as e:
+ raise AnsibleError('API Error: {0}'.format(str(e)))
+ except AnsibleError as e:
+ raise e
+ except Exception:
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ raise AnsibleError(''.join(format_exception(exc_type, exc_value, exc_traceback)))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios.py
new file mode 100644
index 00000000..c9a3c34b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios.py
@@ -0,0 +1,121 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Unknown (!UNKNOWN)
+lookup: nios
+short_description: Query Infoblox NIOS objects
+description:
+ - Uses the Infoblox WAPI to fetch the specified NIOS objects. This lookup
+ supports adding additional keywords to filter the return data and specify
+ the desired set of returned fields.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ _terms:
+ description: The name of the object to return from NIOS
+ required: True
+ return_fields:
+ description: The list of field names to return for the specified object.
+ filter:
+ description: a dict object that is used to filter the return objects
+ extattrs:
+ description: a dict object that is used to filter on extattrs
+'''
+
+EXAMPLES = """
+- name: fetch all networkview objects
+ ansible.builtin.set_fact:
+ networkviews: "{{ lookup('community.general.nios', 'networkview',
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: fetch the default dns view
+ ansible.builtin.set_fact:
+ dns_views: "{{ lookup('community.general.nios', 'view', filter={'name': 'default'},
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+# all of the examples below use credentials that are set using env variables
+# export INFOBLOX_HOST=nios01
+# export INFOBLOX_USERNAME=admin
+# export INFOBLOX_PASSWORD=admin
+
+- name: fetch all host records and include extended attributes
+ ansible.builtin.set_fact:
+ host_records: "{{ lookup('community.general.nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']}) }}"
+
+
+- name: use env variables to pass credentials
+ ansible.builtin.set_fact:
+ networkviews: "{{ lookup('community.general.nios', 'networkview') }}"
+
+- name: get a host record
+ ansible.builtin.set_fact:
+ host: "{{ lookup('community.general.nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"
+
+- name: get the authoritative zone from a non default dns view
+ ansible.builtin.set_fact:
+ host: "{{ lookup('community.general.nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
+"""
+
+RETURN = """
+obj_type:
+ description:
+ - The object type specified in the terms argument
+  type: dict
+ contains:
+ obj_field:
+ description:
+ - One or more obj_type fields as specified by return_fields argument or
+ the default set of fields as per the object type
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ try:
+ obj_type = terms[0]
+ except IndexError:
+ raise AnsibleError('the object_type must be specified')
+
+ return_fields = kwargs.pop('return_fields', None)
+ filter_data = kwargs.pop('filter', {})
+ extattrs = normalize_extattrs(kwargs.pop('extattrs', {}))
+ provider = kwargs.pop('provider', {})
+ wapi = WapiLookup(provider)
+ res = wapi.get_object(obj_type, filter_data, return_fields=return_fields, extattrs=extattrs)
+ if res is not None:
+ for obj in res:
+ if 'extattrs' in obj:
+ obj['extattrs'] = flatten_extattrs(obj['extattrs'])
+ else:
+ res = []
+ return res
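+
+# Rough usage sketch (assumes a reachable NIOS endpoint; names are illustrative):
+#
+#   wapi = WapiLookup({'host': 'nios01', 'username': 'admin', 'password': 'password'})
+#   views = wapi.get_object('networkview', {}, return_fields=None, extattrs={})
+#   # each returned dict may carry an 'extattrs' key, which flatten_extattrs()
+#   # reduces from the WAPI form {'Site': {'value': 'HQ'}} to {'Site': 'HQ'}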
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_ip.py
new file mode 100644
index 00000000..20d28523
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_ip.py
@@ -0,0 +1,100 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Unknown (!UNKNOWN)
+lookup: nios_next_ip
+short_description: Return the next available IP address for a network
+description:
+ - Uses the Infoblox WAPI API to return the next available IP addresses
+ for a given network CIDR
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ _terms:
+ description: The CIDR network to retrieve the next addresses from
+ required: True
+ num:
+ description: The number of IP addresses to return
+ required: false
+ default: 1
+ exclude:
+        description: List of IPs that need to be excluded from the returned IP addresses.
+ required: false
+'''
+
+EXAMPLES = """
+- name: return next available IP address for network 192.168.10.0/24
+ ansible.builtin.set_fact:
+ ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the next 3 available IP addresses for network 192.168.10.0/24
+ ansible.builtin.set_fact:
+ ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2']
+ ansible.builtin.set_fact:
+ ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+"""
+
+RETURN = """
+_list:
+ description:
+ - The list of next IP addresses available
+ type: list
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ try:
+ network = terms[0]
+ except IndexError:
+ raise AnsibleError('missing argument in the form of A.B.C.D/E')
+
+ provider = kwargs.pop('provider', {})
+ wapi = WapiLookup(provider)
+
+ network_obj = wapi.get_object('network', {'network': network})
+ if network_obj is None:
+ raise AnsibleError('unable to find network object %s' % network)
+
+ num = kwargs.get('num', 1)
+ exclude_ip = kwargs.get('exclude', [])
+
+ try:
+ ref = network_obj[0]['_ref']
+ avail_ips = wapi.call_func('next_available_ip', ref, {'num': num, 'exclude': exclude_ip})
+ return [avail_ips['ips']]
+ except Exception as exc:
+ raise AnsibleError(to_text(exc))
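+
+# Expected response shape for next_available_ip (inferred from the code above, not
+# a documented contract):
+#   avail_ips = wapi.call_func('next_available_ip', ref, {'num': 2, 'exclude': []})
+#   # avail_ips == {'ips': ['192.168.10.3', '192.168.10.4']}
+#   # so the lookup returns [['192.168.10.3', '192.168.10.4']]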
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_network.py
new file mode 100644
index 00000000..e76ff24b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_network.py
@@ -0,0 +1,113 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Unknown (!UNKNOWN)
+lookup: nios_next_network
+short_description: Return the next available network range for a network-container
+description:
+ - Uses the Infoblox WAPI API to return the next available network addresses for
+ a given network CIDR
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ _terms:
+        description: The CIDR of the network-container from which to retrieve the next available
+                     network.
+ required: True
+ cidr:
+ description:
+            - The CIDR (prefix length) of the networks to allocate from the next available space
+              within the specified container. The requested CIDR must be greater (more specific) than the parent CIDR.
+ required: True
+ default: 24
+ num:
+ description: The number of network addresses to return from network-container
+ required: false
+ default: 1
+ exclude:
+        description: List of network ranges to exclude from the networks returned from the network-container.
+ required: false
+ default: ''
+'''
+
+EXAMPLES = """
+- name: return next available network for network-container 192.168.10.0/24
+ ansible.builtin.set_fact:
+ networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25,
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the next 2 available network addresses for network-container 192.168.10.0/24
+ ansible.builtin.set_fact:
+ networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, num=2,
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the available network addresses for network-container 192.168.10.0/24 excluding network range '192.168.10.0/25'
+ ansible.builtin.set_fact:
+ networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+"""
+
+RETURN = """
+_list:
+ description:
+ - The list of next network addresses available
+ type: list
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ try:
+ network = terms[0]
+ except IndexError:
+ raise AnsibleError('missing network argument in the form of A.B.C.D/E')
+        # kwargs.get() cannot raise IndexError; a missing 'cidr' simply falls back to 24
+        cidr = kwargs.get('cidr', 24)
+
+ provider = kwargs.pop('provider', {})
+ wapi = WapiLookup(provider)
+ network_obj = wapi.get_object('networkcontainer', {'network': network})
+
+ if network_obj is None:
+ raise AnsibleError('unable to find network-container object %s' % network)
+ num = kwargs.get('num', 1)
+ exclude_ip = kwargs.get('exclude', [])
+
+ try:
+ ref = network_obj[0]['_ref']
+ avail_nets = wapi.call_func('next_available_network', ref, {'cidr': cidr, 'num': num, 'exclude': exclude_ip})
+ return [avail_nets['networks']]
+ except Exception as exc:
+ raise AnsibleError(to_text(exc))
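+
+# Worked example of the cidr parameter: for a /24 container such as 192.168.10.0/24,
+# cidr=25 asks WAPI for the next free /25 subnets (192.168.10.0/25, 192.168.10.128/25, ...);
+# the requested prefix must be longer (numerically greater) than the container's own prefix.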
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword.py
new file mode 100644
index 00000000..995037a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword.py
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# Copyright: (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: onepassword
+ author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+ requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+ short_description: fetch field values from 1Password
+ description:
+ - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password.
+ options:
+ _terms:
+ description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve.
+ required: True
+ field:
+ description: field to return from each matching item (case-insensitive).
+ default: 'password'
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ username:
+ description: The username used to sign in.
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ notes:
+ - This lookup will use an existing 1Password session if one exists. If not, and you have already
+      performed an initial sign in (meaning C(~/.op/config) exists), then only the C(master_password) is required.
+ You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
+ - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
+ needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
+ to the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts.
+ Facts are subject to caching if enabled, which means this data could be stored in clear text
+ on disk or in a database.
+ - Tested with C(op) version 0.5.3
+'''
+
+EXAMPLES = """
+# These examples only work when already signed in to 1Password
+- name: Retrieve password for KITT when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'KITT')
+
+- name: Retrieve password for Wintermute when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'Tessier-Ashpool', section='Wintermute')
+
+- name: Retrieve username for HAL when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'HAL 9000', field='username', vault='Discovery')
+
+- name: Retrieve password for HAL when not signed in to 1Password
+ ansible.builtin.debug:
+    var: lookup('community.general.onepassword',
+                'HAL 9000',
+                subdomain='Discovery',
+                master_password=vault_master_password)
+
+- name: Retrieve password for HAL when never signed in to 1Password
+ ansible.builtin.debug:
+    var: lookup('community.general.onepassword',
+                'HAL 9000',
+                subdomain='Discovery',
+                master_password=vault_master_password,
+                username='tweety@acme.com',
+                secret_key=vault_secret_key)
+"""
+
+RETURN = """
+ _raw:
+ description: field data requested
+ type: list
+ elements: str
+"""
+
+import errno
+import json
+import os
+
+from subprocess import Popen, PIPE
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class OnePass(object):
+
+ def __init__(self, path='op'):
+ self.cli_path = path
+ self.config_file_path = os.path.expanduser('~/.op/config')
+ self.logged_in = False
+ self.token = None
+ self.subdomain = None
+ self.username = None
+ self.secret_key = None
+ self.master_password = None
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if not self.master_password:
+ raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.')
+
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.subdomain:
+ args = ['signin', self.subdomain, '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
+ self.token = out.strip()
+
+ except AnsibleLookupError:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def get_raw(self, item_id, vault=None):
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ if not self.logged_in:
+ args += [to_bytes('--session=') + self.token]
+ rc, output, dummy = self._run(args)
+ return output
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, field, section) if output != '' else ''
+
+ def full_login(self):
+ if None in [self.subdomain, self.username, self.secret_key, self.master_password]:
+ raise AnsibleLookupError('Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.subdomain),
+ to_bytes(self.username),
+ to_bytes(self.secret_key),
+ '--output=raw',
+ ]
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
+ self.token = out.strip()
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleLookupError(to_text(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, field_name, section_title=None):
+ """
+ Retrieves the desired field from the `op` response payload
+
+ When the item is a `password` type, the password is a key within the `details` key:
+
+ $ op get item 'test item' | jq
+ {
+ [...]
+ "templateUuid": "005",
+ "details": {
+ "notesPlain": "",
+ "password": "foobar",
+ "passwordHistory": [],
+ "sections": [
+ {
+ "name": "linked items",
+ "title": "Related Items"
+ }
+ ]
+ },
+ [...]
+ }
+
+ However, when the item is a `login` type, the password is within a fields array:
+
+ $ op get item 'test item' | jq
+ {
+ [...]
+ "details": {
+ "fields": [
+ {
+ "designation": "username",
+ "name": "username",
+ "type": "T",
+ "value": "foo"
+ },
+ {
+ "designation": "password",
+ "name": "password",
+ "type": "P",
+ "value": "bar"
+ }
+ ],
+ [...]
+ },
+ [...]
+ """
+ data = json.loads(data_json)
+ if section_title is None:
+ # https://github.com/ansible-collections/community.general/pull/1610:
+ # check the details dictionary for `field_name` and return it immediately if it exists
+ # when the entry is a "password" instead of a "login" item, the password field is a key
+ # in the `details` dictionary:
+ if field_name in data['details']:
+ return data['details'][field_name]
+
+ # when the field is not found above, iterate through the fields list in the object details
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return field_data.get('value', '')
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return field_data.get('v', '')
+ return ''
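+
+    # Behaviour sketch for the two payload shapes documented above (made-up values):
+    #   op = OnePass()
+    #   op._parse_field('{"details": {"password": "foobar"}}', 'password')  # -> 'foobar'
+    #   op._parse_field('{"details": {"fields": [{"name": "password", "value": "bar"}]}}',
+    #                   'password')                                         # -> 'bar'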
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ op = OnePass()
+
+ field = kwargs.get('field', 'password')
+ section = kwargs.get('section')
+ vault = kwargs.get('vault')
+ op.subdomain = kwargs.get('subdomain')
+ op.username = kwargs.get('username')
+ op.secret_key = kwargs.get('secret_key')
+ op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
+
+ op.assert_logged_in()
+
+ values = []
+ for term in terms:
+ values.append(op.get_field(term, field, section, vault))
+ return values
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword_raw.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
new file mode 100644
index 00000000..76423a23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# Copyright: (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: onepassword_raw
+ author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+ requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+ short_description: fetch an entire item from 1Password
+ description:
+      - C(onepassword_raw) wraps the C(op) command line utility to fetch an entire item from 1Password.
+ options:
+ _terms:
+      description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve.
+ required: True
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ username:
+ description: The username used to sign in.
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ notes:
+ - This lookup will use an existing 1Password session if one exists. If not, and you have already
+      performed an initial sign in (meaning C(~/.op/config) exists), then only the C(master_password) is required.
+ You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
+ - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
+ needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
+ to the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts.
+ Facts are subject to caching if enabled, which means this data could be stored in clear text
+ on disk or in a database.
+ - Tested with C(op) version 0.5.3
+'''
+
+EXAMPLES = """
+- name: Retrieve all data about Wintermute
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword_raw', 'Wintermute')
+
+- name: Retrieve all data about Wintermute when not signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
+"""
+
+RETURN = """
+ _raw:
+ description: field data requested
+ type: list
+ elements: dict
+"""
+
+import json
+
+from ansible_collections.community.general.plugins.lookup.onepassword import OnePass
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ op = OnePass()
+
+ vault = kwargs.get('vault')
+ op.subdomain = kwargs.get('subdomain')
+ op.username = kwargs.get('username')
+ op.secret_key = kwargs.get('secret_key')
+ op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
+
+ op.assert_logged_in()
+
+ values = []
+ for term in terms:
+ data = json.loads(op.get_raw(term, vault))
+ values.append(data)
+ return values
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/passwordstore.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/passwordstore.py
new file mode 100644
index 00000000..4d0f6461
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/passwordstore.py
@@ -0,0 +1,302 @@
+# (c) 2017, Patrick Deelman <patrick@patrickdeelman.nl>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+ lookup: passwordstore
+ author:
+ - Patrick Deelman (!UNKNOWN) <patrick@patrickdeelman.nl>
+ short_description: manage passwords with passwordstore.org's pass utility
+ description:
+      - Enables Ansible to retrieve, create, or update passwords from the passwordstore.org pass utility.
+        It can also retrieve YAML-style keys stored as multiple lines in the password file.
+ options:
+ _terms:
+ description: query key.
+ required: True
+ passwordstore:
+ description: location of the password store.
+ default: '~/.password-store'
+ directory:
+ description: The directory of the password store.
+ env:
+ - name: PASSWORD_STORE_DIR
+ create:
+ description: Create the password if it does not already exist.
+ type: bool
+ default: 'no'
+ overwrite:
+ description: Overwrite the password if it does already exist.
+ type: bool
+ default: 'no'
+ umask:
+ description:
+        - Sets the umask for the created .gpg files. The first octal digit must not be greater than 3
+          (otherwise the password file would not be user readable).
+        - Note that pass' default value is C('077').
+ env:
+ - name: PASSWORD_STORE_UMASK
+ version_added: 1.3.0
+ returnall:
+ description: Return all the content of the password, not only the first line.
+ type: bool
+ default: 'no'
+ subkey:
+ description: Return a specific subkey of the password. When set to C(password), always returns the first line.
+ default: password
+ userpass:
+ description: Specify a password to save, instead of a generated one.
+ length:
+ description: The length of the generated password.
+        type: int
+ default: 16
+ backup:
+ description: Used with C(overwrite=yes). Backup the previous password in a subkey.
+ type: bool
+ default: 'no'
+ nosymbols:
+        description: Use only alphanumeric characters (no symbols) in the generated password.
+ type: bool
+ default: 'no'
+'''
+EXAMPLES = """
+# Debug is used for examples, BAD IDEA to show passwords on screen
+- name: Basic lookup. Fails if example/test doesn't exist
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"
+
+- name: Create pass with random 16 character password. If password exists just give the password
+ ansible.builtin.debug:
+ var: mypassword
+ vars:
+ mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}"
+
+- name: Different size password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}"
+
+- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}"
+
+- name: Create an alphanumeric password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}"
+
+- name: Return the value for user in the KV pair user, username
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}"
+
+- name: Return the entire password file content
+ ansible.builtin.set_fact:
+ passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - a password
+ type: list
+ elements: str
+"""
+
+import os
+import subprocess
+import time
+
+from distutils import util
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils.encrypt import random_password
+from ansible.plugins.lookup import LookupBase
+from ansible import constants as C
+
+
+# backhacked check_output with input for python 2.7
+# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
+def check_output2(*popenargs, **kwargs):
+ if 'stdout' in kwargs:
+ raise ValueError('stdout argument not allowed, it will be overridden.')
+ if 'stderr' in kwargs:
+ raise ValueError('stderr argument not allowed, it will be overridden.')
+ if 'input' in kwargs:
+ if 'stdin' in kwargs:
+ raise ValueError('stdin and input arguments may not both be used.')
+ b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
+ del kwargs['input']
+ kwargs['stdin'] = subprocess.PIPE
+ else:
+ b_inputdata = None
+ process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
+ try:
+ b_out, b_err = process.communicate(b_inputdata)
+ except Exception:
+ process.kill()
+ process.wait()
+ raise
+ retcode = process.poll()
+ if retcode != 0 or \
+ b'encryption failed: Unusable public key' in b_out or \
+ b'encryption failed: Unusable public key' in b_err:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ raise subprocess.CalledProcessError(
+ retcode,
+ cmd,
+ to_native(b_out + b_err, errors='surrogate_or_strict')
+ )
+ return b_out
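+
+# Usage sketch (mirrors how update_password()/generate_password() call it below;
+# the pass entry name is illustrative):
+#   out = check_output2(['pass', 'insert', '-f', '-m', 'example/test'],
+#                       input='s3cret\n', env=os.environ.copy())
+#   # raises subprocess.CalledProcessError on non-zero exit or an unusable GPG key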
+
+
+class LookupModule(LookupBase):
+ def parse_params(self, term):
+ # I went with the "traditional" param followed with space separated KV pairs.
+ # Waiting for final implementation of lookup parameter parsing.
+ # See: https://github.com/ansible/ansible/issues/12255
+ params = term.split()
+ if len(params) > 0:
+ # the first param is the pass-name
+ self.passname = params[0]
+ # next parse the optional parameters in keyvalue pairs
+ try:
+ for param in params[1:]:
+ name, value = param.split('=', 1)
+ if name not in self.paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' % name)
+ self.paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+ # check and convert values
+ try:
+ for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
+ if not isinstance(self.paramvals[key], bool):
+ self.paramvals[key] = util.strtobool(self.paramvals[key])
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+ if not isinstance(self.paramvals['length'], int):
+ if self.paramvals['length'].isdigit():
+ self.paramvals['length'] = int(self.paramvals['length'])
+ else:
+ raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
+
+ # Collect pass environment variables from the plugin's parameters.
+ self.env = os.environ.copy()
+
+ # Set PASSWORD_STORE_DIR if directory is set
+ if self.paramvals['directory']:
+ if os.path.isdir(self.paramvals['directory']):
+ self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory']
+ else:
+ raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
+
+ # Set PASSWORD_STORE_UMASK if umask is set
+ if 'umask' in self.paramvals:
+ if len(self.paramvals['umask']) != 3:
+ raise AnsibleError('Passwordstore umask must have a length of 3.')
+ elif int(self.paramvals['umask'][0]) > 3:
+ raise AnsibleError('Passwordstore umask not allowed (password not user readable).')
+ else:
+ self.env['PASSWORD_STORE_UMASK'] = self.paramvals['umask']
+
+ def check_pass(self):
+ try:
+ self.passoutput = to_text(
+ check_output2(["pass", "show", self.passname], env=self.env),
+ errors='surrogate_or_strict'
+ ).splitlines()
+ self.password = self.passoutput[0]
+ self.passdict = {}
+ for line in self.passoutput[1:]:
+ if ':' in line:
+ name, value = line.split(':', 1)
+ self.passdict[name.strip()] = value.strip()
+ except (subprocess.CalledProcessError) as e:
+ if e.returncode != 0 and 'not in the password store' in e.output:
+                # pass returns 1 and its output contains 'is not in the password store.'
+                # when the entry is missing; decide whether that is acceptable (create=True) or an error.
+ if not self.paramvals['create']:
+ raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
+ else:
+ return False
+ else:
+ raise AnsibleError(e)
+ return True
+
+ def get_newpass(self):
+ if self.paramvals['nosymbols']:
+ chars = C.DEFAULT_PASSWORD_CHARS[:62]
+ else:
+ chars = C.DEFAULT_PASSWORD_CHARS
+
+ if self.paramvals['userpass']:
+ newpass = self.paramvals['userpass']
+ else:
+ newpass = random_password(length=self.paramvals['length'], chars=chars)
+ return newpass
+
+ def update_password(self):
+ # generate new password, insert old lines from current result and return new password
+ newpass = self.get_newpass()
+ datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+ msg = newpass + '\n'
+ if self.passoutput[1:]:
+ msg += '\n'.join(self.passoutput[1:]) + '\n'
+ if self.paramvals['backup']:
+ msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
+ try:
+ check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
+ except (subprocess.CalledProcessError) as e:
+ raise AnsibleError(e)
+ return newpass
+
+ def generate_password(self):
+ # generate new file and insert lookup_pass: Generated by Ansible on {date}
+ # use pwgen to generate the password and insert values with pass -m
+ newpass = self.get_newpass()
+ datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+ msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
+ try:
+ check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
+ except (subprocess.CalledProcessError) as e:
+ raise AnsibleError(e)
+ return newpass
+
+ def get_passresult(self):
+ if self.paramvals['returnall']:
+ return os.linesep.join(self.passoutput)
+ if self.paramvals['subkey'] == 'password':
+ return self.password
+ else:
+ if self.paramvals['subkey'] in self.passdict:
+ return self.passdict[self.paramvals['subkey']]
+ else:
+ return None
+
+ def run(self, terms, variables, **kwargs):
+ result = []
+ self.paramvals = {
+ 'subkey': 'password',
+ 'directory': variables.get('passwordstore'),
+ 'create': False,
+ 'returnall': False,
+ 'overwrite': False,
+ 'nosymbols': False,
+ 'userpass': '',
+ 'length': 16,
+ 'backup': False,
+ }
+
+ for term in terms:
+ self.parse_params(term) # parse the input into paramvals
+ if self.check_pass(): # password exists
+ if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
+ result.append(self.update_password())
+ else:
+ result.append(self.get_passresult())
+ else: # password does not exist
+ if self.paramvals['create']:
+ result.append(self.generate_password())
+ return result
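+
+# Term grammar sketch: the first whitespace-separated token is the pass-name and the
+# rest are key=value overrides of paramvals, e.g.
+#   'example/test create=true length=42'
+# parses to passname='example/test', paramvals['create']=True, paramvals['length']=42.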
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/redis.py
new file mode 100644
index 00000000..67d35c22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/redis.py
@@ -0,0 +1,117 @@
+# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: redis
+ author:
+ - Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ - Ansible Core Team
+ short_description: fetch data from Redis
+ description:
+ - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
+ requirements:
+ - redis (python library https://github.com/andymccurdy/redis-py/)
+ options:
+ _terms:
+ description: list of keys to query
+ host:
+ description: location of Redis host
+ default: '127.0.0.1'
+ env:
+ - name: ANSIBLE_REDIS_HOST
+ ini:
+ - section: lookup_redis
+ key: host
+ port:
+      description: port on which Redis is listening
+ default: 6379
+ type: int
+ env:
+ - name: ANSIBLE_REDIS_PORT
+ ini:
+ - section: lookup_redis
+ key: port
+ socket:
+      description: path to the socket on which to query Redis; when set, this option overrides the host and port options.
+ type: path
+ env:
+ - name: ANSIBLE_REDIS_SOCKET
+ ini:
+ - section: lookup_redis
+ key: socket
+'''
+
+EXAMPLES = """
+- name: query redis for somekey (default or configured settings used)
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'somekey') }}"
+
+- name: query redis for list of keys and non-default host and port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', item, host='myredis.internal.com', port=2121) }}"
+ loop: '{{list_of_redis_keys}}'
+
+- name: use list directly
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'key1', 'key2', 'key3') }}"
+
+- name: use list directly with a socket
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
+
+"""
+
+RETURN = """
+_raw:
+ description: value(s) stored in Redis
+ type: list
+ elements: str
+"""
+
+import os
+
+HAVE_REDIS = False
+try:
+ import redis
+ HAVE_REDIS = True
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ if not HAVE_REDIS:
+ raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
+
+ # get options
+ self.set_options(direct=kwargs)
+
+ # setup connection
+ host = self.get_option('host')
+ port = self.get_option('port')
+ socket = self.get_option('socket')
+ if socket is None:
+ conn = redis.Redis(host=host, port=port)
+ else:
+ conn = redis.Redis(unix_socket_path=socket)
+
+ ret = []
+ for term in terms:
+ try:
+ res = conn.get(term)
+ if res is None:
+ res = ""
+ ret.append(to_text(res))
+ except Exception as e:
+ # connection failed or key not found
+ raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+ return ret
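+
+# Behaviour sketch: redis-py returns bytes, or None for a missing key, so a miss
+# becomes an empty string here:
+#   conn.get('somekey')   # -> b'somevalue', appended as u'somevalue'
+#   conn.get('missing')   # -> None, appended as u''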
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/shelvefile.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/shelvefile.py
new file mode 100644
index 00000000..cfeb61f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/shelvefile.py
@@ -0,0 +1,90 @@
+# (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: shelvefile
+ author: Alejandro Guirao (!UNKNOWN) <lekumberri@gmail.com>
+ short_description: read keys from Python shelve file
+ description:
+ - Read keys from Python shelve file.
+ options:
+ _terms:
+ description: sets of key value pairs of parameters
+ key:
+ description: key to query
+ required: True
+ file:
+ description: path to shelve file
+ required: True
+'''
+
+EXAMPLES = """
+- name: retrieve a string value corresponding to a key inside a Python shelve file
+  ansible.builtin.debug: msg="{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
+"""
+
+RETURN = """
+_list:
+ description: value(s) of key(s) in shelve file(s)
+ type: list
+ elements: str
+"""
+import shelve
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+ def read_shelve(self, shelve_filename, key):
+ """
+ Read the value of "key" from a shelve file
+ """
+ d = shelve.open(to_bytes(shelve_filename))
+ res = d.get(key, None)
+ d.close()
+ return res
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if not isinstance(terms, list):
+ terms = [terms]
+
+ ret = []
+
+ for term in terms:
+ paramvals = {"file": None, "key": None}
+ params = term.split()
+
+ try:
+ for param in params:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' % name)
+ paramvals[name] = value
+
+ except (ValueError, AssertionError) as e:
+ # In case "file" or "key" are not present
+ raise AnsibleError(e)
+
+ key = paramvals['key']
+
+ # Search also in the role/files directory and in the playbook directory
+ shelvefile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
+
+ if shelvefile:
+ res = self.read_shelve(shelvefile, key)
+ if res is None:
+ raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile))
+ # Convert the value read to string
+ ret.append(to_text(res))
+ break
+ else:
+ raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file'])
+
+ return ret
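+
+# Sketch of producing a shelve file this lookup can read (illustrative paths; the
+# on-disk filename can vary with the dbm backend in use):
+#   import shelve
+#   d = shelve.open('path_to_some_shelve_file.db')
+#   d['key_to_retrieve'] = 'some value'
+#   d.close()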
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/tss.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/tss.py
new file mode 100644
index 00000000..32c0460c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/tss.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+lookup: tss
+author: Adam Migus (@amigus) <adam@migus.org>
+short_description: Get secrets from Thycotic Secret Server
+version_added: 1.0.0
+description:
+ - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
+ Server using token authentication with I(username) and I(password) on
+ the REST API at I(base_url).
+requirements:
+ - python-tss-sdk - https://pypi.org/project/python-tss-sdk/
+options:
+ _terms:
+ description: The integer ID of the secret.
+ required: true
+ type: int
+ base_url:
+ description: The base URL of the server, e.g. C(https://localhost/SecretServer).
+ env:
+ - name: TSS_BASE_URL
+ ini:
+ - section: tss_lookup
+ key: base_url
+ required: true
+ username:
+ description: The username with which to request the OAuth2 Access Grant.
+ env:
+ - name: TSS_USERNAME
+ ini:
+ - section: tss_lookup
+ key: username
+ required: true
+ password:
+ description: The password associated with the supplied username.
+ env:
+ - name: TSS_PASSWORD
+ ini:
+ - section: tss_lookup
+ key: password
+ required: true
+ api_path_uri:
+ default: /api/v1
+ description: The path to append to the base URL to form a valid REST
+ API request.
+ env:
+ - name: TSS_API_PATH_URI
+ required: false
+ token_path_uri:
+ default: /oauth2/token
+ description: The path to append to the base URL to form a valid OAuth2
+ Access Grant request.
+ env:
+ - name: TSS_TOKEN_PATH_URI
+ required: false
+"""
+
+RETURN = r"""
+_list:
+ description:
+ - The JSON responses to C(GET /secrets/{id}).
+ - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
+ type: list
+ elements: dict
+"""
+
+EXAMPLES = r"""
+- hosts: localhost
+ vars:
+ secret: "{{ lookup('community.general.tss', 1) }}"
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
+"""
+
+from ansible.errors import AnsibleError, AnsibleOptionsError
+
+sdk_is_missing = False
+
+try:
+ from thycotic.secrets.server import (
+ SecretServer,
+ SecretServerAccessError,
+ SecretServerError,
+ )
+except ImportError:
+ sdk_is_missing = True
+
+from ansible.utils.display import Display
+from ansible.plugins.lookup import LookupBase
+
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+ @staticmethod
+ def Client(server_parameters):
+ return SecretServer(**server_parameters)
+
+ def run(self, terms, variables, **kwargs):
+ if sdk_is_missing:
+ raise AnsibleError("python-tss-sdk must be installed to use this plugin")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ secret_server = LookupModule.Client(
+ {
+ "base_url": self.get_option("base_url"),
+ "username": self.get_option("username"),
+ "password": self.get_option("password"),
+ "api_path_uri": self.get_option("api_path_uri"),
+ "token_path_uri": self.get_option("token_path_uri"),
+ }
+ )
+ result = []
+
+ for term in terms:
+ display.debug("tss_lookup term: %s" % term)
+ try:
+ id = int(term)
+ display.vvv(u"Secret Server lookup of Secret with ID %d" % id)
+ result.append(secret_server.get_secret_json(id))
+ except ValueError:
+ raise AnsibleOptionsError("Secret ID must be an integer")
+ except SecretServerError as error:
+ raise AnsibleError("Secret Server lookup failure: %s" % error.message)
+ return result
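+
+# Response shape assumed by the EXAMPLES above (abridged, values made up):
+#   {'id': 1, 'items': [{'slug': 'password', 'itemValue': 's3cret'}, ...]}
+# so items2dict(key_name='slug', value_name='itemValue') yields {'password': 's3cret'}.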
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_mount.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_mount.py
new file mode 100644
index 00000000..62feb354
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_mount.py
@@ -0,0 +1,90 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is based on
+# Lib/posixpath.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+import os
+
+
+def ismount(path):
+ """Test whether a path is a mount point
+ This is a copy of the upstream version of ismount(). Originally this was copied here as a workaround
+ until Python issue 2466 was fixed. Now it is here so this will work on older versions of Python
+ that may not have the upstream fix.
+ https://github.com/ansible/ansible-modules-core/issues/2186
+ http://bugs.python.org/issue2466
+ """
+ try:
+ s1 = os.lstat(path)
+ except (OSError, ValueError):
+ # It doesn't exist -- so not a mount point. :-)
+ return False
+ else:
+ # A symlink can never be a mount point
+ if os.path.stat.S_ISLNK(s1.st_mode):
+ return False
+
+ if isinstance(path, bytes):
+ parent = os.path.join(path, b'..')
+ else:
+ parent = os.path.join(path, '..')
+ parent = os.path.realpath(parent)
+ try:
+ s2 = os.lstat(parent)
+ except (OSError, ValueError):
+ return False
+
+ dev1 = s1.st_dev
+ dev2 = s2.st_dev
+ if dev1 != dev2:
+        return True     # path/.. on a different device than path
+ ino1 = s1.st_ino
+ ino2 = s2.st_ino
+ if ino1 == ino2:
+ return True # path/.. is the same i-node as path
+ return False
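+
+# Usage sketch:
+#   ismount('/')        # True on any POSIX system: / is always a mount point
+#   ismount('/etc')     # False unless /etc is a separate filesystem
+#   ismount('/nothere') # False: nonexistent paths are never mount points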
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_netapp.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_netapp.py
new file mode 100644
index 00000000..d80506bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_netapp.py
@@ -0,0 +1,747 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import random
+import mimetypes
+
+from pprint import pformat
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from netapp_lib.api.zapi import zapi
+ HAS_NETAPP_LIB = True
+except ImportError:
+ HAS_NETAPP_LIB = False
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+import ssl
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+HAS_SF_SDK = False
+SF_BYTE_MAP = dict(
+ # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
+ bytes=1,
+ b=1,
+ kb=1000,
+ mb=1000 ** 2,
+ gb=1000 ** 3,
+ tb=1000 ** 4,
+ pb=1000 ** 5,
+ eb=1000 ** 6,
+ zb=1000 ** 7,
+ yb=1000 ** 8
+)
+
+POW2_BYTE_MAP = dict(
+ # Here, 1 kb = 1024
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+)
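+
+# Worked example: 2 * SF_BYTE_MAP['gb'] == 2000000000 (decimal GB),
+# while 2 * POW2_BYTE_MAP['gb'] == 2147483648 (2 GiB).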
+
+try:
+ from solidfire.factory import ElementFactory
+ from solidfire.custom.models import TimeIntervalFrequency
+ from solidfire.models import Schedule, ScheduleInfo
+
+ HAS_SF_SDK = True
+except Exception:
+ HAS_SF_SDK = False
+
+
+def has_netapp_lib():
+ return HAS_NETAPP_LIB
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+def na_ontap_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True),
+ https=dict(required=False, type='bool', default=False),
+ validate_certs=dict(required=False, type='bool', default=True),
+ http_port=dict(required=False, type='int'),
+ ontapi=dict(required=False, type='int'),
+ use_rest=dict(required=False, type='str', default='Auto', choices=['Never', 'Always', 'Auto'])
+ )
+
+
+def ontap_sf_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True)
+ )
+
+
+def aws_cvs_host_argument_spec():
+
+ return dict(
+ api_url=dict(required=True, type='str'),
+ validate_certs=dict(required=False, type='bool', default=True),
+ api_key=dict(required=True, type='str', no_log=True),
+ secret_key=dict(required=True, type='str', no_log=True)
+ )
+
+
+def create_sf_connection(module, port=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_SF_SDK and hostname and username and password:
+ try:
+ return_val = ElementFactory.create(hostname, username, password, port=port)
+ return return_val
+ except Exception:
+ raise Exception("Unable to create SF connection")
+ else:
+ module.fail_json(msg="the python SolidFire SDK module is required")
+
+
+def setup_na_ontap_zapi(module, vserver=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ https = module.params['https']
+ validate_certs = module.params['validate_certs']
+ port = module.params['http_port']
+ version = module.params['ontapi']
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ if version:
+ minor = version
+ else:
+ minor = 110
+ server.set_api_version(major=1, minor=minor)
+ # default is HTTP
+ if https:
+ if port is None:
+ port = 443
+ transport_type = 'HTTPS'
+ # HACK to bypass certificate verification
+ if validate_certs is False:
+ if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ else:
+ if port is None:
+ port = 80
+ transport_type = 'HTTP'
+ server.set_transport_type(transport_type)
+ server.set_port(port)
+ server.set_server_type('FILER')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def setup_ontap_zapi(module, vserver=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ # Todo : Replace hard-coded values with configurable parameters.
+ server.set_api_version(major=1, minor=110)
+ server.set_port(80)
+ server.set_server_type('FILER')
+ server.set_transport_type('HTTP')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def eseries_host_argument_spec():
+ """Retrieve a base argument specification common to all NetApp E-Series modules"""
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=False, default='1'),
+ validate_certs=dict(type='bool', required=False, default=True)
+ ))
+ return argument_spec
+
+
+class NetAppESeriesModule(object):
+ """Base class for all NetApp E-Series modules.
+
+ Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
+ verification, http requests, secure http redirection for embedded web services, and logging setup.
+
+ Be sure to add the following lines in the module's documentation section:
+ extends_documentation_fragment:
+ - netapp.eseries
+
+ :param dict(dict) ansible_options: dictionary of ansible option definitions
+ :param str web_services_version: minimum required web services rest api version (default value: "02.00.0000.0000")
+ :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
+ :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
+ :param list(list) required_if: list containing list(s) containing the option, the option value, and then
+ a list of required options. (optional)
+ :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
+ :param list(list) required_together: list containing list(s) of options that are required together. (optional)
+ :param bool log_requests: controls whether to log each request (default: True)
+ """
+ DEFAULT_TIMEOUT = 60
+ DEFAULT_SECURE_PORT = "8443"
+ DEFAULT_REST_API_PATH = "devmgr/v2/"
+ DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
+ DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
+ "netapp-client-type": "Ansible-%s" % ansible_version}
+ HTTP_AGENT = "Ansible / %s" % ansible_version
+ SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
+ pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)
+
+ def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
+ mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
+ log_requests=True):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(ansible_options)
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive, required_if=required_if,
+ required_one_of=required_one_of, required_together=required_together)
+
+ args = self.module.params
+ self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.log_requests = log_requests
+ self.creds = dict(url_username=args["api_username"],
+ url_password=args["api_password"],
+ validate_certs=args["validate_certs"])
+
+ if not self.url.endswith("/"):
+ self.url += "/"
+
+ self.is_embedded_mode = None
+ self.is_web_services_valid_cache = None
+
+ def _check_web_services_version(self):
+ """Verify proxy or embedded web services meets minimum version required for module.
+
+ The minimum required web services version is evaluated against the version supplied through the web services rest
+ api. An AnsibleFailJson exception will be raised when the minimum is not met.
+
+ This helper function will update the supplied api url if secure http is not used for embedded web services
+
+ :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
+ """
+ if not self.is_web_services_valid_cache:
+
+ url_parts = urlparse(self.url)
+ if not url_parts.scheme or not url_parts.netloc:
+ self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
+
+ if url_parts.scheme not in ["http", "https"]:
+ self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
+
+ self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)
+
+ if rc != 200:
+ self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
+ self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ # Compare the components numerically; string comparison would sort "10" before "9".
+ major, minor, dummy, revision = [int(part) for part in data["version"].split(".")]
+ minimum_major, minimum_minor, dummy, minimum_revision = [int(part) for part in self.web_services_version.split(".")]
+
+ if not (major > minimum_major or
+ (major == minimum_major and minor > minimum_minor) or
+ (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
+ self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
+ " Version required: [%s]." % (data["version"], self.web_services_version))
+
+ self.module.log("Web services rest api version met the minimum required version.")
+ self.is_web_services_valid_cache = True
+
+ def is_embedded(self):
+ """Determine whether web services server is the embedded web services.
+
+ If the web services about endpoint fails with a URLError then the request will be attempted again using
+ secure http.
+
+ :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
+ :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
+ """
+ self._check_web_services_version()
+
+ if self.is_embedded_mode is None:
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ self.is_embedded_mode = not data["runningAsProxy"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ return self.is_embedded_mode
+
+ def request(self, path, data=None, method='GET', headers=None, ignore_errors=False):
+ """Issue an HTTP request to a url, retrieving an optional JSON response.
+
+ :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
+ full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
+ :param data: data required for the request (data may be json or any python structured data)
+ :param str method: request method such as GET, POST, DELETE.
+ :param dict headers: dictionary containing request headers.
+ :param bool ignore_errors: forces the request to ignore any raised exceptions.
+ """
+ self._check_web_services_version()
+
+ if headers is None:
+ headers = self.DEFAULT_HEADERS
+
+ if not isinstance(data, str) and headers.get("Content-Type") == "application/json":
+ data = json.dumps(data)
+
+ if path.startswith("/"):
+ path = path[1:]
+ request_url = self.url + self.DEFAULT_REST_API_PATH + path
+
+ if self.log_requests:
+ self.module.log(pformat(dict(url=request_url, data=data, method=method)))
+
+ return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
+ timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
+
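+# Illustrative usage sketch (the 'my_option' name is hypothetical; real modules
+# pass their own ansible_options):
+#
+#   module = NetAppESeriesModule(ansible_options=dict(my_option=dict(type='str')),
+#                                web_services_version="02.00.0000.0000",
+#                                supports_check_mode=True)
+#   rc, graph = module.request("storage-systems/%s/graph" % module.ssid)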
+
+def create_multipart_formdata(files, fields=None, send_8kb=False):
+ """Create the data for a multipart/form request.
+
+ :param list(list) files: list of lists each containing (name, filename, path).
+ :param list(list) fields: list of lists each containing (key, value).
+ :param bool send_8kb: only sends the first 8kb of the files (default: False).
+ """
+ boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
+ data_parts = list()
+ data = None
+
+ if six.PY2: # Generate payload for Python 2
+ newline = "\r\n"
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"' % key,
+ "",
+ value])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
+ "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
+ "",
+ value])
+ data_parts.extend(["--%s--" % boundary, ""])
+ data = newline.join(data_parts)
+
+ else:
+ newline = six.b("\r\n")
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"' % key),
+ six.b(""),
+ six.b(value)])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
+ six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
+ six.b(""),
+ value])
+ data_parts.extend([six.b("--%s--" % boundary), b""])
+ data = newline.join(data_parts)
+
+ headers = {
+ "Content-Type": "multipart/form-data; boundary=%s" % boundary,
+ "Content-Length": str(len(data))}
+
+ return headers, data
+
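+# Illustrative usage sketch (the file tuple and URL are hypothetical):
+#
+#   headers, data = create_multipart_formdata(files=[("firmware", "fw.dlp", "/tmp/fw.dlp")])
+#   rc, response = request(upload_url, method="POST", data=data, headers=headers)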
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ """Issue an HTTP request to a url, retrieving an optional JSON response."""
+
+ if headers is None:
+ headers = {"Content-Type": "application/json", "Accept": "application/json"}
+ headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
+
+ if not http_agent:
+ http_agent = "Ansible / %s" % ansible_version
+
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version,
+ category="Information", event="setup", autosupport="false"):
+ ems_log = zapi.NaElement('ems-autosupport-log')
+ # Host name invoking the API.
+ ems_log.add_new_child("computer-name", name)
+ # ID of event. A user defined event-id, range [0..2^32-2].
+ ems_log.add_new_child("event-id", id)
+ # Name of the application invoking the API.
+ ems_log.add_new_child("event-source", source)
+ # Version of application invoking the API.
+ ems_log.add_new_child("app-version", version)
+ # Application defined category of the event.
+ ems_log.add_new_child("category", category)
+ # Description of event to log. An application defined message to log.
+ ems_log.add_new_child("event-description", event)
+ ems_log.add_new_child("log-level", "6")
+ ems_log.add_new_child("auto-support", autosupport)
+ server.invoke_successfully(ems_log, True)
+
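+# Illustrative usage sketch (the event source string is hypothetical):
+#
+#   server = setup_na_ontap_zapi(module)
+#   ems_log_event("na_ontap_volume", server)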
+
+def get_cserver_zapi(server):
+ vserver_info = zapi.NaElement('vserver-get-iter')
+ query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
+ query = zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+ result = server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+ attribute_list = result.get_child_by_name('attributes-list')
+ vserver_list = attribute_list.get_child_by_name('vserver-info')
+ return vserver_list.get_child_content('vserver-name')
+
+
+def get_cserver(connection, is_rest=False):
+ if not is_rest:
+ return get_cserver_zapi(connection)
+
+ params = {'fields': 'type'}
+ api = "private/cli/vserver"
+ json, error = connection.get(api, params)
+ if json is None or error is not None:
+ # exit if there is an error or no data
+ return None
+ vservers = json.get('records')
+ if vservers is not None:
+ for vserver in vservers:
+ if vserver['type'] == 'admin': # cluster admin
+ return vserver['vserver']
+ if len(vservers) == 1: # assume vserver admin
+ return vservers[0]['vserver']
+
+ return None
+
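+# Illustrative usage sketch: the connection is either a ZAPI server or a REST
+# wrapper (see OntapRestAPI below).
+#
+#   cserver = get_cserver(server)                  # ZAPI connection
+#   cserver = get_cserver(rest_api, is_rest=True)  # OntapRestAPI connection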
+
+class OntapRestAPI(object):
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.username = self.module.params['username']
+ self.password = self.module.params['password']
+ self.hostname = self.module.params['hostname']
+ self.use_rest = self.module.params['use_rest']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ self.url = 'https://' + self.hostname + '/api/'
+ self.errors = list()
+ self.debug_logs = list()
+ self.check_required_library()
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None, return_status_code=False):
+ ''' send http request and process response, including error conditions '''
+ url = self.url + api
+ status_code = None
+ content = None
+ json_dict = None
+ json_error = None
+ error_details = None
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+ except ValueError:
+ return None, None
+ error = json.get('error')
+ return json, error
+
+ try:
+ response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json)
+ content = response.content # for debug purposes
+ status_code = response.status_code
+ # If the response was successful, no Exception will be raised
+ response.raise_for_status()
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ self.log_error(status_code, 'HTTP error: %s' % err)
+ error_details = str(err)
+ # If an error was reported in the json payload, it is handled below
+ except requests.exceptions.ConnectionError as err:
+ self.log_error(status_code, 'Connection error: %s' % err)
+ error_details = str(err)
+ except Exception as err:
+ self.log_error(status_code, 'Other error: %s' % err)
+ error_details = str(err)
+ if json_error is not None:
+ self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
+ error_details = json_error
+ self.log_debug(status_code, content)
+ if return_status_code:
+ return status_code, error_details
+ return json_dict, error_details
+
+ def get(self, api, params):
+ method = 'GET'
+ return self.send_request(method, api, params)
+
+ def post(self, api, data, params=None):
+ method = 'POST'
+ return self.send_request(method, api, params, json=data)
+
+ def patch(self, api, data, params=None):
+ method = 'PATCH'
+ return self.send_request(method, api, params, json=data)
+
+ def delete(self, api, data, params=None):
+ method = 'DELETE'
+ return self.send_request(method, api, params, json=data)
+
+ def _is_rest(self, used_unsupported_rest_properties=None):
+ if self.use_rest == "Always":
+ if used_unsupported_rest_properties:
+ error = "REST API currently does not support '%s'" % \
+ ', '.join(used_unsupported_rest_properties)
+ return True, error
+ else:
+ return True, None
+ if self.use_rest == 'Never' or used_unsupported_rest_properties:
+ # force ZAPI if requested or if some parameter requires it
+ return False, None
+ method = 'HEAD'
+ api = 'cluster/software'
+ status_code, __ = self.send_request(method, api, params=None, return_status_code=True)
+ if status_code == 200:
+ return True, None
+ return False, None
+
+ def is_rest(self, used_unsupported_rest_properties=None):
+ ''' only return error if there is a reason to '''
+ use_rest, error = self._is_rest(used_unsupported_rest_properties)
+ if used_unsupported_rest_properties is None:
+ return use_rest
+ return use_rest, error
+
+ def log_error(self, status_code, message):
+ self.errors.append(message)
+ self.debug_logs.append((status_code, message))
+
+ def log_debug(self, status_code, content):
+ self.debug_logs.append((status_code, content))
+
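+# Illustrative usage sketch ('storage/volumes' is one example of a public ONTAP
+# REST endpoint; the fields value is an assumption):
+#
+#   rest_api = OntapRestAPI(module)
+#   if rest_api.is_rest():
+#       records, error = rest_api.get('storage/volumes', {'fields': 'name'})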
+
+class AwsCvsRestAPI(object):
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.api_key = self.module.params['api_key']
+ self.secret_key = self.module.params['secret_key']
+ self.api_url = self.module.params['api_url']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ self.url = 'https://' + self.api_url + '/v1/'
+ self.check_required_library()
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None):
+ ''' send http request and process response, including error conditions '''
+ url = self.url + api
+ status_code = None
+ content = None
+ json_dict = None
+ json_error = None
+ error_details = None
+ headers = {
+ 'Content-type': "application/json",
+ 'api-key': self.api_key,
+ 'secret-key': self.secret_key,
+ 'Cache-Control': "no-cache",
+ }
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+
+ except ValueError:
+ return None, None
+ success_code = [200, 201, 202]
+ if response.status_code not in success_code:
+ error = json.get('message')
+ else:
+ error = None
+ return json, error
+ try:
+ response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
+ status_code = response.status_code
+ # If the response was successful, no Exception will be raised
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ error_details = str(err)
+ except requests.exceptions.ConnectionError as err:
+ error_details = str(err)
+ except Exception as err:
+ error_details = str(err)
+ # If an error was reported in the json payload, it takes precedence over the raw exception text
+ if json_error is not None:
+ error_details = json_error
+
+ return json_dict, error_details
+
+ def get(self, api, params=None):
+ method = 'GET'
+ return self.send_request(method, api, params)
+
+ def post(self, api, data, params=None):
+ method = 'POST'
+ return self.send_request(method, api, params, json=data)
+
+ def patch(self, api, data, params=None):
+ method = 'PATCH'
+ return self.send_request(method, api, params, json=data)
+
+ def put(self, api, data, params=None):
+ method = 'PUT'
+ return self.send_request(method, api, params, json=data)
+
+ def delete(self, api, data, params=None):
+ method = 'DELETE'
+ return self.send_request(method, api, params, json=data)
+
+ def get_state(self, jobId):
+ """ Poll the job until its state is 'done' """
+ response, dummy = self.get('Jobs/%s' % jobId)
+ while str(response['state']) != 'done':
+ response, dummy = self.get('Jobs/%s' % jobId)
+ return 'done'
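+
+# Illustrative usage sketch (the job id is hypothetical):
+#
+#   rest = AwsCvsRestAPI(module)
+#   state = rest.get_state('a1b2c3')  # blocks until the job reports 'done'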
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_ovirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_ovirt.py
new file mode 100644
index 00000000..5ccd1482
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_ovirt.py
@@ -0,0 +1,871 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import inspect
+import os
+import time
+
+from abc import ABCMeta, abstractmethod
+from datetime import datetime
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.cloud import CloudRetry
+from ansible.module_utils.common._collections_compat import Mapping
+
+try:
+ from enum import Enum # enum is a ovirtsdk4 requirement
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.version as sdk_version
+ import ovirtsdk4.types as otypes
+ HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.3.0')
+except ImportError:
+ HAS_SDK = False
+
+
+BYTES_MAP = {
+ 'kib': 2**10,
+ 'mib': 2**20,
+ 'gib': 2**30,
+ 'tib': 2**40,
+ 'pib': 2**50,
+}
+
+
+def check_sdk(module):
+ if not HAS_SDK:
+ module.fail_json(
+ msg='ovirtsdk4 version 4.3.0 or higher is required for this module'
+ )
+
+
+def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
+ """
+ Convert SDK Struct type into dictionary.
+ """
+ res = {}
+
+ def resolve_href(value):
+ # Fetch nested values of struct:
+ try:
+ value = connection.follow_link(value)
+ except sdk.Error:
+ value = None
+ nested_obj = dict(
+ (attr, convert_value(getattr(value, attr)))
+ for attr in attributes if getattr(value, attr, None) is not None
+ )
+ nested_obj['id'] = getattr(value, 'id', None)
+ nested_obj['href'] = getattr(value, 'href', None)
+ return nested_obj
+
+ def remove_underscore(val):
+ if val.startswith('_'):
+ val = remove_underscore(val[1:])
+ return val
+
+ def convert_value(value):
+ nested = False
+
+ if isinstance(value, sdk.Struct):
+ if not fetch_nested or not value.href:
+ return get_dict_of_struct(value)
+ return resolve_href(value)
+
+ elif isinstance(value, Enum) or isinstance(value, datetime):
+ return str(value)
+ elif isinstance(value, list) or isinstance(value, sdk.List):
+ if isinstance(value, sdk.List) and fetch_nested and value.href:
+ try:
+ value = connection.follow_link(value)
+ nested = True
+ except sdk.Error:
+ value = []
+
+ ret = []
+ for i in value:
+ if isinstance(i, sdk.Struct):
+ if not nested and fetch_nested and i.href:
+ ret.append(resolve_href(i))
+ elif not nested:
+ ret.append(get_dict_of_struct(i))
+ else:
+ nested_obj = dict(
+ (attr, convert_value(getattr(i, attr)))
+ for attr in attributes if getattr(i, attr, None)
+ )
+ nested_obj['id'] = getattr(i, 'id', None)
+ ret.append(nested_obj)
+ elif isinstance(i, Enum):
+ ret.append(str(i))
+ else:
+ ret.append(i)
+ return ret
+ else:
+ return value
+
+ if struct is not None:
+ for key, value in struct.__dict__.items():
+ if value is None:
+ continue
+
+ key = remove_underscore(key)
+ res[key] = convert_value(value)
+
+ return res
+
+
+def engine_version(connection):
+ """
+ Return string representation of oVirt engine version.
+ """
+ engine_api = connection.system_service().get()
+ engine_version = engine_api.product_info.version
+ return '%s.%s' % (engine_version.major, engine_version.minor)
+
+
+def create_connection(auth):
+ """
+ Create a connection to the Python SDK from the task's `auth` parameter.
+ If the user doesn't have an SSO token, the following `auth` keys are mandatory:
+ url, username, password
+
+ If the user has an SSO token, the following `auth` keys are mandatory:
+ url, token
+
+ The `ca_file` parameter is mandatory when a secure connection is wanted;
+ for an insecure connection, it's mandatory to send insecure=True instead.
+
+ :param auth: dictionary which contains needed values for connection creation
+ :return: Python SDK connection
+ """
+
+ url = auth.get('url')
+ if url is None and auth.get('hostname') is not None:
+ url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))
+
+ return sdk.Connection(
+ url=url,
+ username=auth.get('username'),
+ password=auth.get('password'),
+ ca_file=auth.get('ca_file', None),
+ insecure=auth.get('insecure', False),
+ token=auth.get('token', None),
+ kerberos=auth.get('kerberos', None),
+ headers=auth.get('headers', None),
+ )
+
+
+def convert_to_bytes(param):
+ """
+ Convert a value with an IEC unit suffix (KiB, MiB, GiB, TiB, PiB) to bytes.
+ A bare number is treated as KiB.
+
+ :param param: value to be converted
+ """
+ if param is None:
+ return None
+
+ # Get rid of whitespaces:
+ param = ''.join(param.split())
+
+ # Convert to bytes:
+ if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
+ return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
+ elif param.isdigit():
+ return int(param) * 2**10
+ else:
+ raise ValueError(
+ "Unsupported value(IEC supported): '{value}'".format(value=param)
+ )
+
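+# Illustrative examples for convert_to_bytes (values chosen for illustration):
+#
+#   convert_to_bytes('1KiB')  == 1024
+#   convert_to_bytes('4 GiB') == 4 * 2**30
+#   convert_to_bytes('10')    == 10 * 2**10   # bare digits are treated as KiB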
+
+def follow_link(connection, link):
+ """
+ This method returns the entity of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: entity which link points to
+ """
+
+ if link:
+ return connection.follow_link(link)
+ else:
+ return None
+
+
+def get_link_name(connection, link):
+ """
+ This method returns the name of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: name of the entity, which link points to
+ """
+
+ if link:
+ return connection.follow_link(link).name
+ else:
+ return None
+
+
+def equal(param1, param2, ignore_case=False):
+ """
+ Compare two parameters and return whether they are equal.
+ The comparison is skipped when the first parameter is None, so no
+ comparison runs when the user didn't specify the parameter in their task.
+
+ :param param1: user inputted parameter
+ :param param2: value of entity parameter
+ :return: True if parameters are equal or first parameter is None, otherwise False
+ """
+ if param1 is not None:
+ if ignore_case:
+ return param1.lower() == param2.lower()
+ return param1 == param2
+ return True
+
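+# Illustrative examples for equal():
+#
+#   equal(None, 'anything')                       == True   # option not specified
+#   equal('Cluster', 'cluster', ignore_case=True) == True
+#   equal('Cluster', 'cluster')                   == False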
+
+def search_by_attributes(service, list_params=None, **kwargs):
+ """
+ Search for the entity by attributes. Nested entities don't support search
+ via REST, so when searching for a nested entity we list all entities
+ and filter them by the specified attributes.
+ """
+ list_params = list_params or {}
+ # Check if the 'list' method supports search (look for a 'search' parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+ # The value must be double-quoted, because some oVirt resources can be created with spaces in their names.
+ search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()),
+ **list_params
+ )
+ else:
+ res = [
+ e for e in service.list(**list_params) if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
+
+
+def search_by_name(service, name, **kwargs):
+ """
+ Search for the entity by its name. Nested entities don't support search
+ via REST, so when searching for a nested entity we list all entities
+ and filter them by name.
+
+ :param service: service of the entity
+ :param name: name of the entity
+ :return: Entity object returned by Python SDK
+ """
+ # Check if the 'list' method supports search (look for a 'search' parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+ # The name must be double-quoted, because some oVirt resources can be created with spaces in their names.
+ search='name="{name}"'.format(name=name)
+ )
+ else:
+ res = [e for e in service.list() if e.name == name]
+
+ if kwargs:
+ res = [
+ e for e in service.list() if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
+
+
+def get_entity(service, get_params=None):
+ """
+ Ignore SDK Error in case of getting an entity from service.
+ """
+ entity = None
+ try:
+ if get_params is not None:
+ entity = service.get(**get_params)
+ else:
+ entity = service.get()
+ except sdk.Error:
+ # We can get here 404, we should ignore it, in case
+ # of removing entity for example.
+ pass
+ return entity
+
+
+def get_id_by_name(service, name, raise_error=True, ignore_case=False):
+ """
+ Search for an entity's ID by its name.
+ """
+ entity = search_by_name(service, name)
+
+ if entity is not None:
+ return entity.id
+
+ if raise_error:
+ raise Exception("Entity '%s' was not found." % name)
+
+
+def wait(
+ service,
+ condition,
+ fail_condition=lambda e: False,
+ timeout=180,
+ wait=True,
+ poll_interval=3,
+):
+ """
+ Wait until the entity fulfills the expected condition.
+
+ :param service: service of the entity
+ :param condition: condition to be fulfilled
+ :param fail_condition: if this condition is true, raise Exception
+ :param timeout: max time to wait in seconds
+ :param wait: if True wait for condition, if False don't wait
+ :param poll_interval: Number of seconds we should wait until next condition check
+ """
+ # Wait until the desired state of the entity:
+ if wait:
+ start = time.time()
+ while time.time() < start + timeout:
+ # Exit if the condition of entity is valid:
+ entity = get_entity(service)
+ if condition(entity):
+ return
+ elif fail_condition(entity):
+ raise Exception("Error while waiting on result state of the entity.")
+
+ # Sleep for `poll_interval` seconds if none of the conditions apply:
+ time.sleep(float(poll_interval))
+
+ raise Exception("Timeout exceed while waiting on result state of the entity.")
+
+
+def __get_auth_dict():
+ OVIRT_URL = os.environ.get('OVIRT_URL')
+ OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME')
+ OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
+ OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
+ OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
+ OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
+ OVIRT_INSECURE = OVIRT_CAFILE is None
+
+ env_vars = None
+ if OVIRT_URL is None and OVIRT_HOSTNAME is not None:
+ OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME)
+ if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
+ env_vars = {
+ 'url': OVIRT_URL,
+ 'username': OVIRT_USERNAME,
+ 'password': OVIRT_PASSWORD,
+ 'insecure': OVIRT_INSECURE,
+ 'token': OVIRT_TOKEN,
+ 'ca_file': OVIRT_CAFILE,
+ }
+ if env_vars is not None:
+ auth = dict(default=env_vars, type='dict')
+ else:
+ auth = dict(required=True, type='dict')
+
+ return auth
+
+
+def ovirt_info_full_argument_spec(**kwargs):
+ """
+ Extend parameters of info module with parameters which are common to all
+ oVirt info modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list()),
+ )
+ spec.update(kwargs)
+ return spec
+
+
+# Left for third-party module compatibility
+def ovirt_facts_full_argument_spec(**kwargs):
+ """
+ This is deprecated. Please use ovirt_info_full_argument_spec instead!
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ return ovirt_info_full_argument_spec(**kwargs)
+
+
+def ovirt_full_argument_spec(**kwargs):
+ """
+ Extend parameters of module with parameters which are common to all oVirt modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ timeout=dict(default=180, type='int'),
+ wait=dict(default=True, type='bool'),
+ poll_interval=dict(default=3, type='int'),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list()),
+ )
+ spec.update(kwargs)
+ return spec
+
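+# Illustrative usage sketch (the 'name' and 'state' options are hypothetical,
+# module-specific additions):
+#
+#   argument_spec = ovirt_full_argument_spec(
+#       name=dict(type='str'),
+#       state=dict(type='str', default='present', choices=['present', 'absent']),
+#   )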
+
+def check_params(module):
+ """
+ Most modules must have either `name` or `id` specified.
+ """
+ if module.params.get('name') is None and module.params.get('id') is None:
+ module.fail_json(msg='"name" or "id" is required')
+
+
+def engine_supported(connection, version):
+ return LooseVersion(engine_version(connection)) >= LooseVersion(version)
+
+
+def check_support(version, connection, module, params):
+ """
+ Check if the parameters used by the user are supported by the oVirt
+ Python SDK and the oVirt engine.
+ """
+ api_version = LooseVersion(engine_version(connection))
+ version = LooseVersion(version)
+ for param in params:
+ if module.params.get(param) is not None:
+ return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
+
+ return True
+
+
+class BaseModule(object):
+ """
+ This is the base class for oVirt modules. oVirt modules should inherit
+ this class and override its methods to customize the module's specific
+ needs. The only abstract method of this class is `build_entity`, which
+ must be implemented in the child class.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, connection, module, service, changed=False):
+ self._connection = connection
+ self._module = module
+ self._service = service
+ self._changed = changed
+ self._diff = {'after': dict(), 'before': dict()}
+
+ @property
+ def changed(self):
+ return self._changed
+
+ @changed.setter
+ def changed(self, changed):
+ if not self._changed:
+ self._changed = changed
+
+ @abstractmethod
+ def build_entity(self):
+ """
+ This method should return the oVirt Python SDK type which we want to
+ create or update, initialized with the values passed to the Ansible module.
+
+ For example, to create a VM we would return the following:
+ types.Vm(name=self._module.params['vm_name'])
+
+ :return: Specific instance of sdk.Struct.
+ """
+ pass
+
+ def param(self, name, default=None):
+ """
+ Return a module parameter specified by its name.
+ """
+ return self._module.params.get(name, default)
+
+ def update_check(self, entity):
+ """
+ This method checks whether the entity values are the same as the values
+ passed to the Ansible module. By default we don't compare any values.
+
+ :param entity: Entity we want to compare with Ansible module values.
+ :return: True if the values are the same, so we don't need to update the entity.
+ """
+ return True
+
+ def pre_create(self, entity):
+ """
+ This method is called right before entity is created.
+
+ :param entity: Entity to be created or updated.
+ """
+ pass
+
+ def post_create(self, entity):
+ """
+ This method is called right after entity is created.
+
+ :param entity: Entity which was created.
+ """
+ pass
+
+ def post_update(self, entity):
+ """
+ This method is called right after entity is updated.
+
+ :param entity: Entity which was updated.
+ """
+ pass
+
+ def diff_update(self, after, update):
+ for k, v in update.items():
+ if isinstance(v, Mapping):
+ after[k] = self.diff_update(after.get(k, dict()), v)
+ else:
+ after[k] = update[k]
+ return after
+
+ def create(
+ self,
+ entity=None,
+ result_state=None,
+ fail_condition=lambda e: False,
+ search_params=None,
+ update_params=None,
+ _wait=None,
+ force_create=False,
+ **kwargs
+ ):
+ """
+ Method which is called when the state of the entity is 'present'. If the
+ user doesn't provide the `entity` parameter, the entity is searched for
+ using the `search_params` parameter. If the entity is found it's updated;
+ whether the entity should be updated is checked by the `update_check`
+ method, and the updated entity is built by the `build_entity` method.
+
+ Function executed after entity is created can optionally be specified
+ in `post_create` parameter. Function executed after entity is updated
+ can optionally be specified in `post_update` parameter.
+
+ :param entity: Entity we want to update, if exists.
+ :param result_state: State the entity should be in for the task to finish.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param update_params: The params which should be passed to update method.
+ :param kwargs: Additional parameters passed when creating entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None and not force_create:
+ entity = self.search_entity(search_params)
+
+ self.pre_create(entity)
+
+ if entity:
+ # Entity exists, so update it:
+ entity_service = self._service.service(entity.id)
+ if not self.update_check(entity):
+ new_entity = self.build_entity()
+ if not self._module.check_mode:
+ update_params = update_params or {}
+ updated_entity = entity_service.update(
+ new_entity,
+ **update_params
+ )
+ self.post_update(entity)
+
+ # Update diffs only if the user specified the --diff parameter,
+ # so we don't needlessly overload the API:
+ if self._module._diff:
+ before = get_dict_of_struct(
+ entity,
+ self._connection,
+ fetch_nested=True,
+ attributes=['name'],
+ )
+ after = before.copy()
+ self.diff_update(after, get_dict_of_struct(new_entity))
+ self._diff['before'] = before
+ self._diff['after'] = after
+
+ self.changed = True
+ else:
+ # Entity doesn't exist, so create it:
+ if not self._module.check_mode:
+ entity = self._service.add(
+ self.build_entity(),
+ **kwargs
+ )
+ self.post_create(entity)
+ self.changed = True
+
+ if not self._module.check_mode:
+ # Wait for the entity to be created and to be in the defined state:
+ entity_service = self._service.service(entity.id)
+
+ def state_condition(entity):
+ return entity
+
+ if result_state:
+
+ def state_condition(entity):
+ return entity and entity.status == result_state
+
+ wait(
+ service=entity_service,
+ condition=state_condition,
+ fail_condition=fail_condition,
+ wait=_wait if _wait is not None else self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+
+ return {
+ 'changed': self.changed,
+ 'id': getattr(entity, 'id', None),
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def pre_remove(self, entity):
+ """
+ This method is called right before entity is removed.
+
+ :param entity: Entity which we want to remove.
+ """
+ pass
+
+ def entity_name(self, entity):
+ return "{e_type} '{e_name}'".format(
+ e_type=type(entity).__name__.lower(),
+ e_name=getattr(entity, 'name', None),
+ )
+
+ def remove(self, entity=None, search_params=None, **kwargs):
+ """
+ Method which is called when the state of the entity is 'absent'. If the
+ user doesn't provide the `entity` parameter, the entity is searched for
+ using the `search_params` parameter. If the entity is found it's removed.
+
+ Function executed before remove is executed can optionally be specified
+ in `pre_remove` parameter.
+
+ :param entity: Entity we want to remove.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed when removing entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ if entity is None:
+ return {
+ 'changed': self.changed,
+ 'msg': "Entity wasn't found."
+ }
+
+ self.pre_remove(entity)
+
+ entity_service = self._service.service(entity.id)
+ if not self._module.check_mode:
+ entity_service.remove(**kwargs)
+ wait(
+ service=entity_service,
+ condition=lambda entity: not entity,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ self.changed = True
+
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ }
+
+ def action(
+ self,
+ action,
+ entity=None,
+ action_condition=lambda e: e,
+ wait_condition=lambda e: e,
+ fail_condition=lambda e: False,
+ pre_action=lambda e: e,
+ post_action=lambda e: None,
+ search_params=None,
+ **kwargs
+ ):
+ """
+ This method is executed when we want to change the state of some oVirt
+ entity. The action to be executed on oVirt service is specified by
+ `action` parameter. Whether the action should be executed can be
+ specified by passing `action_condition` parameter. State which the
+ entity should be in after execution of the action can be specified
+ by `wait_condition` parameter.
+
+ Function executed before an action on entity can optionally be specified
+ in `pre_action` parameter. Function executed after an action on entity can
+ optionally be specified in `post_action` parameter.
+
+ :param action: Action which should be executed by service on entity.
+ :param entity: Entity we want to run action on.
+ :param action_condition: Function which is executed when checking if action should be executed.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param wait_condition: Function which is executed when waiting on result state.
+ :param pre_action: Function which is executed before running the action.
+ :param post_action: Function which is executed after running the action.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed to action.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ entity = pre_action(entity)
+
+ if entity is None:
+ self._module.fail_json(
+ msg="Entity not found, can't run action '{0}'.".format(
+ action
+ )
+ )
+
+ entity_service = self._service.service(entity.id)
+ entity = entity_service.get()
+ if action_condition(entity):
+ if not self._module.check_mode:
+ getattr(entity_service, action)(**kwargs)
+ self.changed = True
+
+ post_action(entity)
+
+ wait(
+ service=self._service.service(entity.id),
+ condition=wait_condition,
+ fail_condition=fail_condition,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def wait_for_import(self, condition=lambda e: True):
+ if self._module.params['wait']:
+ start = time.time()
+ timeout = self._module.params['timeout']
+ poll_interval = self._module.params['poll_interval']
+ while time.time() < start + timeout:
+ entity = self.search_entity()
+ if entity and condition(entity):
+ return entity
+ time.sleep(poll_interval)
+
+ def search_entity(self, search_params=None, list_params=None):
+ """
+ Always try to search by `id` first; if no ID is specified, check
+ whether the user constructed a special search in `search_params`,
+ and otherwise search by `name`.
+ """
+ entity = None
+
+ if 'id' in self._module.params and self._module.params['id'] is not None:
+ entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
+ elif search_params is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, **search_params)
+ elif self._module.params.get('name') is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
+
+ return entity
+
+ def _get_major(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.major)
+ return int(full_version.split('.')[0])
+
+ def _get_minor(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.minor)
+ return int(full_version.split('.')[1])
+
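+# Illustrative subclass sketch (the class and option names are hypothetical;
+# build_entity follows the pattern described in its docstring above):
+#
+#   class ClustersModule(BaseModule):
+#       def build_entity(self):
+#           return otypes.Cluster(name=self._module.params['name'])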
+
+def _sdk4_error_maybe():
+ """
+ Allow for ovirtsdk4 not being installed.
+ """
+ if HAS_SDK:
+ return sdk.Error
+ return type(None)
+
+
+class OvirtRetry(CloudRetry):
+ base_class = _sdk4_error_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.code
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This is a list of error codes to retry.
+ retry_on = [
+ # HTTP status: Conflict
+ 409,
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
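+
+# Illustrative usage sketch (exponential_backoff is provided by CloudRetry; the
+# decorated function is hypothetical):
+#
+#   @OvirtRetry.exponential_backoff(retries=3, delay=2)
+#   def start_vm(vm_service):
+#       vm_service.start()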
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py
new file mode 100644
index 00000000..3c87c1ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py
@@ -0,0 +1,286 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+from ansible.module_utils.basic import env_fallback
+
+try:
+ import footmark
+ import footmark.ecs
+ import footmark.slb
+ import footmark.vpc
+ import footmark.rds
+ import footmark.ess
+ import footmark.sts
+ import footmark.dns
+ import footmark.ram
+ import footmark.market
+ HAS_FOOTMARK = True
+except ImportError:
+ HAS_FOOTMARK = False
+
+
+class AnsibleACSError(Exception):
+ pass
+
+
+def acs_common_argument_spec():
+ return dict(
+ alicloud_access_key=dict(aliases=['access_key_id', 'access_key'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
+ alicloud_secret_key=dict(aliases=['secret_access_key', 'secret_key'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
+ alicloud_security_token=dict(aliases=['security_token'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
+ ecs_role_name=dict(aliases=['role_name'], fallback=(env_fallback, ['ALICLOUD_ECS_ROLE_NAME']))
+ )
+
+
+def ecs_argument_spec():
+ spec = acs_common_argument_spec()
+ spec.update(
+ dict(
+ alicloud_region=dict(required=True, aliases=['region', 'region_id'],
+ fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
+ alicloud_assume_role_arn=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_ARN']),
+ aliases=['assume_role_arn']),
+ alicloud_assume_role_session_name=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_SESSION_NAME']),
+ aliases=['assume_role_session_name']),
+ alicloud_assume_role_session_expiration=dict(type='int',
+ fallback=(env_fallback,
+ ['ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION']),
+ aliases=['assume_role_session_expiration']),
+ alicloud_assume_role=dict(type='dict', aliases=['assume_role']),
+ profile=dict(fallback=(env_fallback, ['ALICLOUD_PROFILE'])),
+ shared_credentials_file=dict(fallback=(env_fallback, ['ALICLOUD_SHARED_CREDENTIALS_FILE']))
+ )
+ )
+ return spec
+
+
+def get_acs_connection_info(params):
+
+ ecs_params = dict(acs_access_key_id=params.get('alicloud_access_key'),
+ acs_secret_access_key=params.get('alicloud_secret_key'),
+ security_token=params.get('alicloud_security_token'),
+ ecs_role_name=params.get('ecs_role_name'),
+ user_agent='Ansible-Provider-Alicloud')
+ return ecs_params
+
+
+def connect_to_acs(acs_module, region, **params):
+ conn = acs_module.connect_to_region(region, **params)
+ if not conn:
+ if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
+ raise AnsibleACSError(
+ "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
+ else:
+ raise AnsibleACSError(
+ "Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
+ return conn
+
+
+def get_assume_role(params):
+ """ Return new params """
+ sts_params = get_acs_connection_info(params)
+ assume_role = {}
+ if params.get('assume_role'):
+ assume_role['alicloud_assume_role_arn'] = params['assume_role'].get('role_arn')
+ assume_role['alicloud_assume_role_session_name'] = params['assume_role'].get('session_name')
+ assume_role['alicloud_assume_role_session_expiration'] = params['assume_role'].get('session_expiration')
+ assume_role['alicloud_assume_role_policy'] = params['assume_role'].get('policy')
+
+ assume_role_params = {
+ 'role_arn': params.get('alicloud_assume_role_arn') if params.get('alicloud_assume_role_arn') else assume_role.get('alicloud_assume_role_arn'),
+ 'role_session_name': params.get('alicloud_assume_role_session_name') if params.get('alicloud_assume_role_session_name')
+ else assume_role.get('alicloud_assume_role_session_name'),
+ 'duration_seconds': params.get('alicloud_assume_role_session_expiration') if params.get('alicloud_assume_role_session_expiration')
+ else assume_role.get('alicloud_assume_role_session_expiration', 3600),
+ 'policy': assume_role.get('alicloud_assume_role_policy', {})
+ }
+
+ try:
+ sts = connect_to_acs(footmark.sts, params.get('alicloud_region'), **sts_params).assume_role(**assume_role_params).read()
+ sts_params['acs_access_key_id'], sts_params['acs_secret_access_key'], sts_params['security_token'] \
+ = sts['access_key_id'], sts['access_key_secret'], sts['security_token']
+ except AnsibleACSError as e:
+ # params is a plain dict here, so re-raise instead of calling fail_json on it
+ raise AnsibleACSError("Failed to assume role: %s" % str(e))
+ return sts_params
+
+
+def get_profile(params):
+ if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']:
+ path = params['shared_credentials_file'] if params['shared_credentials_file'] else os.getenv('HOME') + '/.aliyun/config.json'
+ auth = {}
+ with open(path, 'r') as f:
+ for pro in json.load(f)['profiles']:
+ if params['profile'] == pro['name']:
+ auth = pro
+ if auth:
+ if auth['mode'] == 'AK' and auth.get('access_key_id') and auth.get('access_key_secret'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'StsToken' and auth.get('access_key_id') and auth.get('access_key_secret') and auth.get('sts_token'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['security_token'] = auth.get('sts_token')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'EcsRamRole':
+ params['ecs_role_name'] = auth.get('ram_role_name')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'RamRoleArn' and auth.get('ram_role_arn'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['security_token'] = auth.get('sts_token')
+ params['ecs_role_name'] = auth.get('ram_role_name')
+ params['alicloud_assume_role_arn'] = auth.get('ram_role_arn')
+ params['alicloud_assume_role_session_name'] = auth.get('ram_session_name')
+ params['alicloud_assume_role_session_expiration'] = auth.get('expired_seconds')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_assume_role(params)
+ elif params.get('alicloud_assume_role_arn') or params.get('assume_role'):
+ params = get_assume_role(params)
+ else:
+ params = get_acs_connection_info(params)
+ return params
+
+
+def ecs_connect(module):
+ """ Return an ecs connection"""
+ ecs_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return ecs
+
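+# Illustrative usage sketch (AnsibleModule is imported by the calling module):
+#
+#   module = AnsibleModule(argument_spec=ecs_argument_spec())
+#   ecs = ecs_connect(module)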
+
+def slb_connect(module):
+ """ Return an slb connection"""
+ slb_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ slb = connect_to_acs(footmark.slb, region, **slb_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return slb
+
+
+def dns_connect(module):
+ """ Return an dns connection"""
+ dns_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ dns = connect_to_acs(footmark.dns, region, **dns_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return dns
+
+
+def vpc_connect(module):
+ """ Return an vpc connection"""
+ vpc_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return vpc
+
+
+def rds_connect(module):
+ """ Return an rds connection"""
+ rds_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ rds = connect_to_acs(footmark.rds, region, **rds_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return rds
+
+
+def ess_connect(module):
+ """ Return an ess connection"""
+ ess_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ ess = connect_to_acs(footmark.ess, region, **ess_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return ess
+
+
+def sts_connect(module):
+ """ Return an sts connection"""
+ sts_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ sts = connect_to_acs(footmark.sts, region, **sts_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return sts
+
+
+def ram_connect(module):
+ """ Return an ram connection"""
+ ram_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ ram = connect_to_acs(footmark.ram, region, **ram_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return ram
+
+
+def market_connect(module):
+ """ Return an market connection"""
+ market_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ market = connect_to_acs(footmark.market, region, **market_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+ # alicloud_region is required by the argument spec, so region is always set here
+ return market
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/cloud.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..33b33084
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/cloud.py
@@ -0,0 +1,208 @@
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+ - Example using the AWSRetry class which inherits from CloudRetry.
+
+ @AWSRetry.exponential_backoff(retries=10, delay=3)
+ def get_ec2_security_group_ids_from_names():
+ ...
+
+ @AWSRetry.jittered_backoff()
+ def get_ec2_security_group_ids_from_names():
+ ...
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int or None): Optional. Each delay generated is capped at
+ this amount; pass None for no cap. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_gen at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+ allowing developers to explicitly pass in a seeded Random object.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_gen at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+ # This is the base class of the exception.
+ # AWS Example botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ catch_extra_error_codes (list or None): Optional additional response codes to treat as retryable.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+ else:
+ # Re-raise: the response code is not one we retry on
+ raise e
+ else:
+ # Re-raise: the exception is not an instance of base_class
+ raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
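A minimal sketch of the subclass contract CloudRetry expects (purely illustrative; DummySDKError and its .status attribute are invented for the example):

    class DummySDKError(Exception):
        # Hypothetical SDK exception carrying a status code.
        def __init__(self, status):
            super(DummySDKError, self).__init__(status)
            self.status = status

    class DummyRetry(CloudRetry):
        base_class = DummySDKError

        @staticmethod
        def status_code_from_exception(error):
            return error.status

        @staticmethod
        def found(response_code, catch_extra_error_codes=None):
            codes = ['Throttling'] + list(catch_extra_error_codes or [])
            return response_code in codes

    @DummyRetry.jittered_backoff(retries=3, delay=1)
    def flaky_api_call():
        pass  # retried whenever it raises DummySDKError('Throttling')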
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/ipaddress.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/ipaddress.py
new file mode 100644
index 00000000..db4e91b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/ipaddress.py
@@ -0,0 +1,2580 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file, and this file only, is based on
+# Lib/ipaddress.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+from __future__ import (absolute_import, division, print_function)
+from __future__ import unicode_literals
+__metaclass__ = type
+
+
+import itertools
+import struct
+
+
+# The following makes it easier for us to script updates of the bundled code and is not part of
+# upstream
+_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
+
+__version__ = "1.0.22"
+
+# Compatibility functions
+_compat_int_types = (int,)
+try:
+ _compat_int_types = (int, long)
+except NameError:
+ pass
+try:
+ _compat_str = unicode
+except NameError:
+ _compat_str = str
+ assert bytes != str
+if b"\0"[0] == 0: # Python 3 semantics
+
+ def _compat_bytes_to_byte_vals(byt):
+ return byt
+
+
+else:
+
+ def _compat_bytes_to_byte_vals(byt):
+ return [struct.unpack(b"!B", b)[0] for b in byt]
+
+
+try:
+ _compat_int_from_byte_vals = int.from_bytes
+except AttributeError:
+
+ def _compat_int_from_byte_vals(bytvals, endianess):
+ assert endianess == "big"
+ res = 0
+ for bv in bytvals:
+ assert isinstance(bv, _compat_int_types)
+ res = (res << 8) + bv
+ return res
+
+
+def _compat_to_bytes(intval, length, endianess):
+ assert isinstance(intval, _compat_int_types)
+ assert endianess == "big"
+ if length == 4:
+ if intval < 0 or intval >= 2 ** 32:
+ raise struct.error("integer out of range for 'I' format code")
+ return struct.pack(b"!I", intval)
+ elif length == 16:
+ if intval < 0 or intval >= 2 ** 128:
+ raise struct.error("integer out of range for 'QQ' format code")
+ return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF)
+ else:
+ raise NotImplementedError()
+
+
+if hasattr(int, "bit_length"):
+ # Not int.bit_length, since that won't work in 2.7 where long exists
+ def _compat_bit_length(i):
+ return i.bit_length()
+
+
+else:
+
+ def _compat_bit_length(i):
+ for res in itertools.count():
+ if i >> res == 0:
+ return res
+
+
+def _compat_range(start, end, step=1):
+ assert step > 0
+ i = start
+ while i < end:
+ yield i
+ i += step
+
+
+class _TotalOrderingMixin(object):
+ __slots__ = ()
+
+ # Helper that derives the other comparison operations from
+ # __lt__ and __eq__
+ # We avoid functools.total_ordering because it doesn't handle
+ # NotImplemented correctly yet (http://bugs.python.org/issue10042)
+ def __eq__(self, other):
+ raise NotImplementedError
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not equal
+
+ def __lt__(self, other):
+ raise NotImplementedError
+
+ def __le__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented or not less:
+ return self.__eq__(other)
+ return less
+
+ def __gt__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not (less or equal)
+
+ def __ge__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ return not less
+
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ "%r does not appear to be an IPv4 or IPv6 address. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?" % address
+ )
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 address" % address
+ )
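As a quick illustration of the dispatch above (per the docstring, integers below 2**32 parse as IPv4):

    >>> ip_address('192.0.2.1')
    IPv4Address('192.0.2.1')
    >>> ip_address(3221225985)
    IPv4Address('192.0.2.1')
    >>> ip_address('2001:db8::1')
    IPv6Address('2001:db8::1')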
+
+
+def ip_network(address, strict=True):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP network. Either IPv4 or
+ IPv6 networks may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if the network has host bits set.
+
+ """
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ "%r does not appear to be an IPv4 or IPv6 network. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?" % address
+ )
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 network" % address
+ )
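A short illustration of the strict flag described above:

    >>> ip_network('192.0.2.0/24')
    IPv4Network('192.0.2.0/24')
    >>> ip_network('192.0.2.1/24')        # host bits set
    Traceback (most recent call last):
        ...
    ValueError: 192.0.2.1/24 has host bits set
    >>> ip_network('192.0.2.1/24', strict=False)
    IPv4Network('192.0.2.0/24')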
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return IPv4Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 interface" % address
+ )
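As the note above says, an interface couples an address with its network:

    >>> iface = ip_interface('192.0.2.5/24')
    >>> iface.ip
    IPv4Address('192.0.2.5')
    >>> iface.network
    IPv4Network('192.0.2.0/24')
    >>> iface.with_netmask
    '192.0.2.5/255.255.255.0'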
+
+
+def v4_int_to_packed(address):
+ """Represent an address as 4 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The integer address packed as 4 bytes in network (big-endian) order.
+
+ Raises:
+ ValueError: If the integer is negative or too large to be an
+ IPv4 IP address.
+
+ """
+ try:
+ return _compat_to_bytes(address, 4, "big")
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+ """Represent an address as 16 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The integer address packed as 16 bytes in network (big-endian) order.
+
+ """
+ try:
+ return _compat_to_bytes(address, 16, "big")
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+ """Helper to split the netmask and raise AddressValueError if needed"""
+ addr = _compat_str(address).split("/")
+ if len(addr) > 2:
+ raise AddressValueError("Only one '/' permitted in %r" % address)
+ return addr
+
+
+def _find_address_range(addresses):
+ """Find a sequence of sorted deduplicated IPv#Address.
+
+ Args:
+ addresses: a list of IPv#Address objects.
+
+ Yields:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ it = iter(addresses)
+ first = last = next(it) # pylint: disable=stop-iteration-return
+ for ip in it:
+ if ip._ip != last._ip + 1:
+ yield first, last
+ first = ip
+ last = ip
+ yield first, last
+
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ return min(bits, _compat_bit_length(~number & (number - 1)))
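A quick worked example of this helper (values chosen for illustration):

    >>> _count_righthand_zero_bits(0b10100, 8)   # two trailing zero bits
    2
    >>> _count_righthand_zero_bits(0, 8)         # all zeroes: capped at bits
    8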
+
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+ ... IPv4Address('192.0.2.130')))
+ ... #doctest: +NORMALIZE_WHITESPACE
+ [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+ IPv4Network('192.0.2.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ An iterator of the summarized IPv(4|6) network objects.
+
+ Raises:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version of the first address is not 4 or 6.
+
+ """
+ if not (
+ isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress)
+ ):
+ raise TypeError("first and last must be IP addresses, not networks")
+ if first.version != last.version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (first, last)
+ )
+ if first > last:
+ raise ValueError("last IP address must be greater than first")
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError("unknown IP version")
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = min(
+ _count_righthand_zero_bits(first_int, ip_bits),
+ _compat_bit_length(last_int - first_int + 1) - 1,
+ )
+ net = ip((first_int, ip_bits - nbits))
+ yield net
+ first_int += 1 << nbits
+ if first_int - 1 == ip._ALL_ONES:
+ break
+
+
+def _collapse_addresses_internal(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('192.0.2.0/26')
+ ip2 = IPv4Network('192.0.2.64/26')
+ ip3 = IPv4Network('192.0.2.128/26')
+ ip4 = IPv4Network('192.0.2.192/26')
+
+ _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_addresses([]).
+
+ Args:
+ addresses: A list of IPv4Network's or IPv6Network's
+
+ Returns:
+ A list of IPv4Network's or IPv6Network's depending on what we were
+ passed.
+
+ """
+ # First merge
+ to_merge = list(addresses)
+ subnets = {}
+ while to_merge:
+ net = to_merge.pop()
+ supernet = net.supernet()
+ existing = subnets.get(supernet)
+ if existing is None:
+ subnets[supernet] = net
+ elif existing != net:
+ # Merge consecutive subnets
+ del subnets[supernet]
+ to_merge.append(supernet)
+ # Then iterate over resulting networks, skipping subsumed subnets
+ last = None
+ for net in sorted(subnets.values()):
+ if last is not None:
+ # Since they are sorted,
+ # last.network_address <= net.network_address is a given.
+ if last.broadcast_address >= net.broadcast_address:
+ continue
+ yield net
+ last = net
+
+
+def collapse_addresses(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_addresses([IPv4Network('192.0.2.0/25'),
+ IPv4Network('192.0.2.128/25')]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ Args:
+ addresses: An iterator of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of the collapsed IPv(4|6)Network objects.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseAddress):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, ips[-1])
+ )
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, ips[-1])
+ )
+ try:
+ ips.append(ip.ip)
+ except AttributeError:
+ ips.append(ip.network_address)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, nets[-1])
+ )
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+
+ # find consecutive address ranges in the sorted sequence and summarize them
+ if ips:
+ for first, last in _find_address_range(ips):
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+ doesn't make any sense. There are some times however, where you may wish
+ to have ipaddress sort these for you anyway. If you need to do this, you
+ can use this function as the key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNetwork):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseAddress):
+ return obj._get_address_key()
+ return NotImplemented
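For instance, a mixed list can be ordered with this key (sketch):

    >>> sorted([IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')],
    ...        key=get_mixed_type_key)
    [IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')]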
+
+
+class _IPAddressBase(_TotalOrderingMixin):
+
+ """The mother class."""
+
+ __slots__ = ()
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return _compat_str(self)
+
+ @property
+ def reverse_pointer(self):
+ """The name of the reverse DNS pointer for the IP address, e.g.:
+ >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+ '1.0.0.127.in-addr.arpa'
+ >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+ '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+ """
+ return self._reverse_pointer()
+
+ @property
+ def version(self):
+ msg = "%200s has no version specified" % (type(self),)
+ raise NotImplementedError(msg)
+
+ def _check_int_address(self, address):
+ if address < 0:
+ msg = "%d (< 0) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._version))
+ if address > self._ALL_ONES:
+ msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+ raise AddressValueError(
+ msg % (address, self._max_prefixlen, self._version)
+ )
+
+ def _check_packed_address(self, address, expected_len):
+ address_len = len(address)
+ if address_len != expected_len:
+ msg = (
+ "%r (len %d != %d) is not permitted as an IPv%d address. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?"
+ )
+ raise AddressValueError(
+ msg % (address, address_len, expected_len, self._version)
+ )
+
+ @classmethod
+ def _ip_int_from_prefix(cls, prefixlen):
+ """Turn the prefix length into a bitwise netmask
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
+
+ @classmethod
+ def _prefix_from_ip_int(cls, ip_int):
+ """Return prefix length from the bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ ValueError: If the input intermingles zeroes & ones
+ """
+ trailing_zeroes = _count_righthand_zero_bits(
+ ip_int, cls._max_prefixlen
+ )
+ prefixlen = cls._max_prefixlen - trailing_zeroes
+ leading_ones = ip_int >> trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ byteslen = cls._max_prefixlen // 8
+ details = _compat_to_bytes(ip_int, byteslen, "big")
+ msg = "Netmask pattern %r mixes zeroes & ones"
+ raise ValueError(msg % details)
+ return prefixlen
+
+ @classmethod
+ def _report_invalid_netmask(cls, netmask_str):
+ msg = "%r is not a valid netmask" % netmask_str
+ raise NetmaskValueError(msg)
+
+ @classmethod
+ def _prefix_from_prefix_string(cls, prefixlen_str):
+ """Return prefix length from a numeric string
+
+ Args:
+ prefixlen_str: The string to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask
+ """
+ # int allows a leading +/- as well as surrounding whitespace,
+ # so we ensure that isn't the case
+ if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+ cls._report_invalid_netmask(prefixlen_str)
+ try:
+ prefixlen = int(prefixlen_str)
+ except ValueError:
+ cls._report_invalid_netmask(prefixlen_str)
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen_str)
+ return prefixlen
+
+ @classmethod
+ def _prefix_from_ip_string(cls, ip_str):
+ """Turn a netmask/hostmask string into a prefix length
+
+ Args:
+ ip_str: The netmask/hostmask to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask/hostmask
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = cls._ip_int_from_string(ip_str)
+ except AddressValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= cls._ALL_ONES
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ def __reduce__(self):
+ return self.__class__, (_compat_str(self),)
+
+
+class _BaseAddress(_IPAddressBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+ """
+
+ __slots__ = ()
+
+ def __int__(self):
+ return self._ip
+
+ def __eq__(self, other):
+ try:
+ return self._ip == other._ip and self._version == other._version
+ except AttributeError:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseAddress):
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ # Shorthand for Integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) + other)
+
+ def __sub__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) - other)
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return _compat_str(self._string_from_ip_int(self._ip))
+
+ def __hash__(self):
+ return hash(hex(int(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ def __reduce__(self):
+ return self.__class__, (self._ip,)
+
+
+class _BaseNetwork(_IPAddressBase):
+
+ """A generic IP network object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+
+ """
+
+ def __init__(self, address):
+ self._cache = {}
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return "%s/%d" % (self.network_address, self.prefixlen)
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast):
+ yield self._address_class(x)
+
+ def __iter__(self):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network, broadcast + 1):
+ yield self._address_class(x)
+
+ def __getitem__(self, n):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError("address out of range")
+ return self._address_class(network + n)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError("address out of range")
+ return self._address_class(broadcast + n)
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+ if self.network_address != other.network_address:
+ return self.network_address < other.network_address
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __eq__(self, other):
+ try:
+ return (
+ self._version == other._version
+ and self.network_address == other.network_address
+ and int(self.netmask) == int(other.netmask)
+ )
+ except AttributeError:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(int(self.network_address) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNetwork):
+ return False
+ # dealing with another address
+ else:
+ # address
+ return (
+ int(self.network_address)
+ <= int(other._ip)
+ <= int(self.broadcast_address)
+ )
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network_address in other or (
+ self.broadcast_address in other
+ or (
+ other.network_address in self
+ or (other.broadcast_address in self)
+ )
+ )
+
+ @property
+ def broadcast_address(self):
+ x = self._cache.get("broadcast_address")
+ if x is None:
+ x = self._address_class(
+ int(self.network_address) | int(self.hostmask)
+ )
+ self._cache["broadcast_address"] = x
+ return x
+
+ @property
+ def hostmask(self):
+ x = self._cache.get("hostmask")
+ if x is None:
+ x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
+ self._cache["hostmask"] = x
+ return x
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%d" % (self.network_address, self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self.network_address, self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self.network_address, self.hostmask)
+
+ @property
+ def num_addresses(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast_address) - int(self.network_address) + 1
+
+ @property
+ def _address_class(self):
+ # Returning bare address objects (rather than interfaces) allows for
+ # more consistent behaviour across the network address, broadcast
+ # address and individual host addresses.
+ msg = "%200s has no associated address class" % (type(self),)
+ raise NotImplementedError(msg)
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = ip_network('192.0.2.0/28')
+ addr2 = ip_network('192.0.2.1/32')
+ list(addr1.address_exclude(addr2)) =
+ [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+ IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+ or IPv6:
+
+ addr1 = ip_network('2001:db8::1/32')
+ addr2 = ip_network('2001:db8::1/128')
+ list(addr1.address_exclude(addr2)) =
+ [ip_network('2001:db8::1/128'),
+ ip_network('2001:db8::2/127'),
+ ip_network('2001:db8::4/126'),
+ ip_network('2001:db8::8/125'),
+ ...
+ ip_network('2001:db8:8000::/33')]
+
+ Args:
+ other: An IPv4Network or IPv6Network object of the same type.
+
+ Returns:
+ An iterator of the IPv(4|6)Network objects which is self
+ minus other.
+
+ Raises:
+ TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if not self._version == other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError("%s is not a network object" % other)
+
+ if not other.subnet_of(self):
+ raise ValueError("%s not contained in %s" % (other, self))
+ if other == self:
+ return
+
+ # Make sure we're comparing the network of other.
+ other = other.__class__(
+ "%s/%s" % (other.network_address, other.prefixlen)
+ )
+
+ s1, s2 = self.subnets()
+ while s1 != other and s2 != other:
+ if other.subnet_of(s1):
+ yield s2
+ s1, s2 = s1.subnets()
+ elif other.subnet_of(s2):
+ yield s1
+ s1, s2 = s2.subnets()
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError(
+ "Error performing exclusion: "
+ "s1: %s s2: %s other: %s" % (s1, s2, other)
+ )
+ if s1 == other:
+ yield s2
+ elif s2 == other:
+ yield s1
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError(
+ "Error performing exclusion: "
+ "s1: %s s2: %s other: %s" % (s1, s2, other)
+ )
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This is only concerned about the comparison of the integer
+ representation of the network addresses. This means that the
+ host bits aren't considered at all in this method. If you want
+ to compare host bits, you can easily enough do a
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+ IPv6Network('2001:db8::1000/124') <
+ IPv6Network('2001:db8::2000/124')
+ 0 if self == other
+ eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+ IPv6Network('2001:db8::1000/124') ==
+ IPv6Network('2001:db8::1000/124')
+ 1 if self > other
+ eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+ IPv6Network('2001:db8::2000/124') >
+ IPv6Network('2001:db8::1000/124')
+
+ Raises:
+ TypeError if the IP versions are different.
+
+ """
+ # does this need to raise a ValueError?
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ # self._version == other._version below here:
+ if self.network_address < other.network_address:
+ return -1
+ if self.network_address > other.network_address:
+ return 1
+ # self.network_address == other.network_address below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network_address, self.netmask)
+
+ def subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ In the case that self contains only one IP
+ (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+ for IPv6), yield an iterator with just ourself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (smaller prefix) than the existing prefix.
+ This should not be set if prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError("new prefix must be longer")
+ if prefixlen_diff != 1:
+ raise ValueError("cannot set prefixlen_diff and new_prefix")
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError("prefix length diff must be > 0")
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ "prefix length diff %d is invalid for netblock %s"
+ % (new_prefixlen, self)
+ )
+
+ start = int(self.network_address)
+ end = int(self.broadcast_address) + 1
+ step = (int(self.hostmask) + 1) >> prefixlen_diff
+ for new_addr in _compat_range(start, end, step):
+ current = self.__class__((new_addr, new_prefixlen))
+ yield current
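For example, splitting a /24 into /26 networks:

    >>> list(ip_network('192.0.2.0/24').subnets(new_prefix=26))
    [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
     IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]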
+
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv4 network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+ a negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError("new prefix must be shorter")
+ if prefixlen_diff != 1:
+ raise ValueError("cannot set prefixlen_diff and new_prefix")
+ prefixlen_diff = self._prefixlen - new_prefix
+
+ new_prefixlen = self.prefixlen - prefixlen_diff
+ if new_prefixlen < 0:
+ raise ValueError(
+ "current prefixlen is %d, cannot have a prefixlen_diff of %d"
+ % (self.prefixlen, prefixlen_diff)
+ )
+ return self.__class__(
+ (
+ int(self.network_address)
+ & (int(self.netmask) << prefixlen_diff),
+ new_prefixlen,
+ )
+ )
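And the inverse direction, matching the /24 to /21 example in the docstring:

    >>> ip_network('192.0.2.0/24').supernet(prefixlen_diff=3)
    IPv4Network('192.0.0.0/21')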
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return (
+ self.network_address.is_multicast
+ and self.broadcast_address.is_multicast
+ )
+
+ @staticmethod
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (a, b)
+ )
+ return (
+ b.network_address <= a.network_address
+ and b.broadcast_address >= a.broadcast_address
+ )
+ except AttributeError:
+ raise TypeError(
+ "Unable to test subnet containment "
+ "between %s and %s" % (a, b)
+ )
+
+ def subnet_of(self, other):
+ """Return True if this network is a subnet of other."""
+ return self._is_subnet_of(self, other)
+
+ def supernet_of(self, other):
+ """Return True if this network is a supernet of other."""
+ return self._is_subnet_of(other, self)
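Both predicates in action:

    >>> a = ip_network('192.0.2.0/24')
    >>> b = ip_network('192.0.2.128/30')
    >>> b.subnet_of(a)
    True
    >>> a.supernet_of(b)
    True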
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (
+ self.network_address.is_reserved
+ and self.broadcast_address.is_reserved
+ )
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return (
+ self.network_address.is_link_local
+ and self.broadcast_address.is_link_local
+ )
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return (
+ self.network_address.is_private
+ and self.broadcast_address.is_private
+ )
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return (
+ self.network_address.is_unspecified
+ and self.broadcast_address.is_unspecified
+ )
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return (
+ self.network_address.is_loopback
+ and self.broadcast_address.is_loopback
+ )
+
+
+class _BaseV4(object):
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 4
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2 ** IPV4LENGTH) - 1
+ _DECIMAL_DIGITS = frozenset("0123456789")
+
+ # the valid octets for host and netmasks. only useful for IPv4.
+ _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
+
+ _max_prefixlen = IPV4LENGTH
+ # There are only a handful of valid v4 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ def _explode_shorthand_ip_string(self):
+ return _compat_str(self)
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ try:
+ # Check for a netmask in prefix length form
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ prefixlen = cls._prefix_from_ip_string(arg)
+ netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP ip_str.
+
+ Returns:
+ The IP ip_str as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError("Address cannot be empty")
+
+ octets = ip_str.split(".")
+ if len(octets) != 4:
+ raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+ try:
+ return _compat_int_from_byte_vals(
+ map(cls._parse_octet, octets), "big"
+ )
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_octet(cls, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ if not octet_str:
+ raise ValueError("Empty octet not permitted")
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._DECIMAL_DIGITS.issuperset(octet_str):
+ msg = "Only decimal digits permitted in %r"
+ raise ValueError(msg % octet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(octet_str) > 3:
+ msg = "At most 3 characters permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Convert to integer (we know digits are legal)
+ octet_int = int(octet_str, 10)
+ # Any octets that look like they *might* be written in octal,
+ # and which don't look exactly the same in both octal and
+ # decimal are rejected as ambiguous
+ if octet_int > 7 and octet_str[0] == "0":
+ msg = "Ambiguous (octal/decimal) value in %r not permitted"
+ raise ValueError(msg % octet_str)
+ if octet_int > 255:
+ raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+ return octet_int
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ return ".".join(
+ _compat_str(
+ struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b
+ )
+ for b in _compat_to_bytes(ip_int, 4, "big")
+ )
+
+ def _is_hostmask(self, ip_str):
+ """Test if the IP string is a hostmask (rather than a netmask).
+
+ Args:
+ ip_str: A string, the potential hostmask.
+
+ Returns:
+ A boolean, True if the IP string is a hostmask.
+
+ """
+ bits = ip_str.split(".")
+ try:
+ parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
+ except ValueError:
+ return False
+ if len(parts) != len(bits):
+ return False
+ if parts[0] < parts[-1]:
+ return True
+ return False
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv4 address.
+
+ This implements the method described in RFC1035 3.5.
+
+ """
+ reverse_octets = _compat_str(self).split(".")[::-1]
+ return ".".join(reverse_octets) + ".in-addr.arpa"
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ __slots__ = ("_ip", "__weakref__")
+
+ def __init__(self, address):
+
+ """
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.0.2.1') == IPv4Address(3221225985).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+ IPv4Address('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 4)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, "big")
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if "/" in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in self._constants._reserved_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ return (
+ self not in self._constants._public_network and not self.is_private
+ )
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self == self._constants._unspecified_address
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in self._constants._loopback_network
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv4Address.__init__(self, address)
+ self.network = IPv4Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+
+ if isinstance(address, tuple):
+ IPv4Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.network = IPv4Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv4Address.__init__(self, addr[0])
+
+ self.network = IPv4Network(address, strict=False)
+ self._prefixlen = self.network._prefixlen
+
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return "%s/%d" % (
+ self._string_from_ip_int(self._ip),
+ self.network.prefixlen,
+ )
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (
+ self.network < other.network
+ or self.network == other.network
+ and address_less
+ )
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.0.2/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+ or, more generally
+ IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+ IPv4Interface('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict is True and a network address is not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Constructing from a packed address or integer
+ if isinstance(address, (_compat_int_types, bytes)):
+ self.network_address = IPv4Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen
+ )
+ # fixme: address/network test here.
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ # We weren't given an address[1]
+ arg = self._max_prefixlen
+ self.network_address = IPv4Address(address[0])
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError("%s has host bits set" % self)
+ else:
+ self.network_address = IPv4Address(
+ packed & int(self.netmask)
+ )
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+ self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (
+ IPv4Address(int(self.network_address) & int(self.netmask))
+ != self.network_address
+ ):
+ raise ValueError("%s has host bits set" % self)
+ self.network_address = IPv4Address(
+ int(self.network_address) & int(self.netmask)
+ )
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return (
+ not (
+ self.network_address in IPv4Network("100.64.0.0/10")
+ and self.broadcast_address in IPv4Network("100.64.0.0/10")
+ )
+ and not self.is_private
+ )
+
+
+class _IPv4Constants(object):
+
+ _linklocal_network = IPv4Network("169.254.0.0/16")
+
+ _loopback_network = IPv4Network("127.0.0.0/8")
+
+ _multicast_network = IPv4Network("224.0.0.0/4")
+
+ _public_network = IPv4Network("100.64.0.0/10")
+
+ _private_networks = [
+ IPv4Network("0.0.0.0/8"),
+ IPv4Network("10.0.0.0/8"),
+ IPv4Network("127.0.0.0/8"),
+ IPv4Network("169.254.0.0/16"),
+ IPv4Network("172.16.0.0/12"),
+ IPv4Network("192.0.0.0/29"),
+ IPv4Network("192.0.0.170/31"),
+ IPv4Network("192.0.2.0/24"),
+ IPv4Network("192.168.0.0/16"),
+ IPv4Network("198.18.0.0/15"),
+ IPv4Network("198.51.100.0/24"),
+ IPv4Network("203.0.113.0/24"),
+ IPv4Network("240.0.0.0/4"),
+ IPv4Network("255.255.255.255/32"),
+ ]
+
+ _reserved_network = IPv4Network("240.0.0.0/4")
+
+ _unspecified_address = IPv4Address("0.0.0.0")
+
+
+IPv4Address._constants = _IPv4Constants
+
+
+class _BaseV6(object):
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 6
+ _ALL_ONES = (2 ** IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef")
+ _max_prefixlen = IPV6LENGTH
+
+ # There are only a bunch of valid v6 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 ip_str.
+
+ Returns:
+ An int, the IPv6 address
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError("Address cannot be empty")
+
+ parts = ip_str.split(":")
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ _min_parts = 3
+ if len(parts) < _min_parts:
+ msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+ raise AddressValueError(msg)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if "." in parts[-1]:
+ try:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ except AddressValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+ parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append("%x" % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ # The extra colon comes from using the "::" notation for a single
+ # leading or trailing zero part.
+ _max_parts = cls._HEXTET_COUNT + 1
+ if len(parts) > _max_parts:
+ msg = "At most %d colons permitted in %r" % (
+ _max_parts - 1,
+ ip_str,
+ )
+ raise AddressValueError(msg)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ skip_index = None
+ for i in _compat_range(1, len(parts) - 1):
+ if not parts[i]:
+ if skip_index is not None:
+ # Can't have more than one '::'
+ msg = "At most one '::' permitted in %r" % ip_str
+ raise AddressValueError(msg)
+ skip_index = i
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ msg = "Expected at most %d other parts with '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The
+ # endpoints could still be empty, but _parse_hextet() will check
+ # for that.
+ if len(parts) != cls._HEXTET_COUNT:
+ msg = "Exactly %d parts expected without '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+ if not parts[0]:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0
+ for i in range(parts_hi):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in range(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ return ip_int
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
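+
+ # A worked example (illustrative): '2001:db8::8:800:200c:417a' splits into
+ # parts_hi=2 ('2001', 'db8') and parts_lo=4, so parts_skipped=2 and the
+ # method returns 0x20010db80000000000080800200c417a.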
+
+ @classmethod
+ def _parse_hextet(cls, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from
+ [0..FFFF].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError("Only hex digits permitted in %r" % hextet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(hextet_str) > 4:
+ msg = "At most 4 characters permitted in %r"
+ raise ValueError(msg % hextet_str)
+ # Length check means we can skip checking the integer value
+ return int(hextet_str, 16)
+
+ @classmethod
+ def _compress_hextets(cls, hextets):
+ """Compresses a list of hextets.
+
+ Compresses a list of strings, replacing the longest continuous
+ sequence of "0" in the list with "" and adding empty strings at
+ the beginning or at the end of the string such that subsequently
+ calling ":".join(hextets) will produce the compressed version of
+ the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index, hextet in enumerate(hextets):
+ if hextet == "0":
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (
+ best_doublecolon_start + best_doublecolon_len
+ )
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += [""]
+ hextets[best_doublecolon_start:best_doublecolon_end] = [""]
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [""] + hextets
+
+ return hextets
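+
+ # Illustrative, derived from the algorithm above:
+ #     >>> _BaseV6._compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
+ #     ['2001', 'db8', '', '1']
+ # ':'.join(...) on that result then yields '2001:db8::1'.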
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if ip_int is None:
+ ip_int = int(cls._ip)
+
+ if ip_int > cls._ALL_ONES:
+ raise ValueError("IPv6 address is too large")
+
+ hex_str = "%032x" % ip_int
+ hextets = ["%x" % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
+
+ hextets = cls._compress_hextets(hextets)
+ return ":".join(hextets)
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, IPv6Network):
+ ip_str = _compat_str(self.network_address)
+ elif isinstance(self, IPv6Interface):
+ ip_str = _compat_str(self.ip)
+ else:
+ ip_str = _compat_str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ hex_str = "%032x" % ip_int
+ parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
+ if isinstance(self, (_BaseNetwork, IPv6Interface)):
+ return "%s/%d" % (":".join(parts), self._prefixlen)
+ return ":".join(parts)
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv6 address.
+
+ This implements the method described in RFC3596 2.5.
+
+ """
+ reverse_chars = self.exploded[::-1].replace(":", "")
+ return ".".join(reverse_chars) + ".ip6.arpa"
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+ """Represent and manipulate single IPv6 Addresses."""
+
+ __slots__ = ("_ip", "__weakref__")
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:db8::') ==
+ IPv6Address(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Address(int(IPv6Address('2001:db8::'))) ==
+ IPv6Address('2001:db8::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 16)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, "big")
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if "/" in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return any(self in x for x in self._constants._reserved_networks)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return self in self._constants._linklocal_network
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in self._constants._sitelocal_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return self._ip == 0
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return self._ip == 1
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
+
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (
+ IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF),
+ )
+
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
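+
+ # Illustrative examples for the embedded-address helpers above, computed
+ # from the bit layouts they implement:
+ #     >>> IPv6Address('::ffff:192.0.2.1').ipv4_mapped
+ #     IPv4Address('192.0.2.1')
+ #     >>> IPv6Address('2002:c000:204::').sixtofour
+ #     IPv4Address('192.0.2.4')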
+
+
+class IPv6Interface(IPv6Address):
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv6Address.__init__(self, address)
+ self.network = IPv6Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+ if isinstance(address, tuple):
+ IPv6Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv6Address.__init__(self, addr[0])
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return "%s/%d" % (
+ self._string_from_ip_int(self._ip),
+ self.network.prefixlen,
+ )
+
+ def __eq__(self, other):
+ address_equal = IPv6Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv6Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (
+ self.network < other.network
+ or self.network == other.network
+ and address_less
+ )
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv6Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
+
+ @property
+ def is_unspecified(self):
+ return self._ip == 0 and self.network.is_unspecified
+
+ @property
+ def is_loopback(self):
+ return self._ip == 1 and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+ Attributes: [examples for IPv6('2001:db8::1000/124')]
+ .network_address: IPv6Address('2001:db8::1000')
+ .hostmask: IPv6Address('::f')
+ .broadcast_address: IPv6Address('2001:db8::100f')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+ .prefixlen: 124
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv6Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the
+ IP and prefix/netmask.
+ '2001:db8::/128'
+ '2001:db8:0000:0000:0000:0000:0000:0000/128'
+ '2001:db8::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:db8::') ==
+ IPv6Network(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Network(int(IPv6Network('2001:db8::'))) ==
+ IPv6Network('2001:db8::')
+
+ strict: A boolean. If true, ensure that we have been passed
+ a true network address, e.g., 2001:db8::1000/124, and not an
+ IP address on a network, e.g., 2001:db8::1/124.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Efficient constructor from integer or packed address
+ if isinstance(address, (bytes, _compat_int_types)):
+ self.network_address = IPv6Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen
+ )
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ self.network_address = IPv6Address(address[0])
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError("%s has host bits set" % self)
+ else:
+ self.network_address = IPv6Address(
+ packed & int(self.netmask)
+ )
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+
+ self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (
+ IPv6Address(int(self.network_address) & int(self.netmask))
+ != self.network_address
+ ):
+ raise ValueError("%s has host bits set" % self)
+ self.network_address = IPv6Address(
+ int(self.network_address) & int(self.netmask)
+ )
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast + 1):
+ yield self._address_class(x)
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (
+ self.network_address.is_site_local
+ and self.broadcast_address.is_site_local
+ )
+
+
+class _IPv6Constants(object):
+
+ _linklocal_network = IPv6Network("fe80::/10")
+
+ _multicast_network = IPv6Network("ff00::/8")
+
+ _private_networks = [
+ IPv6Network("::1/128"),
+ IPv6Network("::/128"),
+ IPv6Network("::ffff:0:0/96"),
+ IPv6Network("100::/64"),
+ IPv6Network("2001::/23"),
+ IPv6Network("2001:2::/48"),
+ IPv6Network("2001:db8::/32"),
+ IPv6Network("2001:10::/28"),
+ IPv6Network("fc00::/7"),
+ IPv6Network("fe80::/10"),
+ ]
+
+ _reserved_networks = [
+ IPv6Network("::/8"),
+ IPv6Network("100::/8"),
+ IPv6Network("200::/7"),
+ IPv6Network("400::/6"),
+ IPv6Network("800::/5"),
+ IPv6Network("1000::/4"),
+ IPv6Network("4000::/3"),
+ IPv6Network("6000::/3"),
+ IPv6Network("8000::/3"),
+ IPv6Network("A000::/3"),
+ IPv6Network("C000::/3"),
+ IPv6Network("E000::/4"),
+ IPv6Network("F000::/5"),
+ IPv6Network("F800::/6"),
+ IPv6Network("FE00::/9"),
+ ]
+
+ _sitelocal_network = IPv6Network("fec0::/10")
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/database.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/database.py
new file mode 100644
index 00000000..67850308
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/database.py
@@ -0,0 +1,189 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+# Input patterns for is_input_dangerous function:
+#
+# 1. '"' in string and '--' in string or
+# "'" in string and '--' in string
+PATTERN_1 = re.compile(r'(\'|\").*--')
+
+# 2. union \ intersect \ except + select
+PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE)
+
+# 3. ';' and any KEY_WORDS
+PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE)
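+#
+# Illustrative strings each pattern is meant to catch (assumed examples,
+# not drawn from real queries):
+#   PATTERN_1: "name' --"                  (quote followed by '--')
+#   PATTERN_2: "1 UNION SELECT version()"  (set operation plus SELECT)
+#   PATTERN_3: "1; DROP TABLE users"       (';' followed by a keyword)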
+
+
+class SQLParseError(Exception):
+ pass
+
+
+class UnclosedQuoteError(SQLParseError):
+ pass
+
+
+# maps a type of identifier to the maximum number of dot levels that are
+# allowed to specify that identifier. For example, a database column can be
+# specified by up to 4 levels: database.schema.table.column
+_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
+ database=1,
+ schema=2,
+ table=3,
+ column=4,
+ role=1,
+ tablespace=1,
+ sequence=3,
+ publication=1,
+)
+_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+ accumulate = 0
+ while True:
+ try:
+ quote = identifier.index(quote_char)
+ except ValueError:
+ raise UnclosedQuoteError
+ accumulate = accumulate + quote
+ try:
+ next_char = identifier[quote + 1]
+ except IndexError:
+ return accumulate
+ if next_char == quote_char:
+ try:
+ identifier = identifier[quote + 2:]
+ accumulate = accumulate + 2
+ except IndexError:
+ raise UnclosedQuoteError
+ else:
+ return accumulate
+
+
+def _identifier_parse(identifier, quote_char):
+ if not identifier:
+ raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
+
+ already_quoted = False
+ if identifier.startswith(quote_char):
+ already_quoted = True
+ try:
+ end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
+ except UnclosedQuoteError:
+ already_quoted = False
+ else:
+ if end_quote < len(identifier) - 1:
+ if identifier[end_quote + 1] == '.':
+ dot = end_quote + 1
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ further_identifiers.insert(0, first_identifier)
+ else:
+ raise SQLParseError('User escaped identifiers must escape extra quotes')
+ else:
+ further_identifiers = [identifier]
+
+ if not already_quoted:
+ try:
+ dot = identifier.index('.')
+ except ValueError:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ if dot == 0 or dot >= len(identifier) - 1:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ first_identifier = first_identifier.replace(quote_char, quote_char * 2)
+ first_identifier = ''.join((quote_char, first_identifier, quote_char))
+ further_identifiers.insert(0, first_identifier)
+
+ return further_identifiers
+
+
+def pg_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='"')
+ if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+ return '.'.join(identifier_fragments)
+
+
+def mysql_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='`')
+ if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+
+ special_cased_fragments = []
+ for fragment in identifier_fragments:
+ if fragment == '`*`':
+ special_cased_fragments.append('*')
+ else:
+ special_cased_fragments.append(fragment)
+
+ return '.'.join(special_cased_fragments)
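+
+
+# A quick sketch of the quoting behaviour (illustrative):
+#     >>> pg_quote_identifier('my.table', 'table')
+#     '"my"."table"'
+#     >>> mysql_quote_identifier('mydb.*', 'table')
+#     '`mydb`.*'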
+
+
+def is_input_dangerous(string):
+ """Check if the passed string is potentially dangerous.
+ Can be used to prevent SQL injections.
+
+ Note: use this function only when you can't pass the values as
+ parameters to psycopg2's cursor.execute method
+ (typically with DDL queries).
+ """
+ if not string:
+ return False
+
+ for pattern in (PATTERN_1, PATTERN_2, PATTERN_3):
+ if re.search(pattern, string):
+ return True
+
+ return False
+
+
+def check_input(module, *args):
+ """Wrapper for is_input_dangerous function."""
+ needs_to_check = args
+
+ dangerous_elements = []
+
+ for elem in needs_to_check:
+ if isinstance(elem, str):
+ if is_input_dangerous(elem):
+ dangerous_elements.append(elem)
+
+ elif isinstance(elem, list):
+ for e in elem:
+ if is_input_dangerous(e):
+ dangerous_elements.append(e)
+
+ elif elem is None or isinstance(elem, bool):
+ pass
+
+ else:
+ elem = str(elem)
+ if is_input_dangerous(elem):
+ dangerous_elements.append(elem)
+
+ if dangerous_elements:
+ module.fail_json(msg="Passed input '%s' is "
+ "potentially dangerous" % ', '.join(dangerous_elements))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/dimensiondata.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
new file mode 100644
index 00000000..bcb02e84
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
@@ -0,0 +1,330 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Mark Maglana <mmaglana@gmail.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# Common functionality to be used by various module components
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import configparser
+from os.path import expanduser
+from uuid import UUID
+
+LIBCLOUD_IMP_ERR = None
+try:
+ from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
+ from libcloud.compute.base import Node, NodeLocation
+ from libcloud.compute.providers import get_driver
+ from libcloud.compute.types import Provider
+
+ import libcloud.security
+
+ HAS_LIBCLOUD = True
+except ImportError:
+ LIBCLOUD_IMP_ERR = traceback.format_exc()
+ HAS_LIBCLOUD = False
+
+# MCP 2.x version pattern for location (datacenter) names.
+#
+# Note that this is not a totally reliable way of determining MCP version.
+# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
+# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
+# by specifying it in the module parameters.
+MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
+
+
+class DimensionDataModule(object):
+ """
+ The base class containing common functionality used by Dimension Data modules for Ansible.
+ """
+
+ def __init__(self, module):
+ """
+ Create a new DimensionDataModule.
+
+ Will fail if Apache libcloud is not present.
+
+ :param module: The underlying Ansible module.
+ :type module: AnsibleModule
+ """
+
+ self.module = module
+
+ if not HAS_LIBCLOUD:
+ self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)
+
+ # Credentials are common to all Dimension Data modules.
+ credentials = self.get_credentials()
+ self.user_id = credentials['user_id']
+ self.key = credentials['key']
+
+ # Region and location are common to all Dimension Data modules.
+ region = self.module.params['region']
+ self.region = 'dd-{0}'.format(region)
+ self.location = self.module.params['location']
+
+ libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']
+
+ self.driver = get_driver(Provider.DIMENSIONDATA)(
+ self.user_id,
+ self.key,
+ region=self.region
+ )
+
+ # Determine the MCP API version (this depends on the target datacenter).
+ self.mcp_version = self.get_mcp_version(self.location)
+
+ # Optional "wait-for-completion" arguments
+ if 'wait' in self.module.params:
+ self.wait = self.module.params['wait']
+ self.wait_time = self.module.params['wait_time']
+ self.wait_poll_interval = self.module.params['wait_poll_interval']
+ else:
+ self.wait = False
+ self.wait_time = 0
+ self.wait_poll_interval = 0
+
+ def get_credentials(self):
+ """
+ Get user_id and key from module configuration, environment, or dotfile.
+ Order of priority is module, environment, dotfile.
+
+ To set in environment:
+
+ export MCP_USER='myusername'
+ export MCP_PASSWORD='mypassword'
+
+ To set in dot file place a file at ~/.dimensiondata with
+ the following contents:
+
+ [dimensiondatacloud]
+ MCP_USER: myusername
+ MCP_PASSWORD: mypassword
+ """
+
+ if not HAS_LIBCLOUD:
+ self.module.fail_json(msg='libcloud is required for this module.')
+
+ user_id = None
+ key = None
+
+ # First, try the module configuration
+ if 'mcp_user' in self.module.params:
+ if 'mcp_password' not in self.module.params:
+ self.module.fail_json(
+ msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
+ )
+
+ user_id = self.module.params['mcp_user']
+ key = self.module.params['mcp_password']
+
+ # Fall back to environment
+ if not user_id or not key:
+ user_id = os.environ.get('MCP_USER', None)
+ key = os.environ.get('MCP_PASSWORD', None)
+
+ # Finally, try dotfile (~/.dimensiondata)
+ if not user_id or not key:
+ home = expanduser('~')
+ config = configparser.RawConfigParser()
+ config.read("%s/.dimensiondata" % home)
+
+ try:
+ user_id = config.get("dimensiondatacloud", "MCP_USER")
+ key = config.get("dimensiondatacloud", "MCP_PASSWORD")
+ except (configparser.NoSectionError, configparser.NoOptionError):
+ pass
+
+ # One or more credentials not found. Function can't recover from this
+ # so it has to raise an error instead of failing silently.
+ if not user_id:
+ raise MissingCredentialsError("Dimension Data user id not found")
+ elif not key:
+ raise MissingCredentialsError("Dimension Data key not found")
+
+ # Both found, return data
+ return dict(user_id=user_id, key=key)
+
+ def get_mcp_version(self, location):
+ """
+ Get the MCP version for the specified location.
+ """
+
+ location = self.driver.ex_get_location_by_id(location)
+ if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
+ return '2.0'
+
+ return '1.0'
+
+ def get_network_domain(self, locator, location):
+ """
+ Retrieve a network domain by its name or Id.
+ """
+
+ if is_uuid(locator):
+ network_domain = self.driver.ex_get_network_domain(locator)
+ else:
+ matching_network_domains = [
+ network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
+ if network_domain.name == locator
+ ]
+
+ if matching_network_domains:
+ network_domain = matching_network_domains[0]
+ else:
+ network_domain = None
+
+ if network_domain:
+ return network_domain
+
+ raise UnknownNetworkError("Network '%s' could not be found" % locator)
+
+ def get_vlan(self, locator, location, network_domain):
+ """
+ Get a VLAN object by its name or id
+ """
+ if is_uuid(locator):
+ vlan = self.driver.ex_get_vlan(locator)
+ else:
+ matching_vlans = [
+ vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
+ if vlan.name == locator
+ ]
+
+ if matching_vlans:
+ vlan = matching_vlans[0]
+ else:
+ vlan = None
+
+ if vlan:
+ return vlan
+
+ raise UnknownVLANError("VLAN '%s' could not be found" % locator)
+
+ @staticmethod
+ def argument_spec(**additional_argument_spec):
+ """
+ Build an argument specification for a Dimension Data module.
+ :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+ :return: A dict containing the argument specification.
+ """
+
+ spec = dict(
+ region=dict(type='str', default='na'),
+ mcp_user=dict(type='str', required=False),
+ mcp_password=dict(type='str', required=False, no_log=True),
+ location=dict(type='str', required=True),
+ validate_certs=dict(type='bool', required=False, default=True)
+ )
+
+ if additional_argument_spec:
+ spec.update(additional_argument_spec)
+
+ return spec
+
+ @staticmethod
+ def argument_spec_with_wait(**additional_argument_spec):
+ """
+ Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
+ :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+ :return: A dict containing the argument specification.
+ """
+
+ spec = DimensionDataModule.argument_spec(
+ wait=dict(type='bool', required=False, default=False),
+ wait_time=dict(type='int', required=False, default=600),
+ wait_poll_interval=dict(type='int', required=False, default=2)
+ )
+
+ if additional_argument_spec:
+ spec.update(additional_argument_spec)
+
+ return spec
+
+ @staticmethod
+ def required_together(*additional_required_together):
+ """
+ Get the basic argument specification for Dimension Data modules indicating which arguments must be specified together.
+ :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
+ :return: An array containing the argument specifications.
+ """
+
+ required_together = [
+ ['mcp_user', 'mcp_password']
+ ]
+
+ if additional_required_together:
+ required_together.extend(additional_required_together)
+
+ return required_together
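+
+ # Sketch of how a module would typically wire these helpers together
+ # (illustrative; any extra arguments would be module-specific):
+ #
+ #     module = AnsibleModule(
+ #         argument_spec=DimensionDataModule.argument_spec_with_wait(),
+ #         required_together=DimensionDataModule.required_together(),
+ #     )
+ #     dd_module = DimensionDataModule(module)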
+
+
+class LibcloudNotFound(Exception):
+ """
+ Exception raised when Apache libcloud cannot be found.
+ """
+
+ pass
+
+
+class MissingCredentialsError(Exception):
+ """
+ Exception raised when credentials for Dimension Data CloudControl cannot be found.
+ """
+
+ pass
+
+
+class UnknownNetworkError(Exception):
+ """
+ Exception raised when a network or network domain cannot be found.
+ """
+
+ pass
+
+
+class UnknownVLANError(Exception):
+ """
+ Exception raised when a VLAN cannot be found.
+ """
+
+ pass
+
+
+def get_dd_regions():
+ """
+ Get the list of available regions whose vendor is Dimension Data.
+ """
+
+ # Get endpoints
+ all_regions = API_ENDPOINTS.keys()
+
+ # Only Dimension Data endpoints, with the 'dd-' prefix stripped
+ regions = [region[3:] for region in all_regions if region.startswith('dd-')]
+
+ return regions
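+
+
+# Illustrative: with endpoint keys such as 'dd-na' and 'dd-eu' in libcloud's
+# API_ENDPOINTS, get_dd_regions() would return ['na', 'eu', ...].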
+
+
+def is_uuid(u, version=4):
+ """
+ Test if valid v4 UUID
+ """
+ try:
+ uuid_obj = UUID(u, version=version)
+
+ return str(uuid_obj) == u
+ except ValueError:
+ return False
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/common.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/common.py
new file mode 100644
index 00000000..03307250
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/common.py
@@ -0,0 +1,1022 @@
+#
+# Copyright 2016 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+
+# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException
+except ImportError:
+ # Either docker-py no longer uses requests, docker-py itself is missing,
+ # or its requests dependency is missing. In any case, define an exception
+ # class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost'
+MIN_DOCKER_VERSION = "1.8.0"
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+ docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+ tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+ api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+ ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+ client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+ client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+ ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+ tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+ validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+ debug=dict(type='bool', default=False)
+)
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+ ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+if not HAS_DOCKER_PY:
+ docker_version = None
+
+ # No Docker SDK for Python. Create a placeholder client to allow
+ # instantiation of AnsibleModule and proper error handling
+ class Client(object): # noqa: F811
+ def __init__(self, **kwargs):
+ pass
+
+ class APIError(Exception): # noqa: F811
+ pass
+
+ class NotFound(Exception): # noqa: F811
+ pass
+
+
+def is_image_name_id(name):
+ """Check whether the given image name is in fact an image ID (hash)."""
+ if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+ return True
+ return False
+
+
+def is_valid_tag(tag, allow_empty=False):
+ """Check whether the given string is a valid docker tag name."""
+ if not tag:
+ return allow_empty
+ # See here ("Extended description") for a definition what tags can be:
+ # https://docs.docker.com/engine/reference/commandline/tag/
+ return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
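+
+
+# For example, derived from the regexes above:
+#     is_image_name_id('sha256:' + 'a' * 64)  -> True
+#     is_valid_tag('v1.2.3')                  -> True
+#     is_valid_tag('-latest')                 -> False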
+
+
+def sanitize_result(data):
+ """Sanitize data object for return to Ansible.
+
+ When the data object contains types such as docker.types.containers.HostConfig,
+ Ansible will fail when these are returned via exit_json or fail_json.
+ HostConfig is derived from dict, but its constructor requires additional
+ arguments. This function sanitizes data structures by recursively converting
+ everything derived from dict to dict and everything derived from list (and tuple)
+ to a list.
+ """
+ if isinstance(data, dict):
+ return dict((k, sanitize_result(v)) for k, v in data.items())
+ elif isinstance(data, (list, tuple)):
+ return [sanitize_result(v) for v in data]
+ else:
+ return data
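+
+
+# Illustrative: nested tuples and dict subclasses come back as plain builtins,
+# e.g. sanitize_result({'Config': [('a', 1)]}) == {'Config': [['a', 1]]}.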
+
+
+class DockerBaseClass(object):
+
+ def __init__(self):
+ self.debug = False
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+ result['tls_hostname'] = parsed_url.netloc  # use the host part, not the whole ParseResult
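+
+# e.g. a docker_host of 'tcp://docker.example.com:2376' yields a
+# tls_hostname of 'docker.example.com'.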
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+ # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
+DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
+DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
+ "Hint: if you do not need Python 2.6 support, try "
+ "`pip uninstall docker-py` instead, followed by `pip install docker`.")
+
+
+class AnsibleDockerClient(Client):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if)
+
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module if no "
+ "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
+ "can leave the other module in a broken state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ if NEEDS_DOCKER_PY2:
+ msg = missing_required_lib("Docker SDK for Python: docker")
+ msg = msg + ", for example via `pip install docker`. The error was: %s"
+ else:
+ msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
+ msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py) for non-Python-2.6 users.
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif docker_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClient, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.version()['ApiVersion']
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ if min_docker_api_version is not None:
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value):
+ if param_value is not None:
+ # take module parameter value
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return env_value
+
+ # take the default
+ return default_value
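+
+ # Precedence sketch: an explicit module parameter wins, then the
+ # environment variable, then the default; e.g.
+ # _get_value('tls', None, 'DOCKER_TLS', False) returns the DOCKER_TLS
+ # value when that variable is set, and False otherwise.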
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+ # Precedence: module parameters -> environment variables -> defaults.
+
+ self.log('Getting credentials')
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = self.module.params.get(key)
+
+ if self.module.params.get('use_tls'):
+ # support use_tls option in docker_image.py. This will be deprecated.
+ use_tls = self.module.params.get('use_tls')
+ if use_tls == 'encrypt':
+ params['tls'] = True
+ if use_tls == 'verify':
+ params['validate_certs'] = True
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+ tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+ # With API <= 1.20, images pulled from Docker Hub are named 'docker.io/<name>'
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+ # Last case: even when 'docker.io' was not in the name, the image
+ # may only be found under the registry-qualified name (#15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest"):
+ '''
+ Pull an image
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ for line in self.pull(name, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
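+ # The second element of the returned tuple is True when the pull did not
+ # change the local image, i.e. the inspection results before and after match.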
+ return new_tag, old_tag == new_tag
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
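+
+ # Illustrative example (hypothetical response): a network create call may return
+ #   {'Id': 'abc123', 'Warning': 'some warning text'}
+ # in which case report_warnings(result, ['Warning']) emits it via module.warn().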
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
+
+
+def compare_dict_allow_more_present(av, bv):
+ '''
+ Compare two dictionaries for whether every entry of the first is in the second.
+ '''
+ for key, value in av.items():
+ if key not in bv:
+ return False
+ if bv[key] != value:
+ return False
+ return True
+
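+# Illustrative examples (not part of the original code):
+#   compare_dict_allow_more_present({'a': 1}, {'a': 1, 'b': 2})  -> True
+#   compare_dict_allow_more_present({'a': 1}, {'a': 2})          -> False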
+
+def compare_generic(a, b, method, datatype):
+ '''
+ Compare values a and b as described by method and datatype.
+
+ Returns ``True`` if the values compare equal, and ``False`` if not.
+
+ ``a`` is usually the module's parameter, while ``b`` is a property
+ of the current object. ``a`` must not be ``None`` (except for
+ ``datatype == 'value'``).
+
+ Valid values for ``method`` are:
+ - ``ignore`` (always compare as equal);
+ - ``strict`` (compare as equal only if really equal);
+ - ``allow_more_present`` (allow b to have elements which a does not have).
+
+ Valid values for ``datatype`` are:
+ - ``value``: for simple values (strings, numbers, ...);
+ - ``list``: for ``list``s or ``tuple``s where order matters;
+ - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
+ matter;
+ - ``set(dict)``: for ``list``s, ``tuple``s or ``set``s where order does
+ not matter and which contain ``dict``s; ``allow_more_present`` is used
+ for the ``dict``s, and these are assumed to be dictionaries of values;
+ - ``dict``: for dictionaries of values.
+ '''
+ if method == 'ignore':
+ return True
+ # If a or b is None:
+ if a is None or b is None:
+ # If both are None: equality
+ if a == b:
+ return True
+ # Otherwise, not equal for values, and equal
+ # if the other is empty for set/list/dict
+ if datatype == 'value':
+ return False
+ # For allow_more_present, allow a to be None
+ if method == 'allow_more_present' and a is None:
+ return True
+ # Otherwise, the iterable object which is not None must have length 0
+ return len(b if a is None else a) == 0
+ # Do proper comparison (both objects not None)
+ if datatype == 'value':
+ return a == b
+ elif datatype == 'list':
+ if method == 'strict':
+ return a == b
+ else:
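+ # Check whether a is an ordered subsequence of b: each element of a
+ # must appear in b in the same relative order, with extras allowed.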
+ i = 0
+ for v in a:
+ while i < len(b) and b[i] != v:
+ i += 1
+ if i == len(b):
+ return False
+ i += 1
+ return True
+ elif datatype == 'dict':
+ if method == 'strict':
+ return a == b
+ else:
+ return compare_dict_allow_more_present(a, b)
+ elif datatype == 'set':
+ set_a = set(a)
+ set_b = set(b)
+ if method == 'strict':
+ return set_a == set_b
+ else:
+ return set_b >= set_a
+ elif datatype == 'set(dict)':
+ for av in a:
+ found = False
+ for bv in b:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ if method == 'strict':
+ # If we would know that both a and b do not contain duplicates,
+ # we could simply compare len(a) to len(b) to finish this test.
+ # We can assume that b has no duplicates (as it is returned by
+ # docker), but we don't know for a.
+ for bv in b:
+ found = False
+ for av in a:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ return True
+
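+# Illustrative behaviour (not part of the original code):
+#   compare_generic('x', 'x', 'strict', 'value')                              -> True
+#   compare_generic([1, 2], [1, 3, 2], 'allow_more_present', 'list')          -> True
+#   compare_generic({'a': 1}, {'a': 1, 'b': 2}, 'allow_more_present', 'dict') -> True
+#   compare_generic(['a'], ['b', 'a'], 'strict', 'set')                       -> False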
+
+class DifferenceTracker(object):
+ def __init__(self):
+ self._diff = []
+
+ def add(self, name, parameter=None, active=None):
+ self._diff.append(dict(
+ name=name,
+ parameter=parameter,
+ active=active,
+ ))
+
+ def merge(self, other_tracker):
+ self._diff.extend(other_tracker._diff)
+
+ @property
+ def empty(self):
+ return len(self._diff) == 0
+
+ def get_before_after(self):
+ '''
+ Return texts ``before`` and ``after``.
+ '''
+ before = dict()
+ after = dict()
+ for item in self._diff:
+ before[item['name']] = item['active']
+ after[item['name']] = item['parameter']
+ return before, after
+
+ def has_difference_for(self, name):
+ '''
+ Return True if a difference has been tracked for the given name.
+ '''
+ return any(diff for diff in self._diff if diff['name'] == name)
+
+ def get_legacy_docker_container_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = []
+ for entry in self._diff:
+ item = dict()
+ item[entry['name']] = dict(
+ parameter=entry['parameter'],
+ container=entry['active'],
+ )
+ result.append(item)
+ return result
+
+ def get_legacy_docker_diffs(self):
+ '''
+ Return the list of differing option names in the legacy docker_* format.
+ '''
+ result = [entry['name'] for entry in self._diff]
+ return result
+
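+# Illustrative use of DifferenceTracker (not part of the original code):
+#   tracker = DifferenceTracker()
+#   tracker.add('memory', parameter='512m', active='256m')
+#   before, after = tracker.get_before_after()
+#   # before == {'memory': '256m'}, after == {'memory': '512m'}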
+
+def clean_dict_booleans_for_docker_api(data):
+ '''
+ Go doesn't like Python booleans 'True' or 'False', while Ansible is just
+ fine with them in YAML. As such, they need to be converted in cases where
+ we pass dictionaries to the Docker API (e.g. docker_network's
+ driver_options and docker_prune's filters).
+ '''
+ result = dict()
+ if data is not None:
+ for k, v in data.items():
+ if v is True:
+ v = 'true'
+ elif v is False:
+ v = 'false'
+ else:
+ v = str(v)
+ result[str(k)] = v
+ return result
+
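+# e.g. (illustrative) clean_dict_booleans_for_docker_api({'enable_ipv6': True, 'mtu': 1400})
+#      -> {'enable_ipv6': 'true', 'mtu': '1400'}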
+
+def convert_duration_to_nanosecond(time_str):
+ """
+ Return the time duration in nanoseconds.
+ """
+ if not isinstance(time_str, str):
+ raise ValueError('Missing unit in duration - %s' % time_str)
+
+ regex = re.compile(
+ r'^(((?P<hours>\d+)h)?'
+ r'((?P<minutes>\d+)m(?!s))?'
+ r'((?P<seconds>\d+)s)?'
+ r'((?P<milliseconds>\d+)ms)?'
+ r'((?P<microseconds>\d+)us)?)$'
+ )
+ parts = regex.match(time_str)
+
+ if not parts:
+ raise ValueError('Invalid time duration - %s' % time_str)
+
+ parts = parts.groupdict()
+ time_params = {}
+ for (name, value) in parts.items():
+ if value:
+ time_params[name] = int(value)
+
+ delta = timedelta(**time_params)
+ time_in_nanoseconds = (
+ delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+ ) * 10 ** 3
+
+ return time_in_nanoseconds
+
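+# e.g. (illustrative) convert_duration_to_nanosecond('1m30s') == 90 * 10 ** 9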
+
+def parse_healthcheck(healthcheck):
+ """
+ Return a dictionary of healthcheck parameters, and a boolean indicating
+ whether the healthcheck defined in the image was requested to be disabled.
+ """
+ if (not healthcheck) or (not healthcheck.get('test')):
+ return None, None
+
+ result = dict()
+
+ # All supported healthcheck parameters
+ options = dict(
+ test='test',
+ interval='interval',
+ timeout='timeout',
+ start_period='start_period',
+ retries='retries'
+ )
+
+ duration_options = ['interval', 'timeout', 'start_period']
+
+ for (key, value) in options.items():
+ if value in healthcheck:
+ if healthcheck.get(value) is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value in duration_options:
+ time = convert_duration_to_nanosecond(healthcheck.get(value))
+ if time:
+ result[key] = time
+ elif healthcheck.get(value):
+ result[key] = healthcheck.get(value)
+ if key == 'test':
+ if isinstance(result[key], (tuple, list)):
+ result[key] = [str(e) for e in result[key]]
+ else:
+ result[key] = ['CMD-SHELL', str(result[key])]
+ elif key == 'retries':
+ try:
+ result[key] = int(result[key])
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse number of retries for healthcheck. '
+ 'Expected an integer, got "{0}".'.format(result[key])
+ )
+
+ if result['test'] == ['NONE']:
+ # If the user explicitly disables the healthcheck, return None
+ # as the healthcheck object, and set disable_healthcheck to True
+ return None, True
+
+ return result, False
+
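+# Illustrative example (hypothetical input):
+#   parse_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s', 'retries': 3})
+#   -> ({'test': ['CMD-SHELL', 'curl -f http://localhost/'],
+#        'interval': 30000000000, 'retries': 3}, False)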
+
+def omit_none_from_dict(d):
+ """
+ Return a copy of the dictionary with all keys with value None omitted.
+ """
+ return dict((k, v) for (k, v) in d.items() if v is not None)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/swarm.py
new file mode 100644
index 00000000..610ed9a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/swarm.py
@@ -0,0 +1,280 @@
+# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ LooseVersion,
+)
+
+
+class AnsibleDockerSwarmClient(AnsibleDockerClient):
+
+ def __init__(self, **kwargs):
+ super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
+
+ def get_swarm_node_id(self):
+ """
+ Get the 'NodeID' of the Swarm node, or 'None' if the host is not part of a Swarm.
+ The NodeID returned is that of the Docker host the module is executed on.
+ :return:
+ NodeID of the host, or 'None' if it is not part of a Swarm
+ """
+
+ try:
+ info = self.info()
+ except APIError as exc:
+ self.fail("Failed to get node information for %s" % to_native(exc))
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return swarm_info['Swarm']['NodeID']
+ return None
+
+ def check_if_swarm_node(self, node_id=None):
+ """
+ Check if the host is part of a Docker Swarm. If 'node_id' is not provided, the Docker host
+ system information is read and checked for the relevant Swarm keys. If 'node_id' is provided,
+ the node information is read, assuming the method runs on a Swarm manager; get_node_inspect()
+ handles the exception raised when it is not executed on a Swarm manager.
+
+ :param node_id: Node identifier
+ :return:
+ bool: True if node is part of Swarm, False otherwise
+ """
+
+ if node_id is None:
+ try:
+ info = self.info()
+ except APIError:
+ self.fail("Failed to get host information.")
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return True
+ if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+ return True
+ return False
+ else:
+ try:
+ node_info = self.get_node_inspect(node_id=node_id)
+ except APIError:
+ # Keep the return value falsy but explicit, matching the documented bool contract
+ return False
+
+ if node_info['ID'] is not None:
+ return True
+ return False
+
+ def check_if_swarm_manager(self):
+ """
+ Check if the node role is set to Manager in the Swarm. The node is the Docker host on which
+ the module action is performed. inspect_swarm() will fail if the node is not a manager.
+
+ :return: True if node is Swarm Manager, False otherwise
+ """
+
+ try:
+ self.inspect_swarm()
+ return True
+ except APIError:
+ return False
+
+ def fail_task_if_not_swarm_manager(self):
+ """
+ If the host is not a Swarm manager, the Ansible task on this host should end in the 'failed' state.
+ """
+ if not self.check_if_swarm_manager():
+ self.fail("Error running docker swarm module: must run on swarm manager node")
+
+ def check_if_swarm_worker(self):
+ """
+ Check if the node role is set to Worker in the Swarm. The node is the Docker host on which
+ the module action is performed. Relies on check_if_swarm_node(), which may fail the task if
+ the host information cannot be read.
+
+ :return: True if node is Swarm Worker, False otherwise
+ """
+
+ if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+ return True
+ return False
+
+ def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+ """
+ Check if the node status on the Swarm manager is 'down'. If node_id is provided, the manager
+ is queried about the specified node, otherwise it is queried about itself. If run on a Swarm
+ worker node, or on a host that is not part of a Swarm, it will fail the playbook
+
+ :param repeat_check: number of check attempts, with a 5-second delay between them; checks once by default
+ :param node_id: node ID or name; if None, the method will try to use the node ID of the host the module runs on
+ :return:
+ True if node is part of swarm but its state is down, False otherwise
+ """
+
+ if repeat_check < 1:
+ repeat_check = 1
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ for retry in range(0, repeat_check):
+ if retry > 0:
+ sleep(5)
+ node_info = self.get_node_inspect(node_id=node_id)
+ if node_info['Status']['State'] == 'down':
+ return True
+ return False
+
+ def get_node_inspect(self, node_id=None, skip_missing=False):
+ """
+ Return Swarm node info about a single node, as given by the 'docker node inspect' command
+
+ :param skip_missing: if True, the function returns None instead of failing the task
+ :param node_id: node ID or name; if None, the method will try to use the node ID of the host the module runs on
+ :return:
+ Single node information structure
+ """
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ if node_id is None:
+ self.fail("Failed to get node information.")
+
+ try:
+ node_info = self.inspect_node(node_id=node_id)
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ if exc.status_code == 404:
+ if skip_missing:
+ return None
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+
+ if 'ManagerStatus' in node_info:
+ if node_info['ManagerStatus'].get('Leader'):
+ # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
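+ # 'Addr' has the form 'host:port'; a single colon means the IPv4 host part
+ # can be split off, while more colons indicate an IPv6 address.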
+ count_colons = node_info['ManagerStatus']['Addr'].count(":")
+ if count_colons == 1:
+ swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+ else:
+ swarm_leader_ip = node_info['Status']['Addr']
+ node_info['Status']['Addr'] = swarm_leader_ip
+ return node_info
+
+ def get_all_nodes_inspect(self):
+ """
+ Return Swarm node info about all registered nodes, as given by the 'docker node inspect' command
+
+ :return:
+ Structure with information about all nodes
+ """
+ try:
+ node_info = self.nodes()
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+ return node_info
+
+ def get_all_nodes_list(self, output='short'):
+ """
+ Returns list of nodes registered in Swarm
+
+ :param output: Defines the format of the returned data
+ :return:
+ If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered
+ in the Swarm; if 'output' is 'long', it is a list of dicts containing the attributes shown in
+ the output of the command 'docker node ls'
+ """
+ nodes_list = []
+
+ nodes_inspect = self.get_all_nodes_inspect()
+ if nodes_inspect is None:
+ return None
+
+ if output == 'short':
+ for node in nodes_inspect:
+ nodes_list.append(node['Description']['Hostname'])
+ elif output == 'long':
+ for node in nodes_inspect:
+ node_property = {}
+
+ node_property.update({'ID': node['ID']})
+ node_property.update({'Hostname': node['Description']['Hostname']})
+ node_property.update({'Status': node['Status']['State']})
+ node_property.update({'Availability': node['Spec']['Availability']})
+ if 'ManagerStatus' in node:
+ if node['ManagerStatus']['Leader'] is True:
+ node_property.update({'Leader': True})
+ node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+ node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+ nodes_list.append(node_property)
+ else:
+ return None
+
+ return nodes_list
+
+ def get_node_name_by_id(self, nodeid):
+ return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+ def get_unlock_key(self):
+ if self.docker_py_version < LooseVersion('2.7.0'):
+ return None
+ return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+ def get_service_inspect(self, service_id, skip_missing=False):
+ """
+ Return Swarm service info about a single service, as given by the 'docker service inspect' command
+
+ :param service_id: service ID or name
+ :param skip_missing: if True, the function returns None instead of failing the task
+ :return:
+ Single service information structure
+ """
+ try:
+ service_info = self.inspect_service(service_id)
+ except NotFound as exc:
+ if skip_missing is False:
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ else:
+ return None
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
+ self.fail("Error inspecting swarm service: %s" % exc)
+ except Exception as exc:
+ self.fail("Error inspecting swarm service: %s" % exc)
+
+ json_str = json.dumps(service_info, ensure_ascii=False)
+ service_info = json.loads(json_str)
+ return service_info
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcdns.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcdns.py
new file mode 100644
index 00000000..1c61510f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcdns.py
@@ -0,0 +1,39 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from libcloud.dns.types import Provider
+ from libcloud.dns.providers import get_driver
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import unexpected_error_msg as gcp_error
+
+USER_AGENT_PRODUCT = "Ansible-gcdns"
+USER_AGENT_VERSION = "v1"
+
+
+def gcdns_connect(module, provider=None):
+ """Return a GCP connection for Google Cloud DNS."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+
+ provider = provider or Provider.GOOGLE
+ return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return gcp_error(error)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gce.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gce.py
new file mode 100644
index 00000000..292bb8b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gce.py
@@ -0,0 +1,39 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import unexpected_error_msg as gcp_error
+
+USER_AGENT_PRODUCT = "Ansible-gce"
+USER_AGENT_VERSION = "v1"
+
+
+def gce_connect(module, provider=None):
+ """Return a GCP connection for Google Compute Engine."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+ provider = provider or Provider.GCE
+
+ return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return gcp_error(error)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcp.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcp.py
new file mode 100644
index 00000000..a034f3b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcp.py
@@ -0,0 +1,799 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+# libcloud
+try:
+ import libcloud
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+# google-auth
+try:
+ import google.auth
+ from google.oauth2 import service_account
+ HAS_GOOGLE_AUTH = True
+except ImportError:
+ HAS_GOOGLE_AUTH = False
+
+# google-python-api
+try:
+ import google_auth_httplib2
+ from httplib2 import Http
+ from googleapiclient.http import set_user_agent
+ from googleapiclient.errors import HttpError
+ from apiclient.discovery import build
+ HAS_GOOGLE_API_LIB = True
+except ImportError:
+ HAS_GOOGLE_API_LIB = False
+
+
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
+
+
+def _get_gcp_ansible_credentials(module):
+ """Helper to fetch creds from AnsibleModule object."""
+ service_account_email = module.params.get('service_account_email', None)
+ # Note: pem_file is discouraged and will be deprecated
+ credentials_file = module.params.get('pem_file', None) or module.params.get(
+ 'credentials_file', None)
+ project_id = module.params.get('project_id', None)
+
+ return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_environ_var(var_name, default_value):
+ """Wrapper around os.environ.get call."""
+ return os.environ.get(
+ var_name, default_value)
+
+
+def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
+ """Helper to look in environment variables for credentials."""
+ # If any of the values are not given as parameters, check the appropriate
+ # environment variables.
+ if not service_account_email:
+ service_account_email = _get_gcp_environ_var('GCE_EMAIL', None)
+ if not credentials_file:
+ credentials_file = _get_gcp_environ_var(
+ 'GCE_CREDENTIALS_FILE_PATH', None) or _get_gcp_environ_var(
+ 'GOOGLE_APPLICATION_CREDENTIALS', None) or _get_gcp_environ_var(
+ 'GCE_PEM_FILE_PATH', None)
+ if not project_id:
+ project_id = _get_gcp_environ_var('GCE_PROJECT', None) or _get_gcp_environ_var(
+ 'GOOGLE_CLOUD_PROJECT', None)
+ return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
+ """
+ Obtain GCP credentials by trying various methods.
+
+ There are 3 ways to specify GCP credentials:
+ 1. Specify via Ansible module parameters (recommended).
+ 2. Specify via environment variables. Two sets of env vars are available:
+ a) GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS (preferred)
+ b) GCE_PROJECT, GCE_CREDENTIALS_FILE_PATH, GCE_EMAIL (legacy, not recommended; req'd if
+ using p12 key)
+ 3. Specify via libcloud secrets.py file (deprecated).
+
+ There are 3 helper functions to assist in the above.
+
+ Regardless of method, the user also has the option of specifying a JSON
+ file or a p12 file as the credentials file. JSON is strongly recommended and
+ p12 will be removed in the future.
+
+ Additionally, flags may be set to require valid json and check the libcloud
+ version.
+
+ AnsibleModule.fail_json is called only if the project_id cannot be found.
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param require_valid_json: If true, require credentials to be valid JSON. Default is True.
+ :type require_valid_json: ``bool``
+
+ :param check_libcloud: If true, check the libcloud version available to see if
+ JSON creds are supported.
+ :type check_libcloud: ``bool``
+
+ :return: {'service_account_email': service_account_email,
+ 'credentials_file': credentials_file,
+ 'project_id': project_id}
+ :rtype: ``dict``
+ """
+ (service_account_email,
+ credentials_file,
+ project_id) = _get_gcp_ansible_credentials(module)
+
+ # If any of the values are not given as parameters, check the appropriate
+ # environment variables.
+ (service_account_email,
+ credentials_file,
+ project_id) = _get_gcp_environment_credentials(service_account_email,
+ credentials_file, project_id)
+
+ if credentials_file is None or project_id is None or service_account_email is None:
+ if check_libcloud is True:
+ if project_id is None:
+ # TODO(supertom): this message is legacy and integration tests
+ # depend on it.
+ module.fail_json(msg='Missing GCE connection parameters in libcloud '
+ 'secrets file.')
+ else:
+ if project_id is None:
+ module.fail_json(msg=('GCP connection error: unable to determine project (%s) or '
+ 'credentials file (%s)' % (project_id, credentials_file)))
+ # Set these fields to empty strings if they are None
+ # consumers of this will make the distinction between an empty string
+ # and None.
+ if credentials_file is None:
+ credentials_file = ''
+ if service_account_email is None:
+ service_account_email = ''
+
+ # ensure the credentials file is found and is in the proper format.
+ if credentials_file:
+ _validate_credentials_file(module, credentials_file,
+ require_valid_json=require_valid_json,
+ check_libcloud=check_libcloud)
+
+ return {'service_account_email': service_account_email,
+ 'credentials_file': credentials_file,
+ 'project_id': project_id}
+
+
+def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
+ """
+ Check for valid credentials file.
+
+ Optionally check for JSON format and if libcloud supports JSON.
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param credentials_file: path to file on disk
+ :type credentials_file: ``str``. Complete path to file on disk.
+
+ :param require_valid_json: This argument is ignored as of Ansible 2.7.
+ :type require_valid_json: ``bool``
+
+ :param check_libcloud: If true, check the libcloud version available to see if
+ JSON creds are supported.
+ :type check_libcloud: ``bool``
+
+ :returns: True
+ :rtype: ``bool``
+ """
+ try:
+ # Try to read credentials as JSON
+ with open(credentials_file) as credentials:
+ json.loads(credentials.read())
+ # If the credentials are proper JSON and we do not have the minimum
+ # required libcloud version, bail out and return a descriptive
+ # error
+ if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0':
+ module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
+ 'Upgrade to libcloud>=0.17.0.')
+ return True
+ except IOError as e:
+ module.fail_json(msg='GCP Credentials File %s not found.' %
+ credentials_file, changed=False)
+ return False
+ except ValueError as e:
+ module.fail_json(
+ msg='Non-JSON credentials file provided. Please generate a new JSON key from the Google Cloud console',
+ changed=False)
+
+
+def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
+ """Return a Google libcloud driver connection."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+
+ creds = _get_gcp_credentials(module,
+ require_valid_json=False,
+ check_libcloud=True)
+ try:
+ gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'],
+ datacenter=module.params.get('zone', None),
+ project=creds['project_id'])
+ gcp.connection.user_agent_append("%s/%s" % (
+ user_agent_product, user_agent_version))
+ except (RuntimeError, ValueError) as e:
+ module.fail_json(msg=str(e), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ return gcp
+
+
+def get_google_cloud_credentials(module, scopes=None):
+ """
+ Get credentials object for use with Google Cloud client.
+
+ Attempts to obtain credentials by calling _get_gcp_credentials. If those are
+ not present will attempt to connect via Application Default Credentials.
+
+ To connect via libcloud, don't use this function, use gcp_connect instead. For
+ Google Python API Client, see get_google_api_auth for how to connect.
+
+ For more information on Google's client library options for Python, see:
+ U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+ Google Cloud example:
+ creds, params = get_google_cloud_credentials(module, scopes)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+ ...
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param scopes: list of scopes
+ :type scopes: ``list`` of URIs
+
+ :returns: A tuple containing (google authorized) credentials object and
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ scopes = [] if scopes is None else scopes
+
+ if not HAS_GOOGLE_AUTH:
+ module.fail_json(msg='Please install google-auth.')
+
+ conn_params = _get_gcp_credentials(module,
+ require_valid_json=True,
+ check_libcloud=False)
+ try:
+ if conn_params['credentials_file']:
+ credentials = service_account.Credentials.from_service_account_file(
+ conn_params['credentials_file'])
+ if scopes:
+ credentials = credentials.with_scopes(scopes)
+ else:
+ (credentials, project_id) = google.auth.default(
+ scopes=scopes)
+ if project_id is not None:
+ conn_params['project_id'] = project_id
+
+ return (credentials, conn_params)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ return (None, None)
+
+
+def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
+ """
+ Authentication for use with google-python-api-client.
+
+ Function calls get_google_cloud_credentials, which attempts to assemble the credentials
+ from various locations. Next it attempts to authenticate with Google.
+
+ This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.
+
+ For libcloud, don't use this function, use gcp_connect instead. For Google Cloud, See
+ get_google_cloud_credentials for how to connect.
+
+ For more information on Google's client library options for Python, see:
+ U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+ Google API example:
+ http_auth, conn_params = get_google_api_auth(module, scopes, user_agent_product, user_agent_version)
+ service = build('myservice', 'v1', http=http_auth)
+ ...
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param scopes: list of scopes
+ :type scopes: ``list`` of URIs
+
+ :param user_agent_product: User agent product. eg: 'ansible-python-api'
+ :type user_agent_product: ``str``
+
+ :param user_agent_version: Version string to append to product. eg: 'NA' or '0.1'
+ :type user_agent_version: ``str``
+
+ :returns: A tuple containing (google authorized) httplib2 request object and a
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ scopes = [] if scopes is None else scopes
+
+ if not HAS_GOOGLE_API_LIB:
+ module.fail_json(msg="Please install google-api-python-client library")
+ if not scopes:
+ scopes = GCP_DEFAULT_SCOPES
+ try:
+ (credentials, conn_params) = get_google_cloud_credentials(module, scopes)
+ http = set_user_agent(Http(), '%s-%s' %
+ (user_agent_product, user_agent_version))
+ http_auth = google_auth_httplib2.AuthorizedHttp(credentials, http=http)
+
+ return (http_auth, conn_params)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ return (None, None)
+
+
+def get_google_api_client(module, service, user_agent_product, user_agent_version,
+ scopes=None, api_version='v1'):
+ """
+ Get the discovery-based python client. Use when a cloud client is not available.
+
+ client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ :returns: A tuple containing the authorized client to the specified service and a
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ if not scopes:
+ scopes = GCP_DEFAULT_SCOPES
+
+ http_auth, conn_params = get_google_api_auth(module, scopes=scopes,
+ user_agent_product=user_agent_product,
+ user_agent_version=user_agent_version)
+ client = build(service, api_version, http=http_auth)
+
+ return (client, conn_params)
+
+
+def check_min_pkg_version(pkg_name, minimum_version):
+ """Minimum required version is >= installed version."""
+ from pkg_resources import get_distribution
+ try:
+ installed_version = get_distribution(pkg_name).version
+ return LooseVersion(installed_version) >= minimum_version
+ except Exception as e:
+ return False
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return 'Unexpected response: (%s). Detail: %s' % (str(error), traceback.format_exc())
+
+
+def get_valid_location(module, driver, location, location_type='zone'):
+ if location_type == 'zone':
+ loc = driver.ex_get_zone(location)
+ else:
+ loc = driver.ex_get_region(location)
+ if loc is None:
+ link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
+ module.fail_json(msg=('%s %s is invalid. Please see the list of '
+ 'available %s at %s' % (
+ location_type, location, location_type, link)),
+ changed=False)
+ return loc
+
+
+def check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+ [{'name': str, 'required': True/False', 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+ :return: True, or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if 'required' in d and d['required'] is True:
+ raise ValueError(("%s is required and must be of type: %s" %
+ (d['name'], str(d['type']))))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
+ d['name'], str(d['type']), params[d['name']],
+ type(params[d['name']]))))
+ if 'values' in d:
+ if params[d['name']] not in d['values']:
+ raise ValueError(("%s must be one of: %s" % (
+ d['name'], ','.join(d['values']))))
+ if isinstance(params[d['name']], int):
+ if 'min' in d:
+ if params[d['name']] < d['min']:
+ raise ValueError(("%s must be greater than or equal to: %s" % (
+ d['name'], d['min'])))
+ if 'max' in d:
+ if params[d['name']] > d['max']:
+ raise ValueError("%s must be less than or equal to: %s" % (
+ d['name'], d['max']))
+ return True
+
+
+class GCPUtils(object):
+ """
+ Helper utilities for GCP.
+ """
+
+ @staticmethod
+ def underscore_to_camel(txt):
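+ # e.g. (illustrative) 'default_service' -> 'defaultService'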
+ return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:])
+
+ @staticmethod
+ def remove_non_gcp_params(params):
+ """
+ Remove params if found.
+ """
+ params_to_remove = ['state']
+ for p in params_to_remove:
+ if p in params:
+ del params[p]
+
+ return params
+
+ @staticmethod
+ def params_to_gcp_dict(params, resource_name=None):
+ """
+ Recursively convert Ansible params to GCP params.
+
+ Keys are converted from snake_case to camelCase,
+ e.g. default_service to defaultService.
+
+ Handles lists, dicts and strings.
+
+ A special provision is made for the resource name.
+ """
+ if not isinstance(params, dict):
+ return params
+ gcp_dict = {}
+ params = GCPUtils.remove_non_gcp_params(params)
+ for k, v in params.items():
+ gcp_key = GCPUtils.underscore_to_camel(k)
+ if isinstance(v, dict):
+ retval = GCPUtils.params_to_gcp_dict(v)
+ gcp_dict[gcp_key] = retval
+ elif isinstance(v, list):
+ gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v]
+ else:
+ if resource_name and k == resource_name:
+ gcp_dict['name'] = v
+ else:
+ gcp_dict[gcp_key] = v
+ return gcp_dict
+
+ @staticmethod
+ def execute_api_client_req(req, client=None, raw=True,
+ operation_timeout=180, poll_interval=5,
+ raise_404=True):
+ """
+ General python api client interaction function.
+
+ For use with google-api-python-client, or with clients created by the
+ get_google_api_client function. Not for use with Google Cloud client libraries.
+
+ For long-running operations, we make an immediate query and then
+ sleep poll_interval before re-querying. After the request is done
+ we rebuild the request with a get method and return the result.
+
+ """
+ try:
+ resp = req.execute()
+
+ if not resp:
+ return None
+
+ if raw:
+ return resp
+
+ if resp['kind'] == 'compute#operation':
+ resp = GCPUtils.execute_api_client_operation_req(req, resp,
+ client,
+ operation_timeout,
+ poll_interval)
+
+ if 'items' in resp:
+ return resp['items']
+
+ return resp
+ except HttpError as h:
+ # Note: 404s can be generated (incorrectly) for dependent
+ # resources not existing. We let the caller determine if
+ # they want 404s raised for their invocation.
+ if h.resp.status == 404 and not raise_404:
+ return None
+ else:
+ raise
+ except Exception:
+ raise
+
+ @staticmethod
+ def execute_api_client_operation_req(orig_req, op_resp, client,
+ operation_timeout=180, poll_interval=5):
+ """
+ Poll an operation for a result.
+ """
+ parsed_url = GCPUtils.parse_gcp_url(orig_req.uri)
+ project_id = parsed_url['project']
+ resource_name = GCPUtils.get_gcp_resource_from_methodId(
+ orig_req.methodId)
+ resource = GCPUtils.build_resource_from_name(client, resource_name)
+
+ start_time = time.time()
+
+ complete = False
+ attempts = 1
+ while not complete:
+ if start_time + operation_timeout >= time.time():
+ op_req = client.globalOperations().get(
+ project=project_id, operation=op_resp['name'])
+ op_resp = op_req.execute()
+ if op_resp['status'] != 'DONE':
+ time.sleep(poll_interval)
+ attempts += 1
+ else:
+ complete = True
+ if op_resp['operationType'] == 'delete':
+ # don't wait for the delete
+ return True
+ elif op_resp['operationType'] in ['insert', 'update', 'patch']:
+ # TODO(supertom): Isolate 'build-new-request' stuff.
+ resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
+ resource_name)
+ if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
+ parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
+ 'entity_name']
+ args = {'project': project_id,
+ resource_name_singular: parsed_url['entity_name']}
+ new_req = resource.get(**args)
+ resp = new_req.execute()
+ return resp
+ else:
+ # assuming multiple entities, do a list call.
+ new_req = resource.list(project=project_id)
+ resp = new_req.execute()
+ return resp
+ else:
+ # operation didn't complete on time.
+ raise GCPOperationTimeoutError("Operation timed out: %s" % (
+ op_resp['targetLink']))
+
+ @staticmethod
+ def build_resource_from_name(client, resource_name):
+ try:
+ method = getattr(client, resource_name)
+ return method()
+ except AttributeError:
+ raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
+ client))
+
+ @staticmethod
+ def get_gcp_resource_from_methodId(methodId):
+ try:
+ parts = methodId.split('.')
+ if len(parts) != 3:
+ return None
+ else:
+ return parts[1]
+ except AttributeError:
+ return None
+
+ @staticmethod
+ def get_entity_name_from_resource_name(resource_name):
+ if not resource_name:
+ return None
+
+ try:
+ # Chop off global or region prefixes
+ if resource_name.startswith('global'):
+ resource_name = resource_name.replace('global', '')
+ elif resource_name.startswith('regional'):
+ resource_name = resource_name.replace('region', '')
+
+ # ensure we have a lower case first letter
+ resource_name = resource_name[0].lower() + resource_name[1:]
+
+ if resource_name[-3:] == 'ies':
+ return resource_name.replace(
+ resource_name[-3:], 'y')
+ if resource_name[-1] == 's':
+ return resource_name[:-1]
+
+ return resource_name
+
+ except AttributeError:
+ return None
+
+ @staticmethod
+ def parse_gcp_url(url):
+ """
+ Parse GCP urls and return dict of parts.
+
+ Supported URL structures:
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME
+
+ :param url: GCP-generated URL, such as a selflink or resource location.
+ :type url: ``str``
+
+ :return: dictionary of parts. Includes standard components of urlparse, plus
+ GCP-specific 'service', 'api_version', 'project' and
+ 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
+ and 'method_name', if applicable.
+ :rtype: ``dict``
+ """
+
+ p = urlparse.urlparse(url)
+ if not p:
+ return None
+ else:
+ # we add extra items such as
+ # zone, region and resource_name
+ url_parts = {}
+ url_parts['scheme'] = p.scheme
+ url_parts['host'] = p.netloc
+ url_parts['path'] = p.path
+ if p.path.find('/') == 0:
+ url_parts['path'] = p.path[1:]
+ url_parts['params'] = p.params
+ url_parts['fragment'] = p.fragment
+ url_parts['query'] = p.query
+ url_parts['project'] = None
+ url_parts['service'] = None
+ url_parts['api_version'] = None
+
+ path_parts = url_parts['path'].split('/')
+ url_parts['service'] = path_parts[0]
+ url_parts['api_version'] = path_parts[1]
+ if path_parts[2] == 'projects':
+ url_parts['project'] = path_parts[3]
+ else:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ if 'global' in path_parts:
+ url_parts['global'] = True
+ idx = path_parts.index('global')
+ if len(path_parts) - idx == 4:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 1]
+ url_parts['entity_name'] = path_parts[idx + 2]
+ url_parts['method_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 1]
+ url_parts['entity_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx == 2:
+ url_parts['resource_name'] = path_parts[idx + 1]
+
+ if len(path_parts) - idx < 2:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ elif 'regions' in path_parts or 'zones' in path_parts:
+ idx = -1
+ if 'regions' in path_parts:
+ idx = path_parts.index('regions')
+ url_parts['region'] = path_parts[idx + 1]
+ else:
+ idx = path_parts.index('zones')
+ url_parts['zone'] = path_parts[idx + 1]
+
+ if len(path_parts) - idx == 5:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+ url_parts['method_name'] = path_parts[idx + 4]
+
+ if len(path_parts) - idx == 4:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ url_parts['resource_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx < 3:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ else:
+ # no location in URL.
+ idx = path_parts.index('projects')
+ if len(path_parts) - idx == 5:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+ url_parts['method_name'] = path_parts[idx + 4]
+
+ if len(path_parts) - idx == 4:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ url_parts['resource_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx < 3:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ return url_parts
+
+ @staticmethod
+ def build_googleapi_url(project, api_version='v1', service='compute'):
+ return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)
+
+ @staticmethod
+ def filter_gcp_fields(params, excluded_fields=None):
+ new_params = {}
+ if not excluded_fields:
+ excluded_fields = ['creationTimestamp', 'id', 'kind',
+ 'selfLink', 'fingerprint', 'description']
+
+ if isinstance(params, list):
+ new_params = [GCPUtils.filter_gcp_fields(
+ x, excluded_fields) for x in params]
+ elif isinstance(params, dict):
+ for k in params.keys():
+ if k not in excluded_fields:
+ new_params[k] = GCPUtils.filter_gcp_fields(
+ params[k], excluded_fields)
+ else:
+ new_params = params
+
+ return new_params
+
+ @staticmethod
+ def are_params_equal(p1, p2):
+ """
+ Check if two params dicts are equal.
+ TODO(supertom): need a way to filter out URLs, or they need to be built
+ """
+ filtered_p1 = GCPUtils.filter_gcp_fields(p1)
+ filtered_p2 = GCPUtils.filter_gcp_fields(p2)
+ return filtered_p1 == filtered_p2
+
+
+class GCPError(Exception):
+ pass
+
+
+class GCPOperationTimeoutError(GCPError):
+ pass
+
+
+class GCPInvalidURLError(GCPError):
+ pass
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gitlab.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gitlab.py
new file mode 100644
index 00000000..e13f38c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gitlab.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+from distutils.version import StrictVersion
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+try:
+ from urllib import quote_plus # Python 2.X
+except ImportError:
+ from urllib.parse import quote_plus # Python 3+
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+
+def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
+ url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
+ headers = {}
+ if access_token:
+ headers['Authorization'] = "Bearer %s" % access_token
+ else:
+ headers['Private-Token'] = private_token
+
+ headers['Accept'] = "application/json"
+ headers['Content-Type'] = "application/json"
+
+ response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
+ status = info['status']
+ content = ""
+ if response:
+ content = response.read()
+ if status == 204:
+ return True, content
+ elif status == 200 or status == 201:
+ return True, json.loads(content)
+ else:
+ # content may be bytes on Python 3; convert before concatenating
+ return False, str(status) + ": " + to_native(content)
+
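+# Illustrative call (hypothetical values):
+#   ok, data = request(module, 'https://gitlab.example.com/api', 'group/project',
+#                      '/hooks', access_token=None, private_token='TOKEN')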
+
+def findProject(gitlab_instance, identifier):
+ try:
+ project = gitlab_instance.projects.get(identifier)
+ except Exception as e:
+ current_user = gitlab_instance.user
+ try:
+ project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
+ except Exception as e:
+ return None
+
+ return project
+
+
+def findGroup(gitlab_instance, identifier):
+ try:
+ project = gitlab_instance.groups.get(identifier)
+ except Exception as e:
+ return None
+
+ return project
+
+
+def gitlabAuthentication(module):
+ gitlab_url = module.params['api_url']
+ validate_certs = module.params['validate_certs']
+ gitlab_user = module.params['api_username']
+ gitlab_password = module.params['api_password']
+ gitlab_token = module.params['api_token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ try:
+ # The python-gitlab library removed support for username/password authentication in 1.13.0.
+ # Changelog: https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
+ # This condition allows us to keep supporting older versions of the python-gitlab library.
+ if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"):
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
+ private_token=gitlab_token, api_version=4)
+ else:
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
+
+ gitlab_instance.auth()
+ except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
+ module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
+ except (gitlab.exceptions.GitlabHttpError) as e:
+        module.fail_json(msg="Failed to connect to GitLab server: %s. \
+            GitLab removed the Session API in version 10.2, since private tokens were removed from user API endpoints." % to_native(e))
+
+ return gitlab_instance
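
Editorial usage sketch (not part of the patch): a gitlab_* module would combine these helpers
roughly as below. The argument spec mirrors the parameters that gitlabAuthentication() reads;
everything else is an assumption.

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.gitlab import (
        findProject, gitlabAuthentication)

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                api_url=dict(type='str', required=True),
                validate_certs=dict(type='bool', default=True),
                api_username=dict(type='str'),
                api_password=dict(type='str', no_log=True),
                api_token=dict(type='str', no_log=True),
                project=dict(type='str', required=True),
            ),
        )
        gitlab_instance = gitlabAuthentication(module)
        project = findProject(gitlab_instance, module.params['project'])
        module.exit_json(changed=False, found=project is not None)

    if __name__ == '__main__':
        main()
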
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/heroku.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/heroku.py
new file mode 100644
index 00000000..b6e89614
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/heroku.py
@@ -0,0 +1,41 @@
+# Copyright: (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+
+HAS_HEROKU = False
+HEROKU_IMP_ERR = None
+try:
+ import heroku3
+ HAS_HEROKU = True
+except ImportError:
+ HEROKU_IMP_ERR = traceback.format_exc()
+
+
+class HerokuHelper(object):
+ def __init__(self, module):
+ self.module = module
+ self.check_lib()
+ self.api_key = module.params["api_key"]
+
+ def check_lib(self):
+ if not HAS_HEROKU:
+ self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR)
+
+ @staticmethod
+ def heroku_argument_spec():
+ return dict(
+ api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True))
+
+ def get_heroku_client(self):
+ client = heroku3.from_key(self.api_key)
+
+ if not client.is_authenticated:
+ self.module.fail_json(msg='Heroku authentication failure, please check your API Key')
+
+ return client
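
Editorial usage sketch (not part of the patch), assuming the heroku3 client's apps() accessor:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper

    def main():
        module = AnsibleModule(argument_spec=HerokuHelper.heroku_argument_spec())
        client = HerokuHelper(module).get_heroku_client()
        module.exit_json(changed=False, apps=[app.name for app in client.apps()])

    if __name__ == '__main__':
        main()
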
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hetzner.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hetzner.py
new file mode 100644
index 00000000..2bc3d166
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hetzner.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Felix Fontein <felix@fontein.de>, 2019
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+import time
+
+
+HETZNER_DEFAULT_ARGUMENT_SPEC = dict(
+ hetzner_user=dict(type='str', required=True),
+ hetzner_password=dict(type='str', required=True, no_log=True),
+)
+
+# The API endpoint is fixed.
+BASE_URL = "https://robot-ws.your-server.de"
+
+
+def fetch_url_json(module, url, method='GET', timeout=10, data=None, headers=None, accept_errors=None):
+ '''
+    Make a general request to Hetzner's JSON robot API.
+ '''
+ module.params['url_username'] = module.params['hetzner_user']
+ module.params['url_password'] = module.params['hetzner_password']
+ resp, info = fetch_url(module, url, method=method, timeout=timeout, data=data, headers=headers)
+ try:
+ content = resp.read()
+ except AttributeError:
+ content = info.pop('body', None)
+
+ if not content:
+ module.fail_json(msg='Cannot retrieve content from {0}'.format(url))
+
+ try:
+ result = module.from_json(content.decode('utf8'))
+ if 'error' in result:
+ if accept_errors:
+ if result['error']['code'] in accept_errors:
+ return result, result['error']['code']
+ module.fail_json(msg='Request failed: {0} {1} ({2})'.format(
+ result['error']['status'],
+ result['error']['code'],
+ result['error']['message']
+ ))
+ return result, None
+ except ValueError:
+ module.fail_json(msg='Cannot decode content retrieved from {0}'.format(url))
+
+
+class CheckDoneTimeoutException(Exception):
+ def __init__(self, result, error):
+ super(CheckDoneTimeoutException, self).__init__()
+ self.result = result
+ self.error = error
+
+
+def fetch_url_json_with_retries(module, url, check_done_callback, check_done_delay=10, check_done_timeout=180, skip_first=False, **kwargs):
+ '''
+    Make a general request to Hetzner's JSON robot API, retrying until a condition is satisfied.
+
+    The condition is tested by calling ``check_done_callback(result, error)``. While it is not
+    satisfied, the request is retried after a delay of ``check_done_delay`` seconds, until a total
+    timeout of ``check_done_timeout`` seconds (measured from the start of the first request) is
+    reached.
+
+    If ``skip_first`` is true, assume that a first call has already been made and start directly
+    with waiting.
+ '''
+ start_time = time.time()
+ if not skip_first:
+ result, error = fetch_url_json(module, url, **kwargs)
+ if check_done_callback(result, error):
+ return result, error
+ while True:
+ elapsed = (time.time() - start_time)
+ left_time = check_done_timeout - elapsed
+ time.sleep(max(min(check_done_delay, left_time), 0))
+ result, error = fetch_url_json(module, url, **kwargs)
+ if check_done_callback(result, error):
+ return result, error
+ if left_time < check_done_delay:
+ raise CheckDoneTimeoutException(result, error)
+
+
+# #####################################################################################
+# ## FAILOVER IP ######################################################################
+
+def get_failover_record(module, ip):
+ '''
+ Get information record of failover IP.
+
+ See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
+ '''
+ url = "{0}/failover/{1}".format(BASE_URL, ip)
+ result, error = fetch_url_json(module, url)
+ if 'failover' not in result:
+ module.fail_json(msg='Cannot interpret result: {0}'.format(result))
+ return result['failover']
+
+
+def get_failover(module, ip):
+ '''
+ Get current routing target of failover IP.
+
+ The value ``None`` represents unrouted.
+
+ See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
+ '''
+ return get_failover_record(module, ip)['active_server_ip']
+
+
+def set_failover(module, ip, value, timeout=180):
+ '''
+ Set current routing target of failover IP.
+
+ Return a pair ``(value, changed)``. The value ``None`` for ``value`` represents unrouted.
+
+ See https://robot.your-server.de/doc/webservice/en.html#post-failover-failover-ip
+ and https://robot.your-server.de/doc/webservice/en.html#delete-failover-failover-ip
+ '''
+ url = "{0}/failover/{1}".format(BASE_URL, ip)
+ if value is None:
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='DELETE',
+ timeout=timeout,
+ accept_errors=['FAILOVER_ALREADY_ROUTED']
+ )
+ else:
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ data = dict(
+ active_server_ip=value,
+ )
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='POST',
+ timeout=timeout,
+ data=urlencode(data),
+ headers=headers,
+ accept_errors=['FAILOVER_ALREADY_ROUTED']
+ )
+ if error is not None:
+ return value, False
+ else:
+ return result['failover']['active_server_ip'], True
+
+
+def get_failover_state(value):
+ '''
+ Create result dictionary for failover IP's value.
+
+ The value ``None`` represents unrouted.
+ '''
+ return dict(
+ value=value,
+ state='routed' if value else 'unrouted'
+ )
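
Editorial usage sketch (not part of the patch): a failover-IP module built on these helpers;
the failover_ip and value option names are assumptions.

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.hetzner import (
        HETZNER_DEFAULT_ARGUMENT_SPEC, get_failover, get_failover_state, set_failover)

    def main():
        argument_spec = dict(
            failover_ip=dict(type='str', required=True),
            value=dict(type='str'),  # target server IP; omit to unroute
        )
        argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
        module = AnsibleModule(argument_spec=argument_spec)

        ip = module.params['failover_ip']
        before = get_failover(module, ip)
        value, changed = set_failover(module, ip, module.params['value'])
        module.exit_json(changed=changed, before=get_failover_state(before),
                         after=get_failover_state(value))

    if __name__ == '__main__':
        main()
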
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hwc_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
new file mode 100644
index 00000000..05e0c137
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
@@ -0,0 +1,441 @@
+# Copyright (c), Google Inc, 2017
+# Simplified BSD License (see licenses/simplified_bsd.txt or
+# https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import time
+import traceback
+
+THIRD_LIBRARIES_IMP_ERR = None
+try:
+ from keystoneauth1.adapter import Adapter
+ from keystoneauth1.identity import v3
+ from keystoneauth1 import session
+ HAS_THIRD_LIBRARIES = True
+except ImportError:
+ THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
+ HAS_THIRD_LIBRARIES = False
+
+from ansible.module_utils.basic import (AnsibleModule, env_fallback,
+ missing_required_lib)
+from ansible.module_utils._text import to_text
+
+
+class HwcModuleException(Exception):
+ def __init__(self, message):
+ super(HwcModuleException, self).__init__()
+
+ self._message = message
+
+ def __str__(self):
+ return "[HwcClientException] message=%s" % self._message
+
+
+class HwcClientException(Exception):
+ def __init__(self, code, message):
+ super(HwcClientException, self).__init__()
+
+ self._code = code
+ self._message = message
+
+ def __str__(self):
+ msg = " code=%s," % str(self._code) if self._code != 0 else ""
+ return "[HwcClientException]%s message=%s" % (
+ msg, self._message)
+
+
+class HwcClientException404(HwcClientException):
+ def __init__(self, message):
+ super(HwcClientException404, self).__init__(404, message)
+
+ def __str__(self):
+ return "[HwcClientException404] message=%s" % self._message
+
+
+def session_method_wrapper(f):
+ def _wrap(self, url, *args, **kwargs):
+ try:
+ url = self.endpoint + url
+ r = f(self, url, *args, **kwargs)
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Sending request failed, error=%s" % ex)
+
+ result = None
+ if r.content:
+ try:
+ result = r.json()
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Parsing response to json failed, error: %s" % ex)
+
+ code = r.status_code
+ if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
+ msg = ""
+            # navigate_value() expects a list of keys, not a dotted string
+            for i in (['message'], ['error', 'message']):
+ try:
+ msg = navigate_value(result, i)
+ break
+ except Exception:
+ pass
+ else:
+ msg = str(result)
+
+ if code == 404:
+ raise HwcClientException404(msg)
+
+ raise HwcClientException(code, msg)
+
+ return result
+
+ return _wrap
+
+
+class _ServiceClient(object):
+ def __init__(self, client, endpoint, product):
+ self._client = client
+ self._endpoint = endpoint
+ self._default_header = {
+ 'User-Agent': "Huawei-Ansible-MM-%s" % product,
+ 'Accept': 'application/json',
+ }
+
+ @property
+ def endpoint(self):
+ return self._endpoint
+
+ @endpoint.setter
+ def endpoint(self, e):
+ self._endpoint = e
+
+ @session_method_wrapper
+ def get(self, url, body=None, header=None, timeout=None):
+ return self._client.get(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def post(self, url, body=None, header=None, timeout=None):
+ return self._client.post(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def delete(self, url, body=None, header=None, timeout=None):
+ return self._client.delete(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def put(self, url, body=None, header=None, timeout=None):
+ return self._client.put(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ def _header(self, header):
+ if header and isinstance(header, dict):
+ for k, v in self._default_header.items():
+ if k not in header:
+ header[k] = v
+ else:
+ header = self._default_header
+
+ return header
+
+
+class Config(object):
+ def __init__(self, module, product):
+ self._project_client = None
+ self._domain_client = None
+ self._module = module
+ self._product = product
+ self._endpoints = {}
+
+ self._validate()
+ self._gen_provider_client()
+
+ @property
+ def module(self):
+ return self._module
+
+ def client(self, region, service_type, service_level):
+ c = self._project_client
+ if service_level == "domain":
+ c = self._domain_client
+
+ e = self._get_service_endpoint(c, service_type, region)
+
+ return _ServiceClient(c, e, self._product)
+
+ def _gen_provider_client(self):
+ m = self._module
+ p = {
+ "auth_url": m.params['identity_endpoint'],
+ "password": m.params['password'],
+ "username": m.params['user'],
+ "project_name": m.params['project'],
+ "user_domain_name": m.params['domain'],
+ "reauthenticate": True
+ }
+
+ self._project_client = Adapter(
+ session.Session(auth=v3.Password(**p)),
+ raise_exc=False)
+
+ p.pop("project_name")
+ self._domain_client = Adapter(
+ session.Session(auth=v3.Password(**p)),
+ raise_exc=False)
+
+ def _get_service_endpoint(self, client, service_type, region):
+ k = "%s.%s" % (service_type, region if region else "")
+
+ if k in self._endpoints:
+ return self._endpoints.get(k)
+
+ url = None
+ try:
+ url = client.get_endpoint(service_type=service_type,
+ region_name=region, interface="public")
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Getting endpoint failed, error=%s" % ex)
+
+ if url == "":
+ raise HwcClientException(
+ 0, "Can not find the enpoint for %s" % service_type)
+
+ if url[-1] != "/":
+ url += "/"
+
+ self._endpoints[k] = url
+ return url
+
+ def _validate(self):
+ if not HAS_THIRD_LIBRARIES:
+ self.module.fail_json(
+ msg=missing_required_lib('keystoneauth1'),
+ exception=THIRD_LIBRARIES_IMP_ERR)
+
+
+class HwcModule(AnsibleModule):
+ def __init__(self, *args, **kwargs):
+ arg_spec = kwargs.setdefault('argument_spec', {})
+
+ arg_spec.update(
+ dict(
+ identity_endpoint=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']),
+ ),
+ user=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_USER']),
+ ),
+ password=dict(
+ required=True, type='str', no_log=True,
+ fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']),
+ ),
+ domain=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_DOMAIN']),
+ ),
+ project=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']),
+ ),
+ region=dict(
+ type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_REGION']),
+ ),
+ id=dict(type='str')
+ )
+ )
+
+ super(HwcModule, self).__init__(*args, **kwargs)
+
+
+class _DictComparison(object):
+ ''' This class takes in two dictionaries `a` and `b`.
+ These are dictionaries of arbitrary depth, but made up of standard
+ Python types only.
+        This differ compares all values in `a` to those in `b`.
+        If a value in `a` is None, it always compares as equal, meaning
+        the value does not need to be compared.
+        Note: on all lists, order does matter.
+ '''
+
+ def __init__(self, request):
+ self.request = request
+
+ def __eq__(self, other):
+ return self._compare_dicts(self.request, other.request)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _compare_dicts(self, dict1, dict2):
+ if dict1 is None:
+ return True
+
+ if set(dict1.keys()) != set(dict2.keys()):
+ return False
+
+ for k in dict1:
+ if not self._compare_value(dict1.get(k), dict2.get(k)):
+ return False
+
+ return True
+
+ def _compare_lists(self, list1, list2):
+ """Takes in two lists and compares them."""
+ if list1 is None:
+ return True
+
+ if len(list1) != len(list2):
+ return False
+
+ for i in range(len(list1)):
+ if not self._compare_value(list1[i], list2[i]):
+ return False
+
+ return True
+
+ def _compare_value(self, value1, value2):
+ """
+ return: True: value1 is same as value2, otherwise False.
+ """
+ if value1 is None:
+ return True
+
+ if not (value1 and value2):
+ return (not value1) and (not value2)
+
+ # Can assume non-None types at this point.
+ if isinstance(value1, list) and isinstance(value2, list):
+ return self._compare_lists(value1, value2)
+
+ elif isinstance(value1, dict) and isinstance(value2, dict):
+ return self._compare_dicts(value1, value2)
+
+ # Always use to_text values to avoid unicode issues.
+ return (to_text(value1, errors='surrogate_or_strict') == to_text(
+ value2, errors='surrogate_or_strict'))
+
+
+def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
+ is_last_time = False
+ not_found_times = 0
+ wait = 0
+
+ time.sleep(delay)
+
+ end = time.time() + timeout
+ while not is_last_time:
+ if time.time() > end:
+ is_last_time = True
+
+ obj, status = refresh()
+
+ if obj is None:
+ not_found_times += 1
+
+ if not_found_times > 10:
+                raise HwcModuleException(
+                    "object not found after %d consecutive attempts" % not_found_times)
+ else:
+ not_found_times = 0
+
+ if status in target:
+ return obj
+
+ if pending and status not in pending:
+            raise HwcModuleException(
+                "unexpected status (%s) occurred" % status)
+
+ if not is_last_time:
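+            # Exponential backoff: double the wait each round, clamped to
+            # the [min_interval, 10] second range before sleeping again.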
+ wait *= 2
+ if wait < min_interval:
+ wait = min_interval
+ elif wait > 10:
+ wait = 10
+
+ time.sleep(wait)
+
+    raise HwcModuleException("async wait timed out after %d seconds" % timeout)
+
+
+def navigate_value(data, index, array_index=None):
+ if array_index and (not isinstance(array_index, dict)):
+ raise HwcModuleException("array_index must be dict")
+
+ d = data
+ for n in range(len(index)):
+ if d is None:
+ return None
+
+ if not isinstance(d, dict):
+ raise HwcModuleException(
+ "can't navigate value from a non-dict object")
+
+ i = index[n]
+ if i not in d:
+ raise HwcModuleException(
+ "navigate value failed: key(%s) is not exist in dict" % i)
+ d = d[i]
+
+ if not array_index:
+ continue
+
+ k = ".".join(index[: (n + 1)])
+ if k not in array_index:
+ continue
+
+ if d is None:
+ return None
+
+ if not isinstance(d, list):
+ raise HwcModuleException(
+ "can't navigate value from a non-list object")
+
+ j = array_index.get(k)
+ if j >= len(d):
+ raise HwcModuleException(
+ "navigate value failed: the index is out of list")
+ d = d[j]
+
+ return d
+
+
+def build_path(module, path, kv=None):
+ if kv is None:
+ kv = dict()
+
+ v = {}
+ for p in re.findall(r"{[^/]*}", path):
+ n = p[1:][:-1]
+
+ if n in kv:
+ v[n] = str(kv[n])
+
+ else:
+ if n in module.params:
+ v[n] = str(module.params.get(n))
+ else:
+ v[n] = ""
+
+ return path.format(**v)
+
+
+def get_region(module):
+ if module.params['region']:
+ return module.params['region']
+
+ return module.params['project'].split("_")[0]
+
+
+def is_empty_value(v):
+ return (not v)
+
+
+def are_different_dicts(dict1, dict2):
+ return _DictComparison(dict1) != _DictComparison(dict2)
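
Editorial illustration (not part of the patch) of the two dict helpers above; plain Python,
runnable anywhere the collection is importable.

    from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
        are_different_dicts, navigate_value)

    data = {'server': {'addresses': [{'addr': '10.0.0.5'}]}}

    # index is a list of keys; array_index maps a dotted key path to a list index.
    addr = navigate_value(data, ['server', 'addresses', 'addr'],
                          array_index={'server.addresses': 0})
    assert addr == '10.0.0.5'

    # None values on the left-hand side mean "do not compare this key".
    assert not are_different_dicts({'a': None, 'b': 1}, {'a': 'x', 'b': 1})
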
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py
new file mode 100644
index 00000000..c3ab4103
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from functools import wraps
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+PYXCLI_INSTALLED = True
+PYXCLI_IMP_ERR = None
+try:
+ from pyxcli import client, errors
+except ImportError:
+ PYXCLI_IMP_ERR = traceback.format_exc()
+ PYXCLI_INSTALLED = False
+
+AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size',
+ 'domain', 'perf_class', 'vol',
+ 'iscsi_chap_name', 'iscsi_chap_secret',
+ 'cluster', 'host', 'lun', 'override',
+ 'fcaddress', 'iscsi_name', 'max_dms',
+ 'max_cgs', 'ldap_id', 'max_mirrors',
+ 'max_pools', 'max_volumes', 'hard_capacity',
+ 'soft_capacity']
+
+
+def xcli_wrapper(func):
+ """ Catch xcli errors and return a proper message"""
+ @wraps(func)
+ def wrapper(module, *args, **kwargs):
+ try:
+ return func(module, *args, **kwargs)
+ except errors.CommandExecutionError as e:
+ module.fail_json(msg=to_native(e))
+ return wrapper
+
+
+@xcli_wrapper
+def connect_ssl(module):
+ endpoints = module.params['endpoints']
+ username = module.params['username']
+ password = module.params['password']
+ if not (username and password and endpoints):
+ module.fail_json(
+ msg="Username, password or endpoints arguments "
+ "are missing from the module arguments")
+
+ try:
+ return client.XCLIClient.connect_multiendpoint_ssl(username,
+ password,
+ endpoints)
+ except errors.CommandFailedConnectionError as e:
+ module.fail_json(
+ msg="Connection with Spectrum Accelerate system has "
+ "failed: {[0]}.".format(to_native(e)))
+
+
+def spectrum_accelerate_spec():
+ """ Return arguments spec for AnsibleModule """
+ return dict(
+ endpoints=dict(required=True),
+ username=dict(required=True),
+ password=dict(no_log=True, required=True),
+ )
+
+
+@xcli_wrapper
+def execute_pyxcli_command(module, xcli_command, xcli_client):
+ pyxcli_args = build_pyxcli_command(module.params)
+ getattr(xcli_client.cmd, xcli_command)(**(pyxcli_args))
+ return True
+
+
+def build_pyxcli_command(fields):
+ """ Builds the args for pyxcli using the exact args from ansible"""
+ pyxcli_args = {}
+ for field in fields:
+ if not fields[field]:
+ continue
+        if field in AVAILABLE_PYXCLI_FIELDS:
+ pyxcli_args[field] = fields[field]
+ return pyxcli_args
+
+
+def is_pyxcli_installed(module):
+ if not PYXCLI_INSTALLED:
+ module.fail_json(msg=missing_required_lib('pyxcli'),
+ exception=PYXCLI_IMP_ERR)
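
Editorial usage sketch (not part of the patch): 'vol_create' is one example of an XCLI command
name, and the extra vol/pool/size options are assumptions; they pass through
build_pyxcli_command() because they appear in AVAILABLE_PYXCLI_FIELDS.

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (
        connect_ssl, execute_pyxcli_command, is_pyxcli_installed, spectrum_accelerate_spec)

    def main():
        argument_spec = spectrum_accelerate_spec()
        argument_spec.update(vol=dict(required=True), pool=dict(), size=dict())
        module = AnsibleModule(argument_spec=argument_spec)
        is_pyxcli_installed(module)
        xcli_client = connect_ssl(module)
        changed = execute_pyxcli_command(module, 'vol_create', xcli_client)
        module.exit_json(changed=changed)

    if __name__ == '__main__':
        main()
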
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
new file mode 100644
index 00000000..5c57e755
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
@@ -0,0 +1,482 @@
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+
+URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token"
+URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}"
+URL_CLIENTS = "{url}/admin/realms/{realm}/clients"
+URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles"
+URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles"
+
+URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
+URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
+URL_GROUPS = "{url}/admin/realms/{realm}/groups"
+URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}"
+
+
+def keycloak_argument_spec():
+ """
+ Returns argument_spec of options common to keycloak_*-modules
+
+ :return: argument_spec dict
+ """
+ return dict(
+ auth_keycloak_url=dict(type='str', aliases=['url'], required=True),
+ auth_client_id=dict(type='str', default='admin-cli'),
+ auth_realm=dict(type='str', required=True),
+ auth_client_secret=dict(type='str', default=None, no_log=True),
+ auth_username=dict(type='str', aliases=['username'], required=True),
+ auth_password=dict(type='str', aliases=['password'], required=True, no_log=True),
+ validate_certs=dict(type='bool', default=True)
+ )
+
+
+def camel(words):
+ return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:])
+
+
+class KeycloakError(Exception):
+ pass
+
+
+def get_token(base_url, validate_certs, auth_realm, client_id,
+ auth_username, auth_password, client_secret):
+    if not base_url.lower().startswith(('http://', 'https://')):
+        raise KeycloakError("auth_url '%s' should start with 'http://' or 'https://'." % base_url)
+ auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
+ temp_payload = {
+ 'grant_type': 'password',
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'username': auth_username,
+ 'password': auth_password,
+ }
+ # Remove empty items, for instance missing client_secret
+ payload = dict(
+ (k, v) for k, v in temp_payload.items() if v is not None)
+ try:
+ r = json.loads(to_native(open_url(auth_url, method='POST',
+ validate_certs=validate_certs,
+ data=urlencode(payload)).read()))
+ except ValueError as e:
+ raise KeycloakError(
+ 'API returned invalid JSON when trying to obtain access token from %s: %s'
+ % (auth_url, str(e)))
+ except Exception as e:
+ raise KeycloakError('Could not obtain access token from %s: %s'
+ % (auth_url, str(e)))
+
+ try:
+ return {
+ 'Authorization': 'Bearer ' + r['access_token'],
+ 'Content-Type': 'application/json'
+ }
+ except KeyError:
+ raise KeycloakError(
+ 'Could not obtain access token from %s' % auth_url)
+
+
+class KeycloakAPI(object):
+ """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which
+        is obtained through OpenID Connect.
+ """
+ def __init__(self, module, connection_header):
+ self.module = module
+ self.baseurl = self.module.params.get('auth_keycloak_url')
+ self.validate_certs = self.module.params.get('validate_certs')
+ self.restheaders = connection_header
+
+ def get_clients(self, realm='master', filter=None):
+ """ Obtains client representations for clients in a realm
+
+ :param realm: realm to be queried
+ :param filter: if defined, only the client with clientId specified in the filter is returned
+ :return: list of dicts of client representations
+ """
+ clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
+ if filter is not None:
+ clientlist_url += '?clientId=%s' % filter
+
+ try:
+ return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_client_by_clientid(self, client_id, realm='master'):
+ """ Get client representation by clientId
+ :param client_id: The clientId to be queried
+ :param realm: realm from which to obtain the client representation
+ :return: dict with a client representation or None if none matching exist
+ """
+ r = self.get_clients(realm=realm, filter=client_id)
+ if len(r) > 0:
+ return r[0]
+ else:
+ return None
+
+ def get_client_by_id(self, id, realm='master'):
+ """ Obtain client representation by id
+
+ :param id: id (not clientId) of client to be queried
+ :param realm: client from this realm
+ :return: dict of client representation or None if none matching exist
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_id(self, client_id, realm='master'):
+ """ Obtain id of client by client_id
+
+ :param client_id: client_id of client to be queried
+ :param realm: client template from this realm
+ :return: id of client (usually a UUID)
+ """
+ result = self.get_client_by_clientid(client_id, realm)
+ if isinstance(result, dict) and 'id' in result:
+ return result['id']
+ else:
+ return None
+
+ def update_client(self, id, clientrep, realm="master"):
+ """ Update an existing client
+ :param id: id (not clientId) of client to be updated in Keycloak
+ :param clientrep: corresponding (partial/full) client representation with updates
+ :param realm: realm the client is in
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(client_url, method='PUT', headers=self.restheaders,
+ data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update client %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def create_client(self, clientrep, realm="master"):
+ """ Create a client in keycloak
+ :param clientrep: Client representation of client to be created. Must at least contain field clientId
+ :param realm: realm for client to be created
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(client_url, method='POST', headers=self.restheaders,
+ data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create client %s in realm %s: %s'
+ % (clientrep['clientId'], realm, str(e)))
+
+ def delete_client(self, id, realm="master"):
+ """ Delete a client from Keycloak
+
+ :param id: id (not clientId) of client to be deleted
+ :param realm: realm of client to be deleted
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(client_url, method='DELETE', headers=self.restheaders,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete client %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_templates(self, realm='master'):
+ """ Obtains client template representations for client templates in a realm
+
+ :param realm: realm to be queried
+ :return: list of dicts of client representations
+ """
+ url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_client_template_by_id(self, id, realm='master'):
+ """ Obtain client template representation by id
+
+ :param id: id (not name) of client template to be queried
+ :param realm: client template from this realm
+ :return: dict of client template representation or None if none matching exist
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+            self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client template %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_template_by_name(self, name, realm='master'):
+ """ Obtain client template representation by name
+
+ :param name: name of client template to be queried
+ :param realm: client template from this realm
+ :return: dict of client template representation or None if none matching exist
+ """
+ result = self.get_client_templates(realm)
+ if isinstance(result, list):
+ result = [x for x in result if x['name'] == name]
+ if len(result) > 0:
+ return result[0]
+ return None
+
+ def get_client_template_id(self, name, realm='master'):
+ """ Obtain client template id by name
+
+ :param name: name of client template to be queried
+ :param realm: client template from this realm
+ :return: client template id (usually a UUID)
+ """
+ result = self.get_client_template_by_name(name, realm)
+ if isinstance(result, dict) and 'id' in result:
+ return result['id']
+ else:
+ return None
+
+ def update_client_template(self, id, clienttrep, realm="master"):
+ """ Update an existing client template
+ :param id: id (not name) of client template to be updated in Keycloak
+ :param clienttrep: corresponding (partial/full) client template representation with updates
+ :param realm: realm the client template is in
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(url, method='PUT', headers=self.restheaders,
+ data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update client template %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def create_client_template(self, clienttrep, realm="master"):
+ """ Create a client in keycloak
+ :param clienttrep: Client template representation of client template to be created. Must at least contain field name
+ :param realm: realm for client template to be created in
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(url, method='POST', headers=self.restheaders,
+ data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create client template %s in realm %s: %s'
+                                      % (clienttrep['name'], realm, str(e)))
+
+ def delete_client_template(self, id, realm="master"):
+ """ Delete a client template from Keycloak
+
+ :param id: id (not name) of client to be deleted
+ :param realm: realm of client template to be deleted
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(url, method='DELETE', headers=self.restheaders,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete client template %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_groups(self, realm="master"):
+ """ Fetch the name and ID of all groups on the Keycloak server.
+
+ To fetch the full data of the group, make a subsequent call to
+ get_group_by_groupid, passing in the ID of the group you wish to return.
+
+ :param realm: Return the groups of this realm (default "master").
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
+ % (realm, str(e)))
+
+ def get_group_by_groupid(self, gid, realm="master"):
+ """ Fetch a keycloak group from the provided realm using the group's unique ID.
+
+ If the group does not exist, None is returned.
+
+ gid is a UUID provided by the Keycloak API
+ :param gid: UUID of the group to be returned
+ :param realm: Realm in which the group resides; default 'master'.
+ """
+ groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid)
+ try:
+ return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (gid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (gid, realm, str(e)))
+
+ def get_group_by_name(self, name, realm="master"):
+ """ Fetch a keycloak group within a realm based on its name.
+
+ The Keycloak API does not allow filtering of the Groups resource by name.
+ As a result, this method first retrieves the entire list of groups - name and ID -
+ then performs a second query to fetch the group.
+
+ If the group does not exist, None is returned.
+ :param name: Name of the group to fetch.
+ :param realm: Realm in which the group resides; default 'master'
+ """
+ try:
+ all_groups = self.get_groups(realm=realm)
+
+ for group in all_groups:
+ if group['name'] == name:
+ return self.get_group_by_groupid(group['id'], realm=realm)
+
+ return None
+
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (name, realm, str(e)))
+
+ def create_group(self, grouprep, realm="master"):
+ """ Create a Keycloak group.
+
+ :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
+ :return: HTTPResponse object on success
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ return open_url(groups_url, method='POST', headers=self.restheaders,
+ data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Could not create group %s in realm %s: %s"
+ % (grouprep['name'], realm, str(e)))
+
+ def update_group(self, grouprep, realm="master"):
+ """ Update an existing group.
+
+ :param grouprep: A GroupRepresentation of the updated group.
+ :return HTTPResponse object on success
+ """
+ group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id'])
+
+ try:
+ return open_url(group_url, method='PUT', headers=self.restheaders,
+ data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update group %s in realm %s: %s'
+ % (grouprep['name'], realm, str(e)))
+
+ def delete_group(self, name=None, groupid=None, realm="master"):
+ """ Delete a group. One of name or groupid must be provided.
+
+ Providing the group ID is preferred as it avoids a second lookup to
+ convert a group name to an ID.
+
+ :param name: The name of the group. A lookup will be performed to retrieve the group ID.
+ :param groupid: The ID of the group (preferred to name).
+ :param realm: The realm in which this group resides, default "master".
+ """
+
+ if groupid is None and name is None:
+ # prefer an exception since this is almost certainly a programming error in the module itself.
+ raise Exception("Unable to delete group - one of group ID or name must be provided.")
+
+ # only lookup the name if groupid isn't provided.
+ # in the case that both are provided, prefer the ID, since it's one
+ # less lookup.
+ if groupid is None and name is not None:
+ for group in self.get_groups(realm=realm):
+ if group['name'] == name:
+ groupid = group['id']
+ break
+
+ # if the group doesn't exist - no problem, nothing to delete.
+ if groupid is None:
+ return None
+
+ # should have a good groupid by here.
+ group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl)
+ try:
+ return open_url(group_url, method='DELETE', headers=self.restheaders,
+ validate_certs=self.validate_certs)
+
+ except Exception as e:
+ self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e)))
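
Editorial usage sketch (not part of the patch): a keycloak_* module typically obtains the
connection header via get_token() and hands it to KeycloakAPI; the 'developers' group name
below is purely illustrative.

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
        KeycloakAPI, KeycloakError, get_token, keycloak_argument_spec)

    def main():
        module = AnsibleModule(argument_spec=keycloak_argument_spec())
        try:
            connection_header = get_token(
                base_url=module.params.get('auth_keycloak_url'),
                validate_certs=module.params.get('validate_certs'),
                auth_realm=module.params.get('auth_realm'),
                client_id=module.params.get('auth_client_id'),
                auth_username=module.params.get('auth_username'),
                auth_password=module.params.get('auth_password'),
                client_secret=module.params.get('auth_client_secret'))
        except KeycloakError as e:
            module.fail_json(msg=str(e))
        kc = KeycloakAPI(module, connection_header)
        group = kc.get_group_by_name('developers', realm='master')  # hypothetical group
        module.exit_json(changed=False, group=group)

    if __name__ == '__main__':
        main()
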
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/influxdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/influxdb.py
new file mode 100644
index 00000000..92c78023
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/influxdb.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests.exceptions
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+INFLUXDB_IMP_ERR = None
+try:
+ from influxdb import InfluxDBClient
+ from influxdb import __version__ as influxdb_version
+ from influxdb import exceptions
+ HAS_INFLUXDB = True
+except ImportError:
+ INFLUXDB_IMP_ERR = traceback.format_exc()
+ HAS_INFLUXDB = False
+
+
+class InfluxDb():
+ def __init__(self, module):
+ self.module = module
+ self.params = self.module.params
+ self.check_lib()
+ self.hostname = self.params['hostname']
+ self.port = self.params['port']
+ self.path = self.params['path']
+ self.username = self.params['username']
+ self.password = self.params['password']
+ self.database_name = self.params.get('database_name')
+
+ def check_lib(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+
+ if not HAS_INFLUXDB:
+ self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR)
+
+ @staticmethod
+ def influxdb_argument_spec():
+ return dict(
+ hostname=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8086),
+ path=dict(type='str', default=''),
+ username=dict(type='str', default='root', aliases=['login_username']),
+ password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
+ ssl=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=True),
+ timeout=dict(type='int'),
+ retries=dict(type='int', default=3),
+ proxies=dict(type='dict', default={}),
+ use_udp=dict(type='bool', default=False),
+ udp_port=dict(type='int', default=4444),
+ )
+
+ def connect_to_influxdb(self):
+ args = dict(
+ host=self.hostname,
+ port=self.port,
+ username=self.username,
+ password=self.password,
+ database=self.database_name,
+ ssl=self.params['ssl'],
+ verify_ssl=self.params['validate_certs'],
+ timeout=self.params['timeout'],
+ use_udp=self.params['use_udp'],
+ udp_port=self.params['udp_port'],
+ proxies=self.params['proxies'],
+ )
+ influxdb_api_version = LooseVersion(influxdb_version)
+ if influxdb_api_version >= LooseVersion('4.1.0'):
+            # the retries option was added in version 4.1.0
+ args.update(retries=self.params['retries'])
+
+ if influxdb_api_version >= LooseVersion('5.1.0'):
+            # the path argument was added in version 5.1.0
+ args.update(path=self.path)
+
+ return InfluxDBClient(**args)
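
Editorial usage sketch (not part of the patch): a module built on this helper merges its own
options into influxdb_argument_spec(); database_name is the only extra option assumed here.

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb

    def main():
        argument_spec = InfluxDb.influxdb_argument_spec()
        argument_spec.update(database_name=dict(type='str', required=True))
        module = AnsibleModule(argument_spec=argument_spec)
        client = InfluxDb(module).connect_to_influxdb()
        module.exit_json(changed=False, databases=client.get_list_database())

    if __name__ == '__main__':
        main()
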
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ipa.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ipa.py
new file mode 100644
index 00000000..9eb9f406
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ipa.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import socket
+import uuid
+
+import re
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url, HAS_GSSAPI
+from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound
+
+
+def _env_then_dns_fallback(*args, **kwargs):
+ ''' Load value from environment or DNS in that order'''
+ try:
+ result = env_fallback(*args, **kwargs)
+ if result == '':
+ raise AnsibleFallbackNotFound
+ except AnsibleFallbackNotFound:
+ # If no host was given, we try to guess it from IPA.
+ # The ipa-ca entry is a standard entry that IPA will have set for
+ # the CA.
+ try:
+ return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0]
+ except Exception:
+ raise AnsibleFallbackNotFound
+
+
+class IPAClient(object):
+ def __init__(self, module, host, port, protocol):
+ self.host = host
+ self.port = port
+ self.protocol = protocol
+ self.module = module
+ self.headers = None
+ self.timeout = module.params.get('ipa_timeout')
+ self.use_gssapi = False
+
+ def get_base_url(self):
+ return '%s://%s/ipa' % (self.protocol, self.host)
+
+ def get_json_url(self):
+ return '%s/session/json' % self.get_base_url()
+
+ def login(self, username, password):
+ if 'KRB5CCNAME' in os.environ and HAS_GSSAPI:
+ self.use_gssapi = True
+ elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI:
+ ccache = "MEMORY:" + str(uuid.uuid4())
+ os.environ['KRB5CCNAME'] = ccache
+ self.use_gssapi = True
+ else:
+ if not password:
+ if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ:
+ self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'")
+ self._fail('login', 'Password is required if not using '
+ 'GSSAPI. To use GSSAPI, please set the '
+ 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) '
+                       'environment variables.')
+ url = '%s/session/login_password' % self.get_base_url()
+ data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
+ headers = {'referer': self.get_base_url(),
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Accept': 'text/plain'}
+ try:
+ resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout)
+ status_code = info['status']
+ if status_code not in [200, 201, 204]:
+ self._fail('login', info['msg'])
+
+ self.headers = {'Cookie': info.get('set-cookie')}
+ except Exception as e:
+ self._fail('login', to_native(e))
+ if not self.headers:
+ self.headers = dict()
+ self.headers.update({
+ 'referer': self.get_base_url(),
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'})
+
+ def _fail(self, msg, e):
+ if 'message' in e:
+ err_string = e.get('message')
+ else:
+ err_string = e
+ self.module.fail_json(msg='%s: %s' % (msg, err_string))
+
+ def get_ipa_version(self):
+ response = self.ping()['summary']
+        ipa_ver_regex = re.compile(r'IPA server version (\d+\.\d+\.\d+).*')
+ version_match = ipa_ver_regex.match(response)
+ ipa_version = None
+ if version_match:
+ ipa_version = version_match.groups()[0]
+ return ipa_version
+
+ def ping(self):
+ return self._post_json(method='ping', name=None)
+
+ def _post_json(self, method, name, item=None):
+ if item is None:
+ item = {}
+ url = '%s/session/json' % self.get_base_url()
+ data = dict(method=method)
+
+ # TODO: We should probably handle this a little better.
+ if method in ('ping', 'config_show'):
+ data['params'] = [[], {}]
+ elif method == 'config_mod':
+ data['params'] = [[], item]
+ else:
+ data['params'] = [[name], item]
+
+ try:
+ resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)),
+ headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi)
+ status_code = info['status']
+ if status_code not in [200, 201, 204]:
+ self._fail(method, info['msg'])
+ except Exception as e:
+ self._fail('post %s' % method, to_native(e))
+
+ if PY3:
+ charset = resp.headers.get_content_charset('latin-1')
+ else:
+ response_charset = resp.headers.getparam('charset')
+ if response_charset:
+ charset = response_charset
+ else:
+ charset = 'latin-1'
+ resp = json.loads(to_text(resp.read(), encoding=charset))
+ err = resp.get('error')
+ if err is not None:
+ self._fail('response %s' % method, err)
+
+ if 'result' in resp:
+ result = resp.get('result')
+ if 'result' in result:
+ result = result.get('result')
+ if isinstance(result, list):
+ if len(result) > 0:
+ return result[0]
+ else:
+ return {}
+ return result
+ return None
+
+ def get_diff(self, ipa_data, module_data):
+ result = []
+ for key in module_data.keys():
+ mod_value = module_data.get(key, None)
+ if isinstance(mod_value, list):
+ default = []
+ else:
+ default = None
+ ipa_value = ipa_data.get(key, default)
+ if isinstance(ipa_value, list) and not isinstance(mod_value, list):
+ mod_value = [mod_value]
+ if isinstance(ipa_value, list) and isinstance(mod_value, list):
+ mod_value = sorted(mod_value)
+ ipa_value = sorted(ipa_value)
+ if mod_value != ipa_value:
+ result.append(key)
+ return result
+
+ def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not self.module.check_mode:
+ if item:
+ remove_method(name=name, item={item: diff})
+ else:
+ remove_method(name=name, item=diff)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not self.module.check_mode:
+ if item:
+ add_method(name=name, item={item: diff})
+ else:
+ add_method(name=name, item=diff)
+
+ return changed
+
+
+def ipa_argument_spec():
+ return dict(
+ ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
+ ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])),
+ ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
+ ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
+ ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
+ ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])),
+ validate_certs=dict(type='bool', default=True),
+ )
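
Editorial usage sketch (not part of the patch) of the usual IPAClient flow: extend the common
argument spec, log in, then issue JSON-RPC calls; reporting the server version is an arbitrary
example.

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec

    def main():
        module = AnsibleModule(argument_spec=ipa_argument_spec())
        client = IPAClient(module=module,
                           host=module.params['ipa_host'],
                           port=module.params['ipa_port'],
                           protocol=module.params['ipa_prot'])
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        module.exit_json(changed=False, ipa_version=client.get_ipa_version())

    if __name__ == '__main__':
        main()
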
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/known_hosts.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/known_hosts.py
new file mode 100644
index 00000000..96f91ba8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/known_hosts.py
@@ -0,0 +1,180 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+import hmac
+import re
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+try:
+ from hashlib import sha1
+except ImportError:
+ import sha as sha1
+
+HASHED_KEY_MAGIC = "|1|"
+
+
+def is_ssh_url(url):
+
+ """ check if url is ssh """
+
+ if "@" in url and "://" not in url:
+ return True
+ for scheme in "ssh://", "git+ssh://", "ssh+git://":
+ if url.startswith(scheme):
+ return True
+ return False
+
+
+def get_fqdn_and_port(repo_url):
+
+ """ chop the hostname and port out of a url """
+
+ fqdn = None
+ port = None
+ ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
+ if "@" in repo_url and "://" not in repo_url:
+        # most likely a user@host:path or user@host/path type URL
+ repo_url = repo_url.split("@", 1)[1]
+ match = ipv6_re.match(repo_url)
+ # For this type of URL, colon specifies the path, not the port
+ if match:
+ fqdn, path = match.groups()
+ elif ":" in repo_url:
+ fqdn = repo_url.split(":")[0]
+ elif "/" in repo_url:
+ fqdn = repo_url.split("/")[0]
+ elif "://" in repo_url:
+ # this should be something we can parse with urlparse
+ parts = urlparse(repo_url)
+ # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
+ # ensure we actually have a parts[1] before continuing.
+ if parts[1] != '':
+ fqdn = parts[1]
+ if "@" in fqdn:
+ fqdn = fqdn.split("@", 1)[1]
+ match = ipv6_re.match(fqdn)
+ if match:
+ fqdn, port = match.groups()
+ elif ":" in fqdn:
+ fqdn, port = fqdn.split(":")[0:2]
+ return fqdn, port
+
+
+def check_hostkey(module, fqdn):
+ return not not_in_host_file(module, fqdn)
+
+
+# this is a variant of code found in connection_plugins/paramiko.py and we should modify
+# the paramiko code to import and use this.
+
+def not_in_host_file(module, host):
+
+ if 'USER' in os.environ:
+ user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+ else:
+ user_host_file = "~/.ssh/known_hosts"
+ user_host_file = os.path.expanduser(user_host_file)
+
+ host_file_list = []
+ host_file_list.append(user_host_file)
+ host_file_list.append("/etc/ssh/ssh_known_hosts")
+ host_file_list.append("/etc/ssh/ssh_known_hosts2")
+ host_file_list.append("/etc/openssh/ssh_known_hosts")
+
+ hfiles_not_found = 0
+ for hf in host_file_list:
+ if not os.path.exists(hf):
+ hfiles_not_found += 1
+ continue
+
+ try:
+ host_fh = open(hf)
+ except IOError:
+ hfiles_not_found += 1
+ continue
+ else:
+ data = host_fh.read()
+ host_fh.close()
+
+ for line in data.split("\n"):
+ if line is None or " " not in line:
+ continue
+ tokens = line.split()
+ if tokens[0].find(HASHED_KEY_MAGIC) == 0:
+ # this is a hashed known host entry
+                try:
+                    (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
+                    # str.decode('base64') is Python 2 only; use the base64
+                    # module so this also works on Python 3.
+                    kn_hash = hmac.new(base64.b64decode(kn_salt), digestmod=sha1)
+                    kn_hash.update(to_bytes(host))
+                    if kn_hash.digest() == base64.b64decode(kn_host):
+                        return False
+                except Exception:
+                    # invalid hashed host key, skip it
+                    continue
+ else:
+ # standard host file entry
+ if host in tokens[0]:
+ return False
+
+ return True
+
+
+def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
+
+ """ use ssh-keyscan to add the hostkey """
+
+ keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
+
+ if 'USER' in os.environ:
+ user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
+ user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+ else:
+ user_ssh_dir = "~/.ssh/"
+ user_host_file = "~/.ssh/known_hosts"
+ user_ssh_dir = os.path.expanduser(user_ssh_dir)
+
+ if not os.path.exists(user_ssh_dir):
+ if create_dir:
+ try:
+ os.makedirs(user_ssh_dir, int('700', 8))
+ except Exception:
+ module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
+ else:
+ module.fail_json(msg="%s does not exist" % user_ssh_dir)
+ elif not os.path.isdir(user_ssh_dir):
+ module.fail_json(msg="%s is not a directory" % user_ssh_dir)
+
+ if port:
+ this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
+ else:
+ this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
+
+ rc, out, err = module.run_command(this_cmd)
+ # ssh-keyscan gives a 0 exit code and prints nothing on timeout
+ if rc != 0 or not out:
+ msg = 'failed to retrieve hostkey'
+ if not out:
+ msg += '. "%s" returned no matches.' % this_cmd
+ else:
+ msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)
+
+ if err:
+ msg += ' [stderr]: %s' % err
+
+ module.fail_json(msg=msg)
+
+ module.append_to_file(user_host_file, out)
+
+ return rc, out, err
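+
+# Illustrative: for fqdn='example.com', port=22 and the default key type, the
+# command assembled above is equivalent to running:
+#
+#   ssh-keyscan -t rsa -p 22 example.com >> ~/.ssh/known_hosts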
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/kubevirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/kubevirt.py
new file mode 100644
index 00000000..90d8d9ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/kubevirt.py
@@ -0,0 +1,465 @@
+# -*- coding: utf-8 -*-
+#
+
+# Copyright (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import defaultdict
+from distutils.version import Version
+
+from ansible.module_utils.common import dict_transformations
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible_collections.community.kubernetes.plugins.module_utils.common import list_dict_str
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+
+import copy
+import re
+
+MAX_SUPPORTED_API_VERSION = 'v1alpha3'
+API_GROUP = 'kubevirt.io'
+
+
+# Put all args that (can) modify 'spec:' here:
+VM_SPEC_DEF_ARG_SPEC = {
+ 'resource_definition': {
+ 'type': 'dict',
+ 'aliases': ['definition', 'inline']
+ },
+ 'memory': {'type': 'str'},
+ 'memory_limit': {'type': 'str'},
+ 'cpu_cores': {'type': 'int'},
+ 'disks': {'type': 'list'},
+ 'labels': {'type': 'dict'},
+ 'interfaces': {'type': 'list'},
+ 'machine_type': {'type': 'str'},
+ 'cloud_init_nocloud': {'type': 'dict'},
+ 'bootloader': {'type': 'str'},
+ 'smbios_uuid': {'type': 'str'},
+ 'cpu_model': {'type': 'str'},
+ 'headless': {'type': 'str'},
+ 'hugepage_size': {'type': 'str'},
+ 'tablets': {'type': 'list'},
+ 'cpu_limit': {'type': 'int'},
+ 'cpu_shares': {'type': 'int'},
+ 'cpu_features': {'type': 'list'},
+ 'affinity': {'type': 'dict'},
+ 'anti_affinity': {'type': 'dict'},
+ 'node_affinity': {'type': 'dict'},
+}
+# And other common args go here:
+VM_COMMON_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'hostname': {'type': 'str'},
+ 'subdomain': {'type': 'str'},
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {'type': 'list', 'choices': ['json', 'merge', 'strategic-merge']},
+ 'wait': {'type': 'bool', 'default': True},
+ 'wait_timeout': {'type': 'int', 'default': 120},
+ 'wait_sleep': {'type': 'int', 'default': 5},
+}
+VM_COMMON_ARG_SPEC.update(VM_SPEC_DEF_ARG_SPEC)
+
+
+def virtdict():
+ """
+ This function create dictionary, with defaults to dictionary.
+ """
+ return defaultdict(virtdict)
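+
+# Illustrative: nested keys can be assigned without creating the intermediate
+# dicts first:
+#
+#   spec = virtdict()
+#   spec['template']['spec']['domain']['devices']['disks'] = []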
+
+
+class KubeAPIVersion(Version):
+ component_re = re.compile(r'(\d+ | [a-z]+)', re.VERBOSE)
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def parse(self, vstring):
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring) if x]
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ errmsg = "version '{0}' does not conform to kubernetes api versioning guidelines".format(vstring)
+ c = components
+
+ if len(c) not in (2, 4) or c[0] != 'v' or not isinstance(c[1], int):
+ raise ValueError(errmsg)
+ if len(c) == 4 and (c[2] not in ('alpha', 'beta') or not isinstance(c[3], int)):
+ raise ValueError(errmsg)
+
+ self.version = components
+
+ def __str__(self):
+ return self.vstring
+
+ def __repr__(self):
+ return "KubeAPIVersion ('{0}')".format(str(self))
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = KubeAPIVersion(other)
+
+ myver = self.version
+ otherver = other.version
+
+ for ver in myver, otherver:
+ if len(ver) == 2:
+ ver.extend(['zeta', 9999])
+
+ if myver == otherver:
+ return 0
+ if myver < otherver:
+ return -1
+ if myver > otherver:
+ return 1
+
+ # python2 compatibility
+ def __cmp__(self, other):
+ return self._cmp(other)
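+
+ # Illustrative ordering; GA versions sort above beta/alpha releases of the
+ # same major version thanks to the 'zeta'/9999 padding in _cmp():
+ #
+ #   KubeAPIVersion('v1') > KubeAPIVersion('v1beta2')        # True
+ #   KubeAPIVersion('v1beta2') > KubeAPIVersion('v1alpha3')  # True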
+
+
+class KubeVirtRawModule(KubernetesRawModule):
+ def __init__(self, *args, **kwargs):
+ super(KubeVirtRawModule, self).__init__(*args, **kwargs)
+
+ @staticmethod
+ def merge_dicts(base_dict, merging_dicts):
+ """This function merges a base dictionary with one or more other dictionaries.
+ The base dictionary takes precedence when there is a key collision.
+ merging_dicts can be a dict or a list or tuple of dicts. In the latter case,
+ dictionaries at the front of the list take precedence over those at the end.
+ """
+ if not merging_dicts:
+ merging_dicts = ({},)
+
+ if not isinstance(merging_dicts, Sequence):
+ merging_dicts = (merging_dicts,)
+
+ new_dict = {}
+ for d in reversed(merging_dicts):
+ new_dict = dict_transformations.dict_merge(new_dict, d)
+
+ new_dict = dict_transformations.dict_merge(new_dict, base_dict)
+
+ return new_dict
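+
+ # Illustrative precedence, with made-up values:
+ #
+ #   merge_dicts({'a': 1}, [{'a': 2, 'b': 2}, {'b': 3, 'c': 3}])
+ #   # -> {'a': 1, 'b': 2, 'c': 3} (base wins; earlier dicts beat later ones)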
+
+ def get_resource(self, resource):
+ try:
+ existing = resource.get(name=self.name, namespace=self.namespace)
+ except Exception:
+ existing = None
+
+ return existing
+
+ def _define_datavolumes(self, datavolumes, spec):
+ """
+ Takes datavoulmes parameter of Ansible and create kubevirt API datavolumesTemplateSpec
+ structure from it
+ """
+ if not datavolumes:
+ return
+
+ spec['dataVolumeTemplates'] = []
+ for dv in datavolumes:
+ # Add datavolume to datavolumetemplates spec:
+ dvt = virtdict()
+ dvt['metadata']['name'] = dv.get('name')
+ dvt['spec']['pvc'] = {
+ 'accessModes': dv.get('pvc').get('accessModes'),
+ 'resources': {
+ 'requests': {
+ 'storage': dv.get('pvc').get('storage'),
+ }
+ }
+ }
+ dvt['spec']['source'] = dv.get('source')
+ spec['dataVolumeTemplates'].append(dvt)
+
+ # Add datavolume to disks spec:
+ if not spec['template']['spec']['domain']['devices']['disks']:
+ spec['template']['spec']['domain']['devices']['disks'] = []
+
+ spec['template']['spec']['domain']['devices']['disks'].append(
+ {
+ 'name': dv.get('name'),
+ 'disk': dv.get('disk', {'bus': 'virtio'}),
+ }
+ )
+
+ # Add datavolume to volumes spec:
+ if not spec['template']['spec']['volumes']:
+ spec['template']['spec']['volumes'] = []
+
+ spec['template']['spec']['volumes'].append(
+ {
+ 'dataVolume': {
+ 'name': dv.get('name')
+ },
+ 'name': dv.get('name'),
+ }
+ )
+
+ def _define_cloud_init(self, cloud_init_nocloud, template_spec):
+ """
+ Takes the user's cloud_init_nocloud parameter and fill it in kubevirt
+ API strucuture. The name for disk is hardcoded to ansiblecloudinitdisk.
+ """
+ if cloud_init_nocloud:
+ if not template_spec['volumes']:
+ template_spec['volumes'] = []
+ if not template_spec['domain']['devices']['disks']:
+ template_spec['domain']['devices']['disks'] = []
+
+ template_spec['volumes'].append({'name': 'ansiblecloudinitdisk', 'cloudInitNoCloud': cloud_init_nocloud})
+ template_spec['domain']['devices']['disks'].append({
+ 'name': 'ansiblecloudinitdisk',
+ 'disk': {'bus': 'virtio'},
+ })
+
+ def _define_interfaces(self, interfaces, template_spec, defaults):
+ """
+ Takes interfaces parameter of Ansible and create kubevirt API interfaces
+ and networks strucutre out from it.
+ """
+ if not interfaces and defaults and 'interfaces' in defaults:
+ interfaces = copy.deepcopy(defaults['interfaces'])
+ for d in interfaces:
+ d['network'] = defaults['networks'][0]
+
+ if interfaces:
+ # Extract interfaces k8s specification from interfaces list passed to Ansible:
+ spec_interfaces = []
+ for i in interfaces:
+ spec_interfaces.append(
+ self.merge_dicts(dict((k, v) for k, v in i.items() if k != 'network'), defaults['interfaces'])
+ )
+ if 'interfaces' not in template_spec['domain']['devices']:
+ template_spec['domain']['devices']['interfaces'] = []
+ template_spec['domain']['devices']['interfaces'].extend(spec_interfaces)
+
+ # Extract networks k8s specification from interfaces list passed to Ansible:
+ spec_networks = []
+ for i in interfaces:
+ net = i['network']
+ net['name'] = i['name']
+ spec_networks.append(self.merge_dicts(net, defaults['networks']))
+ if 'networks' not in template_spec:
+ template_spec['networks'] = []
+ template_spec['networks'].extend(spec_networks)
+
+ def _define_disks(self, disks, template_spec, defaults):
+ """
+ Takes disks parameter of Ansible and create kubevirt API disks and
+ volumes strucutre out from it.
+ """
+ if not disks and defaults and 'disks' in defaults:
+ disks = copy.deepcopy(defaults['disks'])
+ for d in disks:
+ d['volume'] = defaults['volumes'][0]
+
+ if disks:
+ # Extract k8s specification from disks list passed to Ansible:
+ spec_disks = []
+ for d in disks:
+ spec_disks.append(
+ self.merge_dicts(dict((k, v) for k, v in d.items() if k != 'volume'), defaults['disks'])
+ )
+ if 'disks' not in template_spec['domain']['devices']:
+ template_spec['domain']['devices']['disks'] = []
+ template_spec['domain']['devices']['disks'].extend(spec_disks)
+
+ # Extract volumes k8s specification from disks list passed to Ansible:
+ spec_volumes = []
+ for d in disks:
+ volume = d['volume']
+ volume['name'] = d['name']
+ spec_volumes.append(self.merge_dicts(volume, defaults['volumes']))
+ if 'volumes' not in template_spec:
+ template_spec['volumes'] = []
+ template_spec['volumes'].extend(spec_volumes)
+
+ def find_supported_resource(self, kind):
+ results = self.client.resources.search(kind=kind, group=API_GROUP)
+ if not results:
+ self.fail('Failed to find resource {0} in {1}'.format(kind, API_GROUP))
+ sr = sorted(results, key=lambda r: KubeAPIVersion(r.api_version), reverse=True)
+ for r in sr:
+ if KubeAPIVersion(r.api_version) <= KubeAPIVersion(MAX_SUPPORTED_API_VERSION):
+ return r
+ self.fail("API versions {0} are too recent. Max supported is {1}/{2}.".format(
+ str([r.api_version for r in sr]), API_GROUP, MAX_SUPPORTED_API_VERSION))
+
+ def _construct_vm_definition(self, kind, definition, template, params, defaults=None):
+ self.client = self.get_api_client()
+
+ disks = params.get('disks', [])
+ memory = params.get('memory')
+ memory_limit = params.get('memory_limit')
+ cpu_cores = params.get('cpu_cores')
+ cpu_model = params.get('cpu_model')
+ cpu_features = params.get('cpu_features')
+ labels = params.get('labels')
+ datavolumes = params.get('datavolumes')
+ interfaces = params.get('interfaces')
+ bootloader = params.get('bootloader')
+ cloud_init_nocloud = params.get('cloud_init_nocloud')
+ machine_type = params.get('machine_type')
+ headless = params.get('headless')
+ smbios_uuid = params.get('smbios_uuid')
+ hugepage_size = params.get('hugepage_size')
+ tablets = params.get('tablets')
+ cpu_shares = params.get('cpu_shares')
+ cpu_limit = params.get('cpu_limit')
+ node_affinity = params.get('node_affinity')
+ vm_affinity = params.get('affinity')
+ vm_anti_affinity = params.get('anti_affinity')
+ hostname = params.get('hostname')
+ subdomain = params.get('subdomain')
+ template_spec = template['spec']
+
+ # Merge additional flat parameters:
+ if memory:
+ template_spec['domain']['resources']['requests']['memory'] = memory
+
+ if cpu_shares:
+ template_spec['domain']['resources']['requests']['cpu'] = cpu_shares
+
+ if cpu_limit:
+ template_spec['domain']['resources']['limits']['cpu'] = cpu_limit
+
+ if tablets:
+ for tablet in tablets:
+ tablet['type'] = 'tablet'
+ template_spec['domain']['devices']['inputs'] = tablets
+
+ if memory_limit:
+ template_spec['domain']['resources']['limits']['memory'] = memory_limit
+
+ if hugepage_size is not None:
+ template_spec['domain']['memory']['hugepages']['pageSize'] = hugepage_size
+
+ if cpu_features is not None:
+ template_spec['domain']['cpu']['features'] = cpu_features
+
+ if cpu_cores is not None:
+ template_spec['domain']['cpu']['cores'] = cpu_cores
+
+ if cpu_model:
+ template_spec['domain']['cpu']['model'] = cpu_model
+
+ if labels:
+ template['metadata']['labels'] = self.merge_dicts(labels, template['metadata']['labels'])
+
+ if machine_type:
+ template_spec['domain']['machine']['type'] = machine_type
+
+ if bootloader:
+ template_spec['domain']['firmware']['bootloader'] = {bootloader: {}}
+
+ if smbios_uuid:
+ template_spec['domain']['firmware']['uuid'] = smbios_uuid
+
+ if headless is not None:
+ template_spec['domain']['devices']['autoattachGraphicsDevice'] = not headless
+
+ if vm_affinity or vm_anti_affinity:
+ vms_affinity = vm_affinity or vm_anti_affinity
+ affinity_name = 'podAffinity' if vm_affinity else 'podAntiAffinity'
+ for affinity in vms_affinity.get('soft', []):
+ if not template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution']:
+ template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'] = []
+ template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'].append({
+ 'weight': affinity.get('weight'),
+ 'podAffinityTerm': {
+ 'labelSelector': {
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ },
+ 'topologyKey': affinity.get('topology_key'),
+ },
+ })
+ for affinity in vms_affinity.get('hard', []):
+ if not template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution']:
+ template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'] = []
+ template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'].append({
+ 'labelSelector': {
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ },
+ 'topologyKey': affinity.get('topology_key'),
+ })
+
+ if node_affinity:
+ for affinity in node_affinity.get('soft', []):
+ if not template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution']:
+ template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'] = []
+ template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'].append({
+ 'weight': affinity.get('weight'),
+ 'preference': {
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ }
+ })
+ for affinity in node_affinity.get('hard', []):
+ if not template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms']:
+ template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'] = []
+ template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'].append({
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ })
+
+ if hostname:
+ template_spec['hostname'] = hostname
+
+ if subdomain:
+ template_spec['subdomain'] = subdomain
+
+ # Define disks
+ self._define_disks(disks, template_spec, defaults)
+
+ # Define cloud init disk if defined:
+ # Note, that this must be called after _define_disks, so the cloud_init
+ # is not first in order and it's not used as boot disk:
+ self._define_cloud_init(cloud_init_nocloud, template_spec)
+
+ # Define interfaces:
+ self._define_interfaces(interfaces, template_spec, defaults)
+
+ # Define datavolumes:
+ self._define_datavolumes(datavolumes, definition['spec'])
+
+ return self.merge_dicts(definition, self.resource_definitions[0])
+
+ def construct_vm_definition(self, kind, definition, template, defaults=None):
+ definition = self._construct_vm_definition(kind, definition, template, self.params, defaults)
+ resource = self.find_supported_resource(kind)
+ definition = self.set_defaults(resource, definition)
+ return resource, definition
+
+ def construct_vm_template_definition(self, kind, definition, template, params):
+ definition = self._construct_vm_definition(kind, definition, template, params)
+ resource = self.find_resource(kind, definition['apiVersion'], fail=True)
+
+ # Set defaults:
+ definition['kind'] = kind
+ definition['metadata']['name'] = params.get('name')
+ definition['metadata']['namespace'] = params.get('namespace')
+
+ return resource, definition
+
+ def execute_crud(self, kind, definition):
+ """ Module execution """
+ resource = self.find_supported_resource(kind)
+ definition = self.set_defaults(resource, definition)
+ return self.perform_action(resource, definition)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ldap.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ldap.py
new file mode 100644
index 00000000..d49d0a97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ldap.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+from ansible.module_utils._text import to_native
+
+try:
+ import ldap
+ import ldap.sasl
+
+ HAS_LDAP = True
+except ImportError:
+ HAS_LDAP = False
+
+
+def gen_specs(**specs):
+ specs.update({
+ 'bind_dn': dict(),
+ 'bind_pw': dict(default='', no_log=True),
+ 'dn': dict(required=True),
+ 'server_uri': dict(default='ldapi:///'),
+ 'start_tls': dict(default=False, type='bool'),
+ 'validate_certs': dict(default=True, type='bool'),
+ })
+
+ return specs
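+
+# Illustrative: a module merges its own options into the shared LDAP ones
+# (the 'attributes' option here is a made-up example):
+#
+#   module = AnsibleModule(argument_spec=gen_specs(attributes=dict(type='dict')))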
+
+
+class LdapGeneric(object):
+ def __init__(self, module):
+ # Shortcuts
+ self.module = module
+ self.bind_dn = self.module.params['bind_dn']
+ self.bind_pw = self.module.params['bind_pw']
+ self.dn = self.module.params['dn']
+ self.server_uri = self.module.params['server_uri']
+ self.start_tls = self.module.params['start_tls']
+ self.verify_cert = self.module.params['validate_certs']
+
+ # Establish connection
+ self.connection = self._connect_to_ldap()
+
+ def fail(self, msg, exn):
+ self.module.fail_json(
+ msg=msg,
+ details=to_native(exn),
+ exception=traceback.format_exc()
+ )
+
+ def _connect_to_ldap(self):
+ if not self.verify_cert:
+ ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+
+ connection = ldap.initialize(self.server_uri)
+
+ if self.start_tls:
+ try:
+ connection.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ if self.bind_dn is not None:
+ connection.simple_bind_s(self.bind_dn, self.bind_pw)
+ else:
+ connection.sasl_interactive_bind_s('', ldap.sasl.external())
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+
+ return connection
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/linode.py
new file mode 100644
index 00000000..53d546db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/linode.py
@@ -0,0 +1,21 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Luke Murphy @decentral1se
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def get_user_agent(module):
+ """Retrieve a user-agent to send with LinodeClient requests."""
+ try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+ except ImportError:
+ ansible_version = 'unknown'
+ return 'Ansible-%s/%s' % (module, ansible_version)
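+
+# Illustrative call; 'linode_v4' is a made-up caller name:
+#
+#   user_agent = get_user_agent('linode_v4')  # e.g. 'Ansible-linode_v4/2.10.x'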
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/lxd.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/lxd.py
new file mode 100644
index 00000000..e835a6ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/lxd.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import socket
+import ssl
+
+from ansible.module_utils.urls import generic_urlparse
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six.moves import http_client
+from ansible.module_utils._text import to_text
+
+# httplib/http.client connection using unix domain socket
+HTTPConnection = http_client.HTTPConnection
+HTTPSConnection = http_client.HTTPSConnection
+
+import json
+
+
+class UnixHTTPConnection(HTTPConnection):
+ def __init__(self, path):
+ HTTPConnection.__init__(self, 'localhost')
+ self.path = path
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.path)
+ self.sock = sock
+
+
+class LXDClientException(Exception):
+ def __init__(self, msg, **kwargs):
+ self.msg = msg
+ self.kwargs = kwargs
+
+
+class LXDClient(object):
+ def __init__(self, url, key_file=None, cert_file=None, debug=False):
+ """LXD Client.
+
+ :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
+ :type url: ``str``
+ :param key_file: The path of the client certificate key file.
+ :type key_file: ``str``
+ :param cert_file: The path of the client certificate file.
+ :type cert_file: ``str``
+ :param debug: The debug flag. The request and response are stored in logs when debug is true.
+ :type debug: ``bool``
+ """
+ self.url = url
+ self.debug = debug
+ self.logs = []
+ if url.startswith('https:'):
+ self.cert_file = cert_file
+ self.key_file = key_file
+ parts = generic_urlparse(urlparse(self.url))
+ ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ ctx.load_cert_chain(cert_file, keyfile=key_file)
+ self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
+ elif url.startswith('unix:'):
+ unix_socket_path = url[len('unix:'):]
+ self.connection = UnixHTTPConnection(unix_socket_path)
+ else:
+ raise LXDClientException('URL scheme must be unix: or https:')
+
+ def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
+ resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
+ if resp_json['type'] == 'async':
+ url = '{0}/wait'.format(resp_json['operation'])
+ resp_json = self._send_request('GET', url)
+ if resp_json['metadata']['status'] != 'Success':
+ self._raise_err_from_json(resp_json)
+ return resp_json
+
+ def authenticate(self, trust_password):
+ body_json = {'type': 'client', 'password': trust_password}
+ return self._send_request('POST', '/1.0/certificates', body_json=body_json)
+
+ def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
+ try:
+ body = json.dumps(body_json)
+ self.connection.request(method, url, body=body)
+ resp = self.connection.getresponse()
+ resp_data = resp.read()
+ resp_data = to_text(resp_data, errors='surrogate_or_strict')
+ resp_json = json.loads(resp_data)
+ self.logs.append({
+ 'type': 'sent request',
+ 'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
+ 'response': {'json': resp_json}
+ })
+ resp_type = resp_json.get('type', None)
+ if resp_type == 'error':
+ if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
+ return resp_json
+ if resp_json['error'] == "Certificate already in trust store":
+ return resp_json
+ self._raise_err_from_json(resp_json)
+ return resp_json
+ except socket.error as e:
+ raise LXDClientException('cannot connect to the LXD server', err=e)
+
+ def _raise_err_from_json(self, resp_json):
+ err_params = {}
+ if self.debug:
+ err_params['logs'] = self.logs
+ raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
+
+ @staticmethod
+ def _get_err_from_resp_json(resp_json):
+ err = None
+ metadata = resp_json.get('metadata', None)
+ if metadata is not None:
+ err = metadata.get('err', None)
+ if err is None:
+ err = resp_json.get('error', None)
+ return err
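+
+# Illustrative use; the socket path is the usual LXD default but may differ:
+#
+#   client = LXDClient('unix:/var/lib/lxd/unix.socket')
+#   info = client.do('GET', '/1.0')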
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/manageiq.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/manageiq.py
new file mode 100644
index 00000000..7038fac8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/manageiq.py
@@ -0,0 +1,156 @@
+#
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+
+CLIENT_IMP_ERR = None
+try:
+ from manageiq_client.api import ManageIQClient
+ HAS_CLIENT = True
+except ImportError:
+ CLIENT_IMP_ERR = traceback.format_exc()
+ HAS_CLIENT = False
+
+
+def manageiq_argument_spec():
+ options = dict(
+ url=dict(default=os.environ.get('MIQ_URL', None)),
+ username=dict(default=os.environ.get('MIQ_USERNAME', None)),
+ password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True),
+ token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']),
+ )
+
+ return dict(
+ manageiq_connection=dict(type='dict',
+ apply_defaults=True,
+ options=options),
+ )
+
+
+def check_client(module):
+ if not HAS_CLIENT:
+ module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR)
+
+
+def validate_connection_params(module):
+ params = module.params['manageiq_connection']
+ error_str = "missing required argument: manageiq_connection[{}]"
+ url = params['url']
+ token = params['token']
+ username = params['username']
+ password = params['password']
+
+ if (url and username and password) or (url and token):
+ return params
+ for arg in ['url', 'username', 'password']:
+ if params[arg] in (None, ''):
+ module.fail_json(msg=error_str.format(arg))
+
+
+def manageiq_entities():
+ return {
+ 'provider': 'providers', 'host': 'hosts', 'vm': 'vms',
+ 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores',
+ 'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services',
+ 'service template': 'service_templates', 'template': 'templates',
+ 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints'
+ }
+
+
+class ManageIQ(object):
+ """
+ class encapsulating ManageIQ API client.
+ """
+
+ def __init__(self, module):
+ # handle import errors
+ check_client(module)
+
+ params = validate_connection_params(module)
+
+ url = params['url']
+ username = params['username']
+ password = params['password']
+ token = params['token']
+ verify_ssl = params['validate_certs']
+ ca_bundle_path = params['ca_cert']
+
+ self._module = module
+ self._api_url = url + '/api'
+ self._auth = dict(user=username, password=password, token=token)
+ try:
+ self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
+ except Exception as e:
+ self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e)))
+
+ @property
+ def module(self):
+ """ Ansible module module
+
+ Returns:
+ the ansible module
+ """
+ return self._module
+
+ @property
+ def api_url(self):
+ """ Base ManageIQ API
+
+ Returns:
+ the base ManageIQ API
+ """
+ return self._api_url
+
+ @property
+ def client(self):
+ """ ManageIQ client
+
+ Returns:
+ the ManageIQ client
+ """
+ return self._client
+
+ def find_collection_resource_by(self, collection_name, **params):
+ """ Searches the collection resource by the collection name and the param passed.
+
+ Returns:
+ the resource as an object if it exists in manageiq, None otherwise.
+ """
+ try:
+ entity = self.client.collections.__getattribute__(collection_name).get(**params)
+ except ValueError:
+ return None
+ except Exception as e:
+ self.module.fail_json(msg="failed to find resource {error}".format(error=e))
+ return vars(entity)
+
+ def find_collection_resource_or_fail(self, collection_name, **params):
+ """ Searches the collection resource by the collection name and the param passed.
+
+ Returns:
+ the resource as an object if it exists in manageiq, Fail otherwise.
+ """
+ resource = self.find_collection_resource_by(collection_name, **params)
+ if resource:
+ return resource
+ else:
+ msg = "{collection_name} where {params} does not exist in manageiq".format(
+ collection_name=collection_name, params=str(params))
+ self.module.fail_json(msg=msg)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/memset.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/memset.py
new file mode 100644
index 00000000..357fded5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/memset.py
@@ -0,0 +1,137 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url, urllib_error
+from ansible.module_utils.basic import json
+
+
+class Response(object):
+ '''
+ Create a response object to mimic that of requests.
+ '''
+
+ def __init__(self):
+ self.content = None
+ self.status_code = None
+
+ def json(self):
+ return json.loads(self.content)
+
+
+def memset_api_call(api_key, api_method, payload=None):
+ '''
+ Generic function which returns results back to calling function.
+
+ Requires an API key and an API method to assemble the API URL.
+ Returns response text to be analysed.
+ '''
+ # instantiate a response object
+ response = Response()
+
+ # if we've already started preloading the payload then copy it
+ # and use that, otherwise we need to instantiate it.
+ if payload is None:
+ payload = dict()
+ else:
+ payload = payload.copy()
+
+ # set some sane defaults
+ has_failed = False
+ msg = None
+
+ data = urlencode(payload)
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ api_uri_base = 'https://api.memset.com/v1/json/'
+ api_uri = '{0}{1}/'.format(api_uri_base, api_method)
+
+ try:
+ resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key)
+ response.content = resp.read().decode('utf-8')
+ response.status_code = resp.getcode()
+ except urllib_error.HTTPError as e:
+ try:
+ errorcode = e.code
+ except AttributeError:
+ errorcode = None
+
+ has_failed = True
+ response.content = e.read().decode('utf8')
+ response.status_code = errorcode
+
+ if response.status_code is not None:
+ msg = "Memset API returned a {0} response ({1}, {2}).".format(response.status_code, response.json()['error_type'], response.json()['error'])
+ else:
+ msg = "Memset API returned an error ({0}, {1}).".format(response.json()['error_type'], response.json()['error'])
+
+ if msg is None:
+ msg = response.json()
+
+ return has_failed, msg, response
+
+
+def check_zone_domain(data, domain):
+ '''
+ Returns true if domain already exists, and false if not.
+ '''
+ exists = False
+
+ if data.status_code in [201, 200]:
+ for zone_domain in data.json():
+ if zone_domain['domain'] == domain:
+ exists = True
+
+ return exists
+
+
+def check_zone(data, name):
+ '''
+ Returns true if zone already exists, and false if not.
+ '''
+ counter = 0
+ exists = False
+
+ if data.status_code in [201, 200]:
+ for zone in data.json():
+ if zone['nickname'] == name:
+ counter += 1
+ if counter == 1:
+ exists = True
+
+ return exists, counter
+
+
+def get_zone_id(zone_name, current_zones):
+ '''
+ Returns the zone's id if it exists and is unique
+ '''
+ zone_exists = False
+ zone_id, msg = None, None
+ zone_list = []
+
+ for zone in current_zones:
+ if zone['nickname'] == zone_name:
+ zone_list.append(zone['id'])
+
+ counter = len(zone_list)
+
+ if counter == 0:
+ msg = 'No matching zone found'
+ elif counter == 1:
+ zone_id = zone_list[0]
+ zone_exists = True
+ elif counter > 1:
+ zone_id = None
+ msg = 'Zone ID could not be returned as duplicate zone names were detected'
+
+ return zone_exists, msg, counter, zone_id
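+
+# Illustrative call; the key and method below are placeholders:
+#
+#   has_failed, msg, response = memset_api_call(api_key='<your-api-key>', api_method='dns.zone_list')
+#   if not has_failed:
+#       zones = response.json()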
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/module_helper.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/module_helper.py
new file mode 100644
index 00000000..0e52db7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/module_helper.py
@@ -0,0 +1,302 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from functools import partial, wraps
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ArgFormat(object):
+ """
+ Argument formatter
+ """
+ BOOLEAN = 0
+ PRINTF = 1
+ FORMAT = 2
+
+ @staticmethod
+ def stars_deco(num):
+ if num == 1:
+ def deco(f):
+ return lambda v: f(*v)
+ return deco
+ elif num == 2:
+ def deco(f):
+ return lambda v: f(**v)
+ return deco
+
+ return lambda f: f
+
+ def __init__(self, name, fmt=None, style=FORMAT, stars=0):
+ """
+ Creates a new formatter
+ :param name: Name of the argument to be formatted
+ :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that
+ :param style: Whether arg_format (as str) should use printf-style formatting.
+ Ignored if arg_format is None or not a str (should be callable).
+ :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value
+ """
+ def printf_fmt(_fmt, v):
+ try:
+ return [_fmt % v]
+ except TypeError as e:
+ if e.args[0] != 'not all arguments converted during string formatting':
+ raise
+ return [_fmt]
+
+ _fmts = {
+ ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
+ ArgFormat.PRINTF: printf_fmt,
+ ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
+ }
+
+ self.name = name
+ self.stars = stars
+
+ if fmt is None:
+ fmt = "{0}"
+ style = ArgFormat.FORMAT
+
+ if isinstance(fmt, str):
+ func = _fmts[style]
+ self.arg_format = partial(func, fmt)
+ elif isinstance(fmt, list) or isinstance(fmt, tuple):
+ self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
+ elif hasattr(fmt, '__call__'):
+ self.arg_format = fmt
+ else:
+ raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
+ 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))
+
+ if stars:
+ self.arg_format = (self.stars_deco(stars))(self.arg_format)
+
+ def to_text(self, value):
+ func = self.arg_format
+ return [str(p) for p in func(value)]
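+
+ # Illustrative formatting of single options into CLI arguments:
+ #
+ #   ArgFormat('force', fmt='--force', style=ArgFormat.BOOLEAN).to_text(True)  # ['--force']
+ #   ArgFormat('name', fmt='--name={0}').to_text('x')                          # ['--name=x']
+ #   ArgFormat('count', fmt='--count %s', style=ArgFormat.PRINTF).to_text(3)   # ['--count 3']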
+
+
+def cause_changes(func, on_success=True, on_failure=False):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ try:
+ # pass self through, otherwise bound methods lose their instance
+ func(self, *args, **kwargs)
+ if on_success:
+ self.changed = True
+ except Exception:
+ if on_failure:
+ self.changed = True
+ raise
+ return wrapper
+
+
+def module_fails_on_exception(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ try:
+ func(self, *args, **kwargs)
+ except SystemExit:
+ raise
+ except Exception as e:
+ self.vars.msg = "Module failed with exception: {0}".format(str(e).strip())
+ self.vars.exception = traceback.format_exc()
+ self.module.fail_json(changed=False, msg=self.vars.msg, exception=self.vars.exception, output=self.output, vars=self.vars)
+ return wrapper
+
+
+class DependencyCtxMgr(object):
+ def __init__(self, name, msg=None):
+ self.name = name
+ self.msg = msg
+ self.has_it = False
+ self.exc_type = None
+ self.exc_val = None
+ self.exc_tb = None
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.has_it = exc_type is None
+ self.exc_type = exc_type
+ self.exc_val = exc_val
+ self.exc_tb = exc_tb
+ return not self.has_it
+
+ @property
+ def text(self):
+ return self.msg or str(self.exc_val)
+
+
+class ModuleHelper(object):
+ _dependencies = []
+ module = {}
+ facts_name = None
+
+ class AttrDict(dict):
+ def __getattr__(self, item):
+ return self[item]
+
+ def __init__(self, module=None):
+ self.vars = ModuleHelper.AttrDict()
+ self.output_dict = dict()
+ self.facts_dict = dict()
+ self._changed = False
+
+ if module:
+ self.module = module
+
+ if not isinstance(module, AnsibleModule):
+ self.module = AnsibleModule(**self.module)
+
+ def update_output(self, **kwargs):
+ if kwargs:
+ self.output_dict.update(kwargs)
+
+ def update_facts(self, **kwargs):
+ if kwargs:
+ self.facts_dict.update(kwargs)
+
+ def __init_module__(self):
+ pass
+
+ def __run__(self):
+ raise NotImplementedError()
+
+ @property
+ def changed(self):
+ return self._changed
+
+ @changed.setter
+ def changed(self, value):
+ self._changed = value
+
+ @property
+ def output(self):
+ result = dict(self.vars)
+ result.update(self.output_dict)
+ if self.facts_name:
+ result['ansible_facts'] = {self.facts_name: self.facts_dict}
+ return result
+
+ @module_fails_on_exception
+ def run(self):
+ self.fail_on_missing_deps()
+ self.__init_module__()
+ self.__run__()
+ self.module.exit_json(changed=self.changed, **self.output_dict)
+
+ @classmethod
+ def dependency(cls, name, msg):
+ cls._dependencies.append(DependencyCtxMgr(name, msg))
+ return cls._dependencies[-1]
+
+ def fail_on_missing_deps(self):
+ for d in self._dependencies:
+ if not d.has_it:
+ self.module.fail_json(changed=False,
+ # traceback objects have no format_exc(); format the captured exc info instead
+ exception=''.join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
+ msg=d.text,
+ **self.output_dict)
+
+
+class StateMixin(object):
+ state_param = 'state'
+ default_state = None
+
+ def _state(self):
+ state = self.module.params.get(self.state_param)
+ return self.default_state if state is None else state
+
+ def __run__(self):
+ state = self._state()
+ self.vars.state = state
+
+ # resolve aliases
+ if state not in self.module.params:
+ aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])]
+ if aliased:
+ state = aliased[0]
+ self.vars.effective_state = state
+
+ method = "state_{0}".format(state)
+ if not hasattr(self, method):
+ return self.__state_fallback__()
+ func = getattr(self, method)
+ return func()
+
+ def __state_fallback__(self):
+ raise ValueError("Cannot find method for state: {0}".format(self._state()))
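+
+ # Illustrative: a subclass dispatches on the 'state' option by defining
+ # methods named state_<value> (the names below are examples):
+ #
+ #   class MyModule(StateMixin, ModuleHelper):
+ #       def state_present(self):
+ #           ...
+ #       def state_absent(self):
+ #           ...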
+
+
+class CmdMixin(object):
+ """
+ Mixin for mapping module options to running a CLI command with its arguments.
+ """
+ command = None
+ command_args_formats = dict()
+ check_rc = False
+ force_lang = "C"
+
+ @property
+ def module_formats(self):
+ result = {}
+ for param in self.module.params.keys():
+ result[param] = ArgFormat(param)
+ return result
+
+ @property
+ def custom_formats(self):
+ result = {}
+ for param, fmt_spec in self.command_args_formats.items():
+ result[param] = ArgFormat(param, **fmt_spec)
+ return result
+
+ def _calculate_args(self, extra_params=None, params=None):
+ def add_arg_formatted_param(_cmd_args, arg_format, _value):
+ args = [x for x in arg_format.to_text(_value)]
+ return _cmd_args + args
+
+ def find_format(_param):
+ return self.custom_formats.get(_param, self.module_formats.get(_param))
+
+ extra_params = extra_params or dict()
+ cmd_args = [self.module.get_bin_path(self.command)]
+ param_list = params if params else self.module.params.keys()
+
+ for param in param_list:
+ if param in self.module.argument_spec:
+ if param not in self.module.params:
+ continue
+ fmt = find_format(param)
+ value = self.module.params[param]
+ else:
+ if param not in extra_params:
+ continue
+ fmt = find_format(param)
+ value = extra_params[param]
+ self.cmd_args = cmd_args
+ cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
+
+ return cmd_args
+
+ def process_command_output(self, rc, out, err):
+ return rc, out, err
+
+ def run_command(self, extra_params=None, params=None, *args, **kwargs):
+ self.vars['cmd_args'] = self._calculate_args(extra_params, params)
+ env_update = kwargs.get('environ_update', {})
+ check_rc = kwargs.get('check_rc', self.check_rc)
+ if self.force_lang:
+ env_update.update({'LANGUAGE': self.force_lang})
+ self.update_output(force_lang=self.force_lang)
+ rc, out, err = self.module.run_command(self.vars['cmd_args'],
+ environ_update=env_update,
+ check_rc=check_rc, *args, **kwargs)
+ self.update_output(rc=rc, stdout=out, stderr=err)
+ return self.process_command_output(rc, out, err)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/api.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/api.py
new file mode 100644
index 00000000..6cded8e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/api.py
@@ -0,0 +1,590 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2018 Red Hat Inc.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+from functools import partial
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+
+try:
+ from infoblox_client.connector import Connector
+ from infoblox_client.exceptions import InfobloxException
+ HAS_INFOBLOX_CLIENT = True
+except ImportError:
+ HAS_INFOBLOX_CLIENT = False
+
+# defining nios constants
+NIOS_DNS_VIEW = 'view'
+NIOS_NETWORK_VIEW = 'networkview'
+NIOS_HOST_RECORD = 'record:host'
+NIOS_IPV4_NETWORK = 'network'
+NIOS_IPV6_NETWORK = 'ipv6network'
+NIOS_ZONE = 'zone_auth'
+NIOS_PTR_RECORD = 'record:ptr'
+NIOS_A_RECORD = 'record:a'
+NIOS_AAAA_RECORD = 'record:aaaa'
+NIOS_CNAME_RECORD = 'record:cname'
+NIOS_MX_RECORD = 'record:mx'
+NIOS_SRV_RECORD = 'record:srv'
+NIOS_NAPTR_RECORD = 'record:naptr'
+NIOS_TXT_RECORD = 'record:txt'
+NIOS_NSGROUP = 'nsgroup'
+NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
+NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
+NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
+NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
+NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
+NIOS_MEMBER = 'member'
+
+NIOS_PROVIDER_SPEC = {
+ 'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
+ 'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
+ 'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
+ 'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
+ 'silent_ssl_warnings': dict(type='bool', default=True),
+ 'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
+ 'http_pool_connections': dict(type='int', default=10),
+ 'http_pool_maxsize': dict(type='int', default=10),
+ 'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
+ 'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAPI_VERSION'])),
+ 'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RESULTS']))
+}
+
+
+def get_connector(*args, **kwargs):
+ ''' Returns an instance of infoblox_client.connector.Connector
+ :params args: positional arguments are silently ignored
+ :params kwargs: dict that is passed to Connector init
+ :returns: Connector
+ '''
+ if not HAS_INFOBLOX_CLIENT:
+ raise Exception('infoblox-client is required but does not appear '
+ 'to be installed. It can be installed using the '
+ 'command `pip install infoblox-client`')
+
+ if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
+ raise Exception('invalid or unsupported keyword argument for connector')
+ for key, value in iteritems(NIOS_PROVIDER_SPEC):
+ if key not in kwargs:
+ # apply default values from NIOS_PROVIDER_SPEC since we cannot just
+ # assume the provider values are coming from AnsibleModule
+ if 'default' in value:
+ kwargs[key] = value['default']
+
+ # override any values with env variables unless they were
+ # explicitly set
+ env = ('INFOBLOX_%s' % key).upper()
+ if env in os.environ:
+ kwargs[key] = os.environ.get(env)
+
+ if 'validate_certs' in kwargs.keys():
+ kwargs['ssl_verify'] = kwargs['validate_certs']
+ kwargs.pop('validate_certs', None)
+
+ return Connector(kwargs)
+
+
+def normalize_extattrs(value):
+ ''' Normalize extattrs field to expected format
+ The module accepts extattrs as key/value pairs. This method will
+ transform the key/value pairs into a structure suitable for
+ sending across WAPI in the format of:
+ extattrs: {
+ key: {
+ value: <value>
+ }
+ }
+ '''
+ return dict([(k, {'value': v}) for k, v in iteritems(value)])
+
+
+def flatten_extattrs(value):
+ ''' Flatten the key/value struct for extattrs
+ WAPI returns extattrs field as a dict in form of:
+ extattrs: {
+ key: {
+ value: <value>
+ }
+ }
+ This method will flatten the structure to:
+ extattrs: {
+ key: value
+ }
+ '''
+ return dict([(k, v['value']) for k, v in iteritems(value)])
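+
+# Illustrative round trip between the module and WAPI representations:
+#
+#   normalize_extattrs({'Site': 'HQ'})           # {'Site': {'value': 'HQ'}}
+#   flatten_extattrs({'Site': {'value': 'HQ'}})  # {'Site': 'HQ'}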
+
+
+def member_normalize(member_spec):
+ ''' Transforms the member module arguments into a valid WAPI struct
+ This function will transform the arguments into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ key: <value>,
+ }
+ It will remove any arguments that are set to None since WAPI will error on
+ that condition.
+ The remainder of the value validation is performed by WAPI
+ Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
+ In this function, they are converted to dictionaries.
+ '''
+ member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
+ 'pre_provisioning', 'network_setting', 'v6_network_setting',
+ 'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
+ 'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
+ for key in list(member_spec.keys()):
+ if key in member_elements and member_spec[key] is not None:
+ member_spec[key] = member_spec[key][0]
+ if isinstance(member_spec[key], dict):
+ member_spec[key] = member_normalize(member_spec[key])
+ elif isinstance(member_spec[key], list):
+ for x in member_spec[key]:
+ if isinstance(x, dict):
+ x = member_normalize(x)
+ elif member_spec[key] is None:
+ del member_spec[key]
+ return member_spec
+
+
+class WapiBase(object):
+ ''' Base class for implementing Infoblox WAPI API '''
+ provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}
+
+ def __init__(self, provider):
+ self.connector = get_connector(**provider)
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if name.startswith('_'):
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
+ return partial(self._invoke_method, name)
+
+ def _invoke_method(self, name, *args, **kwargs):
+ try:
+ method = getattr(self.connector, name)
+ return method(*args, **kwargs)
+ except InfobloxException as exc:
+ if hasattr(self, 'handle_exception'):
+ self.handle_exception(name, exc)
+ else:
+ raise
+
+
+class WapiLookup(WapiBase):
+ ''' Implements WapiBase for lookup plugins '''
+ def handle_exception(self, method_name, exc):
+ if ('text' in exc.response):
+ raise Exception(exc.response['text'])
+ else:
+ raise Exception(exc)
+
+
+class WapiInventory(WapiBase):
+ ''' Implements WapiBase for dynamic inventory script '''
+ pass
+
+
+class WapiModule(WapiBase):
+ ''' Implements WapiBase for executing a NIOS module '''
+ def __init__(self, module):
+ self.module = module
+ provider = module.params['provider']
+ try:
+ super(WapiModule, self).__init__(provider)
+ except Exception as exc:
+ self.module.fail_json(msg=to_text(exc))
+
+ def handle_exception(self, method_name, exc):
+ ''' Handles any exceptions raised
+ This method will be called if an InfobloxException is raised for
+ any call to the instance of Connector and also, in case of generic
+ exception. This method will then gracefully fail the module.
+ :args exc: instance of InfobloxException
+ '''
+ if ('text' in exc.response):
+ self.module.fail_json(
+ msg=exc.response['text'],
+ type=exc.response['Error'].split(':')[0],
+ code=exc.response.get('code'),
+ operation=method_name
+ )
+ else:
+ self.module.fail_json(msg=to_native(exc))
+
+ def run(self, ib_obj_type, ib_spec):
+ ''' Runs the module and performs configuration tasks
+ :args ib_obj_type: the WAPI object type to operate against
+ :args ib_spec: the specification for the WAPI object as a dict
+ :returns: a results dict
+ '''
+
+ update = new_name = None
+ state = self.module.params['state']
+ if state not in ('present', 'absent'):
+ self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
+
+ result = {'changed': False}
+
+ obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+
+ # get object reference
+ ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
+ proposed_object = {}
+ for key, value in iteritems(ib_spec):
+ if self.module.params[key] is not None:
+ if 'transform' in value:
+ proposed_object[key] = value['transform'](self.module)
+ else:
+ proposed_object[key] = self.module.params[key]
+
+ # If configure_for_dns is set to False, then delete the default dns view set in the params, else fail the module
+ if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
+ and ib_obj_type == NIOS_HOST_RECORD:
+ del proposed_object['view']
+ elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
+ and ib_obj_type == NIOS_HOST_RECORD:
+ self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')
+
+ if ib_obj_ref:
+ if len(ib_obj_ref) > 1:
+ for each in ib_obj_ref:
+ # To check for existing A_record with same name with input A_record by IP
+ if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
+ current_object = each
+ # To check for existing Host_record with same name with input Host_record by IP
+ elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
+ == proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
+ current_object = each
+ # Else set the current_object with input value
+ else:
+ current_object = obj_filter
+ ref = None
+ else:
+ current_object = ib_obj_ref[0]
+ if 'extattrs' in current_object:
+ current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
+ if current_object.get('_ref'):
+ ref = current_object.pop('_ref')
+ else:
+ current_object = obj_filter
+ ref = None
+ # checks if the object type is member to normalize the attributes being passed
+ if (ib_obj_type == NIOS_MEMBER):
+ proposed_object = member_normalize(proposed_object)
+
+ # checks if the name's field has been updated
+ if update and new_name:
+ proposed_object['name'] = new_name
+
+ check_remove = []
+ if (ib_obj_type == NIOS_HOST_RECORD):
+ # this check is for idempotency: if the same ip address is passed, the
+ # 'add' param is removed, and the same holds true for the 'remove' case.
+ if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
+ for each in current_object['ipv4addrs']:
+ if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
+ if 'add' in proposed_object['ipv4addrs'][0]:
+ del proposed_object['ipv4addrs'][0]['add']
+ break
+ check_remove += each.values()
+ if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
+ if 'remove' in proposed_object['ipv4addrs'][0]:
+ del proposed_object['ipv4addrs'][0]['remove']
+
+ res = None
+ modified = not self.compare_objects(current_object, proposed_object)
+ if 'extattrs' in proposed_object:
+ proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])
+
+ # Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
+ proposed_object = self.check_if_nios_next_ip_exists(proposed_object)
+
+ if state == 'present':
+ if ref is None:
+ if not self.module.check_mode:
+ self.create_object(ib_obj_type, proposed_object)
+ result['changed'] = True
+ # Check if NIOS_MEMBER and the flag to call function create_token is set
+ elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
+ proposed_object = None
+ # the function creates a token that can be used by a pre-provisioned member to join the grid
+ result['api_results'] = self.call_func('create_token', ref, proposed_object)
+ result['changed'] = True
+ elif modified:
+ if 'ipv4addrs' in proposed_object:
+ if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
+ self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)
+
+ if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
+ run_update = True
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ if 'ipv4addrs' in proposed_object:
+ if 'add' in proposed_object['ipv4addrs'][0] or 'remove' in proposed_object['ipv4addrs'][0]:
+ run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
+ if run_update:
+ res = self.update_object(ref, proposed_object)
+ result['changed'] = True
+ else:
+ res = ref
+ if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
+                # remove the 'view' key, as updating 'view' is not supported for a:record/aaaa:record/srv:record/ptr:record
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ del proposed_object['view']
+ if not self.module.check_mode:
+ res = self.update_object(ref, proposed_object)
+ result['changed'] = True
+ elif 'network_view' in proposed_object:
+ proposed_object.pop('network_view')
+ result['changed'] = True
+ if not self.module.check_mode and res is None:
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ self.update_object(ref, proposed_object)
+ result['changed'] = True
+
+ elif state == 'absent':
+ if ref is not None:
+ if 'ipv4addrs' in proposed_object:
+ if 'remove' in proposed_object['ipv4addrs'][0]:
+ self.check_if_add_remove_ip_arg_exists(proposed_object)
+ self.update_object(ref, proposed_object)
+ result['changed'] = True
+ elif not self.module.check_mode:
+ self.delete_object(ref)
+ result['changed'] = True
+
+ return result
+
+ def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
+        ''' Send a POST request if the input host record name and the retrieved
+            ref name are the same but the input IP and the retrieved IP differ '''
+
+        if 'name' in obj_filter and 'name' in ib_obj_ref[0] and ib_obj_type == NIOS_HOST_RECORD:
+ obj_host_name = obj_filter['name']
+ ref_host_name = ib_obj_ref[0]['name']
+            if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
+                current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
+                proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
+            elif 'ipv6addrs' in current_object and 'ipv6addrs' in proposed_object:
+                current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
+                proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
+
+ if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
+ self.create_object(ib_obj_type, proposed_object)
+
+ def check_if_nios_next_ip_exists(self, proposed_object):
+        ''' Check if the nios_next_ip argument is passed in ipaddr while creating
+            a host record; if so, rewrite the proposed object's ipv4addrs to pass
+            func:nextavailableip with the ipaddr range, so the host record is
+            created with the next available IP in one call, avoiding race conditions '''
+
+ if 'ipv4addrs' in proposed_object:
+ if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
+ ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
+ proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+ elif 'ipv4addr' in proposed_object:
+ if 'nios_next_ip' in proposed_object['ipv4addr']:
+ ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
+ proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+
+ return proposed_object
+
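+    # Illustrative sketch (not part of the upstream module), assuming
+    # NIOS_NEXT_AVAILABLE_IP == 'func:nextavailableip': a proposed ipv4addr
+    # given as the string
+    #     {'nios_next_ip': '192.168.10.0/24'}
+    # is rewritten in place by check_if_nios_next_ip_exists() to
+    #     'func:nextavailableip:192.168.10.0/24'
+    # so the record and its next available IP are allocated in a single WAPI call.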
+ def check_if_add_remove_ip_arg_exists(self, proposed_object):
+        '''
+        Check whether the add/remove param is passed in the args and set to true;
+        if so, update the proposed dictionary so that the IP is added to or removed
+        from the existing host_record. If the user passes the param as false,
+        nothing is done.
+        :returns: True if the object changed based on add/remove, along with the
+        updated proposed_object.
+        '''
+ update = False
+ if 'add' in proposed_object['ipv4addrs'][0]:
+ if proposed_object['ipv4addrs'][0]['add']:
+ proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs+'][0]['add']
+ update = True
+ else:
+ del proposed_object['ipv4addrs'][0]['add']
+ elif 'remove' in proposed_object['ipv4addrs'][0]:
+ if proposed_object['ipv4addrs'][0]['remove']:
+ proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs-'][0]['remove']
+ update = True
+ else:
+ del proposed_object['ipv4addrs'][0]['remove']
+ return update, proposed_object
+
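+    # Illustrative sketch (not part of the upstream module): with add=True the
+    # method renames the key so WAPI appends the address instead of replacing it:
+    #     >>> proposed = {'name': 'host1', 'ipv4addrs': [{'ipv4addr': '10.0.0.5', 'add': True}]}
+    #     >>> self.check_if_add_remove_ip_arg_exists(proposed)
+    #     (True, {'name': 'host1', 'ipv4addrs+': [{'ipv4addr': '10.0.0.5'}]})
+    # remove=True behaves the same way but produces an 'ipv4addrs-' key.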
+ def issubset(self, item, objects):
+ ''' Checks if item is a subset of objects
+ :args item: the subset item to validate
+ :args objects: superset list of objects to validate against
+ :returns: True if item is a subset of one entry in objects otherwise
+ this method will return None
+ '''
+ for obj in objects:
+ if isinstance(item, dict):
+ if all(entry in obj.items() for entry in item.items()):
+ return True
+ else:
+ if item in obj:
+ return True
+
+ def compare_objects(self, current_object, proposed_object):
+ for key, proposed_item in iteritems(proposed_object):
+ current_item = current_object.get(key)
+
+ # if proposed has a key that current doesn't then the objects are
+ # not equal and False will be immediately returned
+ if current_item is None:
+ return False
+
+ elif isinstance(proposed_item, list):
+ if key == 'aliases':
+ if set(current_item) != set(proposed_item):
+ return False
+ for subitem in proposed_item:
+ if not self.issubset(subitem, current_item):
+ return False
+
+            elif isinstance(proposed_item, dict):
+                if not self.compare_objects(current_item, proposed_item):
+                    return False
+
+ else:
+ if current_item != proposed_item:
+ return False
+
+ return True
+
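+    # Illustrative sketch (not part of the upstream module): comparison is driven
+    # by the proposed object's keys, so extra keys on the current object are ignored:
+    #     >>> current = {'name': 'a.test', 'ttl': 3600, 'extattrs': {'Site': 'HQ'}}
+    #     >>> proposed = {'name': 'a.test', 'extattrs': {'Site': 'HQ'}}
+    #     >>> self.compare_objects(current, proposed)
+    #     True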
+ def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
+ ''' this function gets the reference object of pre-existing nios objects '''
+
+ update = False
+ old_name = new_name = None
+ if ('name' in obj_filter):
+ # gets and returns the current object based on name/old_name passed
+ try:
+ name_obj = self.module._check_type_dict(obj_filter['name'])
+ old_name = name_obj['old_name']
+ new_name = name_obj['new_name']
+ except TypeError:
+ name = obj_filter['name']
+
+ if old_name and new_name:
+ if (ib_obj_type == NIOS_HOST_RECORD):
+ test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
+ elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
+ test_obj_filter = obj_filter
+ else:
+ test_obj_filter = dict([('name', old_name)])
+ # get the object reference
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
+ if ib_obj:
+ obj_filter['name'] = new_name
+ else:
+ test_obj_filter['name'] = new_name
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
+ update = True
+ return ib_obj, update, new_name
+ if (ib_obj_type == NIOS_HOST_RECORD):
+            # if DNS bypass is set (configure_for_dns is false), check only by name
+ if not obj_filter['configure_for_dns']:
+ test_obj_filter = dict([('name', name)])
+ else:
+ test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
+        elif (ib_obj_type in (NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS)) and ('mac' in obj_filter):
+ test_obj_filter = dict([['mac', obj_filter['mac']]])
+ elif (ib_obj_type == NIOS_A_RECORD):
+ # resolves issue where a_record with uppercase name was returning null and was failing
+ test_obj_filter = obj_filter
+ test_obj_filter['name'] = test_obj_filter['name'].lower()
+ # resolves issue where multiple a_records with same name and different IP address
+ try:
+ ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+ ipaddr = ipaddr_obj['old_ipv4addr']
+ except TypeError:
+ ipaddr = obj_filter['ipv4addr']
+ test_obj_filter['ipv4addr'] = ipaddr
+ elif (ib_obj_type == NIOS_TXT_RECORD):
+ # resolves issue where multiple txt_records with same name and different text
+ test_obj_filter = obj_filter
+ try:
+ text_obj = self.module._check_type_dict(obj_filter['text'])
+ txt = text_obj['old_text']
+ except TypeError:
+ txt = obj_filter['text']
+ test_obj_filter['text'] = txt
+        # if none of the specific filters above applied, copy the passed obj_filter
+ else:
+ test_obj_filter = obj_filter
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_A_RECORD):
+ # resolves issue where multiple a_records with same name and different IP address
+ test_obj_filter = obj_filter
+ try:
+ ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+ ipaddr = ipaddr_obj['old_ipv4addr']
+ except TypeError:
+ ipaddr = obj_filter['ipv4addr']
+ test_obj_filter['ipv4addr'] = ipaddr
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_TXT_RECORD):
+ # resolves issue where multiple txt_records with same name and different text
+ test_obj_filter = obj_filter
+ try:
+ text_obj = self.module._check_type_dict(obj_filter['text'])
+ txt = text_obj['old_text']
+ except TypeError:
+ txt = obj_filter['text']
+ test_obj_filter['text'] = txt
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_ZONE):
+ # del key 'restart_if_needed' as nios_zone get_object fails with the key present
+ temp = ib_spec['restart_if_needed']
+ del ib_spec['restart_if_needed']
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+        # reinstate restart_if_needed if ib_obj is None, meaning there's no existing nios_zone ref
+ if not ib_obj:
+ ib_spec['restart_if_needed'] = temp
+ elif (ib_obj_type == NIOS_MEMBER):
+ # del key 'create_token' as nios_member get_object fails with the key present
+ temp = ib_spec['create_token']
+ del ib_spec['create_token']
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ if temp:
+ # reinstate 'create_token' key
+ ib_spec['create_token'] = temp
+ else:
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ return ib_obj, update, new_name
+
+ def on_update(self, proposed_object, ib_spec):
+        ''' Event called before the update is sent to the API endpoint.
+ This method will allow the final proposed object to be changed
+ and/or keys filtered before it is sent to the API endpoint to
+ be processed.
+ :args proposed_object: A dict item that will be encoded and sent
+ the API endpoint with the updated data structure
+ :returns: updated object to be sent to API endpoint
+ '''
+ keys = set()
+ for key, value in iteritems(proposed_object):
+ update = ib_spec[key].get('update', True)
+ if not update:
+ keys.add(key)
+ return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
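+
+    # Illustrative sketch (not part of the upstream module): keys whose ib_spec
+    # entry carries update=False are stripped before the update is sent:
+    #     >>> ib_spec = {'name': {}, 'view': {'update': False}}
+    #     >>> self.on_update({'name': 'a.test', 'view': 'default'}, ib_spec)
+    #     {'name': 'a.test'}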
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneandone.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneandone.py
new file mode 100644
index 00000000..466d2665
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneandone.py
@@ -0,0 +1,263 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+
+
+class OneAndOneResources:
+ firewall_policy = 'firewall_policy'
+ load_balancer = 'load_balancer'
+ monitoring_policy = 'monitoring_policy'
+ private_network = 'private_network'
+ public_ip = 'public_ip'
+ role = 'role'
+ server = 'server'
+ user = 'user'
+ vpn = 'vpn'
+
+
+def get_resource(oneandone_conn, resource_type, resource_id):
+ switcher = {
+ 'firewall_policy': oneandone_conn.get_firewall,
+ 'load_balancer': oneandone_conn.get_load_balancer,
+ 'monitoring_policy': oneandone_conn.get_monitoring_policy,
+ 'private_network': oneandone_conn.get_private_network,
+ 'public_ip': oneandone_conn.get_public_ip,
+ 'role': oneandone_conn.get_role,
+ 'server': oneandone_conn.get_server,
+ 'user': oneandone_conn.get_user,
+ 'vpn': oneandone_conn.get_vpn,
+ }
+
+ return switcher.get(resource_type, None)(resource_id)
+
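+# Illustrative sketch (not part of upstream): the switcher maps a resource type
+# name to the matching 1&1 client getter, so
+#     get_resource(conn, OneAndOneResources.server, '<server-id>')
+# dispatches to conn.get_server('<server-id>'). An unknown resource_type makes
+# switcher.get() return None, so callers should pass one of the values defined
+# on OneAndOneResources.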
+
+def get_datacenter(oneandone_conn, datacenter, full_object=False):
+ """
+ Validates the datacenter exists by ID or country code.
+ Returns the datacenter ID.
+ """
+ for _datacenter in oneandone_conn.list_datacenters():
+ if datacenter in (_datacenter['id'], _datacenter['country_code']):
+ if full_object:
+ return _datacenter
+ return _datacenter['id']
+
+
+def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False):
+ """
+ Validates the fixed instance size exists by ID or name.
+ Return the instance size ID.
+ """
+ for _fixed_instance_size in oneandone_conn.fixed_server_flavors():
+ if fixed_instance_size in (_fixed_instance_size['id'],
+ _fixed_instance_size['name']):
+ if full_object:
+ return _fixed_instance_size
+ return _fixed_instance_size['id']
+
+
+def get_appliance(oneandone_conn, appliance, full_object=False):
+ """
+ Validates the appliance exists by ID or name.
+ Return the appliance ID.
+ """
+ for _appliance in oneandone_conn.list_appliances(q='IMAGE'):
+ if appliance in (_appliance['id'], _appliance['name']):
+ if full_object:
+ return _appliance
+ return _appliance['id']
+
+
+def get_private_network(oneandone_conn, private_network, full_object=False):
+ """
+ Validates the private network exists by ID or name.
+ Return the private network ID.
+ """
+ for _private_network in oneandone_conn.list_private_networks():
+ if private_network in (_private_network['name'],
+ _private_network['id']):
+ if full_object:
+ return _private_network
+ return _private_network['id']
+
+
+def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
+ """
+ Validates the monitoring policy exists by ID or name.
+ Return the monitoring policy ID.
+ """
+ for _monitoring_policy in oneandone_conn.list_monitoring_policies():
+ if monitoring_policy in (_monitoring_policy['name'],
+ _monitoring_policy['id']):
+ if full_object:
+ return _monitoring_policy
+ return _monitoring_policy['id']
+
+
+def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
+ """
+ Validates the firewall policy exists by ID or name.
+ Return the firewall policy ID.
+ """
+ for _firewall_policy in oneandone_conn.list_firewall_policies():
+ if firewall_policy in (_firewall_policy['name'],
+ _firewall_policy['id']):
+ if full_object:
+ return _firewall_policy
+ return _firewall_policy['id']
+
+
+def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
+ """
+ Validates the load balancer exists by ID or name.
+ Return the load balancer ID.
+ """
+ for _load_balancer in oneandone_conn.list_load_balancers():
+ if load_balancer in (_load_balancer['name'],
+ _load_balancer['id']):
+ if full_object:
+ return _load_balancer
+ return _load_balancer['id']
+
+
+def get_server(oneandone_conn, instance, full_object=False):
+ """
+    Validates that the server exists, by ID or name.
+ Returns the server if one was found.
+ """
+ for server in oneandone_conn.list_servers(per_page=1000):
+ if instance in (server['id'], server['name']):
+ if full_object:
+ return server
+ return server['id']
+
+
+def get_user(oneandone_conn, user, full_object=False):
+ """
+    Validates that the user exists, by ID or name.
+ Returns the user if one was found.
+ """
+ for _user in oneandone_conn.list_users(per_page=1000):
+ if user in (_user['id'], _user['name']):
+ if full_object:
+ return _user
+ return _user['id']
+
+
+def get_role(oneandone_conn, role, full_object=False):
+ """
+    Validates that the role exists, whether given a proper ID or a name.
+    Returns the role if one was found, else None.
+ """
+ for _role in oneandone_conn.list_roles(per_page=1000):
+ if role in (_role['id'], _role['name']):
+ if full_object:
+ return _role
+ return _role['id']
+
+
+def get_vpn(oneandone_conn, vpn, full_object=False):
+ """
+    Validates that the VPN exists, by ID or name.
+ Returns the vpn if one was found.
+ """
+ for _vpn in oneandone_conn.list_vpns(per_page=1000):
+ if vpn in (_vpn['id'], _vpn['name']):
+ if full_object:
+ return _vpn
+ return _vpn['id']
+
+
+def get_public_ip(oneandone_conn, public_ip, full_object=False):
+ """
+    Validates that the public IP exists, by ID or name.
+ Returns the public ip if one was found.
+ """
+ for _public_ip in oneandone_conn.list_public_ips(per_page=1000):
+ if public_ip in (_public_ip['id'], _public_ip['ip']):
+ if full_object:
+ return _public_ip
+ return _public_ip['id']
+
+
+def wait_for_resource_creation_completion(oneandone_conn,
+ resource_type,
+ resource_id,
+ wait_timeout,
+ wait_interval):
+ """
+ Waits for the resource create operation to complete based on the timeout period.
+ """
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+
+ # Refresh the resource info
+ resource = get_resource(oneandone_conn, resource_type, resource_id)
+
+ if resource_type == OneAndOneResources.server:
+ resource_state = resource['status']['state']
+ else:
+ resource_state = resource['state']
+
+ if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or
+ (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')):
+ return
+ elif resource_state.lower() == 'failed':
+ raise Exception('%s creation failed for %s' % (resource_type, resource_id))
+ elif resource_state.lower() in ('active',
+ 'enabled',
+ 'deploying',
+ 'configuring'):
+ continue
+ else:
+ raise Exception(
+ 'Unknown %s state %s' % (resource_type, resource_state))
+
+ raise Exception(
+ 'Timed out waiting for %s completion for %s' % (resource_type, resource_id))
+
+
+def wait_for_resource_deletion_completion(oneandone_conn,
+ resource_type,
+ resource_id,
+ wait_timeout,
+ wait_interval):
+ """
+ Waits for the resource delete operation to complete based on the timeout period.
+ """
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+
+ # Refresh the operation info
+ logs = oneandone_conn.list_logs(q='DELETE',
+ period='LAST_HOUR',
+ sort='-start_date')
+
+ if resource_type == OneAndOneResources.server:
+ _type = 'VM'
+ elif resource_type == OneAndOneResources.private_network:
+ _type = 'PRIVATENETWORK'
+ else:
+ raise Exception(
+ 'Unsupported wait_for delete operation for %s resource' % resource_type)
+
+ for log in logs:
+ if (log['resource']['id'] == resource_id and
+ log['action'] == 'DELETE' and
+ log['type'] == _type and
+ log['status']['state'] == 'OK'):
+ return
+ raise Exception(
+ 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneview.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneview.py
new file mode 100644
index 00000000..bfa5f091
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneview.py
@@ -0,0 +1,485 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import collections
+import json
+import os
+import traceback
+
+HPE_ONEVIEW_IMP_ERR = None
+try:
+ from hpOneView.oneview_client import OneViewClient
+ HAS_HPE_ONEVIEW = True
+except ImportError:
+ HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
+ HAS_HPE_ONEVIEW = False
+
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common._collections_compat import Mapping
+
+
+def transform_list_to_dict(list_):
+ """
+ Transforms a list into a dictionary, putting values as keys.
+
+ :arg list list_: List of values
+ :return: dict: dictionary built
+ """
+
+ ret = {}
+
+ if not list_:
+ return ret
+
+ for value in list_:
+ if isinstance(value, Mapping):
+ ret.update(value)
+ else:
+ ret[to_native(value, errors='surrogate_or_strict')] = True
+
+ return ret
+
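+# Illustrative doctest-style sketch (not part of upstream):
+#     >>> transform_list_to_dict(['ethernet', {'vlan': 10}])
+#     {'ethernet': True, 'vlan': 10}
+# plain values become keys mapped to True, while mappings are merged in as-is.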
+
+def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
+ """
+ Merge two lists by the key. It basically:
+
+ 1. Adds the items that are present on updated_list and are absent on original_list.
+
+ 2. Removes items that are absent on updated_list and are present on original_list.
+
+ 3. For all items that are in both lists, overwrites the values from the original item by the updated item.
+
+ :arg list original_list: original list.
+ :arg list updated_list: list with changes.
+ :arg str key: unique identifier.
+ :arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
+ if its values are null.
+ :return: list: Lists merged.
+ """
+ ignore_when_null = [] if ignore_when_null is None else ignore_when_null
+
+ if not original_list:
+ return updated_list
+
+ items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
+
+ merged_items = collections.OrderedDict()
+
+ for item in updated_list:
+ item_key = item[key]
+ if item_key in items_map:
+ for ignored_key in ignore_when_null:
+ if ignored_key in item and item[ignored_key] is None:
+ item.pop(ignored_key)
+ merged_items[item_key] = items_map[item_key]
+ merged_items[item_key].update(item)
+ else:
+ merged_items[item_key] = item
+
+ return list(merged_items.values())
+
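+# Illustrative doctest-style sketch (not part of upstream):
+#     >>> original = [{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'b'}]
+#     >>> updated = [{'id': 2, 'v': 'B'}, {'id': 3, 'v': 'c'}]
+#     >>> merge_list_by_key(original, updated, key='id')
+#     [{'id': 2, 'v': 'B'}, {'id': 3, 'v': 'c'}]
+# item 1 is dropped, item 2 is overwritten by the updated values, item 3 is added.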
+
+def _str_sorted(obj):
+ if isinstance(obj, Mapping):
+ return json.dumps(obj, sort_keys=True)
+ else:
+ return str(obj)
+
+
+def _standardize_value(value):
+ """
+ Convert value to string to enhance the comparison.
+
+ :arg value: Any object type.
+
+ :return: str: Converted value.
+ """
+ if isinstance(value, float) and value.is_integer():
+        # Workaround to avoid an erroneous comparison between int and float:
+        # drop the trailing zero from integer-valued floats
+ value = int(value)
+
+ return str(value)
+
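+# Illustrative doctest-style sketch (not part of upstream): integer-valued floats
+# and ints standardize to the same string, so 1 and 1.0 compare as equal:
+#     >>> _standardize_value(1.0)
+#     '1'
+#     >>> _standardize_value(1)
+#     '1'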
+
+class OneViewModuleException(Exception):
+ """
+ OneView base Exception.
+
+ Attributes:
+ msg (str): Exception message.
+ oneview_response (dict): OneView rest response.
+ """
+
+ def __init__(self, data):
+ self.msg = None
+ self.oneview_response = None
+
+ if isinstance(data, six.string_types):
+ self.msg = data
+ else:
+ self.oneview_response = data
+
+ if data and isinstance(data, dict):
+ self.msg = data.get('message')
+
+ if self.oneview_response:
+ Exception.__init__(self, self.msg, self.oneview_response)
+ else:
+ Exception.__init__(self, self.msg)
+
+
+class OneViewModuleTaskError(OneViewModuleException):
+ """
+ OneView Task Error Exception.
+
+ Attributes:
+ msg (str): Exception message.
+ error_code (str): A code which uniquely identifies the specific error.
+ """
+
+ def __init__(self, msg, error_code=None):
+ super(OneViewModuleTaskError, self).__init__(msg)
+ self.error_code = error_code
+
+
+class OneViewModuleValueError(OneViewModuleException):
+ """
+ OneView Value Error.
+ The exception is raised when the data contains an inappropriate value.
+
+ Attributes:
+ msg (str): Exception message.
+ """
+ pass
+
+
+class OneViewModuleResourceNotFound(OneViewModuleException):
+ """
+ OneView Resource Not Found Exception.
+ The exception is raised when an associated resource was not found.
+
+ Attributes:
+ msg (str): Exception message.
+ """
+ pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OneViewModuleBase(object):
+ MSG_CREATED = 'Resource created successfully.'
+ MSG_UPDATED = 'Resource updated successfully.'
+ MSG_DELETED = 'Resource deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Resource is already present.'
+ MSG_ALREADY_ABSENT = 'Resource is already absent.'
+ MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
+
+ ONEVIEW_COMMON_ARGS = dict(
+ config=dict(type='path'),
+ hostname=dict(type='str'),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ api_version=dict(type='int'),
+ image_streamer_hostname=dict(type='str')
+ )
+
+ ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
+
+ resource_client = None
+
+ def __init__(self, additional_arg_spec=None, validate_etag_support=False):
+ """
+ OneViewModuleBase constructor.
+
+ :arg dict additional_arg_spec: Additional argument spec definition.
+ :arg bool validate_etag_support: Enables support to eTag validation.
+ """
+ argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ self._check_hpe_oneview_sdk()
+ self._create_oneview_client()
+
+ self.state = self.module.params.get('state')
+ self.data = self.module.params.get('data')
+
+ # Preload params for get_all - used by facts
+ self.facts_params = self.module.params.get('params') or {}
+
+ # Preload options as dict - used by facts
+ self.options = transform_list_to_dict(self.module.params.get('options'))
+
+ self.validate_etag_support = validate_etag_support
+
+ def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
+
+ if validate_etag_support:
+ merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
+
+ if additional_arg_spec:
+ merged_arg_spec.update(additional_arg_spec)
+
+ return merged_arg_spec
+
+ def _check_hpe_oneview_sdk(self):
+ if not HAS_HPE_ONEVIEW:
+ self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
+
+ def _create_oneview_client(self):
+ if self.module.params.get('hostname'):
+ config = dict(ip=self.module.params['hostname'],
+ credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
+ api_version=self.module.params['api_version'],
+ image_streamer_ip=self.module.params['image_streamer_hostname'])
+ self.oneview_client = OneViewClient(config)
+ elif not self.module.params['config']:
+ self.oneview_client = OneViewClient.from_environment_variables()
+ else:
+ self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
+
+ @abc.abstractmethod
+ def execute_module(self):
+ """
+ Abstract method, must be implemented by the inheritor.
+
+        This method is called from the run method. It should contain the module logic.
+
+ :return: dict: It must return a dictionary with the attributes for the module result,
+ such as ansible_facts, msg and changed.
+ """
+ pass
+
+ def run(self):
+ """
+ Common implementation of the OneView run modules.
+
+        It calls the inheritor's 'execute_module' function and returns the result to Ansible.
+
+ It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
+
+ """
+ try:
+ if self.validate_etag_support:
+ if not self.module.params.get('validate_etag'):
+ self.oneview_client.connection.disable_etag_validation()
+
+ result = self.execute_module()
+
+ if "changed" not in result:
+ result['changed'] = False
+
+ self.module.exit_json(**result)
+
+ except OneViewModuleException as exception:
+ error_msg = '; '.join(to_native(e) for e in exception.args)
+ self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+ def resource_absent(self, resource, method='delete'):
+ """
+ Generic implementation of the absent state for the OneView resources.
+
+ It checks if the resource needs to be removed.
+
+ :arg dict resource: Resource to delete.
+ :arg str method: Function of the OneView client that will be called for resource deletion.
+ Usually delete or remove.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+ if resource:
+ getattr(self.resource_client, method)(resource)
+
+ return {"changed": True, "msg": self.MSG_DELETED}
+ else:
+ return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
+
+ def get_by_name(self, name):
+ """
+ Generic get by name implementation.
+
+ :arg str name: Resource name to search for.
+
+ :return: The resource found or None.
+ """
+ result = self.resource_client.get_by('name', name)
+ return result[0] if result else None
+
+ def resource_present(self, resource, fact_name, create_method='create'):
+ """
+ Generic implementation of the present state for the OneView resources.
+
+ It checks if the resource needs to be created or updated.
+
+ :arg dict resource: Resource to create or update.
+        :arg str fact_name: Name of the fact returned to Ansible.
+ :arg str create_method: Function of the OneView client that will be called for resource creation.
+ Usually create or add.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+
+ changed = False
+ if "newName" in self.data:
+ self.data["name"] = self.data.pop("newName")
+
+ if not resource:
+ resource = getattr(self.resource_client, create_method)(self.data)
+ msg = self.MSG_CREATED
+ changed = True
+
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ if self.compare(resource, merged_data):
+ msg = self.MSG_ALREADY_PRESENT
+ else:
+ resource = self.resource_client.update(merged_data)
+ changed = True
+ msg = self.MSG_UPDATED
+
+ return dict(
+ msg=msg,
+ changed=changed,
+ ansible_facts={fact_name: resource}
+ )
+
+ def resource_scopes_set(self, state, fact_name, scope_uris):
+ """
+ Generic implementation of the scopes update PATCH for the OneView resources.
+ It checks if the resource needs to be updated with the current scopes.
+ This method is meant to be run after ensuring the present state.
+ :arg dict state: Dict containing the data from the last state results in the resource.
+ It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
+        :arg str fact_name: Name of the fact returned to Ansible.
+ :arg list scope_uris: List with all the scope URIs to be added to the resource.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+ if scope_uris is None:
+ scope_uris = []
+ resource = state['ansible_facts'][fact_name]
+ operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
+
+ if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
+ state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
+ state['changed'] = True
+ state['msg'] = self.MSG_UPDATED
+
+ return state
+
+ def compare(self, first_resource, second_resource):
+ """
+        Recursively compares two dictionaries for content equivalence, ignoring types and element order.
+        Particularities of the comparison:
+        - A nonexistent key is treated as None
+        - These values are considered equal: None, empty, False
+        - Lists of the same size are compared value by value after a sort.
+        - Each element is converted to str before the comparison.
+ :arg dict first_resource: first dictionary
+ :arg dict second_resource: second dictionary
+ :return: bool: True when equal, False when different.
+ """
+ resource1 = first_resource
+ resource2 = second_resource
+
+ debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
+
+ # The first resource is True / Not Null and the second resource is False / Null
+ if resource1 and not resource2:
+ self.module.log("resource1 and not resource2. " + debug_resources)
+ return False
+
+ # Checks all keys in first dict against the second dict
+ for key in resource1:
+ if key not in resource2:
+ if resource1[key] is not None:
+                    # A nonexistent key is equivalent to a key with value None
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ # If both values are null, empty or False it will be considered equal.
+ elif not resource1[key] and not resource2[key]:
+ continue
+ elif isinstance(resource1[key], Mapping):
+ # recursive call
+ if not self.compare(resource1[key], resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ elif isinstance(resource1[key], list):
+ # change comparison function to compare_list
+ if not self.compare_list(resource1[key], resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+
+ # Checks all keys in the second dict, looking for missing elements
+ for key in resource2.keys():
+ if key not in resource1:
+ if resource2[key] is not None:
+                    # A nonexistent key is equivalent to a key with value None
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+
+ return True
+
+ def compare_list(self, first_resource, second_resource):
+ """
+        Recursively compares two lists for content equivalence, ignoring types and element order.
+        Lists of the same size are compared value by value after a sort;
+        each element is converted to str before the comparison.
+ :arg list first_resource: first list
+ :arg list second_resource: second list
+ :return: True when equal; False when different.
+ """
+
+ resource1 = first_resource
+ resource2 = second_resource
+
+ debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
+
+ # The second list is null / empty / False
+ if not resource2:
+ self.module.log("resource 2 is null. " + debug_resources)
+ return False
+
+ if len(resource1) != len(resource2):
+ self.module.log("resources have different length. " + debug_resources)
+ return False
+
+ resource1 = sorted(resource1, key=_str_sorted)
+ resource2 = sorted(resource2, key=_str_sorted)
+
+ for i, val in enumerate(resource1):
+ if isinstance(val, Mapping):
+ # change comparison function to compare dictionaries
+ if not self.compare(val, resource2[i]):
+ self.module.log("resources are different. " + debug_resources)
+ return False
+ elif isinstance(val, list):
+ # recursive call
+ if not self.compare_list(val, resource2[i]):
+ self.module.log("lists are different. " + debug_resources)
+ return False
+ elif _standardize_value(val) != _standardize_value(resource2[i]):
+ self.module.log("values are different. " + debug_resources)
+ return False
+
+ # no differences found
+ return True
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/online.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/online.py
new file mode 100644
index 00000000..464e4542
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/online.py
@@ -0,0 +1,121 @@
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+
+
+def online_argument_spec():
+ return dict(
+ api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']),
+ no_log=True, aliases=['oauth_token']),
+ api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']),
+ api_timeout=dict(type='int', default=30, aliases=['timeout']),
+ validate_certs=dict(default=True, type='bool'),
+ )
+
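+# Illustrative sketch (not part of upstream): a module would merge these common
+# options into its own argument spec, e.g.
+#     argument_spec = online_argument_spec()
+#     argument_spec.update(state=dict(default='present'))  # 'state' is hypothetical
+#     module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)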
+
+class OnlineException(Exception):
+
+ def __init__(self, message):
+ self.message = message
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if not self.body:
+ if "body" in self.info:
+ return json.loads(self.info["body"])
+ return None
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+ @property
+ def ok(self):
+ return self.status_code in (200, 201, 202, 204)
+
+
+class Online(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.headers = {
+ 'Authorization': "Bearer %s" % self.module.params.get('api_token'),
+ 'User-Agent': self.get_user_agent_string(module),
+ 'Content-type': 'application/json',
+ }
+ self.name = None
+
+ def get_resources(self):
+ results = self.get('/%s' % self.name)
+ if not results.ok:
+ raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format(
+ self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
+ results.status_code, results.json['message']
+ ))
+
+ return results.json
+
+ def _url_builder(self, path):
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s' % (self.module.params.get('api_url'), path)
+
+ def send(self, method, path, data=None, headers=None):
+ url = self._url_builder(path)
+ data = self.module.jsonify(data)
+
+ if headers is not None:
+ self.headers.update(headers)
+
+ resp, info = fetch_url(
+ self.module, url, data=data, headers=self.headers, method=method,
+ timeout=self.module.params.get('api_timeout')
+ )
+
+        # Exceptions in fetch_url may result in a status of -1; this ensures a proper error is reported to the user in all cases
+ if info['status'] == -1:
+ self.module.fail_json(msg=info['msg'])
+
+ return Response(resp, info)
+
+ @staticmethod
+ def get_user_agent_string(module):
+ return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
+
+ def get(self, path, data=None, headers=None):
+ return self.send('GET', path, data, headers)
+
+ def put(self, path, data=None, headers=None):
+ return self.send('PUT', path, data, headers)
+
+ def post(self, path, data=None, headers=None):
+ return self.send('POST', path, data, headers)
+
+ def delete(self, path, data=None, headers=None):
+ return self.send('DELETE', path, data, headers)
+
+ def patch(self, path, data=None, headers=None):
+ return self.send("PATCH", path, data, headers)
+
+ def update(self, path, data=None, headers=None):
+ return self.send("UPDATE", path, data, headers)
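+
+# Illustrative sketch (not part of upstream): a consumer sets `name` to the API
+# collection it queries; get_resources() then GETs <api_url>/<name> and returns
+# the decoded JSON body:
+#     api = Online(module)
+#     api.name = 'server'   # hypothetical collection name
+#     servers = api.get_resources()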
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/opennebula.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/opennebula.py
new file mode 100644
index 00000000..0b95c618
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/opennebula.py
@@ -0,0 +1,310 @@
+#
+# Copyright 2018 www.privaz.io Valletech AB
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import time
+import ssl
+from os import environ
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import AnsibleModule
+
+
+HAS_PYONE = True
+
+try:
+ from pyone import OneException
+ from pyone.server import OneServer
+except ImportError:
+ OneException = Exception
+ HAS_PYONE = False
+
+
+class OpenNebulaModule:
+ """
+ Base class for all OpenNebula Ansible Modules.
+ This is basically a wrapper of the common arguments, the pyone client and
+ some utility methods.
+ """
+
+ common_args = dict(
+ api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
+ api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
+ api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
+ validate_certs=dict(default=True, type='bool'),
+ wait_timeout=dict(type='int', default=300),
+ )
+
+ def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None):
+
+        # copy the shared arg spec so per-module updates don't mutate the class attribute
+        module_args = dict(OpenNebulaModule.common_args)
+        module_args.update(argument_spec)
+
+ self.module = AnsibleModule(argument_spec=module_args,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive)
+ self.result = dict(changed=False,
+ original_message='',
+ message='')
+ self.one = self.create_one_client()
+
+ self.resolved_parameters = self.resolve_parameters()
+
+ def create_one_client(self):
+ """
+        Creates an XML-RPC client to OpenNebula.
+
+ Returns: the new xmlrpc client.
+
+ """
+
+        # a context is required to skip SSL validation; old Python versions won't validate anyway
+ if hasattr(ssl, '_create_unverified_context'):
+ no_ssl_validation_context = ssl._create_unverified_context()
+ else:
+ no_ssl_validation_context = None
+
+ # Check if the module can run
+ if not HAS_PYONE:
+ self.fail("pyone is required for this module")
+
+ if self.module.params.get("api_url"):
+ url = self.module.params.get("api_url")
+ else:
+ self.fail("Either api_url or the environment variable ONE_URL must be provided")
+
+ if self.module.params.get("api_username"):
+ username = self.module.params.get("api_username")
+ else:
+            self.fail("Either api_username or the environment variable ONE_USERNAME must be provided")
+
+ if self.module.params.get("api_password"):
+ password = self.module.params.get("api_password")
+ else:
+            self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided")
+
+ session = "%s:%s" % (username, password)
+
+ if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ:
+ return OneServer(url, session=session, context=no_ssl_validation_context)
+ else:
+ return OneServer(url, session)
+
+ def close_one_client(self):
+ """
+ Close the pyone session.
+ """
+ self.one.server_close()
+
+ def fail(self, msg):
+ """
+ Utility failure method, will ensure pyone is properly closed before failing.
+ Args:
+ msg: human readable failure reason.
+ """
+ if hasattr(self, 'one'):
+ self.close_one_client()
+ self.module.fail_json(msg=msg)
+
+ def exit(self):
+ """
+ Utility exit method, will ensure pyone is properly closed before exiting.
+
+ """
+ if hasattr(self, 'one'):
+ self.close_one_client()
+ self.module.exit_json(**self.result)
+
+ def resolve_parameters(self):
+ """
+        This method resolves parameters provided by a secondary ID to the primary ID.
+        For example, if cluster_name is present, cluster_id will be added by
+        performing the required resolution.
+
+ Returns: a copy of the parameters that includes the resolved parameters.
+
+ """
+
+ resolved_params = dict(self.module.params)
+
+ if 'cluster_name' in self.module.params:
+ clusters = self.one.clusterpool.info()
+ for cluster in clusters.CLUSTER:
+ if cluster.NAME == self.module.params.get('cluster_name'):
+ resolved_params['cluster_id'] = cluster.ID
+
+ return resolved_params
+
+ def is_parameter(self, name):
+ """
+ Utility method to check if a parameter was provided or is resolved
+ Args:
+ name: the parameter to check
+ """
+ if name in self.resolved_parameters:
+ return self.get_parameter(name) is not None
+ else:
+ return False
+
+ def get_parameter(self, name):
+ """
+ Utility method for accessing parameters that includes resolved ID
+ parameters from provided Name parameters.
+ """
+ return self.resolved_parameters.get(name)
+
+ def get_host_by_name(self, name):
+ '''
+ Returns a host given its name.
+ Args:
+ name: the name of the host
+
+ Returns: the host object or None if the host is absent.
+
+ '''
+ hosts = self.one.hostpool.info()
+ for h in hosts.HOST:
+ if h.NAME == name:
+ return h
+ return None
+
+ def get_cluster_by_name(self, name):
+ """
+ Returns a cluster given its name.
+ Args:
+ name: the name of the cluster
+
+        Returns: the cluster object or None if the cluster is absent.
+ """
+
+ clusters = self.one.clusterpool.info()
+ for c in clusters.CLUSTER:
+ if c.NAME == name:
+ return c
+ return None
+
+ def get_template_by_name(self, name):
+ '''
+ Returns a template given its name.
+ Args:
+ name: the name of the template
+
+        Returns: the template object or None if the template is absent.
+
+ '''
+ templates = self.one.templatepool.info()
+ for t in templates.TEMPLATE:
+ if t.NAME == name:
+ return t
+ return None
+
+ def cast_template(self, template):
+ """
+        OpenNebula handles all template elements as strings, and at some point a
+        cast is performed on the types provided by the user. This function mimics
+        that transformation so that required template updates are detected properly.
+        Additionally, an array is converted to a comma-separated list, which works
+        for labels and hopefully for other fields as well.
+
+ Args:
+ template: the template to transform
+
+ Returns: the transformed template with data casts applied.
+ """
+
+ # TODO: check formally available data types in templates
+ # TODO: some arrays might be converted to space separated
+
+ for key in template:
+ value = template[key]
+ if isinstance(value, dict):
+ self.cast_template(template[key])
+ elif isinstance(value, list):
+ template[key] = ', '.join(value)
+ elif not isinstance(value, string_types):
+ template[key] = str(value)
+
+ def requires_template_update(self, current, desired):
+ """
+ This function will help decide if a template update is required or not
+ If a desired key is missing from the current dictionary an update is required
+ If the intersection of both dictionaries is not deep equal, an update is required
+ Args:
+ current: current template as a dictionary
+ desired: desired template as a dictionary
+
+ Returns: True if a template update is required
+ """
+
+ if not desired:
+ return False
+
+ self.cast_template(desired)
+ intersection = dict()
+ for dkey in desired.keys():
+ if dkey in current.keys():
+ intersection[dkey] = current[dkey]
+ else:
+ return True
+ return not (desired == intersection)
+
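+    # Illustrative sketch (not part of the upstream module): values are cast the
+    # way OpenNebula stores them before comparing, so only real changes trigger
+    # an update:
+    #     >>> self.requires_template_update({'MEMORY': '128'}, {'MEMORY': 128})
+    #     False
+    #     >>> self.requires_template_update({'MEMORY': '128'}, {'MEMORY': 256})
+    #     True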
+ def wait_for_state(self, element_name, state, state_name, target_states,
+ invalid_states=None, transition_states=None,
+ wait_timeout=None):
+ """
+ Args:
+ element_name: the name of the object we are waiting for: HOST, VM, etc.
+ state: lambda that returns the current state, will be queried until target state is reached
+ state_name: lambda that returns the readable form of a given state
+ target_states: states expected to be reached
+            invalid_states: if any of these states is reached, fail
+            transition_states: when used, these are the only valid states during the transition.
+            wait_timeout: timeout period in seconds. Defaults to the module's wait_timeout parameter.
+ """
+
+ if not wait_timeout:
+ wait_timeout = self.module.params.get("wait_timeout")
+
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ current_state = state()
+
+            if invalid_states and current_state in invalid_states:
+ self.fail('invalid %s state %s' % (element_name, state_name(current_state)))
+
+ if transition_states:
+ if current_state not in transition_states:
+ self.fail('invalid %s transition state %s' % (element_name, state_name(current_state)))
+
+ if current_state in target_states:
+ return True
+
+ time.sleep(self.one.server_retry_interval())
+
+ self.fail(msg="Wait timeout has expired!")
+
+ def run_module(self):
+ """
+        Triggers execution of the module.
+ """
+ try:
+ self.run(self.one, self.module, self.result)
+ except OneException as e:
+ self.fail(msg="OpenNebula Exception: %s" % e)
+
+ def run(self, one, module, result):
+ """
+ to be implemented by subclass with the actual module actions.
+ Args:
+ one: the OpenNebula XMLRPC client
+ module: the Ansible Module object
+ result: the Ansible result
+ """
+ raise NotImplementedError("Method requires implementation")
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
new file mode 100644
index 00000000..72a872fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
@@ -0,0 +1,1962 @@
+# Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import logging.config
+import os
+import tempfile
+from datetime import datetime
+from operator import eq
+
+import time
+
+try:
+ import yaml
+
+ import oci
+ from oci.constants import HEADER_NEXT_PAGE
+
+ from oci.exceptions import (
+ InvalidConfig,
+ InvalidPrivateKey,
+ MissingPrivateKeyPassphrase,
+ ConfigFileNotFound,
+ ServiceError,
+ MaximumWaitTimeExceeded,
+ )
+ from oci.identity.identity_client import IdentityClient
+ from oci.object_storage.models import CreateBucketDetails
+ from oci.object_storage.models import UpdateBucketDetails
+ from oci.retry import RetryStrategyBuilder
+ from oci.util import to_dict, Sentinel
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six import iteritems
+
+__version__ = "1.6.0-dev"
+
+MAX_WAIT_TIMEOUT_IN_SECONDS = 1200
+
+# If a resource is in one of these states it would be considered inactive
+DEAD_STATES = [
+ "TERMINATING",
+ "TERMINATED",
+ "FAULTY",
+ "FAILED",
+ "DELETING",
+ "DELETED",
+ "UNKNOWN_ENUM_VALUE",
+ "DETACHING",
+ "DETACHED",
+]
+
+# If a resource is in one of these states it would be considered available
+DEFAULT_READY_STATES = [
+ "AVAILABLE",
+ "ACTIVE",
+ "RUNNING",
+ "PROVISIONED",
+ "ATTACHED",
+ "ASSIGNED",
+ "SUCCEEDED",
+ "PENDING_PROVIDER",
+]
+
+# If a resource is in one of these states, it would be considered deleted
+DEFAULT_TERMINATED_STATES = ["TERMINATED", "DETACHED", "DELETED"]
+
+
+def get_common_arg_spec(supports_create=False, supports_wait=False):
+ """
+ Return the common set of module arguments for all OCI cloud modules.
+ :param supports_create: Variable to decide whether to add options related to idempotency of create operation.
+ :param supports_wait: Variable to decide whether to add options related to waiting for completion.
+ :return: A dict with applicable module options.
+ """
+ # Note: This method is used by most OCI ansible resource modules during initialization. When making changes to this
+ # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
+ # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
+ # this method would break that error handling logic.
+ common_args = dict(
+ config_file_location=dict(type="str"),
+ config_profile_name=dict(type="str", default="DEFAULT"),
+ api_user=dict(type="str"),
+ api_user_fingerprint=dict(type="str", no_log=True),
+ api_user_key_file=dict(type="str"),
+ api_user_key_pass_phrase=dict(type="str", no_log=True),
+ auth_type=dict(
+ type="str",
+ required=False,
+ choices=["api_key", "instance_principal"],
+ default="api_key",
+ ),
+ tenancy=dict(type="str"),
+ region=dict(type="str"),
+ )
+
+ if supports_create:
+ common_args.update(
+ key_by=dict(type="list"),
+ force_create=dict(type="bool", default=False),
+ )
+
+ if supports_wait:
+ common_args.update(
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(
+ type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ wait_until=dict(type="str"),
+ )
+
+ return common_args
+
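+# Illustrative sketch (not part of upstream): a resource module would extend the
+# common spec with its own options, e.g.
+#     module_args = get_common_arg_spec(supports_create=True, supports_wait=True)
+#     module_args.update(compartment_id=dict(type='str', required=True))  # hypothetical option
+#     module = AnsibleModule(argument_spec=module_args)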
+
+def get_facts_module_arg_spec(filter_by_name=False):
+ # Note: This method is used by most OCI ansible fact modules during initialization. When making changes to this
+ # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
+ # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
+ # this method would break that error handling logic.
+ facts_module_arg_spec = get_common_arg_spec()
+ if filter_by_name:
+ facts_module_arg_spec.update(name=dict(type="str"))
+ else:
+ facts_module_arg_spec.update(display_name=dict(type="str"))
+ return facts_module_arg_spec
+
+
+def get_oci_config(module, service_client_class=None):
+ """Return the OCI configuration to use for all OCI API calls. The effective OCI configuration is derived by merging
+ any overrides specified for configuration attributes through Ansible module options or environment variables. The
+ order of precedence for deriving the effective configuration dict is:
+ 1. If a config file is provided, use that to setup the initial config dict.
+ 2. If a config profile is specified, use that config profile to setup the config dict.
+ 3. For each authentication attribute, check if an override is provided either through
+ a. Ansible Module option
+ b. Environment variable
+ and override the value in the config dict in that order."""
+ config = {}
+
+ config_file = module.params.get("config_file_location")
+ _debug("Config file through module options - {0} ".format(config_file))
+ if not config_file:
+ if "OCI_CONFIG_FILE" in os.environ:
+ config_file = os.environ["OCI_CONFIG_FILE"]
+ _debug(
+ "Config file through OCI_CONFIG_FILE environment variable - {0}".format(
+ config_file
+ )
+ )
+ else:
+ config_file = "~/.oci/config"
+ _debug("Config file (fallback) - {0} ".format(config_file))
+
+ config_profile = module.params.get("config_profile_name")
+ if not config_profile:
+ if "OCI_CONFIG_PROFILE" in os.environ:
+ config_profile = os.environ["OCI_CONFIG_PROFILE"]
+ else:
+ config_profile = "DEFAULT"
+ try:
+ config = oci.config.from_file(
+ file_location=config_file, profile_name=config_profile
+ )
+ except (
+ ConfigFileNotFound,
+ InvalidConfig,
+ InvalidPrivateKey,
+ MissingPrivateKeyPassphrase,
+ ) as ex:
+ if not _is_instance_principal_auth(module):
+ # When auth_type is not instance_principal, config file is required
+ module.fail_json(msg=str(ex))
+ else:
+ _debug(
+ "Ignore {0} as the auth_type is set to instance_principal".format(
+ str(ex)
+ )
+ )
+ # if instance_principal auth is used, an empty 'config' map is used below.
+
+ config["additional_user_agent"] = "Oracle-Ansible/{0}".format(__version__)
+ # Merge any overrides through other IAM options
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user",
+ env_var_name="OCI_USER_ID",
+ config_attr_name="user",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_fingerprint",
+ env_var_name="OCI_USER_FINGERPRINT",
+ config_attr_name="fingerprint",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_key_file",
+ env_var_name="OCI_USER_KEY_FILE",
+ config_attr_name="key_file",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_key_pass_phrase",
+ env_var_name="OCI_USER_KEY_PASS_PHRASE",
+ config_attr_name="pass_phrase",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="tenancy",
+ env_var_name="OCI_TENANCY",
+ config_attr_name="tenancy",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="region",
+ env_var_name="OCI_REGION",
+ config_attr_name="region",
+ )
+
+ # Redirect calls to home region for IAM service.
+ do_not_redirect = module.params.get(
+ "do_not_redirect_to_home_region", False
+ ) or os.environ.get("OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION")
+ if service_client_class == IdentityClient and not do_not_redirect:
+ _debug("Region passed for module invocation - {0} ".format(config["region"]))
+ identity_client = IdentityClient(config)
+ region_subscriptions = identity_client.list_region_subscriptions(
+ config["tenancy"]
+ ).data
+ # Replace the region in the config with the home region.
+ [config["region"]] = [
+ rs.region_name for rs in region_subscriptions if rs.is_home_region is True
+ ]
+ _debug(
+ "Setting region in the config to home region - {0} ".format(
+ config["region"]
+ )
+ )
+
+ return config
+
+
+def create_service_client(module, service_client_class):
+ """
+ Creates a service client using the common module options provided by the user.
+ :param module: An AnsibleModule that represents user provided options for a Task
+ :param service_client_class: A class that represents a client to an OCI Service
+ :return: A fully configured client
+ """
+ config = get_oci_config(module, service_client_class)
+ kwargs = {}
+
+ if _is_instance_principal_auth(module):
+ try:
+ signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
+ except Exception as ex:
+ message = (
+                "Failed retrieving certificates from localhost. Instance principal based authentication is only "
+ "possible from within OCI compute instances. Exception: {0}".format(
+ str(ex)
+ )
+ )
+ module.fail_json(msg=message)
+
+ kwargs["signer"] = signer
+
+ # XXX: Validate configuration -- this may be redundant, as all Client constructors perform a validation
+ try:
+ oci.config.validate_config(config, **kwargs)
+ except oci.exceptions.InvalidConfig as ic:
+ module.fail_json(
+ msg="Invalid OCI configuration. Exception: {0}".format(str(ic))
+ )
+
+ # Create service client class with the signer
+ client = service_client_class(config, **kwargs)
+
+ return client
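+
+ # Illustrative usage (a sketch, not part of this module's public surface;
+ # oci.core.ComputeClient and `module` are assumptions):
+ #
+ # compute_client = create_service_client(module, oci.core.ComputeClient)
+ # # With auth_type=instance_principal, the signer is attached automatically.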
+
+
+def _is_instance_principal_auth(module):
+ # check if auth type is overridden via module params
+ instance_principal_auth = (
+ "auth_type" in module.params
+ and module.params["auth_type"] == "instance_principal"
+ )
+ if not instance_principal_auth:
+ instance_principal_auth = (
+ "OCI_ANSIBLE_AUTH_TYPE" in os.environ
+ and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal"
+ )
+ return instance_principal_auth
+
+
+def _merge_auth_option(
+ config, module, module_option_name, env_var_name, config_attr_name
+):
+ """Merge the values for an authentication attribute from ansible module options and
+ environment variables with the values specified in a configuration file"""
+ _debug("Merging {0}".format(module_option_name))
+
+ auth_attribute = module.params.get(module_option_name)
+ _debug(
+ "\t Ansible module option {0} = {1}".format(module_option_name, auth_attribute)
+ )
+ if not auth_attribute:
+ if env_var_name in os.environ:
+ auth_attribute = os.environ[env_var_name]
+ _debug(
+ "\t Environment variable {0} = {1}".format(env_var_name, auth_attribute)
+ )
+
+ # An authentication attribute has been provided through an env-variable or an ansible
+ # option and must override the corresponding attribute's value specified in the
+ # config file [profile].
+ if auth_attribute:
+ _debug(
+ "Updating config attribute {0} -> {1} ".format(
+ config_attr_name, auth_attribute
+ )
+ )
+ config.update({config_attr_name: auth_attribute})
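+
+ # Precedence sketch for one attribute (values hypothetical): an explicit module
+ # option wins, else the environment variable, else the config file [profile] value.
+ #
+ # _merge_auth_option(config, module, module_option_name="region",
+ # env_var_name="OCI_REGION", config_attr_name="region")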
+
+
+def bucket_details_factory(bucket_details_type, module):
+ bucket_details = None
+ if bucket_details_type == "create":
+ bucket_details = CreateBucketDetails()
+ elif bucket_details_type == "update":
+ bucket_details = UpdateBucketDetails()
+
+ bucket_details.compartment_id = module.params["compartment_id"]
+ bucket_details.name = module.params["name"]
+ bucket_details.public_access_type = module.params["public_access_type"]
+ bucket_details.metadata = module.params["metadata"]
+
+ return bucket_details
+
+
+def filter_resources(all_resources, filter_params):
+ if not filter_params:
+ return all_resources
+ filtered_resources = []
+ filtered_resources.extend(
+ [
+ resource
+ for resource in all_resources
+ for key, value in filter_params.items()
+ if getattr(resource, key) == value
+ ]
+ )
+ return filtered_resources
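+
+ # Example (sketch; assumes the resource models expose `display_name`):
+ #
+ # matches = filter_resources(all_vcns, {"display_name": "my-vcn"})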
+
+
+def list_all_resources(target_fn, **kwargs):
+ """
+ Return all resources after paging through all results returned by target_fn. If a `display_name` or `name` is
+ provided as a kwarg, then only resources matching the specified name are returned.
+ :param target_fn: The target OCI SDK paged function to call
+ :param kwargs: All arguments that the OCI SDK paged function expects
+ :return: List of all objects returned by target_fn
+ :raises ServiceError: When the Service returned an Error response
+ :raises MaximumWaitTimeExceededError: When maximum wait time is exceeded while invoking target_fn
+ """
+ filter_params = None
+ try:
+ response = call_with_backoff(target_fn, **kwargs)
+ except ValueError as ex:
+ if "unknown kwargs" in str(ex):
+ if "display_name" in kwargs:
+ if kwargs["display_name"]:
+ filter_params = {"display_name": kwargs["display_name"]}
+ del kwargs["display_name"]
+ elif "name" in kwargs:
+ if kwargs["name"]:
+ filter_params = {"name": kwargs["name"]}
+ del kwargs["name"]
+ response = call_with_backoff(target_fn, **kwargs)
+ else:
+ # Any other ValueError is unexpected; re-raise instead of leaving `response` unbound.
+ raise
+
+ existing_resources = response.data
+ while response.has_next_page:
+ kwargs.update(page=response.headers.get(HEADER_NEXT_PAGE))
+ response = call_with_backoff(target_fn, **kwargs)
+ existing_resources += response.data
+
+ # If the underlying SDK Service list* method doesn't support filtering by name or display_name, filter the resources
+ # and return the matching list of resources
+ return filter_resources(existing_resources, filter_params)
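+
+ # Example (sketch; `virtual_network_client` and the OCID are assumptions). The
+ # name filter degrades gracefully for list functions that reject display_name:
+ #
+ # vcns = list_all_resources(
+ # virtual_network_client.list_vcns,
+ # compartment_id="ocid1.compartment.oc1..example",
+ # display_name="my-vcn",
+ # )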
+
+
+def _debug(s):
+ get_logger("oci_utils").debug(s)
+
+
+def get_logger(module_name):
+ oci_logging = setup_logging()
+ return oci_logging.getLogger(module_name)
+
+
+def setup_logging(
+ default_level="INFO",
+):
+ """Setup logging configuration"""
+ env_log_path = "LOG_PATH"
+ env_log_level = "LOG_LEVEL"
+
+ default_log_path = tempfile.gettempdir()
+ log_path = os.getenv(env_log_path, default_log_path)
+ log_level_str = os.getenv(env_log_level, default_level)
+ log_level = logging.getLevelName(log_level_str)
+ log_file_path = os.path.join(log_path, "oci_ansible_module.log")
+ logging.basicConfig(filename=log_file_path, filemode="a", level=log_level)
+ return logging
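+
+ # The destination and verbosity are driven entirely by environment variables
+ # (illustrative values):
+ #
+ # LOG_PATH=/var/log/oci LOG_LEVEL=DEBUG ansible-playbook site.yml
+ # # -> logs appended to /var/log/oci/oci_ansible_module.log at DEBUG level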
+
+
+def check_and_update_attributes(
+ target_instance, attr_name, input_value, existing_value, changed
+):
+ """
+ This function checks the difference between two resource attributes of literal types and sets the attribute
+ value in the target instance type holding the attribute.
+ :param target_instance: The instance which contains the attribute whose values to be compared
+ :param attr_name: Name of the attribute whose value needs to be compared
+ :param input_value: The value of the attribute provided by user
+ :param existing_value: The value of the attribute in the existing resource
+ :param changed: Flag to indicate whether there is any difference between the values
+ :return: Returns a boolean value indicating whether there is any difference between the values
+ """
+ if input_value is not None and not eq(input_value, existing_value):
+ changed = True
+ target_instance.__setattr__(attr_name, input_value)
+ else:
+ target_instance.__setattr__(attr_name, existing_value)
+ return changed
+
+
+def check_and_update_resource(
+ resource_type,
+ get_fn,
+ kwargs_get,
+ update_fn,
+ primitive_params_update,
+ kwargs_non_primitive_update,
+ module,
+ update_attributes,
+ client=None,
+ sub_attributes_of_update_model=None,
+ wait_applicable=True,
+ states=None,
+):
+
+ """
+ This function handles update operation on a resource. It checks whether update is required and accordingly returns
+ the resource and the changed status.
+ :param wait_applicable: Indicates if the resource supports waiting
+ :param client: The resource Client class to use to perform the wait checks. This param must be specified if
+ wait_applicable is True
+ :param resource_type: The type of the resource. e.g. "private_ip"
+ :param get_fn: Function used to get the resource. e.g. virtual_network_client.get_private_ip
+ :param kwargs_get: Dictionary containing the arguments to be used to call get function.
+ e.g. {"private_ip_id": module.params["private_ip_id"]}
+ :param update_fn: Function used to update the resource. e.g virtual_network_client.update_private_ip
+ :param primitive_params_update: List of primitive parameters used for update function. e.g. ['private_ip_id']
+ :param kwargs_non_primitive_update: Dictionary containing the non-primitive arguments to be used to call get
+ function with key as the non-primitive argument type & value as the name of the non-primitive argument to be passed
+ to the update function. e.g. {UpdatePrivateIpDetails: "update_private_ip_details"}
+ :param module: Instance of AnsibleModule
+ :param update_attributes: Attributes in update model.
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param sub_attributes_of_update_model: Dictionary of non-primitive sub-attributes of update model. for example,
+ {'services': [ServiceIdRequestDetails()]} as in UpdateServiceGatewayDetails.
+ :return: Returns a dictionary containing the "changed" status and the resource.
+ """
+ try:
+ result = dict(changed=False)
+ attributes_to_update, resource = get_attr_to_update(
+ get_fn, kwargs_get, module, update_attributes
+ )
+
+ if attributes_to_update:
+ kwargs_update = get_kwargs_update(
+ attributes_to_update,
+ kwargs_non_primitive_update,
+ module,
+ primitive_params_update,
+ sub_attributes_of_update_model,
+ )
+ resource = call_with_backoff(update_fn, **kwargs_update).data
+ if wait_applicable:
+ if client is None:
+ module.fail_json(
+ msg="wait_applicable is True, but client is not specified."
+ )
+ resource = wait_for_resource_lifecycle_state(
+ client, module, True, kwargs_get, get_fn, None, resource, states
+ )
+ result["changed"] = True
+ result[resource_type] = to_dict(resource)
+ return result
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def get_kwargs_update(
+ attributes_to_update,
+ kwargs_non_primitive_update,
+ module,
+ primitive_params_update,
+ sub_attributes_of_update_model=None,
+):
+ kwargs_update = dict()
+ for param in primitive_params_update:
+ kwargs_update[param] = module.params[param]
+ for param in kwargs_non_primitive_update:
+ update_object = param()
+ for key in update_object.attribute_map:
+ if key in attributes_to_update:
+ if (
+ sub_attributes_of_update_model
+ and key in sub_attributes_of_update_model
+ ):
+ setattr(update_object, key, sub_attributes_of_update_model[key])
+ else:
+ setattr(update_object, key, module.params[key])
+ kwargs_update[kwargs_non_primitive_update[param]] = update_object
+ return kwargs_update
+
+
+def is_dictionary_subset(sub, super_dict):
+ """
+ This function checks if the `sub` dictionary is a subset of the `super_dict` dictionary.
+ :param sub: subset dictionary, for example user_provided_attr_value.
+ :param super_dict: super dictionary, for example resources_attr_value.
+ :return: True if sub is contained in super.
+ """
+ for key in sub:
+ if sub[key] != super_dict[key]:
+ return False
+ return True
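+
+ # Quick sketch (hypothetical values): only the keys present in `sub` are compared.
+ #
+ # is_dictionary_subset({"a": 1}, {"a": 1, "b": 2}) # -> True
+ # is_dictionary_subset({"a": 2}, {"a": 1, "b": 2}) # -> False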
+
+
+def are_lists_equal(s, t):
+ if s is None and t is None:
+ return True
+
+ if s is None or t is None or len(s) != len(t):
+ return False
+
+ if len(s) == 0:
+ return True
+
+ s = to_dict(s)
+ t = to_dict(t)
+
+ if type(s[0]) == dict:
+ # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on
+ # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key
+ # `service_name` which is not provided in the list of `services` by a user while making an update call; only
+ # `service_id` is provided by the user in the update call.
+ sorted_s = sort_list_of_dictionary(s)
+ sorted_t = sort_list_of_dictionary(t)
+ for index, d in enumerate(sorted_s):
+ if not is_dictionary_subset(d, sorted_t[index]):
+ return False
+ return True
+ else:
+ # Handle lists of primitive types.
+ try:
+ for elem in s:
+ t.remove(elem)
+ except ValueError:
+ return False
+ return not t
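+
+ # Behaviour sketch (hypothetical values): dict elements are compared as subsets
+ # per sorted position, so extra server-side keys do not break equality.
+ #
+ # are_lists_equal([{"service_id": "A"}],
+ # [{"service_id": "A", "service_name": "All"}]) # -> True
+ # are_lists_equal(["a", "b"], ["b", "a"]) # -> True
+ # are_lists_equal(["a"], ["a", "a"]) # -> False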
+
+
+def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
+ try:
+ resource = call_with_backoff(get_fn, **kwargs_get).data
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+ attributes_to_update = []
+
+ for attr in update_attributes:
+ resources_attr_value = getattr(resource, attr, None)
+ user_provided_attr_value = module.params.get(attr, None)
+
+ unequal_list_attr = (
+ type(resources_attr_value) == list or type(user_provided_attr_value) == list
+ ) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
+ unequal_attr = type(resources_attr_value) != list and to_dict(
+ resources_attr_value
+ ) != to_dict(user_provided_attr_value)
+ if unequal_list_attr or unequal_attr:
+ # only update if the user has explicitly provided a value for this attribute
+ # otherwise, no update is necessary because the user hasn't expressed a particular
+ # value for that attribute
+ if module.params.get(attr, None):
+ attributes_to_update.append(attr)
+
+ return attributes_to_update, resource
+
+
+def get_taggable_arg_spec(supports_create=False, supports_wait=False):
+ """
+ Returns an arg_spec that is valid for taggable OCI resources.
+ :return: A dict that represents an ansible arg spec that builds over the common_arg_spec and adds free-form and
+ defined tags.
+ """
+ tag_arg_spec = get_common_arg_spec(supports_create, supports_wait)
+ tag_arg_spec.update(
+ dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict"))
+ )
+ return tag_arg_spec
+
+
+def add_tags_to_model_from_module(model, module):
+ """
+ Adds free-form and defined tags from an ansible module to a resource model
+ :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
+ :param module: An AnsibleModule representing the options provided by the user
+ :return: The updated model class with the tags specified by the user.
+ """
+ freeform_tags = module.params.get("freeform_tags", None)
+ defined_tags = module.params.get("defined_tags", None)
+ return add_tags_to_model_class(model, freeform_tags, defined_tags)
+
+
+def add_tags_to_model_class(model, freeform_tags, defined_tags):
+ """
+ Add free-form and defined tags to a resource model.
+ :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
+ :param freeform_tags: A dict representing the freeform_tags to be applied to the model
+ :param defined_tags: A dict representing the defined_tags to be applied to the model
+ :return: The updated model class with the tags specified by the user
+ """
+ try:
+ if freeform_tags is not None:
+ _debug("Model {0} set freeform tags to {1}".format(model, freeform_tags))
+ model.__setattr__("freeform_tags", freeform_tags)
+
+ if defined_tags is not None:
+ _debug("Model {0} set defined tags to {1}".format(model, defined_tags))
+ model.__setattr__("defined_tags", defined_tags)
+ except AttributeError as ae:
+ _debug("Model {0} doesn't support tags. Error {1}".format(model, ae))
+
+ return model
+
+
+def check_and_create_resource(
+ resource_type,
+ create_fn,
+ kwargs_create,
+ list_fn,
+ kwargs_list,
+ module,
+ model,
+ existing_resources=None,
+ exclude_attributes=None,
+ dead_states=None,
+ default_attribute_values=None,
+ supports_sort_by_time_created=True,
+):
+ """
+ This function checks whether there is a resource with the same attributes as specified in the module options. If not,
+ it creates and returns the resource.
+ :param resource_type: Type of the resource to be created.
+ :param create_fn: Function used in the module to handle create operation. The function should return a dict with
+ keys as resource & changed.
+ :param kwargs_create: Dictionary of parameters for create operation.
+ :param list_fn: List function in sdk to list all the resources of type resource_type.
+ :param kwargs_list: Dictionary of parameters for list operation.
+ :param module: Instance of AnsibleModule
+ :param model: Model used to create a resource.
+ :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name,
+ dns_label.
+ :param dead_states: List of states which can't transition to any of the usable states of the resource. This defaults
+ to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"]
+ :param default_attribute_values: A dictionary containing default values for attributes.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+
+ if module.params.get("force_create", None):
+ _debug("Force creating {0}".format(resource_type))
+ result = call_with_backoff(create_fn, **kwargs_create)
+ return result
+
+ # Get the existing resources list sorted by creation time in descending order. Return the latest matching resource
+ # in case of multiple resource matches.
+ if exclude_attributes is None:
+ exclude_attributes = {}
+ if default_attribute_values is None:
+ default_attribute_values = {}
+ try:
+ if existing_resources is None:
+ if supports_sort_by_time_created:
+ kwargs_list["sort_by"] = "TIMECREATED"
+ existing_resources = list_all_resources(list_fn, **kwargs_list)
+ except ValueError:
+ # list_fn doesn't support sort_by, so remove the sort_by key in kwargs_list and retry
+ kwargs_list.pop("sort_by", None)
+ try:
+ existing_resources = list_all_resources(list_fn, **kwargs_list)
+ # Handle errors like 404 due to bad arguments to the list_all_resources call.
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+ result = dict()
+
+ attributes_to_consider = _get_attributes_to_consider(
+ exclude_attributes, model, module
+ )
+ if "defined_tags" not in default_attribute_values:
+ default_attribute_values["defined_tags"] = {}
+ resource_matched = None
+ _debug(
+ "Trying to find a match within {0} existing resources".format(
+ len(existing_resources)
+ )
+ )
+
+ for resource in existing_resources:
+ if _is_resource_active(resource, dead_states):
+ _debug(
+ "Comparing user specified values {0} against an existing resource's "
+ "values {1}".format(module.params, to_dict(resource))
+ )
+ if does_existing_resource_match_user_inputs(
+ to_dict(resource),
+ module,
+ attributes_to_consider,
+ exclude_attributes,
+ default_attribute_values,
+ ):
+ resource_matched = to_dict(resource)
+ break
+
+ if resource_matched:
+ _debug("Resource with same attributes found: {0}.".format(resource_matched))
+ result[resource_type] = resource_matched
+ result["changed"] = False
+ else:
+ _debug("No matching resource found. Attempting to create a new resource.")
+ result = call_with_backoff(create_fn, **kwargs_create)
+
+ return result
+
+
+def _get_attributes_to_consider(exclude_attributes, model, module):
+ """
+ Determine the attributes to detect if an existing resource already matches the requested resource state
+ :param exclude_attributes: Attributes to not consider for matching
+ :param model: The model class used to create the Resource
+ :param module: An instance of AnsibleModule that contains user's desires around a resource's state
+ :return: A list of attributes that need to be matched
+ """
+
+ # If a user explicitly requests us to match only against a set of resources (using 'key_by'), use that as the list
+ # of attributes to consider for matching.
+ if "key_by" in module.params and module.params["key_by"] is not None:
+ attributes_to_consider = module.params["key_by"]
+ else:
+ # Consider all attributes except freeform_tags as freeform tags do not distinguish a resource.
+ attributes_to_consider = list(model.attribute_map)
+ if "freeform_tags" in attributes_to_consider:
+ attributes_to_consider.remove("freeform_tags")
+ # Temporarily removing node_count as the existing resource does not reflect it
+ if "node_count" in attributes_to_consider:
+ attributes_to_consider.remove("node_count")
+ _debug("attributes to consider: {0}".format(attributes_to_consider))
+ return attributes_to_consider
+
+
+def _is_resource_active(resource, dead_states):
+ if dead_states is None:
+ dead_states = DEAD_STATES
+
+ if "lifecycle_state" not in resource.attribute_map:
+ return True
+ return resource.lifecycle_state not in dead_states
+
+
+def is_attr_assigned_default(default_attribute_values, attr, assigned_value):
+ if not default_attribute_values:
+ return False
+
+ if attr in default_attribute_values:
+ default_val_for_attr = default_attribute_values.get(attr, None)
+ if isinstance(default_val_for_attr, dict):
+ # When default value for a resource's attribute is empty dictionary, check if the corresponding value of the
+ # existing resource's attribute is also empty.
+ if not default_val_for_attr:
+ return not assigned_value
+ # only compare keys that are in default_attribute_values[attr]
+ # this is to ensure forward compatibility when the API returns new keys that are not known during
+ # the time when the module author provided default values for the attribute
+ keys = {}
+ for k, v in iteritems(assigned_value):
+ if k in default_val_for_attr:
+ keys[k] = v
+
+ return default_val_for_attr == keys
+ # non-dict, normal comparison
+ return default_val_for_attr == assigned_value
+ else:
+ # module author has not provided a default value for attr
+ return True
+
+
+def create_resource(resource_type, create_fn, kwargs_create, module):
+ """
+ Create an OCI resource
+ :param resource_type: Type of the resource to be created. e.g.: "vcn"
+ :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
+ :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn
+ :param module: Instance of AnsibleModule
+ """
+ result = dict(changed=False)
+ try:
+ resource = to_dict(call_with_backoff(create_fn, **kwargs_create).data)
+ _debug("Created {0}, {1}".format(resource_type, resource))
+ result["changed"] = True
+ result[resource_type] = resource
+ return result
+ except (ServiceError, TypeError) as ex:
+ module.fail_json(msg=str(ex))
+
+
+def does_existing_resource_match_user_inputs(
+ existing_resource,
+ module,
+ attributes_to_compare,
+ exclude_attributes,
+ default_attribute_values=None,
+):
+ """
+ Check if 'attributes_to_compare' in an existing_resource match the desired state provided by a user in 'module'.
+ :param existing_resource: A dictionary representing an existing resource's values.
+ :param module: The AnsibleModule representing the options provided by the user.
+ :param attributes_to_compare: A list of attributes of a resource that are used to compare if an existing resource
+ matches the desire state of the resource expressed by the user in 'module'.
+ :param exclude_attributes: The attributes, that a module author provides, which should not be used to match the
+ resource. This dictionary typically includes: (a) attributes which are initialized with dynamic default values
+ like 'display_name', 'security_list_ids' for subnets and (b) attributes that don't have any defaults like
+ 'dns_label' in VCNs. The attributes are the dictionary's keys, with 'True' as the value for each.
+ :param default_attribute_values: A dictionary containing default values for attributes.
+ :return: True if the values for the list of attributes are the same in the existing_resource and module instances.
+ """
+ if not default_attribute_values:
+ default_attribute_values = {}
+ for attr in attributes_to_compare:
+ attribute_with_default_metadata = None
+ if attr in existing_resource:
+ resources_value_for_attr = existing_resource[attr]
+ # Check if the user has explicitly provided the value for attr.
+ user_provided_value_for_attr = _get_user_provided_value(module, attr)
+ if user_provided_value_for_attr is not None:
+ res = [True]
+ check_if_user_value_matches_resources_attr(
+ attr,
+ resources_value_for_attr,
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ if not res[0]:
+ _debug(
+ "Mismatch on attribute '{0}'. User provided value is {1} & existing resource's value"
+ "is {2}.".format(
+ attr, user_provided_value_for_attr, resources_value_for_attr
+ )
+ )
+ return False
+ else:
+ # If the user has not explicitly provided the value for attr and attr is in exclude_list, we can
+ # consider this as a 'pass'. For example, if an attribute 'display_name' is not specified by the user and
+ # that attribute is in the 'exclude_list' according to the module author (not the user), then exclude it
+ # from the comparison.
+ if (
+ exclude_attributes.get(attr) is None
+ and resources_value_for_attr is not None
+ ):
+ if module.argument_spec.get(attr):
+ attribute_with_default_metadata = module.argument_spec.get(attr)
+ default_attribute_value = attribute_with_default_metadata.get(
+ "default", None
+ )
+ if default_attribute_value is not None:
+ if existing_resource[attr] != default_attribute_value:
+ return False
+ # Check if attr has a value that is not default. For example, a custom `security_list_id`
+ # is assigned to the subnet's attribute `security_list_ids`. If the attribute is assigned a
+ # value that is not the default, then it must be considered a mismatch and false returned.
+ elif not is_attr_assigned_default(
+ default_attribute_values, attr, existing_resource[attr]
+ ):
+ return False
+
+ else:
+ _debug(
+ "Attribute {0} is in the create model of resource {1}"
+ "but doesn't exist in the get model of the resource".format(
+ attr, existing_resource.__class__
+ )
+ )
+ return True
+
+
+def tuplize(d):
+ """
+ This function takes a dictionary and converts it to a list of tuples recursively.
+ :param d: A dictionary.
+ :return: List of tuples.
+ """
+ list_of_tuples = []
+ key_list = sorted(list(d.keys()))
+ for key in key_list:
+ if type(d[key]) == list:
+ # Convert a value which is itself a list of dict to a list of tuples.
+ if d[key] and type(d[key][0]) == dict:
+ sub_tuples = []
+ for sub_dict in d[key]:
+ sub_tuples.append(tuplize(sub_dict))
+ # To handle comparing two None values, while creating a tuple for a {key: value}, make the first element
+ # in the tuple a boolean `True` if value is None so that attributes with None value are put at last
+ # in the sorted list.
+ list_of_tuples.append((sub_tuples is None, key, sub_tuples))
+ else:
+ list_of_tuples.append((d[key] is None, key, d[key]))
+ elif type(d[key]) == dict:
+ tupled_value = tuplize(d[key])
+ list_of_tuples.append((tupled_value is None, key, tupled_value))
+ else:
+ list_of_tuples.append((d[key] is None, key, d[key]))
+ return list_of_tuples
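+
+ # Shape sketch (hypothetical input): tuplize({"b": None, "a": 1})
+ # -> [(False, 'a', 1), (True, 'b', None)]; the leading boolean pushes None
+ # values after non-None ones when two dicts' tuple forms are compared.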
+
+
+def get_key_for_comparing_dict(d):
+ tuple_form_of_d = tuplize(d)
+ return tuple_form_of_d
+
+
+def sort_dictionary(d):
+ """
+ This function sorts values of a dictionary recursively.
+ :param d: A dictionary.
+ :return: Dictionary with sorted elements.
+ """
+ sorted_d = {}
+ for key in d:
+ if type(d[key]) == list:
+ if d[key] and type(d[key][0]) == dict:
+ sorted_value = sort_list_of_dictionary(d[key])
+ sorted_d[key] = sorted_value
+ else:
+ sorted_d[key] = sorted(d[key])
+ elif type(d[key]) == dict:
+ sorted_d[key] = sort_dictionary(d[key])
+ else:
+ sorted_d[key] = d[key]
+ return sorted_d
+
+
+def sort_list_of_dictionary(list_of_dict):
+ """
+ This function sorts a list of dictionaries. It first sorts each value of the dictionary and then sorts the list of
+ individually sorted dictionaries. For sorting, each dictionary's tuple equivalent is used.
+ :param list_of_dict: List of dictionaries.
+ :return: A sorted list of dictionaries.
+ """
+ list_with_sorted_dict = []
+ for d in list_of_dict:
+ sorted_d = sort_dictionary(d)
+ list_with_sorted_dict.append(sorted_d)
+ return sorted(list_with_sorted_dict, key=get_key_for_comparing_dict)
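+
+ # Canonicalisation sketch (hypothetical values): each dict is sorted internally,
+ # then the list is ordered by the dicts' tuple forms.
+ #
+ # sort_list_of_dictionary([{"id": 2, "tags": ["b", "a"]}, {"id": 1}])
+ # # -> [{"id": 1}, {"id": 2, "tags": ["a", "b"]}]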
+
+
+def check_if_user_value_matches_resources_attr(
+ attribute_name,
+ resources_value_for_attr,
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+):
+ if isinstance(default_attribute_values.get(attribute_name), dict):
+ default_attribute_values = default_attribute_values.get(attribute_name)
+
+ if isinstance(exclude_attributes.get(attribute_name), dict):
+ exclude_attributes = exclude_attributes.get(attribute_name)
+
+ if isinstance(resources_value_for_attr, list) or isinstance(
+ user_provided_value_for_attr, list
+ ):
+ # Perform a deep equivalence check for a List attribute
+ if exclude_attributes.get(attribute_name):
+ return
+ if (
+ user_provided_value_for_attr is None
+ and default_attribute_values.get(attribute_name) is not None
+ ):
+ user_provided_value_for_attr = default_attribute_values.get(attribute_name)
+
+ if resources_value_for_attr is None and user_provided_value_for_attr is None:
+ return
+
+ if (
+ resources_value_for_attr is None
+ or user_provided_value_for_attr is None
+ ):
+ res[0] = False
+ return
+
+ if (
+ resources_value_for_attr is not None
+ and user_provided_value_for_attr is not None
+ and len(resources_value_for_attr) != len(user_provided_value_for_attr)
+ ):
+ res[0] = False
+ return
+
+ if (
+ user_provided_value_for_attr
+ and type(user_provided_value_for_attr[0]) == dict
+ ):
+ # Process a list of dict
+ sorted_user_provided_value_for_attr = sort_list_of_dictionary(
+ user_provided_value_for_attr
+ )
+ sorted_resources_value_for_attr = sort_list_of_dictionary(
+ resources_value_for_attr
+ )
+
+ else:
+ sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr)
+ sorted_resources_value_for_attr = sorted(resources_value_for_attr)
+
+ # Walk through the sorted list values of the resource's value for this attribute, and compare against user
+ # provided values.
+ for index, resources_value_for_attr_part in enumerate(
+ sorted_resources_value_for_attr
+ ):
+ check_if_user_value_matches_resources_attr(
+ attribute_name,
+ resources_value_for_attr_part,
+ sorted_user_provided_value_for_attr[index],
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+
+ elif isinstance(resources_value_for_attr, dict):
+ # Perform a deep equivalence check for dict typed attributes
+
+ if not resources_value_for_attr and user_provided_value_for_attr:
+ res[0] = False
+ for key in resources_value_for_attr:
+ if (
+ user_provided_value_for_attr is not None
+ and user_provided_value_for_attr
+ ):
+ check_if_user_value_matches_resources_attr(
+ key,
+ resources_value_for_attr.get(key),
+ user_provided_value_for_attr.get(key),
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ else:
+ if exclude_attributes.get(key) is None:
+ if default_attribute_values.get(key) is not None:
+ user_provided_value_for_attr = default_attribute_values.get(key)
+ check_if_user_value_matches_resources_attr(
+ key,
+ resources_value_for_attr.get(key),
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ else:
+ res[0] = is_attr_assigned_default(
+ default_attribute_values,
+ attribute_name,
+ resources_value_for_attr.get(key),
+ )
+
+ elif resources_value_for_attr != user_provided_value_for_attr:
+ if (
+ exclude_attributes.get(attribute_name) is None
+ and default_attribute_values.get(attribute_name) is not None
+ ):
+ # As the user has not specified a value for an optional attribute, if the existing resource's
+ # current state has a DEFAULT value for that attribute, we must not consider this incongruence
+ # an issue and continue with other checks. If the existing resource's value for the attribute
+ # is not the default value, then the existing resource is not a match.
+ if not is_attr_assigned_default(
+ default_attribute_values, attribute_name, resources_value_for_attr
+ ):
+ res[0] = False
+ elif user_provided_value_for_attr is not None:
+ res[0] = False
+
+
+def are_dicts_equal(
+ option_name,
+ existing_resource_dict,
+ user_provided_dict,
+ exclude_list,
+ default_attribute_values,
+):
+ if not user_provided_dict:
+ # User has not provided a value for the map option. In this case, the user hasn't expressed an intent around
+ # this optional attribute. Check if existing_resource_dict matches default.
+ # For example, source_details attribute in volume is optional and does not have any defaults.
+ return is_attr_assigned_default(
+ default_attribute_values, option_name, existing_resource_dict
+ )
+
+ # If the existing resource has an empty dict, while the user has provided entries, dicts are not equal
+ if not existing_resource_dict and user_provided_dict:
+ return False
+
+ # check if all keys of an existing resource's dict attribute match the user-provided dict's entries
+ for sub_attr in existing_resource_dict:
+ # If user has provided value for sub-attribute, then compare it with corresponding key in existing resource.
+ if sub_attr in user_provided_dict:
+ if existing_resource_dict[sub_attr] != user_provided_dict[sub_attr]:
+ _debug(
+ "Failed to match: Existing resource's attr {0} sub-attr {1} value is {2}, while user "
+ "provided value is {3}".format(
+ option_name,
+ sub_attr,
+ existing_resource_dict[sub_attr],
+ user_provided_dict.get(sub_attr, None),
+ )
+ )
+ return False
+
+ # If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value.
+ else:
+ if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list):
+ default_value_for_dict_attr = default_attribute_values.get(
+ option_name, None
+ )
+ if default_value_for_dict_attr:
+ # if a default value for the sub-attr was provided by the module author, fail if the existing
+ # resource's value for the sub-attr is not the default
+ if not is_attr_assigned_default(
+ default_value_for_dict_attr,
+ sub_attr,
+ existing_resource_dict[sub_attr],
+ ):
+ return False
+ else:
+ # No default value specified by module author for sub_attr
+ _debug(
+ "Consider as match: Existing resource's attr {0} sub-attr {1} value is {2}, while user did"
+ "not provide a value for it. The module author also has not provided a default value for it"
+ "or marked it for exclusion. So ignoring this attribute during matching and continuing with"
+ "other checks".format(
+ option_name, sub_attr, existing_resource_dict[sub_attr]
+ )
+ )
+
+ return True
+
+
+def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list):
+ """An entry for the Exclude list for excluding a map's key is specifed as a dict with the map option name as the
+ key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a map
+ option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """
+ for exclude_item in exclude_list:
+ if isinstance(exclude_item, dict):
+ if map_option_name in exclude_item:
+ if option_key in exclude_item[map_option_name]:
+ return True
+ return False
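+
+ # Exclude-list sketch matching the docstring above (names hypothetical):
+ #
+ # exclude_list = ["display_name", {"m1": ["k1", "k2"]}]
+ # should_dict_attr_be_excluded("m1", "k1", exclude_list) # -> True
+ # should_dict_attr_be_excluded("m1", "k3", exclude_list) # -> False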
+
+
+def create_and_wait(
+ resource_type,
+ client,
+ create_fn,
+ kwargs_create,
+ get_fn,
+ get_param,
+ module,
+ states=None,
+ wait_applicable=True,
+ kwargs_get=None,
+):
+ """
+ A utility function to create a resource and wait for the resource to get into the state as specified in the module
+ options.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
+ :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param module: Instance of AnsibleModule.
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param kwargs_get: Dictionary containing arguments to be used to call a multi-argument `get` function
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ try:
+ return create_or_update_resource_and_wait(
+ resource_type,
+ create_fn,
+ kwargs_create,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ kwargs_get,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def update_and_wait(
+ resource_type,
+ client,
+ update_fn,
+ kwargs_update,
+ get_fn,
+ get_param,
+ module,
+ states=None,
+ wait_applicable=True,
+ kwargs_get=None,
+):
+ """
+ A utility function to update a resource and wait for the resource to get into the state as specified in the module
+ options. It mirrors create_and_wait: apart from the update function and its arguments, everything else is the same.
+ :param wait_applicable: Specifies if wait for update is applicable for this resource
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param update_fn: Function in the SDK to update the resource. e.g. virtual_network_client.update_vcn
+ :param kwargs_update: Dictionary containing arguments to be used to call the update function update_fn.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param module: Instance of AnsibleModule.
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :param states: List of lifecycle states to watch for while waiting after update_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ try:
+ return create_or_update_resource_and_wait(
+ resource_type,
+ update_fn,
+ kwargs_update,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ kwargs_get=kwargs_get,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def create_or_update_resource_and_wait(
+ resource_type,
+ function,
+ kwargs_function,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ update_target_resource_id_in_get_param=False,
+ kwargs_get=None,
+):
+ """
+ A utility function to create or update a resource and wait for the resource to get into the state as specified in
+ the module options.
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param function: Function in the SDK to create or update the resource.
+ :param kwargs_function: Dictionary containing arguments to be used to call the create or update function
+ :param module: Instance of AnsibleModule.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ result = create_resource(resource_type, function, kwargs_function, module)
+ resource = result[resource_type]
+ result[resource_type] = wait_for_resource_lifecycle_state(
+ client,
+ module,
+ wait_applicable,
+ kwargs_get,
+ get_fn,
+ get_param,
+ resource,
+ states,
+ resource_type,
+ )
+ return result
+
+
+def wait_for_resource_lifecycle_state(
+ client,
+ module,
+ wait_applicable,
+ kwargs_get,
+ get_fn,
+ get_param,
+ resource,
+ states,
+ resource_type=None,
+):
+ """
+ A utility function to wait for the resource to get into the state as specified in
+ the module options.
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient
+ :param module: Instance of AnsibleModule.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ if wait_applicable and module.params.get("wait", None):
+ if resource_type == "compartment":
+ # An immediate attempt to retrieve a compartment right after it is created fails with a
+ # 404: 'Authorization failed or requested resource not found'.
+ # This is because it takes a few seconds for the permissions on a compartment to be ready.
+ # Wait a few seconds before attempting a get call on the compartment.
+ _debug(
+ "Pausing execution for permission on the newly created compartment to be ready."
+ )
+ time.sleep(15)
+ if kwargs_get:
+ _debug(
+ "Waiting for resource to reach READY state. get_args: {0}".format(
+ kwargs_get
+ )
+ )
+ response_get = call_with_backoff(get_fn, **kwargs_get)
+ else:
+ _debug(
+ "Waiting for resource with id {0} to reach READY state.".format(
+ resource["id"]
+ )
+ )
+ response_get = call_with_backoff(get_fn, **{get_param: resource["id"]})
+ if states is None:
+ states = module.params.get("wait_until") or DEFAULT_READY_STATES
+ resource = to_dict(
+ oci.wait_until(
+ client,
+ response_get,
+ evaluate_response=lambda r: r.data.lifecycle_state in states,
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ ).data
+ )
+ return resource
+
+
+def wait_on_work_request(client, response, module):
+ try:
+ if module.params.get("wait", None):
+ _debug(
+ "Waiting for work request with id {0} to reach SUCCEEDED state.".format(
+ response.data.id
+ )
+ )
+ wait_response = oci.wait_until(
+ client,
+ response,
+ evaluate_response=lambda r: r.data.status == "SUCCEEDED",
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ )
+ else:
+ _debug(
+ "Waiting for work request with id {0} to reach ACCEPTED state.".format(
+ response.data.id
+ )
+ )
+ wait_response = oci.wait_until(
+ client,
+ response,
+ evaluate_response=lambda r: r.data.status == "ACCEPTED",
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ )
+ except MaximumWaitTimeExceeded as ex:
+ _debug(str(ex))
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ _debug(str(ex))
+ module.fail_json(msg=str(ex))
+ return wait_response.data
+
+
+def delete_and_wait(
+ resource_type,
+ client,
+ get_fn,
+ kwargs_get,
+ delete_fn,
+ kwargs_delete,
+ module,
+ states=None,
+ wait_applicable=True,
+ process_work_request=False,
+):
+ """A utility function to delete a resource and wait for the resource to get into the state as specified in the
+ module options.
+ :param wait_applicable: Specifies if wait for delete is applicable for this resource
+ :param resource_type: Type of the resource to be deleted. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param kwargs_get: Dictionary of arguments for get function get_fn. e.g. {"vcn_id": module.params["id"]}
+ :param delete_fn: Function in the SDK to delete the resource. e.g. virtual_network_client.delete_vcn
+ :param kwargs_delete: Dictionary of arguments for delete function delete_fn. e.g. {"vcn_id": module.params["id"]}
+ :param module: Instance of AnsibleModule.
+ :param states: List of lifecycle states to watch for while waiting after delete_fn is called. If nothing is passed,
+ defaults to ["TERMINATED", "DETACHED", "DELETED"].
+ :param process_work_request: Whether a work request is generated on an API call and if it needs to be handled.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+
+ states_set = {
+ "DETACHING", "DETACHED", "DELETING", "DELETED", "TERMINATING", "TERMINATED",
+ }
+ result = dict(changed=False)
+ result[resource_type] = dict()
+ try:
+ resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
+ if resource:
+ if "lifecycle_state" not in resource or resource["lifecycle_state"] not in states_set:
+ response = call_with_backoff(delete_fn, **kwargs_delete)
+ if process_work_request:
+ wr_id = response.headers.get("opc-work-request-id")
+ get_wr_response = call_with_backoff(
+ client.get_work_request, work_request_id=wr_id
+ )
+ result["work_request"] = to_dict(
+ wait_on_work_request(client, get_wr_response, module)
+ )
+ # Set changed to True as work request has been created to delete the resource.
+ result["changed"] = True
+ resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
+ else:
+ _debug("Deleted {0}, {1}".format(resource_type, resource))
+ result["changed"] = True
+
+ if wait_applicable and module.params.get("wait", None):
+ if states is None:
+ states = (
+ module.params.get("wait_until")
+ or DEFAULT_TERMINATED_STATES
+ )
+ try:
+ wait_response = oci.wait_until(
+ client,
+ get_fn(**kwargs_get),
+ evaluate_response=lambda r: r.data.lifecycle_state
+ in states,
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ succeed_on_not_found=True,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ if ex.status != 404:
+ module.fail_json(msg=ex.message)
+ else:
+ # The resource was not found while waiting for it to reach a terminated state; treat it as deleted.
+ _debug(
+ "API returned Status:404(Not Found) while waiting for resource to get into"
+ " terminated state."
+ )
+ resource["lifecycle_state"] = "DELETED"
+ result[resource_type] = resource
+ return result
+ # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found.
+ if type(wait_response) is not Sentinel:
+ resource = to_dict(wait_response.data)
+ else:
+ resource["lifecycle_state"] = "DELETED"
+
+ result[resource_type] = resource
+ else:
+ _debug(
+ "Resource {0} with {1} already deleted. So returning changed=False".format(
+ resource_type, kwargs_get
+ )
+ )
+ except ServiceError as ex:
+ # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
+ # resource is not available, instead of the expected 404. So working around this for now.
+ if type(client) == oci.dns.DnsClient:
+ if ex.status == 400 and ex.code == "InvalidParameter":
+ _debug(
+ "Resource {0} with {1} already deleted. So returning changed=False".format(
+ resource_type, kwargs_get
+ )
+ )
+ elif ex.status != 404:
+ module.fail_json(msg=ex.message)
+ result[resource_type] = dict()
+ return result
+
+
+def are_attrs_equal(current_resource, module, attributes):
+ """
+ Check if the specified attributes are equal in the specified 'model' and 'module'. This is used to check if an OCI
+ Model instance already has the values specified by an Ansible user while invoking an OCI Ansible module and if a
+ resource needs to be updated.
+ :param current_resource: A resource model instance
+ :param module: The AnsibleModule representing the options provided by the user
+ :param attributes: A list of attributes that would need to be compared in the model and the module instances.
+ :return: True if the values for the list of attributes are the same in the model and module instances
+ """
+ for attr in attributes:
+ curr_value = getattr(current_resource, attr, None)
+ user_provided_value = _get_user_provided_value(module, attribute_name=attr)
+
+ if user_provided_value is not None:
+ if curr_value != user_provided_value:
+ _debug(
+ "are_attrs_equal - current resource's attribute "
+ + attr
+ + " value is "
+ + str(curr_value)
+ + " and this doesn't match user provided value of "
+ + str(user_provided_value)
+ )
+ return False
+ return True
+
+
+def _get_user_provided_value(module, attribute_name):
+ """
+ Returns the user provided value for "attribute_name". We consider aliases in the module.
+ """
+ user_provided_value = module.params.get(attribute_name, None)
+ if user_provided_value is None:
+ # If the attribute_name is set as an alias for some option X and user has provided value in the playbook using
+ # option X, then user provided value for attribute_name is equal to value for X.
+ # Get option name for attribute_name from module.aliases.
+ # module.aliases is a dictionary with key as alias name and its value as option name.
+ option_alias_for_attribute = module.aliases.get(attribute_name, None)
+ if option_alias_for_attribute is not None:
+ user_provided_value = module.params.get(option_alias_for_attribute, None)
+ return user_provided_value
+
+
+def update_model_with_user_options(curr_model, update_model, module):
+ """
+ Update the 'update_model' with user provided values in 'module' for the specified 'attributes' if they are different
+ from the values in the 'curr_model'.
+ :param curr_model: A resource model instance representing the state of the current resource
+ :param update_model: An instance of the update resource model for the current resource's type
+ :param module: An AnsibleModule representing the options provided by the user
+ :return: An updated 'update_model' instance filled with values that would need to be updated in the current resource
+ state to satisfy the user's requested state.
+ """
+ attributes = update_model.attribute_map.keys()
+ for attr in attributes:
+ curr_value_for_attr = getattr(curr_model, attr, None)
+ user_provided_value = _get_user_provided_value(module, attribute_name=attr)
+
+ if curr_value_for_attr != user_provided_value:
+ if user_provided_value is not None:
+ # Only update if a user has specified a value for an option
+ _debug(
+ "User requested {0} for attribute {1}, whereas the current value is {2}. So adding it "
+ "to the update model".format(
+ user_provided_value, attr, curr_value_for_attr
+ )
+ )
+ setattr(update_model, attr, user_provided_value)
+ else:
+ # Always set current values of the resource in the update model if there is no request for change in
+ # values
+ setattr(update_model, attr, curr_value_for_attr)
+ return update_model
+
+
+def _get_retry_strategy():
+ retry_strategy_builder = RetryStrategyBuilder(
+ max_attempts_check=True,
+ max_attempts=10,
+ retry_max_wait_between_calls_seconds=30,
+ retry_base_sleep_time_seconds=3,
+ backoff_type=oci.retry.BACKOFF_FULL_JITTER_EQUAL_ON_THROTTLE_VALUE,
+ )
+ retry_strategy_builder.add_service_error_check(
+ service_error_retry_config={
+ 429: [],
+ 400: ["QuotaExceeded", "LimitExceeded"],
+ 409: ["Conflict"],
+ },
+ service_error_retry_on_any_5xx=True,
+ )
+ return retry_strategy_builder.get_retry_strategy()
+
+
+def call_with_backoff(fn, **kwargs):
+ if "retry_strategy" not in kwargs:
+ kwargs["retry_strategy"] = _get_retry_strategy()
+ try:
+ return fn(**kwargs)
+ except TypeError as te:
+ if "unexpected keyword argument" in str(te):
+ # to handle older SDKs that did not support retry_strategy
+ del kwargs["retry_strategy"]
+ return fn(**kwargs)
+ else:
+ # A different TypeError (e.g. a validation error raised by the SDK); re-raise it
+ raise
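+
+ # Typical call shape (sketch; the client variable is an assumption). Throttles
+ # (429), QuotaExceeded/LimitExceeded 400s, 409 Conflict and any 5xx are retried
+ # with jittered backoff per _get_retry_strategy():
+ #
+ # vcn = call_with_backoff(
+ # virtual_network_client.get_vcn, vcn_id=module.params["vcn_id"]
+ # ).data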
+
+
+def generic_hash(obj):
+ """
+ Compute a hash of all the fields in the object
+ :param obj: Object whose hash needs to be computed
+ :return: a hash value for the object
+ """
+ total = 0
+ for field in obj.attribute_map.keys():
+ field_value = getattr(obj, field)
+ if isinstance(field_value, list):
+ for value in field_value:
+ total = total + hash(value)
+ elif isinstance(field_value, dict):
+ for k, v in field_value.items():
+ total = total + hash(hash(k) + hash(":") + hash(v))
+ else:
+ total = total + hash(field_value)
+ return total
+
+
+def generic_eq(s, other):
+ if other is None:
+ return False
+ return s.__dict__ == other.__dict__
+
+
+def generate_subclass(parent_class):
+ """Make a class hash-able by generating a subclass with a __hash__ method that returns the sum of all fields within
+ the parent class"""
+ dict_of_method_in_subclass = {
+ "__init__": parent_class.__init__,
+ "__hash__": generic_hash,
+ "__eq__": generic_eq,
+ }
+ subclass_name = "GeneratedSub" + parent_class.__name__
+ generated_sub_class = type(
+ subclass_name, (parent_class,), dict_of_method_in_subclass
+ )
+ return generated_sub_class
+
+
+def create_hashed_instance(class_type):
+ hashed_class = generate_subclass(class_type)
+ return hashed_class()
+
+
+def get_hashed_object_list(class_type, object_with_values, attributes_class_type=None):
+ if object_with_values is None:
+ return None
+ hashed_class_instances = []
+ for object_with_value in object_with_values:
+ hashed_class_instances.append(
+ get_hashed_object(class_type, object_with_value, attributes_class_type)
+ )
+ return hashed_class_instances
+
+
+def get_hashed_object(
+ class_type, object_with_value, attributes_class_type=None, supported_attributes=None
+):
+ """
+ Convert any class instance into a hashable one so that the
+ instances are eligible for the various comparison
+ operations available on set() objects.
+ :param class_type: Any class type whose instances needs to be hashable
+ :param object_with_value: Instance of the class type with values which
+ would be set in the resulting instance
+ :param attributes_class_type: A list of class types of attributes, if attribute is a custom class instance
+ :param supported_attributes: A list of attributes which should be considered while populating the instance
+ with the values in the object. This helps in avoiding new attributes of the class_type which are still not
+ supported by the current implementation.
+ :return: A hashable instance with same state of the provided object_with_value
+ """
+ if object_with_value is None:
+ return None
+
+ HashedClass = generate_subclass(class_type)
+ hashed_class_instance = HashedClass()
+
+ if supported_attributes:
+ class_attributes = list(
+ set(hashed_class_instance.attribute_map) & set(supported_attributes)
+ )
+ else:
+ class_attributes = hashed_class_instance.attribute_map
+
+ for attribute in class_attributes:
+ attribute_value = getattr(object_with_value, attribute)
+ if attributes_class_type:
+ for attribute_class_type in attributes_class_type:
+ if isinstance(attribute_value, attribute_class_type):
+ attribute_value = get_hashed_object(
+ attribute_class_type, attribute_value
+ )
+ hashed_class_instance.__setattr__(attribute, attribute_value)
+
+ return hashed_class_instance
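+
+ # Sketch: make SDK models set-comparable (oci.core.models.RouteRule is an
+ # assumed example type; the rule lists are hypothetical):
+ #
+ # existing = set(get_hashed_object_list(RouteRule, current_rules))
+ # desired = set(get_hashed_object_list(RouteRule, requested_rules))
+ # changed = existing != desired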
+
+
+def update_class_type_attr_difference(
+ update_class_details, existing_instance, attr_name, attr_class, input_attr_value
+):
+ """
+ Checks the difference and updates an attribute which is represented by a class
+ instance. Not applicable if the attribute type is a primitive value.
+ For example, if a class A has an attribute x that is itself a class instance (A.x = X()),
+ only then does this method apply.
+ :param update_class_details The instance which should be updated if there is change in
+ attribute value
+ :param existing_instance The instance whose attribute value is compared with input
+ attribute value
+ :param attr_name Name of the attribute whose value should be compared
+ :param attr_class Class type of the attribute
+ :param input_attr_value The value of the input attribute which should replace the current
+ value in case of a mismatch
+ :return: A boolean value indicating whether attribute value has been replaced
+ """
+ changed = False
+ # Here the existing attribute value is a class instance
+ existing_attr_value = get_hashed_object(
+ attr_class, getattr(existing_instance, attr_name)
+ )
+ if input_attr_value is None:
+ update_class_details.__setattr__(attr_name, existing_attr_value)
+ else:
+ changed = not input_attr_value.__eq__(existing_attr_value)
+ if changed:
+ update_class_details.__setattr__(attr_name, input_attr_value)
+ else:
+ update_class_details.__setattr__(attr_name, existing_attr_value)
+
+ return changed
+
+
+def get_existing_resource(target_fn, module, **kwargs):
+ """
+ Returns the requested resource if it exists based on the input arguments.
+ :param target_fn The function which should be used to find the requested resource
+ :param module Instance of AnsibleModule
+ :param kwargs A map of arguments consisting of values based on which requested resource should be searched
+ :return: Instance of requested resource
+ """
+ existing_resource = None
+ try:
+ response = call_with_backoff(target_fn, **kwargs)
+ existing_resource = response.data
+ except ServiceError as ex:
+ if ex.status != 404:
+ module.fail_json(msg=ex.message)
+
+ return existing_resource
+
+
+def get_attached_instance_info(
+ module, lookup_attached_instance, list_attachments_fn, list_attachments_args
+):
+ config = get_oci_config(module)
+ identity_client = create_service_client(module, IdentityClient)
+
+ volume_attachments = []
+
+ if lookup_attached_instance:
+ # Get all the compartments in the tenancy
+ compartments = to_dict(
+ identity_client.list_compartments(
+ config.get("tenancy"), compartment_id_in_subtree=True
+ ).data
+ )
+ # For each compartment, get the volume attachments for the compartment_id with the other args in
+ # list_attachments_args.
+ for compartment in compartments:
+ list_attachments_args["compartment_id"] = compartment["id"]
+ try:
+ volume_attachments += list_all_resources(
+ list_attachments_fn, **list_attachments_args
+ )
+
+ # Ignore the ServiceError raised when the volume attachments of a compartment cannot be accessed due to an authorization issue
+ except ServiceError as ex:
+ if ex.status == 404:
+ pass
+
+ else:
+ volume_attachments = list_all_resources(
+ list_attachments_fn, **list_attachments_args
+ )
+
+ volume_attachments = to_dict(volume_attachments)
+ # volume_attachments may also contain attachments in the DETACHING or DETACHED state.
+ # Return the attachment in the ATTACHING or ATTACHED state, if any.
+
+ return next(
+ (
+ volume_attachment
+ for volume_attachment in volume_attachments
+ if volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"]
+ ),
+ None,
+ )
+
+
+def check_mode(fn):
+ def wrapper(*args, **kwargs):
+ if os.environ.get("OCI_ANSIBLE_EXPERIMENTAL", None):
+ return fn(*args, **kwargs)
+ return None
+
+ return wrapper
+
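+# Illustrative sketch: a function wrapped with @check_mode is a no-op (returns
+# None) unless the OCI_ANSIBLE_EXPERIMENTAL environment variable is set;
+# delete_resource below is a hypothetical example.
+#
+#   @check_mode
+#   def delete_resource(module):
+#       pass  # only runs when OCI_ANSIBLE_EXPERIMENTAL is set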
+
+def check_and_return_component_list_difference(
+ input_component_list, existing_components, purge_components, delete_components=False
+):
+ if input_component_list:
+ existing_components, changed = get_component_list_difference(
+ input_component_list,
+ existing_components,
+ purge_components,
+ delete_components,
+ )
+ else:
+ existing_components = []
+ changed = True
+ return existing_components, changed
+
+
+def get_component_list_difference(
+ input_component_list, existing_components, purge_components, delete_components=False
+):
+ if delete_components:
+ if existing_components is None:
+ return None, False
+ component_differences = set(existing_components).intersection(
+ set(input_component_list)
+ )
+ if component_differences:
+ return list(set(existing_components) - component_differences), True
+ else:
+ return None, False
+ if existing_components is None:
+ return input_component_list, True
+ if purge_components:
+ components_differences = set(input_component_list).symmetric_difference(
+ set(existing_components)
+ )
+
+ if components_differences:
+ return input_component_list, True
+
+ components_differences = set(input_component_list).difference(
+ set(existing_components)
+ )
+ if components_differences:
+ return list(components_differences) + existing_components, True
+ return None, False
+
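+# Illustrative sketch of the semantics above, with plain string components
+# (result ordering may vary, since sets are used internally):
+#
+#   existing = ["a", "b"]
+#   # purge_components=True replaces the whole list when it differs:
+#   get_component_list_difference(["a", "c"], existing, True)    # (["a", "c"], True)
+#   # purge_components=False merges in only the new components:
+#   get_component_list_difference(["a", "c"], existing, False)   # (["c", "a", "b"], True)
+#   # delete_components=True removes the requested components:
+#   get_component_list_difference(["b"], existing, False, True)  # (["a"], True)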
+
+def write_to_file(path, content):
+ with open(to_bytes(path), "wb") as dest_file:
+ dest_file.write(content)
+
+
+def get_target_resource_from_list(
+ module, list_resource_fn, target_resource_id=None, **kwargs
+):
+ """
+ Returns a resource filtered by identifier from a list of resources. This method
+ should be used as an alternative to a 'get resource' method when one is not
+ provided by the resource API. It returns a partial wrapper of the response
+ object; that wrapper should not be passed to the 'wait_until' utility, as it
+ only mimics part of a real response object.
+ :param module The AnsibleModule representing the options provided by the user
+ :param list_resource_fn The function which lists all the resources
+ :param target_resource_id The identifier of the resource which should be filtered from the list
+ :param kwargs A map of keyword arguments used to look up the requested resource
+ :return: A custom wrapper which partially wraps a response object where the data field contains the target
+ resource, if found.
+ """
+
+ class ResponseWrapper:
+ def __init__(self, data):
+ self.data = data
+
+ try:
+ resources = list_all_resources(list_resource_fn, **kwargs)
+ if resources is not None:
+ for resource in resources:
+ if resource.id == target_resource_id:
+ # Return an object that mimics an OCI response, as oci_utils methods
+ # assume a response-like object
+ return ResponseWrapper(data=resource)
+ return ResponseWrapper(data=None)
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
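+
+# Illustrative sketch (hypothetical names): filtering one resource out of a
+# list API when the service offers no direct 'get' endpoint.
+#
+#   response = get_target_resource_from_list(
+#       module, client.list_boot_volume_attachments,
+#       target_resource_id=module.params["attachment_id"],
+#       compartment_id=module.params["compartment_id"],
+#   )
+#   resource = response.data  # None if nothing matched the identifier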
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/postgres.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/postgres.py
new file mode 100644
index 00000000..f0d6f88e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/postgres.py
@@ -0,0 +1,314 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
+# Most of this was originally added by other creators in the postgresql_user module.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+psycopg2 = None  # This line is needed for unit tests
+try:
+ import psycopg2
+ HAS_PSYCOPG2 = True
+except ImportError:
+ HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+
+def postgres_common_argument_spec():
+ """
+ Return a dictionary with connection options.
+
+ These options are commonly used by most of the PostgreSQL modules.
+ """
+ return dict(
+ login_user=dict(default='postgres'),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(default=''),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ )
+
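+# Illustrative sketch: a module typically extends this spec with its own
+# options before constructing AnsibleModule (option names below are
+# hypothetical).
+#
+#   argument_spec = postgres_common_argument_spec()
+#   argument_spec.update(db=dict(type='str'), session_role=dict(type='str'))
+#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)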
+
+def ensure_required_libs(module):
+ """Check required libraries."""
+ if not HAS_PSYCOPG2:
+ module.fail_json(msg=missing_required_lib('psycopg2'))
+
+ if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
+ module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
+def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
+ """Connect to a PostgreSQL database.
+
+ Return psycopg2 connection object.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ conn_params (dict) -- dictionary with connection parameters
+
+ Kwargs:
+ autocommit (bool) -- commit automatically (default False)
+ fail_on_conn (bool) -- fail if the connection fails, otherwise just warn and return None (default True)
+ """
+ ensure_required_libs(module)
+
+ db_connection = None
+ try:
+ db_connection = psycopg2.connect(**conn_params)
+ if autocommit:
+ if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+ # Switch role, if specified:
+ if module.params.get('session_role'):
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ try:
+ cursor.execute('SET ROLE "%s"' % module.params['session_role'])
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e))
+ finally:
+ cursor.close()
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='PostgreSQL server must be at least '
+ 'version 8.4 to support sslrootcert')
+
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ except Exception as e:
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ return db_connection
+
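+# Illustrative sketch of the usual call chain in a module; conn_params is
+# built from the module parameters by get_conn_params() defined below.
+#
+#   conn_params = get_conn_params(module, module.params)
+#   db_connection = connect_to_db(module, conn_params, autocommit=True)
+#   cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)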
+
+def exec_sql(obj, query, query_params=None, return_bool=False, add_to_executed=True, dont_exec=False):
+ """Execute SQL.
+
+ Auxiliary function for PostgreSQL user classes.
+
+ Returns a query result if possible or a boolean value.
+
+ Args:
+ obj (obj) -- must be an object of a user class.
+ The object must have module (AnsibleModule class object) and
+ cursor (psycopg cursor object) attributes
+ query (str) -- SQL query to execute
+
+ Kwargs:
+ query_params (dict or tuple) -- Query parameters to prevent SQL injections,
+ could be a dict or tuple
+ return_bool (bool) -- return True instead of rows if a query was successfully executed.
+ It's necessary for statements that don't return any result like DDL queries (default False).
+ add_to_executed (bool) -- append the query to obj.executed_queries attribute
+ dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
+ to obj.executed_queries list and return True (default False)
+ """
+
+ if dont_exec:
+ # This is usually needed to return queries in check_mode
+ # without execution
+ query = obj.cursor.mogrify(query, query_params)
+ if add_to_executed:
+ obj.executed_queries.append(query)
+
+ return True
+
+ try:
+ if query_params is not None:
+ obj.cursor.execute(query, query_params)
+ else:
+ obj.cursor.execute(query)
+
+ if add_to_executed:
+ if query_params is not None:
+ obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
+ else:
+ obj.executed_queries.append(query)
+
+ if not return_bool:
+ res = obj.cursor.fetchall()
+ return res
+ return True
+ except Exception as e:
+ obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ return False
+
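+# Illustrative sketch (hypothetical caller): obj must expose module, cursor
+# and executed_queries attributes, as the PgMembership class below does.
+#
+#   rows = exec_sql(self, "SELECT rolname FROM pg_roles WHERE rolname = %(n)s",
+#                   query_params={'n': 'alice'}, add_to_executed=False)
+#   ok = exec_sql(self, 'GRANT "reader" TO "alice"', return_bool=True)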
+
+def get_conn_params(module, params_dict, warn_db_default=True):
+ """Get connection parameters from the passed dictionary.
+
+ Return a dictionary with parameters to connect to PostgreSQL server.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ params_dict (dict) -- dictionary with variables
+
+ Kwargs:
+ warn_db_default (bool) -- warn that the default DB is used (default True)
+ """
+ # To use default values, keyword arguments must be absent, so
+ # check which values are empty and do not include them in the returned dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ # Might be different in the modules:
+ if params_dict.get('db'):
+ params_map['db'] = 'database'
+ elif params_dict.get('database'):
+ params_map['database'] = 'database'
+ elif params_dict.get('login_db'):
+ params_map['login_db'] = 'database'
+ else:
+ if warn_db_default:
+ module.warn('Database name has not been passed, '
+ 'using the default database to connect to.')
+
+ kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
+ if is_localhost and params_dict["login_unix_socket"] != "":
+ kw["host"] = params_dict["login_unix_socket"]
+
+ return kw
+
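+# Illustrative sketch: only non-empty values are mapped to psycopg2 keywords,
+# so login_host='db1', login_user='postgres', port=5432, ssl_mode='prefer'
+# become {'host': 'db1', 'user': 'postgres', 'port': 5432, 'sslmode': 'prefer'};
+# with login_host left empty and login_unix_socket='/var/run/postgresql',
+# the socket path is passed as 'host' instead.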
+
+class PgMembership(object):
+ def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
+ self.module = module
+ self.cursor = cursor
+ self.target_roles = [r.strip() for r in target_roles]
+ self.groups = [r.strip() for r in groups]
+ self.executed_queries = []
+ self.granted = {}
+ self.revoked = {}
+ self.fail_on_role = fail_on_role
+ self.non_existent_roles = []
+ self.changed = False
+ self.__check_roles_exist()
+
+ def grant(self):
+ for group in self.groups:
+ self.granted[group] = []
+
+ for role in self.target_roles:
+ # If the role is already a member of the group, skip it:
+ if self.__check_membership(group, role):
+ continue
+
+ query = 'GRANT "%s" TO "%s"' % (group, role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ if self.changed:
+ self.granted[group].append(role)
+
+ return self.changed
+
+ def revoke(self):
+ for group in self.groups:
+ self.revoked[group] = []
+
+ for role in self.target_roles:
+ # If the role is not a member of the group, skip it:
+ if not self.__check_membership(group, role):
+ continue
+
+ query = 'REVOKE "%s" FROM "%s"' % (group, role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ if self.changed:
+ self.revoked[group].append(role)
+
+ return self.changed
+
+ def __check_membership(self, src_role, dst_role):
+ query = ("SELECT ARRAY(SELECT b.rolname FROM "
+ "pg_catalog.pg_auth_members m "
+ "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(dst_role)s")
+
+ res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
+ membership = []
+ if res:
+ membership = res[0][0]
+
+ if not membership:
+ return False
+
+ if src_role in membership:
+ return True
+
+ return False
+
+ def __check_roles_exist(self):
+ existent_groups = self.__roles_exist(self.groups)
+ existent_roles = self.__roles_exist(self.target_roles)
+
+ for group in self.groups:
+ if group not in existent_groups:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % group)
+ else:
+ self.module.warn("Role %s does not exist, pass" % group)
+ self.non_existent_roles.append(group)
+
+ for role in self.target_roles:
+ if role not in existent_roles:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % role)
+ else:
+ self.module.warn("Role %s does not exist, pass" % role)
+
+ if role not in self.groups:
+ self.non_existent_roles.append(role)
+
+ else:
+ if self.fail_on_role:
+ self.module.exit_json(msg="Role '%s' is a member of role '%s'" % (role, role))
+ else:
+ self.module.warn("Role '%s' is a member of role '%s', pass" % (role, role))
+
+ # Update role lists, excluding non-existent roles:
+ self.groups = [g for g in self.groups if g not in self.non_existent_roles]
+
+ self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
+
+ def __roles_exist(self, roles):
+ tmp = ["'" + x + "'" for x in roles]
+ query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
+ return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/proxmox.py
new file mode 100644
index 00000000..666f8777
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/proxmox.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2020, Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import atexit
+import time
+import re
+import traceback
+
+PROXMOXER_IMP_ERR = None
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+ PROXMOXER_IMP_ERR = traceback.format_exc()
+
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+
+
+def proxmox_auth_argument_spec():
+ return dict(
+ api_host=dict(type='str',
+ required=True,
+ fallback=(env_fallback, ['PROXMOX_HOST'])
+ ),
+ api_user=dict(type='str',
+ required=True,
+ fallback=(env_fallback, ['PROXMOX_USER'])
+ ),
+ api_password=dict(type='str',
+ no_log=True,
+ fallback=(env_fallback, ['PROXMOX_PASSWORD'])
+ ),
+ api_token_id=dict(type='str',
+ no_log=False
+ ),
+ api_token_secret=dict(type='str',
+ no_log=True
+ ),
+ validate_certs=dict(type='bool',
+ default=False
+ ),
+ )
+
+
+def proxmox_to_ansible_bool(value):
+ '''Convert a Proxmox representation of a boolean to an Ansible-friendly bool'''
+ return value == 1
+
+
+class ProxmoxAnsible(object):
+ """Base class for Proxmox modules"""
+ def __init__(self, module):
+ self.module = module
+ self.proxmox_api = self._connect()
+ # Test token validity
+ try:
+ self.proxmox_api.version.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e, exception=traceback.format_exc())
+
+ def _connect(self):
+ api_host = self.module.params['api_host']
+ api_user = self.module.params['api_user']
+ api_password = self.module.params['api_password']
+ api_token_id = self.module.params['api_token_id']
+ api_token_secret = self.module.params['api_token_secret']
+ validate_certs = self.module.params['validate_certs']
+
+ auth_args = {'user': api_user}
+ if api_password:
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ except Exception as e:
+ self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
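+
+# Illustrative sketch (hypothetical module): concrete Proxmox modules build on
+# this base class roughly as follows.
+#
+#   module = AnsibleModule(
+#       argument_spec=proxmox_auth_argument_spec(),
+#       required_one_of=[('api_password', 'api_token_id')],
+#       required_together=[('api_token_id', 'api_token_secret')],
+#   )
+#   proxmox = ProxmoxAnsible(module)
+#   nodes = proxmox.proxmox_api.nodes.get()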
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/pure.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/pure.py
new file mode 100644
index 00000000..ebd41b1c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/pure.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest
+except ImportError:
+ HAS_PURITY_FB = False
+
+from functools import wraps
+from os import environ
+from os import path
+import platform
+
+VERSION = 1.2
+USER_AGENT_BASE = 'Ansible'
+API_AGENT_VERSION = 1.5
+
+
+def get_system(module):
+ """Return System Object or Fail"""
+ user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+ 'base': USER_AGENT_BASE,
+ 'class': __name__,
+ 'version': VERSION,
+ 'platform': platform.platform()
+ }
+ array_name = module.params['fa_url']
+ api = module.params['api_token']
+
+ if array_name and api:
+ system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent)
+ elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'):
+ system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent)
+ else:
+ module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")
+ try:
+ system.get()
+ except Exception:
+ module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
+ return system
+
+
+def get_blade(module):
+ """Return System Object or Fail"""
+ user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+ 'base': USER_AGENT_BASE,
+ 'class': __name__,
+ 'version': VERSION,
+ 'platform': platform.platform()
+ }
+ blade_name = module.params['fb_url']
+ api = module.params['api_token']
+
+ if blade_name and api:
+ blade = PurityFb(blade_name)
+ blade.disable_verify_ssl()
+ try:
+ blade.login(api)
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except rest.ApiException as e:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'):
+ blade = PurityFb(environ.get('PUREFB_URL'))
+ blade.disable_verify_ssl()
+ try:
+ blade.login(environ.get('PUREFB_API'))
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except rest.ApiException as e:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ else:
+ module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments")
+ return blade
+
+
+def purefa_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fa_url=dict(),
+ api_token=dict(no_log=True),
+ )
+
+
+def purefb_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fb_url=dict(),
+ api_token=dict(no_log=True),
+ )
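+
+# Illustrative sketch: a FlashArray module combines the base spec with its own
+# options, then obtains an array handle (the 'name' option is hypothetical).
+#
+#   argument_spec = purefa_argument_spec()
+#   argument_spec.update(name=dict(type='str', required=True))
+#   module = AnsibleModule(argument_spec=argument_spec)
+#   array = get_system(module)  # fails the module if authentication fails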
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/rax.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/rax.py
new file mode 100644
index 00000000..e8c455e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/rax.py
@@ -0,0 +1,315 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by
+# Ansible still belong to the author of the module, and may assign their own
+# license to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import re
+from uuid import UUID
+
+from ansible.module_utils.six import text_type, binary_type
+
+FINAL_STATUSES = ('ACTIVE', 'ERROR')
+VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
+ 'error', 'error_deleting')
+
+CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
+ 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
+CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
+ 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
+ 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
+
+NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
+PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
+SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
+
+
+def rax_slugify(value):
+ """Prepend a key with rax_ and normalize the key name"""
+ return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+
+
+def rax_clb_node_to_dict(obj):
+ """Function to convert a CLB Node object to a dict"""
+ if not obj:
+ return {}
+ node = obj.to_dict()
+ node['id'] = obj.id
+ node['weight'] = obj.weight
+ return node
+
+
+def rax_to_dict(obj, obj_type='standard'):
+ """Generic function to convert a pyrax object to a dict
+
+ obj_type values:
+ standard
+ clb
+ server
+
+ """
+ instance = {}
+ for key in dir(obj):
+ value = getattr(obj, key)
+ if obj_type == 'clb' and key == 'nodes':
+ instance[key] = []
+ for node in value:
+ instance[key].append(rax_clb_node_to_dict(node))
+ elif (isinstance(value, list) and len(value) > 0 and
+ not isinstance(value[0], NON_CALLABLES)):
+ instance[key] = []
+ for item in value:
+ instance[key].append(rax_to_dict(item))
+ elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
+ if obj_type == 'server':
+ if key == 'image':
+ if not value:
+ instance['rax_boot_source'] = 'volume'
+ else:
+ instance['rax_boot_source'] = 'local'
+ key = rax_slugify(key)
+ instance[key] = value
+
+ if obj_type == 'server':
+ for attr in ['id', 'accessIPv4', 'name', 'status']:
+ instance[attr] = instance.get(rax_slugify(attr))
+
+ return instance
+
+
+def rax_find_bootable_volume(module, rax_module, server, exit=True):
+ """Find a servers bootable volume"""
+ cs = rax_module.cloudservers
+ cbs = rax_module.cloud_blockstorage
+ server_id = rax_module.utils.get_id(server)
+ volumes = cs.volumes.get_server_volumes(server_id)
+ bootable_volumes = []
+ for volume in volumes:
+ vol = cbs.get(volume)
+ if module.boolean(vol.bootable):
+ bootable_volumes.append(vol)
+ if not bootable_volumes:
+ if exit:
+ module.fail_json(msg='No bootable volumes could be found for '
+ 'server %s' % server_id)
+ else:
+ return False
+ elif len(bootable_volumes) > 1:
+ if exit:
+ module.fail_json(msg='Multiple bootable volumes found for server '
+ '%s' % server_id)
+ else:
+ return False
+
+ return bootable_volumes[0]
+
+
+def rax_find_image(module, rax_module, image, exit=True):
+ """Find a server image by ID or Name"""
+ cs = rax_module.cloudservers
+ try:
+ UUID(image)
+ except ValueError:
+ try:
+ image = cs.images.find(human_id=image)
+ except(cs.exceptions.NotFound,
+ cs.exceptions.NoUniqueMatch):
+ try:
+ image = cs.images.find(name=image)
+ except (cs.exceptions.NotFound,
+ cs.exceptions.NoUniqueMatch):
+ if exit:
+ module.fail_json(msg='No matching image found (%s)' %
+ image)
+ else:
+ return False
+
+ return rax_module.utils.get_id(image)
+
+
+def rax_find_volume(module, rax_module, name):
+ """Find a Block storage volume by ID or name"""
+ cbs = rax_module.cloud_blockstorage
+ try:
+ UUID(name)
+ volume = cbs.get(name)
+ except ValueError:
+ try:
+ volume = cbs.find(name=name)
+ except rax_module.exc.NotFound:
+ volume = None
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ return volume
+
+
+def rax_find_network(module, rax_module, network):
+ """Find a cloud network by ID or name"""
+ cnw = rax_module.cloud_networks
+ try:
+ UUID(network)
+ except ValueError:
+ if network.lower() == 'public':
+ return cnw.get_server_networks(PUBLIC_NET_ID)
+ elif network.lower() == 'private':
+ return cnw.get_server_networks(SERVICE_NET_ID)
+ else:
+ try:
+ network_obj = cnw.find_network_by_label(network)
+ except (rax_module.exceptions.NetworkNotFound,
+ rax_module.exceptions.NetworkLabelNotUnique):
+ module.fail_json(msg='No matching network found (%s)' %
+ network)
+ else:
+ return cnw.get_server_networks(network_obj)
+ else:
+ return cnw.get_server_networks(network)
+
+
+def rax_find_server(module, rax_module, server):
+ """Find a Cloud Server by ID or name"""
+ cs = rax_module.cloudservers
+ try:
+ UUID(server)
+ server = cs.servers.get(server)
+ except ValueError:
+ servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
+ if not servers:
+ module.fail_json(msg='No Server was matched by name, '
+ 'try using the Server ID instead')
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers matched by name, '
+ 'try using the Server ID instead')
+
+ # We made it this far, grab the first and hopefully only server
+ # in the list
+ server = servers[0]
+ return server
+
+
+def rax_find_loadbalancer(module, rax_module, loadbalancer):
+ """Find a Cloud Load Balancer by ID or name"""
+ clb = rax_module.cloud_loadbalancers
+ try:
+ found = clb.get(loadbalancer)
+ except Exception:
+ found = []
+ for lb in clb.list():
+ if loadbalancer == lb.name:
+ found.append(lb)
+
+ if not found:
+ module.fail_json(msg='No loadbalancer was matched')
+
+ if len(found) > 1:
+ module.fail_json(msg='Multiple loadbalancers matched')
+
+ # We made it this far, grab the first and hopefully only item
+ # in the list
+ found = found[0]
+
+ return found
+
+
+def rax_argument_spec():
+ """Return standard base dictionary used for the argument_spec
+ argument in AnsibleModule
+
+ """
+ return dict(
+ api_key=dict(type='str', aliases=['password'], no_log=True),
+ auth_endpoint=dict(type='str'),
+ credentials=dict(type='path', aliases=['creds_file']),
+ env=dict(type='str'),
+ identity_type=dict(type='str', default='rackspace'),
+ region=dict(type='str'),
+ tenant_id=dict(type='str'),
+ tenant_name=dict(type='str'),
+ username=dict(type='str'),
+ validate_certs=dict(type='bool', aliases=['verify_ssl']),
+ )
+
+
+def rax_required_together():
+ """Return the default list used for the required_together argument to
+ AnsibleModule"""
+ return [['api_key', 'username']]
+
+
+def setup_rax_module(module, rax_module, region_required=True):
+ """Set up pyrax in a standard way for all modules"""
+ rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
+ rax_module.USER_AGENT)
+
+ api_key = module.params.get('api_key')
+ auth_endpoint = module.params.get('auth_endpoint')
+ credentials = module.params.get('credentials')
+ env = module.params.get('env')
+ identity_type = module.params.get('identity_type')
+ region = module.params.get('region')
+ tenant_id = module.params.get('tenant_id')
+ tenant_name = module.params.get('tenant_name')
+ username = module.params.get('username')
+ verify_ssl = module.params.get('validate_certs')
+
+ if env is not None:
+ rax_module.set_environment(env)
+
+ rax_module.set_setting('identity_type', identity_type)
+ if verify_ssl is not None:
+ rax_module.set_setting('verify_ssl', verify_ssl)
+ if auth_endpoint is not None:
+ rax_module.set_setting('auth_endpoint', auth_endpoint)
+ if tenant_id is not None:
+ rax_module.set_setting('tenant_id', tenant_id)
+ if tenant_name is not None:
+ rax_module.set_setting('tenant_name', tenant_name)
+
+ try:
+ username = username or os.environ.get('RAX_USERNAME')
+ if not username:
+ username = rax_module.get_setting('keyring_username')
+ if username:
+ api_key = 'USE_KEYRING'
+ if not api_key:
+ api_key = os.environ.get('RAX_API_KEY')
+ credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
+ os.environ.get('RAX_CREDS_FILE'))
+ region = (region or os.environ.get('RAX_REGION') or
+ rax_module.get_setting('region'))
+ except KeyError as e:
+ module.fail_json(msg='Unable to load %s' % e.message)
+
+ try:
+ if api_key and username:
+ if api_key == 'USE_KEYRING':
+ rax_module.keyring_auth(username, region=region)
+ else:
+ rax_module.set_credentials(username, api_key=api_key,
+ region=region)
+ elif credentials:
+ credentials = os.path.expanduser(credentials)
+ rax_module.set_credential_file(credentials, region=region)
+ else:
+ raise Exception('No credentials supplied!')
+ except Exception as e:
+ if e.message:
+ msg = str(e.message)
+ else:
+ msg = repr(e)
+ module.fail_json(msg=msg)
+
+ if region_required and region not in rax_module.regions:
+ module.fail_json(msg='%s is not a valid region, must be one of: %s' %
+ (region, ','.join(rax_module.regions)))
+
+ return rax_module
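+
+# Illustrative sketch: typical boilerplate in a rax module built from the
+# helpers above (assumes pyrax is imported by the calling module).
+#
+#   argument_spec = rax_argument_spec()
+#   module = AnsibleModule(argument_spec=argument_spec,
+#                          required_together=rax_required_together())
+#   pyrax = setup_rax_module(module, pyrax)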
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redfish_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
new file mode 100644
index 00000000..8f14dbad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
@@ -0,0 +1,2694 @@
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+from ansible.module_utils.urls import open_url
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves import http_client
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
+POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
+ 'OData-Version': '4.0'}
+PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
+ 'OData-Version': '4.0'}
+DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
+
+DEPRECATE_MSG = 'Issuing a data modification command without specifying the '\
+ 'ID of the target %(resource)s resource when there is more '\
+ 'than one %(resource)s will use the first one in the '\
+ 'collection. Use the `resource_id` option to specify the '\
+ 'target %(resource)s ID'
+
+
+class RedfishUtils(object):
+
+ def __init__(self, creds, root_uri, timeout, module, resource_id=None,
+ data_modification=False):
+ self.root_uri = root_uri
+ self.creds = creds
+ self.timeout = timeout
+ self.module = module
+ self.service_root = '/redfish/v1/'
+ self.resource_id = resource_id
+ self.data_modification = data_modification
+ self._init_session()
+
+ # The following functions are to send GET/POST/PATCH/DELETE requests
+ def get_request(self, uri):
+ try:
+ resp = open_url(uri, method="GET", headers=GET_HEADERS,
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ data = json.loads(to_native(resp.read()))
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'data': data, 'headers': headers}
+
+ def post_request(self, uri, pyld):
+ try:
+ resp = open_url(uri, data=json.dumps(pyld),
+ headers=POST_HEADERS, method="POST",
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ def patch_request(self, uri, pyld):
+ headers = PATCH_HEADERS
+ r = self.get_request(uri)
+ if r['ret']:
+ # Get etag from etag header or @odata.etag property
+ etag = r['headers'].get('etag')
+ if not etag:
+ etag = r['data'].get('@odata.etag')
+ if etag:
+ # Make copy of headers and add If-Match header
+ headers = dict(headers)
+ headers['If-Match'] = etag
+ try:
+ resp = open_url(uri, data=json.dumps(pyld),
+ headers=headers, method="PATCH",
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ def delete_request(self, uri, pyld=None):
+ try:
+ data = json.dumps(pyld) if pyld else None
+ resp = open_url(uri, data=data,
+ headers=DELETE_HEADERS, method="DELETE",
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ @staticmethod
+ def _get_extended_message(error):
+ """
+ Get Redfish ExtendedInfo message from response payload if present
+ :param error: an HTTPError exception
+ :type error: HTTPError
+ :return: the ExtendedInfo message if present, else standard HTTP error
+ """
+ msg = http_client.responses.get(error.code, '')
+ if error.code >= 400:
+ try:
+ body = error.read().decode('utf-8')
+ data = json.loads(body)
+ ext_info = data['error']['@Message.ExtendedInfo']
+ msg = ext_info[0]['Message']
+ except Exception:
+ pass
+ return msg
+
+ def _init_session(self):
+ pass
+
+ def _find_accountservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'AccountService' not in data:
+ return {'ret': False, 'msg': "AccountService resource not found"}
+ else:
+ account_service = data["AccountService"]["@odata.id"]
+ response = self.get_request(self.root_uri + account_service)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ accounts = data['Accounts']['@odata.id']
+ if accounts[-1:] == '/':
+ accounts = accounts[:-1]
+ self.accounts_uri = accounts
+ return {'ret': True}
+
+ def _find_sessionservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'SessionService' not in data:
+ return {'ret': False, 'msg': "SessionService resource not found"}
+ else:
+ session_service = data["SessionService"]["@odata.id"]
+ response = self.get_request(self.root_uri + session_service)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ sessions = data['Sessions']['@odata.id']
+ if sessions[-1:] == '/':
+ sessions = sessions[:-1]
+ self.sessions_uri = sessions
+ return {'ret': True}
+
+ def _get_resource_uri_by_id(self, uris, id_prop):
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ if id_prop == data.get('Id'):
+ return uri
+ return None
+
+ def _find_systems_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Systems' not in data:
+ return {'ret': False, 'msg': "Systems resource not found"}
+ response = self.get_request(self.root_uri + data['Systems']['@odata.id'])
+ if response['ret'] is False:
+ return response
+ self.systems_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.systems_uris:
+ return {
+ 'ret': False,
+ 'msg': "ComputerSystem's Members array is either empty or missing"}
+ self.systems_uri = self.systems_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.systems_uri = self._get_resource_uri_by_id(self.systems_uris,
+ self.resource_id)
+ if not self.systems_uri:
+ return {
+ 'ret': False,
+ 'msg': "System resource %s not found" % self.resource_id}
+ elif len(self.systems_uris) > 1:
+ self.module.deprecate(DEPRECATE_MSG % {'resource': 'System'},
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+ return {'ret': True}
+
+ def _find_updateservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'UpdateService' not in data:
+ return {'ret': False, 'msg': "UpdateService resource not found"}
+ else:
+ update = data["UpdateService"]["@odata.id"]
+ self.update_uri = update
+ response = self.get_request(self.root_uri + update)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ self.firmware_uri = self.software_uri = None
+ if 'FirmwareInventory' in data:
+ self.firmware_uri = data['FirmwareInventory'][u'@odata.id']
+ if 'SoftwareInventory' in data:
+ self.software_uri = data['SoftwareInventory'][u'@odata.id']
+ return {'ret': True}
+
+ def _find_chassis_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Chassis' not in data:
+ return {'ret': False, 'msg': "Chassis resource not found"}
+ chassis = data["Chassis"]["@odata.id"]
+ response = self.get_request(self.root_uri + chassis)
+ if response['ret'] is False:
+ return response
+ self.chassis_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.chassis_uris:
+ return {'ret': False,
+ 'msg': "Chassis Members array is either empty or missing"}
+ self.chassis_uri = self.chassis_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris,
+ self.resource_id)
+ if not self.chassis_uri:
+ return {
+ 'ret': False,
+ 'msg': "Chassis resource %s not found" % self.resource_id}
+ elif len(self.chassis_uris) > 1:
+ self.module.deprecate(DEPRECATE_MSG % {'resource': 'Chassis'},
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+ return {'ret': True}
+
+ def _find_managers_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Managers' not in data:
+ return {'ret': False, 'msg': "Manager resource not found"}
+ manager = data["Managers"]["@odata.id"]
+ response = self.get_request(self.root_uri + manager)
+ if response['ret'] is False:
+ return response
+ self.manager_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.manager_uris:
+ return {'ret': False,
+ 'msg': "Managers Members array is either empty or missing"}
+ self.manager_uri = self.manager_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.manager_uri = self._get_resource_uri_by_id(self.manager_uris,
+ self.resource_id)
+ if not self.manager_uri:
+ return {
+ 'ret': False,
+ 'msg': "Manager resource %s not found" % self.resource_id}
+ elif len(self.manager_uris) > 1:
+ self.module.deprecate(DEPRECATE_MSG % {'resource': 'Manager'},
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+ return {'ret': True}
+
+ def _get_all_action_info_values(self, action):
+ """Retrieve all parameter values for an Action from ActionInfo.
+ Fall back to AllowableValue annotations if no ActionInfo found.
+ Return the result in an ActionInfo-like dictionary, keyed
+ by the name of the parameter. """
+ ai = {}
+ if '@Redfish.ActionInfo' in action:
+ ai_uri = action['@Redfish.ActionInfo']
+ response = self.get_request(self.root_uri + ai_uri)
+ if response['ret'] is True:
+ data = response['data']
+ if 'Parameters' in data:
+ params = data['Parameters']
+ ai = dict((p['Name'], p)
+ for p in params if 'Name' in p)
+ if not ai:
+ ai = dict((k[:-24],
+ {'AllowableValues': v}) for k, v in action.items()
+ if k.endswith('@Redfish.AllowableValues'))
+ return ai
+
+ def _get_allowable_values(self, action, name, default_values=None):
+ if default_values is None:
+ default_values = []
+ ai = self._get_all_action_info_values(action)
+ allowable_values = ai.get(name, {}).get('AllowableValues')
+ # fallback to default values
+ if allowable_values is None:
+ allowable_values = default_values
+ return allowable_values
+
+ def get_logs(self):
+ log_svcs_uri_list = []
+ list_of_logs = []
+ properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat',
+ 'Message', 'MessageId', 'MessageArgs']
+
+ # Find LogService
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'LogServices' not in data:
+ return {'ret': False, 'msg': "LogServices resource not found"}
+
+ # Find all entries in LogServices
+ logs_uri = data["LogServices"]["@odata.id"]
+ response = self.get_request(self.root_uri + logs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ for log_svcs_entry in data.get('Members', []):
+ response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id'])
+ if response['ret'] is False:
+ return response
+ _data = response['data']
+ if 'Entries' in _data:
+ log_svcs_uri_list.append(_data['Entries'][u'@odata.id'])
+
+ # For each entry in LogServices, get log name and all log entries
+ for log_svcs_uri in log_svcs_uri_list:
+ logs = {}
+ list_of_log_entries = []
+ response = self.get_request(self.root_uri + log_svcs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ logs['Description'] = data.get('Description',
+ 'Collection of log entries')
+ # Get all log entries for each type of log found
+ for logEntry in data.get('Members', []):
+ entry = {}
+ for prop in properties:
+ if prop in logEntry:
+ entry[prop] = logEntry.get(prop)
+ if entry:
+ list_of_log_entries.append(entry)
+ log_name = log_svcs_uri.split('/')[-1]
+ logs[log_name] = list_of_log_entries
+ list_of_logs.append(logs)
+
+ # list_of_logs[logs{list_of_log_entries[entry{}]}]
+ return {'ret': True, 'entries': list_of_logs}
+
+ def clear_logs(self):
+ # Find LogService
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'LogServices' not in data:
+ return {'ret': False, 'msg': "LogServices resource not found"}
+
+ # Find all entries in LogServices
+ logs_uri = data["LogServices"]["@odata.id"]
+ response = self.get_request(self.root_uri + logs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for log_svcs_entry in data[u'Members']:
+ response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"])
+ if response['ret'] is False:
+ return response
+ _data = response['data']
+ # Check to make sure the option is available, otherwise the error is ugly
+ if "Actions" in _data:
+ if "#LogService.ClearLog" in _data[u"Actions"]:
+ response = self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {})
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def aggregate(self, func, uri_list, uri_name):
+ ret = True
+ entries = []
+ for uri in uri_list:
+ inventory = func(uri)
+ ret = inventory.pop('ret') and ret
+ if 'entries' in inventory:
+ entries.append(({uri_name: uri},
+ inventory['entries']))
+ return dict(ret=ret, entries=entries)
+
+ def aggregate_chassis(self, func):
+ return self.aggregate(func, self.chassis_uris, 'chassis_uri')
+
+ def aggregate_managers(self, func):
+ return self.aggregate(func, self.manager_uris, 'manager_uri')
+
+ def aggregate_systems(self, func):
+ return self.aggregate(func, self.systems_uris, 'system_uri')
+
+ def get_storage_controller_inventory(self, systems_uri):
+ result = {}
+ controller_list = []
+ controller_results = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
+ 'Location', 'Manufacturer', 'Model', 'Name', 'Id',
+ 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
+ key = "StorageControllers"
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'Storage' not in data:
+ return {'ret': False, 'msg': "Storage resource not found"}
+
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data['Storage']["@odata.id"]
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Loop through Members and their StorageControllers
+ # and gather properties from each StorageController
+ if data[u'Members']:
+ for storage_member in data[u'Members']:
+ storage_member_uri = storage_member[u'@odata.id']
+ response = self.get_request(self.root_uri + storage_member_uri)
+ data = response['data']
+
+ if key in data:
+ controller_list = data[key]
+ for controller in controller_list:
+ controller_result = {}
+ for property in properties:
+ if property in controller:
+ controller_result[property] = controller[property]
+ controller_results.append(controller_result)
+ result['entries'] = controller_results
+ return result
+ else:
+ return {'ret': False, 'msg': "Storage resource not found"}
+
+ def get_multi_storage_controller_inventory(self):
+ return self.aggregate_systems(self.get_storage_controller_inventory)
+
+ def get_disk_inventory(self, systems_uri):
+ result = {'entries': []}
+ controller_list = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes',
+ 'EncryptionAbility', 'EncryptionStatus',
+ 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers',
+ 'Manufacturer', 'MediaType', 'Model', 'Name',
+ 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision',
+ 'RotationSpeedRPM', 'SerialNumber', 'Status']
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'SimpleStorage' not in data and 'Storage' not in data:
+ return {'ret': False, 'msg': "SimpleStorage and Storage resource \
+ not found"}
+
+ if 'Storage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data[u'Storage'][u'@odata.id']
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if data[u'Members']:
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ controller_name = 'Controller 1'
+ if 'StorageControllers' in data:
+ sc = data['StorageControllers']
+ if sc:
+ if 'Name' in sc[0]:
+ controller_name = sc[0]['Name']
+ else:
+ sc_id = sc[0].get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ drive_results = []
+ if 'Drives' in data:
+ for device in data[u'Drives']:
+ disk_uri = self.root_uri + device[u'@odata.id']
+ response = self.get_request(disk_uri)
+ data = response['data']
+
+ drive_result = {}
+ for property in properties:
+ if property in data:
+ if data[property] is not None:
+ drive_result[property] = data[property]
+ drive_results.append(drive_result)
+ drives = {'Controller': controller_name,
+ 'Drives': drive_results}
+ result["entries"].append(drives)
+
+ if 'SimpleStorage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data["SimpleStorage"]["@odata.id"]
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Name' in data:
+ controller_name = data['Name']
+ else:
+ sc_id = data.get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ drive_results = []
+ for device in data[u'Devices']:
+ drive_result = {}
+ for property in properties:
+ if property in device:
+ drive_result[property] = device[property]
+ drive_results.append(drive_result)
+ drives = {'Controller': controller_name,
+ 'Drives': drive_results}
+ result["entries"].append(drives)
+
+ return result
+
+ def get_multi_disk_inventory(self):
+ return self.aggregate_systems(self.get_disk_inventory)
+
+ def get_volume_inventory(self, systems_uri):
+ result = {'entries': []}
+ controller_list = []
+ volume_list = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes',
+ 'Capacity', 'CapacityBytes', 'CapacitySources',
+ 'Encrypted', 'EncryptionTypes', 'Identifiers',
+ 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities',
+ 'AllocatedPools', 'Status']
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'SimpleStorage' not in data and 'Storage' not in data:
+ return {'ret': False, 'msg': "SimpleStorage and Storage resource \
+ not found"}
+
+ if 'Storage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data[u'Storage'][u'@odata.id']
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if data.get('Members'):
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ controller_name = 'Controller 1'
+ if 'StorageControllers' in data:
+ sc = data['StorageControllers']
+ if sc:
+ if 'Name' in sc[0]:
+ controller_name = sc[0]['Name']
+ else:
+ sc_id = sc[0].get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ volume_results = []
+ if 'Volumes' in data:
+ # Get a list of all volumes and build respective URIs
+ volumes_uri = data[u'Volumes'][u'@odata.id']
+ response = self.get_request(self.root_uri + volumes_uri)
+ data = response['data']
+
+ if data.get('Members'):
+ for volume in data[u'Members']:
+ volume_list.append(volume[u'@odata.id'])
+ for v in volume_list:
+ uri = self.root_uri + v
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ volume_result = {}
+ for property in properties:
+ if property in data:
+ if data[property] is not None:
+ volume_result[property] = data[property]
+
+ # Get related Drives Id
+ drive_id_list = []
+ if 'Links' in data:
+ if 'Drives' in data[u'Links']:
+ for link in data[u'Links'][u'Drives']:
+ drive_id_link = link[u'@odata.id']
+ drive_id = drive_id_link.split("/")[-1]
+ drive_id_list.append({'Id': drive_id})
+ volume_result['Linked_drives'] = drive_id_list
+ volume_results.append(volume_result)
+ volumes = {'Controller': controller_name,
+ 'Volumes': volume_results}
+ result["entries"].append(volumes)
+ else:
+ return {'ret': False, 'msg': "Storage resource not found"}
+
+ return result
+
+ def get_multi_volume_inventory(self):
+ return self.aggregate_systems(self.get_volume_inventory)
+
+ def manage_indicator_led(self, command):
+ result = {}
+ key = 'IndicatorLED'
+
+ payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'}
+
+ result = {}
+ response = self.get_request(self.root_uri + self.chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ if command in payloads.keys():
+ payload = {'IndicatorLED': payloads[command]}
+ response = self.patch_request(self.root_uri + self.chassis_uri, payload)
+ if response['ret'] is False:
+ return response
+ else:
+ return {'ret': False, 'msg': 'Invalid command'}
+
+ return result
+
+ def _map_reset_type(self, reset_type, allowable_values):
+ equiv_types = {
+ 'On': 'ForceOn',
+ 'ForceOn': 'On',
+ 'ForceOff': 'GracefulShutdown',
+ 'GracefulShutdown': 'ForceOff',
+ 'GracefulRestart': 'ForceRestart',
+ 'ForceRestart': 'GracefulRestart'
+ }
+
+ if reset_type in allowable_values:
+ return reset_type
+ if reset_type not in equiv_types:
+ return reset_type
+ mapped_type = equiv_types[reset_type]
+ if mapped_type in allowable_values:
+ return mapped_type
+ return reset_type
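+
+ # Illustrative sketch: if a service only advertises ['On', 'ForceOff',
+ # 'ForceRestart'], a requested 'GracefulRestart' maps to its closest
+ # advertised equivalent:
+ #
+ #   self._map_reset_type('GracefulRestart', ['On', 'ForceOff', 'ForceRestart'])
+ #   # -> 'ForceRestart'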
+
+ def manage_system_power(self, command):
+ return self.manage_power(command, self.systems_uri,
+ '#ComputerSystem.Reset')
+
+ def manage_manager_power(self, command):
+ return self.manage_power(command, self.manager_uri,
+ '#Manager.Reset')
+
+ def manage_power(self, command, resource_uri, action_name):
+ key = "Actions"
+ reset_type_values = ['On', 'ForceOff', 'GracefulShutdown',
+ 'GracefulRestart', 'ForceRestart', 'Nmi',
+ 'ForceOn', 'PushPowerButton', 'PowerCycle']
+
+ # command should be PowerOn, PowerForceOff, etc.
+ if not command.startswith('Power'):
+ return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
+ reset_type = command[5:]
+
+ # map Reboot to a ResetType that does a reboot
+ if reset_type == 'Reboot':
+ reset_type = 'GracefulRestart'
+
+ if reset_type not in reset_type_values:
+ return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
+
+ # read the resource and get the current power state
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ power_state = data.get('PowerState')
+
+ # if power is already in target state, nothing to do
+ if power_state == "On" and reset_type in ['On', 'ForceOn']:
+ return {'ret': True, 'changed': False}
+ if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']:
+ return {'ret': True, 'changed': False}
+
+ # get the reset Action and target URI
+ if key not in data or action_name not in data[key]:
+ return {'ret': False, 'msg': 'Action %s not found' % action_name}
+ reset_action = data[key][action_name]
+ if 'target' not in reset_action:
+ return {'ret': False,
+ 'msg': 'target URI missing from Action %s' % action_name}
+ action_uri = reset_action['target']
+
+ # get AllowableValues
+ ai = self._get_all_action_info_values(reset_action)
+ allowable_values = ai.get('ResetType', {}).get('AllowableValues', [])
+
+ # map ResetType to an allowable value if needed
+ if reset_type not in allowable_values:
+ reset_type = self._map_reset_type(reset_type, allowable_values)
+
+ # define payload
+ payload = {'ResetType': reset_type}
+
+ # POST to Action URI
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True}
+
+ def _find_account_uri(self, username=None, acct_id=None):
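+ # Walk the Accounts collection and return the first account matching the
+ # given UserName or Id, together with its data, headers and URI.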
+ if not any((username, acct_id)):
+ return {'ret': False, 'msg':
+ 'Must provide either account_id or account_username'}
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ headers = response['headers']
+ if username:
+ if username == data.get('UserName'):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+ if acct_id:
+ if acct_id == data.get('Id'):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+
+ return {'ret': False, 'no_match': True, 'msg':
+ 'No account with the given account_id or account_username found'}
+
+ def _find_empty_account_slot(self):
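+ # An empty slot is an account entry with a blank UserName that is
+ # disabled; such a slot can later be claimed via PATCH.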
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+ if uris:
+ # first slot may be reserved, so move to end of list
+ uris += [uris.pop(0)]
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ headers = response['headers']
+ if data.get('UserName') == "" and not data.get('Enabled', True):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+
+ return {'ret': False, 'no_match': True, 'msg':
+ 'No empty account slot found'}
+
+ def list_users(self):
+ result = {}
+ user_list = []
+ users_results = []
+ # Get these entries, but do not fail if not found
+ properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled']
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for member in data.get('Members', []):
+ user_list.append(member[u'@odata.id']) # user_list entries are URIs
+
+ # for each user, get details
+ for uri in user_list:
+ user = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ user[property] = data[property]
+
+ users_results.append(user)
+ result["entries"] = users_results
+ return result
+
+ def add_user_via_patch(self, user):
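+ # Fallback for services that do not support POST to the Accounts
+ # collection: claim the requested Id (or the first empty slot) and PATCH
+ # the user details into it.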
+ if user.get('account_id'):
+ # If Id slot specified, use it
+ response = self._find_account_uri(acct_id=user.get('account_id'))
+ else:
+ # Otherwise find first empty slot
+ response = self._find_empty_account_slot()
+
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ payload = {}
+ if user.get('account_username'):
+ payload['UserName'] = user.get('account_username')
+ if user.get('account_password'):
+ payload['Password'] = user.get('account_password')
+ if user.get('account_roleid'):
+ payload['RoleId'] = user.get('account_roleid')
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def add_user(self, user):
+ if not user.get('account_username'):
+ return {'ret': False, 'msg':
+ 'Must provide account_username for AddUser command'}
+
+ response = self._find_account_uri(username=user.get('account_username'))
+ if response['ret']:
+ # account_username already exists, nothing to do
+ return {'ret': True, 'changed': False}
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if not response['ret']:
+ return response
+ headers = response['headers']
+
+ if 'allow' in headers:
+ methods = [m.strip() for m in headers.get('allow').split(',')]
+ if 'POST' not in methods:
+ # if Allow header present and POST not listed, add via PATCH
+ return self.add_user_via_patch(user)
+
+ payload = {}
+ if user.get('account_username'):
+ payload['UserName'] = user.get('account_username')
+ if user.get('account_password'):
+ payload['Password'] = user.get('account_password')
+ if user.get('account_roleid'):
+ payload['RoleId'] = user.get('account_roleid')
+
+ response = self.post_request(self.root_uri + self.accounts_uri, payload)
+ if not response['ret']:
+ if response.get('status') == 405:
+ # if POST returned a 405, try to add via PATCH
+ return self.add_user_via_patch(user)
+ else:
+ return response
+ return {'ret': True}
+
+ def enable_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if data.get('Enabled', True):
+ # account already enabled, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'Enabled': True}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def delete_user_via_patch(self, user, uri=None, data=None):
+ if not uri:
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if data and data.get('UserName') == '' and not data.get('Enabled', False):
+ # account UserName already cleared, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'UserName': ''}
+ if data and data.get('Enabled', False):
+ payload['Enabled'] = False
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def delete_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ if response.get('no_match'):
+ # account does not exist, nothing to do
+ return {'ret': True, 'changed': False}
+ else:
+ # some error encountered
+ return response
+
+ uri = response['uri']
+ headers = response['headers']
+ data = response['data']
+
+ if 'allow' in headers:
+ methods = [m.strip() for m in headers.get('allow').split(',')]
+ if 'DELETE' not in methods:
+ # if Allow header present and DELETE not listed, del via PATCH
+ return self.delete_user_via_patch(user, uri=uri, data=data)
+
+ response = self.delete_request(self.root_uri + uri)
+ if not response['ret']:
+ if response.get('status') == 405:
+ # if DELETE returned a 405, try to delete via PATCH
+ return self.delete_user_via_patch(user, uri=uri, data=data)
+ else:
+ return response
+ return {'ret': True}
+
+ def disable_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if not data.get('Enabled'):
+ # account already disabled, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'Enabled': False}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_user_role(self, user):
+ if not user.get('account_roleid'):
+ return {'ret': False, 'msg':
+ 'Must provide account_roleid for UpdateUserRole command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if data.get('RoleId') == user.get('account_roleid'):
+ # account already has this RoleId, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'RoleId': user.get('account_roleid')}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_user_password(self, user):
+ if not user.get('account_password'):
+ return {'ret': False, 'msg':
+ 'Must provide account_password for UpdateUserPassword command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ payload = {'Password': user['account_password']}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_user_name(self, user):
+ if not user.get('account_updatename'):
+ return {'ret': False, 'msg':
+ 'Must provide account_updatename for UpdateUserName command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ payload = {'UserName': user['account_updatename']}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_accountservice_properties(self, user):
+ if user.get('account_properties') is None:
+ return {'ret': False, 'msg':
+ 'Must provide account_properties for UpdateAccountServiceProperties command'}
+ account_properties = user.get('account_properties')
+
+ # Find AccountService
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'AccountService' not in data:
+ return {'ret': False, 'msg': "AccountService resource not found"}
+ accountservice_uri = data["AccountService"]["@odata.id"]
+
+ # Check whether each property is supported
+ response = self.get_request(self.root_uri + accountservice_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ for property_name in account_properties.keys():
+ if property_name not in data:
+ return {'ret': False, 'msg':
+ 'property %s not supported' % property_name}
+
+ # if the properties already match, nothing to do
+ need_change = False
+ for property_name in account_properties.keys():
+ if account_properties[property_name] != data[property_name]:
+ need_change = True
+ break
+
+ if not need_change:
+ return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"}
+
+ payload = account_properties
+ response = self.patch_request(self.root_uri + accountservice_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"}
+
+ def get_sessions(self):
+ result = {}
+ session_list = []
+ sessions_results = []
+ # Get these entries, but do not fail if not found
+ properties = ['Description', 'Id', 'Name', 'UserName']
+
+ response = self.get_request(self.root_uri + self.sessions_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for member in data.get(u'Members', []):
+ session_list.append(member[u'@odata.id']) # session_list entries are URIs
+
+ # for each session, get details
+ for uri in session_list:
+ session = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ session[property] = data[property]
+
+ sessions_results.append(session)
+ result["entries"] = sessions_results
+ return result
+
+ def clear_sessions(self):
+ response = self.get_request(self.root_uri + self.sessions_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # if no active sessions, return as success
+ if data['Members@odata.count'] == 0:
+ return {'ret': True, 'changed': False, 'msg': "There are no active sessions"}
+
+ # loop to delete every active session
+ for session in data[u'Members']:
+ response = self.delete_request(self.root_uri + session[u'@odata.id'])
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"}
+
+ def get_firmware_update_capabilities(self):
+ result = {}
+ response = self.get_request(self.root_uri + self.update_uri)
+ if response['ret'] is False:
+ return response
+
+ result['ret'] = True
+
+ result['entries'] = {}
+
+ data = response['data']
+
+ if "Actions" in data:
+ actions = data['Actions']
+ if actions:
+ for key, action in actions.items():
+ if 'title' in action:
+ title = action['title']
+ else:
+ title = key
+ result['entries'][title] = action.get('TransferProtocol@Redfish.AllowableValues',
+ ["Key TransferProtocol@Redfish.AllowableValues not found"])
+ else:
+ return {'ret': False, 'msg': "Actions list is empty."}
+ else:
+ return {'ret': False, 'msg': "Key Actions not found."}
+ return result
+
+ def _software_inventory(self, uri):
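+ # Shared helper for FirmwareInventory and SoftwareInventory: walk the
+ # given collection and gather the standard properties of each member.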
+ result = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ result['entries'] = []
+ for member in data[u'Members']:
+ uri = self.root_uri + member[u'@odata.id']
+ # Get details for each software or firmware member
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ software = {}
+ # Get these standard properties if present
+ for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
+ 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
+ 'ReleaseDate']:
+ if key in data:
+ software[key] = data.get(key)
+ result['entries'].append(software)
+ return result
+
+ def get_firmware_inventory(self):
+ if self.firmware_uri is None:
+ return {'ret': False, 'msg': 'No FirmwareInventory resource found'}
+ else:
+ return self._software_inventory(self.firmware_uri)
+
+ def get_software_inventory(self):
+ if self.software_uri is None:
+ return {'ret': False, 'msg': 'No SoftwareInventory resource found'}
+ else:
+ return self._software_inventory(self.software_uri)
+
+ def simple_update(self, update_opts):
+ image_uri = update_opts.get('update_image_uri')
+ protocol = update_opts.get('update_protocol')
+ targets = update_opts.get('update_targets')
+ creds = update_opts.get('update_creds')
+
+ if not image_uri:
+ return {'ret': False, 'msg':
+ 'Must specify update_image_uri for the SimpleUpdate command'}
+
+ response = self.get_request(self.root_uri + self.update_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Actions' not in data:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ if '#UpdateService.SimpleUpdate' not in data['Actions']:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ action = data['Actions']['#UpdateService.SimpleUpdate']
+ if 'target' not in action:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ update_uri = action['target']
+ if protocol:
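+ # 'NSF' is kept alongside 'NFS' for compatibility with schemas carrying the historical typo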
+ default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', 'NSF',
+ 'SCP', 'TFTP', 'OEM', 'NFS']
+ allowable_values = self._get_allowable_values(action,
+ 'TransferProtocol',
+ default_values)
+ if protocol not in allowable_values:
+ return {'ret': False,
+ 'msg': 'Specified update_protocol (%s) not supported '
+ 'by service. Supported protocols: %s' %
+ (protocol, allowable_values)}
+ if targets:
+ allowable_values = self._get_allowable_values(action, 'Targets')
+ if allowable_values:
+ for target in targets:
+ if target not in allowable_values:
+ return {'ret': False,
+ 'msg': 'Specified target (%s) not supported '
+ 'by service. Supported targets: %s' %
+ (target, allowable_values)}
+
+ payload = {
+ 'ImageURI': image_uri
+ }
+ if protocol:
+ payload["TransferProtocol"] = protocol
+ if targets:
+ payload["Targets"] = targets
+ if creds:
+ if creds.get('username'):
+ payload["Username"] = creds.get('username')
+ if creds.get('password'):
+ payload["Password"] = creds.get('password')
+ response = self.post_request(self.root_uri + update_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "SimpleUpdate requested"}
+
+ def get_bios_attributes(self, systems_uri):
+ result = {}
+ bios_attributes = {}
+ key = "Bios"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ for name, value in data[u'Attributes'].items():
+ bios_attributes[name] = value
+ result["entries"] = bios_attributes
+ return result
+
+ def get_multi_bios_attributes(self):
+ return self.aggregate_systems(self.get_bios_attributes)
+
+ def _get_boot_options_dict(self, boot):
+ # Get these entries from BootOption, if present
+ properties = ['DisplayName', 'BootOptionReference']
+
+ # Retrieve BootOptions if present
+ if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']:
+ boot_options_uri = boot['BootOptions']["@odata.id"]
+ # Get BootOptions resource
+ response = self.get_request(self.root_uri + boot_options_uri)
+ if response['ret'] is False:
+ return {}
+ data = response['data']
+
+ # Retrieve Members array
+ if 'Members' not in data:
+ return {}
+ members = data['Members']
+ else:
+ members = []
+
+ # Build dict of BootOptions keyed by BootOptionReference
+ boot_options_dict = {}
+ for member in members:
+ if '@odata.id' not in member:
+ return {}
+ boot_option_uri = member['@odata.id']
+ response = self.get_request(self.root_uri + boot_option_uri)
+ if response['ret'] is False:
+ return {}
+ data = response['data']
+ if 'BootOptionReference' not in data:
+ return {}
+ boot_option_ref = data['BootOptionReference']
+
+ # fetch the props to display for this boot device
+ boot_props = {}
+ for prop in properties:
+ if prop in data:
+ boot_props[prop] = data[prop]
+
+ boot_options_dict[boot_option_ref] = boot_props
+
+ return boot_options_dict
+
+ def get_boot_order(self, systems_uri):
+ result = {}
+
+ # Retrieve System resource
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Confirm needed Boot properties are present
+ if 'Boot' not in data or 'BootOrder' not in data['Boot']:
+ return {'ret': False, 'msg': "Key BootOrder not found"}
+
+ boot = data['Boot']
+ boot_order = boot['BootOrder']
+ boot_options_dict = self._get_boot_options_dict(boot)
+
+ # Build boot device list
+ boot_device_list = []
+ for ref in boot_order:
+ boot_device_list.append(
+ boot_options_dict.get(ref, {'BootOptionReference': ref}))
+
+ result["entries"] = boot_device_list
+ return result
+
+ def get_multi_boot_order(self):
+ return self.aggregate_systems(self.get_boot_order)
+
+ def get_boot_override(self, systems_uri):
+ result = {}
+
+ properties = ["BootSourceOverrideEnabled", "BootSourceOverrideTarget",
+ "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"]
+
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if 'Boot' not in data:
+ return {'ret': False, 'msg': "Key Boot not found"}
+
+ boot = data['Boot']
+
+ boot_overrides = {}
+ if "BootSourceOverrideEnabled" in boot:
+ if boot["BootSourceOverrideEnabled"] is not False:
+ for property in properties:
+ if property in boot:
+ if boot[property] is not None:
+ boot_overrides[property] = boot[property]
+ else:
+ return {'ret': False, 'msg': "No boot override is enabled."}
+
+ result['entries'] = boot_overrides
+ return result
+
+ def get_multi_boot_override(self):
+ return self.aggregate_systems(self.get_boot_override)
+
+ def set_bios_default_settings(self):
+ result = {}
+ key = "Bios"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"]
+
+ response = self.post_request(self.root_uri + reset_bios_settings_uri, {})
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"}
+
+ def set_boot_override(self, boot_opts):
+ result = {}
+ key = "Boot"
+
+ bootdevice = boot_opts.get('bootdevice')
+ uefi_target = boot_opts.get('uefi_target')
+ boot_next = boot_opts.get('boot_next')
+ override_enabled = boot_opts.get('override_enabled')
+
+ if not bootdevice and override_enabled != 'Disabled':
+ return {'ret': False,
+ 'msg': "bootdevice option required for temporary boot override"}
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ boot = data[key]
+
+ annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues'
+ if annotation in boot:
+ allowable_values = boot[annotation]
+ if isinstance(allowable_values, list) and bootdevice not in allowable_values:
+ return {'ret': False,
+ 'msg': "Boot device %s not in list of allowable values (%s)" %
+ (bootdevice, allowable_values)}
+
+ # read existing values
+ cur_enabled = boot.get('BootSourceOverrideEnabled')
+ target = boot.get('BootSourceOverrideTarget')
+ cur_uefi_target = boot.get('UefiTargetBootSourceOverride')
+ cur_boot_next = boot.get('BootNext')
+
+ if override_enabled == 'Disabled':
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled
+ }
+ }
+ elif bootdevice == 'UefiTarget':
+ if not uefi_target:
+ return {'ret': False,
+ 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"}
+ if override_enabled == cur_enabled and target == bootdevice and uefi_target == cur_uefi_target:
+ # If properties are already set, no changes needed
+ return {'ret': True, 'changed': False}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice,
+ 'UefiTargetBootSourceOverride': uefi_target
+ }
+ }
+ elif bootdevice == 'UefiBootNext':
+ if not boot_next:
+ return {'ret': False,
+ 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"}
+ if cur_enabled == override_enabled and target == bootdevice and boot_next == cur_boot_next:
+ # If properties are already set, no changes needed
+ return {'ret': True, 'changed': False}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice,
+ 'BootNext': boot_next
+ }
+ }
+ else:
+ if cur_enabled == override_enabled and target == bootdevice:
+ # If properties are already set, no changes needed
+ return {'ret': True, 'changed': False}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice
+ }
+ }
+
+ response = self.patch_request(self.root_uri + self.systems_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True}
+
+ def set_bios_attributes(self, attributes):
+ result = {}
+ key = "Bios"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Make a copy of the attributes dict
+ attrs_to_patch = dict(attributes)
+
+ # Check the attributes
+ for attr in attributes:
+ if attr not in data[u'Attributes']:
+ return {'ret': False, 'msg': "BIOS attribute %s not found" % attr}
+ # If already set to requested value, remove it from PATCH payload
+ if data[u'Attributes'][attr] == attributes[attr]:
+ del attrs_to_patch[attr]
+
+ # Return success w/ changed=False if no attrs need to be changed
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "BIOS attributes already set"}
+
+ # Get the SettingsObject URI
+ set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"]
+
+ # Construct payload and issue PATCH command
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + set_bios_attr_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"}
+
+ def set_boot_order(self, boot_list):
+ if not boot_list:
+ return {'ret': False,
+ 'msg': "boot_order list required for SetBootOrder command"}
+
+ systems_uri = self.systems_uri
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Confirm needed Boot properties are present
+ if 'Boot' not in data or 'BootOrder' not in data['Boot']:
+ return {'ret': False, 'msg': "Key BootOrder not found"}
+
+ boot = data['Boot']
+ boot_order = boot['BootOrder']
+ boot_options_dict = self._get_boot_options_dict(boot)
+
+ # validate boot_list against BootOptionReferences if available
+ if boot_options_dict:
+ boot_option_references = boot_options_dict.keys()
+ for ref in boot_list:
+ if ref not in boot_option_references:
+ return {'ret': False,
+ 'msg': "BootOptionReference %s not found in BootOptions" % ref}
+
+ # If requested BootOrder is already set, nothing to do
+ if boot_order == boot_list:
+ return {'ret': True, 'changed': False,
+ 'msg': "BootOrder already set to %s" % boot_list}
+
+ payload = {
+ 'Boot': {
+ 'BootOrder': boot_list
+ }
+ }
+ response = self.patch_request(self.root_uri + systems_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "BootOrder set"}
+
+ def set_default_boot_order(self):
+ systems_uri = self.systems_uri
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # get the #ComputerSystem.SetDefaultBootOrder Action and target URI
+ action = '#ComputerSystem.SetDefaultBootOrder'
+ if 'Actions' not in data or action not in data['Actions']:
+ return {'ret': False, 'msg': 'Action %s not found' % action}
+ if 'target' not in data['Actions'][action]:
+ return {'ret': False,
+ 'msg': 'target URI missing from Action %s' % action}
+ action_uri = data['Actions'][action]['target']
+
+ # POST to Action URI
+ payload = {}
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "BootOrder set to default"}
+
+ def get_chassis_inventory(self):
+ result = {}
+ chassis_results = []
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Id', 'ChassisType', 'PartNumber', 'AssetTag',
+ 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ chassis_result = {}
+ for property in properties:
+ if property in data:
+ chassis_result[property] = data[property]
+ chassis_results.append(chassis_result)
+
+ result["entries"] = chassis_results
+ return result
+
+ def get_fan_inventory(self):
+ result = {}
+ fan_results = []
+ key = "Thermal"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'FanName', 'Reading', 'ReadingUnits', 'Status']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ # found a "Thermal" entry, which holds the fan information
+ thermal_uri = data[key]["@odata.id"]
+ response = self.get_request(self.root_uri + thermal_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for device in data.get(u'Fans', []):
+ fan = {}
+ for property in properties:
+ if property in device:
+ fan[property] = device[property]
+ fan_results.append(fan)
+ result["entries"] = fan_results
+ return result
+
+ def get_chassis_power(self):
+ result = {}
+ key = "Power"
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'PowerAllocatedWatts',
+ 'PowerAvailableWatts', 'PowerCapacityWatts',
+ 'PowerConsumedWatts', 'PowerMetrics',
+ 'PowerRequestedWatts', 'RelatedItem', 'Status']
+
+ chassis_power_results = []
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ chassis_power_result = {}
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ response = self.get_request(self.root_uri + data[key]['@odata.id'])
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'PowerControl' in data:
+ if len(data['PowerControl']) > 0:
+ data = data['PowerControl'][0]
+ for property in properties:
+ if property in data:
+ chassis_power_result[property] = data[property]
+ else:
+ return {'ret': False, 'msg': 'Key PowerControl not found.'}
+ chassis_power_results.append(chassis_power_result)
+ else:
+ return {'ret': False, 'msg': 'Key Power not found.'}
+
+ result['entries'] = chassis_power_results
+ return result
+
+ def get_chassis_thermals(self):
+ result = {}
+ sensors = []
+ key = "Thermal"
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical',
+ 'UpperThresholdFatal', 'UpperThresholdNonCritical',
+ 'LowerThresholdCritical', 'LowerThresholdFatal',
+ 'LowerThresholdNonCritical', 'MaxReadingRangeTemp',
+ 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem',
+ 'SensorNumber']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ thermal_uri = data[key]["@odata.id"]
+ response = self.get_request(self.root_uri + thermal_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if "Temperatures" in data:
+ for sensor in data[u'Temperatures']:
+ sensor_result = {}
+ for property in properties:
+ if property in sensor:
+ if sensor[property] is not None:
+ sensor_result[property] = sensor[property]
+ sensors.append(sensor_result)
+
+ if not sensors:
+ return {'ret': False, 'msg': 'Key Temperatures was not found.'}
+
+ result['entries'] = sensors
+ return result
+
+ def get_cpu_inventory(self, systems_uri):
+ result = {}
+ cpu_list = []
+ cpu_results = []
+ key = "Processors"
+ # Get these entries, but do not fail if not found
+ properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz',
+ 'TotalCores', 'TotalThreads', 'Status']
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ processors_uri = data[key]["@odata.id"]
+
+ # Get a list of all CPUs and build respective URIs
+ response = self.get_request(self.root_uri + processors_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for cpu in data[u'Members']:
+ cpu_list.append(cpu[u'@odata.id'])
+
+ for c in cpu_list:
+ cpu = {}
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ cpu[property] = data[property]
+
+ cpu_results.append(cpu)
+ result["entries"] = cpu_results
+ return result
+
+ def get_multi_cpu_inventory(self):
+ return self.aggregate_systems(self.get_cpu_inventory)
+
+ def get_memory_inventory(self, systems_uri):
+ result = {}
+ memory_list = []
+ memory_results = []
+ key = "Memory"
+ # Get these entries, but do not fail if not found
+ properties = ['Id', 'SerialNumber', 'MemoryDeviceType', 'PartNumber',
+ 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name']
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ memory_uri = data[key]["@odata.id"]
+
+ # Get a list of all DIMMs and build respective URIs
+ response = self.get_request(self.root_uri + memory_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for dimm in data[u'Members']:
+ memory_list.append(dimm[u'@odata.id'])
+
+ for m in memory_list:
+ dimm = {}
+ uri = self.root_uri + m
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if "Status" in data:
+ if "State" in data["Status"]:
+ if data["Status"]["State"] == "Absent":
+ continue
+ else:
+ continue
+
+ for property in properties:
+ if property in data:
+ dimm[property] = data[property]
+
+ memory_results.append(dimm)
+ result["entries"] = memory_results
+ return result
+
+ def get_multi_memory_inventory(self):
+ return self.aggregate_systems(self.get_memory_inventory)
+
+ def get_nic_inventory(self, resource_uri):
+ result = {}
+ nic_list = []
+ nic_results = []
+ key = "EthernetInterfaces"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
+ 'NameServers', 'MACAddress', 'PermanentMACAddress',
+ 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
+
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ ethernetinterfaces_uri = data[key]["@odata.id"]
+
+ # Get a list of all network controllers and build respective URIs
+ response = self.get_request(self.root_uri + ethernetinterfaces_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for nic in data[u'Members']:
+ nic_list.append(nic[u'@odata.id'])
+
+ for n in nic_list:
+ nic = {}
+ uri = self.root_uri + n
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ nic[property] = data[property]
+
+ nic_results.append(nic)
+ result["entries"] = nic_results
+ return result
+
+ def get_multi_nic_inventory(self, resource_type):
+ ret = True
+ entries = []
+
+ # Given resource_type, use the proper URI
+ if resource_type == 'Systems':
+ resource_uris = self.systems_uris
+ elif resource_type == 'Manager':
+ resource_uris = self.manager_uris
+ else:
+ return {'ret': False, 'msg': "Invalid resource_type: %s" % resource_type}
+
+ for resource_uri in resource_uris:
+ inventory = self.get_nic_inventory(resource_uri)
+ ret = inventory.pop('ret') and ret
+ if 'entries' in inventory:
+ entries.append(({'resource_uri': resource_uri},
+ inventory['entries']))
+ return dict(ret=ret, entries=entries)
+
+ def get_virtualmedia(self, resource_uri):
+ result = {}
+ virtualmedia_list = []
+ virtualmedia_results = []
+ key = "VirtualMedia"
+ # Get these entries, but do not fail if not found
+ properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes',
+ 'Image', 'ImageName', 'Name', 'WriteProtected',
+ 'TransferMethod', 'TransferProtocolType']
+
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ virtualmedia_uri = data[key]["@odata.id"]
+
+ # Get a list of all virtual media and build respective URIs
+ response = self.get_request(self.root_uri + virtualmedia_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for virtualmedia in data[u'Members']:
+ virtualmedia_list.append(virtualmedia[u'@odata.id'])
+
+ for n in virtualmedia_list:
+ virtualmedia = {}
+ uri = self.root_uri + n
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ virtualmedia[property] = data[property]
+
+ virtualmedia_results.append(virtualmedia)
+ result["entries"] = virtualmedia_results
+ return result
+
+ def get_multi_virtualmedia(self):
+ ret = True
+ entries = []
+
+ resource_uris = self.manager_uris
+
+ for resource_uri in resource_uris:
+ virtualmedia = self.get_virtualmedia(resource_uri)
+ ret = virtualmedia.pop('ret') and ret
+ if 'entries' in virtualmedia:
+ entries.append(({'resource_uri': resource_uri},
+ virtualmedia['entries']))
+ return dict(ret=ret, entries=entries)
+
+ @staticmethod
+ def _find_empty_virt_media_slot(resources, media_types,
+ media_match_strict=True):
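+ # A candidate slot must be ejected (not Inserted and no ImageName); with
+ # media_match_strict, it must also advertise a matching MediaTypes.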
+ for uri, data in resources.items():
+ # check MediaTypes
+ if 'MediaTypes' in data and media_types:
+ if not set(media_types).intersection(set(data['MediaTypes'])):
+ continue
+ else:
+ if media_match_strict:
+ continue
+ # if ejected, 'Inserted' should be False and 'ImageName' cleared
+ if (not data.get('Inserted', False) and
+ not data.get('ImageName')):
+ return uri, data
+ return None, None
+
+ @staticmethod
+ def _virt_media_image_inserted(resources, image_url):
+ for uri, data in resources.items():
+ if data.get('Image'):
+ if urlparse(image_url) == urlparse(data.get('Image')):
+ if data.get('Inserted', False) and data.get('ImageName'):
+ return True
+ return False
+
+ @staticmethod
+ def _find_virt_media_to_eject(resources, image_url):
+ matched_uri, matched_data = None, None
+ for uri, data in resources.items():
+ if data.get('Image'):
+ if urlparse(image_url) == urlparse(data.get('Image')):
+ matched_uri, matched_data = uri, data
+ if data.get('Inserted', True) and data.get('ImageName', 'x'):
+ return uri, data, True
+ return matched_uri, matched_data, False
+
+ def _read_virt_media_resources(self, uri_list):
+ resources = {}
+ headers = {}
+ for uri in uri_list:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ resources[uri] = response['data']
+ headers[uri] = response['headers']
+ return resources, headers
+
+ @staticmethod
+ def _insert_virt_media_payload(options, param_map, data, ai):
+ payload = {
+ 'Image': options.get('image_url')
+ }
+ for param, option in param_map.items():
+ if options.get(option) is not None and param in data:
+ allowable = ai.get(param, {}).get('AllowableValues', [])
+ if allowable and options.get(option) not in allowable:
+ return {'ret': False,
+ 'msg': "Value '%s' specified for option '%s' not "
+ "in list of AllowableValues %s" % (
+ options.get(option), option,
+ allowable)}
+ payload[param] = options.get(option)
+ return payload
+
+ def virtual_media_insert_via_patch(self, options, param_map, uri, data):
+ # get AllowableValues
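+ # strip the 24-character '@Redfish.AllowableValues' suffix to recover the bare parameter name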
+ ai = dict((k[:-24],
+ {'AllowableValues': v}) for k, v in data.items()
+ if k.endswith('@Redfish.AllowableValues'))
+ # construct payload
+ payload = self._insert_virt_media_payload(options, param_map, data, ai)
+ if 'Inserted' not in payload:
+ payload['Inserted'] = True
+ # PATCH the resource
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
+
+ def virtual_media_insert(self, options):
+ param_map = {
+ 'Inserted': 'inserted',
+ 'WriteProtected': 'write_protected',
+ 'UserName': 'username',
+ 'Password': 'password',
+ 'TransferProtocolType': 'transfer_protocol_type',
+ 'TransferMethod': 'transfer_method'
+ }
+ image_url = options.get('image_url')
+ if not image_url:
+ return {'ret': False,
+ 'msg': "image_url option required for VirtualMediaInsert"}
+ media_types = options.get('media_types')
+
+ # locate and read the VirtualMedia resources
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # see if image already inserted; if so, nothing to do
+ if self._virt_media_image_inserted(resources, image_url):
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia '%s' already inserted" % image_url}
+
+ # find an empty slot to insert the media
+ # try first with strict media_type matching
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=True)
+ if not uri:
+ # if not found, try without strict media_type matching
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=False)
+ if not uri:
+ return {'ret': False,
+ 'msg': "Unable to find an available VirtualMedia resource "
+ "%s" % ('supporting ' + str(media_types)
+ if media_types else '')}
+
+ # confirm InsertMedia action found
+ if ('Actions' not in data or
+ '#VirtualMedia.InsertMedia' not in data['Actions']):
+ # try to insert via PATCH if no InsertMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.InsertMedia'}
+ return self.virtual_media_insert_via_patch(options, param_map,
+ uri, data)
+
+ # get the action property
+ action = data['Actions']['#VirtualMedia.InsertMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI missing from Action "
+ "#VirtualMedia.InsertMedia"}
+ action_uri = action['target']
+ # get ActionInfo or AllowableValues
+ ai = self._get_all_action_info_values(action)
+ # construct payload
+ payload = self._insert_virt_media_payload(options, param_map, data, ai)
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
+
+ def virtual_media_eject_via_patch(self, uri):
+ # construct payload
+ payload = {
+ 'Inserted': False,
+ 'Image': None
+ }
+ # PATCH resource
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "VirtualMedia ejected"}
+
+ def virtual_media_eject(self, options):
+ image_url = options.get('image_url')
+ if not image_url:
+ return {'ret': False,
+ 'msg': "image_url option required for VirtualMediaEject"}
+
+ # locate and read the VirtualMedia resources
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # find the VirtualMedia resource to eject
+ uri, data, eject = self._find_virt_media_to_eject(resources, image_url)
+ if uri and eject:
+ if ('Actions' not in data or
+ '#VirtualMedia.EjectMedia' not in data['Actions']):
+ # try to eject via PATCH if no EjectMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.EjectMedia'}
+ return self.virtual_media_eject_via_patch(uri)
+ else:
+ # POST to the EjectMedia Action
+ action = data['Actions']['#VirtualMedia.EjectMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI property missing from Action "
+ "#VirtualMedia.EjectMedia"}
+ action_uri = action['target']
+ # empty payload for Eject action
+ payload = {}
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri,
+ payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "VirtualMedia ejected"}
+ elif uri and not eject:
+ # already ejected: return success but changed=False
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia image '%s' already ejected" %
+ image_url}
+ else:
+ # return failure (no resources matching image_url found)
+ return {'ret': False, 'changed': False,
+ 'msg': "No VirtualMedia resource found with image '%s' "
+ "inserted" % image_url}
+
+ def get_psu_inventory(self):
+ result = {}
+ psu_list = []
+ psu_results = []
+ key = "PowerSupplies"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer',
+ 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType',
+ 'Status']
+
+ # Get a list of all Chassis and build URIs, then get all PowerSupplies
+ # from each Power entry in the Chassis
+ chassis_uri_list = self.chassis_uris
+ for chassis_uri in chassis_uri_list:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+
+ result['ret'] = True
+ data = response['data']
+
+ if 'Power' in data:
+ power_uri = data[u'Power'][u'@odata.id']
+ else:
+ continue
+
+ response = self.get_request(self.root_uri + power_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ psu_list = data[key]
+ for psu in psu_list:
+ psu_not_present = False
+ psu_data = {}
+ for property in properties:
+ if property in psu:
+ if psu[property] is not None:
+ if property == 'Status':
+ if 'State' in psu[property]:
+ if psu[property]['State'] == 'Absent':
+ psu_not_present = True
+ psu_data[property] = psu[property]
+ if psu_not_present:
+ continue
+ psu_results.append(psu_data)
+
+ result["entries"] = psu_results
+ if not result["entries"]:
+ return {'ret': False, 'msg': "No PowerSupply objects found"}
+ return result
+
+ def get_multi_psu_inventory(self):
+ return self.aggregate_systems(self.get_psu_inventory)
+
+ def get_system_inventory(self, systems_uri):
+ result = {}
+ inventory = {}
+ # Get these entries, but do not fail if not found
+ properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer',
+ 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag',
+ 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary',
+ 'ProcessorSummary', 'TrustedModules', 'Name', 'Id']
+
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ inventory[property] = data[property]
+
+ result["entries"] = inventory
+ return result
+
+ def get_multi_system_inventory(self):
+ return self.aggregate_systems(self.get_system_inventory)
+
+ def get_network_protocols(self):
+ result = {}
+ service_result = {}
+ # Find NetworkProtocol
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'NetworkProtocol' not in data:
+ return {'ret': False, 'msg': "NetworkProtocol resource not found"}
+ networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]
+
+ response = self.get_request(self.root_uri + networkprotocol_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
+ 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
+ 'RFB']
+ for protocol_service in protocol_services:
+ if protocol_service in data.keys():
+ service_result[protocol_service] = data[protocol_service]
+
+ result['ret'] = True
+ result["entries"] = service_result
+ return result
+
+ def set_network_protocols(self, manager_services):
+ # Check input data validity
+ protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
+ 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
+ 'RFB']
+ protocol_state_onlist = ['true', 'True', True, 'on', 1]
+ protocol_state_offlist = ['false', 'False', False, 'off', 0]
+ payload = {}
+ for service_name in manager_services.keys():
+ if service_name not in protocol_services:
+ return {'ret': False, 'msg': "Service name %s is invalid" % service_name}
+ payload[service_name] = {}
+ for service_property in manager_services[service_name].keys():
+ value = manager_services[service_name][service_property]
+ if service_property in ['ProtocolEnabled', 'protocolenabled']:
+ if value in protocol_state_onlist:
+ payload[service_name]['ProtocolEnabled'] = True
+ elif value in protocol_state_offlist:
+ payload[service_name]['ProtocolEnabled'] = False
+ else:
+ return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
+ elif service_property in ['port', 'Port']:
+ if isinstance(value, int):
+ payload[service_name]['Port'] = value
+ elif isinstance(value, str) and value.isdigit():
+ payload[service_name]['Port'] = int(value)
+ else:
+ return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
+ else:
+ payload[service_name][service_property] = value
+
+ # Find NetworkProtocol
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'NetworkProtocol' not in data:
+ return {'ret': False, 'msg': "NetworkProtocol resource not found"}
+ networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]
+
+ # Check whether each service property is supported
+ response = self.get_request(self.root_uri + networkprotocol_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ for service_name in payload.keys():
+ if service_name not in data:
+ return {'ret': False, 'msg': "%s service not supported" % service_name}
+ for service_property in payload[service_name].keys():
+ if service_property not in data[service_name]:
+ return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)}
+
+ # if the protocol is already set, nothing to do
+ need_change = False
+ for service_name in payload.keys():
+ for service_property in payload[service_name].keys():
+ value = payload[service_name][service_property]
+ if value != data[service_name][service_property]:
+ need_change = True
+ break
+
+ if not need_change:
+ return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"}
+
+ response = self.patch_request(self.root_uri + networkprotocol_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified Manager NetworkProtocol services"}
+
+ @staticmethod
+ def to_singular(resource_name):
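+ # naive singularization for building '<resource>_uri' keys,
+ # e.g. 'PowerSupplies' -> 'PowerSupply', 'Fans' -> 'Fan'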
+ if resource_name.endswith('ies'):
+ resource_name = resource_name[:-3] + 'y'
+ elif resource_name.endswith('s'):
+ resource_name = resource_name[:-1]
+ return resource_name
+
+ def get_health_resource(self, subsystem, uri, health, expanded):
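+ # Append a {<subsystem>_uri, Status} entry to health[subsystem] for the
+ # given resource, expanding Members when the URI points at a collection;
+ # 'expanded' carries member data already inlined in the parent resource.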
+ status = 'Status'
+
+ if expanded:
+ d = expanded
+ else:
+ r = self.get_request(self.root_uri + uri)
+ if r.get('ret'):
+ d = r.get('data')
+ else:
+ return
+
+ if 'Members' in d: # collections case
+ for m in d.get('Members'):
+ u = m.get('@odata.id')
+ r = self.get_request(self.root_uri + u)
+ if r.get('ret'):
+ p = r.get('data')
+ if p:
+ e = {self.to_singular(subsystem.lower()) + '_uri': u,
+ status: p.get(status,
+ "Status not available")}
+ health[subsystem].append(e)
+ else: # non-collections case
+ e = {self.to_singular(subsystem.lower()) + '_uri': uri,
+ status: d.get(status,
+ "Status not available")}
+ health[subsystem].append(e)
+
+ def get_health_subsystem(self, subsystem, data, health):
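+ # Follow the subsystem's @odata.id link(s) in 'data' (list or dict) and
+ # collect health entries; when 'data' is itself a collection, recurse
+ # into its Members.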
+ if subsystem in data:
+ sub = data.get(subsystem)
+ if isinstance(sub, list):
+ for r in sub:
+ if '@odata.id' in r:
+ uri = r.get('@odata.id')
+ expanded = None
+ if '#' in uri and len(r) > 1:
+ expanded = r
+ self.get_health_resource(subsystem, uri, health, expanded)
+ elif isinstance(sub, dict):
+ if '@odata.id' in sub:
+ uri = sub.get('@odata.id')
+ self.get_health_resource(subsystem, uri, health, None)
+ elif 'Members' in data:
+ for m in data.get('Members'):
+ u = m.get('@odata.id')
+ r = self.get_request(self.root_uri + u)
+ if r.get('ret'):
+ d = r.get('data')
+ self.get_health_subsystem(subsystem, d, health)
+
+ def get_health_report(self, category, uri, subsystems):
+ result = {}
+ health = {}
+ status = 'Status'
+
+ # Get health status of top level resource
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ health[category] = {status: data.get(status, "Status not available")}
+
+ # Get health status of subsystems
+ for sub in subsystems:
+ d = None
+ if sub.startswith('Links.'): # ex: Links.PCIeDevices
+ sub = sub[len('Links.'):]
+ d = data.get('Links', {})
+ elif '.' in sub: # ex: Thermal.Fans
+ p, sub = sub.split('.')
+ u = data.get(p, {}).get('@odata.id')
+ if u:
+ r = self.get_request(self.root_uri + u)
+ if r['ret']:
+ d = r['data']
+ if not d:
+ continue
+ else: # ex: Memory
+ d = data
+ health[sub] = []
+ self.get_health_subsystem(sub, d, health)
+ if not health[sub]:
+ del health[sub]
+
+ result["entries"] = health
+ return result
+
+ def get_system_health_report(self, systems_uri):
+ subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage',
+ 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts',
+ 'NetworkInterfaces.NetworkDeviceFunctions']
+ return self.get_health_report('System', systems_uri, subsystems)
+
+ def get_multi_system_health_report(self):
+ return self.aggregate_systems(self.get_system_health_report)
+
+ def get_chassis_health_report(self, chassis_uri):
+ subsystems = ['Power.PowerSupplies', 'Thermal.Fans',
+ 'Links.PCIeDevices']
+ return self.get_health_report('Chassis', chassis_uri, subsystems)
+
+ def get_multi_chassis_health_report(self):
+ return self.aggregate_chassis(self.get_chassis_health_report)
+
+ def get_manager_health_report(self, manager_uri):
+ subsystems = []
+ return self.get_health_report('Manager', manager_uri, subsystems)
+
+ def get_multi_manager_health_report(self):
+ return self.aggregate_managers(self.get_manager_health_report)
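+
+ # Sketch of the structure the health report helpers above return
+ # (illustrative values, assuming a chassis with a single power supply):
+ #   {'ret': True,
+ #    'entries': {
+ #        'Chassis': {'Status': {'Health': 'OK'}},
+ #        'PowerSupplies': [
+ #            {'powersupply_uri': '/redfish/v1/Chassis/1/Power#/PowerSupplies/0',
+ #             'Status': {'State': 'Enabled', 'Health': 'OK'}}]}}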
+
+ def set_manager_nic(self, nic_addr, nic_config):
+ # Get EthernetInterface collection
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'EthernetInterfaces' not in data:
+ return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
+ ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
+ response = self.get_request(self.root_uri + ethernetinterfaces_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+
+ # Find target EthernetInterface
+ target_ethernet_uri = None
+ target_ethernet_current_setting = None
+ if nic_addr == 'null':
+ # When nic_addr is not specified, fall back to the host part of root_uri
+ nic_addr = (self.root_uri).split('/')[-1]
+ nic_addr = nic_addr.split(':')[0] # strip the port, if present
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if '"' + nic_addr + '"' in str(data) or "'" + nic_addr + "'" in str(data):
+ target_ethernet_uri = uri
+ target_ethernet_current_setting = data
+ break
+ if target_ethernet_uri is None:
+ return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"}
+
+ # Convert input to payload and check validity
+ payload = {}
+ for property in nic_config.keys():
+ value = nic_config[property]
+ if property not in target_ethernet_current_setting:
+ return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property}
+ if isinstance(value, dict):
+ if isinstance(target_ethernet_current_setting[property], dict):
+ payload[property] = value
+ elif isinstance(target_ethernet_current_setting[property], list):
+ payload[property] = list()
+ payload[property].append(value)
+ else:
+ return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property}
+ else:
+ payload[property] = value
+
+ # If nothing needs changing, exit early; invalid sub-properties are reported as errors
+ need_change = False
+ for property in payload.keys():
+ set_value = payload[property]
+ cur_value = target_ethernet_current_setting[property]
+ # type is simple(not dict/list)
+ if not isinstance(set_value, dict) and not isinstance(set_value, list):
+ if set_value != cur_value:
+ need_change = True
+ # type is dict
+ if isinstance(set_value, dict):
+ for subprop in payload[property].keys():
+ if subprop not in target_ethernet_current_setting[property]:
+ return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
+ sub_set_value = payload[property][subprop]
+ sub_cur_value = target_ethernet_current_setting[property][subprop]
+ if sub_set_value != sub_cur_value:
+ need_change = True
+ # type is list
+ if isinstance(set_value, list):
+ for i in range(len(set_value)):
+ for subprop in payload[property][i].keys():
+ if subprop not in target_ethernet_current_setting[property][i]:
+ return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
+ sub_set_value = payload[property][i][subprop]
+ sub_cur_value = target_ethernet_current_setting[property][i][subprop]
+ if sub_set_value != sub_cur_value:
+ need_change = True
+
+ if not need_change:
+ return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"}
+
+ response = self.patch_request(self.root_uri + target_ethernet_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
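+
+ # Hedged usage sketch (not part of the module; 'rf_utils' is a hypothetical
+ # instance of this utils class):
+ #   nic_config = {'HostName': 'bmc-01',
+ #                 'IPv4Addresses': {'Address': '192.0.2.10'}}
+ #   result = rf_utils.set_manager_nic('null', nic_config)
+ # Passing 'null' triggers the root_uri fallback above, and a dict value
+ # aimed at a list-typed property is wrapped in a one-element list.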
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redhat.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redhat.py
new file mode 100644
index 00000000..0fb6e9b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redhat.py
@@ -0,0 +1,270 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), James Laska
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import re
+import shutil
+import tempfile
+import types
+
+from ansible.module_utils.six.moves import configparser
+
+
+class RegistrationBase(object):
+ def __init__(self, module, username=None, password=None):
+ self.module = module
+ self.username = username
+ self.password = password
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ redhat_repo = '/etc/yum.repos.d/redhat.repo'
+ if os.path.isfile(redhat_repo):
+ os.unlink(redhat_repo)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
+ if os.path.isfile(plugin_conf):
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ # ConfigParser requires option values to be strings
+ if enabled:
+ cfg.set('main', 'enabled', '1')
+ else:
+ cfg.set('main', 'enabled', '0')
+
+ fd = open(tmpfile, 'w+')
+ cfg.write(fd)
+ fd.close()
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
+ self.config = self._read_config()
+ self.module = module
+
+ def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
+ '''
+ Load RHSM configuration from /etc/rhsm/rhsm.conf.
+ Returns:
+ * ConfigParser object
+ '''
+
+ # Read RHSM defaults ...
+ cp = configparser.ConfigParser()
+ cp.read(rhsm_conf)
+
+ # Add support for specifying a default value w/o having to standup some configuration
+ # Yeah, I know this should be subclassed ... but, oh well
+ def get_option_default(self, key, default=''):
+ sect, opt = key.split('.', 1)
+ if self.has_section(sect) and self.has_option(sect, opt):
+ return self.get(sect, opt)
+ else:
+ return default
+
+ # Python 3's types.MethodType takes only (function, instance)
+ cp.get_option = types.MethodType(get_option_default, cp)
+
+ return cp
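+
+ # Illustrative use of the patched accessor (not upstream code):
+ #   cp = self._read_config()
+ #   cp.get_option('rhsm.baseurl', default='https://cdn.example.com')
+ # splits the key on the first '.', reads option 'baseurl' from section
+ # [rhsm], and returns the given default if section or option is missing.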
+
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHN
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'rhsm_baseurl' becomes '--rhsm.baseurl'.
+ for k, v in kwargs.items():
+ if re.search(r'^(system|rhsm)_', k):
+ args.append('--%s=%s' % (k.replace('_', '.'), v))
+
+ self.module.run_command(args, check_rc=True)
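+
+ # Illustrative call (not upstream code):
+ #   self.configure(rhsm_baseurl='https://cdn.example.com')
+ # runs: subscription-manager config --rhsm.baseurl=https://cdn.example.com
+ # Keyword arguments that do not match the regex above are silently ignored.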
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHN.
+ '''
+ args = ['subscription-manager', 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def register(self, username, password, autosubscribe, activationkey):
+ '''
+ Register the current system to the provided RHN server
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'register']
+
+ # Generate command arguments
+ if activationkey:
+ # Pass as separate argv elements; embedding quotes would send them literally
+ args.extend(['--activationkey', activationkey])
+ else:
+ if autosubscribe:
+ args.append('--autosubscribe')
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+
+ # Run the registration command; run_command returns (rc, stdout, stderr)
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ def unsubscribe(self):
+ '''
+ Unsubscribe a system from all subscribed channels
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'unsubscribe', '--all']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'unregister']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ for pool in available_pools.filter(regexp):
+ pool.subscribe()
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self.__getattribute__('_name'))
+
+ def subscribe(self):
+ args = "subscription-manager subscribe --pool %s" % self.PoolId
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pool subscriptions with RHSM
+ """
+ def __init__(self, module):
+ self.module = module
+ self.products = self._load_product_list()
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self):
+ """
+ Loads the list of all pools available to the system into a data structure
+ """
+ args = "subscription-manager list --available"
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "") # strip inner spaces so labels become attribute names
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter(self, regexp='^$'):
+ '''
+ Return a list of RhsmPools whose name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
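+
+# Minimal usage sketch (illustrative; 'module' is an AnsibleModule):
+#   rhsm = Rhsm(module, username='admin', password='secret')
+#   if not rhsm.is_registered:
+#       rhsm.register(username='admin', password='secret',
+#                     autosubscribe=True, activationkey=None)
+#   rhsm.subscribe(regexp='^Red Hat')  # subscribe to every matching pool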
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py
new file mode 100644
index 00000000..93d3bfcb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 1.0
+# Copyright (C) 2018 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from omsdk.sdkinfra import sdkinfra
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare, file_share_manager
+ from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
+ from omsdk.http.sdkwsmanbase import WsManOptions
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+class iDRACConnection:
+
+ def __init__(self, module_params):
+ if not HAS_OMSDK:
+ raise ImportError("Dell EMC OMSDK library is required for this module")
+ self.idrac_ip = module_params['idrac_ip']
+ self.idrac_user = module_params['idrac_user']
+ self.idrac_pwd = module_params['idrac_password']
+ self.idrac_port = module_params['idrac_port']
+ if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)):
+ raise ValueError("hostname, username and password required")
+ self.handle = None
+ self.creds = UserCredentials(self.idrac_user, self.idrac_pwd)
+ self.pOp = WsManOptions(port=self.idrac_port)
+ self.sdk = sdkinfra()
+ if self.sdk is None:
+ msg = "Could not initialize iDRAC drivers."
+ raise RuntimeError(msg)
+
+ def __enter__(self):
+ self.sdk.importPath()
+ self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, pOptions=self.pOp)
+ if self.handle is None:
+ msg = "Could not find device driver for iDRAC with IP Address: {0}".format(self.idrac_ip)
+ raise RuntimeError(msg)
+ return self.handle
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.handle.disconnect()
+ return False
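+
+# Hedged usage sketch (parameter values are illustrative only):
+#   params = {'idrac_ip': '192.0.2.1', 'idrac_user': 'root',
+#             'idrac_password': 'calvin', 'idrac_port': 443}
+#   with iDRACConnection(params) as idrac:
+#       ...  # 'idrac' is the OMSDK driver handle returned by __enter__
+# The handle is disconnected automatically when the block exits.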
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/ome.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/ome.py
new file mode 100644
index 00000000..9d02e550
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/ome.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+
+# Dell EMC OpenManage Ansible Modules
+# Version 1.3
+# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+SESSION_RESOURCE_COLLECTION = {
+ "SESSION": "SessionService/Sessions",
+ "SESSION_ID": "SessionService/Sessions('{Id}')",
+}
+
+
+class OpenURLResponse(object):
+ """Handles HTTPResponse"""
+
+ def __init__(self, resp):
+ self.body = None
+ self.resp = resp
+ if self.resp:
+ self.body = self.resp.read()
+
+ @property
+ def json_data(self):
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ raise ValueError("Unable to parse json")
+
+ @property
+ def status_code(self):
+ return self.resp.getcode()
+
+ @property
+ def success(self):
+ return self.status_code in (200, 201, 202, 204)
+
+ @property
+ def token_header(self):
+ return self.resp.headers.get('X-Auth-Token')
+
+
+class RestOME(object):
+ """Handles OME API requests"""
+
+ def __init__(self, module_params=None, req_session=False):
+ self.module_params = module_params
+ self.hostname = self.module_params["hostname"]
+ self.username = self.module_params["username"]
+ self.password = self.module_params["password"]
+ self.port = self.module_params["port"]
+ self.req_session = req_session
+ self.session_id = None
+ self.protocol = 'https'
+ self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+
+ def _get_base_url(self):
+ """builds base url"""
+ return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)
+
+ def _build_url(self, path, query_param=None):
+ """builds complete url"""
+ url = path
+ base_uri = self._get_base_url()
+ if path:
+ url = '{0}/{1}'.format(base_uri, path)
+ if query_param:
+ url += "?{0}".format(urlencode(query_param))
+ return url
+
+ def _url_common_args_spec(self, method, api_timeout, headers=None):
+ """Creates an argument common spec"""
+ req_header = self._headers
+ if headers:
+ req_header.update(headers)
+ url_kwargs = {
+ "method": method,
+ "validate_certs": False,
+ "use_proxy": True,
+ "headers": req_header,
+ "timeout": api_timeout,
+ "follow_redirects": 'all',
+ }
+ return url_kwargs
+
+ def _args_without_session(self, method, api_timeout=30, headers=None):
+ """Creates an argument spec in case of basic authentication"""
+ url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
+ url_kwargs["url_username"] = self.username
+ url_kwargs["url_password"] = self.password
+ url_kwargs["force_basic_auth"] = True
+ return url_kwargs
+
+ def _args_with_session(self, method, api_timeout=30, headers=None):
+ """Creates an argument spec, in case of authentication with session"""
+ url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
+ url_kwargs["force_basic_auth"] = False
+ return url_kwargs
+
+ def invoke_request(self, method, path, data=None, query_param=None, headers=None,
+ api_timeout=30, dump=True):
+ """
+ Sends a request via open_url
+ Returns :class:`OpenURLResponse` object.
+ :arg method: HTTP verb to use for the request
+ :arg path: path to request without query parameter
+ :arg data: (optional) Payload to send with the request
+ :arg query_param: (optional) Dictionary of query parameters to send with request
+ :arg headers: (optional) Dictionary of HTTP Headers to send with the
+ request
+ :arg api_timeout: (optional) How long to wait for the server to send
+ data before giving up
+ :arg dump: (optional) Boolean controlling whether the payload is JSON-dumped.
+ :returns: OpenURLResponse
+ """
+ try:
+ if 'X-Auth-Token' in self._headers:
+ url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
+ else:
+ url_kwargs = self._args_without_session(method, api_timeout, headers=headers)
+ if data and dump:
+ data = json.dumps(data)
+ url = self._build_url(path, query_param=query_param)
+ resp = open_url(url, data=data, **url_kwargs)
+ resp_data = OpenURLResponse(resp)
+ except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
+ raise err
+ return resp_data
+
+ def __enter__(self):
+ """Creates sessions by passing it to header"""
+ if self.req_session:
+ payload = {'UserName': self.username,
+ 'Password': self.password,
+ 'SessionType': 'API', }
+ path = SESSION_RESOURCE_COLLECTION["SESSION"]
+ resp = self.invoke_request('POST', path, data=payload)
+ if resp and resp.success:
+ self.session_id = resp.json_data.get("Id")
+ self._headers["X-Auth-Token"] = resp.token_header
+ else:
+ msg = "Could not create the session"
+ raise ConnectionError(msg)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """Deletes a session id, which is in use for request"""
+ if self.session_id:
+ path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
+ self.invoke_request('DELETE', path)
+ return False
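+
+# Hedged usage sketch (hostname and API path are illustrative):
+#   params = {'hostname': '192.0.2.2', 'username': 'admin',
+#             'password': 'secret', 'port': 443}
+#   with RestOME(params, req_session=True) as ome:
+#       resp = ome.invoke_request('GET', 'DeviceService/Devices')
+#       if resp.success:
+#           devices = resp.json_data
+# The session created in __enter__ is deleted again in __exit__.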
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
new file mode 100644
index 00000000..297397e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
@@ -0,0 +1,78 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by
+# Ansible still belong to the author of the module, and may assign their
+# own license to the complete work.
+#
+# Copyright (C) 2017 Lenovo, Inc.
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+#
+# Contains LXCA common class
+# Lenovo xClarity Administrator (LXCA)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+try:
+ from pylxca import connect, disconnect
+ HAS_PYLXCA = True
+except ImportError:
+ HAS_PYLXCA = False
+
+
+PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module."
+
+
+def has_pylxca(module):
+ """
+ Check whether pylxca is installed
+ :param module:
+ """
+ if not HAS_PYLXCA:
+ module.fail_json(msg=PYLXCA_REQUIRED)
+
+
+LXCA_COMMON_ARGS = dict(
+ login_user=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ auth_url=dict(required=True),
+)
+
+
+class connection_object:
+ def __init__(self, module):
+ self.module = module
+
+ def __enter__(self):
+ return setup_conn(self.module)
+
+ def __exit__(self, type, value, traceback):
+ close_conn()
+
+
+def setup_conn(module):
+ """
+ This function creates a connection to LXCA
+ :param module:
+ :return: lxca connection
+ """
+ lxca_con = None
+ try:
+ lxca_con = connect(module.params['auth_url'],
+ module.params['login_user'],
+ module.params['login_password'],
+ "True")
+ except Exception as exception:
+ error_msg = '; '.join(exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+ return lxca_con
+
+
+def close_conn():
+ """
+ This function closes the connection to LXCA
+ :return: None
+ """
+ disconnect()
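+
+# Typical use from a module (sketch; 'module' is an AnsibleModule whose
+# argument spec includes LXCA_COMMON_ARGS):
+#   has_pylxca(module)
+#   with connection_object(module) as lxca_con:
+#       ...  # call pylxca commands with lxca_con
+# setup_conn() calls fail_json() on connection errors; close_conn()
+# disconnects when the with-block exits.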
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/saslprep.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/saslprep.py
new file mode 100644
index 00000000..3e16c716
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/saslprep.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from stringprep import (
+ in_table_a1,
+ in_table_b1,
+ in_table_c3,
+ in_table_c4,
+ in_table_c5,
+ in_table_c6,
+ in_table_c7,
+ in_table_c8,
+ in_table_c9,
+ in_table_c12,
+ in_table_c21_c22,
+ in_table_d1,
+ in_table_d2,
+)
+from unicodedata import normalize
+
+from ansible.module_utils.six import text_type
+
+
+def is_unicode_str(string):
+ return isinstance(string, text_type)
+
+
+def mapping_profile(string):
+ """RFC4013 Mapping profile implementation."""
+ # Regarding RFC4013,
+ # This profile specifies:
+ # - non-ASCII space characters [StringPrep, C.1.2] that can be
+ # mapped to SPACE (U+0020), and
+ # - the "commonly mapped to nothing" characters [StringPrep, B.1]
+ # that can be mapped to nothing.
+
+ tmp = []
+ for c in string:
+ # If not the "commonly mapped to nothing"
+ if not in_table_b1(c):
+ if in_table_c12(c):
+ # map non-ASCII space characters
+ # (that can be mapped) to Unicode space
+ tmp.append(u' ')
+ else:
+ tmp.append(c)
+
+ return u"".join(tmp)
+
+
+def is_ral_string(string):
+ """RFC3454 Check bidirectional category of the string"""
+ # Regarding RFC3454,
+ # Table D.1 lists the characters that belong
+ # to Unicode bidirectional categories "R" and "AL".
+ # If a string contains any RandALCat character, a RandALCat
+ # character MUST be the first character of the string, and a
+ # RandALCat character MUST be the last character of the string.
+ if in_table_d1(string[0]):
+ if not in_table_d1(string[-1]):
+ raise ValueError('RFC3454: incorrect bidirectional RandALCat string.')
+ return True
+ return False
+
+
+def prohibited_output_profile(string):
+ """RFC4013 Prohibited output profile implementation."""
+ # Implements:
+ # RFC4013, 2.3. Prohibited Output.
+ # This profile specifies the following characters as prohibited input:
+ # - Non-ASCII space characters [StringPrep, C.1.2]
+ # - ASCII control characters [StringPrep, C.2.1]
+ # - Non-ASCII control characters [StringPrep, C.2.2]
+ # - Private Use characters [StringPrep, C.3]
+ # - Non-character code points [StringPrep, C.4]
+ # - Surrogate code points [StringPrep, C.5]
+ # - Inappropriate for plain text characters [StringPrep, C.6]
+ # - Inappropriate for canonical representation characters [StringPrep, C.7]
+ # - Change display properties or deprecated characters [StringPrep, C.8]
+ # - Tagging characters [StringPrep, C.9]
+ # RFC4013, 2.4. Bidirectional Characters.
+ # RFC4013, 2.5. Unassigned Code Points.
+
+ # Determine how to handle bidirectional characters (RFC3454):
+ if is_ral_string(string):
+ # If a string contains any RandALCat characters,
+ # The string MUST NOT contain any LCat character:
+ is_prohibited_bidi_ch = in_table_d2
+ bidi_table = 'D.2'
+ else:
+ # Forbid RandALCat characters in LCat string:
+ is_prohibited_bidi_ch = in_table_d1
+ bidi_table = 'D.1'
+
+ RFC = 'RFC4013'
+ for c in string:
+ # RFC4013 2.3. Prohibited Output:
+ if in_table_c12(c):
+ raise ValueError('%s: prohibited non-ASCII space characters '
+ 'that cannot be replaced (C.1.2).' % RFC)
+ if in_table_c21_c22(c):
+ raise ValueError('%s: prohibited control characters (C.2.1).' % RFC)
+ if in_table_c3(c):
+ raise ValueError('%s: prohibited private Use characters (C.3).' % RFC)
+ if in_table_c4(c):
+ raise ValueError('%s: prohibited non-character code points (C.4).' % RFC)
+ if in_table_c5(c):
+ raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC)
+ if in_table_c6(c):
+ raise ValueError('%s: prohibited inappropriate for plain text '
+ 'characters (C.6).' % RFC)
+ if in_table_c7(c):
+ raise ValueError('%s: prohibited inappropriate for canonical '
+ 'representation characters (C.7).' % RFC)
+ if in_table_c8(c):
+ raise ValueError('%s: prohibited change display properties / '
+ 'deprecated characters (C.8).' % RFC)
+ if in_table_c9(c):
+ raise ValueError('%s: prohibited tagging characters (C.9).' % RFC)
+
+ # RFC4013, 2.4. Bidirectional Characters:
+ if is_prohibited_bidi_ch(c):
+ raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table))
+
+ # RFC4013, 2.5. Unassigned Code Points:
+ if in_table_a1(c):
+ raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC)
+
+
+def saslprep(string):
+ """RFC4013 implementation.
+ Implements "SASLprep" profile (RFC4013) of the "stringprep" algorithm (RFC3454)
+ to prepare Unicode strings representing user names and passwords for comparison.
+ Regarding the RFC4013, the "SASLprep" profile is intended to be used by
+ Simple Authentication and Security Layer (SASL) mechanisms
+ (such as PLAIN, CRAM-MD5, and DIGEST-MD5), as well as other protocols
+ exchanging simple user names and/or passwords.
+
+ Args:
+ string (unicode string): Unicode string to validate and prepare.
+
+ Returns:
+ Prepared unicode string.
+ """
+ # RFC4013: "The algorithm assumes all strings are
+ # comprised of characters from the Unicode [Unicode] character set."
+ # Validate the string is a Unicode string
+ # (text_type is the string type if PY3 and unicode otherwise):
+ if not is_unicode_str(string):
+ raise TypeError('input must be of type %s, not %s' % (text_type, type(string)))
+
+ # RFC4013: 2.1. Mapping.
+ string = mapping_profile(string)
+
+ # RFC4013: 2.2. Normalization.
+ # "This profile specifies using Unicode normalization form KC."
+ string = normalize('NFKC', string)
+ if not string:
+ return u''
+
+ # RFC4013: 2.3. Prohibited Output.
+ # RFC4013: 2.4. Bidirectional Characters.
+ # RFC4013: 2.5. Unassigned Code Points.
+ prohibited_output_profile(string)
+
+ return string
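+
+# Illustrative behaviour (not part of the module):
+#   saslprep(u'I\u00ADX')       -> u'IX' (soft hyphen is mapped to nothing, B.1)
+#   saslprep(u'user\u00A0name') -> u'user name' (non-ASCII space -> SPACE, C.1.2)
+#   saslprep(u'\u0007')         raises ValueError (ASCII control character)
+#   saslprep(b'bytes')          raises TypeError (input must be unicode)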
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/scaleway.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/scaleway.py
new file mode 100644
index 00000000..f5107fed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/scaleway.py
@@ -0,0 +1,195 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import re
+import sys
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+def scaleway_argument_spec():
+ return dict(
+ api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
+ no_log=True, aliases=['oauth_token']),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
+ api_timeout=dict(type='int', default=30, aliases=['timeout']),
+ query_parameters=dict(type='dict', default={}),
+ validate_certs=dict(default=True, type='bool'),
+ )
+
+
+def payload_from_object(scw_object):
+ return dict(
+ (k, v)
+ for k, v in scw_object.items()
+ if k != 'id' and v is not None
+ )
+
+
+class ScalewayException(Exception):
+
+ def __init__(self, message):
+ self.message = message
+
+
+# Specify a complete Link header, for validation purposes
+R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
+ (,<[^>]+>;\srel="(first|previous|next|last)")*'''
+# Specify a single relation, for iteration and string extraction purposes
+R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
+
+
+def parse_pagination_link(header):
+ if not re.match(R_LINK_HEADER, header, re.VERBOSE):
+ raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
+ else:
+ relations = header.split(',')
+ parsed_relations = {}
+ rc_relation = re.compile(R_RELATION)
+ for relation in relations:
+ match = rc_relation.match(relation)
+ if not match:
+ raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
+ data = match.groupdict()
+ parsed_relations[data['relation']] = data['target_IRI']
+ return parsed_relations
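+
+# Illustrative input/output (header value is an example):
+#   parse_pagination_link(
+#       '</servers?page=1>; rel="first",</servers?page=3>; rel="next"')
+#   -> {'first': '/servers?page=1', 'next': '/servers?page=3'}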
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if not self.body:
+ if "body" in self.info:
+ return json.loads(self.info["body"])
+ return None
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+ @property
+ def ok(self):
+ return self.status_code in (200, 201, 202, 204)
+
+
+class Scaleway(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.headers = {
+ 'X-Auth-Token': self.module.params.get('api_token'),
+ 'User-Agent': self.get_user_agent_string(module),
+ 'Content-Type': 'application/json',
+ }
+ self.name = None
+
+ def get_resources(self):
+ results = self.get('/%s' % self.name)
+
+ if not results.ok:
+ raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
+ self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
+ results.status_code, results.json['message']
+ ))
+
+ return results.json.get(self.name)
+
+ def _url_builder(self, path, params):
+ d = self.module.params.get('query_parameters')
+ if params is not None:
+ d.update(params)
+ query_string = urlencode(d, doseq=True)
+
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string)
+
+ def send(self, method, path, data=None, headers=None, params=None):
+ url = self._url_builder(path=path, params=params)
+ self.warn(url)
+
+ if headers is not None:
+ self.headers.update(headers)
+
+ if self.headers['Content-Type'] == "application/json":
+ data = self.module.jsonify(data)
+
+ resp, info = fetch_url(
+ self.module, url, data=data, headers=self.headers, method=method,
+ timeout=self.module.params.get('api_timeout')
+ )
+
+ # Exceptions in fetch_url may result in a status of -1; this ensures a proper error is reported to the user in all cases
+ if info['status'] == -1:
+ self.module.fail_json(msg=info['msg'])
+
+ return Response(resp, info)
+
+ @staticmethod
+ def get_user_agent_string(module):
+ return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
+
+ def get(self, path, data=None, headers=None, params=None):
+ return self.send(method='GET', path=path, data=data, headers=headers, params=params)
+
+ def put(self, path, data=None, headers=None, params=None):
+ return self.send(method='PUT', path=path, data=data, headers=headers, params=params)
+
+ def post(self, path, data=None, headers=None, params=None):
+ return self.send(method='POST', path=path, data=data, headers=headers, params=params)
+
+ def delete(self, path, data=None, headers=None, params=None):
+ return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)
+
+ def patch(self, path, data=None, headers=None, params=None):
+ return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)
+
+ def update(self, path, data=None, headers=None, params=None):
+ return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params)
+
+ def warn(self, x):
+ self.module.warn(str(x))
+
+
+SCALEWAY_LOCATION = {
+ 'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},
+ 'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},
+
+ 'par2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'},
+ 'EMEA-FR-PAR2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'},
+
+ 'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'},
+ 'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'},
+
+ 'waw1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'},
+ 'EMEA-PL-WAW1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'},
+}
+
+SCALEWAY_ENDPOINT = "https://api.scaleway.com"
+
+SCALEWAY_REGIONS = [
+ "fr-par",
+ "nl-ams",
+ "pl-waw",
+]
+
+SCALEWAY_ZONES = [
+ "fr-par-1",
+ "fr-par-2",
+ "nl-ams-1",
+ "pl-waw-1",
+]
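+
+# Minimal consumer sketch (illustrative; 'servers' is a hypothetical
+# resource name):
+#   module = AnsibleModule(argument_spec=scaleway_argument_spec())
+#   api = Scaleway(module)
+#   api.name = 'servers'
+#   response = api.get('/servers')
+#   if response.ok:
+#       servers = response.json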
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
new file mode 100644
index 00000000..c17dcb1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url, basic_auth_header
+
+
+class BitbucketHelper:
+ BITBUCKET_API_URL = 'https://api.bitbucket.org'
+
+ error_messages = {
+ 'required_client_id': '`client_id` must be specified as a parameter or '
+ 'BITBUCKET_CLIENT_ID environment variable',
+ 'required_client_secret': '`client_secret` must be specified as a parameter or '
+ 'BITBUCKET_CLIENT_SECRET environment variable',
+ }
+
+ def __init__(self, module):
+ self.module = module
+ self.access_token = None
+
+ @staticmethod
+ def bitbucket_argument_spec():
+ return dict(
+ client_id=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
+ client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
+ )
+
+ def check_arguments(self):
+ if self.module.params['client_id'] is None:
+ self.module.fail_json(msg=self.error_messages['required_client_id'])
+
+ if self.module.params['client_secret'] is None:
+ self.module.fail_json(msg=self.error_messages['required_client_secret'])
+
+ def fetch_access_token(self):
+ self.check_arguments()
+
+ headers = {
+ 'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret'])
+ }
+
+ info, content = self.request(
+ api_url='https://bitbucket.org/site/oauth2/access_token',
+ method='POST',
+ data='grant_type=client_credentials',
+ headers=headers,
+ )
+
+ if info['status'] == 200:
+ self.access_token = content['access_token']
+ else:
+ self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
+
+ def request(self, api_url, method, data=None, headers=None):
+ headers = headers or {}
+
+ if self.access_token:
+ headers.update({
+ 'Authorization': 'Bearer {0}'.format(self.access_token),
+ })
+
+ if isinstance(data, dict):
+ data = self.module.jsonify(data)
+ headers.update({
+ 'Content-type': 'application/json',
+ })
+
+ response, info = fetch_url(
+ module=self.module,
+ url=api_url,
+ method=method,
+ headers=headers,
+ data=data,
+ force=True,
+ )
+
+ content = {}
+
+ if response is not None:
+ body = to_text(response.read())
+ if body:
+ content = json.loads(body)
+
+ return info, content
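+
+# Hedged usage sketch (the API path is illustrative):
+#   helper = BitbucketHelper(module)
+#   helper.fetch_access_token()
+#   info, content = helper.request(
+#       api_url=BitbucketHelper.BITBUCKET_API_URL + '/2.0/repositories/acme/repo',
+#       method='GET',
+#   )
+#   if info['status'] == 200:
+#       ...  # 'content' holds the decoded JSON body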
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
new file mode 100644
index 00000000..afb1b697
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
@@ -0,0 +1,20 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2018 Luca 'remix_tj' Lorenzetto
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+emc_vnx_argument_spec = {
+ 'sp_address': dict(type='str', required=True),
+ 'sp_user': dict(type='str', required=False, default='sysadmin'),
+ 'sp_password': dict(type='str', required=False, default='sysadmin',
+ no_log=True),
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
new file mode 100644
index 00000000..47868a4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
@@ -0,0 +1,94 @@
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils import basic
+
+
+def convert_to_binary_multiple(size_with_unit):
+ if size_with_unit is None:
+ return -1
+ valid_units = ['MiB', 'GiB', 'TiB']
+ valid_unit = False
+ for unit in valid_units:
+ if size_with_unit.strip().endswith(unit):
+ valid_unit = True
+ size = size_with_unit.split(unit)[0]
+ if float(size) < 0:
+ return -1
+ if not valid_unit:
+ raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units))
+
+ size = size_with_unit.replace(" ", "").split('iB')[0]
+ size_kib = basic.human_to_bytes(size)
+ return int(size_kib / (1024 * 1024))
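+
+# Illustrative conversions (result is an int number of MiB):
+#   convert_to_binary_multiple('10 GiB') -> 10240
+#   convert_to_binary_multiple('1 TiB')  -> 1048576
+#   convert_to_binary_multiple(None)     -> -1
+#   convert_to_binary_multiple('10 KiB') raises ValueError (unit not allowed)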
+
+
+storage_system_spec = {
+ "storage_system_ip": {
+ "required": True,
+ "type": "str"
+ },
+ "storage_system_username": {
+ "required": True,
+ "type": "str",
+ "no_log": True
+ },
+ "storage_system_password": {
+ "required": True,
+ "type": "str",
+ "no_log": True
+ },
+ "secure": {
+ "type": "bool",
+ "default": False
+ }
+}
+
+
+def cpg_argument_spec():
+ spec = {
+ "state": {
+ "required": True,
+ "choices": ['present', 'absent'],
+ "type": 'str'
+ },
+ "cpg_name": {
+ "required": True,
+ "type": "str"
+ },
+ "domain": {
+ "type": "str"
+ },
+ "growth_increment": {
+ "type": "str",
+ },
+ "growth_limit": {
+ "type": "str",
+ },
+ "growth_warning": {
+ "type": "str",
+ },
+ "raid_type": {
+ "required": False,
+ "type": "str",
+ "choices": ['R0', 'R1', 'R5', 'R6']
+ },
+ "set_size": {
+ "required": False,
+ "type": "int"
+ },
+ "high_availability": {
+ "type": "str",
+ "choices": ['PORT', 'CAGE', 'MAG']
+ },
+ "disk_type": {
+ "type": "str",
+ "choices": ['FC', 'NL', 'SSD']
+ }
+ }
+ spec.update(storage_system_spec)
+ return spec
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/univention_umc.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/univention_umc.py
new file mode 100644
index 00000000..c1d8b777
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/univention_umc.py
@@ -0,0 +1,278 @@
+# -*- coding: UTF-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""Univention Corporate Server (UCS) access module.
+
+Provides the following functions for working with a UCS server.
+
+ - ldap_search(filter, base=None, attr=None)
+ Search the LDAP via Univention's LDAP wrapper (ULDAP)
+
+ - config_registry()
+ Return the UCR registry object
+
+ - base_dn()
+ Return the configured Base DN according to the UCR
+
+ - uldap()
+ Return a handle to the ULDAP LDAP wrapper
+
+ - umc_module_for_add(module, container_dn, superordinate=None)
+ Return a UMC module for creating a new object of the given type
+
+ - umc_module_for_edit(module, object_dn, superordinate=None)
+ Return a UMC module for editing an existing object of the given type
+
+
+Any other module is not part of the "official" API and may change at any time.
+"""
+
+import re
+
+
+__all__ = [
+ 'ldap_search',
+ 'config_registry',
+ 'base_dn',
+ 'uldap',
+ 'umc_module_for_add',
+ 'umc_module_for_edit',
+]
+
+
+_singletons = {}
+
+
+def ldap_module():
+ import ldap as orig_ldap
+ return orig_ldap
+
+
+def _singleton(name, constructor):
+ if name in _singletons:
+ return _singletons[name]
+ _singletons[name] = constructor()
+ return _singletons[name]
+
+
+def config_registry():
+
+ def construct():
+ import univention.config_registry
+ ucr = univention.config_registry.ConfigRegistry()
+ ucr.load()
+ return ucr
+
+ return _singleton('config_registry', construct)
+
+
+def base_dn():
+ return config_registry()['ldap/base']
+
+
+def uldap():
+ "Return a configured univention uldap object"
+
+ def construct():
+ try:
+ secret_file = open('/etc/ldap.secret', 'r')
+ bind_dn = 'cn=admin,{0}'.format(base_dn())
+ except IOError: # pragma: no cover
+ secret_file = open('/etc/machine.secret', 'r')
+ bind_dn = config_registry()["ldap/hostdn"]
+ pwd_line = secret_file.readline()
+ pwd = re.sub('\n', '', pwd_line)
+
+ import univention.admin.uldap
+ return univention.admin.uldap.access(
+ host=config_registry()['ldap/master'],
+ base=base_dn(),
+ binddn=bind_dn,
+ bindpw=pwd,
+ start_tls=1,
+ )
+
+ return _singleton('uldap', construct)
+
+
+def config():
+ def construct():
+ import univention.admin.config
+ return univention.admin.config.config()
+ return _singleton('config', construct)
+
+
+def init_modules():
+ def construct():
+ import univention.admin.modules
+ univention.admin.modules.update()
+ return True
+ return _singleton('modules_initialized', construct)
+
+
+def position_base_dn():
+ def construct():
+ import univention.admin.uldap
+ return univention.admin.uldap.position(base_dn())
+ return _singleton('position_base_dn', construct)
+
+
+def ldap_dn_tree_parent(dn, count=1):
+ dn_array = dn.split(',')
+ dn_array[0:count] = []
+ return ','.join(dn_array)
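+
+# Examples (illustrative DNs):
+#   ldap_dn_tree_parent('cn=users,dc=example,dc=com')           -> 'dc=example,dc=com'
+#   ldap_dn_tree_parent('cn=a,cn=b,dc=example,dc=com', count=2) -> 'dc=example,dc=com'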
+
+
+def ldap_search(filter, base=None, attr=None):
+ """Replaces uldaps search and uses a generator.
+ !! Arguments are not the same."""
+
+ if base is None:
+ base = base_dn()
+ msgid = uldap().lo.lo.search(
+ base,
+ ldap_module().SCOPE_SUBTREE,
+ filterstr=filter,
+ attrlist=attr
+ )
+ # A try/finally used to wrap this loop, but a Python bug caused it to
+ # swallow KeyboardInterrupt, so the abandon() below is only reached on
+ # a normal exit.
+ while True:
+ result_type, result_data = uldap().lo.lo.result(msgid, all=0)
+ if not result_data:
+ break
+ if result_type is ldap_module().RES_SEARCH_RESULT: # pragma: no cover
+ break
+ else:
+ if result_type is ldap_module().RES_SEARCH_ENTRY:
+ for res in result_data:
+ yield res
+ uldap().lo.lo.abandon(msgid)
+
+
+def module_by_name(module_name_):
+ """Returns an initialized UMC module, identified by the given name.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ If the module does not exist, a KeyError is raised.
+
+ The modules are cached, so they won't be re-initialized
+ in subsequent calls.
+ """
+
+ def construct():
+ import univention.admin.modules
+ init_modules()
+ module = univention.admin.modules.get(module_name_)
+ univention.admin.modules.init(uldap(), position_base_dn(), module)
+ return module
+
+ return _singleton('module/%s' % module_name_, construct)
+
+
+def get_umc_admin_objects():
+ """Convenience accessor for getting univention.admin.objects.
+
+ This implements delayed importing, so the univention.* modules
+ are not loaded until this function is called.
+ """
+ import univention.admin
+ return univention.admin.objects
+
+
+def umc_module_for_add(module, container_dn, superordinate=None):
+ """Returns an UMC module object prepared for creating a new entry.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ The container_dn MUST be the dn of the container (not of the object to
+ be created itself!).
+ """
+ mod = module_by_name(module)
+
+ position = position_base_dn()
+ position.setDn(container_dn)
+
+ # config, ldap objects from common module
+ obj = mod.object(config(), uldap(), position, superordinate=superordinate)
+ obj.open()
+
+ return obj
+
+
+def umc_module_for_edit(module, object_dn, superordinate=None):
+ """Returns an UMC module object prepared for editing an existing entry.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ The object_dn MUST be the dn of the object itself, not the container!
+ """
+ mod = module_by_name(module)
+
+ objects = get_umc_admin_objects()
+
+ position = position_base_dn()
+ position.setDn(ldap_dn_tree_parent(object_dn))
+
+ obj = objects.get(
+ mod,
+ config(),
+ uldap(),
+ position=position,
+ superordinate=superordinate,
+ dn=object_dn
+ )
+ obj.open()
+
+ return obj
+
+
+def create_containers_and_parents(container_dn):
+ """Create a container and if needed the parents containers"""
+ import univention.admin.uexceptions as uexcp
+ if not container_dn.startswith("cn="):
+ raise AssertionError()
+ try:
+ parent = ldap_dn_tree_parent(container_dn)
+ obj = umc_module_for_add(
+ 'container/cn',
+ parent
+ )
+ obj['name'] = container_dn.split(',')[0].split('=')[1]
+ obj['description'] = "container created by import"
+ except uexcp.ldapError:
+ create_containers_and_parents(parent)
+ obj = umc_module_for_add(
+ 'container/cn',
+ parent
+ )
+ obj['name'] = container_dn.split(',')[0].split('=')[1]
+ obj['description'] = "container created by import"
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/utm_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/utm_utils.py
new file mode 100644
index 00000000..0966dc50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/utm_utils.py
@@ -0,0 +1,216 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class UTMModuleConfigurationError(Exception):
+
+ def __init__(self, msg, **args):
+ super(UTMModuleConfigurationError, self).__init__(self, msg)
+ self.msg = msg
+ self.module_fail_args = args
+
+ def do_fail(self, module):
+ module.fail_json(msg=self.msg, other=self.module_fail_args)
+
+
+class UTMModule(AnsibleModule):
+ """
+ This is a helper class to construct any UTM module. It automatically adds the utm_host, utm_port, utm_token,
+ utm_protocol, validate_certs and state fields to the module's argument spec. To implement your own Sophos UTM
+ module, initialize this UTMModule class and define the payload fields that are needed for your module.
+ See the other modules like utm_aaa_group for an example.
+ """
+
+ def __init__(self, argument_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
+ supports_check_mode=False, required_if=None):
+ default_specs = dict(
+ headers=dict(type='dict', required=False, default={}),
+ utm_host=dict(type='str', required=True),
+ utm_port=dict(type='int', default=4444),
+ utm_token=dict(type='str', required=True, no_log=True),
+ utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
+ validate_certs=dict(type='bool', required=False, default=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
+ mutually_exclusive, required_together, required_one_of,
+ add_file_common_args, supports_check_mode, required_if)
+
+ def _merge_specs(self, default_specs, custom_specs):
+ result = default_specs.copy()
+ result.update(custom_specs)
+ return result
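+
+ # Sketch of a module built on this class (field names are illustrative):
+ #   module = UTMModule(argument_spec=dict(
+ #       name=dict(type='str', required=True),
+ #       address=dict(type='str', required=True),
+ #   ))
+ # The default specs above (utm_host, utm_token, state, ...) are merged in
+ # automatically; custom specs win on key collisions.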
+
+
+class UTM:
+
+ def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
+ """
+ Initialize UTM Class
+ :param module: The Ansible module
+ :param endpoint: The corresponding endpoint to the module
+ :param change_relevant_keys: The keys of the object to check for changes
+ :param info_only: When implementing an info module, set this to true. Will allow access to the info method only
+ """
+ self.info_only = info_only
+ self.module = module
+ self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
+ module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"
+
+ """
+ The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
+ """
+ self.change_relevant_keys = change_relevant_keys
+ self.module.params['url_username'] = 'token'
+ self.module.params['url_password'] = module.params.get('utm_token')
+        if not all(elem in module.params.keys() for elem in self.change_relevant_keys):
+ raise UTMModuleConfigurationError(
+ "The keys " + to_native(
+ self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
+ module.params.keys()))
+
+ def execute(self):
+ try:
+ if not self.info_only:
+ if self.module.params.get('state') == 'present':
+ self._add()
+ elif self.module.params.get('state') == 'absent':
+ self._remove()
+ else:
+ self._info()
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ def _info(self):
+ """
+ returns the info for an object in utm
+ """
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if info["status"] >= 400:
+            self.module.fail_json(msg=json.loads(info["body"]))
+ else:
+ if result is None:
+ self.module.exit_json(changed=False)
+ else:
+ self.module.exit_json(result=result, changed=False)
+
+ def _add(self):
+ """
+        adds or updates an object on utm
+ """
+
+ combined_headers = self._combine_headers()
+
+ is_changed = False
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if info["status"] >= 400:
+            self.module.fail_json(msg=json.loads(info["body"]))
+ else:
+ data_as_json_string = self.module.jsonify(self.module.params)
+ if result is None:
+ response, info = fetch_url(self.module, self.request_url, method="POST",
+ headers=combined_headers,
+ data=data_as_json_string)
+ if info["status"] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ is_changed = True
+ result = self._clean_result(json.loads(response.read()))
+ else:
+ if self._is_object_changed(self.change_relevant_keys, self.module, result):
+ response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
+ headers=combined_headers,
+ data=data_as_json_string)
+ if info['status'] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ is_changed = True
+ result = self._clean_result(json.loads(response.read()))
+ self.module.exit_json(result=result, changed=is_changed)
+
+ def _combine_headers(self):
+ """
+ This will combine a header default with headers that come from the module declaration
+ :return: A combined headers dict
+ """
+ default_headers = {"Accept": "application/json", "Content-type": "application/json"}
+ if self.module.params.get('headers') is not None:
+ result = default_headers.copy()
+ result.update(self.module.params.get('headers'))
+ else:
+ result = default_headers
+ return result
+
+ def _remove(self):
+ """
+ removes an object from utm
+ """
+ is_changed = False
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if result is not None:
+ response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
+ headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
+ data=self.module.jsonify(self.module.params))
+ if info["status"] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ else:
+ is_changed = True
+ self.module.exit_json(changed=is_changed)
+
+ def _lookup_entry(self, module, request_url):
+ """
+        Looks up an existing entry by name
+        :param module: The Ansible module
+        :param request_url: The URL of the endpoint to query
+        :return: Tuple of (info, result); result is the matching entry or None
+ """
+ response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
+ result = None
+ if response is not None:
+ results = json.loads(response.read())
+ result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
+ return info, result
+
+ def _clean_result(self, result):
+ """
+        Removes irrelevant fields from the result
+ :param result: The result from the query
+ :return: The modified result
+ """
+ del result['utm_host']
+ del result['utm_port']
+ del result['utm_token']
+ del result['utm_protocol']
+ del result['validate_certs']
+ del result['url_username']
+ del result['url_password']
+ del result['state']
+ return result
+
+ def _is_object_changed(self, keys, module, result):
+ """
+        Checks whether the object differs from the module parameters
+        :param keys: The keys that will determine if an object is changed
+        :param module: The module
+        :param result: The result from the query
+        :return: True if any relevant key differs, else False
+ """
+ for key in keys:
+ if module.params.get(key) != result[key]:
+ return True
+ return False
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/vexata.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/vexata.py
new file mode 100644
index 00000000..e5c9bdb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/vexata.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+HAS_VEXATAPI = True
+try:
+ from vexatapi.vexata_api_proxy import VexataAPIProxy
+except ImportError:
+ HAS_VEXATAPI = False
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import env_fallback
+
+VXOS_VERSION = None
+
+
+def get_version(iocs_json):
+ if not iocs_json:
+ raise Exception('Invalid IOC json')
+    active = list(filter(lambda x: x['mgmtRole'], iocs_json))
+ if not active:
+ raise Exception('Unable to detect active IOC')
+ active = active[0]
+ ver = active['swVersion']
+ if ver[0] != 'v':
+ raise Exception('Illegal version string')
+ ver = ver[1:ver.find('-')]
+ ver = map(int, ver.split('.'))
+ return tuple(ver)
+
+
+def get_array(module):
+ """Return storage array object or fail"""
+ global VXOS_VERSION
+ array = module.params['array']
+ user = module.params.get('user', None)
+ password = module.params.get('password', None)
+ validate = module.params.get('validate_certs')
+
+ if not HAS_VEXATAPI:
+ module.fail_json(msg='vexatapi library is required for this module. '
+ 'To install, use `pip install vexatapi`')
+
+ if user and password:
+ system = VexataAPIProxy(array, user, password, verify_cert=validate)
+ else:
+ module.fail_json(msg='The user/password are required to be passed in to '
+ 'the module as arguments or by setting the '
+ 'VEXATA_USER and VEXATA_PASSWORD environment variables.')
+ try:
+ if system.test_connection():
+ VXOS_VERSION = get_version(system.iocs())
+ return system
+ else:
+ module.fail_json(msg='Test connection to array failed.')
+ except Exception as e:
+ module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e)))
+
+
+def argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+ return dict(
+ array=dict(type='str',
+ required=True),
+ user=dict(type='str',
+ fallback=(env_fallback, ['VEXATA_USER'])),
+ password=dict(type='str',
+ no_log=True,
+ fallback=(env_fallback, ['VEXATA_PASSWORD'])),
+ validate_certs=dict(type='bool',
+ required=False,
+ default=False),
+ )
+
+
+def required_together():
+ """Return the default list used for the required_together argument to AnsibleModule"""
+ return [['user', 'password']]
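+
+# Illustrative sketch (hypothetical module code): the helpers above are meant
+# to be combined when constructing the AnsibleModule.
+#
+#   spec = argument_spec()
+#   spec.update(name=dict(type='str', required=True))
+#   module = AnsibleModule(argument_spec=spec,
+#                          required_together=required_together())
+#   array = get_array(module)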
+
+
+def size_to_MiB(size):
+ """Convert a '<integer>[MGT]' string to MiB, return -1 on error."""
+ quant = size[:-1]
+ exponent = size[-1]
+ if not quant.isdigit() or exponent not in 'MGT':
+ return -1
+ quant = int(quant)
+ if exponent == 'G':
+ quant <<= 10
+ elif exponent == 'T':
+ quant <<= 20
+ return quant
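+
+# Worked examples for the conversion above: size_to_MiB('4G') == 4096,
+# size_to_MiB('1T') == 1048576, size_to_MiB('512M') == 512; malformed input
+# such as '4GB' or 'G4' returns -1.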
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/xenserver.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/xenserver.py
new file mode 100644
index 00000000..dbc6a0ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/xenserver.py
@@ -0,0 +1,862 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import atexit
+import time
+import re
+import traceback
+
+XENAPI_IMP_ERR = None
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ HAS_XENAPI = False
+ XENAPI_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+
+def xenserver_common_argument_spec():
+ return dict(
+ hostname=dict(type='str',
+ aliases=['host', 'pool'],
+ required=False,
+ default='localhost',
+ fallback=(env_fallback, ['XENSERVER_HOST']),
+ ),
+ username=dict(type='str',
+ aliases=['user', 'admin'],
+ required=False,
+ default='root',
+ fallback=(env_fallback, ['XENSERVER_USER'])),
+ password=dict(type='str',
+ aliases=['pass', 'pwd'],
+ required=False,
+ no_log=True,
+ fallback=(env_fallback, ['XENSERVER_PASSWORD'])),
+ validate_certs=dict(type='bool',
+ required=False,
+ default=True,
+ fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])),
+ )
+
+
+def xapi_to_module_vm_power_state(power_state):
+ """Maps XAPI VM power states to module VM power states."""
+ module_power_state_map = {
+ "running": "poweredon",
+ "halted": "poweredoff",
+ "suspended": "suspended",
+ "paused": "paused"
+ }
+
+ return module_power_state_map.get(power_state)
+
+
+def module_to_xapi_vm_power_state(power_state):
+ """Maps module VM power states to XAPI VM power states."""
+ vm_power_state_map = {
+ "poweredon": "running",
+ "poweredoff": "halted",
+ "restarted": "running",
+ "suspended": "suspended",
+ "shutdownguest": "halted",
+ "rebootguest": "running",
+ }
+
+ return vm_power_state_map.get(power_state)
+
+
+def is_valid_ip_addr(ip_addr):
+ """Validates given string as IPv4 address for given string.
+
+ Args:
+ ip_addr (str): string to validate as IPv4 address.
+
+ Returns:
+ bool: True if string is valid IPv4 address, else False.
+ """
+ ip_addr_split = ip_addr.split('.')
+
+ if len(ip_addr_split) != 4:
+ return False
+
+ for ip_addr_octet in ip_addr_split:
+ if not ip_addr_octet.isdigit():
+ return False
+
+ ip_addr_octet_int = int(ip_addr_octet)
+
+ if ip_addr_octet_int < 0 or ip_addr_octet_int > 255:
+ return False
+
+ return True
+
+
+def is_valid_ip_netmask(ip_netmask):
+ """Validates given string as IPv4 netmask.
+
+ Args:
+ ip_netmask (str): string to validate as IPv4 netmask.
+
+ Returns:
+ bool: True if string is valid IPv4 netmask, else False.
+ """
+ ip_netmask_split = ip_netmask.split('.')
+
+ if len(ip_netmask_split) != 4:
+ return False
+
+ valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255']
+
+ for ip_netmask_octet in ip_netmask_split:
+ if ip_netmask_octet not in valid_octet_values:
+ return False
+
+ if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
+ return False
+ elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
+ return False
+ elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0':
+ return False
+
+ return True
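+
+# Examples: '255.255.255.0' and '255.255.240.0' pass the contiguity checks
+# above, while a non-contiguous mask such as '255.0.255.0' is rejected.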
+
+
+def is_valid_ip_prefix(ip_prefix):
+ """Validates given string as IPv4 prefix.
+
+ Args:
+ ip_prefix (str): string to validate as IPv4 prefix.
+
+ Returns:
+ bool: True if string is valid IPv4 prefix, else False.
+ """
+ if not ip_prefix.isdigit():
+ return False
+
+ ip_prefix_int = int(ip_prefix)
+
+ if ip_prefix_int < 0 or ip_prefix_int > 32:
+ return False
+
+ return True
+
+
+def ip_prefix_to_netmask(ip_prefix, skip_check=False):
+ """Converts IPv4 prefix to netmask.
+
+ Args:
+ ip_prefix (str): IPv4 prefix to convert.
+ skip_check (bool): Skip validation of IPv4 prefix
+ (default: False). Use if you are sure IPv4 prefix is valid.
+
+ Returns:
+ str: IPv4 netmask equivalent to given IPv4 prefix if
+ IPv4 prefix is valid, else an empty string.
+ """
+ if skip_check:
+ ip_prefix_valid = True
+ else:
+ ip_prefix_valid = is_valid_ip_prefix(ip_prefix)
+
+ if ip_prefix_valid:
+ return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]])
+ else:
+ return ""
+
+
+def ip_netmask_to_prefix(ip_netmask, skip_check=False):
+ """Converts IPv4 netmask to prefix.
+
+ Args:
+ ip_netmask (str): IPv4 netmask to convert.
+ skip_check (bool): Skip validation of IPv4 netmask
+ (default: False). Use if you are sure IPv4 netmask is valid.
+
+ Returns:
+ str: IPv4 prefix equivalent to given IPv4 netmask if
+ IPv4 netmask is valid, else an empty string.
+ """
+ if skip_check:
+ ip_netmask_valid = True
+ else:
+ ip_netmask_valid = is_valid_ip_netmask(ip_netmask)
+
+ if ip_netmask_valid:
+ return str(sum([bin(int(i)).count("1") for i in ip_netmask.split(".")]))
+ else:
+ return ""
+
+
+def is_valid_ip6_addr(ip6_addr):
+ """Validates given string as IPv6 address.
+
+ Args:
+ ip6_addr (str): string to validate as IPv6 address.
+
+ Returns:
+ bool: True if string is valid IPv6 address, else False.
+ """
+ ip6_addr = ip6_addr.lower()
+ ip6_addr_split = ip6_addr.split(':')
+
+ if ip6_addr_split[0] == "":
+ ip6_addr_split.pop(0)
+
+ if ip6_addr_split[-1] == "":
+ ip6_addr_split.pop(-1)
+
+ if len(ip6_addr_split) > 8:
+ return False
+
+ if ip6_addr_split.count("") > 1:
+ return False
+ elif ip6_addr_split.count("") == 1:
+ ip6_addr_split.remove("")
+ else:
+ if len(ip6_addr_split) != 8:
+ return False
+
+ ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$')
+
+ for ip6_addr_hextet in ip6_addr_split:
+ if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)):
+ return False
+
+ return True
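+
+# Examples: 'fd00::1' and '2001:db8:0:0:0:0:0:1' are accepted; 'fd00::1::2'
+# (more than one '::') and addresses with more than 8 hextets are rejected.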
+
+
+def is_valid_ip6_prefix(ip6_prefix):
+ """Validates given string as IPv6 prefix.
+
+ Args:
+ ip6_prefix (str): string to validate as IPv6 prefix.
+
+ Returns:
+ bool: True if string is valid IPv6 prefix, else False.
+ """
+ if not ip6_prefix.isdigit():
+ return False
+
+ ip6_prefix_int = int(ip6_prefix)
+
+ if ip6_prefix_int < 0 or ip6_prefix_int > 128:
+ return False
+
+ return True
+
+
+def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""):
+ """Finds and returns a reference to arbitrary XAPI object.
+
+ An object is searched by using either name (name_label) or UUID
+ with UUID taken precedence over name.
+
+ Args:
+ module: Reference to Ansible module object.
+ name (str): Name (name_label) of an object to search for.
+ uuid (str): UUID of an object to search for.
+ obj_type (str): Any valid XAPI object type. See XAPI docs.
+ fail (bool): Should function fail with error message if object
+ is not found or exit silently (default: True). The function
+ always fails if multiple objects with same name are found.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ XAPI reference to found object or None if object is not found
+ and fail=False.
+ """
+ xapi_session = XAPI.connect(module)
+
+ if obj_type in ["template", "snapshot"]:
+ real_obj_type = "VM"
+ elif obj_type == "home server":
+ real_obj_type = "host"
+ elif obj_type == "ISO image":
+ real_obj_type = "VDI"
+ else:
+ real_obj_type = obj_type
+
+ obj_ref = None
+
+ # UUID has precedence over name.
+ if uuid:
+ try:
+ # Find object by UUID. If no object is found using given UUID,
+ # an exception will be generated.
+ obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,))
+ except XenAPI.Failure as f:
+ if fail:
+ module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid))
+ elif name:
+ try:
+ # Find object by name (name_label).
+ obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,))
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ # If obj_ref_list is empty.
+ if not obj_ref_list:
+ if fail:
+ module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name))
+ # If obj_ref_list contains multiple object references.
+ elif len(obj_ref_list) > 1:
+ module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name))
+ # The obj_ref_list contains only one object reference.
+ else:
+ obj_ref = obj_ref_list[0]
+ else:
+ module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type))
+
+ return obj_ref
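+
+# Illustrative call (name and prefix values are hypothetical): find a VM by
+# name and fail with a prefixed error message if it does not exist.
+#
+#   vm_ref = get_object_ref(module, name="my-vm", obj_type="VM",
+#                           msg_prefix="VM search: ")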
+
+
+def gather_vm_params(module, vm_ref):
+ """Gathers all VM parameters available in XAPI database.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+
+ Returns:
+ dict: VM parameters.
+ """
+ # We silently return empty vm_params if bad vm_ref was supplied.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ return {}
+
+ xapi_session = XAPI.connect(module)
+
+ try:
+ vm_params = xapi_session.xenapi.VM.get_record(vm_ref)
+
+ # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.
+
+ # Affinity.
+ if vm_params['affinity'] != "OpaqueRef:NULL":
+ vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
+ vm_params['affinity'] = vm_affinity
+ else:
+ vm_params['affinity'] = {}
+
+ # VBDs.
+ vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]
+
+ # List of VBDs is usually sorted by userdevice but we sort just
+ # in case. We need this list sorted by userdevice so that we can
+ # make positional pairing with module.params['disks'].
+ vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
+ vm_params['VBDs'] = vm_vbd_params_list
+
+ # VDIs.
+ for vm_vbd_params in vm_params['VBDs']:
+ if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
+ vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
+ else:
+ vm_vdi_params = {}
+
+ vm_vbd_params['VDI'] = vm_vdi_params
+
+ # VIFs.
+ vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]
+
+ # List of VIFs is usually sorted by device but we sort just
+ # in case. We need this list sorted by device so that we can
+ # make positional pairing with module.params['networks'].
+ vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
+ vm_params['VIFs'] = vm_vif_params_list
+
+ # Networks.
+ for vm_vif_params in vm_params['VIFs']:
+ if vm_vif_params['network'] != "OpaqueRef:NULL":
+ vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
+ else:
+ vm_network_params = {}
+
+ vm_vif_params['network'] = vm_network_params
+
+ # Guest metrics.
+ if vm_params['guest_metrics'] != "OpaqueRef:NULL":
+ vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
+ vm_params['guest_metrics'] = vm_guest_metrics
+ else:
+ vm_params['guest_metrics'] = {}
+
+ # Detect customization agent.
+ xenserver_version = get_xenserver_version(module)
+
+ if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
+ "feature-static-ip-setting" in vm_params['guest_metrics']['other']):
+ vm_params['customization_agent'] = "native"
+ else:
+ vm_params['customization_agent'] = "custom"
+
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return vm_params
+
+
+def gather_vm_facts(module, vm_params):
+ """Gathers VM facts.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+
+ Returns:
+ dict: VM facts.
+ """
+ # We silently return empty vm_facts if no vm_params are available.
+ if not vm_params:
+ return {}
+
+ xapi_session = XAPI.connect(module)
+
+ # Gather facts.
+ vm_facts = {
+ "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
+ "name": vm_params['name_label'],
+ "name_desc": vm_params['name_description'],
+ "uuid": vm_params['uuid'],
+ "is_template": vm_params['is_a_template'],
+ "folder": vm_params['other_config'].get('folder', ''),
+ "hardware": {
+ "num_cpus": int(vm_params['VCPUs_max']),
+ "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
+ "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
+ },
+ "disks": [],
+ "cdrom": {},
+ "networks": [],
+ "home_server": vm_params['affinity'].get('name_label', ''),
+ "domid": vm_params['domid'],
+ "platform": vm_params['platform'],
+ "other_config": vm_params['other_config'],
+ "xenstore_data": vm_params['xenstore_data'],
+ "customization_agent": vm_params['customization_agent'],
+ }
+
+ for vm_vbd_params in vm_params['VBDs']:
+ if vm_vbd_params['type'] == "Disk":
+ vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])
+
+ vm_disk_params = {
+ "size": int(vm_vbd_params['VDI']['virtual_size']),
+ "name": vm_vbd_params['VDI']['name_label'],
+ "name_desc": vm_vbd_params['VDI']['name_description'],
+ "sr": vm_disk_sr_params['name_label'],
+ "sr_uuid": vm_disk_sr_params['uuid'],
+ "os_device": vm_vbd_params['device'],
+ "vbd_userdevice": vm_vbd_params['userdevice'],
+ }
+
+ vm_facts['disks'].append(vm_disk_params)
+ elif vm_vbd_params['type'] == "CD":
+ if vm_vbd_params['empty']:
+ vm_facts['cdrom'].update(type="none")
+ else:
+ vm_facts['cdrom'].update(type="iso")
+ vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])
+
+ for vm_vif_params in vm_params['VIFs']:
+ vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})
+
+ vm_network_params = {
+ "name": vm_vif_params['network']['name_label'],
+ "mac": vm_vif_params['MAC'],
+ "vif_device": vm_vif_params['device'],
+ "mtu": vm_vif_params['MTU'],
+ "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''),
+ "prefix": "",
+ "netmask": "",
+ "gateway": "",
+ "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" %
+ vm_vif_params['device'])],
+ "prefix6": "",
+ "gateway6": "",
+ }
+
+ if vm_params['customization_agent'] == "native":
+ if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])
+
+ vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']
+
+ if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]
+
+ vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']
+
+ elif vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = vm_params['xenstore_data']
+
+ for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
+ vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "")
+
+ vm_facts['networks'].append(vm_network_params)
+
+ return vm_facts
+
+
+def set_vm_power_state(module, vm_ref, power_state, timeout=300):
+ """Controls VM power state.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+ power_state (str): Power state to put VM into. Accepted values:
+
+ - poweredon
+ - poweredoff
+ - restarted
+ - suspended
+ - shutdownguest
+ - rebootguest
+
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ tuple (bool, str): Bool element is True if VM power state has
+ changed by calling this function, else False. Str element carries
+ a value of resulting power state as defined by XAPI - 'running',
+ 'halted' or 'suspended'.
+ """
+ # Fail if we don't have a valid VM reference.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ power_state = power_state.replace('_', '').replace('-', '').lower()
+ vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)
+
+ state_changed = False
+
+ try:
+ # Get current state of the VM.
+ vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
+
+ if vm_power_state_current != power_state:
+ if power_state == "poweredon":
+ if not module.check_mode:
+ # VM can be in either halted, suspended, paused or running state.
+ # For VM to be in running state, start has to be called on halted,
+ # resume on suspended and unpause on paused VM.
+ if vm_power_state_current == "poweredoff":
+ xapi_session.xenapi.VM.start(vm_ref, False, False)
+ elif vm_power_state_current == "suspended":
+ xapi_session.xenapi.VM.resume(vm_ref, False, False)
+ elif vm_power_state_current == "paused":
+ xapi_session.xenapi.VM.unpause(vm_ref)
+ elif power_state == "poweredoff":
+ if not module.check_mode:
+ # hard_shutdown will halt VM regardless of current state.
+ xapi_session.xenapi.VM.hard_shutdown(vm_ref)
+ elif power_state == "restarted":
+ # hard_reboot will restart VM only if VM is in paused or running state.
+ if vm_power_state_current in ["paused", "poweredon"]:
+ if not module.check_mode:
+ xapi_session.xenapi.VM.hard_reboot(vm_ref)
+ else:
+ module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current)
+ elif power_state == "suspended":
+ # running state is required for suspend.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ xapi_session.xenapi.VM.suspend(vm_ref)
+ else:
+ module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current)
+ elif power_state == "shutdownguest":
+ # running state is required for guest shutdown.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ if timeout == 0:
+ xapi_session.xenapi.VM.clean_shutdown(vm_ref)
+ else:
+ task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
+ task_result = wait_for_task(module, task_ref, timeout)
+
+ if task_result:
+ module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result)
+ else:
+ module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current)
+ elif power_state == "rebootguest":
+ # running state is required for guest reboot.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ if timeout == 0:
+ xapi_session.xenapi.VM.clean_reboot(vm_ref)
+ else:
+ task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref)
+ task_result = wait_for_task(module, task_ref, timeout)
+
+ if task_result:
+ module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result)
+ else:
+ module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current)
+ else:
+ module.fail_json(msg="Requested VM power state '%s' is unsupported!" % power_state)
+
+ state_changed = True
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return (state_changed, vm_power_state_resulting)
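+
+# Illustrative call (values hypothetical): gracefully shut down a guest,
+# waiting up to 60 seconds for the async task to finish.
+#
+#   changed, resulting_state = set_vm_power_state(module, vm_ref,
+#                                                 "shutdownguest", timeout=60)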
+
+
+def wait_for_task(module, task_ref, timeout=300):
+ """Waits for async XAPI task to finish.
+
+ Args:
+ module: Reference to Ansible module object.
+ task_ref (str): XAPI reference to task.
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ str: failure message on failure, else an empty string.
+ """
+ # Fail if we don't have a valid task reference.
+ if not task_ref or task_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ interval = 2
+
+ result = ""
+
+ # If we have to wait indefinitely, make time_left larger than 0 so we can
+ # enter while loop.
+ if timeout == 0:
+ time_left = 1
+ else:
+ time_left = timeout
+
+ try:
+ while time_left > 0:
+ task_status = xapi_session.xenapi.task.get_status(task_ref).lower()
+
+ if task_status == "pending":
+ # Task is still running.
+ time.sleep(interval)
+
+ # We decrease time_left only if we don't wait indefinitely.
+ if timeout != 0:
+ time_left -= interval
+
+ continue
+ elif task_status == "success":
+ # Task is done.
+ break
+ else:
+ # Task failed.
+ result = task_status
+ break
+ else:
+ # We timed out.
+ result = "timeout"
+
+ xapi_session.xenapi.task.destroy(task_ref)
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return result
+
+
+def wait_for_vm_ip_address(module, vm_ref, timeout=300):
+ """Waits for VM to acquire an IP address.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ dict: VM guest metrics as retrieved by
+ VM_guest_metrics.get_record() XAPI method with info
+ on IP address acquired.
+ """
+ # Fail if we don't have a valid VM reference.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ vm_guest_metrics = {}
+
+ try:
+ # We translate VM power state string so that error message can be
+ # consistent with module VM power states.
+ vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
+
+ if vm_power_state != 'poweredon':
+ module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state)
+
+ interval = 2
+
+ # If we have to wait indefinitely, make time_left larger than 0 so we can
+ # enter while loop.
+ if timeout == 0:
+ time_left = 1
+ else:
+ time_left = timeout
+
+ while time_left > 0:
+ vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref)
+
+ if vm_guest_metrics_ref != "OpaqueRef:NULL":
+ vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
+ vm_ips = vm_guest_metrics['networks']
+
+ if "0/ip" in vm_ips:
+ break
+
+ time.sleep(interval)
+
+ # We decrease time_left only if we don't wait indefinitely.
+ if timeout != 0:
+ time_left -= interval
+ else:
+ # We timed out.
+ module.fail_json(msg="Timed out waiting for VM IP address!")
+
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return vm_guest_metrics
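+
+# Illustrative call (timeout value hypothetical): block until the guest agent
+# reports an address, then read it from the returned guest metrics.
+#
+#   vm_guest_metrics = wait_for_vm_ip_address(module, vm_ref, timeout=120)
+#   vm_ip = vm_guest_metrics['networks'].get('0/ip')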
+
+
+def get_xenserver_version(module):
+ """Returns XenServer version.
+
+ Args:
+ module: Reference to Ansible module object.
+
+ Returns:
+ list: Element [0] is major version. Element [1] is minor version.
+ Element [2] is update number.
+ """
+ xapi_session = XAPI.connect(module)
+
+ host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)
+
+ try:
+ xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')]
+ except ValueError:
+ xenserver_version = [0, 0, 0]
+
+ return xenserver_version
+
+
+class XAPI(object):
+ """Class for XAPI session management."""
+ _xapi_session = None
+
+ @classmethod
+ def connect(cls, module, disconnect_atexit=True):
+ """Establishes XAPI connection and returns session reference.
+
+ If no existing session is available, establishes a new one
+ and returns it, else returns existing one.
+
+ Args:
+ module: Reference to Ansible module object.
+ disconnect_atexit (bool): Controls if method should
+ register atexit handler to disconnect from XenServer
+ on module exit (default: True).
+
+ Returns:
+ XAPI session reference.
+ """
+ if cls._xapi_session is not None:
+ return cls._xapi_session
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ ignore_ssl = not module.params['validate_certs']
+
+ if hostname == 'localhost':
+ cls._xapi_session = XenAPI.xapi_local()
+ username = ''
+ password = ''
+ else:
+ # If scheme is not specified we default to http:// because https://
+ # is problematic in most setups.
+ if not hostname.startswith("http://") and not hostname.startswith("https://"):
+ hostname = "http://%s" % hostname
+
+ try:
+ # ignore_ssl is supported in XenAPI library from XenServer 7.2
+ # SDK onward but there is no way to tell which version we
+ # are using. TypeError will be raised if ignore_ssl is not
+ # supported. Additionally, ignore_ssl requires Python 2.7.9
+ # or newer.
+ cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl)
+ except TypeError:
+ # Try without ignore_ssl.
+ cls._xapi_session = XenAPI.Session(hostname)
+
+ if not password:
+ password = ''
+
+ try:
+ cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
+ except XenAPI.Failure as f:
+ module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details))
+
+ # Disabling atexit should be used in special cases only.
+ if disconnect_atexit:
+ atexit.register(cls._xapi_session.logout)
+
+ return cls._xapi_session
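+
+# Illustrative usage (inside a module built with xenserver_common_argument_spec):
+# the first XAPI.connect(module) call logs in and caches the session;
+# subsequent calls reuse it.
+#
+#   xapi_session = XAPI.connect(module)
+#   all_vm_refs = xapi_session.xenapi.VM.get_all()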
+
+
+class XenServerObject(object):
+ """Base class for all XenServer objects.
+
+ This class contains active XAPI session reference and common
+ attributes with useful info about XenServer host/pool.
+
+ Attributes:
+ module: Reference to Ansible module object.
+ xapi_session: Reference to XAPI session.
+ pool_ref (str): XAPI reference to a pool currently connected to.
+ default_sr_ref (str): XAPI reference to a pool default
+ Storage Repository.
+        host_ref (str): XAPI reference to a host currently connected to.
+        xenserver_version (list of int): Contains XenServer major, minor,
+            and update version numbers.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerObject using common module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ if not HAS_XENAPI:
+ module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR)
+
+ self.module = module
+ self.xapi_session = XAPI.connect(module)
+
+ try:
+ self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0]
+ self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref)
+ self.xenserver_version = get_xenserver_version(module)
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aerospike_migrations.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
new file mode 100644
index 00000000..27bfc1a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+"""short_description: Check or wait for migrations between nodes"""
+
+# Copyright: (c) 2018, Albert Autin
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: aerospike_migrations
+short_description: Check or wait for migrations between nodes
+description:
+ - This can be used to check for migrations in a cluster.
+ This makes it easy to do a rolling upgrade/update on Aerospike nodes.
+    - If waiting for migrations is not desired, simply poll until
+      port 3000 is available or C(asinfo -v status) returns ok.
+author: "Albert Autin (@Alb0t)"
+options:
+ host:
+ description:
+ - Which host do we use as seed for info connection
+ required: False
+ type: str
+ default: localhost
+ port:
+ description:
+ - Which port to connect to Aerospike on (service port)
+ required: False
+ type: int
+ default: 3000
+ connect_timeout:
+ description:
+ - How long to try to connect before giving up (milliseconds)
+ required: False
+ type: int
+ default: 1000
+ consecutive_good_checks:
+ description:
+ - How many times should the cluster report "no migrations"
+ consecutively before returning OK back to ansible?
+ required: False
+ type: int
+ default: 3
+ sleep_between_checks:
+ description:
+ - How long to sleep between each check (seconds).
+ required: False
+ type: int
+ default: 60
+ tries_limit:
+ description:
+ - How many times do we poll before giving up and failing?
+ default: 300
+ required: False
+ type: int
+ local_only:
+ description:
+ - Do you wish to only check for migrations on the local node
+ before returning, or do you want all nodes in the cluster
+ to finish before returning?
+ required: True
+ type: bool
+ min_cluster_size:
+ description:
+            - The check will return bad until the cluster size is met
+              or until tries are exhausted.
+ required: False
+ type: int
+ default: 1
+ fail_on_cluster_change:
+ description:
+            - Fail if the cluster key changes.
+              If something else is changing the cluster, we may want to fail.
+ required: False
+ type: bool
+ default: True
+ migrate_tx_key:
+ description:
+ - The metric key used to determine if we have tx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_tx_partitions_remaining
+ migrate_rx_key:
+ description:
+ - The metric key used to determine if we have rx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_rx_partitions_remaining
+ target_cluster_size:
+ description:
+            - When all aerospike builds in the cluster are version 4.3 or
+              newer, the C(cluster-stable) info command will be used.
+ Inside this command, you can optionally specify what the target
+ cluster size is - but it is not necessary. You can still rely on
+ min_cluster_size if you don't want to use this option.
+ - If this option is specified on a cluster that has at least 1
+ host <4.3 then it will be ignored until the min version reaches
+ 4.3.
+ required: False
+ type: int
+'''
+EXAMPLES = '''
+# check for migrations on local node
+- name: Wait for migrations on local node before proceeding
+ community.general.aerospike_migrations:
+ host: "localhost"
+ connect_timeout: 2000
+ consecutive_good_checks: 5
+ sleep_between_checks: 15
+ tries_limit: 600
+ local_only: False
+
+# example playbook:
+---
+- name: Upgrade aerospike
+ hosts: all
+ become: true
+ serial: 1
+ tasks:
+ - name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - python
+ - python-pip
+ - python-setuptools
+ state: latest
+ - name: Setup aerospike
+ ansible.builtin.pip:
+ name: aerospike
+# check for migrations every (sleep_between_checks)
+# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
+# Will exit if any exception, which can be caused by bad nodes,
+# nodes not returning data, or other reasons.
+# Maximum runtime before giving up in this case will be:
+# Tries Limit * Sleep Between Checks * delay * retries
+ - name: Wait for aerospike migrations
+ community.general.aerospike_migrations:
+ local_only: True
+ sleep_between_checks: 1
+ tries_limit: 5
+ consecutive_good_checks: 3
+ fail_on_cluster_change: true
+ min_cluster_size: 3
+ target_cluster_size: 4
+ register: migrations_check
+ until: migrations_check is succeeded
+ changed_when: false
+ delay: 60
+ retries: 120
+ - name: Another thing
+ ansible.builtin.shell: |
+ echo foo
+ - name: Reboot
+ ansible.builtin.reboot:
+'''
+
+RETURN = '''
+# Returns only a success/failure result. Changed is always false.
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+LIB_FOUND_ERR = None
+try:
+ import aerospike
+ from time import sleep
+ import re
+except ImportError as ie:
+ LIB_FOUND = False
+ LIB_FOUND_ERR = traceback.format_exc()
+else:
+ LIB_FOUND = True
+
+
+def run_module():
+ """run ansible module"""
+ module_args = dict(
+ host=dict(type='str', required=False, default='localhost'),
+ port=dict(type='int', required=False, default=3000),
+ connect_timeout=dict(type='int', required=False, default=1000),
+ consecutive_good_checks=dict(type='int', required=False, default=3),
+ sleep_between_checks=dict(type='int', required=False, default=60),
+ tries_limit=dict(type='int', required=False, default=300),
+ local_only=dict(type='bool', required=True),
+ min_cluster_size=dict(type='int', required=False, default=1),
+ target_cluster_size=dict(type='int', required=False, default=None),
+ fail_on_cluster_change=dict(type='bool', required=False, default=True),
+ migrate_tx_key=dict(type='str', required=False,
+ default="migrate_tx_partitions_remaining"),
+ migrate_rx_key=dict(type='str', required=False,
+ default="migrate_rx_partitions_remaining")
+ )
+
+ result = dict(
+ changed=False,
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+ if not LIB_FOUND:
+ module.fail_json(msg=missing_required_lib('aerospike'),
+ exception=LIB_FOUND_ERR)
+
+ try:
+ if module.check_mode:
+ has_migrations, skip_reason = False, None
+ else:
+ migrations = Migrations(module)
+ has_migrations, skip_reason = migrations.has_migs(
+ module.params['local_only']
+ )
+
+ if has_migrations:
+ module.fail_json(msg="Failed.", skip_reason=skip_reason)
+ except Exception as e:
+ module.fail_json(msg="Error: {0}".format(e))
+
+ module.exit_json(**result)
+
+
+class Migrations:
+ """ Check or wait for migrations between nodes """
+
+ def __init__(self, module):
+ self.module = module
+ self._client = self._create_client().connect()
+ self._nodes = {}
+ self._update_nodes_list()
+ self._cluster_statistics = {}
+ self._update_cluster_statistics()
+ self._namespaces = set()
+ self._update_cluster_namespace_list()
+ self._build_list = set()
+ self._update_build_list()
+ self._start_cluster_key = \
+ self._cluster_statistics[self._nodes[0]]['cluster_key']
+
+ def _create_client(self):
+ """ TODO: add support for auth, tls, and other special features
+ I won't use those features, so I'll wait until somebody complains
+ or does it for me (Cross fingers)
+ create the client object"""
+ config = {
+ 'hosts': [
+ (self.module.params['host'], self.module.params['port'])
+ ],
+ 'policies': {
+ 'timeout': self.module.params['connect_timeout']
+ }
+ }
+ return aerospike.client(config)
+
+ def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
+ """delimiter is for separate stats that come back, NOT for kv
+ separation which is ="""
+ if node is None: # If no node passed, use the first one (local)
+ node = self._nodes[0]
+ data = self._client.info_node(cmd, node)
+ data = data.split("\t")
+ if len(data) != 1 and len(data) != 2:
+ self.module.fail_json(
+ msg="Unexpected number of values returned in info command: " +
+ str(len(data))
+ )
+        # data will be in format 'command\toutput'
+ data = data[-1]
+ data = data.rstrip("\n\r")
+ data_arr = data.split(delimiter)
+
+ # some commands don't return in kv format
+        # so we don't want a dict from those.
+ if '=' in data:
+ retval = dict(
+ metric.split("=", 1) for metric in data_arr
+ )
+ else:
+ # if only 1 element found, and not kv, return just the value.
+ if len(data_arr) == 1:
+ retval = data_arr[0]
+ else:
+ retval = data_arr
+ return retval
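+
+    # Worked example for the helper above (hypothetical data): a response of
+    # 'statistics\tcluster_key=ABC123;cluster_size=3\n' yields
+    # {'cluster_key': 'ABC123', 'cluster_size': '3'}, while a bare
+    # 'build\t4.5.0.5\n' yields just '4.5.0.5'.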
+
+ def _update_build_list(self):
+ """creates self._build_list which is a unique list
+ of build versions."""
+ self._build_list = set()
+ for node in self._nodes:
+ build = self._info_cmd_helper('build', node)
+ self._build_list.add(build)
+
+ # just checks to see if the version is 4.3 or greater
+ def _can_use_cluster_stable(self):
+ # if version <4.3 we can't use cluster-stable info cmd
+ # regex hack to check for versions beginning with 0-3 or
+ # beginning with 4.0,4.1,4.2
+ if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
+ return False
+ return True
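+
+    # For example, builds '3.15.1' and '4.2.0' fail the check above, while
+    # '4.3.0' and '5.0.0' permit use of the cluster-stable command.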
+
+ def _update_cluster_namespace_list(self):
+ """ make a unique list of namespaces
+ TODO: does this work on a rolling namespace add/deletion?
+        thankfully if it doesn't, we don't need this on builds >=4.3"""
+ self._namespaces = set()
+ for node in self._nodes:
+ namespaces = self._info_cmd_helper('namespaces', node)
+ for namespace in namespaces:
+ self._namespaces.add(namespace)
+
+ def _update_cluster_statistics(self):
+ """create a dict of nodes with their related stats """
+ self._cluster_statistics = {}
+ for node in self._nodes:
+ self._cluster_statistics[node] = \
+ self._info_cmd_helper('statistics', node)
+
+ def _update_nodes_list(self):
+ """get a fresh list of all the nodes"""
+ self._nodes = self._client.get_nodes()
+ if not self._nodes:
+ self.module.fail_json("Failed to retrieve at least 1 node.")
+
+ def _namespace_has_migs(self, namespace, node=None):
+ """returns a True or False.
+ Does the namespace have migrations for the node passed?
+ If no node passed, uses the local node or the first one in the list"""
+ namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
+ try:
+ namespace_tx = \
+ int(namespace_stats[self.module.params['migrate_tx_key']])
+ namespace_rx = \
+                int(namespace_stats[self.module.params['migrate_rx_key']])
+ except KeyError:
+ self.module.fail_json(
+ msg="Did not find partition remaining key:" +
+ self.module.params['migrate_tx_key'] +
+ " or key:" +
+ self.module.params['migrate_rx_key'] +
+ " in 'namespace/" +
+ namespace +
+ "' output."
+ )
+ except TypeError:
+ self.module.fail_json(
+ msg="namespace stat returned was not numerical"
+ )
+ return namespace_tx != 0 or namespace_rx != 0
+
+ def _node_has_migs(self, node=None):
+ """just calls namespace_has_migs and
+ if any namespace has migs returns true"""
+ migs = 0
+ self._update_cluster_namespace_list()
+ for namespace in self._namespaces:
+ if self._namespace_has_migs(namespace, node):
+ migs += 1
+ return migs != 0
+
+ def _cluster_key_consistent(self):
+ """create a dictionary to store what each node
+ returns the cluster key as. we should end up with only 1 dict key,
+ with the key being the cluster key."""
+ cluster_keys = {}
+ for node in self._nodes:
+ cluster_key = self._cluster_statistics[node][
+ 'cluster_key']
+ if cluster_key not in cluster_keys:
+ cluster_keys[cluster_key] = 1
+ else:
+ cluster_keys[cluster_key] += 1
+ if len(cluster_keys.keys()) == 1 and \
+ self._start_cluster_key in cluster_keys:
+ return True
+ return False
+
+ def _cluster_migrates_allowed(self):
+ """ensure all nodes have 'migrate_allowed' in their stats output"""
+ for node in self._nodes:
+ node_stats = self._info_cmd_helper('statistics', node)
+ allowed = node_stats['migrate_allowed']
+ if allowed == "false":
+ return False
+ return True
+
+ def _cluster_has_migs(self):
+ """calls node_has_migs for each node"""
+ migs = 0
+ for node in self._nodes:
+ if self._node_has_migs(node):
+ migs += 1
+ if migs == 0:
+ return False
+ return True
+
+ def _has_migs(self, local):
+ if local:
+ return self._local_node_has_migs()
+ return self._cluster_has_migs()
+
+ def _local_node_has_migs(self):
+ return self._node_has_migs(None)
+
+ def _is_min_cluster_size(self):
+ """checks that all nodes in the cluster are returning the
+ minimum cluster size specified in their statistics output"""
+ sizes = set()
+ for node in self._cluster_statistics:
+ sizes.add(int(self._cluster_statistics[node]['cluster_size']))
+
+        if (len(sizes)) > 1:  # if we are getting more than 1 size, let's say no
+ return False
+ if (min(sizes)) >= self.module.params['min_cluster_size']:
+ return True
+ return False
+
+ def _cluster_stable(self):
+ """Added 4.3:
+ cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
+ Returns the current 'cluster_key' when the following are satisfied:
+
+ If 'size' is specified then the target node's 'cluster-size'
+ must match size.
+ If 'ignore-migrations' is either unspecified or 'false' then
+ the target node's migrations counts must be zero for the provided
+ 'namespace' or all namespaces if 'namespace' is not provided."""
+ cluster_key = set()
+ cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
+ cmd = "cluster-stable:"
+ target_cluster_size = self.module.params['target_cluster_size']
+ if target_cluster_size is not None:
+ cmd = cmd + "size=" + str(target_cluster_size) + ";"
+ for node in self._nodes:
+ try:
+ cluster_key.add(self._info_cmd_helper(cmd, node))
+ except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception
+ if 'unstable-cluster' in e.msg:
+ return False
+ raise e
+ if len(cluster_key) == 1:
+ return True
+ return False
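+
+    # With target_cluster_size=4 (hypothetical value), the info command sent
+    # to each node is 'cluster-stable:size=4;'; the check passes only when
+    # every node reports the same cluster key.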
+
+ def _cluster_good_state(self):
+ """checks a few things to make sure we're OK to say the cluster
+        has no migs. It could be in an unhealthy condition that does not allow
+ migs, or a split brain"""
+ if self._cluster_key_consistent() is not True:
+ return False, "Cluster key inconsistent."
+ if self._is_min_cluster_size() is not True:
+ return False, "Cluster min size not reached."
+ if self._cluster_migrates_allowed() is not True:
+ return False, "migrate_allowed is false somewhere."
+ return True, "OK."
+
+ def has_migs(self, local=True):
+ """returns a boolean, False if no migrations otherwise True"""
+ consecutive_good = 0
+ try_num = 0
+ skip_reason = list()
+ while \
+ try_num < int(self.module.params['tries_limit']) and \
+ consecutive_good < \
+ int(self.module.params['consecutive_good_checks']):
+
+ self._update_nodes_list()
+ self._update_cluster_statistics()
+
+ # These checks are outside of the while loop because
+ # we probably want to skip & sleep instead of failing entirely
+ stable, reason = self._cluster_good_state()
+ if stable is not True:
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + reason
+ )
+ else:
+ if self._can_use_cluster_stable():
+ if self._cluster_stable():
+ consecutive_good += 1
+ else:
+ consecutive_good = 0
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " cluster_stable"
+ )
+ elif self._has_migs(local):
+ # print("_has_migs")
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " migrations"
+ )
+ consecutive_good = 0
+ else:
+ consecutive_good += 1
+ if consecutive_good == self.module.params[
+ 'consecutive_good_checks']:
+ break
+ try_num += 1
+ sleep(self.module.params['sleep_between_checks'])
+ # print(skip_reason)
+ if consecutive_good == self.module.params['consecutive_good_checks']:
+ return False, None
+ return True, skip_reason
+
+
+def main():
+ """main method for ansible module"""
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/airbrake_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
new file mode 100644
index 00000000..3e7938bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+author:
+- "Bruce Pennypacker (@bpennypacker)"
+- "Patrick Humpal (@phumpal)"
+short_description: Notify airbrake about app deployments
+description:
+ - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
+    - Parameter I(token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
+options:
+ project_id:
+ description:
+ - Airbrake PROJECT_ID
+ required: false
+ type: str
+ version_added: '0.2.0'
+ project_key:
+ description:
+ - Airbrake PROJECT_KEY.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ environment:
+ description:
+ - The airbrake environment name, typically 'production', 'staging', etc.
+ required: true
+ type: str
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ type: str
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ type: str
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision from version control was deployed
+ required: false
+ type: str
+ version:
+ description:
+ - A string identifying what version was deployed
+ required: false
+ type: str
+ version_added: '1.0.0'
+ url:
+ description:
+ - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
+ required: false
+ default: "https://api.airbrake.io/api/v4/projects/"
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ token:
+ description:
+ - This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
+ required: false
+ type: str
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify airbrake about an app deployment
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: '4.2'
+
+- name: Notify airbrake about an app deployment, using git hash as revision
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
+ version: '0.2.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=False, no_log=True, type='str'),
+ project_id=dict(required=False, no_log=True, type='str'),
+ project_key=dict(required=False, no_log=True, type='str'),
+ environment=dict(required=True, type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ version=dict(required=False, type='str'),
+ url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ required_together=[('project_id', 'project_key')],
+ mutually_exclusive=[('project_id', 'token')],
+ )
+
+    # Build dict of params
+ params = {}
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if module.params["token"]:
+ # v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys
+ if module.params["environment"]:
+ params["deploy[rails_env]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[scm_repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[scm_revision]"] = module.params["revision"]
+
+ # version not supported in v2 API; omit
+
+ module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. Please remove "
+ "it and use 'project_id' and 'project_key' instead",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+
+ params["api_key"] = module.params["token"]
+
+ # Allow sending to Airbrake compliant v2 APIs
+ if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/':
+ url = 'https://api.airbrake.io/deploys.txt'
+ else:
+ url = module.params["url"]
+
+ # Send the data to airbrake
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+
+ if module.params["project_id"] and module.params["project_key"]:
+ # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
+ if module.params["environment"]:
+ params["environment"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["username"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["repository"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["revision"] = module.params["revision"]
+
+ if module.params["version"]:
+ params["version"] = module.params["version"]
+
+ # Build deploy url
+ url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
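+        # e.g. https://api.airbrake.io/api/v4/projects/12345/deploys?key=AAAAAA
+        # (project_id and project_key values here are hypothetical)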
+ json_body = module.jsonify(params)
+
+ # Build header
+ headers = {'Content-Type': 'application/json'}
+
+ # Notify Airbrake of deploy
+ response, info = fetch_url(module, url, data=json_body,
+ headers=headers, method='POST')
+
+ if info['status'] == 200 or info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
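For reference, the v4 request that main() assembles above reduces to one JSON POST. Below is a minimal offline sketch of that payload, assuming placeholder credentials (the module itself sends it with fetch_url and treats HTTP 200/201 as success):

    import json

    project_id, project_key = '12345', 'AAAAAA'   # hypothetical values
    url = 'https://api.airbrake.io/api/v4/projects/%s/deploys?key=%s' % (project_id, project_key)
    body = json.dumps({
        'environment': 'staging',
        'username': 'ansible',
        'revision': 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15',
    })
    print('POST', url)   # sent with a Content-Type: application/json header
    print(body)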
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_devices.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_devices.py
new file mode 100644
index 00000000..89468059
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_devices.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_devices
+short_description: Manages AIX devices
+description:
+- This module discovers, defines, removes and modifies attributes of AIX devices.
+options:
+ attributes:
+ description:
+ - A list of device attributes.
+ type: dict
+ device:
+ description:
+ - The name of the device.
+ - C(all) is valid to rescan all C(available) devices (AIX cfgmgr command).
+ type: str
+ force:
+ description:
+ - Forces action.
+ type: bool
+ default: no
+ recursive:
+ description:
+ - Removes or defines a device and children devices.
+ type: bool
+ default: no
+ state:
+ description:
+ - Controls the device state.
+ - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
+ - C(removed) (alias C(absent)) removes a device.
+ - C(defined) changes the device to the Defined state.
+ type: str
+ choices: [ available, defined, removed ]
+ default: available
+'''
+
+EXAMPLES = r'''
+- name: Scan new devices
+ community.general.aix_devices:
+ device: all
+ state: available
+
+- name: Scan new virtual devices (vio0)
+ community.general.aix_devices:
+ device: vio0
+ state: available
+
+- name: Remove IP alias from en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ delalias4: 10.0.0.100,255.255.255.0
+
+- name: Remove ent2
+ community.general.aix_devices:
+ device: ent2
+ state: removed
+
+- name: Put device en2 in Defined
+ community.general.aix_devices:
+ device: en2
+ state: defined
+
+- name: Remove ent4 (nonexistent)
+ community.general.aix_devices:
+ device: ent4
+ state: removed
+
+- name: Put device en4 in Defined (nonexistent)
+ community.general.aix_devices:
+ device: en4
+ state: defined
+
+- name: Put vscsi1 and child devices in Defined state.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: defined
+
+- name: Remove vscsi1 and child devices.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: removed
+
+- name: Change en1 mtu to 9000 and disable arp.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+ mtu: 9000
+ arp: 'off'
+ state: available
+
+- name: Configure IP, netmask and set en1 up.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+ netaddr: 192.168.0.100
+ netmask: 255.255.255.0
+ state: up
+ state: available
+
+- name: Add IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ alias4: 10.0.0.100,255.255.255.0
+ state: available
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_device(module, device):
+ """
+ Check if the device already exists and return its state.
+ Args:
+ module: Ansible module.
+ device: device to be checked.
+
+ Returns: bool, device state
+
+ """
+ lsdev_cmd = module.get_bin_path('lsdev', True)
+ rc, lsdev_out, err = module.run_command([lsdev_cmd, '-C', '-l', device])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
+
+ if lsdev_out:
+ device_state = lsdev_out.split()[1]
+ return True, device_state
+
+ device_state = None
+ return False, device_state
+
+
+def _check_device_attr(module, device, attr):
+ """
+
+ Args:
+ module: Ansible module.
+ device: device to check attributes.
+ attr: attribute to be checked.
+
+ Returns:
+
+ """
+ lsattr_cmd = module.get_bin_path('lsattr', True)
+ rc, lsattr_out, err = module.run_command([lsattr_cmd, '-El', device, '-a', attr])
+
+ hidden_attrs = ['delalias4', 'delalias6']
+
+ if rc == 255:
+
+ if attr in hidden_attrs:
+ current_param = ''
+ else:
+ current_param = None
+
+ return current_param
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
+
+ current_param = lsattr_out.split()[1]
+ return current_param
+
+
+def discover_device(module, device):
+ """ Discover AIX devices."""
+ cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
+
+ cmd = [cfgmgr_cmd]
+ if device is not None and device != 'all':
+ # Restrict the scan to the named device; 'all' or no device means a full cfgmgr run.
+ cmd += ['-l', device]
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ rc, cfgmgr_out, err = module.run_command(cmd)
+ msg = cfgmgr_out
+
+ return changed, msg
+
+
+def change_device_attr(module, attributes, device, force):
+ """ Change AIX device attribute. """
+
+ attr_changed = []
+ attr_not_changed = []
+ attr_invalid = []
+ chdev_cmd = module.get_bin_path('chdev', True)
+
+ for attr in list(attributes.keys()):
+ new_param = attributes[attr]
+ current_param = _check_device_attr(module, device, attr)
+
+ if current_param is None:
+ attr_invalid.append(attr)
+
+ elif current_param != new_param:
+ cmd = [chdev_cmd, '-l', device, '-a', "%s=%s" % (attr, attributes[attr])]
+ if force:
+ cmd.append(force)
+
+ if not module.check_mode:
+ rc, chdev_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to run chdev.", rc=rc, err=err)
+
+ attr_changed.append(attributes[attr])
+ else:
+ attr_not_changed.append(attributes[attr])
+
+ if len(attr_changed) > 0:
+ changed = True
+ attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
+ else:
+ changed = False
+ attr_changed_msg = ''
+
+ if len(attr_not_changed) > 0:
+ attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
+ else:
+ attr_not_changed_msg = ''
+
+ if len(attr_invalid) > 0:
+ attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
+ else:
+ attr_invalid_msg = ''
+
+ msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
+
+ return changed, msg
+
+
+def remove_device(module, device, force, recursive, state):
+ """ Puts device in defined state or removes device. """
+
+ state_opt = {
+ 'removed': '-d',
+ 'absent': '-d',
+ 'defined': ''
+ }
+
+ recursive_opt = {
+ True: '-R',
+ False: ''
+ }
+
+ recursive = recursive_opt[recursive]
+ state = state_opt[state]
+
+ changed = True
+ msg = ''
+ rmdev_cmd = module.get_bin_path('rmdev', True)
+
+ if not module.check_mode:
+ cmd = [rmdev_cmd, '-l', device]
+ if recursive:
+ cmd.append(recursive)
+ if state:
+ # '-d' actually deletes the device definition; without it rmdev only moves it to Defined.
+ cmd.append(state)
+ if force:
+ cmd.append(force)
+
+ rc, rmdev_out, err = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)
+
+ msg = rmdev_out
+
+ return changed, msg
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ attributes=dict(type='dict'),
+ device=dict(type='str'),
+ force=dict(type='bool', default=False),
+ recursive=dict(type='bool', default=False),
+ state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
+ ),
+ supports_check_mode=True,
+ )
+
+ force_opt = {
+ True: '-f',
+ False: '',
+ }
+
+ attributes = module.params['attributes']
+ device = module.params['device']
+ force = force_opt[module.params['force']]
+ recursive = module.params['recursive']
+ state = module.params['state']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'available' or state == 'present':
+ if attributes:
+ # change attributes on device
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ # discovery devices (cfgmgr)
+ if device and device != 'all':
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ # run cfgmgr on specific device
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ elif state == 'removed' or state == 'absent' or state == 'defined':
+ if not device:
+ result['msg'] = "device is required to removed or defined state."
+
+ else:
+ # Remove device
+ check_device, device_state = _check_device(module, device)
+ if check_device:
+ if state == 'defined' and device_state == 'Defined':
+ result['changed'] = False
+ result['msg'] = 'Device %s already in Defined' % device
+
+ else:
+ result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
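The parameter handling in remove_device() above amounts to a small mapping from module options onto rmdev flags. A standalone sketch of that mapping (the helper name rmdev_args is illustrative only, not part of the module):

    def rmdev_args(device, state, recursive, force):
        # '-d' deletes the device definition; without it rmdev only moves
        # the device to the Defined state. '-R' recurses over child devices.
        args = ['rmdev', '-l', device]
        if recursive:
            args.append('-R')
        if state in ('removed', 'absent'):
            args.append('-d')
        if force:
            args.append('-f')
        return args

    print(rmdev_args('vscsi1', 'removed', True, False))
    # ['rmdev', '-l', 'vscsi1', '-R', '-d']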
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_filesystem.py
new file mode 100644
index 00000000..58a5c25d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_filesystem.py
@@ -0,0 +1,567 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Kairo Araujo (@kairoaraujo)
+module: aix_filesystem
+short_description: Configure LVM and NFS file systems for AIX
+description:
+ - This module creates, removes, mounts and unmounts LVM and NFS file
+ systems on AIX using C(/etc/filesystems).
+ - For LVM file systems, resizing is also supported.
+options:
+ account_subsystem:
+ description:
+ - Specifies whether the file system is to be processed by the accounting subsystem.
+ type: bool
+ default: no
+ attributes:
+ description:
+ - Specifies attributes for the file system, separated by commas.
+ type: list
+ elements: str
+ default: agblksize='4096',isnapshot='no'
+ auto_mount:
+ description:
+ - File system is automatically mounted at system restart.
+ type: bool
+ default: yes
+ device:
+ description:
+ - Logical volume (LV) device name or remote export device to create an NFS file system.
+ - It is used to create a file system on an already existing logical volume or the exported NFS file system.
+ - If not specified, a new logical volume name will be created following the AIX standards (LVM).
+ type: str
+ fs_type:
+ description:
+ - Specifies the virtual file system type.
+ type: str
+ default: jfs2
+ permissions:
+ description:
+ - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+ type: str
+ choices: [ ro, rw ]
+ default: rw
+ mount_group:
+ description:
+ - Specifies the mount group.
+ type: str
+ filesystem:
+ description:
+ - Specifies the mount point, which is the directory where the file system will be mounted.
+ type: str
+ required: true
+ nfs_server:
+ description:
+ - Specifies a Network File System (NFS) server.
+ type: str
+ rm_mount_point:
+ description:
+ - Removes the mount point directory when used with state C(absent).
+ type: bool
+ default: no
+ size:
+ description:
+ - Specifies the file system size.
+ - If the file system is already C(present), it will be resized.
+ - Values are in 512-byte blocks, Megabytes or Gigabytes. A value with an M
+ suffix is in Megabytes; with a G suffix, in Gigabytes.
+ - Without an M or G suffix, the value is interpreted as 512-byte blocks.
+ - A leading "+" adds the value to the current size.
+ - A leading "-" subtracts the value from the current size.
+ - Without a "+" or "-", the value is the new total size.
+ - Sizes are rounded following the AIX LVM standards.
+ type: str
+ state:
+ description:
+ - Controls the file system state.
+ - C(present) checks whether the file system exists, and creates or resizes it.
+ - C(absent) removes an existing file system if it is already C(unmounted).
+ - C(mounted) mounts the file system if it is not already mounted.
+ - C(unmounted) unmounts the file system if it is mounted.
+ type: str
+ choices: [ absent, mounted, present, unmounted ]
+ default: present
+ vg:
+ description:
+ - Specifies an existing volume group (VG).
+ type: str
+notes:
+ - For more C(attributes), please check the AIX "crfs" manual.
+'''
+
+EXAMPLES = r'''
+- name: Create filesystem in a previously defined logical volume.
+ community.general.aix_filesystem:
+ device: testlv
+ filesystem: /testfs
+ state: present
+
+- name: Create NFS filesystem from nfshost.
+ community.general.aix_filesystem:
+ device: /home/ftp
+ nfs_server: nfshost
+ filesystem: /home/ftp
+ state: present
+
+- name: Create a new file system without a previously defined logical volume.
+ community.general.aix_filesystem:
+ filesystem: /newfs
+ size: 1G
+ state: present
+ vg: datavg
+
+- name: Unmount /testfs.
+ community.general.aix_filesystem:
+ filesystem: /testfs
+ state: unmounted
+
+- name: Resize /mksysb to +512M.
+ community.general.aix_filesystem:
+ filesystem: /mksysb
+ size: +512M
+ state: present
+
+- name: Resize /mksysb to 11G.
+ community.general.aix_filesystem:
+ filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resize /mksysb to -2G.
+ community.general.aix_filesystem:
+ filesystem: /mksysb
+ size: -2G
+ state: present
+
+- name: Remove NFS filesystem /home/ftp.
+ community.general.aix_filesystem:
+ filesystem: /home/ftp
+ rm_mount_point: yes
+ state: absent
+
+- name: Remove /newfs.
+ community.general.aix_filesystem:
+ filesystem: /newfs
+ rm_mount_point: yes
+ state: absent
+'''
+
+RETURN = r'''
+changed:
+ description: Whether the aix_filesystem action changed anything (true or false).
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._mount import ismount
+import re
+
+
+def _fs_exists(module, filesystem):
+ """
+ Check if file system already exists on /etc/filesystems.
+
+ :param module: Ansible module.
+ :param filesystem: filesystem name.
+ :return: True or False.
+ """
+ lsfs_cmd = module.get_bin_path('lsfs', True)
+ rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem))
+ if rc == 1:
+ if re.findall("No record matching", err):
+ return False
+
+ module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
+
+ return True
+
+
+def _check_nfs_device(module, nfs_host, device):
+ """
+ Validate if NFS server is exporting the device (remote export).
+
+ :param module: Ansible module.
+ :param nfs_host: nfs_host parameter, NFS server.
+ :param device: device parameter, remote export.
+ :return: True or False.
+ """
+ showmount_cmd = module.get_bin_path('showmount', True)
+ rc, showmount_out, err = module.run_command(
+ "%s -a %s" % (showmount_cmd, nfs_host))
+ if rc != 0:
+ module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
+ else:
+ showmount_data = showmount_out.splitlines()
+ for line in showmount_data:
+ if line.split(':')[1] == device:
+ return True
+
+ return False
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group %s is in varyoff state." % vg
+ return False, msg
+ elif vg in current_active_vgs:
+ msg = "Volume group %s is in varyon state." % vg
+ return True, msg
+ else:
+ msg = "Volume group %s does not exist." % vg
+ return None, msg
+
+
+def resize_fs(module, filesystem, size):
+ """ Resize LVM file system. """
+
+ chfs_cmd = module.get_bin_path('chfs', True)
+ if not module.check_mode:
+ rc, chfs_out, err = module.run_command('%s -a size="%s" %s' % (chfs_cmd, size, filesystem))
+
+ if rc == 28:
+ changed = False
+ return changed, chfs_out
+ elif rc != 0:
+ if re.findall('Maximum allocation for logical', err):
+ changed = False
+ return changed, err
+ else:
+ module.fail_json(msg="Failed to run chfs. Error message: %s" % err)
+
+ else:
+ if re.findall('The filesystem size is already', chfs_out):
+ changed = False
+ else:
+ changed = True
+
+ return changed, chfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
+ account_subsystem, permissions, nfs_server, attributes):
+ """ Create LVM file system or NFS remote mount point. """
+
+ attributes = ' -a '.join(attributes)
+
+ # Parameters definition.
+ account_subsys_opt = {
+ True: '-t yes',
+ False: '-t no'
+ }
+
+ if nfs_server is not None:
+ auto_mount_opt = {
+ True: '-A',
+ False: '-a'
+ }
+
+ else:
+ auto_mount_opt = {
+ True: '-A yes',
+ False: '-A no'
+ }
+
+ if size is None:
+ size = ''
+ else:
+ size = "-a size=%s" % size
+
+ if device is None:
+ device = ''
+ else:
+ device = "-d %s" % device
+
+ if vg is None:
+ vg = ''
+ else:
+ vg_state, msg = _validate_vg(module, vg)
+ if vg_state:
+ vg = "-g %s" % vg
+ else:
+ changed = False
+
+ return changed, msg
+
+ if mount_group is None:
+ mount_group = ''
+
+ else:
+ mount_group = "-u %s" % mount_group
+
+ auto_mount = auto_mount_opt[auto_mount]
+ account_subsystem = account_subsys_opt[account_subsystem]
+
+ if nfs_server is not None:
+ # Creates a NFS file system.
+ mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
+ if not module.check_mode:
+ rc, mknfsmnt_out, err = module.run_command('%s -f "%s" %s -h "%s" -t "%s" "%s" -w "bg"' % (
+ mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "NFS file system %s created." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+ else:
+ # Creates a LVM file system.
+ crfs_cmd = module.get_bin_path('crfs', True)
+ if not module.check_mode:
+ cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % (
+ crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes)
+ rc, crfs_out, err = module.run_command(cmd)
+
+ if rc == 10:
+ module.exit_json(
+ msg="Using a existent previously defined logical volume, "
+ "volume group needs to be empty. %s" % err)
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+
+ else:
+ changed = True
+ return changed, crfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def remove_fs(module, filesystem, rm_mount_point):
+ """ Remove an LVM file system or NFS entry. """
+
+ # Command parameters.
+ rm_mount_point_opt = {
+ True: '-r',
+ False: ''
+ }
+
+ rm_mount_point = rm_mount_point_opt[rm_mount_point]
+
+ rmfs_cmd = module.get_bin_path('rmfs', True)
+ if not module.check_mode:
+ cmd = "%s -r %s %s" % (rmfs_cmd, rm_mount_point, filesystem)
+ rc, rmfs_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+ else:
+ changed = True
+ msg = rmfs_out
+ if not rmfs_out:
+ msg = "File system %s removed." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def mount_fs(module, filesystem):
+ """ Mount a file system. """
+ mount_cmd = module.get_bin_path('mount', True)
+
+ if not module.check_mode:
+ rc, mount_out, err = module.run_command(
+ "%s %s" % (mount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s mounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def unmount_fs(module, filesystem):
+ """ Unmount a file system."""
+ unmount_cmd = module.get_bin_path('unmount', True)
+
+ if not module.check_mode:
+ rc, unmount_out, err = module.run_command("%s %s" % (unmount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s unmounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_subsystem=dict(type='bool', default=False),
+ attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]),
+ auto_mount=dict(type='bool', default=True),
+ device=dict(type='str'),
+ filesystem=dict(type='str', required=True),
+ fs_type=dict(type='str', default='jfs2'),
+ permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
+ mount_group=dict(type='str'),
+ nfs_server=dict(type='str'),
+ rm_mount_point=dict(type='bool', default=False),
+ size=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
+ vg=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ account_subsystem = module.params['account_subsystem']
+ attributes = module.params['attributes']
+ auto_mount = module.params['auto_mount']
+ device = module.params['device']
+ fs_type = module.params['fs_type']
+ permissions = module.params['permissions']
+ mount_group = module.params['mount_group']
+ filesystem = module.params['filesystem']
+ nfs_server = module.params['nfs_server']
+ rm_mount_point = module.params['rm_mount_point']
+ size = module.params['size']
+ state = module.params['state']
+ vg = module.params['vg']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'present':
+ fs_mounted = ismount(filesystem)
+ fs_exists = _fs_exists(module, filesystem)
+
+ # Check if fs is mounted or exists.
+ if fs_mounted or fs_exists:
+ result['msg'] = "File system %s already exists." % filesystem
+ result['changed'] = False
+
+ # If parameter size was passed, resize fs.
+ if size is not None:
+ result['changed'], result['msg'] = resize_fs(module, filesystem, size)
+
+ # If fs doesn't exist, create it.
+ else:
+ # Check if fs will be a NFS device.
+ if nfs_server is not None:
+ if device is None:
+ result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
+ module.fail_json(**result)
+ else:
+ # Create a fs from NFS export.
+ if _check_nfs_device(module, nfs_server, device):
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is None:
+ if vg is None:
+ result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
+ module.fail_json(**result)
+ else:
+ # Create a fs in the given volume group.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is not None and nfs_server is None:
+ # Create a fs on a previously defined logical volume device.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ elif state == 'absent':
+ if ismount(filesystem):
+ result['msg'] = "File system %s mounted." % filesystem
+
+ else:
+ fs_status = _fs_exists(module, filesystem)
+ if not fs_status:
+ result['msg'] = "File system %s does not exist." % filesystem
+ else:
+ result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
+
+ elif state == 'mounted':
+ if ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already mounted." % filesystem
+ else:
+ result['changed'], result['msg'] = mount_fs(module, filesystem)
+
+ elif state == 'unmounted':
+ if not ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already unmounted." % filesystem
+ else:
+ result['changed'], result['msg'] = unmount_fs(module, filesystem)
+
+ else:
+ # Unreachable codeblock
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
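The size grammar documented above is passed through to chfs unchanged: a leading '+' grows, '-' shrinks, no sign sets an absolute size, and an M or G suffix selects the unit (512-byte blocks otherwise). A small sketch of how the convention reads:

    for size in ('+512M', '11G', '-2G', '2048'):
        sign = size[0] if size[0] in '+-' else ''
        unit = size[-1] if size[-1] in 'MG' else '512-byte blocks'
        mode = {'+': 'grow', '-': 'shrink'}.get(sign, 'absolute')
        print('%s -> %s, unit: %s' % (size, mode, unit))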
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_inittab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_inittab.py
new file mode 100644
index 00000000..c2daface
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_inittab.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Joris Weijters <joris.weijters@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Joris Weijters (@molekuul)
+module: aix_inittab
+short_description: Manages the inittab on AIX
+description:
+ - Manages the inittab on AIX.
+options:
+ name:
+ description:
+ - Name of the inittab entry.
+ type: str
+ required: yes
+ aliases: [ service ]
+ runlevel:
+ description:
+ - Runlevel of the entry.
+ type: str
+ required: yes
+ action:
+ description:
+ - The action that init has to take on this entry.
+ type: str
+ choices:
+ - boot
+ - bootwait
+ - hold
+ - initdefault
+ - 'off'
+ - once
+ - ondemand
+ - powerfail
+ - powerwait
+ - respawn
+ - sysinit
+ - wait
+ command:
+ description:
+ - What command has to run.
+ type: str
+ required: yes
+ insertafter:
+ description:
+ - The inittab entry after which the new entry should be inserted.
+ type: str
+ state:
+ description:
+ - Whether the entry should be present or absent in the inittab file.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The changes are persistent across reboots.
+ - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
+ - Tested on AIX 7.1.
+requirements:
+- itertools
+'''
+
+EXAMPLES = '''
+# Add service startmyservice to the inittab, directly after service existingservice.
+- name: Add startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 4
+ action: once
+ command: echo hello
+ insertafter: existingservice
+ state: present
+ become: yes
+
+# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
+- name: Change startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: present
+ become: yes
+
+- name: Remove startmyservice from inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: absent
+ become: yes
+'''
+
+RETURN = '''
+name:
+ description: Name of the adjusted inittab entry
+ returned: always
+ type: str
+ sample: startmyservice
+msg:
+ description: Action done with the inittab entry
+ returned: changed
+ type: str
+ sample: changed inittab entry startmyservice
+changed:
+ description: Whether the inittab changed or not
+ returned: always
+ type: bool
+ sample: true
+'''
+
+# Import necessary libraries
+try:
+ # python 2
+ from itertools import izip
+except ImportError:
+ izip = zip
+
+from ansible.module_utils.basic import AnsibleModule
+
+# end import modules
+# start defining the functions
+
+
+def check_current_entry(module):
+ # Check if the entry exists. If not, return 'exist': False in the dict;
+ # if it does, return 'exist': True together with the parsed entry fields.
+ existsdict = {'exist': False}
+ lsitab = module.get_bin_path('lsitab')
+ (rc, out, err) = module.run_command([lsitab, module.params['name']])
+ if rc == 0:
+ keys = ('name', 'runlevel', 'action', 'command')
+ values = out.split(":")
+ # strip non-printable characters such as \n
+ values = map(lambda s: s.strip(), values)
+ existsdict = dict(izip(keys, values))
+ existsdict.update({'exist': True})
+ return existsdict
+
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['service']),
+ runlevel=dict(type='str', required=True),
+ action=dict(type='str', choices=[
+ 'boot',
+ 'bootwait',
+ 'hold',
+ 'initdefault',
+ 'off',
+ 'once',
+ 'ondemand',
+ 'powerfail',
+ 'powerwait',
+ 'respawn',
+ 'sysinit',
+ 'wait',
+ ]),
+ command=dict(type='str', required=True),
+ insertafter=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ result = {
+ 'name': module.params['name'],
+ 'changed': False,
+ 'msg': ""
+ }
+
+ # Find commandline strings
+ mkitab = module.get_bin_path('mkitab')
+ rmitab = module.get_bin_path('rmitab')
+ chitab = module.get_bin_path('chitab')
+ rc = 0
+
+ # check if the new entry exists
+ current_entry = check_current_entry(module)
+
+ # if action is install or change,
+ if module.params['state'] == 'present':
+
+ # create new entry string
+ new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
+ ":" + module.params['action'] + ":" + module.params['command']
+
+ # If the entry does not exist, or any of its fields differ, the entry
+ # will be created or changed.
+ if (not current_entry['exist']) or (
+ module.params['runlevel'] != current_entry['runlevel'] or
+ module.params['action'] != current_entry['action'] or
+ module.params['command'] != current_entry['command']):
+
+ # If the entry does exist then change the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command([chitab, new_entry])
+ if rc != 0:
+ module.fail_json(
+ msg="could not change inittab", rc=rc, err=err)
+ result['msg'] = "changed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ # If the entry does not exist create the entry
+ elif not current_entry['exist']:
+ if module.params['insertafter']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, '-i', module.params['insertafter'], new_entry])
+ else:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, new_entry])
+
+ if rc != 0:
+ module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
+ result['msg'] = "add inittab entry" + " " + module.params['name']
+ result['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ # If the action is remove and the entry exists then remove the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [rmitab, module.params['name']])
+ if rc != 0:
+ module.fail_json(
+ msg="could not remove entry from inittab)", rc=rc, err=err)
+ result['msg'] = "removed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
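The entry written by this module is the classic four-field inittab line, and check_current_entry() splits lsitab output back into the same fields. A sketch of the round trip:

    entry = ':'.join(['startmyservice', '4', 'once', 'echo hello'])
    print(entry)   # startmyservice:4:once:echo hello

    keys = ('name', 'runlevel', 'action', 'command')
    print(dict(zip(keys, entry.split(':'))))
    # {'name': 'startmyservice', 'runlevel': '4', 'action': 'once', 'command': 'echo hello'}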
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvg.py
new file mode 100644
index 00000000..569711f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvg.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_lvg
+short_description: Manage LVM volume groups on AIX
+description:
+- This module creates, removes or resizes volume groups on AIX LVM.
+options:
+ force:
+ description:
+ - Force volume group creation.
+ type: bool
+ default: no
+ pp_size:
+ description:
+ - The size of the physical partition in megabytes.
+ type: int
+ pvs:
+ description:
+ - List of devices to use as physical devices in this volume group.
+ - Required when creating or extending (C(present) state) the volume group.
+ - If not specified when reducing (C(absent) state), the whole volume group will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+ - Controls whether the volume group exists and its AIX activation state, varyonvg C(varyon) or varyoffvg C(varyoff).
+ type: str
+ choices: [ absent, present, varyoff, varyon ]
+ default: present
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ vg_type:
+ description:
+ - The type of the volume group.
+ type: str
+ choices: [ big, normal, scalable ]
+ default: normal
+notes:
+- AIX permits removing a VG only if none of its LVs/filesystems are busy.
+- The module does not modify the PP size of an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ pp_size: 128
+ vg_type: scalable
+ state: present
+
+- name: Removing a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ state: absent
+
+- name: Extending rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: present
+
+- name: Reducing rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _validate_pv(module, vg, pvs):
+ """
+ Function to validate if the physical volume (PV) is not already in use by
+ another volume group or Oracle ASM.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume group name.
+ :param pvs: Physical volume list.
+ :return: [bool, message] or module.fail_json for errors.
+ """
+
+ lspv_cmd = module.get_bin_path('lspv', True)
+ rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
+
+ for pv in pvs:
+ # Get pv list.
+ lspv_list = {}
+ for line in current_lspv.splitlines():
+ pv_data = line.split()
+ lspv_list[pv_data[0]] = pv_data[2]
+
+ # Check if pv exists and is free.
+ if pv not in lspv_list.keys():
+ module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
+
+ if lspv_list[pv] == 'None':
+ # Disk None, looks free.
+ # Check if PV is not already in use by Oracle ASM.
+ lquerypv_cmd = module.get_bin_path('lquerypv', True)
+ rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv))
+ if rc != 0:
+ module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
+
+ if 'ORCLDISK' in current_lquerypv:
+ module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv)
+
+ msg = "Physical volume '%s' is ok to be used." % pv
+ return True, msg
+
+ # Check if PV is already in use for the same vg.
+ elif vg != lspv_list[pv]:
+ module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
+
+ msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
+ return False, msg
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group '%s' is in varyoff state." % vg
+ return False, msg
+
+ if vg in current_active_vgs:
+ msg = "Volume group '%s' is in varyon state." % vg
+ return True, msg
+
+ msg = "Volume group '%s' does not exist." % vg
+ return None, msg
+
+
+def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
+ """ Creates or extend a volume group. """
+
+ # Command option parameters.
+ force_opt = {
+ True: '-f',
+ False: ''
+ }
+
+ vg_opt = {
+ 'normal': '',
+ 'big': '-B',
+ 'scalable': '-S',
+ }
+
+ # Validate if PV are not already in use.
+ pv_state, msg = _validate_pv(module, vg, pvs)
+ if not pv_state:
+ changed = False
+ return changed, msg
+
+ vg_state, msg = vg_validation
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is True:
+ # Volume group extension.
+ changed = True
+ msg = ""
+
+ if not module.check_mode:
+ extendvg_cmd = module.get_bin_path('extendvg', True)
+ rc, output, err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Extending volume group '%s' has failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' extended." % vg
+ return changed, msg
+
+ elif vg_state is None:
+ # Volume group creation.
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ mkvg_cmd = module.get_bin_path('mkvg', True)
+ rc, output, err = module.run_command("%s %s %s %s -y %s %s" % (mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Creating volume group '%s' failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' created." % vg
+ return changed, msg
+
+
+def reduce_vg(module, vg, pvs, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is None:
+ changed = False
+ return changed, msg
+
+ # Define pvs_to_remove (list of physical volumes to be removed).
+ if pvs is None:
+ # Remove the VG if pvs is not specified.
+ # Remark: AIX permits removal only if the VG has no LVs.
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd)
+
+ pvs_to_remove = []
+ for line in current_pvs.splitlines()[2:]:
+ pvs_to_remove.append(line.split()[0])
+
+ reduce_msg = "Volume group '%s' removed." % vg
+ else:
+ pvs_to_remove = pvs
+ reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))
+
+ # Reduce volume group.
+ if len(pvs_to_remove) <= 0:
+ changed = False
+ msg = "No physical volumes to remove."
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ reducevg_cmd = module.get_bin_path('reducevg', True)
+ rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove)))
+ if rc != 0:
+ module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
+
+ msg = reduce_msg
+ return changed, msg
+
+
+def state_vg(module, vg, state, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is None:
+ module.fail_json(msg=msg)
+
+ if state == 'varyon':
+ if vg_state is True:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ varyonvg_cmd = module.get_bin_path('varyonvg', True)
+ rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
+
+ msg = "Varyon volume group %s completed." % vg
+ return changed, msg
+
+ elif state == 'varyoff':
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ varyonvg_cmd = module.get_bin_path('varyoffvg', True)
+ rc, varyonvg_out, stderr = module.run_command("%s %s" % (varyonvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr)
+
+ msg = "Varyoff volume group %s completed." % vg
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', default=False),
+ pp_size=dict(type='int'),
+ pvs=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
+ vg=dict(type='str', required=True),
+ vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
+ ),
+ supports_check_mode=True,
+ )
+
+ force = module.params['force']
+ pp_size = module.params['pp_size']
+ pvs = module.params['pvs']
+ state = module.params['state']
+ vg = module.params['vg']
+ vg_type = module.params['vg_type']
+
+ if pp_size is None:
+ pp_size = ''
+ else:
+ pp_size = "-s %s" % pp_size
+
+ vg_validation = _validate_vg(module, vg)
+
+ if state == 'present':
+ if not pvs:
+ changed = False
+ msg = "pvs is required to state 'present'."
+ module.fail_json(msg=msg)
+ else:
+ changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
+
+ elif state == 'absent':
+ changed, msg = reduce_vg(module, vg, pvs, vg_validation)
+
+ elif state == 'varyon' or state == 'varyoff':
+ changed, msg = state_vg(module, vg, state, vg_validation)
+
+ else:
+ changed = False
+ msg = "Unexpected state"
+
+ module.exit_json(changed=changed, msg=msg, state=state)
+
+
+if __name__ == '__main__':
+ main()
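_validate_pv() above decides whether a disk is usable from lspv output: column one is the PV name, column three the owning volume group, with 'None' marking a free disk. A sketch against captured output instead of a live AIX host:

    current_lspv = (
        "hdisk0  00c8b12ce3c7d496  rootvg  active\n"
        "hdisk1  00c8b12cf28e737b  None\n"
    )
    lspv_list = {line.split()[0]: line.split()[2] for line in current_lspv.splitlines()}
    print(lspv_list)                        # {'hdisk0': 'rootvg', 'hdisk1': 'None'}
    print(lspv_list['hdisk1'] == 'None')    # True -> free, candidate for a new VG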
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvol.py
new file mode 100644
index 00000000..02b4f06c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvol.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Alain Dejoux (@adejoux)
+module: aix_lvol
+short_description: Configure AIX LVM logical volumes
+description:
+ - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
+options:
+ vg:
+ description:
+ - The volume group this logical volume is part of.
+ type: str
+ required: true
+ lv:
+ description:
+ - The name of the logical volume.
+ type: str
+ required: true
+ lv_type:
+ description:
+ - The type of the logical volume.
+ type: str
+ default: jfs2
+ size:
+ description:
+ - The size of the logical volume with one of the [MGT] units.
+ type: str
+ copies:
+ description:
+ - The number of copies of the logical volume.
+ - The maximum number of copies is 3.
+ type: int
+ default: 1
+ policy:
+ description:
+ - Sets the inter-physical volume allocation policy.
+ - C(maximum) allocates logical partitions across the maximum number of physical volumes.
+ - C(minimum) allocates logical partitions across the minimum number of physical volumes.
+ type: str
+ choices: [ maximum, minimum ]
+ default: maximum
+ state:
+ description:
+ - Controls whether the logical volume exists. If C(present) and the
+ volume does not already exist, then the C(size) option is required.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ opts:
+ description:
+ - Free-form options to be passed to the mklv command.
+ type: str
+ pvs:
+ description:
+ - A list of physical volumes e.g. C(hdisk1,hdisk2).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+- name: Create a logical volume of 512M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+
+- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test2lv
+ size: 512M
+ pvs: [ hdisk1, hdisk2 ]
+
+- name: Create a logical volume of 512M mirrored
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test3lv
+ size: 512M
+ copies: 2
+
+- name: Create a logical volume of 1G with a minimum placement policy
+ community.general.aix_lvol:
+ vg: rootvg
+ lv: test4lv
+ size: 1G
+ policy: minimum
+
+- name: Create a logical volume with special options like mirror pool
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+ opts: -p copy1=poolA -p copy2=poolB
+
+- name: Extend the logical volume to 1200M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test4lv
+ size: 1200M
+
+- name: Remove the logical volume
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ state: absent
+'''
+
+RETURN = r'''
+msg:
+ type: str
+ description: A friendly message describing the task result.
+ returned: always
+ sample: Logical volume testlv created.
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def convert_size(module, size):
+ unit = size[-1].upper()
+ units = ['M', 'G', 'T']
+ try:
+ multiplier = 1024 ** units.index(unit)
+ except ValueError:
+ module.fail_json(msg="No valid size unit specified.")
+
+ return int(size[:-1]) * multiplier
+
+
+def round_ppsize(x, base=16):
+ new_size = int(base * round(float(x) / base))
+ if new_size < x:
+ new_size += base
+ return new_size
+
+
+def parse_lv(data):
+ name = None
+
+ for line in data.splitlines():
+ match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ vg = match.group(2)
+ continue
+ match = re.search(r"LPs:\s+(\d+).*PPs", line)
+ if match is not None:
+ lps = int(match.group(1))
+ continue
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+ match = re.search(r"INTER-POLICY:\s+(\w+)", line)
+ if match is not None:
+ policy = match.group(1)
+ continue
+
+ if not name:
+ return None
+
+ size = lps * pp_size
+
+ return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
+
+
+def parse_vg(data):
+
+ for line in data.splitlines():
+
+ match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ continue
+
+ match = re.search(r"TOTAL PP.*\((\d+)", line)
+ if match is not None:
+ size = int(match.group(1))
+ continue
+
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+
+ match = re.search(r"FREE PP.*\((\d+)", line)
+ if match is not None:
+ free = int(match.group(1))
+ continue
+
+ return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str', required=True),
+ lv_type=dict(type='str', default='jfs2'),
+ size=dict(type='str'),
+ opts=dict(type='str', default=''),
+ copies=dict(type='int', default=1),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
+ pvs=dict(type='list', elements='str', default=list())
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ lv_type = module.params['lv_type']
+ size = module.params['size']
+ opts = module.params['opts']
+ copies = module.params['copies']
+ policy = module.params['policy']
+ state = module.params['state']
+ pvs = module.params['pvs']
+
+ pv_list = ' '.join(pvs)
+
+ if policy == 'maximum':
+ lv_policy = 'x'
+ else:
+ lv_policy = 'm'
+
+ # Add echo command when running in check-mode
+ if module.check_mode:
+ test_opt = 'echo '
+ else:
+ test_opt = ''
+
+ # check if system commands are available
+ lsvg_cmd = module.get_bin_path("lsvg", required=True)
+ lslv_cmd = module.get_bin_path("lslv", required=True)
+
+ # Get information on volume group requested
+ rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
+
+ this_vg = parse_vg(vg_info)
+
+ if size is not None:
+ # Calculate pp size and round it up based on pp size.
+ lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+
+ # Get information on logical volume requested
+ rc, lv_info, err = module.run_command(
+ "%s %s" % (lslv_cmd, lv))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
+
+ changed = False
+
+ this_lv = parse_lv(lv_info)
+
+ if state == 'present' and not size:
+ if this_lv is None:
+ module.fail_json(msg="No size given.")
+
+ if this_lv is None:
+ if state == 'present':
+ if lv_size > this_vg['free']:
+ module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
+
+ # create LV
+ mklv_cmd = module.get_bin_path("mklv", required=True)
+
+ cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s created." % lv)
+ else:
+ module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ rmlv_cmd = module.get_bin_path("rmlv", required=True)
+ rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
+ else:
+ if this_lv['policy'] != policy:
+ # change lv allocation policy
+ chlv_cmd = module.get_bin_path("chlv", required=True)
+ rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
+ else:
+ module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
+
+ if vg != this_lv['vg']:
+ module.fail_json(msg="Logical volume %s already exists in volume group %s" % (lv, this_lv['vg']))
+
+ # From here on, the only remaining action is resizing; if no size parameter is passed, we do nothing.
+ if not size:
+ module.exit_json(changed=False, msg="Logical volume %s already exists." % (lv))
+
+ # resize LV based on absolute values
+ if int(lv_size) > this_lv['size']:
+ extendlv_cmd = module.get_bin_path("extendlv", required=True)
+ cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
+ else:
+ module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
+ elif lv_size < this_lv['size']:
+ module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
+ else:
+ module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
+
+
+if __name__ == '__main__':
+ main()
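convert_size() and round_ppsize() above together turn a human-readable size into whole physical partitions: the value is converted to megabytes, then rounded up to a multiple of the volume group's PP size. The same arithmetic as a worked example:

    def convert_size(size):
        # '512M' -> 512, '1G' -> 1024, '1T' -> 1048576 (all in MB)
        return int(size[:-1]) * 1024 ** ['M', 'G', 'T'].index(size[-1].upper())

    def round_ppsize(x, base=16):
        # Round to a multiple of the PP size, never downwards.
        new_size = int(base * round(float(x) / base))
        return new_size + base if new_size < x else new_size

    print(round_ppsize(convert_size('1G'), base=16))    # 1024
    print(round_ppsize(convert_size('500M'), base=64))  # 512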
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance.py
new file mode 100644
index 00000000..09754ccd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance.py
@@ -0,0 +1,1013 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance
+short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group.
+description:
+ - Create, start, stop, restart, modify or terminate ecs instances.
+ - Add or remove ecs instances to/from security group.
+options:
+ state:
+ description:
+ - The state of the instance after operating.
+ default: 'present'
+ choices: ['present', 'running', 'stopped', 'restarted', 'absent']
+ type: str
+ availability_zone:
+ description:
+ - Aliyun availability zone ID in which to launch the instance.
+ If it is not specified, it will be allocated by the system automatically.
+ aliases: ['alicloud_zone', 'zone_id']
+ type: str
+ image_id:
+ description:
+ - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['image']
+ type: str
+ instance_type:
+ description:
+ - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['type']
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs.
+ aliases: ['group_ids']
+ type: list
+ elements: str
+ vswitch_id:
+ description:
+ - The subnet ID in which to launch the instances (VPC).
+ aliases: ['subnet_id']
+ type: str
+ instance_name:
+ description:
+ - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
+ uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-".
+ It cannot begin with http:// or https://.
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://.
+ type: str
+ internet_charge_type:
+ description:
+ - Internet charge type of ECS instance.
+ default: 'PayByBandwidth'
+ choices: ['PayByBandwidth', 'PayByTraffic']
+ type: str
+ max_bandwidth_in:
+ description:
+ - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
+ default: 200
+ type: int
+ max_bandwidth_out:
+ description:
+ - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
+ Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False).
+ default: 0
+ type: int
+ host_name:
+ description:
+ - Instance host name. Ordered hostname is not supported.
+ type: str
+ unique_suffix:
+ description:
+ - Specifies whether to add sequential suffixes to the host_name.
+ The sequential suffix ranges from 001 to 999.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ password:
+ description:
+ - The password to login instance. After rebooting instances, modified password will take effect.
+ type: str
+ system_disk_category:
+ description:
+ - Category of the system disk.
+ default: 'cloud_efficiency'
+ choices: ['cloud_efficiency', 'cloud_ssd']
+ type: str
+ system_disk_size:
+ description:
+ - Size of the system disk, in GB. The valid values are 40~500.
+ default: 40
+ type: int
+ system_disk_name:
+ description:
+ - Name of the system disk.
+ type: str
+ system_disk_description:
+ description:
+ - Description of the system disk.
+ type: str
+ count:
+ description:
+ - The number of new instances. An integer value which indicates how many instances matching I(count_tag)
+ should be running. Instances are either created or terminated based on this value.
+ default: 1
+ type: int
+ count_tag:
+ description:
+ - I(count) determines how many instances matching the tag criteria given here should be present.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ The specified count_tag must already exist or be passed in as the I(tags) option.
+ If it is not specified, it defaults to I(instance_name).
+ type: str
+ allocate_public_ip:
+ description:
+ - Whether to allocate a public IP address for the new instance.
+ default: False
+ aliases: [ 'assign_public_ip' ]
+ type: bool
+ instance_charge_type:
+ description:
+ - The charge type of the instance.
+ choices: ['PrePaid', 'PostPaid']
+ default: 'PostPaid'
+ type: str
+ period:
+ description:
+ - The charge duration of the instance, in months. Required when C(instance_charge_type=PrePaid).
+ - Valid values are 1 to 9, 12, 24 and 36.
+ default: 1
+ type: int
+ auto_renew:
+ description:
+ - Whether to automatically renew the instance charge.
+ type: bool
+ default: False
+ auto_renew_period:
+ description:
+ - The duration of the automatic renewal of the instance charge. Required when C(auto_renew=True).
+ choices: [1, 2, 3, 6, 12]
+ type: int
+ instance_ids:
+ description:
+ - A list of instance IDs. Required when operating on existing instances.
+ If it is specified, I(count) is ignored.
+ type: list
+ elements: str
+ force:
+ description:
+ - Whether the current operation needs to be executed forcibly.
+ default: False
+ type: bool
+ tags:
+ description:
+ - A dictionary of instance tags, to add to the new instance or for starting/stopping instances by tag. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ version_added: '0.2.0'
+ purge_tags:
+ description:
+ - Delete any tags on the instance that are not specified in the task.
+ If C(True), all desired tags must be specified on every task affecting the instance.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ key_name:
+ description:
+ - The name of the key pair used to access the ECS instance via SSH.
+ required: false
+ type: str
+ aliases: ['keypair']
+ user_data:
+ description:
+ - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
+ It only takes effect when launching new ECS instances.
+ required: false
+ type: str
+ ram_role_name:
+ description:
+ - The name of the instance RAM role.
+ type: str
+ version_added: '0.2.0'
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal
+ places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
+ type: float
+ version_added: '0.2.0'
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid.
+ choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
+ default: 'NoSpot'
+ type: str
+ version_added: '0.2.0'
+ period_unit:
+ description:
+ - The duration unit of the resource purchase. Only valid when C(instance_charge_type=PrePaid).
+ choices: ['Month', 'Week']
+ default: 'Month'
+ type: str
+ version_added: '0.2.0'
+ dry_run:
+ description:
+ - Specifies whether to send a dry-run request.
+ - If I(dry_run=True), only a dry-run request is sent and no instance is created. The system checks whether the
+ required parameters are set, and validates the request format, service permissions, and available ECS instances.
+ If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
+ - If I(dry_run=False), a request is sent. If the validation succeeds, the instance is created.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ include_data_disks:
+ description:
+ - Whether to also change the charge type of the instance's data disks when changing the instance charge type.
+ default: True
+ type: bool
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.19.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Basic provisioning example in a VPC network
+- name: Basic provisioning example
+ hosts: localhost
+ vars:
+ alicloud_access_key: <your-alicloud-access-key-id>
+ alicloud_secret_key: <your-alicloud-access-secret-key>
+ alicloud_region: cn-beijing
+ image: ubuntu1404_64_40G_cloudinit_20160727.raw
+ instance_type: ecs.n4.small
+ vswitch_id: vsw-abcd1234
+ assign_public_ip: True
+ max_bandwidth_out: 10
+ host_name: myhost
+ password: mypassword
+ system_disk_category: cloud_efficiency
+ system_disk_size: 100
+ internet_charge_type: PayByBandwidth
+ security_groups: ["sg-f2rwnfh23r"]
+
+ instance_ids: ["i-abcd12346", "i-abcd12345"]
+ force: True
+
+ tasks:
+ - name: Launch ECS instance in VPC network
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ vswitch_id: '{{ vswitch_id }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: With count and count_tag to create a number of instances
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ security_groups: '{{ security_groups }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ Version: 0.1
+ count: 2
+ count_tag:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: Start instance
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'running'
+
+ - name: Reboot instance forcibly
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'restarted'
+ force: '{{ force }}'
+
+ - name: Add instances to a security group
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ security_groups: '{{ security_groups }}'
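+
+ # An additional illustrative task (a sketch, not part of the original examples):
+ # terminating the instances by ID; 'force' permits terminating instances that
+ # are still running.
+ - name: Terminate instances
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ force: '{{ force }}'
+ state: 'absent'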
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attributes of the EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The GPU attributes of the instance.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ instance_type_family:
+ description: The instance type family that the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+ description: Maximum outgoing bandwidth to the internet.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ user_data:
+ description: User-defined data.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance.
+ returned: always
+ type: float
+ sample: 0.97
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance.
+ returned: always
+ type: str
+ sample: NoSpot
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import ast
+import re
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def get_instances_info(connection, ids):
+ result = []
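+ # Enrich each footmark instance object with its attached disks and its user
+ # data so the returned dicts match the documented RETURN structure.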
+ instances = connection.describe_instances(instance_ids=ids)
+ if len(instances) > 0:
+ for inst in instances:
+ volumes = connection.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ result.append(inst.read())
+ return result
+
+
+def run_instance(module, ecs, exact_count):
+ if exact_count <= 0:
+ return None
+ zone_id = module.params['availability_zone']
+ image_id = module.params['image_id']
+ instance_type = module.params['instance_type']
+ security_groups = module.params['security_groups']
+ vswitch_id = module.params['vswitch_id']
+ instance_name = module.params['instance_name']
+ description = module.params['description']
+ internet_charge_type = module.params['internet_charge_type']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ max_bandwidth_in = module.params['max_bandwidth_in']
+ host_name = module.params['host_name']
+ password = module.params['password']
+ system_disk_category = module.params['system_disk_category']
+ system_disk_size = module.params['system_disk_size']
+ system_disk_name = module.params['system_disk_name']
+ system_disk_description = module.params['system_disk_description']
+ allocate_public_ip = module.params['allocate_public_ip']
+ period = module.params['period']
+ auto_renew = module.params['auto_renew']
+ instance_charge_type = module.params['instance_charge_type']
+ auto_renew_period = module.params['auto_renew_period']
+ user_data = module.params['user_data']
+ key_name = module.params['key_name']
+ ram_role_name = module.params['ram_role_name']
+ spot_price_limit = module.params['spot_price_limit']
+ spot_strategy = module.params['spot_strategy']
+ unique_suffix = module.params['unique_suffix']
+ # check whether the required parameters were passed
+ if not image_id:
+ module.fail_json(msg='image_id is required for new instance')
+ if not instance_type:
+ module.fail_json(msg='instance_type is required for new instance')
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ if len(security_groups) <= 0:
+ module.fail_json(msg='The parameter security_groups must be non-empty when creating new ECS instances, aborting')
+
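+ # Build a client token so that a retried creation request is treated by the
+ # ECS API as the same operation; hashing the module parameters together with
+ # a timestamp keeps the token unique per task run.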
+ client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time()))
+
+ try:
+ # call footmark's run_instances to create the ECS instances
+ instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0],
+ zone_id=zone_id, instance_name=instance_name, description=description,
+ internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out,
+ internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password,
+ io_optimized='optimized', system_disk_category=system_disk_category,
+ system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name,
+ system_disk_description=system_disk_description, vswitch_id=vswitch_id,
+ amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month",
+ auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name,
+ user_data=user_data, client_token=client_token, ram_role_name=ram_role_name,
+ spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix)
+
+ except Exception as e:
+ module.fail_json(msg='Unable to create instance, error: {0}'.format(e))
+
+ return instances
+
+
+def modify_instance(module, instance):
+ # Modify certain instance attributes according to the requested state
+ state = module.params["state"]
+ name = module.params['instance_name']
+ unique_suffix = module.params['unique_suffix']
+ if not name:
+ name = instance.name
+
+ description = module.params['description']
+ if not description:
+ description = instance.description
+
+ host_name = module.params['host_name']
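+ # When unique_suffix is set, carry the trailing three-digit ordinal (001-999)
+ # of the instance's current host name over to the new host name.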
+ if unique_suffix and host_name:
+ suffix = instance.host_name[-3:]
+ host_name = host_name + suffix
+
+ if not host_name:
+ host_name = instance.host_name
+
+ # the password can be modified only when restarting the instance
+ password = ""
+ if state == "restarted":
+ password = module.params['password']
+
+ # user data can be modified only when the instance is stopped
+ setattr(instance, "user_data", instance.describe_user_data())
+ user_data = instance.user_data
+ if state == "stopped":
+ user_data = module.params['user_data'].encode()
+
+ try:
+ return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data)
+ except Exception as e:
+ module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e))
+
+
+def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300):
+ """
+ Wait until the charge type of every instance matches the expected one after modification.
+ """
+ while True:
+ instances = ecs.describe_instances(instance_ids=instance_ids)
+ flag = True
+ for inst in instances:
+ if inst and inst.instance_charge_type != charge_type:
+ flag = False
+ if flag:
+ return
+ timeout -= delay
+ time.sleep(delay)
+ if timeout <= 0:
+ raise Exception("Timeout Error: Waiting for instance charge type to become {0}.".format(charge_type))
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ security_groups=dict(type='list', elements='str', aliases=['group_ids']),
+ availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']),
+ instance_type=dict(type='str', aliases=['type']),
+ image_id=dict(type='str', aliases=['image']),
+ count=dict(type='int', default=1),
+ count_tag=dict(type='str'),
+ vswitch_id=dict(type='str', aliases=['subnet_id']),
+ instance_name=dict(type='str', aliases=['name']),
+ host_name=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
+ max_bandwidth_in=dict(type='int', default=200),
+ max_bandwidth_out=dict(type='int', default=0),
+ system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
+ system_disk_size=dict(type='int', default=40),
+ system_disk_name=dict(type='str'),
+ system_disk_description=dict(type='str'),
+ force=dict(type='bool', default=False),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
+ description=dict(type='str'),
+ allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
+ instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
+ period=dict(type='int', default=1),
+ auto_renew=dict(type='bool', default=False),
+ instance_ids=dict(type='list', elements='str'),
+ auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
+ key_name=dict(type='str', aliases=['keypair']),
+ user_data=dict(type='str'),
+ ram_role_name=dict(type='str'),
+ spot_price_limit=dict(type='float'),
+ spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']),
+ unique_suffix=dict(type='bool', default=False),
+ period_unit=dict(type='str', default='Month', choices=['Month', 'Week']),
+ dry_run=dict(type='bool', default=False),
+ include_data_disks=dict(type='bool', default=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+ host_name = module.params['host_name']
+ state = module.params['state']
+ instance_ids = module.params['instance_ids']
+ count_tag = module.params['count_tag']
+ count = module.params['count']
+ instance_name = module.params['instance_name']
+ force = module.params['force']
+ zone_id = module.params['availability_zone']
+ key_name = module.params['key_name']
+ tags = module.params['tags']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ instance_charge_type = module.params['instance_charge_type']
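+ # Spot settings only apply to pay-as-you-go (PostPaid) instances, so clear
+ # the spot strategy for PrePaid instances.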
+ if instance_charge_type == "PrePaid":
+ module.params['spot_strategy'] = ''
+ changed = False
+
+ instances = []
+ if instance_ids:
+ if not isinstance(instance_ids, list):
+ module.fail_json(msg='The parameter instance_ids should be a list, aborting')
+ instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids)
+ if not instances:
+ module.fail_json(msg="There are no instances in our record based on instance_ids {0}. "
+ "Please check it and try again.".format(instance_ids))
+ elif count_tag:
+ # Parse the tag string with ast.literal_eval, which accepts only Python
+ # literals, instead of eval(), which would execute arbitrary expressions.
+ instances = ecs.describe_instances(zone_id=zone_id, tags=ast.literal_eval(count_tag))
+ elif instance_name:
+ instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name)
+
+ ids = []
+ if state == 'absent':
+ if len(instances) < 1:
+ module.fail_json(msg='Please specify the ECS instances that you want to operate on, using the '
+ 'parameters instance_ids, tags or instance_name; aborting')
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.")
+ targets.append(inst.id)
+ if ecs.delete_instances(instance_ids=targets, force=force):
+ changed = True
+ ids.extend(targets)
+
+ module.exit_json(changed=changed, ids=ids, instances=[])
+ except Exception as e:
+ module.fail_json(msg='Delete instance got an error: {0}'.format(e))
+
+ if module.params['allocate_public_ip'] and max_bandwidth_out < 0:
+ module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.")
+ if not module.params['allocate_public_ip']:
+ module.params['max_bandwidth_out'] = 0
+
+ if state == 'present':
+ if not instance_ids:
+ if len(instances) > count:
+ for i in range(0, len(instances) - count):
+ inst = instances[len(instances) - 1]
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="That to delete instance {0} is failed results from it is running, "
+ "and please stop it or set 'force' as True.".format(inst.id))
+ try:
+ if inst.terminate(force=force):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e))
+ instances.pop(len(instances) - 1)
+ else:
+ try:
+ if re.search(r"-\[\d+,\d+\]-", host_name):
+ module.fail_json(msg='Ordered hostname is not supported. If you want to add an ordered '
+ 'suffix to the hostname, set unique_suffix to True.')
+ new_instances = run_instance(module, ecs, count - len(instances))
+ if new_instances:
+ changed = True
+ instances.extend(new_instances)
+ except Exception as e:
+ module.fail_json(msg="Create new instances got an error: {0}".format(e))
+
+ # Security Group join/leave begin
+ security_groups = module.params['security_groups']
+ if security_groups:
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ for inst in instances:
+ existing = inst.security_group_ids['security_group_id']
+ remove = list(set(existing).difference(set(security_groups)))
+ add = list(set(security_groups).difference(set(existing)))
+ for sg in remove:
+ if inst.leave_security_group(sg):
+ changed = True
+ for sg in add:
+ if inst.join_security_group(sg):
+ changed = True
+ # Security Group join/leave ends here
+
+ # Attach/Detach key pair
+ inst_ids = []
+ for inst in instances:
+ if key_name is not None and key_name != inst.key_name:
+ if key_name == "":
+ if inst.detach_key_pair():
+ changed = True
+ else:
+ inst_ids.append(inst.id)
+ if inst_ids:
+ changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name)
+
+ # Modify instance attribute
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.id not in ids:
+ ids.append(inst.id)
+
+ # Modify instance charge type
+ ids = []
+ for inst in instances:
+ if inst.instance_charge_type != instance_charge_type:
+ ids.append(inst.id)
+ if ids:
+ params = {"instance_ids": ids, "instance_charge_type": instance_charge_type,
+ "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'],
+ "auto_pay": True}
+ if instance_charge_type == 'PrePaid':
+ params['period'] = module.params['period']
+ params['period_unit'] = module.params['period_unit']
+
+ if ecs.modify_instance_charge_type(**params):
+ changed = True
+ wait_for_instance_modify_charge(ecs, ids, instance_charge_type)
+
+ else:
+ if len(instances) < 1:
+ module.fail_json(msg='Please specify the ECS instances that you want to operate on, using the '
+ 'parameters instance_ids, tags or instance_name; aborting')
+ if state == 'running':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.status != "running":
+ targets.append(inst.id)
+ ids.append(inst.id)
+ if targets and ecs.start_instances(instance_ids=targets):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Start instances got an error: {0}'.format(e))
+ elif state == 'stopped':
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != "stopped":
+ targets.append(inst.id)
+ if targets and ecs.stop_instances(instance_ids=targets, force_stop=force):
+ changed = True
+ ids.extend(targets)
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='Stop instances got an error: {0}'.format(e))
+ elif state == 'restarted':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ targets.append(inst.id)
+ if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Reboot instances got an error: {0}'.format(e))
+
+ tags = module.params['tags']
+ if module.params['purge_tags']:
+ for inst in instances:
+ if not tags:
+ tags = inst.tags
+ try:
+ if inst.remove_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+ if tags:
+ for inst in instances:
+ try:
+ if inst.add_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_facts.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_facts.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+ - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+ - A dictionary of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+ any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+ A filter key can be the same as the request parameter name, or be lower case and use underscores ("_") or dashes ("-") to
+ connect the words of a parameter. 'InstanceIds' should be a list and will be appended to
+ I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be passed as a dict via I(tags) instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attributes of the EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The GPU attributes of the instance.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+ description: The instance type family that the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+ description: Maximum outgoing bandwidth to the internet.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+ if ids and (not isinstance(ids, list) or len(ids) < 1):
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ if names and (not isinstance(names, list) or len(names) < 1):
+ module.fail_json(msg='instance_names should be a list of instances, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
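+ # Fold any instance ids supplied through `filters` into the single ids list
+ # that is passed to the API as the instance_ids filter below.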
+ for key, value in list(filters.items()):
+ if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+ for id in value:
+ if id not in ids:
+ ids.append(id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_info.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_info.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+ - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+ - A dictionary of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+ any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+ A filter key can be the same as the request parameter name, or be lower case and use underscores ("_") or dashes ("-") to
+ connect the words of a parameter. 'InstanceIds' should be a list and will be appended to
+ I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be passed as a dict via I(tags) instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attributes of the EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The GPU attributes of the instance.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+ description: The instance type family that the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+ description: Maximum outgoing bandwidth to the internet.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+ if ids and (not isinstance(ids, list) or len(ids) < 1):
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ if names and (not isinstance(names, list) or len(names) < 1):
+ module.fail_json(msg='instance_names should be a list of instances, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
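+ # Fold any instance ids supplied through `filters` into the single ids list
+ # that is passed to the API as the instance_ids filter below.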
+ for key, value in list(filters.items()):
+ if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+ for id in value:
+ if id not in ids:
+ ids.append(id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/alternatives.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/alternatives.py
new file mode 100644
index 00000000..56db6dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/alternatives.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
+# Copyright: (c) 2015, David Wittman <dwittman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: alternatives
+short_description: Manages alternative programs for common commands
+description:
+ - Manages symbolic links using the 'update-alternatives' tool.
+ - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+author:
+ - David Wittman (@DavidWittman)
+ - Gabe Mulley (@mulby)
+options:
+ name:
+ description:
+ - The generic name of the link.
+ type: str
+ required: true
+ path:
+ description:
+ - The path to the real executable that the link should point to.
+ type: path
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real executable.
+ - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
+ required when the alternative I(name) is unknown to the system.
+ type: path
+ priority:
+ description:
+ - The priority of the alternative.
+ type: int
+ default: 50
+requirements: [ update-alternatives ]
+'''
+
+EXAMPLES = r'''
+- name: Correct java version selected
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+
+- name: Alternatives link created
+ community.general.alternatives:
+ name: hadoop-conf
+ link: /etc/hadoop/conf
+ path: /etc/hadoop/conf.ansible
+
+- name: Make java 32 bit an alternative with low priority
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
+ priority: -10
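+
+# A hypothetical sketch (paths are illustrative): on RHEL-based systems the
+# link must be given explicitly, because update-alternatives there has no
+# --query mode for this module to discover it.
+- name: Make python3.8 the python alternative, registering the link explicitly
+ community.general.alternatives:
+ name: python
+ link: /usr/bin/python
+ path: /usr/bin/python3.8
+ priority: 100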
+'''
+
+import os
+import re
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ path=dict(type='path', required=True),
+ link=dict(type='path'),
+ priority=dict(type='int', default=50),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ name = params['name']
+ path = params['path']
+ link = params['link']
+ priority = params['priority']
+
+ UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)
+
+ current_path = None
+ all_alternatives = []
+
+ # Run `update-alternatives --display <name>` to find existing alternatives
+ (rc, display_output, _) = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
+ )
+
+ if rc == 0:
+ # Alternatives already exist for this link group
+ # Parse the output to determine the current path of the symlink and
+ # available alternatives
+ current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
+ re.MULTILINE)
+ alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
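+        # Illustrative sketch only (hypothetical alternative, not from a real
+        # host): `update-alternatives --display editor` output is assumed to
+        # look roughly like
+        #   editor - auto mode
+        #     link currently points to /usr/bin/vim.basic
+        #   /bin/ed - priority -100
+        #   /usr/bin/vim.basic - priority 30
+        # in which case current_path_regex captures '/usr/bin/vim.basic' and
+        # alternative_regex collects ['/bin/ed', '/usr/bin/vim.basic'].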
+
+ match = current_path_regex.search(display_output)
+ if match:
+ current_path = match.group(1)
+ all_alternatives = alternative_regex.findall(display_output)
+
+ if not link:
+ # Read the current symlink target from `update-alternatives --query`
+ # in case we need to install the new alternative before setting it.
+ #
+ # This is only compatible on Debian-based systems, as the other
+ # alternatives don't have --query available
+ rc, query_output, _ = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
+ )
+ if rc == 0:
+ for line in query_output.splitlines():
+ if line.startswith('Link:'):
+ link = line.split()[1]
+ break
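+        # Illustrative sketch only (hypothetical name/paths): on Debian,
+        # `update-alternatives --query editor` is assumed to start with a
+        # header block such as
+        #   Name: editor
+        #   Link: /usr/bin/editor
+        #   Status: auto
+        # so the loop above would pick up link = '/usr/bin/editor'.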
+
+ if current_path != path:
+ if module.check_mode:
+ module.exit_json(changed=True, current_path=current_path)
+ try:
+ # install the requested path if necessary
+ if path not in all_alternatives:
+ if not os.path.exists(path):
+ module.fail_json(msg="Specified path %s does not exist" % path)
+ if not link:
+ module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
+
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
+ check_rc=True
+ )
+
+ # select the requested path
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--set', name, path],
+ check_rc=True
+ )
+
+ module.exit_json(changed=True)
+ except subprocess.CalledProcessError as cpe:
+        module.fail_json(msg=str(cpe))
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
new file mode 100644
index 00000000..dcf1656f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_mod_proxy
+author: Olivier Boukili (@oboukili)
+short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
+description:
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
+ pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
+ status page has to be enabled and accessible, as this module relies on parsing
+    this page. This module supports Ansible check mode and requires the BeautifulSoup
+    python module.
+options:
+ balancer_url_suffix:
+ type: str
+ description:
+ - Suffix of the balancer pool url required to access the balancer pool
+ status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ default: /balancer-manager/
+ balancer_vhost:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ required: true
+ member_host:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+ Port number is autodetected and should not be specified here.
+        If undefined, the apache2_mod_proxy module will return a list of
+        dictionaries with the attributes of all current balancer pool members.
+ state:
+ type: str
+ description:
+ - Desired state of the member host.
+        Several states (among absent, disabled, drained, hot_standby and ignore_errors)
+        can be invoked simultaneously by separating them with a comma (e.g. state=drained,ignore_errors).
+ - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
+ tls:
+ description:
+ - Use https to access balancer management page.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - Validate ssl/tls certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Get all current balancer pool members attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: 10.0.0.2
+
+- name: Get a specific member attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: myws.mydomain.org
+    balancer_url_suffix: /lb/
+ member_host: node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- name: Get attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ register: result
+
+- name: Enable all balancer pool members
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ member_host: '{{ item.host }}'
+ state: present
+ with_items: '{{ result.members }}'
+
+# Gracefully disable a member from a loadbalancer node:
+- name: Step 1
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 2
+ ansible.builtin.wait_for:
+ host: '{{ member.host }}'
+ port: '{{ member.port }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 3
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: absent
+ delegate_to: myloadbalancernode
+'''
+
+RETURN = '''
+member:
+ description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+ type: dict
+ returned: success
+ sample:
+ {"attributes":
+ {"Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ }
+members:
+ description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success
+ type: list
+ sample:
+ [{"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ },
+ {"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false}
+ }
+ ]
+'''
+
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+BEAUTIFUL_SOUP_IMP_ERR = None
+try:
+ from BeautifulSoup import BeautifulSoup
+except ImportError:
+ BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
+ HAS_BEAUTIFULSOUP = False
+else:
+ HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
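+# Worked example for EXPRESSION (hypothetical URL, not from a live manager):
+# for 'http://lb/balancer-manager/?b=mycluster&w=http://10.0.0.2:8080/ws&nonce=abc'
+# the capture groups would split into:
+#   group 2 -> 'mycluster'  (balancer name)
+#   group 3 -> 'http'       (member protocol)
+#   group 4 -> '10.0.0.2'   (member host)
+#   group 5 -> '8080'       (member port)
+#   group 6 -> '/ws'        (member path)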
+
+
+def regexp_extraction(string, _regexp, groups=1):
+ """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+ regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ if regexp_search:
+ if regexp_search.group(groups) != '':
+ return str(regexp_search.group(groups))
+ return None
+
+
+class BalancerMember(object):
+ """ Apache 2.4 mod_proxy LB balancer member.
+ attributes:
+ read-only:
+ host -> member host (string),
+ management_url -> member management url (string),
+ protocol -> member protocol (string)
+ port -> member port (string),
+ path -> member location (string),
+ balancer_url -> url of this member's parent balancer (string),
+ attributes -> whole member attributes (dictionary)
+ module -> ansible module instance (AnsibleModule object).
+ writable:
+ status -> status of the member (dictionary)
+ """
+
+ def __init__(self, management_url, balancer_url, module):
+ self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+ self.management_url = str(management_url)
+ self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+ self.port = regexp_extraction(management_url, EXPRESSION, 5)
+ self.path = regexp_extraction(management_url, EXPRESSION, 6)
+ self.balancer_url = str(balancer_url)
+ self.module = module
+
+ def get_member_attributes(self):
+ """ Returns a dictionary of a balancer member's attributes."""
+
+ balancer_member_page = fetch_url(self.module, self.management_url)
+
+ if balancer_member_page[1]['status'] != 200:
+            self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1]))
+ else:
+ try:
+ soup = BeautifulSoup(balancer_member_page[0])
+ except TypeError:
+                self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(balancer_member_page[0]))
+ else:
+ subsoup = soup.findAll('table')[1].findAll('tr')
+ keys = subsoup[0].findAll('th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = valuesset.findAll('td')
+ return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+ def get_member_status(self):
+ """ Returns a dictionary of a balancer member's status attributes."""
+ status_mapping = {'disabled': 'Dis',
+ 'drained': 'Drn',
+ 'hot_standby': 'Stby',
+ 'ignore_errors': 'Ign'}
+ status = {}
+ actual_status = str(self.attributes['Status'])
+ for mode in status_mapping.keys():
+ if re.search(pattern=status_mapping[mode], string=actual_status):
+ status[mode] = True
+ else:
+ status[mode] = False
+ return status
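+    # Worked example (hypothetical status string): an attributes['Status'] of
+    # 'Init Dis Ign ' maps via status_mapping to
+    # {'disabled': True, 'drained': False, 'hot_standby': False, 'ignore_errors': True}.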
+
+ def set_member_status(self, values):
+ """ Sets a balancer member's status attributes amongst pre-mapped values."""
+ values_mapping = {'disabled': '&w_status_D',
+ 'drained': '&w_status_N',
+ 'hot_standby': '&w_status_H',
+ 'ignore_errors': '&w_status_I'}
+
+ request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+ for k in values_mapping.keys():
+ if values[str(k)]:
+ request_body = request_body + str(values_mapping[k]) + '=1'
+ else:
+ request_body = request_body + str(values_mapping[k]) + '=0'
+
+ response = fetch_url(self.module, self.management_url, data=str(request_body))
+ if response[1]['status'] != 200:
+            self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status']))
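+    # Illustrative request body (hypothetical member/values): draining a member
+    # is assumed to POST something like
+    #   b=mycluster&w=http://10.0.0.2:8080/ws&w_status_D=0&w_status_N=1&w_status_H=0&w_status_I=0
+    # i.e. the regexp-extracted member query string plus one flag per status.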
+
+ attributes = property(get_member_attributes)
+ status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+ """ Apache httpd 2.4 mod_proxy balancer object"""
+
+ def __init__(self, host, suffix, module, members=None, tls=False):
+ if tls:
+ self.base_url = str(str('https://') + str(host))
+ self.url = str(str('https://') + str(host) + str(suffix))
+ else:
+ self.base_url = str(str('http://') + str(host))
+ self.url = str(str('http://') + str(host) + str(suffix))
+ self.module = module
+ self.page = self.fetch_balancer_page()
+ if members is None:
+ self._members = []
+
+ def fetch_balancer_page(self):
+ """ Returns the balancer management html page as a string for later parsing."""
+ page = fetch_url(self.module, str(self.url))
+ if page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+ else:
+ content = page[0].read()
+ apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
+ if apache_version:
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+ return content
+ else:
+ self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")
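+    # Illustrative version banner (hypothetical): a balancer-manager page
+    # containing 'Server Version: Apache/2.4.41 (Unix)' is upper-cased and
+    # matched above, extracting '2.4.41', which passes the 2.4.x check.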
+
+ def get_balancer_members(self):
+ """ Returns members of the balancer as a generator object for later iteration."""
+ try:
+ soup = BeautifulSoup(self.page)
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+ else:
+ for element in soup.findAll('a')[1::1]:
+ balancer_member_suffix = str(element.get('href'))
+ if not balancer_member_suffix:
+ self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
+ else:
+ yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+ members = property(get_balancer_members)
+
+
+def main():
+ """ Initiates module."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ balancer_vhost=dict(required=True, type='str'),
+ balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+ member_host=dict(type='str'),
+ state=dict(type='str'),
+ tls=dict(default=False, type='bool'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_BEAUTIFULSOUP is False:
+ module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)
+
+ if module.params['state'] is not None:
+ states = module.params['state'].split(',')
+ if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+ module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ else:
+ for _state in states:
+ if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+ module.fail_json(
+ msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
+ )
+ else:
+ states = ['None']
+
+ mybalancer = Balancer(module.params['balancer_vhost'],
+ module.params['balancer_url_suffix'],
+ module=module,
+ tls=module.params['tls'])
+
+ if module.params['member_host'] is None:
+ json_output_list = []
+ for member in mybalancer.members:
+ json_output_list.append({
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ })
+ module.exit_json(
+ changed=False,
+ members=json_output_list
+ )
+ else:
+ changed = False
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
+ for mode in member_status.keys():
+ for state in states:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
+
+ for member in mybalancer.members:
+ if str(member.host) == str(module.params['member_host']):
+ member_exists = True
+ if module.params['state'] is not None:
+ member_status_before = member.status
+ if not module.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ if member_status_before != member_status_after:
+ changed = True
+ json_output = {
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ }
+ if member_exists:
+ module.exit_json(
+ changed=changed,
+ member=json_output
+ )
+ else:
+ module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_module.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_module.py
new file mode 100644
index 00000000..4cc0ef8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_module.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_module
+author:
+ - Christian Berendt (@berendt)
+ - Ralf Hertel (@n0trax)
+ - Robin Roth (@robinro)
+short_description: Enables/disables a module of the Apache2 webserver
+description:
+ - Enables or disables a specified module of the Apache2 webserver.
+options:
+ name:
+ type: str
+ description:
+ - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
+ required: true
+ identifier:
+ type: str
+ description:
+ - Identifier of the module as listed by C(apache2ctl -M).
+ This is optional and usually determined automatically by the common convention of
+ appending C(_module) to I(name) as well as custom exception for popular modules.
+ required: False
+ force:
+ description:
+ - Force disabling of default modules and override Debian warnings.
+ required: false
+ type: bool
+ default: False
+ state:
+ type: str
+ description:
+ - Desired state of the module.
+ choices: ['present', 'absent']
+ default: present
+ ignore_configcheck:
+ description:
+      - Ignore configuration checks about inconsistent module configuration, especially for the mpm_* modules.
+ type: bool
+ default: False
+requirements: ["a2enmod","a2dismod"]
+'''
+
+EXAMPLES = '''
+- name: Enable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: present
+ name: wsgi
+
+- name: Disables the Apache2 module wsgi
+ community.general.apache2_module:
+ state: absent
+ name: wsgi
+
+- name: Disable default modules for Debian
+ community.general.apache2_module:
+ state: absent
+ name: autoindex
+ force: True
+
+- name: Disable mpm_worker and ignore warnings about missing mpm module
+ community.general.apache2_module:
+ state: absent
+ name: mpm_worker
+ ignore_configcheck: True
+
+- name: Enable dump_io module, which is identified as dumpio_module inside apache2
+ community.general.apache2_module:
+ state: present
+ name: dump_io
+ identifier: dumpio_module
+'''
+
+RETURN = '''
+result:
+ description: message about action taken
+ returned: always
+ type: str
+warnings:
+ description: list of warning messages
+ returned: when needed
+ type: list
+rc:
+ description: return code of underlying command
+ returned: failed
+ type: int
+stdout:
+ description: stdout of underlying command
+ returned: failed
+ type: str
+stderr:
+ description: stderr of underlying command
+ returned: failed
+ type: str
+'''
+
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _run_threaded(module):
+ control_binary = _get_ctl_binary(module)
+
+ result, stdout, stderr = module.run_command("%s -V" % control_binary)
+
+ return bool(re.search(r'threaded:[ ]*yes', stdout))
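+# Illustrative `-V` output (assumed format): a worker MPM build prints a line
+# like '  threaded:     yes (fixed thread count)', which the regexp above
+# matches; a prefork build prints '  threaded:     no' instead.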
+
+
+def _get_ctl_binary(module):
+ for command in ['apache2ctl', 'apachectl']:
+ ctl_binary = module.get_bin_path(command)
+ if ctl_binary is not None:
+ return ctl_binary
+
+ module.fail_json(
+        msg="Neither apache2ctl nor apachectl binary was found."
+ " At least one apache control binary is necessary."
+ )
+
+
+def _module_is_enabled(module):
+ control_binary = _get_ctl_binary(module)
+ result, stdout, stderr = module.run_command("%s -M" % control_binary)
+
+ if result != 0:
+ error_msg = "Error executing %s: %s" % (control_binary, stderr)
+ if module.params['ignore_configcheck']:
+ if 'AH00534' in stderr and 'mpm_' in module.params['name']:
+ module.warnings.append(
+ "No MPM module loaded! apache2 reload AND other module actions"
+ " will fail if no MPM module is loaded immediately."
+ )
+ else:
+ module.warnings.append(error_msg)
+ return False
+ else:
+ module.fail_json(msg=error_msg)
+
+ searchstring = ' ' + module.params['identifier']
+ return searchstring in stdout
+
+
+def create_apache_identifier(name):
+ """
+ By convention if a module is loaded via name, it appears in apache2ctl -M as
+ name_module.
+
+ Some modules don't follow this convention and we use replacements for those."""
+
+ # a2enmod name replacement to apache2ctl -M names
+ text_workarounds = [
+ ('shib', 'mod_shib'),
+ ('shib2', 'mod_shib'),
+ ('evasive', 'evasive20_module'),
+ ]
+
+ # re expressions to extract subparts of names
+ re_workarounds = [
+ ('php', r'^(php\d)\.'),
+ ]
+
+ for a2enmod_spelling, module_name in text_workarounds:
+ if a2enmod_spelling in name:
+ return module_name
+
+ for search, reexpr in re_workarounds:
+ if search in name:
+ try:
+ rematch = re.search(reexpr, name)
+ return rematch.group(1) + '_module'
+ except AttributeError:
+ pass
+
+ return name + '_module'
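+# Worked examples for the conventions above:
+#   create_apache_identifier('ssl')     -> 'ssl_module'       (plain convention)
+#   create_apache_identifier('shib2')   -> 'mod_shib'         (text workaround)
+#   create_apache_identifier('evasive') -> 'evasive20_module' (text workaround)
+#   create_apache_identifier('php7.4')  -> 'php7_module'      (re workaround)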
+
+
+def _set_state(module, state):
+ name = module.params['name']
+ force = module.params['force']
+
+ want_enabled = state == 'present'
+ state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
+ a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
+ success_msg = "Module %s %s" % (name, state_string)
+
+ if _module_is_enabled(module) != want_enabled:
+ if module.check_mode:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+
+ a2mod_binary = module.get_bin_path(a2mod_binary)
+ if a2mod_binary is None:
+ module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))
+
+ if not want_enabled and force:
+ # force exists only for a2dismod on debian
+ a2mod_binary += ' -f'
+
+ result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name))
+
+ if _module_is_enabled(module) == want_enabled:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+ else:
+ msg = (
+ 'Failed to set module {name} to {state}:\n'
+ '{stdout}\n'
+            'Maybe the module identifier ({identifier}) was guessed incorrectly. '
+ 'Consider setting the "identifier" option.'
+ ).format(
+ name=name,
+ state=state_string,
+ stdout=stdout,
+ identifier=module.params['identifier']
+ )
+ module.fail_json(msg=msg,
+ rc=result,
+ stdout=stdout,
+ stderr=stderr)
+ else:
+ module.exit_json(changed=False,
+ result=success_msg,
+ warnings=module.warnings)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ identifier=dict(required=False, type='str'),
+ force=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ignore_configcheck=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.warnings = []
+
+ name = module.params['name']
+ if name == 'cgi' and _run_threaded(module):
+ module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name)
+
+ if not module.params['identifier']:
+ module.params['identifier'] = create_apache_identifier(module.params['name'])
+
+ if module.params['state'] in ['present', 'absent']:
+ _set_state(module, module.params['state'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apk.py
new file mode 100644
index 00000000..74b738de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apk.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
+# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
+# and apt (Matthew Williams <matthew@flowroute.com>) modules.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apk
+short_description: Manages apk packages
+description:
+ - Manages I(apk) packages for Alpine Linux.
+author: "Kevin Brebanov (@kbrebanov)"
+options:
+ available:
+ description:
+ - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
+ if the currently installed package is no longer available from any repository.
+ type: bool
+ default: no
+ name:
+ description:
+ - A package name, like C(foo), or multiple packages, like C(foo, bar).
+ type: list
+ elements: str
+ no_cache:
+ description:
+ - Do not use any local cache path.
+ type: bool
+ default: no
+ version_added: 1.0.0
+ repository:
+ description:
+ - A package repository or multiple repositories.
+ Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package(s) state.
+ - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias.
+ - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias.
+ - C(latest) ensures the package(s) is/are present and the latest version(s).
+ default: present
+ choices: [ "present", "absent", "latest", "installed", "removed" ]
+ type: str
+ update_cache:
+ description:
+      - Update repository indexes. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade all installed packages to their latest version.
+ type: bool
+ default: no
+notes:
+ - '"name" and "upgrade" are mutually exclusive.'
+  - When used with a C(loop:), each package will be processed individually; it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+- name: Update repositories and install foo package
+ community.general.apk:
+ name: foo
+ update_cache: yes
+
+- name: Update repositories and install foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ update_cache: yes
+
+- name: Remove foo package
+ community.general.apk:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ state: absent
+
+- name: Install the package foo
+ community.general.apk:
+ name: foo
+ state: present
+
+- name: Install the packages foo and bar
+ community.general.apk:
+ name: foo,bar
+ state: present
+
+- name: Update repositories and update package foo to latest version
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Update repositories and update packages foo and bar to latest versions
+ community.general.apk:
+ name: foo,bar
+ state: latest
+ update_cache: yes
+
+- name: Update all installed packages to the latest versions
+ community.general.apk:
+ upgrade: yes
+
+- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
+ community.general.apk:
+ available: yes
+ upgrade: yes
+
+- name: Update repositories as a separate step
+ community.general.apk:
+ update_cache: yes
+
+- name: Install package from a specific repository
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+ repository: http://dl-3.alpinelinux.org/alpine/edge/main
+
+- name: Install package without using cache
+ community.general.apk:
+ name: foo
+ state: latest
+ no_cache: yes
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when packages have changed
+ type: list
+ sample: ['package', 'other-package']
+'''
+
+import re
+# Import module snippets.
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_for_packages(stdout):
+ packages = []
+ data = stdout.split('\n')
+ regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
+ for l in data:
+ p = regex.search(l)
+ if p:
+ packages.append(p.group(1))
+ return packages
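+# Illustrative apk output line (hypothetical package and version):
+#   (1/2) Installing libfoo (1.2.3-r0)
+# The regex above captures 'libfoo' as the affected package name.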
+
+
+def update_package_db(module, exit):
+ cmd = "%s update" % (APK_PATH)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
+ elif exit:
+ module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
+ else:
+ return True
+
+
+def query_toplevel(module, name):
+ # /etc/apk/world contains a list of top-level packages separated by ' ' or \n
+ # packages may contain repository (@) or version (=<>~) separator characters or start with negation !
+ regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
+ with open('/etc/apk/world') as f:
+ content = f.read().split()
+ for p in content:
+ if regex.search(p):
+ return True
+ return False
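+# Illustrative /etc/apk/world contents (hypothetical):
+#   busybox foo=1.2.3 bar@edge
+# query_toplevel(module, 'foo') -> True (matches 'foo=1.2.3' exactly by name),
+# query_toplevel(module, 'fo')  -> False (no partial name matches).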
+
+
+def query_package(module, name):
+ cmd = "%s -v info --installed %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    return rc == 0
+
+
+def query_latest(module, name):
+ cmd = "%s version %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
+ match = re.search(search_pattern, stdout)
+ if match and match.group(2) == "<":
+ return False
+ return True
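+# Illustrative `apk version foo` line (format assumed from the pattern above):
+#   foo-1.0.0-r0 < 1.1.0-r0
+# A '<' comparator means a newer version is available, so query_latest()
+# returns False; '=' (up to date) or '>' would leave it returning True.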
+
+
+def query_virtual(module, name):
+ cmd = "%s -v info --description %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"^%s: virtual meta package" % (re.escape(name))
+ if re.search(search_pattern, stdout):
+ return True
+ return False
+
+
+def get_dependencies(module, name):
+ cmd = "%s -v info --depends %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ dependencies = stdout.split()
+ if len(dependencies) > 1:
+ return dependencies[1:]
+ else:
+ return []
+
+
+def upgrade_packages(module, available):
+ if module.check_mode:
+ cmd = "%s upgrade --simulate" % (APK_PATH)
+ else:
+ cmd = "%s upgrade" % (APK_PATH)
+ if available:
+ cmd = "%s --available" % cmd
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
+ if re.search(r'^OK', stdout):
+ module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def install_packages(module, names, state):
+ upgrade = False
+ to_install = []
+ to_upgrade = []
+ for name in names:
+ # Check if virtual package
+ if query_virtual(module, name):
+ # Get virtual package dependencies
+ dependencies = get_dependencies(module, name)
+ for dependency in dependencies:
+ if state == 'latest' and not query_latest(module, dependency):
+ to_upgrade.append(dependency)
+ else:
+ if not query_toplevel(module, name):
+ to_install.append(name)
+ elif state == 'latest' and not query_latest(module, name):
+ to_upgrade.append(name)
+ if to_upgrade:
+ upgrade = True
+ if not to_install and not upgrade:
+ module.exit_json(changed=False, msg="package(s) already installed")
+ packages = " ".join(to_install + to_upgrade)
+ if upgrade:
+ if module.check_mode:
+ cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add --upgrade %s" % (APK_PATH, packages)
+ else:
+ if module.check_mode:
+ cmd = "%s add --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add %s" % (APK_PATH, packages)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def remove_packages(module, names):
+ installed = []
+ for name in names:
+ if query_package(module, name):
+ installed.append(name)
+ if not installed:
+ module.exit_json(changed=False, msg="package(s) already removed")
+ names = " ".join(installed)
+ if module.check_mode:
+ cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
+ else:
+ cmd = "%s del --purge %s" % (APK_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ # Check to see if packages are still present because of dependencies
+ for name in installed:
+ if query_package(module, name):
+ rc = 1
+ break
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+
+# ==========================================
+# Main control flow.
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
+ name=dict(type='list', elements='str'),
+ no_cache=dict(default=False, type='bool'),
+ repository=dict(type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ available=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True
+ )
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ global APK_PATH
+ APK_PATH = module.get_bin_path('apk', required=True)
+
+ p = module.params
+
+ if p['no_cache']:
+ APK_PATH = "%s --no-cache" % (APK_PATH, )
+
+ # add repositories to the APK_PATH
+ if p['repository']:
+ for r in p['repository']:
+ APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ if p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_package_db(module, not p['name'] and not p['upgrade'])
+
+ if p['upgrade']:
+ upgrade_packages(module, p['available'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['name'], p['state'])
+ elif p['state'] == 'absent':
+ remove_packages(module, p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_repo.py
new file mode 100644
index 00000000..d196e03b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_repo.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Mikhail Gordeev
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_repo
+short_description: Manage APT repositories via apt-repo
+description:
+  - Manages APT repositories using the apt-repo tool.
+  - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo.
+notes:
+  - This module works on ALT-based distros.
+  - Does NOT support check mode, due to a limitation of the apt-repo tool.
+options:
+ repo:
+ description:
+ - Name of the repository to add or remove.
+ required: true
+ type: str
+ state:
+ description:
+ - Indicates the desired repository state.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ remove_others:
+ description:
+      - Remove all other repositories apart from the added one.
+      - Only used when I(state=present).
+ type: bool
+ default: no
+ update:
+ description:
+ - Update the package database after changing repositories.
+ type: bool
+ default: no
+author:
+- Mikhail Gordeev (@obirvalger)
+'''
+
+EXAMPLES = '''
+- name: Remove all repositories
+ community.general.apt_repo:
+ repo: all
+ state: absent
+
+- name: Add repository `Sisyphus` and remove other repositories
+  community.general.apt_repo:
+    repo: Sisyphus
+ state: present
+ remove_others: yes
+
+- name: Add local repository `/space/ALT/Sisyphus` and update package cache
+ community.general.apt_repo:
+ repo: copy:///space/ALT/Sisyphus
+ state: present
+ update: yes
+'''
+
+RETURN = ''' # '''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_REPO_PATH = "/usr/bin/apt-repo"
+
+
+def apt_repo(module, *args):
+ """run apt-repo with args and return its output"""
+ # make args list to use in concatenation
+ args = list(args)
+ rc, out, err = module.run_command([APT_REPO_PATH] + args)
+
+ if rc != 0:
+ module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
+
+ return out
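+# Usage sketch (hypothetical repository name):
+#   apt_repo(module, 'add', 'Sisyphus')
+# runs `/usr/bin/apt-repo add Sisyphus` and returns its stdout, while a bare
+# apt_repo(module) call lists the currently configured repositories.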
+
+
+def add_repo(module, repo):
+ """add a repository"""
+ apt_repo(module, 'add', repo)
+
+
+def rm_repo(module, repo):
+ """remove a repository"""
+ apt_repo(module, 'rm', repo)
+
+
+def set_repo(module, repo):
+ """add a repository and remove other repositories"""
+ # first add to validate repository
+ apt_repo(module, 'add', repo)
+ apt_repo(module, 'rm', 'all')
+ apt_repo(module, 'add', repo)
+
+
+def update(module):
+ """update package cache"""
+ apt_repo(module, 'update')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ remove_others=dict(type='bool', default=False),
+ update=dict(type='bool', default=False),
+ ),
+ )
+
+ if not os.path.exists(APT_REPO_PATH):
+ module.fail_json(msg='cannot find /usr/bin/apt-repo')
+
+ params = module.params
+ repo = params['repo']
+ state = params['state']
+ old_repositories = apt_repo(module)
+
+ if state == 'present':
+ if params['remove_others']:
+ set_repo(module, repo)
+ else:
+ add_repo(module, repo)
+ elif state == 'absent':
+ rm_repo(module, repo)
+
+ if params['update']:
+ update(module)
+
+ new_repositories = apt_repo(module)
+ changed = old_repositories != new_repositories
+ module.exit_json(changed=changed, repo=repo, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_rpm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_rpm.py
new file mode 100644
index 00000000..6b6bb7ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_rpm.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evgenii Terechkov
+# Written by Evgenii Terechkov <evg@altlinux.org>
+# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_rpm
+short_description: apt_rpm package manager
+description:
+  - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
+options:
+ package:
+ description:
+ - list of packages to install, upgrade or remove.
+ required: true
+ aliases: [ name, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+      - Update the package database first, using C(apt-get update).
+ aliases: [ 'update-cache' ]
+ type: bool
+ default: no
+author:
+- Evgenii Terechkov (@evgkrsk)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: present
+
+- name: Install packages foo and bar
+ community.general.apt_rpm:
+ pkg:
+ - foo
+ - bar
+ state: present
+
+- name: Remove package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.apt_rpm:
+ pkg: foo,bar
+ state: absent
+
+# bar will be updated if a newer version exists
+- name: Update the package database and install bar
+ community.general.apt_rpm:
+ name: bar
+ state: present
+ update_cache: yes
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_PATH = "/usr/bin/apt-get"
+RPM_PATH = "/usr/bin/rpm"
+
+
+def query_package(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
+    return rc == 0
+
+
+def query_package_provides(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
+ return rc == 0
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command("%s update" % APT_PATH)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db: %s" % err)
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+
+ rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
+
+ installed = True
+        for package in pkgspec:
+ if not query_package_provides(module, package):
+ installed = False
+
+        # apt-rpm always has exit code 0 if --force is used
+ if rc or not installed:
+ module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+ else:
+            module.exit_json(changed=True, msg="installed %s package(s)" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
+ ),
+ )
+
+ if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
+ module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ packages = p['package']
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, packages)
+
+ elif p['state'] in ['absent', 'removed']:
+ remove_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/archive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/archive.py
new file mode 100644
index 00000000..2872b5ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/archive.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com>
+# Sponsored by Oomph, Inc. http://www.oomphinc.com
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: archive
+short_description: Creates a compressed archive of one or more files or trees
+extends_documentation_fragment: files
+description:
+ - Creates or extends an archive.
+ - The source and archive are on the remote host, and the archive I(is not) copied to the local host.
+ - Source files can be deleted after archival by specifying I(remove=True).
+options:
+ path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
+ type: list
+ elements: path
+ required: true
+ format:
+ description:
+ - The type of compression to use.
+ - Support for xz was added in Ansible 2.5.
+ type: str
+ choices: [ bz2, gz, tar, xz, zip ]
+ default: gz
+ dest:
+ description:
+      - The file name of the destination archive. The parent directory must exist on the remote host.
+ - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+ type: path
+ exclude_path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion.
+ type: list
+ elements: path
+ force_archive:
+ description:
+ - Allow you to force the module to treat this as an archive even if only a single file is specified.
+      - By default the behaviour is maintained, i.e. when a single file is specified it is compressed only (not archived).
+ type: bool
+ default: false
+ remove:
+ description:
+ - Remove any added source files and trees after adding to archive.
+ type: bool
+ default: no
+notes:
+ - Requires tarfile, zipfile, gzip and bzip2 packages on target host.
+ - Requires lzma or backports.lzma if using xz format.
+ - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
+seealso:
+- module: ansible.builtin.unarchive
+author:
+- Ben Doherty (@bendoh)
+'''
+
+EXAMPLES = r'''
+- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
+ community.general.archive:
+ path: /path/to/foo
+ dest: /path/to/foo.tgz
+
+- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
+ community.general.archive:
+ path: /path/to/foo
+ remove: yes
+
+- name: Create a zip archive of /path/to/foo
+ community.general.archive:
+ path: /path/to/foo
+ format: zip
+
+- name: Create a bz2 archive of multiple files, rooted at /path
+ community.general.archive:
+ path:
+ - /path/to/foo
+ - /path/wong/foo
+ dest: /path/file.tar.bz2
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/bar
+ - /path/to/foo/baz
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/ba*
+ format: bz2
+
+- name: Use gzip to compress a single archive (i.e don't archive it first with tar)
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.gz
+ format: gz
+
+- name: Create a tar.gz archive of a single file.
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.tar.gz
+ format: gz
+ force_archive: true
+'''
+
+RETURN = r'''
+state:
+ description:
+ The current state of the archived file.
+ If 'absent', then no source files were found and the archive does not exist.
+    If 'compress', then the source file is in the compressed state.
+ If 'archive', then the source file or paths are currently archived.
+ If 'incomplete', then an archive was created, but not all source paths were found.
+ type: str
+ returned: always
+missing:
+ description: Any files that were missing from the source.
+ type: list
+ returned: success
+archived:
+ description: Any files that were compressed or added to the archive.
+ type: list
+ returned: success
+arcroot:
+ description: The archive root.
+ type: str
+ returned: always
+expanded_paths:
+ description: The list of matching paths from paths argument.
+ type: list
+ returned: always
+expanded_exclude_paths:
+ description: The list of matching exclude paths from the exclude_path argument.
+ type: list
+ returned: always
+'''
+
+import bz2
+import filecmp
+import glob
+import gzip
+import io
+import os
+import re
+import shutil
+import tarfile
+import zipfile
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import PY3
+
+
+LZMA_IMP_ERR = None
+if PY3:
+ try:
+ import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+else:
+ try:
+ from backports import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='list', elements='path', required=True),
+ format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+ dest=dict(type='path'),
+ exclude_path=dict(type='list', elements='path'),
+ force_archive=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ check_mode = module.check_mode
+ paths = params['path']
+ dest = params['dest']
+ b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict')
+ exclude_paths = params['exclude_path']
+ remove = params['remove']
+
+ b_expanded_paths = []
+ b_expanded_exclude_paths = []
+ fmt = params['format']
+ b_fmt = to_bytes(fmt, errors='surrogate_or_strict')
+ force_archive = params['force_archive']
+ globby = False
+ changed = False
+ state = 'absent'
+
+ # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
+ archive = False
+ b_successes = []
+
+ # Fail early
+ if not HAS_LZMA and fmt == 'xz':
+ module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"),
+ exception=LZMA_IMP_ERR)
+
+ for path in paths:
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_path or b'?' in b_path):
+ b_expanded_paths.extend(glob.glob(b_path))
+ globby = True
+
+ # If there are no glob characters the path is added to the expanded paths
+ # whether the path exists or not
+ else:
+ b_expanded_paths.append(b_path)
+
+ # Only attempt to expand the exclude paths if it exists
+ if exclude_paths:
+ for exclude_path in exclude_paths:
+ b_exclude_path = to_bytes(exclude_path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_exclude_path or b'?' in b_exclude_path):
+ b_expanded_exclude_paths.extend(glob.glob(b_exclude_path))
+
+ # If there are no glob character the exclude path is added to the expanded
+ # exclude paths whether the path exists or not.
+ else:
+ b_expanded_exclude_paths.append(b_exclude_path)
+
+ if not b_expanded_paths:
+ return module.fail_json(
+ path=', '.join(paths),
+ expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'),
+ msg='Error, no source paths were found'
+ )
+
+ # Only try to determine if we are working with an archive or not if we haven't set archive to true
+ if not force_archive:
+ # If we actually matched multiple files or TRIED to, then
+ # treat this as a multi-file archive
+ archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1
+ else:
+ archive = True
+
+ # Default created file name (for single-file archives) to
+ # <file>.<format>
+ if not b_dest and not archive:
+ b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt)
+
+ # Force archives to specify 'dest'
+ if archive and not b_dest:
+ module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')
+
+ b_sep = to_bytes(os.sep, errors='surrogate_or_strict')
+
+ b_archive_paths = []
+ b_missing = []
+ b_arcroot = b''
+
+ for b_path in b_expanded_paths:
+ # Use the longest common directory name among all the files
+ # as the archive root path
+ if b_arcroot == b'':
+ b_arcroot = os.path.dirname(b_path) + b_sep
+ else:
+ for i in range(len(b_arcroot)):
+ if b_path[i] != b_arcroot[i]:
+ break
+
+ if i < len(b_arcroot):
+ b_arcroot = os.path.dirname(b_arcroot[0:i + 1])
+
+ b_arcroot += b_sep
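+            # Illustrative (hypothetical paths): for /data/app/logs and
+            # /data/app/conf the longest common directory wins, leaving
+            # b_arcroot == b'/data/app/', so archive member names are
+            # computed relative to that root.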
+
+ # Don't allow archives to be created anywhere within paths to be removed
+ if remove and os.path.isdir(b_path):
+ b_path_dir = b_path
+ if not b_path.endswith(b'/'):
+ b_path_dir += b'/'
+
+ if b_dest.startswith(b_path_dir):
+ module.fail_json(
+ path=', '.join(paths),
+ msg='Error, created archive can not be contained in source paths when remove=True'
+ )
+
+ if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths:
+ b_archive_paths.append(b_path)
+ else:
+ b_missing.append(b_path)
+
+ # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
+ if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest):
+ # Just check the filename to know if it's an archive or simple compressed file
+ if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE):
+ state = 'archive'
+ else:
+ state = 'compress'
+
+ # Multiple files, or globbiness
+ elif archive:
+ if not b_archive_paths:
+ # No source files were found, but the archive is there.
+ if os.path.lexists(b_dest):
+ state = 'archive'
+ elif b_missing:
+ # SOME source files were found, but not all of them
+ state = 'incomplete'
+
+ archive = None
+ size = 0
+ errors = []
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ if state != 'archive':
+ if check_mode:
+ changed = True
+
+ else:
+ try:
+ # Slightly more difficult (and less efficient!) compression using zipfile module
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+
+ # Easier compression using tarfile module
+ elif fmt == 'gz' or fmt == 'bz2':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt)
+
+ # python3 tarfile module allows xz format but for python2 we have to create the tarfile
+ # in memory and then compress it with lzma.
+ elif fmt == 'xz':
+ arcfileIO = io.BytesIO()
+ arcfile = tarfile.open(fileobj=arcfileIO, mode='w')
+
+ # Or plain tar archiving
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+
+ b_match_root = re.compile(br'^%s' % re.escape(b_arcroot))
+ for b_path in b_archive_paths:
+ if os.path.isdir(b_path):
+ # Recurse into directories
+ for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True):
+ if not b_dirpath.endswith(b_sep):
+ b_dirpath += b_sep
+
+ for b_dirname in b_dirnames:
+ b_fullpath = b_dirpath + b_dirname
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ except Exception as e:
+ errors.append('%s: %s' % (n_fullpath, to_native(e)))
+
+ for b_filename in b_filenames:
+ b_fullpath = b_dirpath + b_filename
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ b_successes.append(b_fullpath)
+ except Exception as e:
+                                    errors.append('%s: %s' % (n_fullpath, to_native(e)))
+ else:
+ path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii')
+ arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict')
+ if fmt == 'zip':
+ arcfile.write(path, arcname)
+ else:
+ arcfile.add(path, arcname, recursive=False)
+
+ b_successes.append(b_path)
+
+ except Exception as e:
+ expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt)
+ module.fail_json(
+ msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)),
+ exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ state = 'archive'
+
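+            # For the xz format, the tar archive was built in an in-memory
+            # buffer above; write it out through lzma now that it is closed.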
+ if fmt == 'xz':
+ with lzma.open(b_dest, 'wb') as f:
+ f.write(arcfileIO.getvalue())
+ arcfileIO.close()
+
+ if errors:
+ module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))
+
+        if state in ['archive', 'incomplete'] and remove:
+            for b_path in b_successes:
+                try:
+                    if not check_mode:
+                        if os.path.isdir(b_path):
+                            shutil.rmtree(b_path)
+                        else:
+                            os.remove(b_path)
+ except OSError as e:
+ errors.append(to_native(b_path))
+
+ if errors:
+            module.fail_json(dest=dest, msg='Error deleting some source files', files=errors)
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if not check_mode and os.path.getsize(b_dest) != size:
+ changed = True
+
+ if b_successes and state != 'incomplete':
+ state = 'archive'
+
+ # Simple, single-file compression
+ else:
+ b_path = b_expanded_paths[0]
+
+ # No source or compressed file
+ if not (os.path.exists(b_path) or os.path.lexists(b_dest)):
+ state = 'absent'
+
+        # If it already exists and the source file isn't there, consider this done.
+ elif not os.path.lexists(b_path) and os.path.lexists(b_dest):
+ state = 'compress'
+
+ else:
+ if module.check_mode:
+ if not os.path.exists(b_dest):
+ changed = True
+ else:
+ size = 0
+ f_in = f_out = arcfile = None
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ try:
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+ arcfile.write(
+ to_native(b_path, errors='surrogate_or_strict', encoding='ascii'),
+ to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict')
+ )
+ arcfile.close()
+ state = 'archive' # because all zip files are archives
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+ arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii'))
+ arcfile.close()
+ else:
+ f_in = open(b_path, 'rb')
+
+ n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii')
+ if fmt == 'gz':
+ f_out = gzip.open(n_dest, 'wb')
+ elif fmt == 'bz2':
+ f_out = bz2.BZ2File(n_dest, 'wb')
+ elif fmt == 'xz':
+ f_out = lzma.LZMAFile(n_dest, 'wb')
+ else:
+ raise OSError("Invalid format")
+
+ shutil.copyfileobj(f_in, f_out)
+
+ b_successes.append(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ dest=dest,
+ msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ if f_in:
+ f_in.close()
+ if f_out:
+ f_out.close()
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if os.path.getsize(b_dest) != size:
+ changed = True
+
+ state = 'compress'
+
+ if remove and not check_mode:
+ try:
+ os.remove(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ msg='Unable to remove source file: %s' % to_native(e), exception=format_exc()
+ )
+
+ try:
+ file_args = module.load_file_common_arguments(params, path=b_dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ params['path'] = b_dest
+ file_args = module.load_file_common_arguments(params)
+
+ if not check_mode:
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ module.exit_json(
+ archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes],
+ dest=dest,
+ changed=changed,
+ state=state,
+ arcroot=to_native(b_arcroot, errors='surrogate_or_strict'),
+ missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing],
+ expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths],
+ expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths],
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_container.py
new file mode 100644
index 00000000..1364a42c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_container.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: atomic_container
+short_description: Manage the containers on the atomic host platform
+description:
+ - Manage the containers on the atomic host platform.
+    - Allows managing the lifecycle of a container on the atomic host platform.
+author: "Giuseppe Scrivano (@giuseppe)"
+notes:
+    - The host should support the C(atomic) command.
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ backend:
+ description:
+ - Define the backend to use for the container.
+ required: True
+ choices: ["docker", "ostree"]
+ type: str
+ name:
+ description:
+ - Name of the container.
+ required: True
+ type: str
+ image:
+ description:
+ - The image to use to install the container.
+ required: True
+ type: str
+ rootfs:
+ description:
+ - Define the rootfs of the image.
+ type: str
+ state:
+ description:
+ - State of the container.
+ choices: ["absent", "latest", "present", "rollback"]
+ default: "latest"
+ type: str
+ mode:
+ description:
+      - Define whether it is a user or a system container.
+ choices: ["user", "system"]
+ type: str
+ values:
+ description:
+ - Values for the installation of the container.
+      - This option is permitted only with mode C(user) or C(system).
+      - The values specified here will be used at installation time as C(--set) arguments for C(atomic install).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+
+- name: Install the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: latest
+ mode: system
+ values:
+ - ETCD_NAME=etcd.server
+
+- name: Uninstall the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: absent
+ mode: system
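+
+# A further illustrative example: state=rollback switches the container back
+# to its previous deployment.
+- name: Roll back the etcd system container
+  community.general.atomic_container:
+    name: etcd
+    image: rhel/etcd
+    backend: ostree
+    state: rollback
+    mode: system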
+'''
+
+RETURN = r'''
+msg:
+  description: The standard output of the command.
+ returned: always
+ type: str
+  sample: 'Using default tag: latest ...'
+'''
+
+# import module snippets
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_install(module, mode, rootfs, container, image, values_list, backend):
+ system_list = ["--system"] if mode == 'system' else []
+ user_list = ["--user"] if mode == 'user' else []
+ rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
+ args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, image, values_list):
+ args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name, backend):
+ args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=True)
+
+
+def do_rollback(module, name):
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ mode = module.params['mode']
+ name = module.params['name']
+ image = module.params['image']
+ rootfs = module.params['rootfs']
+ values = module.params['values']
+ backend = module.params['backend']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
+ args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+ present = name in out
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, mode, rootfs, name, image, values_list, backend)
+ elif state == 'latest':
+ do_update(module, name, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="The container is not present", changed=False)
+ else:
+ do_uninstall(module, name, backend)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mode=dict(default=None, choices=['user', 'system']),
+ name=dict(required=True),
+ image=dict(required=True),
+ rootfs=dict(default=None),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ backend=dict(required=True, choices=['docker', 'ostree']),
+ values=dict(type='list', default=[], elements='str'),
+ ),
+ )
+
+    if module.params['values'] and module.params['mode'] is None:
+        module.fail_json(msg="values is supported only with user or system mode")
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_host.py
new file mode 100644
index 00000000..993933e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_host.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+ - Manage the atomic host platform.
+    - Rebooting the Atomic Host platform should be done outside this module.
+author:
+- Saravanan KR (@krsacme)
+notes:
+    - The host should be an Atomic platform (verified by the existence of the C(/run/ostree-booted) file).
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ revision:
+ description:
+ - The version number of the atomic host to be deployed.
+ - Providing C(latest) will upgrade to the latest available version.
+ default: 'latest'
+ aliases: [ version ]
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
+ community.general.atomic_host:
+ revision: latest
+
+- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+ community.general.atomic_host:
+ revision: 23.130
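+
+# A further illustrative example: I(version) is an alias for I(revision).
+- name: Deploy a specific revision using the version alias
+  community.general.atomic_host:
+    version: 23.130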
+'''
+
+RETURN = r'''
+msg:
+  description: The standard output of the command.
+ returned: always
+ type: str
+ sample: 'Already on latest'
+'''
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+ revision = module.params['revision']
+ args = []
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if revision == 'latest':
+ args = ['atomic', 'host', 'upgrade']
+ else:
+ args = ['atomic', 'host', 'deploy', revision]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc == 77 and revision == 'latest':
+ module.exit_json(msg="Already on latest", changed=False)
+ elif rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ revision=dict(type='str', default='latest', aliases=["version"]),
+ ),
+ )
+
+ # Verify that the platform is atomic host
+ if not os.path.exists("/run/ostree-booted"):
+ module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_image.py
new file mode 100644
index 00000000..c915ed0b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_image.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform.
+    - Allows executing the commands specified by the RUN label in the container image, when present.
+author:
+- Saravanan KR (@krsacme)
+notes:
+    - The host should support the C(atomic) command.
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ backend:
+ description:
+ - Define the backend where the image is pulled.
+ choices: [ 'docker', 'ostree' ]
+ type: str
+ name:
+ description:
+ - Name of the container image.
+ required: True
+ type: str
+ state:
+ description:
+ - The state of the container image.
+      - The state C(latest) will ensure the container image is upgraded to the latest version and will forcefully restart the container, if running.
+ choices: [ 'absent', 'latest', 'present' ]
+ default: 'latest'
+ type: str
+ started:
+ description:
+      - Start or stop the container.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = r'''
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+ community.general.atomic_image:
+ name: rhel7/rsyslog
+ state: latest
+
+- name: Pull busybox to the OSTree backend
+ community.general.atomic_image:
+ name: busybox
+ state: latest
+ backend: ostree
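+
+# A further illustrative example: remove the image from the OSTree backend again.
+- name: Remove busybox from the OSTree backend
+  community.general.atomic_image:
+    name: busybox
+    state: absent
+    backend: ostree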
+'''
+
+RETURN = r'''
+msg:
+  description: The standard output of the command.
+ returned: always
+ type: str
+  sample: 'Using default tag: latest ...'
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_upgrade(module, image):
+ args = ['atomic', 'update', '--force', image]
+ rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:  # something went wrong; emit the message
+ module.fail_json(rc=rc, msg=err)
+ elif 'Image is up to date' in out:
+ return False
+
+ return True
+
+
+def core(module):
+ image = module.params['name']
+ state = module.params['state']
+ started = module.params['started']
+ backend = module.params['backend']
+ is_upgraded = False
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if backend:
+ if state == 'present' or state == 'latest':
+ args = ['atomic', 'pull', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ out_run = ""
+ if started:
+ args = ['atomic', 'run', "--storage=%s" % backend, image]
+ rc, out_run, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=(out + out_run), changed=changed)
+ elif state == 'absent':
+ args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Unable to find" not in out
+ module.exit_json(msg=out, changed=changed)
+ return
+
+ if state == 'present' or state == 'latest':
+ if state == 'latest':
+ is_upgraded = do_upgrade(module, image)
+
+ if started:
+ args = ['atomic', 'run', image]
+ else:
+ args = ['atomic', 'install', image]
+ elif state == 'absent':
+ args = ['atomic', 'uninstall', image]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ elif rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+ elif started and 'Container is running' in out:
+ module.exit_json(result=out, changed=is_upgraded)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backend=dict(type='str', choices=['docker', 'ostree']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+ started=dict(type='bool', default=True),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/awall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/awall.py
new file mode 100644
index 00000000..260c7ae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/awall.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ted Trask <ttrask01@yahoo.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: awall
+short_description: Manage awall policies
+author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
+description:
+  - This module allows enabling, disabling, and activating I(awall) policies.
+ - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
+ and activates the configuration on the system.
+options:
+ name:
+ description:
+ - One or more policy names.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether the policies should be enabled or disabled.
+ type: str
+ choices: [ disabled, enabled ]
+ default: enabled
+ activate:
+ description:
+ - Activate the new firewall rules.
+ - Can be run with other steps or on its own.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Enable "foo" and "bar" policy
+ community.general.awall:
+    name: [ foo, bar ]
+ state: enabled
+
+- name: Disable "foo" and "bar" policy and activate new rules
+ community.general.awall:
+ name:
+ - foo
+ - bar
+ state: disabled
+    activate: yes
+
+- name: Activate currently enabled firewall rules
+ community.general.awall:
+ activate: yes
+'''
+
+RETURN = ''' # '''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def activate(module):
+ cmd = "%s activate --force" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
+
+
+def is_policy_enabled(module, name):
+ cmd = "%s list" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
+ return True
+ return False
+
+
+def enable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if not is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already enabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s enable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
+
+
+def disable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already disabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s disable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
+ name=dict(type='list', elements='str'),
+ activate=dict(type='bool', default=False),
+ ),
+ required_one_of=[['name', 'activate']],
+ supports_check_mode=True,
+ )
+
+ global AWALL_PATH
+ AWALL_PATH = module.get_bin_path('awall', required=True)
+
+ p = module.params
+
+ if p['name']:
+ if p['state'] == 'enabled':
+ enable_policy(module, p['name'], p['activate'])
+ elif p['state'] == 'disabled':
+ disable_policy(module, p['name'], p['activate'])
+
+ if p['activate']:
+ if not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="activated awall rules")
+
+ module.fail_json(msg="no action defined")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/beadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/beadm.py
new file mode 100644
index 00000000..ab53d066
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/beadm.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Adam Å tevko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: beadm
+short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
+description:
+ - Create, delete or activate ZFS boot environments.
+ - Mount and unmount ZFS boot environments.
+author: Adam Å tevko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS boot environment name.
+ type: str
+ required: True
+ aliases: [ "be" ]
+ snapshot:
+ description:
+ - If specified, the new boot environment will be cloned from the given
+ snapshot or inactive boot environment.
+ type: str
+ description:
+ description:
+ - Associate a description with a new boot environment. This option is
+ available only on Solarish platforms.
+ type: str
+ options:
+ description:
+      - Create the datasets for the new BE with specific ZFS properties.
+ - Multiple options can be specified.
+ - This option is available only on Solarish platforms.
+ type: str
+ mountpoint:
+ description:
+ - Path where to mount the ZFS boot environment.
+ type: path
+ state:
+ description:
+ - Create or delete ZFS boot environment.
+ type: str
+ choices: [ absent, activated, mounted, present, unmounted ]
+ default: present
+ force:
+ description:
+      - Specifies whether the unmount should be forced.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Create ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: present
+
+- name: Create ZFS boot environment from existing inactive boot environment
+ community.general.beadm:
+ name: upgrade-be
+ snapshot: be@old
+ state: present
+
+- name: Create ZFS boot environment with compression enabled and description "upgrade"
+ community.general.beadm:
+ name: upgrade-be
+ options: "compression=on"
+ description: upgrade
+ state: present
+
+- name: Delete ZFS boot environment
+ community.general.beadm:
+ name: old-be
+ state: absent
+
+- name: Mount ZFS boot environment on /tmp/be
+ community.general.beadm:
+ name: BE
+ mountpoint: /tmp/be
+ state: mounted
+
+- name: Unmount ZFS boot environment
+ community.general.beadm:
+ name: BE
+ state: unmounted
+
+- name: Activate ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: activated
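+
+# A further illustrative example: force the unmount of a busy boot environment.
+- name: Forcefully unmount ZFS boot environment
+  community.general.beadm:
+    name: BE
+    state: unmounted
+    force: yes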
+'''
+
+RETURN = r'''
+name:
+ description: BE name
+ returned: always
+ type: str
+ sample: pre-upgrade
+snapshot:
+ description: ZFS snapshot to create BE from
+ returned: always
+ type: str
+ sample: rpool/ROOT/oi-hipster@fresh
+description:
+ description: BE description
+ returned: always
+ type: str
+ sample: Upgrade from 9.0 to 10.0
+options:
+ description: BE additional options
+ returned: always
+ type: str
+ sample: compression=on
+mountpoint:
+ description: BE mountpoint
+ returned: always
+ type: str
+ sample: /mnt/be
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+force:
+ description: If forced action is wanted
+ returned: always
+ type: bool
+ sample: False
+'''
+
+import os
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BE(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.snapshot = module.params['snapshot']
+ self.description = module.params['description']
+ self.options = module.params['options']
+ self.mountpoint = module.params['mountpoint']
+ self.state = module.params['state']
+ self.force = module.params['force']
+ self.is_freebsd = os.uname()[0] == 'FreeBSD'
+
+ def _beadm_list(self):
+ cmd = [self.module.get_bin_path('beadm')]
+ cmd.append('list')
+ cmd.append('-H')
+ if '@' in self.name:
+ cmd.append('-s')
+ return self.module.run_command(cmd)
+
+ def _find_be_by_name(self, out):
+ if '@' in self.name:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+                    if not check:
+                        continue
+                    full_name = check[0].split('/')
+                    if not full_name:
+                        continue
+                    check[0] = full_name[-1]
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ else:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ return None
+
+ def exists(self):
+ (rc, out, _) = self._beadm_list()
+
+        return rc == 0 and self._find_be_by_name(out) is not None
+
+ def is_activated(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ if 'R' in line[1]:
+ return True
+ else:
+ if 'R' in line[2]:
+ return True
+
+ return False
+
+ def activate_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('activate')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def create_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('create')
+
+ if self.snapshot:
+ cmd.append('-e')
+ cmd.append(self.snapshot)
+
+ if not self.is_freebsd:
+ if self.description:
+ cmd.append('-d')
+ cmd.append(self.description)
+
+ if self.options:
+ cmd.append('-o')
+ cmd.append(self.options)
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def destroy_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('destroy')
+ cmd.append('-F')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def is_mounted(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ # On FreeBSD, we exclude currently mounted BE on /, as it is
+ # special and can be activated even if it is mounted. That is not
+ # possible with non-root BEs.
+ if line[2] != '-' and line[2] != '/':
+ return True
+ else:
+ if line[3]:
+ return True
+
+ return False
+
+ def mount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('mount')
+ cmd.append(self.name)
+
+ if self.mountpoint:
+ cmd.append(self.mountpoint)
+
+ return self.module.run_command(cmd)
+
+ def unmount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('unmount')
+ if self.force:
+ cmd.append('-f')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['be']),
+ snapshot=dict(type='str'),
+ description=dict(type='str'),
+ options=dict(type='str'),
+ mountpoint=dict(type='path'),
+ state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ be = BE(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = be.name
+ result['state'] = be.state
+
+ if be.snapshot:
+ result['snapshot'] = be.snapshot
+
+ if be.description:
+ result['description'] = be.description
+
+ if be.options:
+ result['options'] = be.options
+
+ if be.mountpoint:
+ result['mountpoint'] = be.mountpoint
+
+ if be.state == 'absent':
+ # beadm on FreeBSD and Solarish systems differs in delete behaviour in
+ # that we are not allowed to delete activated BE on FreeBSD while on
+ # Solarish systems we cannot delete BE if it is mounted. We add mount
+ # check for both platforms as BE should be explicitly unmounted before
+ # being deleted. On FreeBSD, we also check if the BE is activated.
+ if be.exists():
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if be.is_freebsd:
+ if be.is_activated():
+ module.fail_json(msg='Unable to remove active BE!')
+
+ (rc, out, err) = be.destroy_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while destroying BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ else:
+ module.fail_json(msg='Unable to remove BE as it is mounted!')
+
+ elif be.state == 'present':
+ if not be.exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.create_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while creating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'activated':
+ if not be.is_activated():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # On FreeBSD, beadm is unable to activate mounted BEs, so we add
+ # an explicit check for that case.
+ if be.is_freebsd:
+ if be.is_mounted():
+ module.fail_json(msg='Unable to activate mounted BE!')
+
+ (rc, out, err) = be.activate_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while activating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ elif be.state == 'mounted':
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.mount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while mounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'unmounted':
+ if be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.unmount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while unmounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bearychat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bearychat.py
new file mode 100644
index 00000000..4c907ea6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bearychat.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: bearychat
+short_description: Send BearyChat notifications
+description:
+ - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
+ via the Incoming Robot integration.
+author: "Jiangge Zhang (@tonyseek)"
+options:
+ url:
+ type: str
+ description:
+ - BearyChat WebHook URL. This authenticates you to the bearychat
+ service. It looks like
+ C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
+ required: true
+ text:
+ type: str
+ description:
+ - Message to send.
+ markdown:
+ description:
+ - If C(yes), text will be parsed as markdown.
+ default: 'yes'
+ type: bool
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the
+ default channel selected by the I(url).
+ attachments:
+ type: list
+ elements: dict
+ description:
+      - Define a list of attachments. For more information, see
+        U(https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments).
+'''
+
+EXAMPLES = """
+- name: Send notification message via BearyChat
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via BearyChat all options
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+ markdown: no
+ channel: "#ansible"
+ attachments:
+ - title: "Ansible on {{ inventory_hostname }}"
+ text: "May the Force be with you."
+ color: "#ffffff"
+ images:
+ - http://example.com/index.png
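+
+# A further illustrative example: attachment images may also be given as dicts
+# with a url key (see build_payload_for_bearychat_attachment below).
+- name: Send notification message with a dict-form attachment image
+  local_action:
+    module: bearychat
+    url: |
+      https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+    text: "{{ inventory_hostname }} completed"
+    attachments:
+      - title: "Ansible on {{ inventory_hostname }}"
+        images:
+          - url: http://example.com/index.png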
+"""
+
+RETURN = """
+msg:
+ description: execution result
+ returned: success
+ type: str
+ sample: "OK"
+"""
+
+try:
+ from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+ HAS_URLPARSE = True
+except Exception:
+ HAS_URLPARSE = False
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def build_payload_for_bearychat(module, text, markdown, channel, attachments):
+ payload = {}
+ if text is not None:
+ payload['text'] = text
+ if markdown is not None:
+ payload['markdown'] = markdown
+ if channel is not None:
+ payload['channel'] = channel
+ if attachments is not None:
+ payload.setdefault('attachments', []).extend(
+ build_payload_for_bearychat_attachment(
+ module, item.get('title'), item.get('text'), item.get('color'),
+ item.get('images'))
+ for item in attachments)
+ payload = 'payload=%s' % module.jsonify(payload)
+ return payload
+
+
+def build_payload_for_bearychat_attachment(module, title, text, color, images):
+ attachment = {}
+ if title is not None:
+ attachment['title'] = title
+ if text is not None:
+ attachment['text'] = text
+ if color is not None:
+ attachment['color'] = color
+ if images is not None:
+ target_images = attachment.setdefault('images', [])
+ if not isinstance(images, (list, tuple)):
+ images = [images]
+ for image in images:
+ if isinstance(image, dict) and 'url' in image:
+ image = {'url': image['url']}
+ elif hasattr(image, 'startswith') and image.startswith('http'):
+ image = {'url': image}
+ else:
+ module.fail_json(
+ msg="BearyChat doesn't have support for this kind of "
+ "attachment image")
+ target_images.append(image)
+ return attachment
+
+
+def do_notify_bearychat(module, url, payload):
+ response, info = fetch_url(module, url, data=payload)
+ if info['status'] != 200:
+ url_info = urlparse(url)
+ obscured_incoming_webhook = urlunparse(
+ (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
+ module.fail_json(
+ msg=" failed to send %s to %s: %s" % (
+ payload, obscured_incoming_webhook, info['msg']))
+
+
+def main():
+ module = AnsibleModule(argument_spec={
+ 'url': dict(type='str', required=True, no_log=True),
+ 'text': dict(type='str'),
+ 'markdown': dict(default=True, type='bool'),
+ 'channel': dict(type='str'),
+ 'attachments': dict(type='list', elements='dict'),
+ })
+
+ if not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ url = module.params['url']
+ text = module.params['text']
+ markdown = module.params['markdown']
+ channel = module.params['channel']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_bearychat(
+ module, text, markdown, channel, attachments)
+ do_notify_bearychat(module, url, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bigpanda.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bigpanda.py
new file mode 100644
index 00000000..ea693eb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bigpanda.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+options:
+ component:
+ type: str
+ description:
+ - "The name of the component being deployed. Ex: billing"
+ required: true
+ aliases: ['name']
+ version:
+ type: str
+ description:
+ - The deployment version.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ state:
+ type: str
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ type: str
+ description:
+      - Name of the affected host. Can be a list.
+ - If not specified, it defaults to the remote system's hostname.
+ required: false
+ aliases: ['host']
+ env:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: false
+ owner:
+ type: str
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ type: str
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ type: str
+ description:
+ - Base URL of the API server.
+    required: false
+ default: https://api.bigpanda.io
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ deployment_message:
+ type: str
+ description:
+ - Message about the deployment.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ aliases: ['message']
+ version_added: '0.2.0'
+ source_system:
+ type: str
+ description:
+ - Source system used in the requests to the API
+ default: ansible
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: started
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: finished
+
+# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ hosts: '{{ ansible_hostname }}'
+ state: started
+ delegate_to: localhost
+ register: deployment
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: '{{ deployment.component }}'
+ version: '{{ deployment.version }}'
+ token: '{{ deployment.token }}'
+ state: finished
+ delegate_to: localhost
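+
+# A further illustrative example: report a failed deployment with a message.
+- name: Notify BigPanda about a failed deployment
+  community.general.bigpanda:
+    component: myapp
+    version: '1.3'
+    token: '{{ bigpanda_token }}'
+    state: failed
+    deployment_message: 'Deployment failed on {{ ansible_hostname }}'
+  delegate_to: localhost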
+'''
+
+# ===========================================
+# Module execution.
+#
+import json
+import socket
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ component=dict(required=True, aliases=['name']),
+ version=dict(required=True),
+ token=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['started', 'finished', 'failed']),
+ hosts=dict(required=False, aliases=['host']),
+ env=dict(required=False),
+ owner=dict(required=False),
+ description=dict(required=False),
+ deployment_message=dict(required=False, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ source_system=dict(required=False, default='ansible'),
+ validate_certs=dict(default=True, type='bool'),
+ url=dict(required=False, default='https://api.bigpanda.io'),
+ ),
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ state = module.params['state']
+ url = module.params['url']
+
+ # Build the common request body
+ body = dict()
+ for k in ('component', 'version', 'hosts'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+ if body.get('hosts') is None:
+ body['hosts'] = [socket.gethostname()]
+
+ if not isinstance(body['hosts'], list):
+ body['hosts'] = [body['hosts']]
+
+ # Insert state-specific attributes to body
+ if state == 'started':
+ for k in ('source_system', 'env', 'owner', 'description'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+
+ request_url = url + '/data/events/deployments/start'
+ else:
+        message = module.params['deployment_message']
+ if message is not None:
+ body['errorMessage'] = message
+
+ if state == 'finished':
+ body['status'] = 'success'
+ else:
+ body['status'] = 'failure'
+
+ request_url = url + '/data/events/deployments/end'
+
+ # Build the deployment object we return
+ deployment = dict(token=token, url=url)
+ deployment.update(body)
+ if 'errorMessage' in deployment:
+ message = deployment.pop('errorMessage')
+ deployment['message'] = message
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True, **deployment)
+
+ # Send the data to bigpanda
+ data = json.dumps(body)
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+ try:
+ response, info = fetch_url(module, request_url, data=data, headers=headers)
+ if info['status'] == 200:
+ module.exit_json(changed=True, **deployment)
+ else:
+ module.fail_json(msg=json.dumps(info))
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
new file mode 100644
index 00000000..80c1c493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_access_key
+short_description: Manages Bitbucket repository access keys
+description:
+ - Manages Bitbucket repository access keys (also called deploy keys).
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ key:
+ description:
+ - The SSH public key.
+ type: str
+ label:
+ description:
+ - The key label.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates desired state of the access key.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Bitbucket OAuth consumer should have permissions to read and administrate account repositories.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create access key
+ community.general.bitbucket_access_key:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ key: '{{lookup("file", "bitbucket.pub") }}'
+ label: 'Bitbucket'
+ state: present
+
+- name: Delete access key
+ community.general.bitbucket_access_key:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ label: Bitbucket
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_key': '`key` is required when the `state` is `present`',
+ 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository',
+ 'invalid_username_or_repo': 'Invalid `repository` or `username`',
+ 'invalid_key': 'Invalid SSH key or key is already in use',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'deploy-key-list': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'deploy-key-detail': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_deploy_key(module, bitbucket):
+ """
+ Search for an existing deploy key on Bitbucket
+ with the label specified in module param `label`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing deploy key or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+    # Look through all the response pages in search of the deploy key we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info))
+
+ res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None)
+
+ if res is not None:
+ return res
+
+ return None
+
+
+def create_deploy_key(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['key'],
+ 'label': module.params['label'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] == 400:
+ module.fail_json(msg=error_messages['invalid_key'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def delete_deploy_key(module, bitbucket, key_id):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ key_id=key_id,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ key=dict(type='str'),
+ label=dict(type='str', required=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ key = module.params['key']
+ state = module.params['state']
+
+ # Check parameters
+ if (key is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_key'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing deploy key (if any)
+ existing_deploy_key = get_existing_deploy_key(module, bitbucket)
+ changed = False
+
+    # Create new deploy key in case it doesn't exist
+ if not existing_deploy_key and (state == 'present'):
+ if not module.check_mode:
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Update deploy key if the old value does not match the new one
+ elif existing_deploy_key and (state == 'present'):
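+        # Illustrative note: the stored key may omit the trailing comment, so
+        # a prefix match is used here rather than strict equality.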
+ if not key.startswith(existing_deploy_key.get('key')):
+ if not module.check_mode:
+                # Bitbucket doesn't support updating a key under the same
+                # label, so we need to delete the old one first
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Delete deploy key
+ elif existing_deploy_key and (state == 'absent'):
+ if not module.check_mode:
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
new file mode 100644
index 00000000..ab3b7ec4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_key_pair
+short_description: Manages Bitbucket pipeline SSH key pair
+description:
+ - Manages Bitbucket pipeline SSH key pair.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ public_key:
+ description:
+ - The public key.
+ type: str
+ private_key:
+ description:
+ - The private key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the key pair.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create or update SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ public_key: '{{lookup("file", "bitbucket.pub") }}'
+ private_key: '{{lookup("file", "bitbucket") }}'
+ state: present
+
+- name: Remove SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account, repository or SSH key pair was not found',
+ 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'ssh-key-pair': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_ssh_key_pair(module, bitbucket):
+ """
+ Retrieves an existing ssh key pair from repository
+ specified in module param `repository`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing key pair or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
+ "type": "pipeline_ssh_key_pair"
+ }
+ """
+ api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+
+ info, content = bitbucket.request(
+ api_url=api_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ # Account, repository or SSH key pair was not found.
+ return None
+
+ return content
+
+
+def update_ssh_key_pair(module, bitbucket):
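+    # The key pair endpoint is a singleton resource (at most one SSH key
+    # pair per repository), so a single PUT serves for both creation and
+    # update, and deletion needs no per-key identifier.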
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='PUT',
+ data={
+ 'private_key': module.params['private_key'],
+ 'public_key': module.params['public_key'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 200:
+        module.fail_json(msg='Failed to create or update pipeline ssh key pair: {0}'.format(info))
+
+
+def delete_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ public_key=dict(type='str'),
+ private_key=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ state = module.params['state']
+ public_key = module.params['public_key']
+ private_key = module.params['private_key']
+
+ # Check parameters
+ if ((public_key is None) or (private_key is None)) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_keys'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing ssh key
+ key_pair = get_existing_ssh_key_pair(module, bitbucket)
+ changed = False
+
+ # Create or update key pair
+ if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
+ if not module.check_mode:
+ update_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ # Delete key pair
+ elif key_pair and (state == 'absent'):
+ if not module.check_mode:
+ delete_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
new file mode 100644
index 00000000..dba9f9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_known_host
+short_description: Manages Bitbucket pipeline known hosts
+description:
+ - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
+  - The host fingerprint will be retrieved automatically, but in case of an error, one can use the I(key) field to specify it manually.
+author:
+ - Evgeniy Krysanov (@catcombo)
+requirements:
+ - paramiko
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The FQDN of the known host.
+ type: str
+ required: true
+ key:
+ description:
+ - The public key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the record.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create known hosts from the list
+ community.general.bitbucket_pipeline_known_host:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - bitbucket.org
+ - example.com
+
+- name: Remove known host
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+ state: absent
+
+- name: Specify public key file
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+    key: '{{ lookup("file", "bitbucket.pub") }}'
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+import socket
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account or repository was not found',
+ 'unknown_key_type': 'Public key type is unknown',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'known-host-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'known-host-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_known_host(module, bitbucket):
+ """
+ Search for a host in Bitbucket pipelines known hosts
+ with the name specified in module param `name`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing host or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'type': 'pipeline_known_host',
+            'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+    # Look through all response pages in search of the hostname we need
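+    # Bitbucket API v2 paginates list results and returns the URL of the
+    # following page in the `next` field; seeding `content` with the list
+    # endpoint URL lets the same loop issue the initial request as well.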
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
+
+ host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None)
+
+ if host is not None:
+ return host
+
+ return None
+
+
+def get_host_key(module, hostname):
+ """
+ Fetches public key for specified host
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param hostname: host name
+ :return: key type and key content
+ :rtype: tuple
+
+ Return example::
+
+ (
+ 'ssh-rsa',
+ 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
+ )
+ """
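+    # A minimal SSH handshake via paramiko's low-level Transport is enough to
+    # obtain the server's host key (similar to what `ssh-keyscan` does); no
+    # authentication is attempted.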
+ try:
+ sock = socket.socket()
+ sock.connect((hostname, 22))
+ except socket.error:
+ module.fail_json(msg='Error opening socket to {0}'.format(hostname))
+
+ try:
+ trans = paramiko.transport.Transport(sock)
+ trans.start_client()
+ host_key = trans.get_remote_server_key()
+ except paramiko.SSHException:
+ module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
+
+ trans.close()
+ sock.close()
+
+ key_type = host_key.get_name()
+ key = host_key.get_base64()
+
+ return key_type, key
+
+
+def create_known_host(module, bitbucket):
+ hostname = module.params['name']
+ key_param = module.params['key']
+
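+    # When no key is supplied, it is scanned from the host itself; a supplied
+    # key must be in OpenSSH `<type> <base64-key>` form and is split on the
+    # first space to separate the two parts.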
+ if key_param is None:
+ key_type, key = get_host_key(module, hostname)
+ elif ' ' in key_param:
+ key_type, key = key_param.split(' ', 1)
+ else:
+ module.fail_json(msg=error_messages['unknown_key_type'])
+
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'hostname': hostname,
+ 'public_key': {
+ 'key_type': key_type,
+ 'key': key,
+ }
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+            hostname=hostname,
+ info=info,
+ ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ known_host_uuid=known_host_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ key=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if (module.params['key'] is None) and (not HAS_PARAMIKO):
+ module.fail_json(msg='`paramiko` package not found, please install it.')
+
+ bitbucket = BitbucketHelper(module)
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing known host
+ existing_host = get_existing_known_host(module, bitbucket)
+ state = module.params['state']
+ changed = False
+
+    # Create new host in case it doesn't exist
+ if not existing_host and (state == 'present'):
+ if not module.check_mode:
+ create_known_host(module, bitbucket)
+ changed = True
+
+ # Delete host
+ elif existing_host and (state == 'absent'):
+ if not module.check_mode:
+ delete_known_host(module, bitbucket, existing_host['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
new file mode 100644
index 00000000..33457fca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_variable
+short_description: Manages Bitbucket pipeline variables
+description:
+ - Manages Bitbucket pipeline variables.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The pipeline variable name.
+ type: str
+ required: true
+ value:
+ description:
+ - The pipeline variable value.
+ type: str
+ secured:
+ description:
+ - Whether to encrypt the variable value.
+ type: bool
+ default: no
+ state:
+ description:
+ - Indicates desired state of the variable.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+  - For secured values, the return parameter C(changed) is always C(True).
+'''
+
+EXAMPLES = r'''
+- name: Create or update pipeline variables from the list
+ community.general.bitbucket_pipeline_variable:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item.name }}'
+ value: '{{ item.value }}'
+ secured: '{{ item.secured }}'
+ state: present
+ with_items:
+ - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: False }
+ - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True }
+
+- name: Remove pipeline variable
+ community.general.bitbucket_pipeline_variable:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: AWS_ACCESS_KEY
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, _load_params
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_value': '`value` is required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'pipeline-variable-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'pipeline-variable-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_pipeline_variable(module, bitbucket):
+ """
+ Search for a pipeline variable
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing variable or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'name': 'AWS_ACCESS_OBKEY_ID',
+ 'value': 'x7HU80-a2',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}'
+ }
+
+ The `value` key in dict is absent in case of secured variable.
+ """
+ variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+    # Look through all response pages in search of the variable we need
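+    # Unlike the known-host lookup, this loop pages explicitly via the
+    # `?page=N` query parameter and stops once a page reports a `pagelen`
+    # of 0, i.e. no further results.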
+ page = 1
+ while True:
+ next_url = "%s?page=%s" % (variables_base_url, page)
+ info, content = bitbucket.request(
+ api_url=next_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info))
+
+        # We are at the end of the list
+ if 'pagelen' in content and content['pagelen'] == 0:
+ return None
+
+ page += 1
+ var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None)
+
+ if var is not None:
+ var['name'] = var.pop('key')
+ return var
+
+ return None
+
+
+def create_pipeline_variable(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['name'],
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def update_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='PUT',
+ data={
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
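+# `no_log` normally has to be declared statically in the argument spec, but
+# whether `value` is sensitive here depends on the `secured` parameter.
+# Peeking at the raw parameters with the private `_load_params()` helper,
+# before AnsibleModule validates them, lets the flag be set dynamically.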
+class BitBucketPipelineVariable(AnsibleModule):
+ def __init__(self, *args, **kwargs):
+ params = _load_params() or {}
+ if params.get('secured'):
+ kwargs['argument_spec']['value'].update({'no_log': True})
+ super(BitBucketPipelineVariable, self).__init__(*args, **kwargs)
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ secured=dict(type='bool', default=False),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = BitBucketPipelineVariable(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ value = module.params['value']
+ state = module.params['state']
+ secured = module.params['secured']
+
+ # Check parameters
+ if (value is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_value'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing pipeline variable (if any)
+ existing_variable = get_existing_pipeline_variable(module, bitbucket)
+ changed = False
+
+ # Create new variable in case it doesn't exists
+ if not existing_variable and (state == 'present'):
+ if not module.check_mode:
+ create_pipeline_variable(module, bitbucket)
+ changed = True
+
+ # Update variable if it is secured or the old value does not match the new one
+ elif existing_variable and (state == 'present'):
+ if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value):
+ if not module.check_mode:
+ update_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ # Delete variable
+ elif existing_variable and (state == 'absent'):
+ if not module.check_mode:
+ delete_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bower.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bower.py
new file mode 100644
index 00000000..911d99b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bower.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bower
+short_description: Manage bower packages with bower
+description:
+ - Manage bower packages with bower
+author: "Michael Warkentin (@mwarkentin)"
+options:
+ name:
+ type: str
+ description:
+ - The name of a bower package to install
+ offline:
+ description:
+ - Install packages from local cache, if the packages were installed before
+ type: bool
+ default: 'no'
+ production:
+ description:
+ - Install with --production flag
+ type: bool
+ default: 'no'
+ path:
+ type: path
+ description:
+ - The base path where to install the bower packages
+ required: true
+ relative_execpath:
+ type: path
+ description:
+ - Relative path to bower executable from install path
+ state:
+ type: str
+ description:
+ - The state of the bower package
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ version:
+ type: str
+ description:
+ - The version to be installed
+'''
+
+EXAMPLES = '''
+- name: Install "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+
+- name: Install "bootstrap" bower package on version 3.1.1.
+ community.general.bower:
+ name: bootstrap
+ version: '3.1.1'
+
+- name: Remove the "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+ state: absent
+
+- name: Install packages based on bower.json.
+ community.general.bower:
+ path: /app/location
+
+- name: Update packages based on bower.json to their latest version.
+ community.general.bower:
+ path: /app/location
+ state: latest
+
+# install bower locally and run from there
+- community.general.npm:
+ path: /app/location
+ name: bower
+ global: no
+- community.general.bower:
+ path: /app/location
+ relative_execpath: node_modules/.bin
+'''
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bower(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs['name']
+ self.offline = kwargs['offline']
+ self.production = kwargs['production']
+ self.path = kwargs['path']
+ self.relative_execpath = kwargs['relative_execpath']
+ self.version = kwargs['version']
+
+ if kwargs['version']:
+ self.name_version = self.name + '#' + self.version
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
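+        # Mutating commands are skipped in check mode and return an empty
+        # string; read-only commands such as `list` pass run_in_check_mode=True
+        # so the current state can still be inspected.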
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = []
+
+ if self.relative_execpath:
+ cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
+ if not os.path.isfile(cmd[-1]):
+ self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
+ else:
+ cmd.append("bower")
+
+ cmd.extend(args)
+ cmd.extend(['--config.interactive=false', '--allow-root'])
+
+ if self.name:
+ cmd.append(self.name_version)
+
+ if self.offline:
+ cmd.append('--offline')
+
+ if self.production:
+ cmd.append('--production')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json']
+
+ installed = list()
+ missing = list()
+ outdated = list()
+ data = json.loads(self._exec(cmd, True, False))
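+        # `bower list --json` returns the dependency tree; each entry is
+        # classified as missing, outdated (installed version behind the
+        # reported latest, or incompatible), or installed.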
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ dep_data = data['dependencies'][dep]
+ if dep_data.get('missing', False):
+ missing.append(dep)
+ elif ('version' in dep_data['pkgMeta'] and
+ 'update' in dep_data and
+ dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
+ outdated.append(dep)
+ elif dep_data.get('incompatible', False):
+ outdated.append(dep)
+ else:
+ installed.append(dep)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing, outdated
+
+ def install(self):
+ return self._exec(['install'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ offline=dict(default=False, type='bool'),
+ production=dict(default=False, type='bool'),
+ path=dict(required=True, type='path'),
+ relative_execpath=dict(default=None, required=False, type='path'),
+        state=dict(default='present', choices=['present', 'absent', 'latest']),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec
+ )
+
+ name = module.params['name']
+ offline = module.params['offline']
+ production = module.params['production']
+ path = module.params['path']
+ relative_execpath = module.params['relative_execpath']
+ state = module.params['state']
+ version = module.params['version']
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
+
+ changed = False
+ if state == 'present':
+ installed, missing, outdated = bower.list()
+ if missing:
+ changed = True
+ bower.install()
+ elif state == 'latest':
+ installed, missing, outdated = bower.list()
+ if missing or outdated:
+ changed = True
+ bower.update()
+ else: # Absent
+ installed, missing, outdated = bower.list()
+ if name in installed:
+ changed = True
+ bower.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bundler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bundler.py
new file mode 100644
index 00000000..8be17d6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bundler.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bundler
+short_description: Manage Ruby Gem dependencies with Bundler
+description:
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+options:
+ executable:
+ type: str
+ description:
+ - The path to the bundler executable
+ state:
+ type: str
+ description:
+      - The desired state of the Gem bundle. C(latest) updates gems to the most recent acceptable version.
+ choices: [present, latest]
+ default: present
+ chdir:
+ type: path
+ description:
+ - The directory to execute the bundler commands from. This directory
+ needs to contain a valid Gemfile or .bundle/ directory
+ - If not specified, it will default to the temporary working directory
+ exclude_groups:
+ type: list
+ description:
+ - A list of Gemfile groups to exclude during operations. This only
+ applies when state is C(present). Bundler considers this
+ a 'remembered' property for the Gemfile and will automatically exclude
+ groups in future operations even if C(exclude_groups) is not set
+ clean:
+ description:
+ - Only applies if state is C(present). If set removes any gems on the
+ target host that are not in the gemfile
+ type: bool
+ default: 'no'
+ gemfile:
+ type: path
+ description:
+ - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ - If not specified it will default to the Gemfile in current directory
+ local:
+ description:
+ - If set only installs gems from the cache on the target host
+ type: bool
+ default: 'no'
+ deployment_mode:
+ description:
+ - Only applies if state is C(present). If set it will install gems in
+ ./vendor/bundle instead of the default location. Requires a Gemfile.lock
+ file to have been created prior
+ type: bool
+ default: 'no'
+ user_install:
+ description:
+ - Only applies if state is C(present). Installs gems in the local user's cache or for all users
+ type: bool
+ default: 'yes'
+ gem_path:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install the gems into. If C(chdir) is set then this path is relative to
+ C(chdir)
+ - If not specified the default RubyGems gem paths will be used.
+ binstub_directory:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install any gem bins files to. When executed the bin files will run
+ within the context of the Gemfile and fail if any required gem
+ dependencies are not installed. If C(chdir) is set then this path is
+ relative to C(chdir)
+ extra_args:
+ type: str
+ description:
+ - A space separated string of additional commands that can be applied to
+ the Bundler command. Refer to the Bundler documentation for more
+ information
+author: "Tim Hoiberg (@thoiberg)"
+'''
+
+EXAMPLES = '''
+- name: Install gems from a Gemfile in the current directory
+ community.general.bundler:
+ state: present
+ executable: ~/.rvm/gems/2.1.5/bin/bundle
+
+- name: Exclude the production group from installing
+ community.general.bundler:
+ state: present
+ exclude_groups: production
+
+- name: Install gems into ./vendor/bundle
+ community.general.bundler:
+ state: present
+ deployment_mode: yes
+
+- name: Install gems using a Gemfile in another directory
+ community.general.bundler:
+ state: present
+ gemfile: ../rails_project/Gemfile
+
+- name: Update Gemfile in another directory
+ community.general.bundler:
+ state: latest
+ chdir: ~/rails_project
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_bundler_executable(module):
+ if module.params.get('executable'):
+ result = module.params.get('executable').split(' ')
+ else:
+ result = [module.get_bin_path('bundle', True)]
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'latest']),
+ chdir=dict(default=None, required=False, type='path'),
+ exclude_groups=dict(default=None, required=False, type='list'),
+ clean=dict(default=False, required=False, type='bool'),
+ gemfile=dict(default=None, required=False, type='path'),
+ local=dict(default=False, required=False, type='bool'),
+ deployment_mode=dict(default=False, required=False, type='bool'),
+ user_install=dict(default=True, required=False, type='bool'),
+ gem_path=dict(default=None, required=False, type='path'),
+ binstub_directory=dict(default=None, required=False, type='path'),
+ extra_args=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+ chdir = module.params.get('chdir')
+ exclude_groups = module.params.get('exclude_groups')
+ clean = module.params.get('clean')
+ gemfile = module.params.get('gemfile')
+ local = module.params.get('local')
+ deployment_mode = module.params.get('deployment_mode')
+ user_install = module.params.get('user_install')
+ gem_path = module.params.get('gem_path')
+ binstub_directory = module.params.get('binstub_directory')
+ extra_args = module.params.get('extra_args')
+
+ cmd = get_bundler_executable(module)
+
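+    # In check mode, `bundle check` reports whether the Gemfile's dependencies
+    # are already satisfied without installing anything; a non-zero return
+    # code therefore means a real run would make changes.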
+ if module.check_mode:
+ cmd.append('check')
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+ module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+ if state == 'present':
+ cmd.append('install')
+ if exclude_groups:
+ cmd.extend(['--without', ':'.join(exclude_groups)])
+ if clean:
+ cmd.append('--clean')
+ if gemfile:
+ cmd.extend(['--gemfile', gemfile])
+ if local:
+ cmd.append('--local')
+ if deployment_mode:
+ cmd.append('--deployment')
+ if not user_install:
+ cmd.append('--system')
+ if gem_path:
+ cmd.extend(['--path', gem_path])
+ if binstub_directory:
+ cmd.extend(['--binstubs', binstub_directory])
+ else:
+ cmd.append('update')
+ if local:
+ cmd.append('--local')
+
+ if extra_args:
+ cmd.extend(extra_args.split(' '))
+
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+ module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bzr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bzr.py
new file mode 100644
index 00000000..7af3f279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bzr.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, André Paramés <git@andreparames.com>
+# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: bzr
+author:
+- André Paramés (@andreparames)
+short_description: Deploy software (or files) from bzr branches
+description:
+ - Manage I(bzr) branches to deploy files or software.
+options:
+ name:
+ description:
+ - SSH or HTTP protocol address of the parent branch.
+ aliases: [ parent ]
+ required: yes
+ dest:
+ description:
+ - Absolute path of where the branch should be cloned to.
+ required: yes
+ version:
+ description:
+ - What version of the branch to clone. This can be the
+ bzr revno or revid.
+ default: head
+ force:
+ description:
+ - If C(yes), any modified files in the working
+ tree will be discarded. Before 1.9 the default
+ value was C(yes).
+ type: bool
+ default: 'no'
+ executable:
+ description:
+ - Path to bzr executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+'''
+
+EXAMPLES = '''
+- name: Checkout
+ community.general.bzr:
+ name: bzr+ssh://foosball.example.org/path/to/branch
+ dest: /srv/checkout
+ version: 22
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bzr(object):
+ def __init__(self, module, parent, dest, version, bzr_path):
+ self.module = module
+ self.parent = parent
+ self.dest = dest
+ self.version = version
+ self.bzr_path = bzr_path
+
+ def _command(self, args_list, cwd=None, **kwargs):
+ (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
+ return (rc, out, err)
+
+ def get_version(self):
+ '''samples the version of the bzr branch'''
+
+ cmd = "%s revno" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ revno = stdout.strip()
+ return revno
+
+ def clone(self):
+ '''makes a new bzr branch if it does not already exist'''
+ dest_dirname = os.path.dirname(self.dest)
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ if self.version.lower() != 'head':
+ args_list = ["branch", "-r", self.version, self.parent, self.dest]
+ else:
+ args_list = ["branch", self.parent, self.dest]
+ return self._command(args_list, check_rc=True, cwd=dest_dirname)
+
+ def has_local_mods(self):
+
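+        # `bzr status -S` prints one short-form line per changed file; lines
+        # starting with `??`, which the module treats as unknown (untracked)
+        # files, are filtered out so that only real modifications count.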
+ cmd = "%s status -S" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ lines = stdout.splitlines()
+
+        # list() is needed so len() works on Python 3, where filter() returns an iterator
+        lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
+        return len(lines) > 0
+
+ def reset(self, force):
+ '''
+ Resets the index and working tree to head.
+ Discards any changes to tracked files in the working
+ tree since that commit.
+ '''
+ if not force and self.has_local_mods():
+ self.module.fail_json(msg="Local modifications exist in branch (force=no).")
+ return self._command(["revert"], check_rc=True, cwd=self.dest)
+
+ def fetch(self):
+ '''updates branch from remote sources'''
+ if self.version.lower() != 'head':
+ (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
+ else:
+ (rc, out, err) = self._command(["pull"], cwd=self.dest)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to pull")
+ return (rc, out, err)
+
+ def switch_version(self):
+ '''once pulled, switch to a particular revno or revid'''
+ if self.version.lower() != 'head':
+ args_list = ["revert", "-r", self.version]
+ else:
+ args_list = ["revert"]
+ return self._command(args_list, check_rc=True, cwd=self.dest)
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', required=True),
+ name=dict(type='str', required=True, aliases=['parent']),
+ version=dict(type='str', default='head'),
+ force=dict(type='bool', default=False),
+ executable=dict(type='str'),
+ )
+ )
+
+ dest = module.params['dest']
+ parent = module.params['name']
+ version = module.params['version']
+ force = module.params['force']
+ bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
+
+ bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
+
+ rc, out, err = (0, None, None)
+
+ bzr = Bzr(module, parent, dest, version, bzr_path)
+
+ # if there is no bzr configuration, do a branch operation
+ # else pull and switch the version
+ before = None
+ local_mods = False
+ if not os.path.exists(bzrconfig):
+ (rc, out, err) = bzr.clone()
+
+ else:
+ # else do a pull
+ local_mods = bzr.has_local_mods()
+ before = bzr.get_version()
+ (rc, out, err) = bzr.reset(force)
+ if rc != 0:
+ module.fail_json(msg=err)
+ (rc, out, err) = bzr.fetch()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ # switch to version specified regardless of whether
+ # we cloned or pulled
+ (rc, out, err) = bzr.switch_version()
+
+ # determine if we changed anything
+ after = bzr.get_version()
+ changed = False
+
+ if before != after or local_mods:
+ changed = True
+
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/campfire.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/campfire.py
new file mode 100644
index 00000000..c6848238
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/campfire.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: campfire
+short_description: Send a message to Campfire
+description:
+ - Send a message to Campfire.
+ - Messages with newlines will result in a "Paste" message being sent.
+options:
+ subscription:
+ type: str
+ description:
+ - The subscription name to use.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - Room number to which the message should be sent.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ notify:
+ type: str
+ description:
+ - Send a notification sound before the message.
+ required: false
+ choices: ["56k", "bell", "bezos", "bueller", "clowntown",
+ "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama", "greatjob", "greyjoy",
+ "guarantee", "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins", "makeitso", "noooo",
+ "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret", "sexyback",
+ "story", "tada", "tmyk", "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+
+# informational: requirements for nodes
+requirements: [ ]
+author: "Adam Garside (@fabulops)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ msg: Task completed.
+
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ notify: loggins
+ msg: Task completed ... with feeling.
+'''
+
+try:
+ from html import escape as html_escape
+except ImportError:
+    # Fall back for Python versions without html.escape (added in Python 3.2)
+ import cgi
+
+ def html_escape(text, quote=True):
+ return cgi.escape(text, quote)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ subscription=dict(required=True),
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ notify=dict(required=False,
+ choices=["56k", "bell", "bezos", "bueller",
+ "clowntown", "cottoneyejoe",
+ "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama",
+ "greatjob", "greyjoy", "guarantee",
+ "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins",
+ "makeitso", "noooo", "nyan", "ohmy",
+ "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret",
+ "sexyback", "story", "tada", "tmyk",
+ "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah",
+ "yodel"]),
+ ),
+ supports_check_mode=False
+ )
+
+ subscription = module.params["subscription"]
+ token = module.params["token"]
+ room = module.params["room"]
+ msg = module.params["msg"]
+ notify = module.params["notify"]
+
+ URI = "https://%s.campfirenow.com" % subscription
+ NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
+ MSTR = "<message><body>%s</body></message>"
+ AGENT = "Ansible/1.2"
+
+ # Hack to add basic auth username and password the way fetch_url expects
+ module.params['url_username'] = token
+ module.params['url_password'] = 'X'
+
+ target_url = '%s/room/%s/speak.xml' % (URI, room)
+ headers = {'Content-Type': 'application/xml',
+ 'User-agent': AGENT}
+
+ # Send some audible notification if requested
+ if notify:
+ response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (notify, info['status']))
+
+ # Send the message
+ response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (msg, info['status']))
+
+ module.exit_json(changed=True, room=room, msg=msg, notify=notify)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/capabilities.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/capabilities.py
new file mode 100644
index 00000000..ac6dde67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/capabilities.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+ - This module manipulates files privileges using the Linux capabilities(7) system.
+options:
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ type: str
+ required: yes
+ aliases: [ key ]
+ capability:
+ description:
+ - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
+ type: str
+ required: yes
+ aliases: [ cap ]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The capabilities system will automatically transform operators and flags into the effective set,
+ so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
+ - This module does not attempt to determine the final operator and flags to compare,
+ so you will want to ensure that your capabilities argument matches the final capabilities.
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Set cap_sys_chroot+ep on /foo
+ community.general.capabilities:
+ path: /foo
+ capability: cap_sys_chroot+ep
+ state: present
+
+- name: Remove cap_net_bind_service from /bar
+ community.general.capabilities:
+ path: /bar
+ capability: cap_net_bind_service
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+OPS = ('=', '-', '+')
+
+
+class CapabilitiesModule(object):
+ platform = 'Linux'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.path = module.params['path'].strip()
+ self.capability = module.params['capability'].strip().lower()
+ self.state = module.params['state']
+ self.getcap_cmd = module.get_bin_path('getcap', required=True)
+ self.setcap_cmd = module.get_bin_path('setcap', required=True)
+ self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
+
+ self.run()
+
+ def run(self):
+
+ current = self.getcap(self.path)
+ caps = [cap[0] for cap in current]
+
+ if self.state == 'present' and self.capability_tup not in current:
+ # need to add capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list if it's already set (but op/flags differ)
+ current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
+ # add new cap with correct op/flags
+ current.append(self.capability_tup)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ elif self.state == 'absent' and self.capability_tup[0] in caps:
+ # need to remove capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list and then set current list
+ current = filter(lambda x: x[0] != self.capability_tup[0], current)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ self.module.exit_json(changed=False, state=self.state)
+
+ def getcap(self, path):
+ rval = []
+ cmd = "%s -v %s" % (self.getcap_cmd, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ # If file xattrs are set but no caps are set the output will be:
+ # '/foo ='
+ # If file xattrs are unset the output will be:
+ # '/foo'
+ # If the file does not exist, the stderr will be (with rc == 0...):
+ # '/foo (No such file or directory)'
+ if rc != 0 or stderr != "":
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+ if stdout.strip() != path:
+ if ' =' in stdout:
+ # process output of an older version of libcap
+ caps = stdout.split(' =')[1].strip().split()
+ else:
+ # otherwise, we have a newer version here
+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+ caps = stdout.split()[1].strip().split()
+ for cap in caps:
+ cap = cap.lower()
+ # getcap condenses capabilities with the same op/flags into a
+ # comma-separated list, so we have to parse that
+ if ',' in cap:
+ cap_group = cap.split(',')
+ cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+ for subcap in cap_group:
+ rval.append((subcap, op, flags))
+ else:
+ rval.append(self._parse_cap(cap))
+ return rval
+
+ def setcap(self, path, caps):
+ caps = ' '.join([''.join(cap) for cap in caps])
+ cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+ else:
+ return stdout
+
+ def _parse_cap(self, cap, op_required=True):
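+        # Tries each operator in OPS ('=', '-', '+') in turn and splits the
+        # string at the first one present, e.g. 'cap_net_raw+ep' ->
+        # ('cap_net_raw', '+', 'ep'). Once the operators are exhausted, the
+        # IndexError from OPS[i] doubles as the "no operator found" signal.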
+ opind = -1
+ try:
+ i = 0
+ while opind == -1:
+ opind = cap.find(OPS[i])
+ i += 1
+ except Exception:
+ if op_required:
+ self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+ else:
+ return (cap, None, None)
+ op = cap[opind]
+ cap, flags = cap.split(op)
+ return (cap, op, flags)
+
+
+# ==============================================================
+# main
+
+def main():
+ # defining module
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='str', required=True, aliases=['key']),
+ capability=dict(type='str', required=True, aliases=['cap']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ CapabilitiesModule(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/catapult.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/catapult.py
new file mode 100644
index 00000000..13833620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/catapult.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jonathan Mainguy <jon@soh.re>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# basis of code taken from the ansible twilio and nexmo modules
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: catapult
+short_description: Send an SMS/MMS using the Catapult Bandwidth API
+description:
+    - Allows notifications to be sent as SMS or MMS messages via the Catapult Bandwidth API.
+options:
+ src:
+ type: str
+ description:
+ - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
+ required: true
+ dest:
+ type: list
+ elements: str
+ description:
+ - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
+ required: true
+ msg:
+ type: str
+ description:
+ - The contents of the text message (must be 2048 characters or less).
+ required: true
+ media:
+ type: str
+ description:
+ - For MMS messages, a media url to the location of the media to be sent with the message.
+ user_id:
+ type: str
+ description:
+ - User Id from Api account page.
+ required: true
+ api_token:
+ type: str
+ description:
+ - Api Token from Api account page.
+ required: true
+ api_secret:
+ type: str
+ description:
+ - Api Secret from Api account page.
+ required: true
+
+author: "Jonathan Mainguy (@Jmainguy)"
+notes:
+ - Will return changed even if the media url is wrong.
+ - Will return changed if the destination number is invalid.
+
+'''
+
+EXAMPLES = '''
+- name: Send a mms to multiple users
+ community.general.catapult:
+ src: "+15035555555"
+ dest:
+ - "+12525089000"
+ - "+12018994225"
+ media: "http://example.com/foobar.jpg"
+ msg: "Task is complete"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+- name: Send a sms to a single user
+ community.general.catapult:
+ src: "+15035555555"
+ dest: "+12018994225"
+ msg: "Consider yourself notified"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+'''
+
+RETURN = '''
+changed:
+ description: Whether the api accepted the message.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def send(module, src, dest, msg, media, user_id, api_token, api_secret):
+ """
+ Send the message
+ """
+ AGENT = "Ansible"
+ URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
+ data = {'from': src, 'to': dest, 'text': msg}
+ if media:
+ data['media'] = media
+
+ headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = api_token.replace('\n', '')
+ module.params['url_password'] = api_secret.replace('\n', '')
+
+ return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(required=True),
+ dest=dict(required=True, type='list', elements='str'),
+ msg=dict(required=True),
+ user_id=dict(required=True),
+ api_token=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ media=dict(default=None, required=False),
+ ),
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ msg = module.params['msg']
+ media = module.params['media']
+ user_id = module.params['user_id']
+ api_token = module.params['api_token']
+ api_secret = module.params['api_secret']
+
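+    # One request is sent per destination number; the module fails fast on
+    # the first response whose status is not 201 (created).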
+ for number in dest:
+ rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
+ if info["status"] != 201:
+ body = json.loads(info["body"])
+ fail_msg = body["message"]
+ module.fail_json(msg=fail_msg)
+
+ changed = True
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/circonus_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/circonus_annotation.py
new file mode 100644
index 00000000..27d23168
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/circonus_annotation.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014-2015, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: Create an annotation in Circonus
+description:
+    - Create an annotation event with a given category, title and description. Optionally, a start time, stop time or duration can be provided.
+author: "Nick Harring (@NickatEpic)"
+requirements:
+ - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
+notes:
+ - Check mode isn't supported.
+options:
+ api_key:
+ type: str
+ description:
+ - Circonus API key
+ required: true
+ category:
+ type: str
+ description:
+ - Annotation Category
+ required: true
+ description:
+ type: str
+ description:
+ - Description of annotation
+ required: true
+ title:
+ type: str
+ description:
+ - Title of annotation
+ required: true
+ start:
+ type: int
+ description:
+ - Unix timestamp of event start
+ - If not specified, it defaults to I(now).
+ stop:
+ type: int
+ description:
+ - Unix timestamp of event end
+ - If not specified, it defaults to I(now) + I(duration).
+ duration:
+ type: int
+ description:
+ - Duration in seconds of annotation
+ default: 0
+'''
+EXAMPLES = '''
+- name: Create a simple annotation event with a source, defaults to start and end time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+
+- name: Create an annotation with a duration of 5 minutes and a default start time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ duration: 300
+
+- name: Create an annotation with an explicit start and stop time
+  community.general.circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+    start: 1395940006
+    stop: 1395954407
+'''
+
+RETURN = '''
+annotation:
+ description: details about the created annotation
+ returned: success
+ type: complex
+ contains:
+ _cid:
+ description: annotation identifier
+ returned: success
+ type: str
+ sample: /annotation/100000
+ _created:
+ description: creation timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified:
+ description: last modification timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified_by:
+ description: last modified by
+ returned: success
+ type: str
+ sample: /user/1000
+ category:
+ description: category of the created annotation
+ returned: success
+ type: str
+ sample: alerts
+ title:
+ description: title of the created annotation
+ returned: success
+ type: str
+ sample: WARNING
+ description:
+ description: description of the created annotation
+ returned: success
+ type: str
+ sample: Host is down.
+ start:
+ description: timestamp when the annotation starts
+ returned: success
+ type: int
+ sample: 1502236928
+ stop:
+ description: timestamp when the annotation ends
+ returned: success
+ type: int
+ sample: 1502236928
+ rel_metrics:
+ description: Array of metrics related to this annotation, each metric is a string.
+ returned: success
+ type: list
+ sample:
+ - 54321_kbps
+'''
+import json
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def check_requests_dep(module):
+ """Check if an adequate requests version is available"""
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ else:
+ required_version = '2.0.0' if PY3 else '1.0.0'
+ if LooseVersion(requests.__version__) < LooseVersion(required_version):
+ module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
+
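+# Note: LooseVersion compares dotted version strings numerically, e.g.
+# LooseVersion('2.10.0') > LooseVersion('2.9.1') is True, whereas a plain
+# string comparison would invert it ('2.10.0' < '2.9.1' lexicographically).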
+
+def post_annotation(annotation, api_key):
+ ''' Takes annotation dict and api_key string'''
+ base_url = 'https://api.circonus.com/v2'
+ annotate_post_endpoint = '/annotation'
+ resp = requests.post(base_url + annotate_post_endpoint,
+ headers=build_headers(api_key), data=json.dumps(annotation))
+ resp.raise_for_status()
+ return resp
+
+
+def create_annotation(module):
+ ''' Takes ansible module object '''
+ annotation = {}
+ duration = module.params['duration']
+ if module.params['start'] is not None:
+ start = module.params['start']
+ else:
+ start = int(time.time())
+ if module.params['stop'] is not None:
+ stop = module.params['stop']
+ else:
+ stop = int(time.time()) + duration
+ annotation['start'] = start
+ annotation['stop'] = stop
+ annotation['category'] = module.params['category']
+ annotation['description'] = module.params['description']
+ annotation['title'] = module.params['title']
+ return annotation
+
+
+def build_headers(api_token):
+ '''Takes api token, returns headers with it included.'''
+ headers = {'X-Circonus-App-Name': 'ansible',
+ 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+ 'Accept': 'application/json'}
+ return headers
+
+
+def main():
+ '''Main function, dispatches logic'''
+ module = AnsibleModule(
+ argument_spec=dict(
+ start=dict(type='int'),
+ stop=dict(type='int'),
+ category=dict(required=True),
+ title=dict(required=True),
+ description=dict(required=True),
+ duration=dict(default=0, type='int'),
+ api_key=dict(required=True, no_log=True)
+ )
+ )
+
+ check_requests_dep(module)
+
+ annotation = create_annotation(module)
+ try:
+ resp = post_annotation(annotation, module.params['api_key'])
+ except requests.exceptions.RequestException as e:
+ module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=True, annotation=resp.json())
+
+
+if __name__ == '__main__':
+ main()
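
One subtlety in create_annotation() above: each default is computed from the current time independently, so an explicit start combined with an omitted stop still ends at now + duration rather than start + duration. A small sketch of the defaulting, assuming nothing beyond the standard library:

    import time

    def default_window(start=None, stop=None, duration=0):
        # Mirrors create_annotation(): every default is taken from "now".
        now = int(time.time())
        start = start if start is not None else now
        stop = stop if stop is not None else int(time.time()) + duration
        return start, stop

    # With duration=300 and no explicit bounds, the window is [now, now + 300].
    print(default_window(duration=300))
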
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_spark.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_spark.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_spark.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Russell (@drew-russell)
+notes:
+ - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter identifying the type of recipient to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token required to validate the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_webex.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_webex.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_webex.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Russell (@drew-russell)
+notes:
+ - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter identifying the type of recipient to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token required to validate the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
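
Both copies of this module build the request body the same way: the values of recipient_type and msg_type become the JSON keys, so one dict literal covers rooms, person IDs and e-mail addresses alike. A sketch with hypothetical values:

    # Hypothetical parameter values; the real ones come from module.params.
    params = {
        'recipient_type': 'toPersonEmail',
        'recipient_id': 'jane@example.com',
        'msg_type': 'markdown',
        'msg': '**hello**',
    }
    payload = {
        params['recipient_type']: params['recipient_id'],  # -> 'toPersonEmail': ...
        params['msg_type']: params['msg'],                 # -> 'markdown': ...
    }
    print(payload)  # {'toPersonEmail': 'jane@example.com', 'markdown': '**hello**'}
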
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_aa_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
new file mode 100644
index 00000000..a2750937
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_aa_policy
+short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the Anti Affinity Policy.
+ type: str
+ required: True
+ location:
+ description:
+ - Datacenter in which the policy lives/should live.
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ required: False
+ default: present
+ choices: ['present','absent']
+ wait:
+ description:
+ - This option does nothing and will be removed in community.general 3.0.0.
+ type: bool
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+
+- name: Delete AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+'''
+
+RETURN = '''
+policy:
+ description: The anti affinity policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "id":"1a28dd0988984d87b9cd61fa8da15424",
+ "name":"test_aa_policy",
+ "location":"UC1",
+ "links":[
+ {
+ "rel":"self",
+ "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
+ "verbs":[
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ },
+ {
+ "rel":"location",
+ "href":"/v2/datacenters/wfad/UC1",
+ "id":"uc1",
+ "name":"UC1 - US West (Santa Clara)"
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk:
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAntiAffinityPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'),
+ exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'),
+ exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ location=dict(required=True),
+ wait=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_policies_for_datacenter(p)
+
+ if p['state'] == "absent":
+ changed, policy = self._ensure_policy_is_absent(p)
+ else:
+ changed, policy = self._ensure_policy_is_present(p)
+
+ if hasattr(policy, 'data'):
+ policy = policy.data
+ elif hasattr(policy, '__dict__'):
+ policy = policy.__dict__
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_policies_for_datacenter(self, p):
+ """
+ Get the Policies for a datacenter by calling the CLC API.
+ :param p: module parameters containing the datacenter location
+ :return: policies in the datacenter
+ """
+ response = {}
+
+ policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
+
+ for policy in policies:
+ response[policy.name] = policy
+ return response
+
+ def _create_policy(self, p):
+ """
+ Create an Anti Affinity Policy using the CLC API.
+ :param p: module parameters containing the policy name and location
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ return self.clc.v2.AntiAffinity.Create(
+ name=p['name'],
+ location=p['location'])
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _delete_policy(self, p):
+ """
+ Delete an Anti Affinity Policy using the CLC API.
+ :param p: module parameters containing the policy name
+ :return: none
+ """
+ try:
+ policy = self.policy_dict[p['name']]
+ policy.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _policy_exists(self, policy_name):
+ """
+ Check to see if an Anti Affinity Policy exists
+ :param policy_name: name of the policy
+ :return: the policy object if it exists, otherwise False
+ """
+ if policy_name in self.policy_dict:
+ return self.policy_dict.get(policy_name)
+
+ return False
+
+ def _ensure_policy_is_absent(self, p):
+ """
+ Makes sure that a policy is absent
+ :param p: module parameters containing the policy name
+ :return: tuple of whether a deletion occurred and None
+ """
+ changed = False
+ if self._policy_exists(policy_name=p['name']):
+ changed = True
+ if not self.module.check_mode:
+ self._delete_policy(p)
+ return changed, None
+
+ def _ensure_policy_is_present(self, p):
+ """
+ Ensures that a policy is present
+ :param p: module parameters containing the policy name
+ :return: tuple of whether an addition occurred and the resulting policy
+ """
+ changed = False
+ policy = self._policy_exists(policy_name=p['name'])
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_policy(p)
+ return changed, policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+ clc_aa_policy = ClcAntiAffinityPolicy(module)
+ clc_aa_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
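
Each of the CLC modules in this diff repeats the same credential bootstrap in _set_clc_credentials_from_env(). A condensed sketch of the precedence it applies (API token plus account alias first, then username/password, otherwise fail):

    import os

    def resolve_clc_credentials(env=None):
        # Sketch of the lookup order in _set_clc_credentials_from_env().
        env = os.environ if env is None else env
        if env.get('CLC_V2_API_TOKEN') and env.get('CLC_ACCT_ALIAS'):
            return 'token'      # token + alias wins
        if env.get('CLC_V2_API_USERNAME') and env.get('CLC_V2_API_PASSWD'):
            return 'password'   # fall back to username/password
        raise RuntimeError('You must set the CLC_V2_API_USERNAME and '
                           'CLC_V2_API_PASSWD environment variables')

    print(resolve_clc_credentials({'CLC_V2_API_TOKEN': 't', 'CLC_ACCT_ALIAS': 'wfad'}))  # token
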
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_alert_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
new file mode 100644
index 00000000..7a10c0b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
@@ -0,0 +1,526 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+options:
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ name:
+ description:
+ - The name of the alert policy. This is mutually exclusive with id
+ type: str
+ id:
+ description:
+ - The alert policy id. This is mutually exclusive with name
+ type: str
+ alert_recipients:
+ description:
+ - A list of recipient email addresses to notify for the alert.
+ This is required for state 'present'
+ type: list
+ metric:
+ description:
+ - The metric on which to measure the condition that will trigger the alert.
+ This is required for state 'present'
+ type: str
+ choices: ['cpu','memory','disk']
+ duration:
+ description:
+ - The length of time (as hh:mm:ss) that the condition must exceed the threshold.
+ This is required for state 'present'
+ type: str
+ threshold:
+ description:
+ - The threshold that will trigger the alert when the metric equals or exceeds it.
+ This is required for state 'present'
+ This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0
+ type: int
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Alert Policy for disk above 80% for 5 minutes
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ alert_recipients:
+ - test1@centurylink.com
+ - test2@centurylink.com
+ metric: 'disk'
+ duration: '00:05:00'
+ threshold: 80
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+
+- name: Delete Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Alert Policy
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+'''
+
+RETURN = '''
+policy:
+ description: The alert policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "actions": [
+ {
+ "action": "email",
+ "settings": {
+ "recipients": [
+ "user1@domain.com",
+ "user1@domain.com"
+ ]
+ }
+ }
+ ],
+ "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
+ "links": [
+ {
+ "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
+ "rel": "self",
+ "verbs": [
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ }
+ ],
+ "name": "test_alert",
+ "triggers": [
+ {
+ "duration": "00:05:00",
+ "metric": "disk",
+ "threshold": 80.0
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAlertPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ id=dict(),
+ alias=dict(required=True),
+ alert_recipients=dict(type='list'),
+ metric=dict(
+ choices=[
+ 'cpu',
+ 'memory',
+ 'disk'],
+ default=None),
+ duration=dict(type='str'),
+ threshold=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ mutually_exclusive = [
+ ['name', 'id']
+ ]
+ return {'argument_spec': argument_spec,
+ 'mutually_exclusive': mutually_exclusive}
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_alert_policies(p['alias'])
+
+ if p['state'] == 'present':
+ changed, policy = self._ensure_alert_policy_is_present()
+ else:
+ changed, policy = self._ensure_alert_policy_is_absent()
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_alert_policy_is_present(self):
+ """
+ Ensures that the alert policy is present
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the created/updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ policy_name = p.get('name')
+
+ if not policy_name:
+ self.module.fail_json(msg='Policy name is required')
+ policy = self._alert_policy_exists(policy_name)
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_alert_policy()
+ else:
+ changed_u, policy = self._ensure_alert_policy_is_updated(policy)
+ if changed_u:
+ changed = True
+ return changed, policy
+
+ def _ensure_alert_policy_is_absent(self):
+ """
+ Ensures that the alert policy is absent
+ :return: (changed, None)
+ changed: A flag representing if anything is modified
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = p.get('id')
+ alert_policy_name = p.get('name')
+ alias = p.get('alias')
+ if not alert_policy_id and not alert_policy_name:
+ self.module.fail_json(
+ msg='Either alert policy id or policy name is required')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id(
+ self.module,
+ alert_policy_name)
+ if alert_policy_id and alert_policy_id in self.policy_dict:
+ changed = True
+ if not self.module.check_mode:
+ self._delete_alert_policy(alias, alert_policy_id)
+ return changed, None
+
+ def _ensure_alert_policy_is_updated(self, alert_policy):
+ """
+ Ensures the alert policy is updated if anything is changed in the alert policy configuration
+ :param alert_policy: the target alert policy
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = alert_policy.get('id')
+ email_list = p.get('alert_recipients')
+ metric = p.get('metric')
+ duration = p.get('duration')
+ threshold = p.get('threshold')
+ policy = alert_policy
+ if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
+ (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
+ (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
+ changed = True
+ elif email_list:
+ t_email_list = list(
+ alert_policy.get('actions')[0].get('settings').get('recipients'))
+ if set(email_list) != set(t_email_list):
+ changed = True
+ if changed and not self.module.check_mode:
+ policy = self._update_alert_policy(alert_policy_id)
+ return changed, policy
+
+ def _get_alert_policies(self, alias):
+ """
+ Get the alert policies for account alias by calling the CLC API.
+ :param alias: the account alias
+ :return: the alert policies for the account alias
+ """
+ response = {}
+
+ policies = self.clc.v2.API.Call('GET',
+ '/v2/alertPolicies/%s'
+ % alias)
+
+ for policy in policies.get('items'):
+ response[policy.get('id')] = policy
+ return response
+
+ def _create_alert_policy(self):
+ """
+ Create an alert Policy using the CLC API.
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST',
+ '/v2/alertPolicies/%s' % alias,
+ arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to create alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _update_alert_policy(self, alert_policy_id):
+ """
+ Update alert policy using the CLC API.
+ :param alert_policy_id: The clc alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'PUT', '/v2/alertPolicies/%s/%s' %
+ (alias, alert_policy_id), arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to update alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _delete_alert_policy(self, alias, policy_id):
+ """
+ Delete an alert policy using the CLC API.
+ :param alias : the account alias
+ :param policy_id: the alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/alertPolicies/%s/%s' %
+ (alias, policy_id), None)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to delete alert policy id "{0}". {1}'.format(
+ policy_id, str(e.response_text)))
+ return result
+
+ def _alert_policy_exists(self, policy_name):
+ """
+ Check to see if an alert policy exists
+ :param policy_name: name of the alert policy
+ :return: the alert policy object if it exists, otherwise False
+ """
+ result = False
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == policy_name:
+ result = self.policy_dict.get(policy_id)
+ return result
+
+ def _get_alert_policy_id(self, module, alert_policy_name):
+ """
+ retrieves the alert policy id of the account based on the name of the policy
+ :param module: the AnsibleModule object
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy_id
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcAlertPolicy._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_alert_policy = ClcAlertPolicy(module)
+ clc_alert_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
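
The drift check in _ensure_alert_policy_is_updated() compares recipient lists as sets, so reordering or duplicating an address does not count as a change; only a membership difference triggers an update. A sketch of that comparison:

    def recipients_changed(requested, existing):
        # Mirrors the set comparison above: order and duplicates are ignored.
        return set(requested) != set(existing)

    print(recipients_changed(['a@x.com', 'b@x.com'], ['b@x.com', 'a@x.com']))  # False
    print(recipients_changed(['a@x.com'], ['a@x.com', 'b@x.com']))             # True
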
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
new file mode 100644
index 00000000..c45ca919
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud.
+description:
+ - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - A list of server IDs on which to deploy the blueprint package.
+ type: list
+ required: True
+ package_id:
+ description:
+ - The package id of the blueprint.
+ type: str
+ required: True
+ package_params:
+ description:
+ - The dictionary of arguments required to deploy the blueprint.
+ type: dict
+ default: {}
+ required: False
+ state:
+ description:
+ - Whether to install or uninstall the package. Currently it supports only "present" for install action.
+ type: str
+ required: False
+ default: present
+ choices: ['present']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: str
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Deploy package
+ community.general.clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ - UC1TEST-SERVER2
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ package_params: {}
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SERVER1",
+ "UC1TEST-SERVER2"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcBlueprintPackage:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ changed = False
+ changed_server_ids = []
+ self._set_clc_credentials_from_env()
+ server_ids = p['server_ids']
+ package_id = p['package_id']
+ package_params = p['package_params']
+ state = p['state']
+ if state == 'present':
+ changed, changed_server_ids, request_list = self.ensure_package_installed(
+ server_ids, package_id, package_params)
+ self._wait_for_requests_to_complete(request_list)
+ self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ This function defines the dictionary object required for
+ package module
+ :return: the package dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ package_id=dict(required=True),
+ package_params=dict(type='dict', default={}),
+ wait=dict(default=True), # @FIXME should be bool?
+ state=dict(default='present', choices=['present'])
+ )
+ return argument_spec
+
+ def ensure_package_installed(self, server_ids, package_id, package_params):
+ """
+ Ensure the package is installed in the given list of servers
+ :param server_ids: the server list where the package needs to be installed
+ :param package_id: the blueprint package id
+ :param package_params: the package arguments
+ :return: (changed, server_ids, request_list)
+ changed: A flag indicating if a change was made
+ server_ids: The list of servers modified
+ request_list: The list of request objects from clc-sdk
+ """
+ changed = False
+ request_list = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to get servers from CLC')
+ for server in servers:
+ if not self.module.check_mode:
+ request = self.clc_install_package(
+ server,
+ package_id,
+ package_params)
+ request_list.append(request)
+ changed = True
+ return changed, server_ids, request_list
+
+ def clc_install_package(self, server, package_id, package_params):
+ """
+ Install the package to a given clc server
+ :param server: The server object where the package needs to be installed
+ :param package_id: The blueprint package id
+ :param package_params: the required argument dict for the package installation
+ :return: The result object from the CLC API call
+ """
+ result = None
+ try:
+ result = server.ExecutePackage(
+ package_id=package_id,
+ parameters=package_params)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
+ package_id, server.id, ex.message
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, request_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param request_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in request_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process package install request')
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: the list of server ids
+ :param message: the error message to raise if there is any error
+ :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcBlueprintPackage.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_blueprint_package = ClcBlueprintPackage(module)
+ clc_blueprint_package.process_request()
+
+
+if __name__ == '__main__':
+ main()
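
define_argument_spec() above carries a '# @FIXME should be bool?' note: wait is declared without a type, so a string value such as 'False' reaches the later truthiness check (if not self.module.params['wait']) as a non-empty, truthy string. A sketch of the corrected declaration, assuming the FIXME were resolved:

    # Hypothetical corrected spec: type='bool' makes Ansible coerce
    # 'no'/'false'/'0' and friends to a real boolean before the wait check.
    argument_spec = dict(
        server_ids=dict(type='list', required=True),
        package_id=dict(required=True),
        package_params=dict(type='dict', default={}),
        wait=dict(type='bool', default=True),
        state=dict(default='present', choices=['present']),
    )
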
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
new file mode 100644
index 00000000..105d793c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
@@ -0,0 +1,584 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_firewall_policy
+short_description: Create/delete/update firewall policies
+description:
+ - Create, delete, or update firewall policies on CenturyLink Cloud.
+options:
+ location:
+ description:
+ - Target datacenter for the firewall policy
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the firewall policy
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ source:
+ description:
+ - The list of source addresses for traffic on the originating firewall.
+ This is required when state is 'present'
+ type: list
+ destination:
+ description:
+ - The list of destination addresses for traffic on the terminating firewall.
+ This is required when state is 'present'
+ type: list
+ ports:
+ description:
+ - The list of ports associated with the policy.
+ TCP and UDP can take in single ports or port ranges.
+ - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
+ type: list
+ firewall_policy_id:
+ description:
+ - Id of the firewall policy. This is required to update or delete an existing firewall policy
+ type: str
+ source_account_alias:
+ description:
+ - CLC alias for the source account
+ type: str
+ required: True
+ destination_account_alias:
+ description:
+ - CLC alias for the destination account
+ type: str
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: str
+ default: 'True'
+ enabled:
+ description:
+ - Whether the firewall policy is enabled or disabled
+ type: str
+ choices: [True, False]
+ default: True
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+ community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: present
+ source: 10.128.216.0/24
+ destination: 10.128.216.0/24
+ ports: Any
+ destination_account_alias: WFAD
+
+- name: Delete Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete a Firewall Policy at CenturyLink Cloud
+ community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: absent
+ firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
+'''
+
+RETURN = '''
+firewall_policy_id:
+ description: The firewall policy id
+ returned: success
+ type: str
+ sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+ description: The firewall policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "destination":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "destinationAccount":"wfad",
+ "enabled":true,
+ "id":"fc36f1bfd47242e488a9c44346438c05",
+ "links":[
+ {
+ "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "ports":[
+ "any"
+ ],
+ "source":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "status":"active"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcFirewallPolicy:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.firewall_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ location=dict(required=True),
+ source_account_alias=dict(required=True),
+ destination_account_alias=dict(),
+ firewall_policy_id=dict(),
+ ports=dict(type='list'),
+ source=dict(type='list'),
+ destination=dict(type='list'),
+ wait=dict(default=True), # @FIXME type=bool
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(default=True, choices=[True, False])
+ )
+ return argument_spec
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ firewall_policy = None
+ location = self.module.params.get('location')
+ source_account_alias = self.module.params.get('source_account_alias')
+ destination_account_alias = self.module.params.get(
+ 'destination_account_alias')
+ firewall_policy_id = self.module.params.get('firewall_policy_id')
+ ports = self.module.params.get('ports')
+ source = self.module.params.get('source')
+ destination = self.module.params.get('destination')
+ wait = self.module.params.get('wait')
+ state = self.module.params.get('state')
+ enabled = self.module.params.get('enabled')
+
+ self.firewall_dict = {
+ 'location': location,
+ 'source_account_alias': source_account_alias,
+ 'destination_account_alias': destination_account_alias,
+ 'firewall_policy_id': firewall_policy_id,
+ 'ports': ports,
+ 'source': source,
+ 'destination': destination,
+ 'wait': wait,
+ 'state': state,
+ 'enabled': enabled}
+
+ self._set_clc_credentials_from_env()
+
+ if state == 'absent':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
+ source_account_alias, location, self.firewall_dict)
+
+ elif state == 'present':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
+ source_account_alias, location, self.firewall_dict)
+
+ return self.module.exit_json(
+ changed=changed,
+ firewall_policy_id=firewall_policy_id,
+ firewall_policy=firewall_policy)
+
+ @staticmethod
+ def _get_policy_id_from_response(response):
+ """
+ Method to parse out the policy id from creation response
+ :param response: response from firewall creation API call
+ :return: policy_id: firewall policy id from creation call
+ """
+ url = response.get('links')[0]['href']
+ path = urlparse(url).path
+ path_list = os.path.split(path)
+ policy_id = path_list[-1]
+ return policy_id
+
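+ # Illustrative example (not part of the upstream module): for the sample
+ # response shown in RETURN above, the parsing in _get_policy_id_from_response
+ # reduces to:
+ #
+ #   >>> url = 'http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05'
+ #   >>> os.path.split(urlparse(url).path)[-1]
+ #   'fc36f1bfd47242e488a9c44346438c05'
+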
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
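+ # Illustrative note (not part of the upstream module): credentials are read
+ # only from the environment, never from module parameters, and a token plus
+ # account alias takes precedence over username/password. For example (the
+ # playbook name is a placeholder):
+ #
+ #   CLC_V2_API_TOKEN=<token> CLC_ACCT_ALIAS=WFAD ansible-playbook site.yml
+ #   CLC_V2_API_USERNAME=user CLC_V2_API_PASSWD=pass ansible-playbook site.yml
+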
+ def _ensure_firewall_policy_is_present(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: (changed, firewall_policy_id, firewall_policy)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was created/updated
+ firewall_policy: The firewall_policy object
+ """
+ firewall_policy = None
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+
+ if firewall_policy_id is None:
+ if not self.module.check_mode:
+ response = self._create_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_dict)
+ firewall_policy_id = self._get_policy_id_from_response(
+ response)
+ changed = True
+ else:
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if not firewall_policy:
+ return self.module.fail_json(
+ msg='Unable to find the firewall policy id: {0}'.format(
+ firewall_policy_id))
+ changed = self._compare_get_request_with_dict(
+ firewall_policy,
+ firewall_dict)
+ if not self.module.check_mode and changed:
+ self._update_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict)
+ if changed and firewall_policy_id:
+ firewall_policy = self._wait_for_requests_to_complete(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ return changed, firewall_policy_id, firewall_policy
+
+ def _ensure_firewall_policy_is_absent(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is removed if present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: firewall policy to delete
+ :return: (changed, firewall_policy_id, response)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was deleted
+ response: response from CLC API call
+ """
+ changed = False
+ response = []
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+ result = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if result:
+ if not self.module.check_mode:
+ response = self._delete_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ changed = True
+ return changed, firewall_policy_id, response
+
+ def _create_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Creates the firewall policy for the given account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response from CLC API call
+ """
+ payload = {
+ 'destinationAccount': firewall_dict.get('destination_account_alias'),
+ 'source': firewall_dict.get('source'),
+ 'destination': firewall_dict.get('destination'),
+ 'ports': firewall_dict.get('ports')}
+ try:
+ response = self.clc.v2.API.Call(
+ 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
+ (source_account_alias, location), payload)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to create firewall policy. %s" %
+ str(e.response_text))
+ return response
+
+ def _delete_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Deletes a given firewall policy for an account alias in a datacenter
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to delete
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to delete the firewall policy id: {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _update_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict):
+ """
+ Updates a firewall policy for a given datacenter and account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to update
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'PUT',
+ '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias,
+ location,
+ firewall_policy_id),
+ firewall_dict)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to update the firewall policy id: {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ @staticmethod
+ def _compare_get_request_with_dict(response, firewall_dict):
+ """
+ Helper method to compare the json response for getting the firewall policy with the request parameters
+ :param response: response from the get method
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: changed: Boolean that returns true if there are differences between
+ the response parameters and the playbook parameters
+ """
+
+ changed = False
+
+ response_dest_account_alias = response.get('destinationAccount')
+ response_enabled = response.get('enabled')
+ response_source = response.get('source')
+ response_dest = response.get('destination')
+ response_ports = response.get('ports')
+ request_dest_account_alias = firewall_dict.get(
+ 'destination_account_alias')
+ request_enabled = firewall_dict.get('enabled')
+ if request_enabled is None:
+ request_enabled = True
+ request_source = firewall_dict.get('source')
+ request_dest = firewall_dict.get('destination')
+ request_ports = firewall_dict.get('ports')
+
+ if (
+ response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
+ response_enabled != request_enabled) or (
+ response_source and response_source != request_source) or (
+ response_dest and response_dest != request_dest) or (
+ response_ports and response_ports != request_ports):
+ changed = True
+ return changed
+
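+ # Illustrative note (not part of the upstream module): fields are compared
+ # only when the API response actually returns them, and an unset 'enabled'
+ # in the playbook is treated as True. For example, a response whose source
+ # is ['10.1.1.0/24'] against a requested source of
+ # ['10.1.1.0/24', '10.2.2.0/24'] yields changed=True and triggers an update.
+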
+ def _get_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Get back details for a particular firewall policy
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: id of the firewall policy to get
+ :return: response - The response from CLC API call
+ """
+ response = None
+ try:
+ response = self.clc.v2.API.Call(
+ 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ self.module.fail_json(
+ msg="Unable to fetch the firewall policy with id: {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _wait_for_requests_to_complete(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ wait_limit=50):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param source_account_alias: The source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: The firewall policy id
+ :param wait_limit: The number of times to check the status for completion
+ :return: the firewall_policy object
+ """
+ wait = self.module.params.get('wait')
+ count = 0
+ firewall_policy = None
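+ # Poll until the policy reports 'active'; with the default wait_limit of
+ # 50 checks and a 2-second sleep this allows roughly 100 seconds before
+ # giving up and returning the last policy seen.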
+ while wait:
+ count += 1
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ status = firewall_policy.get('status')
+ if status == 'active' or count > wait_limit:
+ wait = False
+ else:
+ # wait for 2 seconds
+ sleep(2)
+ return firewall_policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
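+ # Illustrative note (not part of the upstream module): after this call every
+ # SDK request carries an 'Api-Client: ClcAnsibleModule/<version>' header and
+ # has the same string appended to the default requests User-Agent.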
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_firewall = ClcFirewallPolicy(module)
+ clc_firewall.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_group.py
new file mode 100644
index 00000000..a80cc400
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_group.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_group
+short_description: Create/delete Server Groups at CenturyLink Cloud
+description:
+ - Create or delete Server Groups at CenturyLink Cloud
+options:
+ name:
+ description:
+ - The name of the Server Group
+ type: str
+ required: True
+ description:
+ description:
+ - A description of the Server Group
+ type: str
+ required: False
+ parent:
+ description:
+ - The parent group of the server group. If parent is not provided, the group is created at the top level.
+ type: str
+ required: False
+ location:
+ description:
+ - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
+ associated with the account
+ type: str
+ required: False
+ state:
+ description:
+ - Whether to create or delete the group
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the environment variables below, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint that points to a different CLC environment.
+'''
+
+EXAMPLES = '''
+
+# Create a Server Group
+
+---
+- name: Create Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+# Delete a Server Group
+- name: Delete Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+group:
+ description: The group information
+ returned: success
+ type: dict
+ sample:
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":"2015-07-29T18:52:47Z",
+ "modifiedBy":"service.wfad",
+ "modifiedDate":"2015-07-29T18:52:47Z"
+ },
+ "customFields":[
+
+ ],
+ "description":"test group",
+ "groups":[
+
+ ],
+ "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "links":[
+ {
+ "href":"/v2/groups/wfad",
+ "rel":"createGroup",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad",
+ "rel":"createServer",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"parentGroup"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
+ "rel":"defaults",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
+ "rel":"archiveGroupAction"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
+ "rel":"horizontalAutoscalePolicyMapping",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test group",
+ "status":"active",
+ "type":"default"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcGroup(object):
+
+ clc = None
+ root_group = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ location = self.module.params.get('location')
+ group_name = self.module.params.get('name')
+ parent_name = self.module.params.get('parent')
+ group_description = self.module.params.get('description')
+ state = self.module.params.get('state')
+
+ self._set_clc_credentials_from_env()
+ self.group_dict = self._get_group_tree_for_datacenter(
+ datacenter=location)
+
+ if state == "absent":
+ changed, group, requests = self._ensure_group_is_absent(
+ group_name=group_name, parent_name=parent_name)
+ if requests:
+ self._wait_for_requests_to_complete(requests)
+ else:
+ changed, group = self._ensure_group_is_present(
+ group_name=group_name, parent_name=parent_name, group_description=group_description)
+ try:
+ group = group.data
+ except AttributeError:
+ group = group_name
+ self.module.exit_json(changed=changed, group=group)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ parent=dict(default=None),
+ location=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=True))
+
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_group_is_absent(self, group_name, parent_name):
+ """
+ Ensure that group_name is absent by deleting it if necessary
+ :param group_name: string - the name of the clc server group to delete
+ :param parent_name: string - the name of the parent group for group_name
+ :return: changed, group
+ """
+ changed = False
+ group = []
+ results = []
+
+ if self._group_exists(group_name=group_name, parent_name=parent_name):
+ if not self.module.check_mode:
+ group.append(group_name)
+ result = self._delete_group(group_name)
+ results.append(result)
+ changed = True
+ return changed, group, results
+
+ def _delete_group(self, group_name):
+ """
+ Delete the provided server group
+ :param group_name: string - the server group to delete
+ :return: none
+ """
+ response = None
+ group, parent = self.group_dict.get(group_name)
+ try:
+ response = group.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete group: {0}. {1}'.format(
+ group_name, ex.response_text
+ ))
+ return response
+
+ def _ensure_group_is_present(
+ self,
+ group_name,
+ parent_name,
+ group_description):
+ """
+ Checks to see if a server group exists, creates it if it doesn't.
+ :param group_name: the name of the group to validate/create
+ :param parent_name: the name of the parent group for group_name
+ :param group_description: a short description of the server group (used when creating)
+ :return: (changed, group) -
+ changed: Boolean- whether a change was made,
+ group: A clc group object for the group
+ """
+ if not self.root_group:
+ raise AssertionError("Implementation Error: Root Group not set")
+ parent = parent_name if parent_name is not None else self.root_group.name
+ description = group_description
+ changed = False
+ group = group_name
+
+ parent_exists = self._group_exists(group_name=parent, parent_name=None)
+ child_exists = self._group_exists(
+ group_name=group_name,
+ parent_name=parent)
+
+ if parent_exists and child_exists:
+ group, parent = self.group_dict[group_name]
+ changed = False
+ elif parent_exists and not child_exists:
+ if not self.module.check_mode:
+ group = self._create_group(
+ group=group,
+ parent=parent,
+ description=description)
+ changed = True
+ else:
+ self.module.fail_json(
+ msg="parent group: " +
+ parent +
+ " does not exist")
+
+ return changed, group
+
+ def _create_group(self, group, parent, description):
+ """
+ Create the provided server group
+ :param group: clc_sdk.Group - the group to create
+ :param parent: clc_sdk.Parent - the parent group for {group}
+ :param description: string - a text description of the group
+ :return: clc_sdk.Group - the created group
+ """
+ response = None
+ (parent, grandparent) = self.group_dict[parent]
+ try:
+ response = parent.Create(name=group, description=description)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create group: {0}. {1}'.format(
+ group, ex.response_text))
+ return response
+
+ def _group_exists(self, group_name, parent_name):
+ """
+ Check to see if a group exists
+ :param group_name: string - the group to check
+ :param parent_name: string - the parent of group_name
+ :return: boolean - whether the group exists
+ """
+ result = False
+ if group_name in self.group_dict:
+ (group, parent) = self.group_dict[group_name]
+ if parent_name is None or parent_name == parent.name:
+ result = True
+ return result
+
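+ # Illustrative note (not part of the upstream module): passing
+ # parent_name=None matches a group anywhere in the walked tree, which is how
+ # _ensure_group_is_present checks that the parent itself exists.
+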
+ def _get_group_tree_for_datacenter(self, datacenter=None):
+ """
+ Walk the tree of groups for a datacenter
+ :param datacenter: string - the datacenter to walk (ex: 'UC1')
+ :return: a dictionary of groups and parents
+ """
+ self.root_group = self.clc.v2.Datacenter(
+ location=datacenter).RootGroup()
+ return self._walk_groups_recursive(
+ parent_group=None,
+ child_group=self.root_group)
+
+ def _walk_groups_recursive(self, parent_group, child_group):
+ """
+ Walk a parent-child tree of groups, starting with the provided child group
+ :param parent_group: clc_sdk.Group - the parent group to start the walk
+ :param child_group: clc_sdk.Group - the child group to start the walk
+ :return: a dictionary of groups and parents
+ """
+ result = {str(child_group): (child_group, parent_group)}
+ groups = child_group.Subgroups().groups
+ if len(groups) > 0:
+ for group in groups:
+ if group.type != 'default':
+ continue
+
+ result.update(self._walk_groups_recursive(child_group, group))
+ return result
+
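+ # Illustrative, hypothetical example (not part of the upstream module) of
+ # the mapping built by _walk_groups_recursive, keyed by str(group):
+ #
+ #   {'Default Group': (<Group Default Group>, None),
+ #    'My Cool Server Group': (<Group My Cool Server Group>, <Group Default Group>)}
+ #
+ # Because keys are group names, two groups with the same name in one
+ # datacenter would collide, with the group walked last winning.
+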
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process group request')
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcGroup._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_group = ClcGroup(module)
+ clc_group.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
new file mode 100644
index 00000000..2a8d2e9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
@@ -0,0 +1,935 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create, delete shared load balancers in CenturyLink Cloud.
+description:
+ - An Ansible module to create or delete shared load balancers in CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the loadbalancer
+ type: str
+ required: True
+ description:
+ description:
+ - A description for the loadbalancer
+ type: str
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ location:
+ description:
+ - The location of the datacenter where the load balancer resides
+ type: str
+ required: True
+ method:
+ description:
+ - The balancing method for the load balancer pool
+ type: str
+ choices: ['leastConnection', 'roundRobin']
+ persistence:
+ description:
+ - The persistence method for the load balancer
+ type: str
+ choices: ['standard', 'sticky']
+ port:
+ description:
+ - Port to configure on the public-facing side of the load balancer pool
+ type: str
+ choices: [80, 443]
+ nodes:
+ description:
+ - A list of nodes to add to the load balancer pool
+ type: list
+ default: []
+ status:
+ description:
+ - The status of the loadbalancer
+ type: str
+ default: enabled
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Whether to create or delete the load balancer pool
+ type: str
+ default: present
+ choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the environment variables below, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint that points to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+- name: Create Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: present
+
+- name: Add node to an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_present
+
+- name: Remove node from an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_absent
+
+- name: Delete LoadbalancerPool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: port_absent
+
+- name: Delete Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: absent
+'''
+
+RETURN = '''
+loadbalancer:
+ description: The load balancer result object from CLC
+ returned: success
+ type: dict
+ sample:
+ {
+ "description":"test-lb",
+ "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
+ "ipAddress":"66.150.174.197",
+ "links":[
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
+ "rel":"pools",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "name":"test-lb",
+ "pools":[
+
+ ],
+ "status":"enabled"
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcLoadBalancer:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.lb_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ result_lb = None
+ loadbalancer_name = self.module.params.get('name')
+ loadbalancer_alias = self.module.params.get('alias')
+ loadbalancer_location = self.module.params.get('location')
+ loadbalancer_description = self.module.params.get('description')
+ loadbalancer_port = self.module.params.get('port')
+ loadbalancer_method = self.module.params.get('method')
+ loadbalancer_persistence = self.module.params.get('persistence')
+ loadbalancer_nodes = self.module.params.get('nodes')
+ loadbalancer_status = self.module.params.get('status')
+ state = self.module.params.get('state')
+
+ if loadbalancer_description is None:
+ loadbalancer_description = loadbalancer_name
+
+ self._set_clc_credentials_from_env()
+
+ self.lb_dict = self._get_loadbalancer_list(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ if state == 'present':
+ changed, result_lb, lb_id = self.ensure_loadbalancer_present(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ description=loadbalancer_description,
+ status=loadbalancer_status)
+ if loadbalancer_port:
+ changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
+ lb_id=lb_id,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ method=loadbalancer_method,
+ persistence=loadbalancer_persistence,
+ port=loadbalancer_port)
+
+ if loadbalancer_nodes:
+ changed, result_nodes = self.ensure_lbpool_nodes_set(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+ elif state == 'absent':
+ changed, result_lb = self.ensure_loadbalancer_absent(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ elif state == 'port_absent':
+ changed, result_lb = self.ensure_loadbalancerpool_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port)
+
+ elif state == 'nodes_present':
+ changed, result_lb = self.ensure_lbpool_nodes_present(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ elif state == 'nodes_absent':
+ changed, result_lb = self.ensure_lbpool_nodes_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ self.module.exit_json(changed=changed, loadbalancer=result_lb)
+
+ def ensure_loadbalancer_present(
+ self, name, alias, location, description, status):
+ """
+ Checks to see if a load balancer exists and creates one if it does not.
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description of loadbalancer
+ :param status: Enabled / Disabled
+ :return: (changed, result, lb_id)
+ changed: Boolean whether a change was made
+ result: The result object from the CLC load balancer request
+ lb_id: The load balancer id
+ """
+ changed = False
+ result = name
+ lb_id = self._loadbalancer_exists(name=name)
+ if not lb_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancer(name=name,
+ alias=alias,
+ location=location,
+ description=description,
+ status=status)
+ lb_id = result.get('id')
+ changed = True
+
+ return changed, result, lb_id
+
+ def ensure_loadbalancerpool_present(
+ self, lb_id, alias, location, method, persistence, port):
+ """
+ Checks to see if a load balancer pool exists and creates one if it does not.
+ :param lb_id: The loadbalancer id
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: (changed, result, pool_id) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ pool_id: The string id of the load balancer pool
+ """
+ changed = False
+ result = port
+ if not lb_id:
+ return changed, None, None
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if not pool_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ method=method,
+ persistence=persistence,
+ port=port)
+ pool_id = result.get('id')
+ changed = True
+
+ return changed, result, pool_id
+
+ def ensure_loadbalancer_absent(self, name, alias, location):
+ """
+ Checks to see if a load balancer exists and deletes it if it does
+ :param name: Name of the load balancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :return: (changed, result)
+ changed: Boolean whether a change was made
+ result: The result from the CLC API Call
+ """
+ changed = False
+ result = name
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ if not self.module.check_mode:
+ result = self.delete_loadbalancer(alias=alias,
+ location=location,
+ name=name)
+ changed = True
+ return changed, result
+
+ def ensure_loadbalancerpool_absent(self, alias, location, name, port):
+ """
+ Checks to see if a load balancer pool exists and deletes it if it does
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer listens on
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = None
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed = True
+ if not self.module.check_mode:
+ result = self.delete_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool
+ and sets the pool's nodes if any node in the list is missing
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: The list of nodes to be updated to the pool
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ result = {}
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_check=nodes)
+ if not nodes_exist:
+ changed = True
+ result = self.set_loadbalancernodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool and adds the missing nodes to the pool
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be added
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.add_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_add=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool and removes any that are found
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be removed
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.remove_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_remove=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't exist"
+ return changed, result
+
+ def create_loadbalancer(self, name, alias, location, description, status):
+ """
+ Create a loadbalancer w/ params
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description for loadbalancer to be created
+ :param status: Enabled / Disabled
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('POST',
+ '/v2/sharedLoadBalancers/%s/%s' % (alias,
+ location),
+ json.dumps({"name": name,
+ "description": description,
+ "status": status}))
+ sleep(1)
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def create_loadbalancerpool(
+ self, alias, location, lb_id, method, persistence, port):
+ """
+ Creates a pool on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: result: The result from the create API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id), json.dumps(
+ {
+ "port": port, "method": method, "persistence": persistence
+ }))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def delete_loadbalancer(self, alias, location, name):
+ """
+ Delete CLC loadbalancer
+ :param alias: Alias for account
+ :param location: Datacenter
+ :param name: Name of the loadbalancer to delete
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ lb_id = self._get_loadbalancer_id(name=name)
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
+ """
+ Delete the pool on the provided load balancer
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the load balancer pool
+ :return: result: The result from the delete API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
+ (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def _get_loadbalancer_id(self, name):
+ """
+ Retrieves unique ID of loadbalancer
+ :param name: Name of loadbalancer
+ :return: Unique ID of the loadbalancer
+ """
+ lb_id = None
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ lb_id = lb.get('id')
+ return lb_id
+
+ def _get_loadbalancer_list(self, alias, location):
+ """
+ Retrieve a list of loadbalancers
+ :param alias: Alias for account
+ :param location: Datacenter
+ :return: JSON data for all loadbalancers at datacenter
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+ alias, str(e.response_text)))
+ return result
+
+ def _loadbalancer_exists(self, name):
+ """
+ Verify a loadbalancer exists
+ :param name: Name of loadbalancer
+ :return: False or the ID of the existing loadbalancer
+ """
+ result = False
+
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ result = lb.get('id')
+ return result
+
+ def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+ """
+ Checks to see if a pool exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param port: the port to check and see if it exists
+ :param lb_id: the id string of the provided load balancer
+ :return: result: The id string of the pool or False
+ """
+ result = False
+ try:
+ pool_list = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
+ lb_id, str(e.response_text)))
+ for pool in pool_list:
+ if int(pool.get('port')) == int(port):
+ result = pool.get('id')
+ return result
+
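+ # Illustrative note (not part of the upstream module): ports are compared
+ # numerically, so a playbook value of '443' (a string) still matches a pool
+ # whose API-reported port is the integer 443.
+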
+ def _loadbalancerpool_nodes_exists(
+ self, alias, location, lb_id, pool_id, nodes_to_check):
+ """
+ Checks to see if a set of nodes exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the provided load balancer
+ :param pool_id: the id string of the load balancer pool
+ :param nodes_to_check: the list of nodes to check for
+ :return: result: True / False indicating if the given nodes exist
+ """
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_check:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ # every requested node must already be present in the pool;
+ # stop at the first miss
+ if node not in nodes:
+ return False
+ return True
+
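+ # Illustrative note (not part of the upstream module): nodes are compared as
+ # plain dicts after a missing 'status' is defaulted, so
+ #   {'ipAddress': '10.11.22.123', 'privatePort': 80}
+ # matches a pool node returned as
+ #   {'ipAddress': '10.11.22.123', 'privatePort': 80, 'status': 'enabled'}.
+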
+ def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+ """
+ Updates nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes: a list of dictionaries containing the nodes to set
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not lb_id:
+ return result
+ if not self.module.check_mode:
+ try:
+ result = self.clc.v2.API.Call('PUT',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id), json.dumps(nodes))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+ """
+ Add nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_add: a list of dictionaries containing the nodes to add
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_add:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ changed = True
+ nodes.append(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def remove_lbpool_nodes(
+ self, alias, location, lb_id, pool_id, nodes_to_remove):
+ """
+ Removes nodes from the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_remove:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ changed = True
+ nodes.remove(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
+ """
+ Return the list of nodes available to the provided load balancer pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :return: result: The list of nodes
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('GET',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ location=dict(required=True),
+ alias=dict(required=True),
+ port=dict(choices=[80, 443]),
+ method=dict(choices=['leastConnection', 'roundRobin']),
+ persistence=dict(choices=['standard', 'sticky']),
+ nodes=dict(type='list', default=[]),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'port_absent',
+ 'nodes_present',
+ 'nodes_absent'])
+ )
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
+ supports_check_mode=True)
+ clc_loadbalancer = ClcLoadBalancer(module)
+ clc_loadbalancer.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_modify_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_modify_server.py
new file mode 100644
index 00000000..3c1b08cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_modify_server.py
@@ -0,0 +1,965 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: Modify servers in CenturyLink Cloud.
+description:
+ - An Ansible module to modify servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - A list of server IDs to modify.
+ type: list
+ required: True
+ cpu:
+ description:
+ - The number of CPUs to set on the server
+ type: str
+ memory:
+ description:
+ - Memory (in GB) to set on the server.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti affinity policy id to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_name'
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti affinity policy name to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_id'
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy id to be associated to the server.
+ This is mutually exclusive with 'alert_policy_name'
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy name to be associated to the server.
+ This is mutually exclusive with 'alert_policy_id'
+ type: str
+ state:
+ description:
+ - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the environment variables below, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint that points to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Set the cpu count to 4 on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 4
+ state: present
+
+- name: Set the memory to 8GB on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ memory: 8
+ state: present
+
+- name: Set the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: present
+
+- name: Remove the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: absent
+
+- name: Add the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: present
+
+- name: Remove the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: absent
+
+- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 8
+ memory: 16
+ state: present
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcModifyServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+
+ p = self.module.params
+ cpu = p.get('cpu')
+ memory = p.get('memory')
+ state = p.get('state')
+ if state == 'absent' and (cpu or memory):
+ return self.module.fail_json(
+ msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
+
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to modify: %s' %
+ server_ids)
+
+ (changed, server_dict_array, changed_server_ids) = self._modify_servers(
+ server_ids=server_ids)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=changed_server_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ cpu=dict(),
+ memory=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ wait=dict(type='bool', default=True)
+ )
+ mutually_exclusive = [
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name']
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
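+ # Credential precedence: an API token plus account alias wins over a
+ # username/password pair; CLC_V2_API_URL only overrides the endpoint.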
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: the error message to throw in case of any error
+ :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex.message)
+
+ def _modify_servers(self, server_ids):
+ """
+ Modify the configuration of the servers in the provided list
+ :param server_ids: list of servers to modify
+ :return: a list of dictionaries with server information about the servers that were modified
+ """
+ p = self.module.params
+ state = p.get('state')
+ server_params = {
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
+ 'alert_policy_id': p.get('alert_policy_id'),
+ 'alert_policy_name': p.get('alert_policy_name'),
+ }
+ changed = False
+ server_changed = False
+ aa_changed = False
+ ap_changed = False
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+ changed_servers = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return self.module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
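+ # Converge each server: cpu/memory first, then the anti affinity and
+ # alert policy mappings, depending on the requested state.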
+ for server in servers:
+ if state == 'present':
+ server_changed, server_result = self._ensure_server_config(
+ server, server_params)
+ if server_result:
+ request_list.append(server_result)
+ aa_changed = self._ensure_aa_policy_present(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_present(
+ server,
+ server_params)
+ elif state == 'absent':
+ aa_changed = self._ensure_aa_policy_absent(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_absent(
+ server,
+ server_params)
+ if server_changed or aa_changed or ap_changed:
+ changed_servers.append(server)
+ changed = True
+
+ self._wait_for_requests(self.module, request_list)
+ self._refresh_servers(self.module, changed_servers)
+
+ for server in changed_servers:
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ def _ensure_server_config(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided cpu and memory
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ cpu = server_params.get('cpu')
+ memory = server_params.get('memory')
+ changed = False
+ result = None
+
+ if not cpu:
+ cpu = server.cpu
+ if not memory:
+ memory = server.memory
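+ # Fall back to the server's current values so a request that sets only
+ # cpu (or only memory) does not reset the other attribute.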
+ if memory != server.memory or cpu != server.cpu:
+ if not self.module.check_mode:
+ result = self._modify_clc_server(
+ self.clc,
+ self.module,
+ server.id,
+ cpu,
+ memory)
+ changed = True
+ return changed, result
+
+ @staticmethod
+ def _modify_clc_server(clc, module, server_id, cpu, memory):
+ """
+ Modify the memory or CPU of a clc server.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param server_id: id of the server to modify
+ :param cpu: the new cpu value
+ :param memory: the new memory value
+ :return: the result of CLC API call
+ """
+ result = None
+ acct_alias = clc.v2.Account.GetAlias()
+ try:
+ # Update the server configuration
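+ # The CLC v2 API takes a JSON list of patch operations; each entry
+ # sets one member (memory and cpu here) to a new value.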
+ job_obj = clc.v2.API.Call('PATCH',
+ 'servers/%s/%s' % (acct_alias,
+ server_id),
+ json.dumps([{"op": "set",
+ "member": "memory",
+ "value": memory},
+ {"op": "set",
+ "member": "cpu",
+ "value": cpu}]))
+ result = clc.v2.Requests(job_obj)
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to update the server configuration for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process modify server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ def _ensure_aa_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided anti affinity policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id != current_aa_policy_id:
+ self._modify_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ aa_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_aa_policy_absent(
+ self, server, server_params):
+ """
+ ensures the provided anti affinity policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id == current_aa_policy_id:
+ self._delete_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
+ """
+ modifies the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param aa_policy_id: the anti affinity policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('PUT',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": aa_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to modify the anti affinity policy for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _delete_aa_policy(clc, module, acct_alias, server_id):
+ """
+ Delete the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to delete the anti affinity policy from server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
+ """
+ retrieves the anti affinity policy id for the given policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(
+ msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
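+ # Policy names are not guaranteed to be unique, so scan all matches
+ # and fail on ambiguity rather than silently picking the first hit.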
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _get_aa_policy_id_of_server(clc, module, alias, server_id):
+ """
+ retrieves the anti affinity policy id of the server based on the CLC server id
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ result = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s/antiAffinityPolicy' %
+ (alias, server_id))
+ aa_policy_id = result.get('id')
+ except APIFailedResponse as ex:
+ if ex.response_status_code != 404:
+ module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return aa_policy_id
+
+ def _ensure_alert_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided alert policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+ if alert_policy_id and not self._alert_policy_exists(
+ server, alert_policy_id):
+ self._add_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_alert_policy_absent(
+ self, server, server_params):
+ """
+ ensures the alert policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: changed - Boolean whether a change was made
+ """
+ changed = False
+
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+
+ if alert_policy_id and self._alert_policy_exists(
+ server, alert_policy_id):
+ self._remove_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+ add the alert policy to the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('POST',
+ 'servers/%s/%s/alertPolicies' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": alert_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _remove_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+ remove the alert policy from the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/alertPolicies/%s'
+ % (acct_alias, server_id, alert_policy_id))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ retrieves the alert policy id for the given policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ try:
+ alert_policies = clc.v2.API.Call(method='GET',
+ url='alertPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for alert_policy in alert_policies.get('items'):
+ if alert_policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = alert_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _alert_policy_exists(server, alert_policy_id):
+ """
+ Checks if the alert policy exists for the server
+ :param server: the clc server object
+ :param alert_policy_id: the alert policy id
+ :return: True if the given alert policy id is associated with the server, False otherwise
+ """
+ result = False
+ alert_policies = server.alertPolicies
+ if alert_policies:
+ for alert_policy in alert_policies:
+ if alert_policy.get('id') == alert_policy_id:
+ result = True
+ return result
+
+ @staticmethod
+ def _set_user_agent(clc):
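+ # Tag SDK requests with this module's version so API traffic can be
+ # identified as coming from this Ansible module.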
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+
+ argument_dict = ClcModifyServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_modify_server = ClcModifyServer(module)
+ clc_modify_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_publicip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_publicip.py
new file mode 100644
index 00000000..e31546b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_publicip.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and delete public IPs on servers in CenturyLink Cloud.
+description:
+ - An Ansible module to add or delete public IP addresses on an existing server or servers in CenturyLink Cloud.
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ type: str
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ ports:
+ description:
+ - A list of ports to expose. This is required when state is 'present'.
+ type: list
+ server_ids:
+ description:
+ - A list of servers to create public ips on.
+ type: list
+ required: True
+ state:
+ description:
+ - Determine whether to create or delete public IPs. If 'present', the module will not create a second public
+ IP if one already exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the following environment variables, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to point the module at a different CLC environment endpoint.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ protocol: TCP
+ ports:
+ - 80
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete Public IP from Servers
+ community.general.clc_publicip:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcPublicIp(object):
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+ params = self.module.params
+ server_ids = params['server_ids']
+ ports = params['ports']
+ protocol = params['protocol']
+ state = params['state']
+
+ if state == 'present':
+ if not ports:
+ return self.module.fail_json(
+ msg="ports is required when state is 'present'")
+ changed, changed_server_ids, request_list = self.ensure_public_ip_present(
+ server_ids=server_ids, protocol=protocol, ports=ports)
+ elif state == 'absent':
+ changed, changed_server_ids, request_list = self.ensure_public_ip_absent(
+ server_ids=server_ids)
+ else:
+ return self.module.fail_json(msg="Unknown State: " + state)
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(changed=changed,
+ server_ids=changed_server_ids)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
+ ports=dict(type='list'),
+ wait=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ def ensure_public_ip_present(self, server_ids, protocol, ports):
+ """
+ Ensures the given server ids have a public IP available
+ :param server_ids: the list of server ids
+ :param protocol: the ip protocol
+ :param ports: the list of ports to expose
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) == 0]
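+ # Only servers with no public IP at all are candidates for change,
+ # which keeps the 'present' state idempotent.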
+ ports_to_expose = [{'protocol': protocol, 'port': port}
+ for port in ports]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._add_publicip_to_server(server, ports_to_expose)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _add_publicip_to_server(self, server, ports_to_expose):
+ result = None
+ try:
+ result = server.PublicIPs().Add(ports_to_expose)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_public_ip_absent(self, server_ids):
+ """
+ Ensures any public IPs are removed from the given server ids
+ :param server_ids: the list of server ids
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) > 0]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._remove_publicip_from_server(server)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _remove_publicip_from_server(self, server):
+ result = None
+ try:
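+ # A server may hold several public IPs; delete every one of them.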
+ for ip_address in server.PublicIPs().public_ips:
+ result = ip_address.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process public ip request')
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_ids, message):
+ """
+ Gets the list of servers from the CLC API
+ """
+ try:
+ return self.clc.v2.Servers(server_ids).servers
+ except CLCException as exception:
+ self.module.fail_json(msg=message + ': %s' % exception)
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcPublicIp._define_module_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_public_ip = ClcPublicIp(module)
+ clc_public_ip.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server.py
new file mode 100644
index 00000000..6b7e9c4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server.py
@@ -0,0 +1,1557 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server
+short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
+options:
+ additional_disks:
+ description:
+ - The list of additional disks for the server
+ type: list
+ default: []
+ add_public_ip:
+ description:
+ - Whether to add a public ip to the server
+ type: bool
+ default: 'no'
+ alias:
+ description:
+ - The account alias to provision the servers under.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
+ type: str
+ count:
+ description:
+ - The number of servers to build (mutually exclusive with exact_count)
+ default: 1
+ type: int
+ count_group:
+ description:
+ - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
+ type: str
+ cpu:
+ description:
+ - How many CPUs to provision on the server
+ default: 1
+ type: int
+ cpu_autoscale_policy_id:
+ description:
+ - The autoscale policy to assign to the server.
+ type: str
+ custom_fields:
+ description:
+ - The list of custom fields to set on the server.
+ type: list
+ default: []
+ description:
+ description:
+ - The description to set for the server.
+ type: str
+ exact_count:
+ description:
+ - Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group,
+ creating and deleting them to reach that count. Requires count_group to be set.
+ type: int
+ group:
+ description:
+ - The Server Group to create servers under.
+ type: str
+ default: 'Default Group'
+ ip_address:
+ description:
+ - The IP Address for the server. One is assigned if not provided.
+ type: str
+ location:
+ description:
+ - The Datacenter to create servers in.
+ type: str
+ managed_os:
+ description:
+ - Whether to create the server as 'Managed' or not.
+ type: bool
+ default: 'no'
+ required: False
+ memory:
+ description:
+ - Memory in GB.
+ type: int
+ default: 1
+ name:
+ description:
+ - A 1 to 6 character identifier to use for the server. This is required when state is 'present'.
+ type: str
+ network_id:
+ description:
+ - The network UUID on which to create servers.
+ type: str
+ packages:
+ description:
+ - The list of blueprint packages to run on the server after it is created.
+ type: list
+ default: []
+ password:
+ description:
+ - Password for the administrator / root user
+ type: str
+ primary_dns:
+ description:
+ - Primary DNS used by the server.
+ type: str
+ public_ip_protocol:
+ description:
+ - The protocol to use for the public ip if add_public_ip is set to True.
+ type: str
+ default: 'TCP'
+ choices: ['TCP', 'UDP', 'ICMP']
+ public_ip_ports:
+ description:
+ - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
+ type: list
+ default: []
+ secondary_dns:
+ description:
+ - Secondary DNS used by the server.
+ type: str
+ server_ids:
+ description:
+ - Required for started, stopped, and absent states.
+ A list of server Ids to ensure are started, stopped, or absent.
+ type: list
+ default: []
+ source_server_password:
+ description:
+ - The password for the source server if a clone is specified.
+ type: str
+ state:
+ description:
+ - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent', 'started', 'stopped']
+ storage_type:
+ description:
+ - The type of storage to attach to the server.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale']
+ template:
+ description:
+ - The template to use for server creation. Will search for a template if a partial string is provided.
+ This is required when state is 'present'.
+ type: str
+ ttl:
+ description:
+ - The time to live for the server in seconds. The server will be deleted when this time expires.
+ type: str
+ type:
+ description:
+ - The type of server to create.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale', 'bareMetal']
+ configuration_id:
+ description:
+ - Only required for bare metal servers.
+ Specifies the identifier for the specific configuration type of bare metal server to deploy.
+ type: str
+ os_type:
+ description:
+ - Only required for bare metal servers.
+ Specifies the OS to provision with the bare metal server.
+ type: str
+ choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the following environment variables, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to point the module at a different CLC environment endpoint.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Provision a single Ubuntu Server
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ count: 1
+ group: Default Group
+ state: present
+
+- name: Ensure 'Default Group' has exactly 5 servers
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ exact_count: 5
+ count_group: Default Group
+ group: Default Group
+
+- name: Stop a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: stopped
+
+- name: Start a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: started
+
+- name: Delete a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+partially_created_server_ids:
+ description: The list of server ids that are partially created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects returned from CLC
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ changed = False
+ new_server_ids = []
+ server_dict_array = []
+
+ self._set_clc_credentials_from_env()
+ self.module.params = self._validate_module_params(
+ self.clc,
+ self.module)
+ p = self.module.params
+ state = p.get('state')
+
+ #
+ # Handle each state
+ #
+ partial_servers_ids = []
+ if state == 'absent':
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to delete: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._delete_servers(module=self.module,
+ clc=self.clc,
+ server_ids=server_ids)
+
+ elif state in ('started', 'stopped'):
+ server_ids = p.get('server_ids')
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of servers to run: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._start_stop_servers(self.module,
+ self.clc,
+ server_ids)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not p.get('template') and p.get('type') != 'bareMetal':
+ return self.module.fail_json(
+ msg='template parameter is required for new instance')
+
+ if p.get('exact_count') is None:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._create_servers(self.module,
+ self.clc)
+ else:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._enforce_count(self.module,
+ self.clc)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=new_server_ids,
+ partially_created_server_ids=partial_servers_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ template=dict(),
+ group=dict(default='Default Group'),
+ network_id=dict(),
+ location=dict(default=None),
+ cpu=dict(default=1, type='int'),
+ memory=dict(default=1, type='int'),
+ alias=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ip_address=dict(default=None),
+ storage_type=dict(
+ default='standard',
+ choices=[
+ 'standard',
+ 'hyperscale']),
+ type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
+ primary_dns=dict(default=None),
+ secondary_dns=dict(default=None),
+ additional_disks=dict(type='list', default=[]),
+ custom_fields=dict(type='list', default=[]),
+ ttl=dict(default=None),
+ managed_os=dict(type='bool', default=False),
+ description=dict(default=None),
+ source_server_password=dict(default=None, no_log=True),
+ cpu_autoscale_policy_id=dict(default=None),
+ anti_affinity_policy_id=dict(default=None),
+ anti_affinity_policy_name=dict(default=None),
+ alert_policy_id=dict(default=None),
+ alert_policy_name=dict(default=None),
+ packages=dict(type='list', default=[]),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'started',
+ 'stopped']),
+ count=dict(type='int', default=1),
+ exact_count=dict(type='int', default=None),
+ count_group=dict(),
+ server_ids=dict(type='list', default=[]),
+ add_public_ip=dict(type='bool', default=False),
+ public_ip_protocol=dict(
+ default='TCP',
+ choices=[
+ 'TCP',
+ 'UDP',
+ 'ICMP']),
+ public_ip_ports=dict(type='list', default=[]),
+ configuration_id=dict(default=None),
+ os_type=dict(default=None,
+ choices=[
+ 'redHat6_64Bit',
+ 'centOS6_64Bit',
+ 'windows2012R2Standard_64Bit',
+ 'ubuntu14_64Bit'
+ ]),
+ wait=dict(type='bool', default=True))
+
+ mutually_exclusive = [
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name'],
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _validate_module_params(clc, module):
+ """
+ Validate the module params and look up default values.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: dictionary of validated params
+ """
+ params = module.params
+ datacenter = ClcServer._find_datacenter(clc, module)
+
+ ClcServer._validate_types(module)
+ ClcServer._validate_name(module)
+
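+ # Resolve names, defaults and lookups into concrete values up front so
+ # the create/start/stop code paths can rely on fully populated params.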
+ params['alias'] = ClcServer._find_alias(clc, module)
+ params['cpu'] = ClcServer._find_cpu(clc, module)
+ params['memory'] = ClcServer._find_memory(clc, module)
+ params['description'] = ClcServer._find_description(module)
+ params['ttl'] = ClcServer._find_ttl(clc, module)
+ params['template'] = ClcServer._find_template_id(module, datacenter)
+ params['group'] = ClcServer._find_group(module, datacenter).id
+ params['network_id'] = ClcServer._find_network_id(module, datacenter)
+ params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
+ clc,
+ module)
+ params['alert_policy_id'] = ClcServer._find_alert_policy_id(
+ clc,
+ module)
+
+ return params
+
+ @staticmethod
+ def _find_datacenter(clc, module):
+ """
+ Find the datacenter by calling the CLC API.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Datacenter instance
+ """
+ location = module.params.get('location')
+ try:
+ if not location:
+ account = clc.v2.Account()
+ location = account.data.get('primaryDataCenter')
+ data_center = clc.v2.Datacenter(location)
+ return data_center
+ except CLCException:
+ module.fail_json(msg="Unable to find location: {0}".format(location))
+
+ @staticmethod
+ def _find_alias(clc, module):
+ """
+ Find or Validate the Account Alias by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Account instance
+ """
+ alias = module.params.get('alias')
+ if not alias:
+ try:
+ alias = clc.v2.Account.GetAlias()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to find account alias. {0}'.format(
+ ex.message
+ ))
+ return alias
+
+ @staticmethod
+ def _find_cpu(clc, module):
+ """
+ Find or validate the CPU value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for CPU
+ """
+ cpu = module.params.get('cpu')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
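+ # When no cpu value is supplied for a new server, fall back to the
+ # default defined on the target group.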
+ if not cpu and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("cpu"):
+ cpu = group.Defaults("cpu")
+ else:
+ module.fail_json(
+ msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
+ return cpu
+
+ @staticmethod
+ def _find_memory(clc, module):
+ """
+ Find or validate the Memory value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for Memory
+ """
+ memory = module.params.get('memory')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not memory and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("memory"):
+ memory = group.Defaults("memory")
+ else:
+ module.fail_json(msg=str(
+ "Can\'t determine a default memory value. Please provide a value for memory."))
+ return memory
+
+ @staticmethod
+ def _find_description(module):
+ """
+ Set the description module param to name if description is blank
+ :param module: the module to validate
+ :return: string description
+ """
+ description = module.params.get('description')
+ if not description:
+ description = module.params.get('name')
+ return description
+
+ @staticmethod
+ def _validate_types(module):
+ """
+ Validate that type and storage_type are set appropriately, and fail if not
+ :param module: the module to validate
+ :return: none
+ """
+ state = module.params.get('state')
+ server_type = module.params.get(
+ 'type').lower() if module.params.get('type') else None
+ storage_type = module.params.get(
+ 'storage_type').lower() if module.params.get('storage_type') else None
+
+ if state == "present":
+ if server_type == "standard" and storage_type not in (
+ "standard", "premium"):
+ module.fail_json(
+ msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
+
+ if server_type == "hyperscale" and storage_type != "hyperscale":
+ module.fail_json(
+ msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
+
+ @staticmethod
+ def _validate_name(module):
+ """
+ Validate that name is the correct length if provided, fail if it's not
+ :param module: the module to validate
+ :return: none
+ """
+ server_name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present' and (
+ len(server_name) < 1 or len(server_name) > 6):
+ module.fail_json(msg=str(
+ "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
+
+ @staticmethod
+ def _find_ttl(clc, module):
+ """
+ Validate that TTL is > 3600 if set, and fail if not
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: validated ttl
+ """
+ ttl = module.params.get('ttl')
+
+ if ttl:
+ if ttl <= 3600:
+ return module.fail_json(msg=str("Ttl cannot be <= 3600"))
+ else:
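+ # Convert the relative TTL (seconds) into the absolute Zulu timestamp
+ # format the CLC API expects.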
+ ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
+ return ttl
+
+ @staticmethod
+ def _find_template_id(module, datacenter):
+ """
+ Find the template id by calling the CLC API.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for the template
+ :return: a valid clc template id
+ """
+ lookup_template = module.params.get('template')
+ state = module.params.get('state')
+ server_type = module.params.get('type')
+ result = None
+
+ if state == 'present' and server_type != 'bareMetal':
+ try:
+ result = datacenter.Templates().Search(lookup_template)[0].id
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a template: " +
+ lookup_template +
+ " in location: " +
+ datacenter.id))
+ return result
+
+ @staticmethod
+ def _find_network_id(module, datacenter):
+ """
+ Validate the provided network id or return a default.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for a network id
+ :return: a valid network id
+ """
+ network_id = module.params.get('network_id')
+
+ if not network_id:
+ try:
+ network_id = datacenter.Networks().networks[0].id
+ # -- added for clc-sdk 2.23 compatibility
+ # datacenter_networks = clc_sdk.v2.Networks(
+ # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
+ # network_id = datacenter_networks.networks[0].id
+ # -- end
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a network in location: " +
+ datacenter.id))
+
+ return network_id
+
+ @staticmethod
+ def _find_aa_policy_id(clc, module):
+ """
+ Validate that the anti affinity policy exists for the given name and fail if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: aa_policy_id: the anti affinity policy id of the given name.
+ """
+ aa_policy_id = module.params.get('anti_affinity_policy_id')
+ aa_policy_name = module.params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ alias = module.params.get('alias')
+ aa_policy_id = ClcServer._get_anti_affinity_policy_id(
+ clc,
+ module,
+ alias,
+ aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _find_alert_policy_id(clc, module):
+ """
+        Validate that the alert policy exists for the given name and fail if it does not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: alert_policy_id: the alert policy id of the given name.
+ """
+ alert_policy_id = module.params.get('alert_policy_id')
+ alert_policy_name = module.params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alias = module.params.get('alias')
+ alert_policy_id = ClcServer._get_alert_policy_id_by_name(
+ clc=clc,
+ module=module,
+ alias=alias,
+ alert_policy_name=alert_policy_name
+ )
+ if not alert_policy_id:
+ module.fail_json(
+                    msg='No alert policy exists with name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ def _create_servers(self, module, clc, override_count=None):
+ """
+ Create New Servers in CLC cloud
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created
+ """
+ p = module.params
+ request_list = []
+ servers = []
+ server_dict_array = []
+ created_server_ids = []
+ partial_created_servers_ids = []
+
+ add_public_ip = p.get('add_public_ip')
+ public_ip_protocol = p.get('public_ip_protocol')
+ public_ip_ports = p.get('public_ip_ports')
+
+ params = {
+ 'name': p.get('name'),
+ 'template': p.get('template'),
+ 'group_id': p.get('group'),
+ 'network_id': p.get('network_id'),
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'alias': p.get('alias'),
+ 'password': p.get('password'),
+ 'ip_address': p.get('ip_address'),
+ 'storage_type': p.get('storage_type'),
+ 'type': p.get('type'),
+ 'primary_dns': p.get('primary_dns'),
+ 'secondary_dns': p.get('secondary_dns'),
+ 'additional_disks': p.get('additional_disks'),
+ 'custom_fields': p.get('custom_fields'),
+ 'ttl': p.get('ttl'),
+ 'managed_os': p.get('managed_os'),
+ 'description': p.get('description'),
+ 'source_server_password': p.get('source_server_password'),
+ 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'packages': p.get('packages'),
+ 'configuration_id': p.get('configuration_id'),
+ 'os_type': p.get('os_type')
+ }
+
+ count = override_count if override_count else p.get('count')
+
+        changed = count > 0
+
+ if not changed:
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+ for i in range(0, count):
+ if not module.check_mode:
+ req = self._create_clc_server(clc=clc,
+ module=module,
+ server_params=params)
+ server = req.requests[0].Server()
+ request_list.append(req)
+ servers.append(server)
+
+ self._wait_for_requests(module, request_list)
+ self._refresh_servers(module, servers)
+
+ ip_failed_servers = self._add_public_ip_to_servers(
+ module=module,
+ should_add_public_ip=add_public_ip,
+ servers=servers,
+ public_ip_protocol=public_ip_protocol,
+ public_ip_ports=public_ip_ports)
+ ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
+ module=module,
+ servers=servers)
+
+ for server in servers:
+ if server in ip_failed_servers or server in ap_failed_servers:
+ partial_created_servers_ids.append(server.id)
+ else:
+ # reload server details
+ server = clc.v2.Server(server.id)
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+
+ if add_public_ip and len(server.PublicIPs().public_ips) > 0:
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ created_server_ids.append(server.id)
+ server_dict_array.append(server.data)
+
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+
+ def _enforce_count(self, module, clc):
+ """
+ Enforce that there is the right number of servers in the provided group.
+        Creates or deletes servers as necessary.
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created or deleted
+ """
+ p = module.params
+ changed = False
+ count_group = p.get('count_group')
+ datacenter = ClcServer._find_datacenter(clc, module)
+ exact_count = p.get('exact_count')
+ server_dict_array = []
+ partial_servers_ids = []
+ changed_server_ids = []
+
+ # fail here if the exact count was specified without filtering
+        # on a group, as this may lead to an undesired removal of instances
+ if exact_count and count_group is None:
+ return module.fail_json(
+ msg="you must use the 'count_group' option with exact_count")
+
+ servers, running_servers = ClcServer._find_running_servers_by_group(
+ module, datacenter, count_group)
+
+ if len(running_servers) == exact_count:
+ changed = False
+
+ elif len(running_servers) < exact_count:
+ to_create = exact_count - len(running_servers)
+ server_dict_array, changed_server_ids, partial_servers_ids, changed \
+ = self._create_servers(module, clc, override_count=to_create)
+
+ for server in server_dict_array:
+ running_servers.append(server)
+
+ elif len(running_servers) > exact_count:
+ to_remove = len(running_servers) - exact_count
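+            # sort the ids so the choice of servers to delete is deterministic
+            # across runs; the lexically smallest ids are removed first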
+ all_server_ids = sorted([x.id for x in running_servers])
+ remove_ids = all_server_ids[0:to_remove]
+
+ (changed, server_dict_array, changed_server_ids) \
+ = ClcServer._delete_servers(module, clc, remove_ids)
+
+ return server_dict_array, changed_server_ids, partial_servers_ids, changed
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ @staticmethod
+ def _add_public_ip_to_servers(
+ module,
+ should_add_public_ip,
+ servers,
+ public_ip_protocol,
+ public_ip_ports):
+ """
+ Create a public IP for servers
+ :param module: the AnsibleModule object
+ :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
+ :param servers: List of servers to add public ips to
+ :param public_ip_protocol: a protocol to allow for the public ips
+ :param public_ip_ports: list of ports to allow for the public ips
+ :return: none
+ """
+ failed_servers = []
+ if not should_add_public_ip:
+ return failed_servers
+
+ ports_lst = []
+ request_list = []
+ server = None
+
+ for port in public_ip_ports:
+ ports_lst.append(
+ {'protocol': public_ip_protocol, 'port': port})
+ try:
+ if not module.check_mode:
+ for server in servers:
+ request = server.PublicIPs().Add(ports_lst)
+ request_list.append(request)
+ except APIFailedResponse:
+ failed_servers.append(server)
+ ClcServer._wait_for_requests(module, request_list)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_servers(clc, module, servers):
+ """
+ Associate the alert policy to servers
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param servers: List of servers to add alert policy to
+ :return: failed_servers: the list of servers which failed while associating alert policy
+ """
+ failed_servers = []
+ p = module.params
+ alert_policy_id = p.get('alert_policy_id')
+ alias = p.get('alias')
+
+ if alert_policy_id and not module.check_mode:
+ for server in servers:
+ try:
+ ClcServer._add_alert_policy_to_server(
+ clc=clc,
+ alias=alias,
+ server_id=server.id,
+ alert_policy_id=alert_policy_id)
+ except CLCException:
+ failed_servers.append(server)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, alias, server_id, alert_policy_id):
+ """
+ Associate an alert policy to a clc server
+ :param clc: the clc-sdk instance to use
+ :param alias: the clc account alias
+ :param server_id: The clc server id
+ :param alert_policy_id: the alert policy id to be associated to the server
+ :return: none
+ """
+ try:
+ clc.v2.API.Call(
+ method='POST',
+ url='servers/%s/%s/alertPolicies' % (alias, server_id),
+ payload=json.dumps(
+ {
+ 'id': alert_policy_id
+ }))
+ except APIFailedResponse as e:
+ raise CLCException(
+ 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
+ server_id, str(e.response_text)))
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ Returns the alert policy id for the given alert policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the clc account alias
+ :param alert_policy_name: the name of the alert policy
+ :return: alert_policy_id: the alert policy id
+ """
+ alert_policy_id = None
+ policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
+ if not policies:
+ return alert_policy_id
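+        # scan every policy: a second match on the same name makes the lookup
+        # ambiguous, so fail rather than pick one arbitrarily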
+ for policy in policies.get('items'):
+ if policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _delete_servers(module, clc, server_ids):
+ """
+ Delete the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to delete
+ :return: a list of dictionaries with server information about the servers that were deleted
+ """
+ terminated_server_ids = []
+ server_dict_array = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if not module.check_mode:
+ request_list.append(server.Delete())
+ ClcServer._wait_for_requests(module, request_list)
+
+ for server in servers:
+ terminated_server_ids.append(server.id)
+
+ return True, server_dict_array, terminated_server_ids
+
+ @staticmethod
+ def _start_stop_servers(module, clc, server_ids):
+ """
+ Start or Stop the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to start or stop
+ :return: a list of dictionaries with server information about the servers that were started or stopped
+ """
+ p = module.params
+ state = p.get('state')
+ changed = False
+ changed_servers = []
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if server.powerState != state:
+ changed_servers.append(server)
+ if not module.check_mode:
+ request_list.append(
+ ClcServer._change_server_power_state(
+ module,
+ server,
+ state))
+ changed = True
+
+ ClcServer._wait_for_requests(module, request_list)
+ ClcServer._refresh_servers(module, changed_servers)
+
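+        # changed_servers is a subset of servers; the set() union reports each server exactly once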
+ for server in set(changed_servers + servers):
+ try:
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ except (KeyError, IndexError):
+ pass
+
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ @staticmethod
+ def _change_server_power_state(module, server, state):
+ """
+ Change the server powerState
+ :param module: the module to check for intended state
+ :param server: the server to start or stop
+ :param state: the intended powerState for the server
+ :return: the request object from clc-sdk call
+ """
+ result = None
+ try:
+ if state == 'started':
+ result = server.PowerOn()
+ else:
+ # Try to shut down the server and fall back to power off when unable to shut down.
+ result = server.ShutDown()
+                if result and hasattr(result, 'requests') and result.requests:
+ return result
+ else:
+ result = server.PowerOff()
+ except CLCException:
+ module.fail_json(
+ msg='Unable to change power state for server {0}'.format(
+ server.id))
+ return result
+
+ @staticmethod
+ def _find_running_servers_by_group(module, datacenter, count_group):
+ """
+ Find a list of running servers in the provided group
+ :param module: the AnsibleModule object
+ :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
+ :param count_group: the group to count the servers
+ :return: list of servers, and list of running servers
+ """
+ group = ClcServer._find_group(
+ module=module,
+ datacenter=datacenter,
+ lookup_group=count_group)
+
+ servers = group.Servers().Servers()
+ running_servers = []
+
+ for server in servers:
+ if server.status == 'active' and server.powerState == 'started':
+ running_servers.append(server)
+
+ return servers, running_servers
+
+ @staticmethod
+ def _find_group(module, datacenter, lookup_group=None):
+ """
+ Find a server group in a datacenter by calling the CLC API
+ :param module: the AnsibleModule instance
+ :param datacenter: clc-sdk.Datacenter instance to search for the group
+ :param lookup_group: string name of the group to search for
+ :return: clc-sdk.Group instance
+ """
+ if not lookup_group:
+ lookup_group = module.params.get('group')
+ try:
+ return datacenter.Groups().Get(lookup_group)
+ except CLCException:
+ pass
+
+        # The search above only acts on the top-level groups; fall back to a
+        # recursive search of the subgroups
+ result = ClcServer._find_group_recursive(
+ module,
+ datacenter.Groups(),
+ lookup_group)
+
+ if result is None:
+ module.fail_json(
+ msg=str(
+ "Unable to find group: " +
+ lookup_group +
+ " in location: " +
+ datacenter.id))
+
+ return result
+
+ @staticmethod
+ def _find_group_recursive(module, group_list, lookup_group):
+ """
+ Find a server group by recursively walking the tree
+ :param module: the AnsibleModule instance to use
+ :param group_list: a list of groups to search
+ :param lookup_group: the group to look for
+        :return: clc-sdk.Group instance if found, otherwise None
+ """
+ result = None
+ for group in group_list.groups:
+ subgroups = group.Subgroups()
+ try:
+ return subgroups.Get(lookup_group)
+ except CLCException:
+ result = ClcServer._find_group_recursive(
+ module,
+ subgroups,
+ lookup_group)
+
+ if result is not None:
+ break
+
+ return result
+
+ @staticmethod
+ def _create_clc_server(
+ clc,
+ module,
+ server_params):
+ """
+ Call the CLC Rest API to Create a Server
+ :param clc: the clc-python-sdk instance to use
+ :param module: the AnsibleModule instance to use
+ :param server_params: a dictionary of params to use to create the servers
+ :return: clc-sdk.Request object linked to the queued server request
+ """
+
+ try:
+ res = clc.v2.API.Call(
+ method='POST',
+ url='servers/%s' %
+ (server_params.get('alias')),
+ payload=json.dumps(
+ {
+ 'name': server_params.get('name'),
+ 'description': server_params.get('description'),
+ 'groupId': server_params.get('group_id'),
+ 'sourceServerId': server_params.get('template'),
+ 'isManagedOS': server_params.get('managed_os'),
+ 'primaryDNS': server_params.get('primary_dns'),
+ 'secondaryDNS': server_params.get('secondary_dns'),
+ 'networkId': server_params.get('network_id'),
+ 'ipAddress': server_params.get('ip_address'),
+ 'password': server_params.get('password'),
+ 'sourceServerPassword': server_params.get('source_server_password'),
+ 'cpu': server_params.get('cpu'),
+ 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
+ 'memoryGB': server_params.get('memory'),
+ 'type': server_params.get('type'),
+ 'storageType': server_params.get('storage_type'),
+ 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
+ 'customFields': server_params.get('custom_fields'),
+ 'additionalDisks': server_params.get('additional_disks'),
+ 'ttl': server_params.get('ttl'),
+ 'packages': server_params.get('packages'),
+ 'configurationId': server_params.get('configuration_id'),
+ 'osType': server_params.get('os_type')}))
+
+ result = clc.v2.Requests(res)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
+ server_params.get('name'),
+ ex.response_text
+ ))
+
+ #
+ # Patch the Request object so that it returns a valid server
+        #
+
+ # Find the server's UUID from the API response
+ server_uuid = [obj['id']
+ for obj in res['links'] if obj['rel'] == 'self'][0]
+
+        # Replace the request's Server method with a _find_server_by_uuid closure
+        # so that it resolves the newly created server once provisioning completes
+ result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
+ clc,
+ module,
+ server_uuid,
+ server_params.get('alias'))
+
+ return result
+
+ @staticmethod
+ def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
+ """
+        Retrieve the anti affinity policy id for the given policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
+ alias, ex.response_text))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ #
+    # This is the function that gets patched onto the Request object's Server method using a lambda closure
+ #
+
+ @staticmethod
+ def _find_server_by_uuid_w_retry(
+ clc, module, svr_uuid, alias=None, retries=5, back_out=2):
+ """
+ Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param svr_uuid: UUID of the server
+        :param alias: the Account Alias to search
+        :param retries: the number of retry attempts to make before failing. default is 5
+        :param back_out: the initial delay, in seconds, between retries; doubled after each failed attempt
+ :return: a clc-sdk.Server instance
+ """
+ if not alias:
+ alias = clc.v2.Account.GetAlias()
+
+ # Wait and retry if the api returns a 404
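+        # back_out doubles after each failed attempt, so the default schedule
+        # sleeps 2, 4, 8 and 16 seconds between the five tries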
+ while True:
+ retries -= 1
+ try:
+ server_obj = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s?uuid=true' %
+ (alias, svr_uuid))
+ server_id = server_obj['id']
+ server = clc.v2.Server(
+ id=server_id,
+ alias=alias,
+ server_obj=server_obj)
+ return server
+
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ return module.fail_json(
+ msg='A failure response was received from CLC API when '
+ 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
+ (svr_uuid, e.response_status_code, e.message))
+ if retries == 0:
+ return module.fail_json(
+                        msg='Unable to reach the CLC API after exhausting all retry attempts')
+ time.sleep(back_out)
+ back_out *= 2
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_server = ClcServer(module)
+ clc_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
new file mode 100644
index 00000000..1d289f66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server_snapshot
+short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - The list of CLC server Ids.
+ type: list
+ required: True
+ expiration_days:
+ description:
+ - The number of days to keep the server snapshot before it expires.
+ type: int
+ default: 7
+ required: False
+ state:
+ description:
+      - The state to ensure the provided resources are in.
+ type: str
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'restore']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+    type: bool
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Create server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ expiration_days: 10
+ wait: True
+ state: present
+
+- name: Restore server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: restore
+
+- name: Delete server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcSnapshot:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ server_ids = p['server_ids']
+ expiration_days = p['expiration_days']
+ state = p['state']
+ request_list = []
+ changed = False
+ changed_servers = []
+
+ self._set_clc_credentials_from_env()
+ if state == 'present':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_present(
+ server_ids=server_ids,
+ expiration_days=expiration_days)
+ elif state == 'absent':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
+ server_ids=server_ids)
+ elif state == 'restore':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
+ server_ids=server_ids)
+
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(
+ changed=changed,
+ server_ids=changed_servers)
+
+ def ensure_server_snapshot_present(self, server_ids, expiration_days):
+ """
+ Ensures the given set of server_ids have the snapshots created
+ :param server_ids: The list of server_ids to create the snapshot
+ :param expiration_days: The number of days to keep the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server ids that were modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) == 0]
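+        # servers that already have a snapshot are skipped, which keeps repeated runs idempotent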
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._create_server_snapshot(server, expiration_days)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _create_server_snapshot(self, server, expiration_days):
+ """
+ Create the snapshot for the CLC server
+ :param server: the CLC server object
+ :param expiration_days: The number of days to keep the snapshot
+ :return: the create request object from CLC API Call
+ """
+ result = None
+ try:
+ result = server.CreateSnapshot(
+ delete_existing=True,
+ expiration_days=expiration_days)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_absent(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots removed
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server ids that were modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._delete_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _delete_server_snapshot(self, server):
+ """
+ Delete snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the delete snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.DeleteSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_restore(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots restored
+        :param server_ids: The list of server_ids to restore the snapshot for
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server ids that were modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._restore_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _restore_server_snapshot(self, server):
+ """
+ Restore snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the restore snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.RestoreSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process server snapshot request')
+
+ @staticmethod
+ def define_argument_spec():
+ """
+        This function defines the argument spec dictionary required by
+        this module
+        :return: the argument spec dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ expiration_days=dict(default=7, type='int'),
+            wait=dict(type='bool', default=True),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'restore']),
+ )
+ return argument_spec
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: The error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
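+        # token-based credentials take precedence; the username/password pair
+        # is only used when no API token and alias are supplied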
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcSnapshot.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_snapshot = ClcSnapshot(module)
+ clc_snapshot.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py
new file mode 100644
index 00000000..09754ccd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py
@@ -0,0 +1,1013 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance
+short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group.
+description:
+ - Create, start, stop, restart, modify or terminate ecs instances.
+ - Add or remove ecs instances to/from security group.
+options:
+ state:
+ description:
+      - The target state of the instance after the operation.
+ default: 'present'
+ choices: ['present', 'running', 'stopped', 'restarted', 'absent']
+ type: str
+ availability_zone:
+ description:
+ - Aliyun availability zone ID in which to launch the instance.
+        If it is not specified, it will be allocated by the system automatically.
+ aliases: ['alicloud_zone', 'zone_id']
+ type: str
+ image_id:
+ description:
+ - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['image']
+ type: str
+ instance_type:
+ description:
+ - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['type']
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs.
+ aliases: ['group_ids']
+ type: list
+ elements: str
+ vswitch_id:
+ description:
+ - The subnet ID in which to launch the instances (VPC).
+ aliases: ['subnet_id']
+ type: str
+ instance_name:
+ description:
+ - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
+ uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-".
+ It cannot begin with http:// or https://.
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://.
+ type: str
+ internet_charge_type:
+ description:
+ - Internet charge type of ECS instance.
+ default: 'PayByBandwidth'
+ choices: ['PayByBandwidth', 'PayByTraffic']
+ type: str
+ max_bandwidth_in:
+ description:
+ - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
+ default: 200
+ type: int
+ max_bandwidth_out:
+ description:
+ - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
+ Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False).
+ default: 0
+ type: int
+ host_name:
+ description:
+ - Instance host name. Ordered hostname is not supported.
+ type: str
+ unique_suffix:
+ description:
+ - Specifies whether to add sequential suffixes to the host_name.
+ The sequential suffix ranges from 001 to 999.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ password:
+ description:
+      - The password used to log in to the instance. A modified password takes effect after the instance is rebooted.
+ type: str
+ system_disk_category:
+ description:
+ - Category of the system disk.
+ default: 'cloud_efficiency'
+ choices: ['cloud_efficiency', 'cloud_ssd']
+ type: str
+ system_disk_size:
+ description:
+      - Size of the system disk, in GB. Valid values are 40 to 500.
+ default: 40
+ type: int
+ system_disk_name:
+ description:
+ - Name of the system disk.
+ type: str
+ system_disk_description:
+ description:
+ - Description of the system disk.
+ type: str
+ count:
+ description:
+      - The number of new instances. An integer value which indicates how many instances that match I(count_tag)
+ should be running. Instances are either created or terminated based on this value.
+ default: 1
+ type: int
+ count_tag:
+ description:
+ - I(count) determines how many instances based on a specific tag criteria should be present.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ The specified count_tag must already exist or be passed in as the I(tags) option.
+ If it is not specified, it will be replaced by I(instance_name).
+ type: str
+ allocate_public_ip:
+ description:
+ - Whether allocate a public ip for the new instance.
+ default: False
+ aliases: [ 'assign_public_ip' ]
+ type: bool
+ instance_charge_type:
+ description:
+ - The charge type of the instance.
+ choices: ['PrePaid', 'PostPaid']
+ default: 'PostPaid'
+ type: str
+ period:
+ description:
+      - The charge duration of the instance, in months. Required when C(instance_charge_type=PrePaid).
+      - Valid values are 1 to 9, 12, 24, and 36.
+ default: 1
+ type: int
+ auto_renew:
+ description:
+      - Whether to automatically renew the instance charge.
+ type: bool
+ default: False
+ auto_renew_period:
+ description:
+      - The duration of the automatic renewal period. Required when C(auto_renew=True).
+ choices: [1, 2, 3, 6, 12]
+ type: int
+ instance_ids:
+ description:
+      - A list of instance ids. Required when operating on existing instances.
+        If it is specified, I(count) is ignored.
+ type: list
+ elements: str
+ force:
+ description:
+      - Whether the current operation needs to be executed forcibly.
+ default: False
+ type: bool
+ tags:
+ description:
+      - A hash/dictionary of instance tags, to add to the new instance or for starting/stopping instances by tag. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ version_added: '0.2.0'
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance.
+        If True, you must specify all the desired tags on each task affecting an instance.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ key_name:
+ description:
+      - The name of the key pair used to access the ECS instance via SSH.
+ required: false
+ type: str
+ aliases: ['keypair']
+ user_data:
+ description:
+ - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
+        It only takes effect when launching new ECS instances.
+ required: false
+ type: str
+ ram_role_name:
+ description:
+ - The name of the instance RAM role.
+ type: str
+ version_added: '0.2.0'
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal
+ places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
+ type: float
+ version_added: '0.2.0'
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid.
+ choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
+ default: 'NoSpot'
+ type: str
+ version_added: '0.2.0'
+ period_unit:
+ description:
+      - The unit of the duration for which you purchase the resource. Valid when C(instance_charge_type=PrePaid).
+ choices: ['Month', 'Week']
+ default: 'Month'
+ type: str
+ version_added: '0.2.0'
+ dry_run:
+ description:
+ - Specifies whether to send a dry-run request.
+      - If I(dry_run=True), only a dry-run request is sent and no instance is created. The system checks whether the
+        required parameters are set, and validates the request format, service permissions, and available ECS instances.
+        If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
+      - If I(dry_run=False), a request is sent. If the validation succeeds, the instance is created.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ include_data_disks:
+ description:
+      - Whether to also change the charge type of the instance's disks when changing the instance charge type.
+ default: True
+ type: bool
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.19.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# basic provisioning example vpc network
+- name: Basic provisioning example
+ hosts: localhost
+ vars:
+ alicloud_access_key: <your-alicloud-access-key-id>
+ alicloud_secret_key: <your-alicloud-access-secret-key>
+ alicloud_region: cn-beijing
+ image: ubuntu1404_64_40G_cloudinit_20160727.raw
+ instance_type: ecs.n4.small
+ vswitch_id: vsw-abcd1234
+ assign_public_ip: True
+ max_bandwidth_out: 10
+ host_name: myhost
+ password: mypassword
+ system_disk_category: cloud_efficiency
+ system_disk_size: 100
+ internet_charge_type: PayByBandwidth
+ security_groups: ["sg-f2rwnfh23r"]
+
+ instance_ids: ["i-abcd12346", "i-abcd12345"]
+ force: True
+
+ tasks:
+ - name: Launch ECS instance in VPC network
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ vswitch_id: '{{ vswitch_id }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: With count and count_tag to create a number of instances
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ security_groups: '{{ security_groups }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ Version: 0.1
+ count: 2
+ count_tag:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: Start instance
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'running'
+
+ - name: Reboot instance forcibly
+    community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'restarted'
+ force: '{{ force }}'
+
+  - name: Add instances to a security group
+    community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ security_groups: '{{ security_groups }}'
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+      description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+          description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ instance_type_family:
+      description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+      description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+      description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+      description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+      description: The public IPv4 address or EIP address assigned to the instance.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ user_data:
+ description: User-defined data.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance.
+ returned: always
+ type: float
+ sample: 0.97
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance.
+ returned: always
+ type: str
+ sample: NoSpot
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import re
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def get_instances_info(connection, ids):
+ result = []
+ instances = connection.describe_instances(instance_ids=ids)
+ if len(instances) > 0:
+ for inst in instances:
+ volumes = connection.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ result.append(inst.read())
+ return result
+
+
+def run_instance(module, ecs, exact_count):
+ if exact_count <= 0:
+ return None
+ zone_id = module.params['availability_zone']
+ image_id = module.params['image_id']
+ instance_type = module.params['instance_type']
+ security_groups = module.params['security_groups']
+ vswitch_id = module.params['vswitch_id']
+ instance_name = module.params['instance_name']
+ description = module.params['description']
+ internet_charge_type = module.params['internet_charge_type']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ max_bandwidth_in = module.params['max_bandwidth_in']
+ host_name = module.params['host_name']
+ password = module.params['password']
+ system_disk_category = module.params['system_disk_category']
+ system_disk_size = module.params['system_disk_size']
+ system_disk_name = module.params['system_disk_name']
+ system_disk_description = module.params['system_disk_description']
+ allocate_public_ip = module.params['allocate_public_ip']
+ period = module.params['period']
+ auto_renew = module.params['auto_renew']
+ instance_charge_type = module.params['instance_charge_type']
+ auto_renew_period = module.params['auto_renew_period']
+ user_data = module.params['user_data']
+ key_name = module.params['key_name']
+ ram_role_name = module.params['ram_role_name']
+ spot_price_limit = module.params['spot_price_limit']
+ spot_strategy = module.params['spot_strategy']
+ unique_suffix = module.params['unique_suffix']
+    # check whether the required parameters were passed
+ if not image_id:
+ module.fail_json(msg='image_id is required for new instance')
+ if not instance_type:
+ module.fail_json(msg='instance_type is required for new instance')
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ if len(security_groups) <= 0:
+        module.fail_json(msg='The parameter security_groups must be non-empty when creating new ECS instances, aborting')
+
+ client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time()))
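+    # the ECS API treats the client token as an idempotency key, so a retried
+    # request carrying the same token should not create duplicate instances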
+
+ try:
+        # call footmark's run_instances method
+ instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0],
+ zone_id=zone_id, instance_name=instance_name, description=description,
+ internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out,
+ internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password,
+ io_optimized='optimized', system_disk_category=system_disk_category,
+ system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name,
+ system_disk_description=system_disk_description, vswitch_id=vswitch_id,
+ amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month",
+ auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name,
+ user_data=user_data, client_token=client_token, ram_role_name=ram_role_name,
+ spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix)
+
+ except Exception as e:
+ module.fail_json(msg='Unable to create instance, error: {0}'.format(e))
+
+ return instances
+
+
+def modify_instance(module, instance):
+    # Modify selected instance attributes according to the requested state
+ state = module.params["state"]
+ name = module.params['instance_name']
+ unique_suffix = module.params['unique_suffix']
+ if not name:
+ name = instance.name
+
+ description = module.params['description']
+ if not description:
+ description = instance.description
+
+ host_name = module.params['host_name']
+ if unique_suffix and host_name:
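+        # preserve the trailing three-digit sequential suffix (001-999, see
+        # unique_suffix) from the current host name so renaming keeps the numbering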
+ suffix = instance.host_name[-3:]
+ host_name = host_name + suffix
+
+ if not host_name:
+ host_name = instance.host_name
+
+    # the password can be modified only when restarting the instance
+ password = ""
+ if state == "restarted":
+ password = module.params['password']
+
+    # user_data can be modified only when the instance is stopped
+ setattr(instance, "user_data", instance.describe_user_data())
+ user_data = instance.user_data
+ if state == "stopped":
+ user_data = module.params['user_data'].encode()
+
+ try:
+ return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data)
+ except Exception as e:
+ module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e))
+
+
+def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300):
+ """
+    Verify that the instance charge type has become the expected value after modifying it
+ """
+    while True:
+        instances = ecs.describe_instances(instance_ids=instance_ids)
+        flag = True
+        for inst in instances:
+            if inst and inst.instance_charge_type != charge_type:
+                flag = False
+        if flag:
+            return
+        timeout -= delay
+        time.sleep(delay)
+        if timeout <= 0:
+            raise Exception("Timeout Error: Waiting for the instance charge type to become {0}.".format(charge_type))
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ security_groups=dict(type='list', elements='str', aliases=['group_ids']),
+ availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']),
+ instance_type=dict(type='str', aliases=['type']),
+ image_id=dict(type='str', aliases=['image']),
+ count=dict(type='int', default=1),
+ count_tag=dict(type='str'),
+ vswitch_id=dict(type='str', aliases=['subnet_id']),
+ instance_name=dict(type='str', aliases=['name']),
+ host_name=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
+ max_bandwidth_in=dict(type='int', default=200),
+ max_bandwidth_out=dict(type='int', default=0),
+ system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
+ system_disk_size=dict(type='int', default=40),
+ system_disk_name=dict(type='str'),
+ system_disk_description=dict(type='str'),
+ force=dict(type='bool', default=False),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
+ description=dict(type='str'),
+ allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
+ instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
+ period=dict(type='int', default=1),
+ auto_renew=dict(type='bool', default=False),
+ instance_ids=dict(type='list', elements='str'),
+ auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
+ key_name=dict(type='str', aliases=['keypair']),
+ user_data=dict(type='str'),
+ ram_role_name=dict(type='str'),
+ spot_price_limit=dict(type='float'),
+ spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']),
+ unique_suffix=dict(type='bool', default=False),
+ period_unit=dict(type='str', default='Month', choices=['Month', 'Week']),
+ dry_run=dict(type='bool', default=False),
+ include_data_disks=dict(type='bool', default=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+ host_name = module.params['host_name']
+ state = module.params['state']
+ instance_ids = module.params['instance_ids']
+ count_tag = module.params['count_tag']
+ count = module.params['count']
+ instance_name = module.params['instance_name']
+ force = module.params['force']
+ zone_id = module.params['availability_zone']
+ key_name = module.params['key_name']
+ tags = module.params['tags']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ instance_charge_type = module.params['instance_charge_type']
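+    # spot settings only apply to PostPaid (pay-as-you-go) instances, so drop the spot strategy for PrePaid ones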
+ if instance_charge_type == "PrePaid":
+ module.params['spot_strategy'] = ''
+ changed = False
+
+ instances = []
+ if instance_ids:
+ if not isinstance(instance_ids, list):
+ module.fail_json(msg='The parameter instance_ids should be a list, aborting')
+ instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids)
+ if not instances:
+ module.fail_json(msg="There are no instances in our record based on instance_ids {0}. "
+ "Please check it and try again.".format(instance_ids))
+ elif count_tag:
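+        # count_tag is expected to be a string holding a Python dict literal,
+        # e.g. '{"role": "web"}', since it is parsed with eval() below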
+ instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag))
+ elif instance_name:
+ instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name)
+
+ ids = []
+ if state == 'absent':
+ if len(instances) < 1:
+        module.fail_json(msg='Please specify the ECS instances to operate on by using the '
+                             'parameters instance_ids, tags or instance_name, aborting')
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.")
+ targets.append(inst.id)
+ if ecs.delete_instances(instance_ids=targets, force=force):
+ changed = True
+ ids.extend(targets)
+
+ module.exit_json(changed=changed, ids=ids, instances=[])
+ except Exception as e:
+ module.fail_json(msg='Delete instance got an error: {0}'.format(e))
+
+ if module.params['allocate_public_ip'] and max_bandwidth_out < 0:
+ module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.")
+ if not module.params['allocate_public_ip']:
+ module.params['max_bandwidth_out'] = 0
+
+ if state == 'present':
+ if not instance_ids:
+ if len(instances) > count:
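+                # more matching instances exist than requested: terminate instances
+                # from the end of the list until only 'count' remain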
+ for i in range(0, len(instances) - count):
+ inst = instances[len(instances) - 1]
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="That to delete instance {0} is failed results from it is running, "
+ "and please stop it or set 'force' as True.".format(inst.id))
+ try:
+ if inst.terminate(force=force):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e))
+ instances.pop(len(instances) - 1)
+ else:
+ try:
+                    if host_name and re.search(r"-\[\d+,\d+\]-", host_name):
+                        module.fail_json(msg='Ordered hostnames are not supported. If you want to add an ordered '
+                                             'suffix to the hostname, set unique_suffix to True')
+ new_instances = run_instance(module, ecs, count - len(instances))
+ if new_instances:
+ changed = True
+ instances.extend(new_instances)
+ except Exception as e:
+ module.fail_json(msg="Create new instances got an error: {0}".format(e))
+
+ # Security Group join/leave begin
+ security_groups = module.params['security_groups']
+ if security_groups:
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ for inst in instances:
+ existing = inst.security_group_ids['security_group_id']
+ remove = list(set(existing).difference(set(security_groups)))
+ add = list(set(security_groups).difference(set(existing)))
+ for sg in remove:
+ if inst.leave_security_group(sg):
+ changed = True
+ for sg in add:
+ if inst.join_security_group(sg):
+ changed = True
+ # Security Group join/leave ends here
+
+ # Attach/Detach key pair
+ inst_ids = []
+ for inst in instances:
+ if key_name is not None and key_name != inst.key_name:
+ if key_name == "":
+ if inst.detach_key_pair():
+ changed = True
+ else:
+ inst_ids.append(inst.id)
+        if inst_ids:
+            if ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name):
+                changed = True
+
+ # Modify instance attribute
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.id not in ids:
+ ids.append(inst.id)
+
+ # Modify instance charge type
+ ids = []
+ for inst in instances:
+ if inst.instance_charge_type != instance_charge_type:
+ ids.append(inst.id)
+ if ids:
+ params = {"instance_ids": ids, "instance_charge_type": instance_charge_type,
+ "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'],
+ "auto_pay": True}
+ if instance_charge_type == 'PrePaid':
+ params['period'] = module.params['period']
+ params['period_unit'] = module.params['period_unit']
+
+ if ecs.modify_instance_charge_type(**params):
+ changed = True
+ wait_for_instance_modify_charge(ecs, ids, instance_charge_type)
+
+ else:
+ if len(instances) < 1:
+            module.fail_json(msg='Please specify the ECS instances to operate on by using the '
+                                 'parameters instance_ids, tags or instance_name, aborting')
+ if state == 'running':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.status != "running":
+ targets.append(inst.id)
+ ids.append(inst.id)
+ if targets and ecs.start_instances(instance_ids=targets):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Start instances got an error: {0}'.format(e))
+ elif state == 'stopped':
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != "stopped":
+ targets.append(inst.id)
+ if targets and ecs.stop_instances(instance_ids=targets, force_stop=force):
+ changed = True
+ ids.extend(targets)
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='Stop instances got an error: {0}'.format(e))
+ elif state == 'restarted':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ targets.append(inst.id)
+ if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Reboot instances got an error: {0}'.format(e))
+
+ tags = module.params['tags']
+ if module.params['purge_tags']:
+ for inst in instances:
+ if not tags:
+ tags = inst.tags
+ try:
+ if inst.remove_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+ if tags:
+ for inst in instances:
+ try:
+ if inst.add_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_facts.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_facts.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+            - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+            - A hash/dictionary of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+            - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+              any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+              Filter keys can be the same as the request parameter name, or be lower case and use underscore ("_") or dash ("-") to
+              connect the words in one parameter. 'InstanceIds' should be a list and will be appended to
+              I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be a dict; use I(tags) instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+            description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+                    description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+            description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+            description: Maximum outgoing bandwidth to the internet.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+            description: The operating system name of the instance.
+            returned: always
+            type: str
+            sample: CentOS
+        ostype:
+            description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+            description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+            description: The ID of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+    if ids and (not isinstance(ids, list) or len(ids) < 1):
+        module.fail_json(msg='instance_ids should be a list of instance ids, aborting')
+
+    if names and (not isinstance(names, list) or len(names) < 1):
+        module.fail_json(msg='instance_names should be a list of instance names, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
+    # merge any instance ids supplied through the filters option into the ids list
+    for key, value in list(filters.items()):
+        if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+            for id in value:
+                if id not in ids:
+                    ids.append(id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+            - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+            - A hash/dictionary of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+            - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+              any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+              Filter keys can be the same as the request parameter name, or be lower case and use underscore ("_") or dash ("-") to
+              connect the words in one parameter. 'InstanceIds' should be a list and will be appended to
+              I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be a dict; use I(tags) instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+            description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+                    description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+            description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+            description: Maximum outgoing bandwidth to the internet.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+            description: The operating system name of the instance.
+            returned: always
+            type: str
+            sample: CentOS
+        ostype:
+            description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+            description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+            description: The ID of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+    if ids and (not isinstance(ids, list) or len(ids) < 1):
+        module.fail_json(msg='instance_ids should be a list of instance ids, aborting')
+
+    if names and (not isinstance(names, list) or len(names) < 1):
+        module.fail_json(msg='instance_names should be a list of instance names, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
+    # merge any instance ids supplied through the filters option into the ids list
+    for key, value in list(filters.items()):
+        if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+            for id in value:
+                if id not in ids:
+                    ids.append(id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py
new file mode 100644
index 00000000..1364a42c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: atomic_container
+short_description: Manage the containers on the atomic host platform
+description:
+ - Manage the containers on the atomic host platform.
+    - Allows managing the lifecycle of a container on the atomic host platform.
+author: "Giuseppe Scrivano (@giuseppe)"
+notes:
+    - Host should support the C(atomic) command.
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ backend:
+ description:
+ - Define the backend to use for the container.
+ required: True
+ choices: ["docker", "ostree"]
+ type: str
+ name:
+ description:
+ - Name of the container.
+ required: True
+ type: str
+ image:
+ description:
+ - The image to use to install the container.
+ required: True
+ type: str
+ rootfs:
+ description:
+ - Define the rootfs of the image.
+ type: str
+ state:
+ description:
+ - State of the container.
+ choices: ["absent", "latest", "present", "rollback"]
+ default: "latest"
+ type: str
+ mode:
+ description:
+            - Define if it is a user or a system container.
+ choices: ["user", "system"]
+ type: str
+ values:
+ description:
+ - Values for the installation of the container.
+            - This option is permitted only with mode C(user) or C(system).
+            - The values specified here will be used at installation time as C(--set) arguments for C(atomic install).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+
+- name: Install the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: latest
+ mode: system
+ values:
+ - ETCD_NAME=etcd.server
+
+- name: Uninstall the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: absent
+ mode: system
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: [u'Using default tag: latest ...']
+'''
+
+# import module snippets
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_install(module, mode, rootfs, container, image, values_list, backend):
+ system_list = ["--system"] if mode == 'system' else []
+ user_list = ["--user"] if mode == 'user' else []
+ rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
+ args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, image, values_list):
+ args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name, backend):
+ args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=True)
+
+
+def do_rollback(module, name):
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ mode = module.params['mode']
+ name = module.params['name']
+ image = module.params['image']
+ rootfs = module.params['rootfs']
+ values = module.params['values']
+ backend = module.params['backend']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
+ args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
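+    # the container counts as present when its name appears in the 'atomic containers list' output above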
+ present = name in out
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, mode, rootfs, name, image, values_list, backend)
+ elif state == 'latest':
+ do_update(module, name, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="The container is not present", changed=False)
+ else:
+ do_uninstall(module, name, backend)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mode=dict(default=None, choices=['user', 'system']),
+ name=dict(required=True),
+ image=dict(required=True),
+ rootfs=dict(default=None),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ backend=dict(required=True, choices=['docker', 'ostree']),
+ values=dict(type='list', default=[], elements='str'),
+ ),
+ )
+
+    if module.params['values'] and module.params['mode'] is None:
+ module.fail_json(msg="values is supported only with user or system mode")
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py
new file mode 100644
index 00000000..993933e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+ - Manage the atomic host platform.
+    - Rebooting of the atomic host platform should be done outside this module.
+author:
+- Saravanan KR (@krsacme)
+notes:
+    - Host should be an atomic platform (verified by the existence of the '/run/ostree-booted' file).
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ revision:
+ description:
+ - The version number of the atomic host to be deployed.
+ - Providing C(latest) will upgrade to the latest available version.
+ default: 'latest'
+ aliases: [ version ]
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
+ community.general.atomic_host:
+ revision: latest
+
+- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+ community.general.atomic_host:
+ revision: 23.130
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: 'Already on latest'
+'''
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+ revision = module.params['revision']
+ args = []
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if revision == 'latest':
+ args = ['atomic', 'host', 'upgrade']
+ else:
+ args = ['atomic', 'host', 'deploy', revision]
+
+ out = {}
+ err = {}
+ rc = 0
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
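+    # 'atomic host upgrade' exits with code 77 when the host is already on the
+    # latest version, which is not an error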
+ if rc == 77 and revision == 'latest':
+ module.exit_json(msg="Already on latest", changed=False)
+ elif rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ revision=dict(type='str', default='latest', aliases=["version"]),
+ ),
+ )
+
+ # Verify that the platform is atomic host
+ if not os.path.exists("/run/ostree-booted"):
+ module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py
new file mode 100644
index 00000000..c915ed0b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform.
+    - Allows executing the commands specified by the RUN label in the container image, when present.
+author:
+- Saravanan KR (@krsacme)
+notes:
+ - Host should support C(atomic) command.
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ backend:
+ description:
+ - Define the backend where the image is pulled.
+ choices: [ 'docker', 'ostree' ]
+ type: str
+ name:
+ description:
+ - Name of the container image.
+ required: True
+ type: str
+ state:
+ description:
+ - The state of the container image.
+      - The state C(latest) will ensure the container image is upgraded to the latest version and forcefully restart the container, if running.
+ choices: [ 'absent', 'latest', 'present' ]
+ default: 'latest'
+ type: str
+ started:
+ description:
+      - Start or stop the container.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = r'''
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+ community.general.atomic_image:
+ name: rhel7/rsyslog
+ state: latest
+
+- name: Pull busybox to the OSTree backend
+ community.general.atomic_image:
+ name: busybox
+ state: latest
+ backend: ostree
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: [u'Using default tag: latest ...']
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_upgrade(module, image):
+ args = ['atomic', 'update', '--force', image]
+ rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:  # something went wrong; emit the error message
+ module.fail_json(rc=rc, msg=err)
+ elif 'Image is up to date' in out:
+ return False
+
+ return True
+
+
+def core(module):
+ image = module.params['name']
+ state = module.params['state']
+ started = module.params['started']
+ backend = module.params['backend']
+ is_upgraded = False
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ if backend:
+ if state == 'present' or state == 'latest':
+ args = ['atomic', 'pull', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ out_run = ""
+ if started:
+ args = ['atomic', 'run', "--storage=%s" % backend, image]
+ rc, out_run, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+
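+            # treat output mentioning "Extracting" or "Copying blob" as evidence
+            # that new image data was pulled, i.e. a change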
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=(out + out_run), changed=changed)
+ elif state == 'absent':
+ args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Unable to find" not in out
+ module.exit_json(msg=out, changed=changed)
+ return
+
+ if state == 'present' or state == 'latest':
+ if state == 'latest':
+ is_upgraded = do_upgrade(module, image)
+
+ if started:
+ args = ['atomic', 'run', image]
+ else:
+ args = ['atomic', 'install', image]
+ elif state == 'absent':
+ args = ['atomic', 'uninstall', image]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ elif rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+ elif started and 'Container is running' in out:
+ module.exit_json(result=out, changed=is_upgraded)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backend=dict(type='str', choices=['docker', 'ostree']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+ started=dict(type='bool', default=True),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py
new file mode 100644
index 00000000..a2750937
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_aa_policy
+short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the Anti Affinity Policy.
+ type: str
+ required: True
+ location:
+ description:
+ - Datacenter in which the policy lives/should live.
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ required: False
+ default: present
+ choices: ['present','absent']
+ wait:
+ description:
+ - This option does nothing and will be removed in community.general 3.0.0.
+ type: bool
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+
+- name: Delete AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+'''
+
+RETURN = '''
+policy:
+ description: The anti affinity policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "id":"1a28dd0988984d87b9cd61fa8da15424",
+ "name":"test_aa_policy",
+ "location":"UC1",
+ "links":[
+ {
+ "rel":"self",
+ "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
+ "verbs":[
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ },
+ {
+ "rel":"location",
+ "href":"/v2/datacenters/wfad/UC1",
+ "id":"uc1",
+ "name":"UC1 - US West (Santa Clara)"
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk:
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAntiAffinityPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'),
+ exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'),
+ exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ location=dict(required=True),
+ wait=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_policies_for_datacenter(p)
+
+ if p['state'] == "absent":
+ changed, policy = self._ensure_policy_is_absent(p)
+ else:
+ changed, policy = self._ensure_policy_is_present(p)
+
+ if hasattr(policy, 'data'):
+ policy = policy.data
+ elif hasattr(policy, '__dict__'):
+ policy = policy.__dict__
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_policies_for_datacenter(self, p):
+ """
+ Get the Policies for a datacenter by calling the CLC API.
+ :param p: datacenter to get policies from
+ :return: policies in the datacenter
+ """
+ response = {}
+
+ policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
+
+ for policy in policies:
+ response[policy.name] = policy
+ return response
+
+ def _create_policy(self, p):
+ """
+ Create an Anti Affinity Policy using the CLC API.
+ :param p: module parameters with the policy name and datacenter
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ return self.clc.v2.AntiAffinity.Create(
+ name=p['name'],
+ location=p['location'])
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _delete_policy(self, p):
+ """
+ Delete an Anti Affinity Policy using the CLC API.
+ :param p: module parameters naming the policy to delete
+ :return: none
+ """
+ try:
+ policy = self.policy_dict[p['name']]
+ policy.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _policy_exists(self, policy_name):
+ """
+ Check to see if an Anti Affinity Policy exists
+ :param policy_name: name of the policy
+ :return: the policy object if it exists, otherwise False
+ """
+ if policy_name in self.policy_dict:
+ return self.policy_dict.get(policy_name)
+
+ return False
+
+ def _ensure_policy_is_absent(self, p):
+ """
+ Makes sure that a policy is absent
+ :param p: dictionary of policy name
+ :return: (changed, None) where changed indicates whether a deletion occurred
+ """
+ changed = False
+ if self._policy_exists(policy_name=p['name']):
+ changed = True
+ if not self.module.check_mode:
+ self._delete_policy(p)
+ return changed, None
+
+ def _ensure_policy_is_present(self, p):
+ """
+ Ensures that a policy is present
+ :param p: dictionary of a policy name
+ :return: (changed, policy) where changed indicates whether the policy was created
+ """
+ changed = False
+ policy = self._policy_exists(policy_name=p['name'])
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_policy(p)
+ return changed, policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
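+
+ # With this session installed, every SDK request should carry headers along
+ # the lines of (the requests version shown is illustrative):
+ #   Api-Client: ClcAnsibleModule/${version}
+ #   User-Agent: python-requests/2.25.1 ClcAnsibleModule/${version}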
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+ clc_aa_policy = ClcAntiAffinityPolicy(module)
+ clc_aa_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py
new file mode 100644
index 00000000..7a10c0b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py
@@ -0,0 +1,526 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+options:
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ name:
+ description:
+ - The name of the alert policy. This is mutually exclusive with id
+ type: str
+ id:
+ description:
+ - The alert policy id. This is mutually exclusive with name
+ type: str
+ alert_recipients:
+ description:
+ - A list of recipient email addresses to notify for the alert.
+ This is required for state 'present'
+ type: list
+ metric:
+ description:
+ - The metric on which to measure the condition that will trigger the alert.
+ This is required for state 'present'
+ type: str
+ choices: ['cpu','memory','disk']
+ duration:
+ description:
+ - The length of time in minutes that the condition must exceed the threshold.
+ This is required for state 'present'
+ type: str
+ threshold:
+ description:
+ - The threshold that will trigger the alert when the metric equals or exceeds it.
+ This is required for state 'present'.
+ This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0.
+ type: int
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Alert Policy for disk above 80% for 5 minutes
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ alert_recipients:
+ - test1@centurylink.com
+ - test2@centurylink.com
+ metric: 'disk'
+ duration: '00:05:00'
+ threshold: 80
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+
+- name: Delete Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Alert Policy
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+'''
+
+RETURN = '''
+policy:
+ description: The alert policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "actions": [
+ {
+ "action": "email",
+ "settings": {
+ "recipients": [
+ "user1@domain.com",
+ "user1@domain.com"
+ ]
+ }
+ }
+ ],
+ "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
+ "links": [
+ {
+ "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
+ "rel": "self",
+ "verbs": [
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ }
+ ],
+ "name": "test_alert",
+ "triggers": [
+ {
+ "duration": "00:05:00",
+ "metric": "disk",
+ "threshold": 80.0
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAlertPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ id=dict(),
+ alias=dict(required=True),
+ alert_recipients=dict(type='list'),
+ metric=dict(
+ choices=[
+ 'cpu',
+ 'memory',
+ 'disk'],
+ default=None),
+ duration=dict(type='str'),
+ threshold=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ mutually_exclusive = [
+ ['name', 'id']
+ ]
+ return {'argument_spec': argument_spec,
+ 'mutually_exclusive': mutually_exclusive}
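+
+ # Unlike the other CLC modules, this returns a dict of AnsibleModule keyword
+ # arguments, so main() can expand it directly via
+ # AnsibleModule(supports_check_mode=True, **argument_dict) and pick up the
+ # mutually_exclusive constraint alongside argument_spec.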
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_alert_policies(p['alias'])
+
+ if p['state'] == 'present':
+ changed, policy = self._ensure_alert_policy_is_present()
+ else:
+ changed, policy = self._ensure_alert_policy_is_absent()
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_alert_policy_is_present(self):
+ """
+ Ensures that the alert policy is present
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the created/updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ policy_name = p.get('name')
+
+ if not policy_name:
+ self.module.fail_json(msg='Policy name is required')
+ policy = self._alert_policy_exists(policy_name)
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_alert_policy()
+ else:
+ changed_u, policy = self._ensure_alert_policy_is_updated(policy)
+ if changed_u:
+ changed = True
+ return changed, policy
+
+ def _ensure_alert_policy_is_absent(self):
+ """
+ Ensures that the alert policy is absent
+ :return: (changed, None)
+ changed: A flag representing if anything is modified
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = p.get('id')
+ alert_policy_name = p.get('name')
+ alias = p.get('alias')
+ if not alert_policy_id and not alert_policy_name:
+ self.module.fail_json(
+ msg='Either alert policy id or policy name is required')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id(
+ self.module,
+ alert_policy_name)
+ if alert_policy_id and alert_policy_id in self.policy_dict:
+ changed = True
+ if not self.module.check_mode:
+ self._delete_alert_policy(alias, alert_policy_id)
+ return changed, None
+
+ def _ensure_alert_policy_is_updated(self, alert_policy):
+ """
+ Ensures the alert policy is updated if anything is changed in the alert policy configuration
+ :param alert_policy: the target alert policy
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = alert_policy.get('id')
+ email_list = p.get('alert_recipients')
+ metric = p.get('metric')
+ duration = p.get('duration')
+ threshold = p.get('threshold')
+ policy = alert_policy
+ if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
+ (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
+ (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
+ changed = True
+ elif email_list:
+ t_email_list = list(
+ alert_policy.get('actions')[0].get('settings').get('recipients'))
+ if set(email_list) != set(t_email_list):
+ changed = True
+ if changed and not self.module.check_mode:
+ policy = self._update_alert_policy(alert_policy_id)
+ return changed, policy
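+
+ # Note: only the first trigger and the first action are compared above,
+ # which matches the single-trigger, single-action payloads built by
+ # _create_alert_policy() and _update_alert_policy() below.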
+
+ def _get_alert_policies(self, alias):
+ """
+ Get the alert policies for account alias by calling the CLC API.
+ :param alias: the account alias
+ :return: the alert policies for the account alias
+ """
+ response = {}
+
+ policies = self.clc.v2.API.Call('GET',
+ '/v2/alertPolicies/%s'
+ % alias)
+
+ for policy in policies.get('items'):
+ response[policy.get('id')] = policy
+ return response
+
+ def _create_alert_policy(self):
+ """
+ Create an alert Policy using the CLC API.
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST',
+ '/v2/alertPolicies/%s' % alias,
+ arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to create alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
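+
+ # The serialized payload sent above looks roughly like this (values taken
+ # from the RETURN sample; illustrative only):
+ #   {"name": "test_alert",
+ #    "actions": [{"action": "email",
+ #                 "settings": {"recipients": ["user1@domain.com"]}}],
+ #    "triggers": [{"metric": "disk", "duration": "00:05:00", "threshold": 80}]}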
+
+ def _update_alert_policy(self, alert_policy_id):
+ """
+ Update alert policy using the CLC API.
+ :param alert_policy_id: The clc alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'PUT', '/v2/alertPolicies/%s/%s' %
+ (alias, alert_policy_id), arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to update alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _delete_alert_policy(self, alias, policy_id):
+ """
+ Delete an alert policy using the CLC API.
+ :param alias : the account alias
+ :param policy_id: the alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/alertPolicies/%s/%s' %
+ (alias, policy_id), None)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to delete alert policy id "{0}". {1}'.format(
+ policy_id, str(e.response_text)))
+ return result
+
+ def _alert_policy_exists(self, policy_name):
+ """
+ Check to see if an alert policy exists
+ :param policy_name: name of the alert policy
+ :return: the alert policy if it exists, otherwise False
+ """
+ result = False
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == policy_name:
+ result = self.policy_dict.get(policy_id)
+ return result
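+
+ # Note: if several policies share the same name, the loop above silently
+ # keeps the last match, while _get_alert_policy_id() below treats duplicate
+ # names as an error.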
+
+ def _get_alert_policy_id(self, module, alert_policy_name):
+ """
+ Retrieves the alert policy id of the account based on the name of the policy
+ :param module: the AnsibleModule object
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy_id
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcAlertPolicy._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_alert_policy = ClcAlertPolicy(module)
+ clc_alert_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py
new file mode 100644
index 00000000..c45ca919
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud.
+description:
+ - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - A list of server Ids to deploy the blueprint package to.
+ type: list
+ required: True
+ package_id:
+ description:
+ - The package id of the blueprint.
+ type: str
+ required: True
+ package_params:
+ description:
+ - The dictionary of arguments required to deploy the blueprint.
+ type: dict
+ default: {}
+ required: False
+ state:
+ description:
+ - Whether to install or uninstall the package. Currently it supports only "present" for the install action.
+ type: str
+ required: False
+ default: present
+ choices: ['present']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: str
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Deploy package
+ community.general.clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ - UC1TEST-SERVER2
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ package_params: {}
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SERVER1",
+ "UC1TEST-SERVER2"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcBlueprintPackage:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ changed = False
+ changed_server_ids = []
+ self._set_clc_credentials_from_env()
+ server_ids = p['server_ids']
+ package_id = p['package_id']
+ package_params = p['package_params']
+ state = p['state']
+ if state == 'present':
+ changed, changed_server_ids, request_list = self.ensure_package_installed(
+ server_ids, package_id, package_params)
+ self._wait_for_requests_to_complete(request_list)
+ self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ This function defines the dictionary object required for
+ package module
+ :return: the package dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ package_id=dict(required=True),
+ package_params=dict(type='dict', default={}),
+ wait=dict(default=True), # @FIXME should be bool?
+ state=dict(default='present', choices=['present'])
+ )
+ return argument_spec
+
+ def ensure_package_installed(self, server_ids, package_id, package_params):
+ """
+ Ensure the package is installed in the given list of servers
+ :param server_ids: the server list where the package needs to be installed
+ :param package_id: the blueprint package id
+ :param package_params: the package arguments
+ :return: (changed, server_ids, request_list)
+ changed: A flag indicating if a change was made
+ server_ids: The list of servers modified
+ request_list: The list of request objects from clc-sdk
+ """
+ changed = False
+ request_list = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to get servers from CLC')
+ for server in servers:
+ if not self.module.check_mode:
+ request = self.clc_install_package(
+ server,
+ package_id,
+ package_params)
+ request_list.append(request)
+ changed = True
+ return changed, server_ids, request_list
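+
+ # Note: changed is reported whenever matching servers are found, including
+ # in check mode; the ExecutePackage call itself is only issued when check
+ # mode is off.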
+
+ def clc_install_package(self, server, package_id, package_params):
+ """
+ Install the package to a given clc server
+ :param server: The server object where the package needs to be installed
+ :param package_id: The blueprint package id
+ :param package_params: the required argument dict for the package installation
+ :return: The result object from the CLC API call
+ """
+ result = None
+ try:
+ result = server.ExecutePackage(
+ package_id=package_id,
+ parameters=package_params)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
+ package_id, server.id, ex.message
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, request_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param request_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in request_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process package install request')
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: the list of server ids
+ :param message: the error message to raise if there is any error
+ :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcBlueprintPackage.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_blueprint_package = ClcBlueprintPackage(module)
+ clc_blueprint_package.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py
new file mode 100644
index 00000000..105d793c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py
@@ -0,0 +1,584 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_firewall_policy
+short_description: Create/delete/update firewall policies
+description:
+ - Create, update, or delete firewall policies on CenturyLink Cloud
+options:
+ location:
+ description:
+ - Target datacenter for the firewall policy
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the firewall policy
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ source:
+ description:
+ - The list of source addresses for traffic on the originating firewall.
+ This is required when state is 'present'
+ type: list
+ destination:
+ description:
+ - The list of destination addresses for traffic on the terminating firewall.
+ This is required when state is 'present'
+ type: list
+ ports:
+ description:
+ - The list of ports associated with the policy.
+ TCP and UDP can take in single ports or port ranges.
+ - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
+ type: list
+ firewall_policy_id:
+ description:
+ - Id of the firewall policy. This is required to update or delete an existing firewall policy
+ type: str
+ source_account_alias:
+ description:
+ - CLC alias for the source account
+ type: str
+ required: True
+ destination_account_alias:
+ description:
+ - CLC alias for the destination account
+ type: str
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: str
+ default: 'True'
+ enabled:
+ description:
+ - Whether the firewall policy is enabled or disabled
+ type: str
+ choices: [True, False]
+ default: True
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+ community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: present
+ source: 10.128.216.0/24
+ destination: 10.128.216.0/24
+ ports: Any
+ destination_account_alias: WFAD
+
+- name: Delete Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete a Firewall Policy at CenturyLink Cloud
+ community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: absent
+ firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
+'''
+
+RETURN = '''
+firewall_policy_id:
+ description: The firewall policy id
+ returned: success
+ type: str
+ sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+ description: The firewall policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "destination":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "destinationAccount":"wfad",
+ "enabled":true,
+ "id":"fc36f1bfd47242e488a9c44346438c05",
+ "links":[
+ {
+ "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "ports":[
+ "any"
+ ],
+ "source":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "status":"active"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcFirewallPolicy:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.firewall_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ location=dict(required=True),
+ source_account_alias=dict(required=True),
+ destination_account_alias=dict(),
+ firewall_policy_id=dict(),
+ ports=dict(type='list'),
+ source=dict(type='list'),
+ destination=dict(type='list'),
+ wait=dict(default=True), # @FIXME type=bool
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(default=True, choices=[True, False])
+ )
+ return argument_spec
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ firewall_policy = None
+ location = self.module.params.get('location')
+ source_account_alias = self.module.params.get('source_account_alias')
+ destination_account_alias = self.module.params.get(
+ 'destination_account_alias')
+ firewall_policy_id = self.module.params.get('firewall_policy_id')
+ ports = self.module.params.get('ports')
+ source = self.module.params.get('source')
+ destination = self.module.params.get('destination')
+ wait = self.module.params.get('wait')
+ state = self.module.params.get('state')
+ enabled = self.module.params.get('enabled')
+
+ self.firewall_dict = {
+ 'location': location,
+ 'source_account_alias': source_account_alias,
+ 'destination_account_alias': destination_account_alias,
+ 'firewall_policy_id': firewall_policy_id,
+ 'ports': ports,
+ 'source': source,
+ 'destination': destination,
+ 'wait': wait,
+ 'state': state,
+ 'enabled': enabled}
+
+ self._set_clc_credentials_from_env()
+
+ if state == 'absent':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
+ source_account_alias, location, self.firewall_dict)
+
+ elif state == 'present':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
+ source_account_alias, location, self.firewall_dict)
+
+ return self.module.exit_json(
+ changed=changed,
+ firewall_policy_id=firewall_policy_id,
+ firewall_policy=firewall_policy)
+
+ @staticmethod
+ def _get_policy_id_from_response(response):
+ """
+ Method to parse out the policy id from creation response
+ :param response: response from firewall creation API call
+ :return: policy_id: firewall policy id from creation call
+ """
+ url = response.get('links')[0]['href']
+ path = urlparse(url).path
+ path_list = os.path.split(path)
+ policy_id = path_list[-1]
+ return policy_id
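+
+ # For example, a creation response whose self link is
+ #   /v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05
+ # yields 'fc36f1bfd47242e488a9c44346438c05' (the trailing path segment) as
+ # the policy id.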
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_firewall_policy_is_present(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: (changed, firewall_policy_id, firewall_policy)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was created/updated
+ firewall_policy: The firewall_policy object
+ """
+ firewall_policy = None
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+
+ if firewall_policy_id is None:
+ if not self.module.check_mode:
+ response = self._create_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_dict)
+ firewall_policy_id = self._get_policy_id_from_response(
+ response)
+ changed = True
+ else:
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if not firewall_policy:
+ return self.module.fail_json(
+ msg='Unable to find the firewall policy id : {0}'.format(
+ firewall_policy_id))
+ changed = self._compare_get_request_with_dict(
+ firewall_policy,
+ firewall_dict)
+ if not self.module.check_mode and changed:
+ self._update_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict)
+ if changed and firewall_policy_id:
+ firewall_policy = self._wait_for_requests_to_complete(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ return changed, firewall_policy_id, firewall_policy
+
+ def _ensure_firewall_policy_is_absent(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is removed if present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: firewall policy to delete
+ :return: (changed, firewall_policy_id, response)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was deleted
+ response: response from CLC API call
+ """
+ changed = False
+ response = []
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+ result = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if result:
+ if not self.module.check_mode:
+ response = self._delete_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ changed = True
+ return changed, firewall_policy_id, response
+
+ def _create_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Creates the firewall policy for the given account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response from CLC API call
+ """
+ payload = {
+ 'destinationAccount': firewall_dict.get('destination_account_alias'),
+ 'source': firewall_dict.get('source'),
+ 'destination': firewall_dict.get('destination'),
+ 'ports': firewall_dict.get('ports')}
+ try:
+ response = self.clc.v2.API.Call(
+ 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
+ (source_account_alias, location), payload)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to create firewall policy. %s" %
+ str(e.response_text))
+ return response
+
+ def _delete_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Deletes a given firewall policy for an account alias in a datacenter
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to delete
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to delete the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _update_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict):
+ """
+ Updates a firewall policy for a given datacenter and account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to update
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'PUT',
+ '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias,
+ location,
+ firewall_policy_id),
+ firewall_dict)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to update the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ @staticmethod
+ def _compare_get_request_with_dict(response, firewall_dict):
+ """
+ Helper method to compare the json response for getting the firewall policy with the request parameters
+ :param response: response from the get method
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: changed: Boolean that returns true if there are differences between
+ the response parameters and the playbook parameters
+ """
+
+ changed = False
+
+ response_dest_account_alias = response.get('destinationAccount')
+ response_enabled = response.get('enabled')
+ response_source = response.get('source')
+ response_dest = response.get('destination')
+ response_ports = response.get('ports')
+ request_dest_account_alias = firewall_dict.get(
+ 'destination_account_alias')
+ request_enabled = firewall_dict.get('enabled')
+ if request_enabled is None:
+ request_enabled = True
+ request_source = firewall_dict.get('source')
+ request_dest = firewall_dict.get('destination')
+ request_ports = firewall_dict.get('ports')
+
+ if (
+ response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
+ response_enabled != request_enabled) or (
+ response_source and response_source != request_source) or (
+ response_dest and response_dest != request_dest) or (
+ response_ports and response_ports != request_ports):
+ changed = True
+ return changed
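+
+ # Illustrative example: a GET response with ports ['any'] compared against a
+ # playbook that requests ports ['TCP/123'] makes changed True, which drives
+ # the PUT in _ensure_firewall_policy_is_present().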
+
+ def _get_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Get back details for a particular firewall policy
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: id of the firewall policy to get
+ :return: response - The response from CLC API call
+ """
+ response = None
+ try:
+ response = self.clc.v2.API.Call(
+ 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ self.module.fail_json(
+ msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _wait_for_requests_to_complete(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ wait_limit=50):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param source_account_alias: The source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: The firewall policy id
+ :param wait_limit: The number of times to check the status for completion
+ :return: the firewall_policy object
+ """
+ wait = self.module.params.get('wait')
+ count = 0
+ firewall_policy = None
+ while wait:
+ count += 1
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ status = firewall_policy.get('status')
+ if status == 'active' or count > wait_limit:
+ wait = False
+ else:
+ # wait for 2 seconds
+ sleep(2)
+ return firewall_policy
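+
+ # With the default wait_limit of 50 and the 2 second sleep, this polls for
+ # roughly 100 seconds at most before returning whatever policy state it last
+ # saw, even if the status never reached 'active'.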
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_firewall = ClcFirewallPolicy(module)
+ clc_firewall.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py
new file mode 100644
index 00000000..a80cc400
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_group
+short_description: Create/delete Server Groups at Centurylink Cloud
+description:
+ - Create or delete Server Groups at CenturyLink Cloud
+options:
+ name:
+ description:
+ - The name of the Server Group
+ type: str
+ required: True
+ description:
+ description:
+ - A description of the Server Group
+ type: str
+ required: False
+ parent:
+ description:
+ - The parent group of the server group. If parent is not provided, it creates the group at top level.
+ type: str
+ required: False
+ location:
+ description:
+ - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
+ associated with the account
+ type: str
+ required: False
+ state:
+ description:
+ - Whether to create or delete the group
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+
+# Create a Server Group
+
+---
+- name: Create Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+# Delete a Server Group
+- name: Delete Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+group:
+ description: The group information
+ returned: success
+ type: dict
+ sample:
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":"2015-07-29T18:52:47Z",
+ "modifiedBy":"service.wfad",
+ "modifiedDate":"2015-07-29T18:52:47Z"
+ },
+ "customFields":[
+
+ ],
+ "description":"test group",
+ "groups":[
+
+ ],
+ "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "links":[
+ {
+ "href":"/v2/groups/wfad",
+ "rel":"createGroup",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad",
+ "rel":"createServer",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"parentGroup"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
+ "rel":"defaults",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
+ "rel":"archiveGroupAction"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
+ "rel":"horizontalAutoscalePolicyMapping",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test group",
+ "status":"active",
+ "type":"default"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcGroup(object):
+
+ clc = None
+ root_group = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ location = self.module.params.get('location')
+ group_name = self.module.params.get('name')
+ parent_name = self.module.params.get('parent')
+ group_description = self.module.params.get('description')
+ state = self.module.params.get('state')
+
+ self._set_clc_credentials_from_env()
+ self.group_dict = self._get_group_tree_for_datacenter(
+ datacenter=location)
+
+ if state == "absent":
+ changed, group, requests = self._ensure_group_is_absent(
+ group_name=group_name, parent_name=parent_name)
+ if requests:
+ self._wait_for_requests_to_complete(requests)
+ else:
+ changed, group = self._ensure_group_is_present(
+ group_name=group_name, parent_name=parent_name, group_description=group_description)
+ try:
+ group = group.data
+ except AttributeError:
+ group = group_name
+ self.module.exit_json(changed=changed, group=group)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ parent=dict(default=None),
+ location=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=True))
+
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_group_is_absent(self, group_name, parent_name):
+ """
+ Ensure that group_name is absent by deleting it if necessary
+ :param group_name: string - the name of the clc server group to delete
+ :param parent_name: string - the name of the parent group for group_name
+ :return: changed, group
+ """
+ changed = False
+ group = []
+ results = []
+
+ if self._group_exists(group_name=group_name, parent_name=parent_name):
+ if not self.module.check_mode:
+ group.append(group_name)
+ result = self._delete_group(group_name)
+ results.append(result)
+ changed = True
+ return changed, group, results
+
+ def _delete_group(self, group_name):
+ """
+ Delete the provided server group
+ :param group_name: string - the server group to delete
+ :return: none
+ """
+ response = None
+ group, parent = self.group_dict.get(group_name)
+ try:
+ response = group.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
+ group_name, ex.response_text
+ ))
+ return response
+
+ def _ensure_group_is_present(
+ self,
+ group_name,
+ parent_name,
+ group_description):
+ """
+ Checks to see if a server group exists, creates it if it doesn't.
+ :param group_name: the name of the group to validate/create
+ :param parent_name: the name of the parent group for group_name
+ :param group_description: a short description of the server group (used when creating)
+ :return: (changed, group) -
+ changed: Boolean- whether a change was made,
+ group: A clc group object for the group
+ """
+ if not self.root_group:
+ raise AssertionError("Implementation Error: Root Group not set")
+ parent = parent_name if parent_name is not None else self.root_group.name
+ description = group_description
+ changed = False
+ group = group_name
+
+ parent_exists = self._group_exists(group_name=parent, parent_name=None)
+ child_exists = self._group_exists(
+ group_name=group_name,
+ parent_name=parent)
+
+ if parent_exists and child_exists:
+ group, parent = self.group_dict[group_name]
+ changed = False
+ elif parent_exists and not child_exists:
+ if not self.module.check_mode:
+ group = self._create_group(
+ group=group,
+ parent=parent,
+ description=description)
+ changed = True
+ else:
+ self.module.fail_json(
+ msg="parent group: " +
+ parent +
+ " does not exist")
+
+ return changed, group
+
+ def _create_group(self, group, parent, description):
+ """
+ Create the provided server group
+ :param group: clc_sdk.Group - the group to create
+ :param parent: clc_sdk.Parent - the parent group for {group}
+ :param description: string - a text description of the group
+ :return: clc_sdk.Group - the created group
+ """
+ response = None
+ (parent, grandparent) = self.group_dict[parent]
+ try:
+ response = parent.Create(name=group, description=description)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
+ group, ex.response_text))
+ return response
+
+ def _group_exists(self, group_name, parent_name):
+ """
+ Check to see if a group exists
+ :param group_name: string - the group to check
+ :param parent_name: string - the parent of group_name
+ :return: boolean - whether the group exists
+ """
+ result = False
+ if group_name in self.group_dict:
+ (group, parent) = self.group_dict[group_name]
+ if parent_name is None or parent_name == parent.name:
+ result = True
+ return result
+
+ def _get_group_tree_for_datacenter(self, datacenter=None):
+ """
+ Walk the tree of groups for a datacenter
+ :param datacenter: string - the datacenter to walk (ex: 'UC1')
+ :return: a dictionary of groups and parents
+ """
+ self.root_group = self.clc.v2.Datacenter(
+ location=datacenter).RootGroup()
+ return self._walk_groups_recursive(
+ parent_group=None,
+ child_group=self.root_group)
+
+ def _walk_groups_recursive(self, parent_group, child_group):
+ """
+ Walk a parent-child tree of groups, starting with the provided child group
+ :param parent_group: clc_sdk.Group - the parent group to start the walk
+ :param child_group: clc_sdk.Group - the child group to start the walk
+ :return: a dictionary of groups and parents
+ """
+ result = {str(child_group): (child_group, parent_group)}
+ groups = child_group.Subgroups().groups
+ if len(groups) > 0:
+ for group in groups:
+ if group.type != 'default':
+ continue
+
+ result.update(self._walk_groups_recursive(child_group, group))
+ return result
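+
+    # Illustrative note (editorial, not part of the original module): for a
+    # root group named, say, 'UC1 Hardware' with children 'Default Group' and
+    # 'Archive', the walk above would return roughly:
+    #
+    #     {'UC1 Hardware': (<Group UC1 Hardware>, None),
+    #      'Default Group': (<Group Default Group>, <Group UC1 Hardware>),
+    #      'Archive': (<Group Archive>, <Group UC1 Hardware>)}
+    #
+    # i.e. each group name maps to (group, parent), which is the shape that
+    # _group_exists() and _create_group() expect from self.group_dict.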
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process group request')
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
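+
+    # Illustrative (editorial): with requests' default User-Agent this yields
+    # a header like "python-requests/2.25.1 ClcAnsibleModule/<version>", plus
+    # an "Api-Client" header carrying the same module identifier.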
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcGroup._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_group = ClcGroup(module)
+ clc_group.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py
new file mode 100644
index 00000000..2a8d2e9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py
@@ -0,0 +1,935 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create or delete shared load balancers in CenturyLink Cloud.
+description:
+  - An Ansible module to create or delete shared load balancers in CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the loadbalancer
+ type: str
+ required: True
+ description:
+ description:
+ - A description for the loadbalancer
+ type: str
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ location:
+ description:
+      - The location of the datacenter where the load balancer resides
+ type: str
+ required: True
+ method:
+ description:
+      - The balancing method for the load balancer pool
+ type: str
+ choices: ['leastConnection', 'roundRobin']
+ persistence:
+ description:
+ - The persistence method for the load balancer
+ type: str
+ choices: ['standard', 'sticky']
+ port:
+ description:
+ - Port to configure on the public-facing side of the load balancer pool
+ type: str
+ choices: [80, 443]
+ nodes:
+ description:
+      - A list of nodes that need to be added to the load balancer pool
+ type: list
+ default: []
+ status:
+ description:
+ - The status of the loadbalancer
+ type: str
+ default: enabled
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Whether to create or delete the load balancer pool
+ type: str
+ default: present
+ choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+      Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+- name: Create Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: present
+
+- name: Add node to an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_present
+
+- name: Remove node from an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+    - name: Actually Remove things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_absent
+
+- name: Delete LoadbalancerPool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: port_absent
+
+- name: Delete Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: absent
+'''
+
+RETURN = '''
+loadbalancer:
+ description: The load balancer result object from CLC
+ returned: success
+ type: dict
+ sample:
+ {
+ "description":"test-lb",
+ "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
+ "ipAddress":"66.150.174.197",
+ "links":[
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
+ "rel":"pools",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "name":"test-lb",
+ "pools":[
+
+ ],
+ "status":"enabled"
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcLoadBalancer:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.lb_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ result_lb = None
+ loadbalancer_name = self.module.params.get('name')
+ loadbalancer_alias = self.module.params.get('alias')
+ loadbalancer_location = self.module.params.get('location')
+ loadbalancer_description = self.module.params.get('description')
+ loadbalancer_port = self.module.params.get('port')
+ loadbalancer_method = self.module.params.get('method')
+ loadbalancer_persistence = self.module.params.get('persistence')
+ loadbalancer_nodes = self.module.params.get('nodes')
+ loadbalancer_status = self.module.params.get('status')
+ state = self.module.params.get('state')
+
+ if loadbalancer_description is None:
+ loadbalancer_description = loadbalancer_name
+
+ self._set_clc_credentials_from_env()
+
+ self.lb_dict = self._get_loadbalancer_list(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ if state == 'present':
+ changed, result_lb, lb_id = self.ensure_loadbalancer_present(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ description=loadbalancer_description,
+ status=loadbalancer_status)
+ if loadbalancer_port:
+ changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
+ lb_id=lb_id,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ method=loadbalancer_method,
+ persistence=loadbalancer_persistence,
+ port=loadbalancer_port)
+
+ if loadbalancer_nodes:
+ changed, result_nodes = self.ensure_lbpool_nodes_set(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+ elif state == 'absent':
+ changed, result_lb = self.ensure_loadbalancer_absent(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ elif state == 'port_absent':
+ changed, result_lb = self.ensure_loadbalancerpool_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port)
+
+ elif state == 'nodes_present':
+ changed, result_lb = self.ensure_lbpool_nodes_present(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ elif state == 'nodes_absent':
+ changed, result_lb = self.ensure_lbpool_nodes_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ self.module.exit_json(changed=changed, loadbalancer=result_lb)
+
+ def ensure_loadbalancer_present(
+ self, name, alias, location, description, status):
+ """
+ Checks to see if a load balancer exists and creates one if it does not.
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description of loadbalancer
+ :param status: Enabled / Disabled
+ :return: (changed, result, lb_id)
+ changed: Boolean whether a change was made
+ result: The result object from the CLC load balancer request
+ lb_id: The load balancer id
+ """
+ changed = False
+ result = name
+ lb_id = self._loadbalancer_exists(name=name)
+ if not lb_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancer(name=name,
+ alias=alias,
+ location=location,
+ description=description,
+ status=status)
+ lb_id = result.get('id')
+ changed = True
+
+ return changed, result, lb_id
+
+ def ensure_loadbalancerpool_present(
+ self, lb_id, alias, location, method, persistence, port):
+ """
+ Checks to see if a load balancer pool exists and creates one if it does not.
+ :param lb_id: The loadbalancer id
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: (changed, group, pool_id) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ pool_id: The string id of the load balancer pool
+ """
+ changed = False
+ result = port
+ if not lb_id:
+ return changed, None, None
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if not pool_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ method=method,
+ persistence=persistence,
+ port=port)
+ pool_id = result.get('id')
+ changed = True
+
+ return changed, result, pool_id
+
+ def ensure_loadbalancer_absent(self, name, alias, location):
+ """
+ Checks to see if a load balancer exists and deletes it if it does
+ :param name: Name of the load balancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :return: (changed, result)
+ changed: Boolean whether a change was made
+ result: The result from the CLC API Call
+ """
+ changed = False
+ result = name
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ if not self.module.check_mode:
+ result = self.delete_loadbalancer(alias=alias,
+ location=location,
+ name=name)
+ changed = True
+ return changed, result
+
+ def ensure_loadbalancerpool_absent(self, alias, location, name, port):
+ """
+ Checks to see if a load balancer pool exists and deletes it if it does
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer listens on
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = None
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed = True
+ if not self.module.check_mode:
+ result = self.delete_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided list of nodes exists for the pool
+        and sets the pool's nodes if any of them are missing
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+        :param nodes: The list of nodes to set on the pool
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ result = {}
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_check=nodes)
+ if not nodes_exist:
+ changed = True
+ result = self.set_loadbalancernodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided list of nodes exists for the pool and adds any missing nodes to the pool
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be added
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.add_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_add=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided list of nodes exists for the pool and removes any that are found
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be removed
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.remove_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_remove=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def create_loadbalancer(self, name, alias, location, description, status):
+ """
+        Create a load balancer with the provided parameters
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description for loadbalancer to be created
+ :param status: Enabled / Disabled
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('POST',
+ '/v2/sharedLoadBalancers/%s/%s' % (alias,
+ location),
+ json.dumps({"name": name,
+ "description": description,
+ "status": status}))
+ sleep(1)
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def create_loadbalancerpool(
+ self, alias, location, lb_id, method, persistence, port):
+ """
+ Creates a pool on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: result: The result from the create API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id), json.dumps(
+ {
+ "port": port, "method": method, "persistence": persistence
+ }))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def delete_loadbalancer(self, alias, location, name):
+ """
+ Delete CLC loadbalancer
+ :param alias: Alias for account
+ :param location: Datacenter
+ :param name: Name of the loadbalancer to delete
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ lb_id = self._get_loadbalancer_id(name=name)
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
+ """
+ Delete the pool on the provided load balancer
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the load balancer pool
+ :return: result: The result from the delete API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
+ (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def _get_loadbalancer_id(self, name):
+ """
+ Retrieves unique ID of loadbalancer
+ :param name: Name of loadbalancer
+ :return: Unique ID of the loadbalancer
+ """
+        lb_id = None
+        for lb in self.lb_dict:
+            if lb.get('name') == name:
+                lb_id = lb.get('id')
+        return lb_id
+
+ def _get_loadbalancer_list(self, alias, location):
+ """
+ Retrieve a list of loadbalancers
+ :param alias: Alias for account
+ :param location: Datacenter
+ :return: JSON data for all loadbalancers at datacenter
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+ alias, str(e.response_text)))
+ return result
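+
+    # Editorial note: despite its name, self.lb_dict holds the JSON *list*
+    # returned by this call; _loadbalancer_exists() and _get_loadbalancer_id()
+    # iterate it as a list of {'name': ..., 'id': ...} dicts.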
+
+ def _loadbalancer_exists(self, name):
+ """
+ Verify a loadbalancer exists
+ :param name: Name of loadbalancer
+ :return: False or the ID of the existing loadbalancer
+ """
+ result = False
+
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ result = lb.get('id')
+ return result
+
+ def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+ """
+ Checks to see if a pool exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param port: the port to check and see if it exists
+ :param lb_id: the id string of the provided load balancer
+ :return: result: The id string of the pool or False
+ """
+ result = False
+ try:
+ pool_list = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
+ lb_id, str(e.response_text)))
+ for pool in pool_list:
+ if int(pool.get('port')) == int(port):
+ result = pool.get('id')
+ return result
+
+ def _loadbalancerpool_nodes_exists(
+ self, alias, location, lb_id, pool_id, nodes_to_check):
+ """
+ Checks to see if a set of nodes exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the provided load balancer
+ :param pool_id: the id string of the load balancer pool
+ :param nodes_to_check: the list of nodes to check for
+ :return: result: True / False indicating if the given nodes exist
+ """
+        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+        for node in nodes_to_check:
+            if not node.get('status'):
+                node['status'] = 'enabled'
+            if node not in nodes:
+                # A single missing node means the desired set is not in place.
+                return False
+        return True
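+
+    # Editorial note: membership in _loadbalancerpool_nodes_exists() is exact
+    # dict equality, so a node only counts as present when ipAddress,
+    # privatePort and status all match (hence the 'status' defaulting above),
+    # e.g. {'ipAddress': '10.11.22.123', 'privatePort': 80, 'status': 'enabled'}.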
+
+ def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+ """
+        Set the nodes on the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes: a list of dictionaries containing the nodes to set
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not lb_id:
+ return result
+ if not self.module.check_mode:
+ try:
+ result = self.clc.v2.API.Call('PUT',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id), json.dumps(nodes))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
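+    # Editorial note: the PUT in set_loadbalancernodes() replaces the pool's
+    # node list wholesale, so the add/remove helpers below first fetch the
+    # current nodes, build the desired full list, and then push it back.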
+ def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+ """
+ Add nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_add: a list of dictionaries containing the nodes to add
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_add:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ changed = True
+ nodes.append(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def remove_lbpool_nodes(
+ self, alias, location, lb_id, pool_id, nodes_to_remove):
+ """
+ Removes nodes from the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_remove:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ changed = True
+ nodes.remove(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
+ """
+ Return the list of nodes available to the provided load balancer pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :return: result: The list of nodes
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('GET',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ location=dict(required=True),
+ alias=dict(required=True),
+ port=dict(choices=[80, 443]),
+ method=dict(choices=['leastConnection', 'roundRobin']),
+ persistence=dict(choices=['standard', 'sticky']),
+ nodes=dict(type='list', default=[]),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'port_absent',
+ 'nodes_present',
+ 'nodes_absent'])
+ )
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
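+
+    # Illustrative only (assumed values, editorial): the fallback above means
+    # either of these environments is sufficient before running a playbook:
+    #
+    #     os.environ['CLC_V2_API_TOKEN'] = '<token>'   # from /v2/authentication/login
+    #     os.environ['CLC_ACCT_ALIAS'] = 'ALIAS'
+    #     # ...or...
+    #     os.environ['CLC_V2_API_USERNAME'] = 'user@example.com'
+    #     os.environ['CLC_V2_API_PASSWD'] = 'secret'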
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
+ supports_check_mode=True)
+ clc_loadbalancer = ClcLoadBalancer(module)
+ clc_loadbalancer.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py
new file mode 100644
index 00000000..3c1b08cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py
@@ -0,0 +1,965 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: Modify servers in CenturyLink Cloud.
+description:
+ - An Ansible module to modify servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+      - A list of server IDs to modify.
+ type: list
+ required: True
+ cpu:
+ description:
+      - The number of CPUs to set on the server.
+ type: str
+ memory:
+ description:
+      - Memory (in GB) to set on the server.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti affinity policy id to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_name'
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti affinity policy name to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_id'
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy id to be associated to the server.
+ This is mutually exclusive with 'alert_policy_name'
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy name to be associated to the server.
+ This is mutually exclusive with 'alert_policy_id'
+ type: str
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+      Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Set the cpu count to 4 on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 4
+ state: present
+
+- name: Set the memory to 8GB on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ memory: 8
+ state: present
+
+- name: Set the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: present
+
+- name: Remove the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: absent
+
+- name: Add the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: present
+
+- name: Remove the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: absent
+
+- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 8
+ memory: 16
+ state: present
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcModifyServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+
+ p = self.module.params
+ cpu = p.get('cpu')
+ memory = p.get('memory')
+ state = p.get('state')
+ if state == 'absent' and (cpu or memory):
+ return self.module.fail_json(
+ msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
+
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to modify: %s' %
+ server_ids)
+
+ (changed, server_dict_array, changed_server_ids) = self._modify_servers(
+ server_ids=server_ids)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=changed_server_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ cpu=dict(),
+ memory=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ wait=dict(type='bool', default=True)
+ )
+ mutually_exclusive = [
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name']
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: the error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex.message)
+
+ def _modify_servers(self, server_ids):
+ """
+        Modify the configuration of the servers in the provided list
+ :param server_ids: list of servers to modify
+ :return: a list of dictionaries with server information about the servers that were modified
+ """
+ p = self.module.params
+ state = p.get('state')
+ server_params = {
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
+ 'alert_policy_id': p.get('alert_policy_id'),
+ 'alert_policy_name': p.get('alert_policy_name'),
+ }
+ changed = False
+ server_changed = False
+ aa_changed = False
+ ap_changed = False
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+ changed_servers = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return self.module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ for server in servers:
+ if state == 'present':
+ server_changed, server_result = self._ensure_server_config(
+ server, server_params)
+ if server_result:
+ request_list.append(server_result)
+ aa_changed = self._ensure_aa_policy_present(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_present(
+ server,
+ server_params)
+ elif state == 'absent':
+ aa_changed = self._ensure_aa_policy_absent(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_absent(
+ server,
+ server_params)
+ if server_changed or aa_changed or ap_changed:
+ changed_servers.append(server)
+ changed = True
+
+ self._wait_for_requests(self.module, request_list)
+ self._refresh_servers(self.module, changed_servers)
+
+ for server in changed_servers:
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ def _ensure_server_config(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided cpu and memory
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: (changed, result) -
+            changed: Boolean whether a change was made
+            result: The result from the CLC API call
+ """
+ cpu = server_params.get('cpu')
+ memory = server_params.get('memory')
+ changed = False
+ result = None
+
+ if not cpu:
+ cpu = server.cpu
+ if not memory:
+ memory = server.memory
+ if memory != server.memory or cpu != server.cpu:
+ if not self.module.check_mode:
+ result = self._modify_clc_server(
+ self.clc,
+ self.module,
+ server.id,
+ cpu,
+ memory)
+ changed = True
+ return changed, result
+
+ @staticmethod
+ def _modify_clc_server(clc, module, server_id, cpu, memory):
+ """
+ Modify the memory or CPU of a clc server.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param server_id: id of the server to modify
+ :param cpu: the new cpu value
+ :param memory: the new memory value
+ :return: the result of CLC API call
+ """
+ result = None
+ acct_alias = clc.v2.Account.GetAlias()
+ try:
+ # Update the server configuration
+ job_obj = clc.v2.API.Call('PATCH',
+ 'servers/%s/%s' % (acct_alias,
+ server_id),
+ json.dumps([{"op": "set",
+ "member": "memory",
+ "value": memory},
+ {"op": "set",
+ "member": "cpu",
+ "value": cpu}]))
+ result = clc.v2.Requests(job_obj)
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to update the server configuration for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
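+
+    # Illustrative (editorial): for cpu=4 and memory=8 the PATCH body above
+    # serializes to a list of "set" operations, roughly:
+    #     [{"op": "set", "member": "memory", "value": 8},
+    #      {"op": "set", "member": "cpu", "value": 4}]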
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process modify server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ def _ensure_aa_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided anti affinity policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id != current_aa_policy_id:
+ self._modify_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ aa_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_aa_policy_absent(
+ self, server, server_params):
+ """
+ ensures the provided anti affinity policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id == current_aa_policy_id:
+ self._delete_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
+ """
+ modifies the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param aa_policy_id: the anti affinity policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('PUT',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": aa_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+                    msg='Unable to modify anti affinity policy for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _delete_aa_policy(clc, module, acct_alias, server_id):
+ """
+ Delete the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+                    msg='Unable to delete anti affinity policy from server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
+ """
+ retrieves the anti affinity policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(
+ msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _get_aa_policy_id_of_server(clc, module, alias, server_id):
+ """
+ retrieves the anti affinity policy id of the server based on the CLC server id
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ result = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s/antiAffinityPolicy' %
+ (alias, server_id))
+ aa_policy_id = result.get('id')
+ except APIFailedResponse as ex:
+ if ex.response_status_code != 404:
+ module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return aa_policy_id
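+
+    # Editorial note: a 404 from the call above simply means no anti affinity
+    # policy is attached to the server, so aa_policy_id stays None instead of
+    # failing the module.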
+
+ def _ensure_alert_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided alert policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+ if alert_policy_id and not self._alert_policy_exists(
+ server, alert_policy_id):
+ self._add_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_alert_policy_absent(
+ self, server, server_params):
+ """
+ ensures the alert policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+
+ if alert_policy_id and self._alert_policy_exists(
+ server, alert_policy_id):
+            self._remove_alert_policy_from_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        add the alert policy to the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('POST',
+ 'servers/%s/%s/alertPolicies' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": alert_policy_id}))
+ except APIFailedResponse as ex:
+                module.fail_json(msg='Unable to add alert policy to the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+    def _remove_alert_policy_from_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        remove the alert policy from the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/alertPolicies/%s'
+ % (acct_alias, server_id, alert_policy_id))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ retrieves the alert policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ try:
+ alert_policies = clc.v2.API.Call(method='GET',
+ url='alertPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for alert_policy in alert_policies.get('items'):
+ if alert_policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = alert_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _alert_policy_exists(server, alert_policy_id):
+ """
+ Checks if the alert policy exists for the server
+ :param server: the clc server object
+ :param alert_policy_id: the alert policy
+        :return: True if the given alert policy id is associated with the server, False otherwise
+ """
+ result = False
+ alert_policies = server.alertPolicies
+ if alert_policies:
+ for alert_policy in alert_policies:
+ if alert_policy.get('id') == alert_policy_id:
+ result = True
+ return result
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+
+ argument_dict = ClcModifyServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_modify_server = ClcModifyServer(module)
+ clc_modify_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py
new file mode 100644
index 00000000..e31546b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and Delete public IPs on servers in CenturyLink Cloud.
+description:
+  - An Ansible module to add or delete public IP addresses on an existing server or servers in CenturyLink Cloud.
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ type: str
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ ports:
+ description:
+      - A list of ports to expose. This is required when state is 'present'.
+ type: list
+ server_ids:
+ description:
+      - A list of servers to create public IPs on.
+ type: list
+ required: True
+ state:
+ description:
+      - Determine whether to create or delete public IPs. If 'present', the module will not create a second
+        public IP if one already exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ protocol: TCP
+ ports:
+ - 80
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcPublicIp(object):
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+ params = self.module.params
+ server_ids = params['server_ids']
+ ports = params['ports']
+ protocol = params['protocol']
+ state = params['state']
+
+ if state == 'present':
+ changed, changed_server_ids, requests = self.ensure_public_ip_present(
+ server_ids=server_ids, protocol=protocol, ports=ports)
+ elif state == 'absent':
+ changed, changed_server_ids, requests = self.ensure_public_ip_absent(
+ server_ids=server_ids)
+ else:
+ return self.module.fail_json(msg="Unknown State: " + state)
+ self._wait_for_requests_to_complete(requests)
+ return self.module.exit_json(changed=changed,
+ server_ids=changed_server_ids)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
+ ports=dict(type='list'),
+ wait=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ def ensure_public_ip_present(self, server_ids, protocol, ports):
+ """
+        Ensure each of the given servers has a public IP available
+ :param server_ids: the list of server ids
+ :param protocol: the ip protocol
+ :param ports: the list of ports to expose
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
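+        # Only servers that currently have no public IP are changed, which
+        # keeps state=present idempotent: a second public IP is never added.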
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) == 0]
+ ports_to_expose = [{'protocol': protocol, 'port': port}
+ for port in ports]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._add_publicip_to_server(server, ports_to_expose)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _add_publicip_to_server(self, server, ports_to_expose):
+ result = None
+ try:
+ result = server.PublicIPs().Add(ports_to_expose)
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to add public IP to the server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_public_ip_absent(self, server_ids):
+ """
+        Ensure any public IPs are removed from the given servers
+ :param server_ids: the list of server ids
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) > 0]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._remove_publicip_from_server(server)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _remove_publicip_from_server(self, server):
+ result = None
+ try:
+ for ip_address in server.PublicIPs().public_ips:
+ result = ip_address.Delete()
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to remove public IP from the server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process public ip request')
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
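+    # For illustration only, not executed by the module: a hedged sketch of how
+    # the CLC_V2_API_TOKEN read above can be generated via the login endpoint
+    # referenced in the module notes. The response field names 'bearerToken'
+    # and 'accountAlias' are assumptions, not confirmed by this module.
+    #
+    #   import os
+    #   import requests
+    #
+    #   resp = requests.post('https://api.ctl.io/v2/authentication/login',
+    #                        json={'username': os.environ['CLC_V2_API_USERNAME'],
+    #                              'password': os.environ['CLC_V2_API_PASSWD']})
+    #   resp.raise_for_status()
+    #   body = resp.json()
+    #   os.environ['CLC_V2_API_TOKEN'] = body['bearerToken']
+    #   os.environ['CLC_ACCT_ALIAS'] = body['accountAlias']
+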
+ def _get_servers_from_clc(self, server_ids, message):
+ """
+        Get the list of servers from the CLC API
+ """
+ try:
+ return self.clc.v2.Servers(server_ids).servers
+ except CLCException as exception:
+ self.module.fail_json(msg=message + ': %s' % exception)
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcPublicIp._define_module_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_public_ip = ClcPublicIp(module)
+ clc_public_ip.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py
new file mode 100644
index 00000000..6b7e9c4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py
@@ -0,0 +1,1557 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server
+short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
+options:
+ additional_disks:
+ description:
+ - The list of additional disks for the server
+ type: list
+ default: []
+ add_public_ip:
+ description:
+      - Whether to add a public IP to the server
+ type: bool
+ default: 'no'
+ alias:
+ description:
+ - The account alias to provision the servers under.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
+ type: str
+ count:
+ description:
+ - The number of servers to build (mutually exclusive with exact_count)
+ default: 1
+ type: int
+ count_group:
+ description:
+      - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
+ type: str
+ cpu:
+ description:
+ - How many CPUs to provision on the server
+ default: 1
+ type: int
+ cpu_autoscale_policy_id:
+ description:
+ - The autoscale policy to assign to the server.
+ type: str
+ custom_fields:
+ description:
+ - The list of custom fields to set on the server.
+ type: list
+ default: []
+ description:
+ description:
+ - The description to set for the server.
+ type: str
+ exact_count:
+ description:
+      - Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group,
+ creating and deleting them to reach that count. Requires count_group to be set.
+ type: int
+ group:
+ description:
+ - The Server Group to create servers under.
+ type: str
+ default: 'Default Group'
+ ip_address:
+ description:
+ - The IP Address for the server. One is assigned if not provided.
+ type: str
+ location:
+ description:
+ - The Datacenter to create servers in.
+ type: str
+ managed_os:
+ description:
+ - Whether to create the server as 'Managed' or not.
+ type: bool
+ default: 'no'
+ required: False
+ memory:
+ description:
+ - Memory in GB.
+ type: int
+ default: 1
+ name:
+ description:
+      - A 1 to 6 character identifier to use for the server. This is required when state is 'present'.
+ type: str
+ network_id:
+ description:
+ - The network UUID on which to create servers.
+ type: str
+ packages:
+ description:
+      - The list of blueprint packages to run on the server after it is created.
+ type: list
+ default: []
+ password:
+ description:
+ - Password for the administrator / root user
+ type: str
+ primary_dns:
+ description:
+ - Primary DNS used by the server.
+ type: str
+ public_ip_protocol:
+ description:
+      - The protocol to use for the public IP if add_public_ip is set to True.
+ type: str
+ default: 'TCP'
+ choices: ['TCP', 'UDP', 'ICMP']
+ public_ip_ports:
+ description:
+      - A list of ports to allow on the firewall to the server's public IP, if add_public_ip is set to True.
+ type: list
+ default: []
+ secondary_dns:
+ description:
+ - Secondary DNS used by the server.
+ type: str
+ server_ids:
+ description:
+ - Required for started, stopped, and absent states.
+        A list of server Ids to ensure are started, stopped, or absent.
+ type: list
+ default: []
+ source_server_password:
+ description:
+ - The password for the source server if a clone is specified.
+ type: str
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent', 'started', 'stopped']
+ storage_type:
+ description:
+ - The type of storage to attach to the server.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale']
+ template:
+ description:
+ - The template to use for server creation. Will search for a template if a partial string is provided.
+        This is required when state is 'present'.
+ type: str
+ ttl:
+ description:
+ - The time to live for the server in seconds. The server will be deleted when this time expires.
+ type: str
+ type:
+ description:
+ - The type of server to create.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale', 'bareMetal']
+ configuration_id:
+ description:
+ - Only required for bare metal servers.
+ Specifies the identifier for the specific configuration type of bare metal server to deploy.
+ type: str
+ os_type:
+ description:
+ - Only required for bare metal servers.
+ Specifies the OS to provision with the bare metal server.
+ type: str
+ choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Provision a single Ubuntu Server
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ count: 1
+ group: Default Group
+ state: present
+
+- name: Ensure 'Default Group' has exactly 5 servers
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ exact_count: 5
+ count_group: Default Group
+ group: Default Group
+
+- name: Stop a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: stopped
+
+- name: Start a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: started
+
+- name: Delete a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+partially_created_server_ids:
+ description: The list of server ids that are partially created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects returned from CLC
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ changed = False
+ new_server_ids = []
+ server_dict_array = []
+
+ self._set_clc_credentials_from_env()
+ self.module.params = self._validate_module_params(
+ self.clc,
+ self.module)
+ p = self.module.params
+ state = p.get('state')
+
+ #
+ # Handle each state
+ #
+ partial_servers_ids = []
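+        # Servers that are created but cannot be fully configured (for example,
+        # the public IP or alert policy attachment fails) are reported
+        # separately below as partially_created_server_ids.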
+ if state == 'absent':
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to delete: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._delete_servers(module=self.module,
+ clc=self.clc,
+ server_ids=server_ids)
+
+ elif state in ('started', 'stopped'):
+ server_ids = p.get('server_ids')
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of servers to run: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._start_stop_servers(self.module,
+ self.clc,
+ server_ids)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not p.get('template') and p.get('type') != 'bareMetal':
+ return self.module.fail_json(
+ msg='template parameter is required for new instance')
+
+ if p.get('exact_count') is None:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._create_servers(self.module,
+ self.clc)
+ else:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._enforce_count(self.module,
+ self.clc)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=new_server_ids,
+ partially_created_server_ids=partial_servers_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ template=dict(),
+ group=dict(default='Default Group'),
+ network_id=dict(),
+ location=dict(default=None),
+ cpu=dict(default=1, type='int'),
+ memory=dict(default=1, type='int'),
+ alias=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ip_address=dict(default=None),
+ storage_type=dict(
+ default='standard',
+ choices=[
+ 'standard',
+ 'hyperscale']),
+ type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
+ primary_dns=dict(default=None),
+ secondary_dns=dict(default=None),
+ additional_disks=dict(type='list', default=[]),
+ custom_fields=dict(type='list', default=[]),
+ ttl=dict(default=None),
+ managed_os=dict(type='bool', default=False),
+ description=dict(default=None),
+ source_server_password=dict(default=None, no_log=True),
+ cpu_autoscale_policy_id=dict(default=None),
+ anti_affinity_policy_id=dict(default=None),
+ anti_affinity_policy_name=dict(default=None),
+ alert_policy_id=dict(default=None),
+ alert_policy_name=dict(default=None),
+ packages=dict(type='list', default=[]),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'started',
+ 'stopped']),
+ count=dict(type='int', default=1),
+ exact_count=dict(type='int', default=None),
+ count_group=dict(),
+ server_ids=dict(type='list', default=[]),
+ add_public_ip=dict(type='bool', default=False),
+ public_ip_protocol=dict(
+ default='TCP',
+ choices=[
+ 'TCP',
+ 'UDP',
+ 'ICMP']),
+ public_ip_ports=dict(type='list', default=[]),
+ configuration_id=dict(default=None),
+ os_type=dict(default=None,
+ choices=[
+ 'redHat6_64Bit',
+ 'centOS6_64Bit',
+ 'windows2012R2Standard_64Bit',
+ 'ubuntu14_64Bit'
+ ]),
+ wait=dict(type='bool', default=True))
+
+ mutually_exclusive = [
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name'],
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
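+    # The spec above bundles argument_spec and mutually_exclusive in a single
+    # dictionary; main() expands it directly with
+    # AnsibleModule(supports_check_mode=True, **argument_dict).
+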
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _validate_module_params(clc, module):
+ """
+ Validate the module params, and lookup default values.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: dictionary of validated params
+ """
+ params = module.params
+ datacenter = ClcServer._find_datacenter(clc, module)
+
+ ClcServer._validate_types(module)
+ ClcServer._validate_name(module)
+
+ params['alias'] = ClcServer._find_alias(clc, module)
+ params['cpu'] = ClcServer._find_cpu(clc, module)
+ params['memory'] = ClcServer._find_memory(clc, module)
+ params['description'] = ClcServer._find_description(module)
+ params['ttl'] = ClcServer._find_ttl(clc, module)
+ params['template'] = ClcServer._find_template_id(module, datacenter)
+ params['group'] = ClcServer._find_group(module, datacenter).id
+ params['network_id'] = ClcServer._find_network_id(module, datacenter)
+ params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
+ clc,
+ module)
+ params['alert_policy_id'] = ClcServer._find_alert_policy_id(
+ clc,
+ module)
+
+ return params
+
+ @staticmethod
+ def _find_datacenter(clc, module):
+ """
+ Find the datacenter by calling the CLC API.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Datacenter instance
+ """
+ location = module.params.get('location')
+ try:
+ if not location:
+ account = clc.v2.Account()
+ location = account.data.get('primaryDataCenter')
+ data_center = clc.v2.Datacenter(location)
+ return data_center
+ except CLCException:
+ module.fail_json(msg="Unable to find location: {0}".format(location))
+
+ @staticmethod
+ def _find_alias(clc, module):
+ """
+ Find or Validate the Account Alias by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Account instance
+ """
+ alias = module.params.get('alias')
+ if not alias:
+ try:
+ alias = clc.v2.Account.GetAlias()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to find account alias. {0}'.format(
+ ex.message
+ ))
+ return alias
+
+ @staticmethod
+ def _find_cpu(clc, module):
+ """
+ Find or validate the CPU value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for CPU
+ """
+ cpu = module.params.get('cpu')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not cpu and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("cpu"):
+ cpu = group.Defaults("cpu")
+ else:
+ module.fail_json(
+ msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
+ return cpu
+
+ @staticmethod
+ def _find_memory(clc, module):
+ """
+ Find or validate the Memory value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for Memory
+ """
+ memory = module.params.get('memory')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not memory and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("memory"):
+ memory = group.Defaults("memory")
+ else:
+ module.fail_json(msg=str(
+ "Can\'t determine a default memory value. Please provide a value for memory."))
+ return memory
+
+ @staticmethod
+ def _find_description(module):
+ """
+ Set the description module param to name if description is blank
+ :param module: the module to validate
+ :return: string description
+ """
+ description = module.params.get('description')
+ if not description:
+ description = module.params.get('name')
+ return description
+
+ @staticmethod
+ def _validate_types(module):
+ """
+ Validate that type and storage_type are set appropriately, and fail if not
+ :param module: the module to validate
+ :return: none
+ """
+ state = module.params.get('state')
+ server_type = module.params.get(
+ 'type').lower() if module.params.get('type') else None
+ storage_type = module.params.get(
+ 'storage_type').lower() if module.params.get('storage_type') else None
+
+ if state == "present":
+ if server_type == "standard" and storage_type not in (
+ "standard", "premium"):
+ module.fail_json(
+ msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
+
+ if server_type == "hyperscale" and storage_type != "hyperscale":
+ module.fail_json(
+ msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
+
+ @staticmethod
+ def _validate_name(module):
+ """
+ Validate that name is the correct length if provided, fail if it's not
+ :param module: the module to validate
+ :return: none
+ """
+ server_name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present' and (
+ len(server_name) < 1 or len(server_name) > 6):
+ module.fail_json(msg=str(
+ "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
+
+ @staticmethod
+ def _find_ttl(clc, module):
+ """
+ Validate that TTL is > 3600 if set, and fail if not
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: validated ttl
+ """
+ ttl = module.params.get('ttl')
+
+ if ttl:
+ if ttl <= 3600:
+ return module.fail_json(msg=str("Ttl cannot be <= 3600"))
+ else:
+ ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
+ return ttl
+
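+    # For illustration only, not executed by the module: a hedged sketch of the
+    # relative-seconds-to-Zulu conversion delegated above to
+    # clc.v2.time_utils.SecondsToZuluTS, assuming that helper emits a UTC
+    # ISO-8601 timestamp with a trailing 'Z':
+    #
+    #   import time
+    #   from datetime import datetime
+    #
+    #   def seconds_to_zulu_ts(epoch_seconds):
+    #       return datetime.utcfromtimestamp(epoch_seconds).strftime(
+    #           '%Y-%m-%dT%H:%M:%SZ')
+    #
+    #   # e.g. for a ttl of 7200 seconds from now:
+    #   # seconds_to_zulu_ts(int(time.time()) + 7200)
+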
+ @staticmethod
+ def _find_template_id(module, datacenter):
+ """
+ Find the template id by calling the CLC API.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for the template
+ :return: a valid clc template id
+ """
+ lookup_template = module.params.get('template')
+ state = module.params.get('state')
+ type = module.params.get('type')
+ result = None
+
+ if state == 'present' and type != 'bareMetal':
+ try:
+ result = datacenter.Templates().Search(lookup_template)[0].id
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a template: " +
+ lookup_template +
+ " in location: " +
+ datacenter.id))
+ return result
+
+ @staticmethod
+ def _find_network_id(module, datacenter):
+ """
+ Validate the provided network id or return a default.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for a network id
+ :return: a valid network id
+ """
+ network_id = module.params.get('network_id')
+
+ if not network_id:
+ try:
+ network_id = datacenter.Networks().networks[0].id
+ # -- added for clc-sdk 2.23 compatibility
+ # datacenter_networks = clc_sdk.v2.Networks(
+ # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
+ # network_id = datacenter_networks.networks[0].id
+ # -- end
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a network in location: " +
+ datacenter.id))
+
+ return network_id
+
+ @staticmethod
+ def _find_aa_policy_id(clc, module):
+ """
+        Validate that an anti-affinity policy exists for the given name and fail if not
+        :param clc: the clc-sdk instance
+        :param module: the module to validate
+        :return: aa_policy_id: the anti-affinity policy id for the given name.
+ """
+ aa_policy_id = module.params.get('anti_affinity_policy_id')
+ aa_policy_name = module.params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ alias = module.params.get('alias')
+ aa_policy_id = ClcServer._get_anti_affinity_policy_id(
+ clc,
+ module,
+ alias,
+ aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+                    msg='No anti affinity policy was found with policy name: %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _find_alert_policy_id(clc, module):
+ """
+        Validate that an alert policy exists for the given name and fail if not
+        :param clc: the clc-sdk instance
+        :param module: the module to validate
+        :return: alert_policy_id: the alert policy id for the given name.
+ """
+ alert_policy_id = module.params.get('alert_policy_id')
+ alert_policy_name = module.params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alias = module.params.get('alias')
+ alert_policy_id = ClcServer._get_alert_policy_id_by_name(
+ clc=clc,
+ module=module,
+ alias=alias,
+ alert_policy_name=alert_policy_name
+ )
+ if not alert_policy_id:
+ module.fail_json(
+                    msg='No alert policy exists with name: %s' % alert_policy_name)
+ return alert_policy_id
+
+ def _create_servers(self, module, clc, override_count=None):
+ """
+ Create New Servers in CLC cloud
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created
+ """
+ p = module.params
+ request_list = []
+ servers = []
+ server_dict_array = []
+ created_server_ids = []
+ partial_created_servers_ids = []
+
+ add_public_ip = p.get('add_public_ip')
+ public_ip_protocol = p.get('public_ip_protocol')
+ public_ip_ports = p.get('public_ip_ports')
+
+ params = {
+ 'name': p.get('name'),
+ 'template': p.get('template'),
+ 'group_id': p.get('group'),
+ 'network_id': p.get('network_id'),
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'alias': p.get('alias'),
+ 'password': p.get('password'),
+ 'ip_address': p.get('ip_address'),
+ 'storage_type': p.get('storage_type'),
+ 'type': p.get('type'),
+ 'primary_dns': p.get('primary_dns'),
+ 'secondary_dns': p.get('secondary_dns'),
+ 'additional_disks': p.get('additional_disks'),
+ 'custom_fields': p.get('custom_fields'),
+ 'ttl': p.get('ttl'),
+ 'managed_os': p.get('managed_os'),
+ 'description': p.get('description'),
+ 'source_server_password': p.get('source_server_password'),
+ 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'packages': p.get('packages'),
+ 'configuration_id': p.get('configuration_id'),
+ 'os_type': p.get('os_type')
+ }
+
+ count = override_count if override_count else p.get('count')
+
+ changed = False if count == 0 else True
+
+ if not changed:
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+ for i in range(0, count):
+ if not module.check_mode:
+ req = self._create_clc_server(clc=clc,
+ module=module,
+ server_params=params)
+ server = req.requests[0].Server()
+ request_list.append(req)
+ servers.append(server)
+
+ self._wait_for_requests(module, request_list)
+ self._refresh_servers(module, servers)
+
+ ip_failed_servers = self._add_public_ip_to_servers(
+ module=module,
+ should_add_public_ip=add_public_ip,
+ servers=servers,
+ public_ip_protocol=public_ip_protocol,
+ public_ip_ports=public_ip_ports)
+ ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
+ module=module,
+ servers=servers)
+
+ for server in servers:
+ if server in ip_failed_servers or server in ap_failed_servers:
+ partial_created_servers_ids.append(server.id)
+ else:
+ # reload server details
+ server = clc.v2.Server(server.id)
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+
+ if add_public_ip and len(server.PublicIPs().public_ips) > 0:
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ created_server_ids.append(server.id)
+ server_dict_array.append(server.data)
+
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+
+ def _enforce_count(self, module, clc):
+ """
+        Enforce that the provided group contains the exact number of servers requested.
+        Creates or deletes servers as necessary.
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created or deleted
+ """
+ p = module.params
+ changed = False
+ count_group = p.get('count_group')
+ datacenter = ClcServer._find_datacenter(clc, module)
+ exact_count = p.get('exact_count')
+ server_dict_array = []
+ partial_servers_ids = []
+ changed_server_ids = []
+
+ # fail here if the exact count was specified without filtering
+        # on a group, as this may lead to an undesired removal of instances
+ if exact_count and count_group is None:
+ return module.fail_json(
+ msg="you must use the 'count_group' option with exact_count")
+
+ servers, running_servers = ClcServer._find_running_servers_by_group(
+ module, datacenter, count_group)
+
+ if len(running_servers) == exact_count:
+ changed = False
+
+ elif len(running_servers) < exact_count:
+ to_create = exact_count - len(running_servers)
+ server_dict_array, changed_server_ids, partial_servers_ids, changed \
+ = self._create_servers(module, clc, override_count=to_create)
+
+ for server in server_dict_array:
+ running_servers.append(server)
+
+ elif len(running_servers) > exact_count:
+ to_remove = len(running_servers) - exact_count
+ all_server_ids = sorted([x.id for x in running_servers])
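+            # The ids are sorted so repeated runs trim the same servers: the
+            # lexicographically first ones are always removed.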
+ remove_ids = all_server_ids[0:to_remove]
+
+ (changed, server_dict_array, changed_server_ids) \
+ = ClcServer._delete_servers(module, clc, remove_ids)
+
+ return server_dict_array, changed_server_ids, partial_servers_ids, changed
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ @staticmethod
+ def _add_public_ip_to_servers(
+ module,
+ should_add_public_ip,
+ servers,
+ public_ip_protocol,
+ public_ip_ports):
+ """
+ Create a public IP for servers
+ :param module: the AnsibleModule object
+ :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
+ :param servers: List of servers to add public ips to
+ :param public_ip_protocol: a protocol to allow for the public ips
+ :param public_ip_ports: list of ports to allow for the public ips
+ :return: none
+ """
+ failed_servers = []
+ if not should_add_public_ip:
+ return failed_servers
+
+        ports_lst = []
+        request_list = []
+
+        for port in public_ip_ports:
+            ports_lst.append(
+                {'protocol': public_ip_protocol, 'port': port})
+        if not module.check_mode:
+            for server in servers:
+                try:
+                    request = server.PublicIPs().Add(ports_lst)
+                    request_list.append(request)
+                except APIFailedResponse:
+                    # Record the failing server and continue, so one failure
+                    # does not leave the remaining servers unprocessed (and
+                    # unreported) as it would if the whole loop were wrapped
+                    # in a single try block.
+                    failed_servers.append(server)
+ ClcServer._wait_for_requests(module, request_list)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_servers(clc, module, servers):
+ """
+ Associate the alert policy to servers
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param servers: List of servers to add alert policy to
+ :return: failed_servers: the list of servers which failed while associating alert policy
+ """
+ failed_servers = []
+ p = module.params
+ alert_policy_id = p.get('alert_policy_id')
+ alias = p.get('alias')
+
+ if alert_policy_id and not module.check_mode:
+ for server in servers:
+ try:
+ ClcServer._add_alert_policy_to_server(
+ clc=clc,
+ alias=alias,
+ server_id=server.id,
+ alert_policy_id=alert_policy_id)
+ except CLCException:
+ failed_servers.append(server)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, alias, server_id, alert_policy_id):
+ """
+ Associate an alert policy to a clc server
+ :param clc: the clc-sdk instance to use
+ :param alias: the clc account alias
+ :param server_id: The clc server id
+ :param alert_policy_id: the alert policy id to be associated to the server
+ :return: none
+ """
+ try:
+ clc.v2.API.Call(
+ method='POST',
+ url='servers/%s/%s/alertPolicies' % (alias, server_id),
+ payload=json.dumps(
+ {
+ 'id': alert_policy_id
+ }))
+ except APIFailedResponse as e:
+ raise CLCException(
+ 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
+ server_id, str(e.response_text)))
+
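+    # The APIFailedResponse above is re-raised as a CLCException so that the
+    # per-server loop in _add_alert_policy_to_servers can catch it and report
+    # the server as partially created instead of aborting the whole run.
+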
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ Returns the alert policy id for the given alert policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the clc account alias
+ :param alert_policy_name: the name of the alert policy
+ :return: alert_policy_id: the alert policy id
+ """
+ alert_policy_id = None
+ policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
+ if not policies:
+ return alert_policy_id
+ for policy in policies.get('items'):
+ if policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy.get('id')
+ else:
+ return module.fail_json(
+                        msg='Multiple alert policies were found with policy name: %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _delete_servers(module, clc, server_ids):
+ """
+ Delete the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to delete
+ :return: a list of dictionaries with server information about the servers that were deleted
+ """
+ terminated_server_ids = []
+ server_dict_array = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if not module.check_mode:
+ request_list.append(server.Delete())
+ ClcServer._wait_for_requests(module, request_list)
+
+ for server in servers:
+ terminated_server_ids.append(server.id)
+
+ return True, server_dict_array, terminated_server_ids
+
+ @staticmethod
+ def _start_stop_servers(module, clc, server_ids):
+ """
+ Start or Stop the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to start or stop
+ :return: a list of dictionaries with server information about the servers that were started or stopped
+ """
+ p = module.params
+ state = p.get('state')
+ changed = False
+ changed_servers = []
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if server.powerState != state:
+ changed_servers.append(server)
+ if not module.check_mode:
+ request_list.append(
+ ClcServer._change_server_power_state(
+ module,
+ server,
+ state))
+ changed = True
+
+ ClcServer._wait_for_requests(module, request_list)
+ ClcServer._refresh_servers(module, changed_servers)
+
+ for server in set(changed_servers + servers):
+ try:
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ except (KeyError, IndexError):
+ pass
+
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ @staticmethod
+ def _change_server_power_state(module, server, state):
+ """
+ Change the server powerState
+ :param module: the module to check for intended state
+ :param server: the server to start or stop
+ :param state: the intended powerState for the server
+ :return: the request object from clc-sdk call
+ """
+ result = None
+ try:
+ if state == 'started':
+ result = server.PowerOn()
+ else:
+ # Try to shut down the server and fall back to power off when unable to shut down.
+ result = server.ShutDown()
+ if result and hasattr(result, 'requests') and result.requests[0]:
+ return result
+ else:
+ result = server.PowerOff()
+ except CLCException:
+ module.fail_json(
+ msg='Unable to change power state for server {0}'.format(
+ server.id))
+ return result
+
+ @staticmethod
+ def _find_running_servers_by_group(module, datacenter, count_group):
+ """
+ Find a list of running servers in the provided group
+ :param module: the AnsibleModule object
+ :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
+ :param count_group: the group to count the servers
+ :return: list of servers, and list of running servers
+ """
+ group = ClcServer._find_group(
+ module=module,
+ datacenter=datacenter,
+ lookup_group=count_group)
+
+ servers = group.Servers().Servers()
+ running_servers = []
+
+ for server in servers:
+ if server.status == 'active' and server.powerState == 'started':
+ running_servers.append(server)
+
+ return servers, running_servers
+
+ @staticmethod
+ def _find_group(module, datacenter, lookup_group=None):
+ """
+ Find a server group in a datacenter by calling the CLC API
+ :param module: the AnsibleModule instance
+ :param datacenter: clc-sdk.Datacenter instance to search for the group
+ :param lookup_group: string name of the group to search for
+ :return: clc-sdk.Group instance
+ """
+ if not lookup_group:
+ lookup_group = module.params.get('group')
+ try:
+ return datacenter.Groups().Get(lookup_group)
+ except CLCException:
+ pass
+
+        # The search above only acts on the top-level groups, so fall back
+        # to a recursive search of the whole group tree.
+ result = ClcServer._find_group_recursive(
+ module,
+ datacenter.Groups(),
+ lookup_group)
+
+ if result is None:
+ module.fail_json(
+ msg=str(
+ "Unable to find group: " +
+ lookup_group +
+ " in location: " +
+ datacenter.id))
+
+ return result
+
+ @staticmethod
+ def _find_group_recursive(module, group_list, lookup_group):
+ """
+ Find a server group by recursively walking the tree
+ :param module: the AnsibleModule instance to use
+ :param group_list: a list of groups to search
+ :param lookup_group: the group to look for
+        :return: the matching clc-sdk.Group instance, or None if not found
+ """
+ result = None
+ for group in group_list.groups:
+ subgroups = group.Subgroups()
+ try:
+ return subgroups.Get(lookup_group)
+ except CLCException:
+ result = ClcServer._find_group_recursive(
+ module,
+ subgroups,
+ lookup_group)
+
+ if result is not None:
+ break
+
+ return result
+
+ @staticmethod
+ def _create_clc_server(
+ clc,
+ module,
+ server_params):
+ """
+ Call the CLC Rest API to Create a Server
+ :param clc: the clc-python-sdk instance to use
+ :param module: the AnsibleModule instance to use
+ :param server_params: a dictionary of params to use to create the servers
+ :return: clc-sdk.Request object linked to the queued server request
+ """
+
+ try:
+ res = clc.v2.API.Call(
+ method='POST',
+ url='servers/%s' %
+ (server_params.get('alias')),
+ payload=json.dumps(
+ {
+ 'name': server_params.get('name'),
+ 'description': server_params.get('description'),
+ 'groupId': server_params.get('group_id'),
+ 'sourceServerId': server_params.get('template'),
+ 'isManagedOS': server_params.get('managed_os'),
+ 'primaryDNS': server_params.get('primary_dns'),
+ 'secondaryDNS': server_params.get('secondary_dns'),
+ 'networkId': server_params.get('network_id'),
+ 'ipAddress': server_params.get('ip_address'),
+ 'password': server_params.get('password'),
+ 'sourceServerPassword': server_params.get('source_server_password'),
+ 'cpu': server_params.get('cpu'),
+ 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
+ 'memoryGB': server_params.get('memory'),
+ 'type': server_params.get('type'),
+ 'storageType': server_params.get('storage_type'),
+ 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
+ 'customFields': server_params.get('custom_fields'),
+ 'additionalDisks': server_params.get('additional_disks'),
+ 'ttl': server_params.get('ttl'),
+ 'packages': server_params.get('packages'),
+ 'configurationId': server_params.get('configuration_id'),
+ 'osType': server_params.get('os_type')}))
+
+ result = clc.v2.Requests(res)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
+ server_params.get('name'),
+ ex.response_text
+ ))
+
+        #
+        # Patch the Request object so that it returns a valid server.
+        #
+
+ # Find the server's UUID from the API response
+ server_uuid = [obj['id']
+ for obj in res['links'] if obj['rel'] == 'self'][0]
+
+        # Replace the request's Server method with a closure over
+        # _find_server_by_uuid_w_retry so that callers get a fully formed
+        # clc-sdk.Server even though the raw API response only carries a UUID.
+ result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
+ clc,
+ module,
+ server_uuid,
+ server_params.get('alias'))
+
+ return result
+
+ @staticmethod
+ def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
+ """
+        Retrieve the anti-affinity policy id for the given policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
+ alias, ex.response_text))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+                        msg='Multiple anti affinity policies were found with policy name: %s' % aa_policy_name)
+ return aa_policy_id
+
+ #
+    # This is the function that gets patched onto the Request object's Server method using a lambda closure
+ #
+
+ @staticmethod
+ def _find_server_by_uuid_w_retry(
+ clc, module, svr_uuid, alias=None, retries=5, back_out=2):
+ """
+ Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+        :param svr_uuid: UUID of the server
+        :param alias: the Account Alias to search
+        :param retries: the number of retry attempts to make before failing. default is 5
+        :param back_out: the initial back-off delay in seconds, doubled after each retry
+        :return: a clc-sdk.Server instance
+ """
+ if not alias:
+ alias = clc.v2.Account.GetAlias()
+
+ # Wait and retry if the api returns a 404
+ while True:
+ retries -= 1
+ try:
+ server_obj = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s?uuid=true' %
+ (alias, svr_uuid))
+ server_id = server_obj['id']
+ server = clc.v2.Server(
+ id=server_id,
+ alias=alias,
+ server_obj=server_obj)
+ return server
+
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ return module.fail_json(
+ msg='A failure response was received from CLC API when '
+ 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
+ (svr_uuid, e.response_status_code, e.message))
+ if retries == 0:
+ return module.fail_json(
+ msg='Unable to reach the CLC API after 5 attempts')
+ time.sleep(back_out)
+ back_out *= 2
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_server = ClcServer(module)
+ clc_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py
new file mode 100644
index 00000000..1d289f66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server_snapshot
+short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - The list of CLC server Ids.
+ type: list
+ required: True
+ expiration_days:
+ description:
+ - The number of days to keep the server snapshot before it expires.
+ type: int
+ default: 7
+ required: False
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'restore']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ type: str
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples.
+
+- name: Create server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ expiration_days: 10
+ wait: True
+ state: present
+
+- name: Restore server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: restore
+
+- name: Delete server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+    description: The list of server IDs that were changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcSnapshot:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ server_ids = p['server_ids']
+ expiration_days = p['expiration_days']
+ state = p['state']
+ request_list = []
+ changed = False
+ changed_servers = []
+
+ self._set_clc_credentials_from_env()
+ if state == 'present':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_present(
+ server_ids=server_ids,
+ expiration_days=expiration_days)
+ elif state == 'absent':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
+ server_ids=server_ids)
+ elif state == 'restore':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
+ server_ids=server_ids)
+
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(
+ changed=changed,
+ server_ids=changed_servers)
+
+ def ensure_server_snapshot_present(self, server_ids, expiration_days):
+ """
+ Ensures the given set of server_ids have the snapshots created
+ :param server_ids: The list of server_ids to create the snapshot
+ :param expiration_days: The number of days to keep the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server IDs that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) == 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._create_server_snapshot(server, expiration_days)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _create_server_snapshot(self, server, expiration_days):
+ """
+ Create the snapshot for the CLC server
+ :param server: the CLC server object
+ :param expiration_days: The number of days to keep the snapshot
+ :return: the create request object from CLC API Call
+ """
+ result = None
+ try:
+ result = server.CreateSnapshot(
+ delete_existing=True,
+ expiration_days=expiration_days)
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to create snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_absent(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots removed
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server IDs that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._delete_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _delete_server_snapshot(self, server):
+ """
+ Delete snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the delete snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.DeleteSnapshot()
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to delete snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_restore(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots restored
+        :param server_ids: The list of server_ids to restore the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server IDs that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._restore_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _restore_server_snapshot(self, server):
+ """
+ Restore snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the restore snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.RestoreSnapshot()
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to restore snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process server snapshot request')
+
+ @staticmethod
+ def define_argument_spec():
+ """
+        This function defines the argument spec dictionary required by
+        the snapshot module
+        :return: the argument spec dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ expiration_days=dict(default=7, type='int'),
+            wait=dict(default=True, type='bool'),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'restore']),
+ )
+ return argument_spec
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: The error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+                    "environment variables (or CLC_V2_API_TOKEN and CLC_ACCT_ALIAS)")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcSnapshot.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_snapshot = ClcSnapshot(module)
+ clc_snapshot.process_request()
+
+
+if __name__ == '__main__':
+ main()
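
The three ensure_server_snapshot_* methods above share one shape: select the servers whose snapshot state differs from the request, mutate only outside check mode, and report (changed, request_list, changed_ids). A condensed sketch of that shape, assuming each item exposes an id attribute (all names here are illustrative):

    def ensure(items, needs_change, apply_change, check_mode):
        # Decide what would change before mutating anything, so check
        # mode can report an accurate 'changed' flag without side effects.
        to_change = [item for item in items if needs_change(item)]
        requests = []
        if not check_mode:
            requests = [apply_change(item) for item in to_change]
        return bool(to_change), requests, [item.id for item in to_change]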
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py
new file mode 100644
index 00000000..2187ceaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_network
+short_description: Create, update, and delete MCP 1.0 & 2.0 networks
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Create, update, and delete MCP 1.0 & 2.0 networks
+author: 'Aimon Bustardo (@aimonb)'
+options:
+ name:
+ description:
+ - The name of the network domain to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Additional description of the network domain.
+ required: false
+ type: str
+ service_plan:
+ description:
+ - The service plan, either "ESSENTIALS" or "ADVANCED".
+ - MCP 2.0 Only.
+ choices: [ESSENTIALS, ADVANCED]
+ default: ESSENTIALS
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create an MCP 1.0 network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA5
+ name: mynet
+
+- name: Create an MCP 2.0 network
+ community.general.dimensiondata_network:
+ region: na
+ mcp_user: my_user
+ mcp_password: my_password
+ location: NA9
+ name: mynet
+ service_plan: ADVANCED
+
+- name: Delete a network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA1
+ name: mynet
+ state: absent
+'''
+
+RETURN = '''
+network:
+ description: Dictionary describing the network.
+ returned: On success when I(state=present).
+ type: complex
+ contains:
+ id:
+ description: Network ID.
+ type: str
+ sample: "8c787000-a000-4050-a215-280893411a7d"
+ name:
+ description: Network name.
+ type: str
+ sample: "My network"
+ description:
+ description: Network description.
+ type: str
+ sample: "My network description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ status:
+ description: Network status. (MCP 2.0 only)
+ type: str
+ sample: NORMAL
+ private_net:
+ description: Private network subnet. (MCP 1.0 only)
+ type: str
+ sample: "10.2.3.0"
+ multicast:
+ description: Multicast enabled? (MCP 1.0 only)
+ type: bool
+ sample: false
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
+from ansible.module_utils._text import to_native
+
+if HAS_LIBCLOUD:
+ from libcloud.compute.base import NodeLocation
+ from libcloud.common.dimensiondata import DimensionDataAPIException
+
+
+class DimensionDataNetworkModule(DimensionDataModule):
+ """
+ The dimensiondata_network module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data network module.
+ """
+
+ super(DimensionDataNetworkModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.service_plan = self.module.params['service_plan']
+ self.state = self.module.params['state']
+
+ def state_present(self):
+ network = self._get_network()
+
+ if network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network already exists',
+ network=self._network_to_dict(network)
+ )
+
+ network = self._create_network()
+
+ self.module.exit_json(
+ changed=True,
+ msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
+ network=self._network_to_dict(network)
+ )
+
+ def state_absent(self):
+ network = self._get_network()
+
+ if not network:
+ self.module.exit_json(
+ changed=False,
+                msg='Network "%s" does not exist' % self.name
+ )
+
+ self._delete_network(network)
+
+ def _get_network(self):
+ if self.mcp_version == '1.0':
+ networks = self.driver.list_networks(location=self.location)
+ else:
+ networks = self.driver.ex_list_network_domains(location=self.location)
+
+ matched_network = [network for network in networks if network.name == self.name]
+ if matched_network:
+ return matched_network[0]
+
+ return None
+
+ def _network_to_dict(self, network):
+ network_dict = dict(
+ id=network.id,
+ name=network.name,
+ description=network.description
+ )
+
+ if isinstance(network.location, NodeLocation):
+ network_dict['location'] = network.location.id
+ else:
+ network_dict['location'] = network.location
+
+ if self.mcp_version == '1.0':
+ network_dict['private_net'] = network.private_net
+ network_dict['multicast'] = network.multicast
+ network_dict['status'] = None
+ else:
+ network_dict['private_net'] = None
+ network_dict['multicast'] = None
+ network_dict['status'] = network.status
+
+ return network_dict
+
+ def _create_network(self):
+
+ # Make sure service_plan argument is defined
+ if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
+ self.module.fail_json(
+ msg='service_plan required when creating network and location is MCP 2.0'
+ )
+
+ # Create network
+ try:
+ if self.mcp_version == '1.0':
+ network = self.driver.ex_create_network(
+ self.location,
+ self.name,
+ description=self.description
+ )
+ else:
+ network = self.driver.ex_create_network_domain(
+ self.location,
+ self.name,
+ self.module.params['service_plan'],
+ description=self.description
+ )
+ except DimensionDataAPIException as e:
+
+ self.module.fail_json(
+ msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ if self.module.params['wait'] is True:
+ network = self._wait_for_network_state(network.id, 'NORMAL')
+
+ return network
+
+ def _delete_network(self, network):
+ try:
+ if self.mcp_version == '1.0':
+ deleted = self.driver.ex_delete_network(network)
+ else:
+ deleted = self.driver.ex_delete_network_domain(network)
+
+ if deleted:
+ self.module.exit_json(
+ changed=True,
+ msg="Deleted network with id %s" % network.id
+ )
+
+            self.module.fail_json(
+                msg="Unexpected failure deleting network with id %s" % network.id
+            )
+
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+ msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ def _wait_for_network_state(self, net_id, state_to_wait_for):
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_network_domain,
+ self.module.params['wait_poll_interval'],
+ self.module.params['wait_time'],
+ net_id
+ )
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+                msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
+ exception=traceback.format_exc()
+ )
+
+
+def main():
+ module = DimensionDataNetworkModule()
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
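
The _wait_for_network_state helper above delegates to libcloud's wait_for_state; conceptually it is a bounded poll loop. A rough standalone equivalent, assuming fetch(obj_id) returns an object with a status attribute (simplified; no libcloud exception mapping):

    import time

    def wait_for_status(target, fetch, poll_interval, timeout, obj_id):
        # Poll until the object reports the target status or time runs out.
        deadline = time.time() + timeout
        while time.time() < deadline:
            obj = fetch(obj_id)
            if obj.status == target:
                return obj
            time.sleep(poll_interval)
        raise RuntimeError('%s did not reach state %s within %s seconds'
                           % (obj_id, target, timeout))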
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
new file mode 100644
index 00000000..26c621f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_vlan
+short_description: Manage a VLAN in a Cloud Control network domain.
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Manage VLANs in Cloud Control network domains.
+author: 'Adam Friedman (@tintoy)'
+options:
+ name:
+ description:
+ - The name of the target VLAN.
+ type: str
+ required: true
+ description:
+ description:
+ - A description of the VLAN.
+ type: str
+ network_domain:
+ description:
+ - The Id or name of the target network domain.
+ required: true
+ type: str
+ private_ipv4_base_address:
+ description:
+ - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
+ type: str
+ private_ipv4_prefix_size:
+ description:
+      - The size of the IPv4 address space, e.g. 24.
+      - Required if C(private_ipv4_base_address) is specified.
+ type: int
+ state:
+ description:
+ - The desired state for the target VLAN.
+ - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
+ choices: [present, absent, readonly]
+ default: present
+ type: str
+ allow_expand:
+ description:
+ - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
+ - If C(False), the module will fail under these conditions.
+ - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Add or update VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ description: A test VLAN
+ private_ipv4_base_address: 192.168.23.0
+ private_ipv4_prefix_size: 24
+ state: present
+ wait: yes
+
+- name: Read / get VLAN details
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ state: readonly
+ wait: yes
+
+- name: Delete a VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan_1
+ state: absent
+ wait: yes
+'''
+
+RETURN = '''
+vlan:
+ description: Dictionary describing the VLAN.
+ returned: On success when I(state) is 'present'
+ type: complex
+ contains:
+ id:
+ description: VLAN ID.
+ type: str
+ sample: "aaaaa000-a000-4050-a215-2808934ccccc"
+ name:
+ description: VLAN name.
+ type: str
+ sample: "My VLAN"
+ description:
+ description: VLAN description.
+ type: str
+ sample: "My VLAN description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ private_ipv4_base_address:
+ description: The base address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.0
+ private_ipv4_prefix_size:
+ description: The prefix size for the VLAN's private IPV4 network.
+ type: int
+ sample: 24
+ private_ipv4_gateway_address:
+ description: The gateway address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.1
+ private_ipv6_base_address:
+ description: The base address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:0
+ private_ipv6_prefix_size:
+ description: The prefix size for the VLAN's IPV6 network.
+ type: int
+ sample: 64
+ private_ipv6_gateway_address:
+ description: The gateway address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:1
+ status:
+ description: VLAN status.
+ type: str
+ sample: NORMAL
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
+
+try:
+ from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
+
+ HAS_LIBCLOUD = True
+
+except ImportError:
+ DimensionDataVlan = None
+
+ HAS_LIBCLOUD = False
+
+
+class DimensionDataVlanModule(DimensionDataModule):
+ """
+ The dimensiondata_vlan module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data VLAN module.
+ """
+
+ super(DimensionDataVlanModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(required=True, type='str'),
+ description=dict(default='', type='str'),
+ network_domain=dict(required=True, type='str'),
+ private_ipv4_base_address=dict(default='', type='str'),
+ private_ipv4_prefix_size=dict(default=0, type='int'),
+ allow_expand=dict(required=False, default=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent', 'readonly'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.network_domain_selector = self.module.params['network_domain']
+ self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
+ self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
+ self.state = self.module.params['state']
+ self.allow_expand = self.module.params['allow_expand']
+
+ if self.wait and self.state != 'present':
+ self.module.fail_json(
+ msg='The wait parameter is only supported when state is "present".'
+ )
+
+ def state_present(self):
+ """
+ Ensure that the target VLAN is present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ vlan = self._create_vlan(network_domain)
+ self.module.exit_json(
+ msg='Created VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+ else:
+ diff = VlanDiff(vlan, self.module.params)
+ if not diff.has_changes():
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+
+ return
+
+ try:
+ diff.ensure_legal_change()
+ except InvalidVlanChangeError as invalid_vlan_change:
+ self.module.fail_json(
+ msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
+ self.name, self.network_domain_selector, invalid_vlan_change
+ )
+ )
+
+ if diff.needs_expand() and not self.allow_expand:
+ self.module.fail_json(
+ msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
+ self.private_ipv4_prefix_size
+ ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
+ vlan.private_ipv4_range_size
+ ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
+ )
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ if diff.needs_edit():
+ vlan.name = self.name
+ vlan.description = self.description
+
+ self.driver.ex_update_vlan(vlan)
+
+ if diff.needs_expand():
+ vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
+ self.driver.ex_expand_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Updated VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ def state_readonly(self):
+ """
+ Read the target VLAN's state.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if vlan:
+ self.module.exit_json(
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+ else:
+ self.module.fail_json(
+ msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ )
+ )
+
+ def state_absent(self):
+ """
+ Ensure that the target VLAN is not present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=False
+ )
+
+ return
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ self._delete_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ def _get_vlan(self, network_domain):
+ """
+ Retrieve the target VLAN details from CloudControl.
+
+ :param network_domain: The target network domain.
+ :return: The VLAN, or None if the target VLAN was not found.
+ :rtype: DimensionDataVlan
+ """
+
+ vlans = self.driver.ex_list_vlans(
+ location=self.location,
+ network_domain=network_domain
+ )
+ matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
+ if matching_vlans:
+ return matching_vlans[0]
+
+ return None
+
+ def _create_vlan(self, network_domain):
+ vlan = self.driver.ex_create_vlan(
+ network_domain,
+ self.name,
+ self.private_ipv4_base_address,
+ self.description,
+ self.private_ipv4_prefix_size
+ )
+
+ if self.wait:
+ vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
+
+ return vlan
+
+ def _delete_vlan(self, vlan):
+ try:
+ self.driver.ex_delete_vlan(vlan)
+
+ # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
+ if self.wait:
+ self._wait_for_vlan_state(vlan, 'NOT_FOUND')
+
+ except DimensionDataAPIException as api_exception:
+ self.module.fail_json(
+ msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
+ vlan.id, api_exception.msg
+ )
+ )
+
+ def _wait_for_vlan_state(self, vlan, state_to_wait_for):
+ network_domain = self._get_network_domain()
+
+ wait_poll_interval = self.module.params['wait_poll_interval']
+ wait_time = self.module.params['wait_time']
+
+ # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
+
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_vlan,
+ wait_poll_interval,
+ wait_time,
+ vlan
+ )
+
+ except DimensionDataAPIException as api_exception:
+ if api_exception.code != 'RESOURCE_NOT_FOUND':
+ raise
+
+ return DimensionDataVlan(
+ id=vlan.id,
+ status='NOT_FOUND',
+ name='',
+ description='',
+ private_ipv4_range_address='',
+ private_ipv4_range_size=0,
+ ipv4_gateway='',
+ ipv6_range_address='',
+ ipv6_range_size=0,
+ ipv6_gateway='',
+ location=self.location,
+ network_domain=network_domain
+ )
+
+ def _get_network_domain(self):
+ """
+ Retrieve the target network domain from the Cloud Control API.
+
+ :return: The network domain.
+ """
+
+ try:
+ return self.get_network_domain(
+ self.network_domain_selector, self.location
+ )
+ except UnknownNetworkError:
+ self.module.fail_json(
+ msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
+ self.network_domain_selector, self.location
+ )
+ )
+
+ return None
+
+
+class InvalidVlanChangeError(Exception):
+ """
+ Error raised when an illegal change to VLAN state is attempted.
+ """
+
+ pass
+
+
+class VlanDiff(object):
+ """
+ Represents differences between VLAN information (from CloudControl) and module parameters.
+ """
+
+ def __init__(self, vlan, module_params):
+ """
+
+ :param vlan: The VLAN information from CloudControl.
+ :type vlan: DimensionDataVlan
+ :param module_params: The module parameters.
+ :type module_params: dict
+ """
+
+ self.vlan = vlan
+ self.module_params = module_params
+
+ self.name_changed = module_params['name'] != vlan.name
+ self.description_changed = module_params['description'] != vlan.description
+ self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
+ self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
+
+ # Is configured prefix size greater than or less than the actual prefix size?
+ private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
+ self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
+ self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
+
+ def has_changes(self):
+ """
+ Does the VlanDiff represent any changes between the VLAN and module configuration?
+
+        :return: True if there are changes; otherwise, False.
+ """
+
+ return self.needs_edit() or self.needs_expand()
+
+ def ensure_legal_change(self):
+ """
+ Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
+
+ - private_ipv4_base_address cannot be changed
+ - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
+
+ :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
+ """
+
+ # Cannot change base address for private IPv4 network.
+ if self.private_ipv4_base_address_changed:
+ raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
+
+ # Cannot shrink private IPv4 network (by increasing prefix size).
+ if self.private_ipv4_prefix_size_increased:
+ raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
+
+ def needs_edit(self):
+ """
+ Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
+
+ :return: True, if an Edit operation is required; otherwise, False.
+ """
+
+ return self.name_changed or self.description_changed
+
+ def needs_expand(self):
+ """
+ Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
+
+ The VLAN's network is expanded by reducing the size of its network prefix.
+
+ :return: True, if an Expand operation is required; otherwise, False.
+ """
+
+ return self.private_ipv4_prefix_size_decreased
+
+
+def vlan_to_dict(vlan):
+ return {
+ 'id': vlan.id,
+ 'name': vlan.name,
+ 'description': vlan.description,
+ 'location': vlan.location.id,
+ 'private_ipv4_base_address': vlan.private_ipv4_range_address,
+ 'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
+ 'private_ipv4_gateway_address': vlan.ipv4_gateway,
+ 'ipv6_base_address': vlan.ipv6_range_address,
+ 'ipv6_prefix_size': vlan.ipv6_range_size,
+ 'ipv6_gateway_address': vlan.ipv6_gateway,
+ 'status': vlan.status
+ }
+
+
+def main():
+ module = DimensionDataVlanModule()
+
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'readonly':
+ module.state_readonly()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
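
The VlanDiff rules above hinge on an inversion that is easy to misread: a numerically smaller prefix means a larger network, so decreasing private_ipv4_prefix_size is the only legal resize (an expand, gated by allow_expand). A toy restatement:

    def classify_prefix_change(current_size, requested_size):
        # Smaller prefix => more host bits => bigger network.
        if requested_size > current_size:
            return 'shrink'     # rejected by ensure_legal_change()
        if requested_size < current_size:
            return 'expand'     # allowed only with allow_expand=true
        return 'unchanged'

    # Going from /24 (256 addresses) to /23 (512 addresses) is an expand:
    assert classify_prefix_change(24, 23) == 'expand'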
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_compose.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_compose.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_compose.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+  - Uses Docker Compose to start, shut down, and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up), or C(docker-compose stop) (with I(stopped)), or C(docker-compose restart)
+        (with I(restarted)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up), C(docker-compose stop) (with I(stopped)), or C(docker-compose restart) (with I(restarted))
+        on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory.
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: "not output.changed "
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+ description: Meta data assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+        description: Contains a dictionary for each network of which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+ description: Mac Address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+    finally:
+        sys.stdout = old_stdout
+        fd.close()
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+    finally:
+        sys.stderr = old_fh
+        fd.close()
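
These two context managers exist because docker-compose writes progress directly to stdout/stderr; temporarily swapping sys.stdout/sys.stderr for a file lets the module capture that output and surface it in the Ansible result instead. A self-contained usage sketch of the stdout_redirector defined above (the temp-file handling mirrors make_redirection_tempfiles below):

    import tempfile

    out_path = tempfile.mkstemp(prefix='ansible')[1]
    with stdout_redirector(out_path):
        print('captured for the module result, not the terminal')
    with open(out_path) as f:
        print(f.read())  # replayed once stdout has been restored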
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+            # strip terminal format/color escape sequences (non-greedy so that
+            # text between two sequences is preserved)
+            new_line = re.sub(r'\x1b\[.+?m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
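
get_redirected_output also strips ANSI format/color escape sequences, since compose colorizes terminal output; with the non-greedy pattern used above, only the escape sequences themselves are removed. For example:

    import re

    line = '\x1b[32mdone\x1b[0m\n'
    print(re.sub(r'\x1b\[.+?m', '', line))  # prints 'done'; the color codes are gone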
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+    errors = [line.strip() for line in stderr if line.strip().startswith('ERROR:')]
+    errors.extend([line.strip() for line in stdout if line.strip().startswith('ERROR:')])
+
+    warnings = [line.strip() for line in stderr if line.strip().startswith('WARNING:')]
+    warnings.extend([line.strip() for line in stdout if line.strip().startswith('WARNING:')])
+
+    # assume either the exception body (if present) or the last error was the
+    # most fatal.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
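
Putting it together: attempt_extract_errors scans the captured streams for ERROR:/WARNING: prefixes and treats the exception text, or failing that the last ERROR line, as the primary message. A quick illustration using the helper defined above (the input values are made up):

    reason = attempt_extract_errors('', stdout=['ERROR: image not found\n'], stderr=[])
    print(reason['msg'])  # -> b'ERROR: image not found' (the last ERROR line wins)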
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+            self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+                             "Please upgrade docker-compose." %
+                             (compose_version, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
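
AUTH_PARAM_MAPPING above translates the module's Docker authentication parameters into the CLI-style option names that compose's project_from_options expects, and _get_auth_options simply drops unset values. For instance (values are illustrative):

    auth_params = {u'docker_host': u'tcp://10.0.0.5:2376', u'tls': None}
    options = {AUTH_PARAM_MAPPING[key]: value
               for key, value in auth_params.items()
               if value is not None and key in AUTH_PARAM_MAPPING}
    print(options)  # {u'--host': u'tcp://10.0.0.5:2376'} -- unset params are dropped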
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+            result['changed'] = result['changed'] or build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
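+        # Pull the image for every selected service that uses an image (rather
+        # than a build), comparing image IDs before and after to detect changes.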
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
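+        # Rebuild the image for every service that can be built, comparing
+        # image IDs before and after to detect changes.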
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+                    if new_image_id != old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
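+        # Report every existing container (including stopped ones) as deleted,
+        # then tear the project down; images, volumes and orphans are removed
+        # according to the corresponding module options.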
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+            if containers:
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
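+        # For every service named in I(scale), record the difference between the
+        # desired and the current number of containers, then scale the service.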
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+            self.client.fail("Error scaling %s - expected int, got %s" %
+                             (service_name, str(type(self.scale[service_name]))))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+        project_name=dict(type='str'),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_config.py
new file mode 100644
index 00000000..5e7e426c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_config.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_config
+
+short_description: Manage docker configs
+
+
+description:
+ - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
+  - Adds an C(ansible_key) label to the metadata of new configs, containing a hash of the config data, which is then
+    used in future runs to test if a config has changed. If C(ansible_key) is not present, a config will not be
+    updated unless the I(force) option is set.
+ - Updates to configs are performed by removing the config and creating it again.
+options:
+ data:
+ description:
+ - The value of the config. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+      - "A map of key:value metadata, where both the I(key) and I(value) are expected to be strings."
+      - If new metadata is provided, or existing metadata is modified, the config will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing config.
+ - If C(true), an existing config will be replaced, even if it has not been changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the config.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the config should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
+ - "Docker API >= 1.30"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - John Hu (@ushuz)
+'''
+
+EXAMPLES = '''
+
+- name: Create config foo (from a file on the control machine)
+ community.general.docker_config:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the config data
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the config
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing config
+ state: present
+
+- name: Update an existing label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the config
+ one: '1'
+ state: present
+
+- name: Force the (re-)creation of the config
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove config foo
+ community.general.docker_config:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+config_id:
+ description:
+ - The ID assigned by Docker to the config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class ConfigManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ConfigManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
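+            # SHA-224 digest of the config data, stored as the 'ansible_key'
+            # label and compared on later runs to detect changes.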
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_config(self):
+ ''' Find an existing config. '''
+ try:
+ configs = self.client.configs(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
+
+ for config in configs:
+ if config['Spec']['Name'] == self.name:
+ return config
+ return None
+
+ def create_config(self):
+ ''' Create a new config '''
+ config_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ config_id = self.client.create_config(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating config: %s" % to_native(exc))
+
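+        # create_config() may return either a plain ID string or a dict with an
+        # 'ID' key; normalize to the plain ID.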
+ if isinstance(config_id, dict):
+ config_id = config_id['ID']
+
+ return config_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the config '''
+ config = self.get_config()
+ if config:
+ self.results['config_id'] = config['ID']
+ data_changed = False
+ attrs = config.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the config
+ self.absent()
+ config_id = self.create_config()
+ self.results['changed'] = True
+ self.results['config_id'] = config_id
+ else:
+ self.results['changed'] = True
+ self.results['config_id'] = self.create_config()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the config '''
+ config = self.get_config()
+ if config:
+ try:
+ if not self.check_mode:
+ self.client.remove_config(config['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str'),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.6.0',
+ min_docker_api_version='1.30',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ ConfigManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container.py
new file mode 100644
index 00000000..30033ebf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container.py
@@ -0,0 +1,3563 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: Manage docker containers
+
+description:
+ - Manage the life cycle of docker containers.
+ - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
+
+
+notes:
+ - For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and
+ a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
+ prevent this.
+ - If the module needs to recreate the container, it will only use the options provided to the module to create the
+ new container (except I(image)). Therefore, always specify *all* options relevant to the container.
+ - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
+ Please note that several options have default values; if the container to be restarted uses different values for
+ these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove),
+ I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior
+ can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from
+ community.general 3.0.0 on.
+
+options:
+ auto_remove:
+ description:
+ - Enable auto-removal of the container on daemon side when the container's process exits.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ type: int
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ type: list
+ elements: str
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ type: list
+ elements: str
+ cleanup:
+ description:
+ - Use with I(detach=false) to remove the container after successful execution.
+ type: bool
+ default: no
+ command:
+ description:
+ - Command to execute when the container starts. A command may be either a string or a list.
+ - Prior to version 2.4, strings were split on commas.
+ type: raw
+ comparisons:
+ description:
+      - Allows you to specify how properties of existing containers are compared with
+        module options to decide whether the container should be recreated / updated
+        or not.
+ - Only options which correspond to the state of a container as handled by the
+ Docker daemon can be specified, as well as C(networks).
+ - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
+ and C(allow_more_present).
+ - If C(strict) is specified, values are tested for equality, and changes always
+ result in updating or restarting. If C(ignore) is specified, changes are ignored.
+ - C(allow_more_present) is allowed only for lists, sets and dicts. If it is
+ specified for lists or sets, the container will only be updated or restarted if
+ the module option contains a value which is not present in the container's
+ options. If the option is specified for a dict, the container will only be updated
+ or restarted if the module option contains a key which isn't present in the
+ container's option, or if the value of a key present differs.
+ - The wildcard option C(*) can be used to set one of the default values C(strict)
+ or C(ignore) to *all* comparisons which are not explicitly set to other values.
+ - See the examples for details.
+ type: dict
+ container_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems with
+ containers which use different values for these options.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 3.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(container_default_behavior) to an explicit
+ value.
+ - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
+ I(paused), I(privileged), I(read_only) and I(tty) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: '0.2.0'
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpus:
+ description:
+ - Specify how much of the available CPU resources a container can use.
+ - A value of C(1.5) means that at most one and a half CPU (core) will be used.
+ type: float
+ version_added: '0.2.0'
+ cpuset_cpus:
+ description:
+ - CPUs in which to allow execution C(1,3) or C(1-3).
+ type: str
+ cpuset_mems:
+ description:
+ - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1).
+ type: str
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ - If disabled, the task will reflect the status of the container run (failed if the command failed).
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes).
+ type: bool
+ devices:
+ description:
+ - List of host device bindings to add to the container.
+ - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
+ type: list
+ elements: str
+ device_read_bps:
+ description:
+ - "List of device path and read rate (bytes per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_write_bps:
+ description:
+ - "List of device and write rate (bytes per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+          - "Device write limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_read_iops:
+ description:
+ - "List of device and read rate (IO per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_write_iops:
+ description:
+ - "List of device and write rate (IO per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+          - "Device write limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_requests:
+ description:
+      - Allows you to request additional resources, such as GPUs.
+ type: list
+ elements: dict
+ suboptions:
+ capabilities:
+ description:
+ - List of lists of strings to request capabilities.
+ - The top-level list entries are combined by OR, and for every list entry,
+ the entries in the list it contains are combined by AND.
+ - The driver tries to satisfy one of the sub-lists.
+ - Available capabilities for the C(nvidia) driver can be found at
+ U(https://github.com/NVIDIA/nvidia-container-runtime).
+ type: list
+ elements: list
+ count:
+ description:
+          - Number of devices to request.
+ - Set to C(-1) to request all available devices.
+ type: int
+ device_ids:
+ description:
+ - List of device IDs.
+ type: list
+ elements: str
+ driver:
+ description:
+ - Which driver to use for this device.
+ type: str
+ options:
+ description:
+ - Driver-specific options.
+ type: dict
+ dns_opts:
+ description:
+ - List of DNS options.
+ type: list
+ elements: str
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ type: list
+ elements: str
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ type: list
+ elements: str
+ domainname:
+ description:
+ - Container domainname.
+ type: str
+ env:
+ description:
+      - Dictionary of key-value pairs.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
+ type: dict
+ env_file:
+ description:
+ - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+      - If a variable is also present in I(env), the I(env) value will override.
+ type: path
+ entrypoint:
+ description:
+ - Command that overwrites the default C(ENTRYPOINT) of the image.
+ type: list
+ elements: str
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ type: dict
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+ need to be exposed again.
+ type: list
+ elements: str
+ aliases:
+ - exposed
+ - expose
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ type: bool
+ default: no
+ aliases:
+ - forcekill
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work."
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ retries:
+ description:
+ - Consecutive number of failures needed to report unhealthy.
+ - The default used by the Docker daemon is C(3).
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ - The default used by the Docker daemon is C(0s).
+ type: str
+ hostname:
+ description:
+ - The container's hostname.
+ type: str
+ ignore_image:
+ description:
+      - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+        container to the requested configuration. The evaluation includes the image version. If the image
+        version in the registry does not match the container, the container will be recreated. You can
+        stop this behavior by setting I(ignore_image) to C(true).
+ - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+ I(comparisons) option."
+ type: bool
+ default: no
+ image:
+ description:
+ - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+ will be pulled from the registry. If no tag is included, C(latest) will be used.
+ - Can also be an image ID. If this is the case, the image is assumed to be available locally.
+ The I(pull) option is ignored for this case.
+ type: str
+ init:
+ description:
+ - Run an init inside the container that forwards signals and reaps processes.
+ - This option requires Docker API >= 1.25.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container.
+ - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
+ the host's IPC namespace within the container.
+ type: str
+ keep_volumes:
+ description:
+ - Retain anonymous volumes associated with a removed container.
+ type: bool
+ default: yes
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ type: str
+ kernel_memory:
+ description:
+ - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ labels:
+ description:
+      - Dictionary of key-value pairs.
+ type: dict
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias).
+ - Setting this will force container to be restarted.
+ type: list
+ elements: str
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses C(json-file) by default.
+ - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+ type: str
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen I(log_driver).
+ - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+ type: dict
+ aliases:
+ - log_opt
+ mac_address:
+ description:
+ - Container MAC address (e.g. 92:d0:c6:0a:29:33).
+ type: str
+ memory:
+ description:
+ - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C("0").
+ type: str
+ memory_reservation:
+ description:
+ - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swap:
+ description:
+ - "Total memory limit (memory + swap) in format C(<number>[<unit>]).
+ Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+ C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+      - If not set, the value will remain the same if the container exists and will be inherited
+        from the host machine if it is (re-)created.
+ type: int
+ mounts:
+ type: list
+ elements: dict
+ description:
+ - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+ suboptions:
+ target:
+ description:
+ - Path inside the container.
+ type: str
+ required: true
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ type: str
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows.
+ type: str
+ choices:
+ - bind
+ - npipe
+ - tmpfs
+ - volume
+ default: volume
+ read_only:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ consistency:
+ description:
+ - The consistency requirement for the mount.
+ type: str
+ choices:
+ - cached
+ - consistent
+ - default
+ - delegated
+ propagation:
+ description:
+ - Propagation mode. Only valid for the C(bind) type.
+ type: str
+ choices:
+ - private
+ - rprivate
+ - shared
+ - rshared
+ - slave
+ - rslave
+ no_copy:
+ description:
+ - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+ - The default value is C(false).
+ type: bool
+ labels:
+ description:
+ - User-defined name and labels for the volume. Only valid for the C(volume) type.
+ type: dict
+ volume_driver:
+ description:
+ - Specify the volume driver. Only valid for the C(volume) type.
+ - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: str
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen volume_driver. See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+          - "The size for the tmpfs mount in bytes in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+      - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "*Note* that from community.general 3.0.0 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+      - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - To remove a container from one or more networks, use the I(purge_networks) option.
+ - Note that as opposed to C(docker run ...), M(community.general.docker_container) does not remove the default
+ network if I(networks) is specified. You need to explicitly use I(purge_networks) to enforce
+ the removal of the default network (and all other networks not explicitly mentioned in I(networks)).
+ Alternatively, use the I(networks_cli_compatible) option, which will be enabled by default from community.general 2.0.0 on.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: yes
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "When networks are provided to the module via the I(networks) option, the module
+ behaves differently than C(docker run --network): C(docker run --network other)
+ will create a container with network C(other) attached, but the default network
+ not attached. This module with I(networks: {name: other}) will create a container
+ with both C(default) and C(other) attached. If I(purge_networks) is set to C(yes),
+ the C(default) network will be removed afterwards."
+ - "If I(networks_cli_compatible) is set to C(yes), this module will behave as
+ C(docker run --network) and will *not* add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "*Note* that docker CLI also sets I(network_mode) to the name of the first network
+ added if C(--network) is specified. For more compatibility with docker CLI, you
+ explicitly have to set I(network_mode) to the name of the first network you're
+ adding. This behavior will change for community.general 3.0.0: then I(network_mode) will
+ automatically be set to the first network name in I(networks) if I(network_mode)
+ is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
+ is C(true)."
+ - Current value is C(no). A new default of C(yes) will be set in community.general 2.0.0.
+ type: bool
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ type: bool
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune
+ OOM killer preferences.
+ type: int
+ output_logs:
+ description:
+ - If set to true, output of the container command will be printed.
+ - Only effective when I(log_driver) is set to C(json-file) or C(journald).
+ type: bool
+ default: no
+ paused:
+ description:
+ - Use with the started state to pause running processes inside the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container.
+ - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
+ Docker SDK for Python (docker) allow all values supported by the Docker daemon.
+ type: str
+ pids_limit:
+ description:
+ - Set PIDs limit for the container. It accepts an integer value.
+ - Set C(-1) for unlimited PIDs.
+ type: int
+ privileged:
+ description:
+ - Give extended privileges to the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Port ranges can be used for source and destination ports. If two ranges with
+ different lengths are specified, the shorter range will be used.
+ Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+ to the first port of the destination range, but to a free port in that range. This is the
+ same behavior as for C(docker) command line utility.
+ - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
+ is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
+ to resolve hostnames."
+ - A value of C(all) will publish all exposed container ports to random host ports, ignoring
+ any other mappings.
+ - If I(networks) parameter is provided, will inspect each network to see if there exists
+ a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+ Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+ value encountered in the list of I(networks) is the one that will be used.
+ type: list
+ elements: str
+ aliases:
+ - ports
+ pull:
+ description:
+ - If true, always pull the latest version of an image. Otherwise, will only pull an image
+ when missing.
+      - "*Note:* images are only pulled when specified by name. If the image is specified
+        as an image ID (hash), it cannot be pulled."
+ type: bool
+ default: no
+ purge_networks:
+ description:
+ - Remove the container from ALL networks not included in I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ type: bool
+ default: no
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ type: bool
+ default: no
+ removal_wait_timeout:
+ description:
+      - When removing an existing container, the docker daemon API call returns after the container
+        is scheduled for removal. Removal is usually very fast, but under high I/O
+        load it can take longer. By default, the module will wait until the container has been
+        removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ version_added: '0.2.0'
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ type: bool
+ default: no
+ restart_policy:
+ description:
+ - Container restart policy.
+      - Place quotes around the C(no) option.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+ - Use with restart policy to control maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+      - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
+ image version will be taken into account, you can also use the I(ignore_image) option.
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+      - When the container is stopped, this value will be used as a timeout for stopping the
+        container. If the container has a custom C(StopTimeout) configuration,
+        the behavior depends on the version of the docker daemon. Newer versions of
+        the docker daemon will always use the container's configured C(StopTimeout)
+        value if it has been configured.
+ type: int
+ trust_image_content:
+ description:
+ - If C(yes), skip image verification.
+ - The option has never been used by the module. It will be removed in community.general 3.0.0.
+ type: bool
+ default: no
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+      - Dictionary of key-value pairs.
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ community.general.docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ community.general.docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: yes
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ community.general.docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: yes
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
+- name: Container present
+ community.general.docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ community.general.docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ community.general.docker_container:
+ name: "container{{ item }}"
+ recreate: yes
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ community.general.docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ community.general.docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+    # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag".
+    # For older docker installs, use "syslog-tag" instead.
+ tag: myservice
+
+- name: Create db container and connect to network
+ community.general.docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ community.general.docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.1.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.1.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ purge_networks: yes
+
+- name: Remove container from all networks
+ community.general.docker_container:
+ name: sleepy
+ purge_networks: yes
+
+- name: Start a container and use an env file
+ community.general.docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # don't restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Start container with block device read limit
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO operations per second
+ - path: /dev/sdb
+ rate: 300
+
+- name: Start container with GPUs
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_requests:
+ - # Add some specific devices to this container
+ device_ids:
+ - '0'
+ - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ - # Add NVIDIA GPUs to this container
+ driver: nvidia
+ count: -1 # this means we want all
+ capabilities:
+ # The outer list is OR-ed, each inner list is AND-ed;
+ # here there is a single term: 'gpu' AND 'utility'
+ - - gpu
+ - utility
+ # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+ # for a list of capabilities supported by the nvidia driver
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_container). Note that the returned fact will be removed in
+ community.general 2.0.0.
+ - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
+ conflicts with the connection plugin.
+ - Empty if I(state) is C(absent).
+ - If I(detached) is C(false), will include an C(Output) attribute containing any output from the container run.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import os
+import re
+import shlex
+import traceback
+from distutils.version import LooseVersion
+from time import sleep
+
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ compare_generic,
+ is_image_name_id,
+ sanitize_result,
+ clean_dict_booleans_for_docker_api,
+ omit_none_from_dict,
+ parse_healthcheck,
+ DOCKER_COMMON_ARGS,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version
+ if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
+ from docker.types import Ulimit, LogConfig
+ from docker import types as docker_types
+ else:
+ from docker.utils.types import Ulimit, LogConfig
+ from docker.errors import DockerException, APIError, NotFound
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+REQUIRES_CONVERSION_TO_BYTES = [
+ 'kernel_memory',
+ 'memory',
+ 'memory_reservation',
+ 'memory_swap',
+ 'shm_size'
+]
+
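+# Illustrative (not executed): the options listed above are converted with
+# human_to_bytes(), which uses binary units, e.g.
+#   human_to_bytes('512K') -> 524288
+#   human_to_bytes('20M')  -> 20971520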
+
+def is_volume_permissions(mode):
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
+
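+# Illustrative (not executed) behaviour of is_volume_permissions() above:
+#   is_volume_permissions('rw')   -> True
+#   is_volume_permissions('ro,Z') -> True
+#   is_volume_permissions('rwx')  -> False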
+
+def parse_port_range(range_or_port, client):
+ '''
+ Parses a string containing either a single port or a range of ports.
+
+ Returns a list of integers, one for each port in the range.
+ '''
+ if '-' in range_or_port:
+ try:
+ start, end = [int(port) for port in range_or_port.split('-')]
+ except Exception:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ if end < start:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ return list(range(start, end + 1))
+ else:
+ try:
+ return [int(range_or_port)]
+ except Exception:
+ client.fail('Invalid port: "{0}"'.format(range_or_port))
+
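+# Illustrative (not executed) behaviour of parse_port_range() above,
+# assuming a client whose fail() raises:
+#   parse_port_range('9000', client)      -> [9000]
+#   parse_port_range('8000-8002', client) -> [8000, 8001, 8002]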
+
+def split_colon_ipv6(text, client):
+ '''
+ Split string by ':', while keeping IPv6 addresses in square brackets in one component.
+ '''
+ if '[' not in text:
+ return text.split(':')
+ start = 0
+ result = []
+ while start < len(text):
+ i = text.find('[', start)
+ if i < 0:
+ result.extend(text[start:].split(':'))
+ break
+ j = text.find(']', i)
+ if j < 0:
+ client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
+ result.extend(text[start:i].split(':'))
+ k = text.find(':', j)
+ if k < 0:
+ result[-1] += text[i:]
+ start = len(text)
+ else:
+ result[-1] += text[i:k]
+ start = k + 1
+ if start == len(text):
+ # a trailing ':' yields an empty last component, matching str.split(':')
+ result.append('')
+ break
+ return result
+
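+# Illustrative (not executed) behaviour of split_colon_ipv6() above:
+#   split_colon_ipv6('127.0.0.1:8081:9001', client) -> ['127.0.0.1', '8081', '9001']
+#   split_colon_ipv6('[::1]:8081:9001', client)     -> ['[::1]', '8081', '9001']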
+
+class TaskParameters(DockerBaseClass):
+ '''
+ Access and parse module parameters
+ '''
+
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.auto_remove = None
+ self.blkio_weight = None
+ self.capabilities = None
+ self.cap_drop = None
+ self.cleanup = None
+ self.command = None
+ self.cpu_period = None
+ self.cpu_quota = None
+ self.cpus = None
+ self.cpuset_cpus = None
+ self.cpuset_mems = None
+ self.cpu_shares = None
+ self.detach = None
+ self.debug = None
+ self.devices = None
+ self.device_read_bps = None
+ self.device_write_bps = None
+ self.device_read_iops = None
+ self.device_write_iops = None
+ self.device_requests = None
+ self.dns_servers = None
+ self.dns_opts = None
+ self.dns_search_domains = None
+ self.domainname = None
+ self.env = None
+ self.env_file = None
+ self.entrypoint = None
+ self.etc_hosts = None
+ self.exposed_ports = None
+ self.force_kill = None
+ self.groups = None
+ self.healthcheck = None
+ self.hostname = None
+ self.ignore_image = None
+ self.image = None
+ self.init = None
+ self.interactive = None
+ self.ipc_mode = None
+ self.keep_volumes = None
+ self.kernel_memory = None
+ self.kill_signal = None
+ self.labels = None
+ self.links = None
+ self.log_driver = None
+ self.output_logs = None
+ self.log_options = None
+ self.mac_address = None
+ self.memory = None
+ self.memory_reservation = None
+ self.memory_swap = None
+ self.memory_swappiness = None
+ self.mounts = None
+ self.name = None
+ self.network_mode = None
+ self.userns_mode = None
+ self.networks = None
+ self.networks_cli_compatible = None
+ self.oom_killer = None
+ self.oom_score_adj = None
+ self.paused = None
+ self.pid_mode = None
+ self.pids_limit = None
+ self.privileged = None
+ self.purge_networks = None
+ self.pull = None
+ self.read_only = None
+ self.recreate = None
+ self.removal_wait_timeout = None
+ self.restart = None
+ self.restart_retries = None
+ self.restart_policy = None
+ self.runtime = None
+ self.shm_size = None
+ self.security_opts = None
+ self.state = None
+ self.stop_signal = None
+ self.stop_timeout = None
+ self.tmpfs = None
+ self.trust_image_content = None
+ self.tty = None
+ self.user = None
+ self.uts = None
+ self.volumes = None
+ self.volume_binds = dict()
+ self.volumes_from = None
+ self.volume_driver = None
+ self.working_dir = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+ self.comparisons = client.comparisons
+
+ # If state is 'absent', parameters do not have to be parsed or interpreted.
+ # Only the container's name is needed.
+ if self.state == 'absent':
+ return
+
+ if self.cpus is not None:
+ self.cpus = int(round(self.cpus * 1E9))
+
+ if self.groups:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups]
+
+ for param_name in REQUIRES_CONVERSION_TO_BYTES:
+ if client.module.params.get(param_name):
+ try:
+ setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
+ except ValueError as exc:
+ self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
+
+ self.publish_all_ports = False
+ self.published_ports = self._parse_publish_ports()
+ if self.published_ports in ('all', 'ALL'):
+ self.publish_all_ports = True
+ self.published_ports = None
+
+ self.ports = self._parse_exposed_ports(self.published_ports)
+ self.log("expose ports:")
+ self.log(self.ports, pretty_print=True)
+
+ self.links = self._parse_links(self.links)
+
+ if self.volumes:
+ self.volumes = self._expand_host_paths()
+
+ self.tmpfs = self._parse_tmpfs()
+ self.env = self._get_environment()
+ self.ulimits = self._parse_ulimits()
+ self.sysctls = self._parse_sysctls()
+ self.log_config = self._parse_log_config()
+ try:
+ self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
+ except ValueError as e:
+ self.fail(to_native(e))
+
+ self.exp_links = None
+ self.volume_binds = self._get_volume_binds(self.volumes)
+ self.pid_mode = self._replace_container_names(self.pid_mode)
+ self.ipc_mode = self._replace_container_names(self.ipc_mode)
+ self.network_mode = self._replace_container_names(self.network_mode)
+
+ self.log("volumes:")
+ self.log(self.volumes, pretty_print=True)
+ self.log("volume binds:")
+ self.log(self.volume_binds, pretty_print=True)
+
+ if self.networks:
+ for network in self.networks:
+ network['id'] = self._get_network_id(network['name'])
+ if not network['id']:
+ self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
+ if network.get('links'):
+ network['links'] = self._parse_links(network['links'])
+
+ if self.mac_address:
+ # Ensure the MAC address uses colons instead of hyphens for later comparison
+ self.mac_address = self.mac_address.replace('-', ':')
+
+ if self.entrypoint:
+ # convert from list to str.
+ self.entrypoint = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])
+
+ if self.command:
+ # convert from list to str
+ if isinstance(self.command, list):
+ self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command])
+
+ self.mounts_opt, self.expected_mounts = self._process_mounts()
+
+ self._check_mount_target_collisions()
+
+ for param_name in ["device_read_bps", "device_write_bps"]:
+ if client.module.params.get(param_name):
+ self._process_rate_bps(option=param_name)
+
+ for param_name in ["device_read_iops", "device_write_iops"]:
+ if client.module.params.get(param_name):
+ self._process_rate_iops(option=param_name)
+
+ if self.device_requests:
+ for dr_index, dr in enumerate(self.device_requests):
+ # Make sure that capabilities are lists of lists of strings
+ if dr['capabilities']:
+ for or_index, or_list in enumerate(dr['capabilities']):
+ for and_index, and_term in enumerate(or_list):
+ if not isinstance(and_term, string_types):
+ self.fail(
+ "device_requests[{0}].capabilities[{1}][{2}] is not a string".format(
+ dr_index, or_index, and_index))
+ or_list[and_index] = to_native(and_term)
+ # Make sure that options is a dictionary mapping strings to strings
+ if dr['options']:
+ dr['options'] = clean_dict_booleans_for_docker_api(dr['options'])
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ @property
+ def update_parameters(self):
+ '''
+ Returns parameters used to update a container
+ '''
+
+ update_parameters = dict(
+ blkio_weight='blkio_weight',
+ cpu_period='cpu_period',
+ cpu_quota='cpu_quota',
+ cpu_shares='cpu_shares',
+ cpuset_cpus='cpuset_cpus',
+ cpuset_mems='cpuset_mems',
+ mem_limit='memory',
+ mem_reservation='memory_reservation',
+ memswap_limit='memory_swap',
+ kernel_memory='kernel_memory',
+ restart_policy='restart_policy',
+ )
+
+ result = dict()
+ for key, value in update_parameters.items():
+ if getattr(self, value, None) is not None:
+ if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']:
+ restart_policy = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+ result[key] = restart_policy
+ elif self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+ return result
+
+ @property
+ def create_parameters(self):
+ '''
+ Returns parameters used to create a container
+ '''
+ create_params = dict(
+ command='command',
+ domainname='domainname',
+ hostname='hostname',
+ user='user',
+ detach='detach',
+ stdin_open='interactive',
+ tty='tty',
+ ports='ports',
+ environment='env',
+ name='name',
+ entrypoint='entrypoint',
+ mac_address='mac_address',
+ labels='labels',
+ stop_signal='stop_signal',
+ working_dir='working_dir',
+ stop_timeout='stop_timeout',
+ healthcheck='healthcheck',
+ )
+
+ if self.client.docker_py_version < LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ create_params['cpu_shares'] = 'cpu_shares'
+ create_params['volume_driver'] = 'volume_driver'
+
+ result = dict(
+ host_config=self._host_config(),
+ volumes=self._get_mounts(),
+ )
+
+ for key, value in create_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+
+ if self.disable_healthcheck:
+ # Make sure image's health check is overridden
+ result['healthcheck'] = {'test': ['NONE']}
+
+ if self.networks_cli_compatible and self.networks:
+ network = self.networks[0]
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if network.get(para):
+ params[para] = network[para]
+ network_config = dict()
+ network_config[network['name']] = self.client.create_endpoint_config(**params)
+ result['networking_config'] = self.client.create_networking_config(network_config)
+ return result
+
+ def _expand_host_paths(self):
+ new_vols = []
+ for vol in self.volumes:
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if re.match(r'[.~]', host):
+ host = os.path.abspath(os.path.expanduser(host))
+ new_vols.append("%s:%s:%s" % (host, container, mode))
+ continue
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
+ host = os.path.abspath(os.path.expanduser(parts[0]))
+ new_vols.append("%s:%s:rw" % (host, parts[1]))
+ continue
+ new_vols.append(vol)
+ return new_vols
+
+ def _get_mounts(self):
+ '''
+ Return a list of container mounts.
+ :return:
+ '''
+ result = []
+ if self.volumes:
+ for vol in self.volumes:
+ # Only pass anonymous volumes to create container
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ result.append(vol)
+ self.log("mounts:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _host_config(self):
+ '''
+ Returns parameters used to create a HostConfig object
+ '''
+
+ host_config_params = dict(
+ port_bindings='published_ports',
+ publish_all_ports='publish_all_ports',
+ links='links',
+ privileged='privileged',
+ dns='dns_servers',
+ dns_opt='dns_opts',
+ dns_search='dns_search_domains',
+ binds='volume_binds',
+ volumes_from='volumes_from',
+ network_mode='network_mode',
+ userns_mode='userns_mode',
+ cap_add='capabilities',
+ cap_drop='cap_drop',
+ extra_hosts='etc_hosts',
+ read_only='read_only',
+ ipc_mode='ipc_mode',
+ security_opt='security_opts',
+ ulimits='ulimits',
+ sysctls='sysctls',
+ log_config='log_config',
+ mem_limit='memory',
+ memswap_limit='memory_swap',
+ mem_swappiness='memory_swappiness',
+ oom_score_adj='oom_score_adj',
+ oom_kill_disable='oom_killer',
+ shm_size='shm_size',
+ group_add='groups',
+ devices='devices',
+ pid_mode='pid_mode',
+ tmpfs='tmpfs',
+ init='init',
+ uts_mode='uts',
+ runtime='runtime',
+ auto_remove='auto_remove',
+ device_read_bps='device_read_bps',
+ device_write_bps='device_write_bps',
+ device_read_iops='device_read_iops',
+ device_write_iops='device_write_iops',
+ pids_limit='pids_limit',
+ mounts='mounts',
+ nano_cpus='cpus',
+ )
+
+ if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
+ # blkio_weight can always be updated, but can only be set on creation
+ # when Docker SDK for Python and Docker API are new enough
+ host_config_params['blkio_weight'] = 'blkio_weight'
+
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ host_config_params['cpu_shares'] = 'cpu_shares'
+ host_config_params['volume_driver'] = 'volume_driver'
+
+ params = dict()
+ for key, value in host_config_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ params[key] = getattr(self, value)
+
+ if self.restart_policy:
+ params['restart_policy'] = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+
+ if 'mounts' in params:
+ params['mounts'] = self.mounts_opt
+
+ if self.device_requests is not None:
+ params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests]
+
+ return self.client.create_host_config(**params)
+
+ @property
+ def default_host_ip(self):
+ ip = '0.0.0.0'
+ if not self.networks:
+ return ip
+ for net in self.networks:
+ if net.get('name'):
+ try:
+ network = self.client.inspect_network(net['name'])
+ if network.get('Driver') == 'bridge' and \
+ network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
+ ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
+ break
+ except NotFound as nfe:
+ self.client.fail(
+ "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
+ exception=traceback.format_exc()
+ )
+ return ip
+
+ def _parse_publish_ports(self):
+ '''
+ Parse ports from docker CLI syntax
+ '''
+ if self.published_ports is None:
+ return None
+
+ if 'all' in self.published_ports:
+ return 'all'
+
+ default_ip = self.default_host_ip
+
+ binds = {}
+ for port in self.published_ports:
+ parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client)
+ container_port = parts[-1]
+ protocol = ''
+ if '/' in container_port:
+ container_port, protocol = parts[-1].split('/')
+ container_ports = parse_port_range(container_port, self.client)
+
+ p_len = len(parts)
+ if p_len == 1:
+ port_binds = len(container_ports) * [(default_ip,)]
+ elif p_len == 2:
+ if len(container_ports) == 1:
+ port_binds = [(default_ip, parts[0])]
+ else:
+ port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
+ elif p_len == 3:
+ # We only allow IPv4 and IPv6 addresses for the bind address
+ ipaddr = parts[0]
+ if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
+ self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
+ 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
+ ipaddr = ipaddr[1:-1]
+ if parts[1]:
+ if len(container_ports) == 1:
+ port_binds = [(ipaddr, parts[1])]
+ else:
+ port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
+ else:
+ port_binds = len(container_ports) * [(ipaddr,)]
+
+ for bind, container_port in zip(port_binds, container_ports):
+ idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
+ if idx in binds:
+ old_bind = binds[idx]
+ if isinstance(old_bind, list):
+ old_bind.append(bind)
+ else:
+ binds[idx] = [old_bind, bind]
+ else:
+ binds[idx] = bind
+ return binds
+
+ def _get_volume_binds(self, volumes):
+ '''
+ Extract host bindings, if any, from list of volume mapping strings.
+
+ :return: dictionary of bind mappings
+ '''
+ result = dict()
+ if volumes:
+ for vol in volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = (parts + ['rw'])
+ if host is not None:
+ result[host] = dict(
+ bind=container,
+ mode=mode
+ )
+ return result
+
+ def _parse_exposed_ports(self, published_ports):
+ '''
+ Parse exposed ports from docker CLI-style ports syntax.
+ '''
+ exposed = []
+ if self.exposed_ports:
+ for port in self.exposed_ports:
+ port = to_text(port, errors='surrogate_or_strict').strip()
+ protocol = 'tcp'
+ match = re.search(r'(/.+$)', port)
+ if match:
+ protocol = match.group(1).replace('/', '')
+ port = re.sub(r'/.+$', '', port)
+ exposed.append((port, protocol))
+ if published_ports:
+ # Any published port should also be exposed
+ for publish_port in published_ports:
+ match = False
+ if isinstance(publish_port, string_types) and '/' in publish_port:
+ port, protocol = publish_port.split('/')
+ port = int(port)
+ else:
+ protocol = 'tcp'
+ port = int(publish_port)
+ for exposed_port in exposed:
+ if exposed_port[1] != protocol:
+ continue
+ if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
+ start_port, end_port = exposed_port[0].split('-')
+ if int(start_port) <= port <= int(end_port):
+ match = True
+ elif exposed_port[0] == port:
+ match = True
+ if not match:
+ exposed.append((port, protocol))
+ return exposed
+
+ @staticmethod
+ def _parse_links(links):
+ '''
+ Turn links into a list of (name, alias) tuples
+ '''
+ if links is None:
+ return None
+
+ result = []
+ for link in links:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ result.append((parsed_link[0], parsed_link[1]))
+ else:
+ result.append((parsed_link[0], parsed_link[0]))
+ return result
+
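+ # Illustrative (not executed) behaviour of _parse_links() above:
+ #   _parse_links(['db:db_alias', 'web']) -> [('db', 'db_alias'), ('web', 'web')]
+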
+ def _parse_ulimits(self):
+ '''
+ Turn ulimits into an array of Ulimit objects
+ '''
+ if self.ulimits is None:
+ return None
+
+ results = []
+ for limit in self.ulimits:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['name'] = pieces[0]
+ limits['soft'] = int(pieces[1])
+ limits['hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['hard'] = int(pieces[2])
+ try:
+ results.append(Ulimit(**limits))
+ except ValueError as exc:
+ self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
+ return results
+
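+ # Illustrative (not executed) behaviour of _parse_ulimits() above:
+ #   'nofile:1024'      -> Ulimit(name='nofile', soft=1024, hard=1024)
+ #   'nofile:1024:2048' -> Ulimit(name='nofile', soft=1024, hard=2048)
+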
+ def _parse_sysctls(self):
+ '''
+ Return the sysctls dict as-is; the Docker SDK accepts a plain mapping
+ '''
+ return self.sysctls
+
+ def _parse_log_config(self):
+ '''
+ Create a LogConfig object
+ '''
+ if self.log_driver is None:
+ return None
+
+ options = dict(
+ Type=self.log_driver,
+ Config=dict()
+ )
+
+ if self.log_options is not None:
+ options['Config'] = dict()
+ for k, v in self.log_options.items():
+ if not isinstance(v, string_types):
+ self.client.module.warn(
+ "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
+ "If this is not correct, or you want to avoid such warnings, please quote the value." % (
+ k, to_text(v, errors='surrogate_or_strict'))
+ )
+ v = to_text(v, errors='surrogate_or_strict')
+ self.log_options[k] = v
+ options['Config'][k] = v
+
+ try:
+ return LogConfig(**options)
+ except ValueError as exc:
+ self.fail('Error parsing logging options - %s' % (exc))
+
+ def _parse_tmpfs(self):
+ '''
+ Turn tmpfs entries into a dict mapping mount points to their options
+ '''
+ result = dict()
+ if self.tmpfs is None:
+ return result
+
+ for tmpfs_spec in self.tmpfs:
+ split_spec = tmpfs_spec.split(":", 1)
+ if len(split_spec) > 1:
+ result[split_spec[0]] = split_spec[1]
+ else:
+ result[split_spec[0]] = ""
+ return result
+
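+ # Illustrative (not executed) behaviour of _parse_tmpfs() above:
+ #   ['/run:rw,size=64m', '/tmp'] -> {'/run': 'rw,size=64m', '/tmp': ''}
+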
+ def _get_environment(self):
+ """
+ If environment file is combined with explicit environment variables, the explicit environment variables
+ take precedence.
+ """
+ final_env = {}
+ if self.env_file:
+ parsed_env_file = utils.parse_env_file(self.env_file)
+ for name, value in parsed_env_file.items():
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ if self.env:
+ for name, value in self.env.items():
+ if not isinstance(value, string_types):
+ self.fail("Non-string value found for env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ return final_env
+
+ def _get_network_id(self, network_name):
+ network_id = None
+ try:
+ for network in self.client.networks(names=[network_name]):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ except Exception as exc:
+ self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+ return network_id
+
+ def _process_mounts(self):
+ if self.mounts is None:
+ return None, None
+ mounts_list = []
+ mounts_expected = []
+ for mount in self.mounts:
+ target = mount['target']
+ datatype = mount['type']
+ mount_dict = dict(mount)
+ # Sanity checks (fail early instead of waiting for docker-py to reject the input)
+ if mount_dict.get('source') is None and datatype != 'tmpfs':
+ self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
+ mount_option_types = dict(
+ volume_driver='volume',
+ volume_options='volume',
+ propagation='bind',
+ no_copy='volume',
+ labels='volume',
+ tmpfs_size='tmpfs',
+ tmpfs_mode='tmpfs',
+ )
+ for option, req_datatype in mount_option_types.items():
+ if mount_dict.get(option) is not None and datatype != req_datatype:
+ self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
+ # Handle volume_driver and volume_options
+ volume_driver = mount_dict.pop('volume_driver')
+ volume_options = mount_dict.pop('volume_options')
+ if volume_driver:
+ if volume_options:
+ volume_options = clean_dict_booleans_for_docker_api(volume_options)
+ mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
+ if mount_dict['labels']:
+ mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
+ if mount_dict.get('tmpfs_size') is not None:
+ try:
+ mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
+ except ValueError as exc:
+ self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
+ if mount_dict.get('tmpfs_mode') is not None:
+ try:
+ mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
+ except Exception as dummy:
+ self.client.fail('tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
+ # Fill expected mount dict
+ mount_expected = dict(mount)
+ mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
+ mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
+ # Add result to lists
+ mounts_list.append(docker_types.Mount(**mount_dict))
+ mounts_expected.append(omit_none_from_dict(mount_expected))
+ return mounts_list, mounts_expected
+
+ def _process_rate_bps(self, option):
+ """
+ Format device_read_bps and device_write_bps option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
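+ # Illustrative (not executed): _process_rate_bps() above turns
+ #   [{'path': '/dev/sda', 'rate': '20M'}]
+ # into
+ #   [{'Path': '/dev/sda', 'Rate': 20971520}]
+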
+ def _process_rate_iops(self, option):
+ """
+ Format device_read_iops and device_write_iops option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _replace_container_names(self, mode):
+ """
+ Parse IPC and PID modes. If they contain a container name, replace
+ with the container's ID.
+ """
+ if mode is None or not mode.startswith('container:'):
+ return mode
+ container_name = mode[len('container:'):]
+ # Try to inspect container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = self.client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return mode
+ return 'container:{0}'.format(container['Id'])
+
+ def _check_mount_target_collisions(self):
+ last = dict()
+
+ def f(t, name):
+ if t in last:
+ if name == last[t]:
+ self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
+ else:
+ self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+ last[t] = name
+
+ if self.expected_mounts:
+ for t in [m['target'] for m in self.expected_mounts]:
+ f(t, 'mounts')
+ if self.volumes:
+ for v in self.volumes:
+ vs = v.split(':')
+ f(vs[0 if len(vs) == 1 else 1], 'volumes')
+
+
+class Container(DockerBaseClass):
+
+ def __init__(self, container, parameters):
+ super(Container, self).__init__()
+ self.raw = container
+ self.Id = None
+ self.container = container
+ if container:
+ self.Id = container['Id']
+ self.Image = container['Image']
+ self.log(self.container, pretty_print=True)
+ self.parameters = parameters
+ self.parameters.expected_links = None
+ self.parameters.expected_ports = None
+ self.parameters.expected_exposed = None
+ self.parameters.expected_volumes = None
+ self.parameters.expected_ulimits = None
+ self.parameters.expected_sysctls = None
+ self.parameters.expected_etc_hosts = None
+ self.parameters.expected_env = None
+ self.parameters.expected_device_requests = None
+ self.parameters_map = dict()
+ self.parameters_map['expected_links'] = 'links'
+ self.parameters_map['expected_ports'] = 'expected_ports'
+ self.parameters_map['expected_exposed'] = 'exposed_ports'
+ self.parameters_map['expected_volumes'] = 'volumes'
+ self.parameters_map['expected_ulimits'] = 'ulimits'
+ self.parameters_map['expected_sysctls'] = 'sysctls'
+ self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
+ self.parameters_map['expected_env'] = 'env'
+ self.parameters_map['expected_entrypoint'] = 'entrypoint'
+ self.parameters_map['expected_binds'] = 'volumes'
+ self.parameters_map['expected_cmd'] = 'command'
+ self.parameters_map['expected_devices'] = 'devices'
+ self.parameters_map['expected_healthcheck'] = 'healthcheck'
+ self.parameters_map['expected_mounts'] = 'mounts'
+ self.parameters_map['expected_device_requests'] = 'device_requests'
+
+ def fail(self, msg):
+ self.parameters.client.fail(msg)
+
+ @property
+ def exists(self):
+ return bool(self.container)
+
+ @property
+ def removing(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Status') == 'removing'
+ return False
+
+ @property
+ def running(self):
+ if self.container and self.container.get('State'):
+ if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
+ return True
+ return False
+
+ @property
+ def paused(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Paused', False)
+ return False
+
+ def _compare(self, a, b, compare):
+ '''
+ Compare values a and b as described in compare.
+ '''
+ return compare_generic(a, b, compare['comparison'], compare['type'])
+
+ def _decode_mounts(self, mounts):
+ if not mounts:
+ return mounts
+ result = []
+ empty_dict = dict()
+ for mount in mounts:
+ res = dict()
+ res['type'] = mount.get('Type')
+ res['source'] = mount.get('Source')
+ res['target'] = mount.get('Target')
+ res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False
+ res['consistency'] = mount.get('Consistency')
+ res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
+ res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
+ res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
+ res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
+ res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
+ res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
+ res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
+ result.append(res)
+ return result
+
+ def has_different_configuration(self, image):
+ '''
+ Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
+ '''
+ self.log('Starting has_different_configuration')
+ self.parameters.expected_entrypoint = self._get_expected_entrypoint()
+ self.parameters.expected_links = self._get_expected_links()
+ self.parameters.expected_ports = self._get_expected_ports()
+ self.parameters.expected_exposed = self._get_expected_exposed(image)
+ self.parameters.expected_volumes = self._get_expected_volumes(image)
+ self.parameters.expected_binds = self._get_expected_binds(image)
+ self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
+ self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
+ self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
+ self.parameters.expected_env = self._get_expected_env(image)
+ self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
+ self.parameters.expected_healthcheck = self._get_expected_healthcheck()
+ self.parameters.expected_device_requests = self._get_expected_device_requests()
+
+ if not self.container.get('HostConfig'):
+ self.fail("has_different_configuration: Error parsing container properties. HostConfig missing.")
+ if not self.container.get('Config'):
+ self.fail("has_different_configuration: Error parsing container properties. Config missing.")
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_different_configuration: Error parsing container properties. NetworkSettings missing.")
+
+ host_config = self.container['HostConfig']
+ log_config = host_config.get('LogConfig', dict())
+ config = self.container['Config']
+ network = self.container['NetworkSettings']
+
+ # The previous version of the docker module ignored the detach state by
+ # assuming if the container was running, it must have been detached.
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ # Map parameters to container inspect results
+ config_mapping = dict(
+ expected_cmd=config.get('Cmd'),
+ domainname=config.get('Domainname'),
+ hostname=config.get('Hostname'),
+ user=config.get('User'),
+ detach=detach,
+ init=host_config.get('Init'),
+ interactive=config.get('OpenStdin'),
+ capabilities=host_config.get('CapAdd'),
+ cap_drop=host_config.get('CapDrop'),
+ expected_devices=host_config.get('Devices'),
+ dns_servers=host_config.get('Dns'),
+ dns_opts=host_config.get('DnsOptions'),
+ dns_search_domains=host_config.get('DnsSearch'),
+ expected_env=(config.get('Env') or []),
+ expected_entrypoint=config.get('Entrypoint'),
+ expected_etc_hosts=host_config['ExtraHosts'],
+ expected_exposed=expected_exposed,
+ groups=host_config.get('GroupAdd'),
+ ipc_mode=host_config.get("IpcMode"),
+ labels=config.get('Labels'),
+ expected_links=host_config.get('Links'),
+ mac_address=config.get('MacAddress', network.get('MacAddress')),
+ memory_swappiness=host_config.get('MemorySwappiness'),
+ network_mode=host_config.get('NetworkMode'),
+ userns_mode=host_config.get('UsernsMode'),
+ oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ pid_mode=host_config.get('PidMode'),
+ privileged=host_config.get('Privileged'),
+ expected_ports=host_config.get('PortBindings'),
+ read_only=host_config.get('ReadonlyRootfs'),
+ runtime=host_config.get('Runtime'),
+ shm_size=host_config.get('ShmSize'),
+ security_opts=host_config.get("SecurityOpt"),
+ stop_signal=config.get("StopSignal"),
+ tmpfs=host_config.get('Tmpfs'),
+ tty=config.get('Tty'),
+ expected_ulimits=host_config.get('Ulimits'),
+ expected_sysctls=host_config.get('Sysctls'),
+ uts=host_config.get('UTSMode'),
+ expected_volumes=config.get('Volumes'),
+ expected_binds=host_config.get('Binds'),
+ volume_driver=host_config.get('VolumeDriver'),
+ volumes_from=host_config.get('VolumesFrom'),
+ working_dir=config.get('WorkingDir'),
+ publish_all_ports=host_config.get('PublishAllPorts'),
+ expected_healthcheck=config.get('Healthcheck'),
+ disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
+ device_read_bps=host_config.get('BlkioDeviceReadBps'),
+ device_write_bps=host_config.get('BlkioDeviceWriteBps'),
+ device_read_iops=host_config.get('BlkioDeviceReadIOps'),
+ device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
+ expected_device_requests=host_config.get('DeviceRequests'),
+ pids_limit=host_config.get('PidsLimit'),
+ # According to https://github.com/moby/moby/, support for HostConfig.Mounts
+ # has been included at least since v17.03.0-ce, which has API version 1.26.
+ # The previous tag, v1.9.1, has API version 1.21 and does not have
+ # HostConfig.Mounts. Whether API 1.25 already supports it is unclear.
+ expected_mounts=self._decode_mounts(host_config.get('Mounts')),
+ cpus=host_config.get('NanoCpus'),
+ )
+ # Options which don't make sense without their accompanying option
+ if self.parameters.log_driver:
+ config_mapping['log_driver'] = log_config.get('Type')
+ config_mapping['log_options'] = log_config.get('Config')
+
+ if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
+ # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately
+ # it has a default value, that's why we have to jump through the hoops here
+ config_mapping['auto_remove'] = host_config.get('AutoRemove')
+
+ if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
+ # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that
+ # stop_timeout has a hybrid role, in that it used to be something only used
+ # for stopping containers, and is now also used as a container property.
+ # That's why it needs special handling here.
+ config_mapping['stop_timeout'] = config.get('StopTimeout')
+
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # For docker API < 1.22, update_container() is not supported. Thus
+ # we need to handle all limits which are usually handled by
+ # update_container() as configuration changes which require a container
+ # restart.
+ restart_policy = host_config.get('RestartPolicy', dict())
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ config_mapping.update(dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ ))
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
+ if not minimal_version.get('supported', True):
+ continue
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare))
+ if getattr(self.parameters, key, None) is not None:
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ p = getattr(self.parameters, key)
+ c = value
+ if compare['type'] == 'set':
+ # Since the order does not matter, sort so that the diff output is better.
+ if p is not None:
+ p = sorted(p)
+ if c is not None:
+ c = sorted(c)
+ elif compare['type'] == 'set(dict)':
+ # Since the order does not matter, sort so that the diff output is better.
+ if key == 'expected_mounts':
+ # For selected values, use one entry as key
+ def sort_key_fn(x):
+ return x['target']
+ else:
+ # We sort the list of dictionaries by using the sorted items of a dict as its key.
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(key, parameter=p, active=c)
+
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def has_different_resource_limits(self):
+ '''
+ Diff parameters and container resource limits
+ '''
+ if not self.container.get('HostConfig'):
+ self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # update_container() call not supported
+ return False, []
+
+ host_config = self.container['HostConfig']
+
+ restart_policy = host_config.get('RestartPolicy') or dict()
+
+ config_mapping = dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ )
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ if getattr(self.parameters, key, None):
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ differences.add(key, parameter=getattr(self.parameters, key), active=value)
+ different = not differences.empty
+ return different, differences
+
+ def has_network_differences(self):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.parameters.networks:
+ return different, differences
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings']['Networks']
+ for network in self.parameters.networks:
+ network_info = connected_networks.get(network['name'])
+ if network_info is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ network_info_ipam = network_info.get('IPAMConfig') or {}
+ if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
+ diff = True
+ if network.get('aliases'):
+ if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
+ diff = True
+ if network.get('links'):
+ expected_links = []
+ for link, alias in network['links']:
+ expected_links.append("%s:%s" % (link, alias))
+ if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=network_info_ipam.get('IPv4Address'),
+ ipv6_address=network_info_ipam.get('IPv6Address'),
+ aliases=network_info.get('Aliases'),
+ links=network_info.get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.parameters.networks:
+ for expected_network in self.parameters.networks:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def _get_expected_devices(self):
+ if not self.parameters.devices:
+ return None
+ expected_devices = []
+ for device in self.parameters.devices:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ parts = device.split(':')
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
+
+ def _get_expected_entrypoint(self):
+ if not self.parameters.entrypoint:
+ return None
+ return shlex.split(self.parameters.entrypoint)
+
+ def _get_expected_ports(self):
+ if self.parameters.published_ports is None:
+ return None
+ expected_bound_ports = {}
+ for container_port, config in self.parameters.published_ports.items():
+ if isinstance(container_port, int):
+ container_port = "%s/tcp" % container_port
+ if len(config) == 1:
+ if isinstance(config[0], int):
+ expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
+ elif isinstance(config[0], tuple):
+ expected_bound_ports[container_port] = []
+ for host_ip, host_port in config:
+ expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
+ return expected_bound_ports
+
+ def _get_expected_links(self):
+ if self.parameters.links is None:
+ return None
+ self.log('parameter links:')
+ self.log(self.parameters.links, pretty_print=True)
+ exp_links = []
+ for link, alias in self.parameters.links:
+ exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
+ return exp_links
+
+ def _get_expected_binds(self, image):
+ self.log('_get_expected_binds')
+ image_vols = []
+ if image:
+ image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes'))
+ param_vols = []
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = parts + ['rw']
+ if host:
+ param_vols.append("%s:%s:%s" % (host, container, mode))
+ result = list(set(image_vols + param_vols))
+ self.log("expected_binds:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_device_requests(self):
+ if self.parameters.device_requests is None:
+ return None
+ device_requests = []
+ for dr in self.parameters.device_requests:
+ device_requests.append({
+ 'Driver': dr['driver'],
+ 'Count': dr['count'],
+ 'DeviceIDs': dr['device_ids'],
+ 'Capabilities': dr['capabilities'],
+ 'Options': dr['options'],
+ })
+ return device_requests
+
+ def _get_image_binds(self, volumes):
+ '''
+ Convert array of binds to array of strings with format host_path:container_path:mode
+
+ :param volumes: array of bind dicts
+ :return: array of strings
+ '''
+ results = []
+ if isinstance(volumes, dict):
+ results += self._get_bind_from_dict(volumes)
+ elif isinstance(volumes, list):
+ for vol in volumes:
+ results += self._get_bind_from_dict(vol)
+ return results
+
+ @staticmethod
+ def _get_bind_from_dict(volume_dict):
+ results = []
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
+
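+ # Illustrative (not executed) behaviour of _get_bind_from_dict() above:
+ #   {'/srv/data': {'bind': '/data', 'mode': 'ro'}} -> ['/srv/data:/data:ro']
+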
+ def _get_expected_volumes(self, image):
+ self.log('_get_expected_volumes')
+ expected_vols = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Volumes'):
+ expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes'))
+
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ # We only expect anonymous volumes to show up in the list
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ expected_vols[vol] = dict()
+
+ if not expected_vols:
+ expected_vols = None
+ self.log("expected_volumes:")
+ self.log(expected_vols, pretty_print=True)
+ return expected_vols
+
+ def _get_expected_env(self, image):
+ self.log('_get_expected_env')
+ expected_env = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Env'):
+ for env_var in image[self.parameters.client.image_inspect_source]['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if self.parameters.env:
+ expected_env.update(self.parameters.env)
+ param_env = []
+ for key, value in expected_env.items():
+ param_env.append("%s=%s" % (key, value))
+ return param_env
+
+ def _get_expected_exposed(self, image):
+ self.log('_get_expected_exposed')
+ image_ports = []
+ if image:
+ image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {}
+ image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()]
+ param_ports = []
+ if self.parameters.ports:
+ param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports]
+ result = list(set(image_ports + param_ports))
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_ulimits(self, config_ulimits):
+ self.log('_get_expected_ulimits')
+ if config_ulimits is None:
+ return None
+ results = []
+ for limit in config_ulimits:
+ results.append(dict(
+ Name=limit.name,
+ Soft=limit.soft,
+ Hard=limit.hard
+ ))
+ return results
+
+ def _get_expected_sysctls(self, config_sysctls):
+ self.log('_get_expected_sysctls')
+ if config_sysctls is None:
+ return None
+ result = dict()
+ for key, value in config_sysctls.items():
+ result[key] = to_text(value, errors='surrogate_or_strict')
+ return result
+
+ def _get_expected_cmd(self):
+ self.log('_get_expected_cmd')
+ if not self.parameters.command:
+ return None
+ return shlex.split(self.parameters.command)
+
+ def _convert_simple_dict_to_list(self, param_name, join_with=':'):
+ if getattr(self.parameters, param_name, None) is None:
+ return None
+ results = []
+ for key, value in getattr(self.parameters, param_name).items():
+ results.append("%s%s%s" % (key, join_with, value))
+ return results
+
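+ # Illustrative (not executed): with etc_hosts set to {'db.local': '172.17.0.2'},
+ # _convert_simple_dict_to_list('etc_hosts') -> ['db.local:172.17.0.2']
+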
+ def _normalize_port(self, port):
+ if '/' not in port:
+ return port + '/tcp'
+ return port
+
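+ # Illustrative (not executed) behaviour of _normalize_port() above:
+ #   '80'     -> '80/tcp'
+ #   '53/udp' -> '53/udp'
+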
+ def _get_expected_healthcheck(self):
+ self.log('_get_expected_healthcheck')
+ expected_healthcheck = dict()
+
+ if self.parameters.healthcheck:
+ expected_healthcheck.update([(k.title().replace("_", ""), v)
+ for k, v in self.parameters.healthcheck.items()])
+
+ return expected_healthcheck
+
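+ # Illustrative (not executed) behaviour of _get_expected_healthcheck() above:
+ #   {'test': ['CMD', 'true'], 'start_period': 30}
+ #     -> {'Test': ['CMD', 'true'], 'StartPeriod': 30}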
+
+class ContainerManager(DockerBaseClass):
+ '''
+ Perform container management tasks
+ '''
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
+ client.module.warn('log_options is ignored when log_driver is not specified')
+ if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
+ client.module.warn('healthcheck is ignored when test is not specified')
+ if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
+ client.module.warn('restart_retries is ignored when restart_policy is not specified')
+
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.diff_tracker = DifferenceTracker()
+ self.facts = {}
+
+ state = self.parameters.state
+ if state in ('stopped', 'started', 'present'):
+ self.present(state)
+ elif state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ if self.client.module._diff or self.parameters.debug:
+ self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['ansible_facts'] = {'docker_container': self.facts}
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.client.get_container_by_id(container_id)
+ if result is None:
+ if accept_removal:
+ return
+ msg = 'Encountered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+ msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
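+    # Usage sketch for wait_for_state (mirrors the call sites in present()
+    # below; max_wait=30 is a hypothetical value, the real calls pass
+    # self.parameters.removal_wait_timeout):
+    #   self.wait_for_state(container.Id, wait_states=['removing'],
+    #                       accept_removal=True, max_wait=30)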
+
+ def present(self, state):
+ container = self._get_container(self.parameters.name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+        # If the image parameter was passed then we need to deal with the image
+        # version comparison. Otherwise we handle this depending on whether
+        # the container already runs or not; if it does and needs to be
+        # restarted, we use the existing container's image ID.
+ image = self._get_image()
+ self.log(image, pretty_print=True)
+ if not container.exists or container.removing:
+ # New container
+ if container.removing:
+ self.log('Found container in removal phase')
+ else:
+ self.log('No container found')
+ if not self.parameters.image:
+ self.fail('Cannot create container when image is not specified!')
+ self.diff_tracker.add('exists', parameter=True, active=False)
+ if container.removing and not self.check_mode:
+ # Wait for container to be removed before trying to create it
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+ else:
+ # Existing container
+ different, differences = container.has_different_configuration(image)
+ image_different = False
+ if self.parameters.comparisons['image']['comparison'] == 'strict':
+ image_different = self._image_is_different(image, container)
+ if image_different or different or self.parameters.recreate:
+ self.diff_tracker.merge(differences)
+ self.diff['differences'] = differences.get_legacy_docker_container_diffs()
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
+ image_to_use = self.parameters.image
+ if not image_to_use and container and container.Image:
+ image_to_use = container.Image
+ if not image_to_use:
+ self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
+ if container.running:
+ self.container_stop(container.Id)
+ self.container_remove(container.Id)
+ if not self.check_mode:
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(image_to_use, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+
+ if container and container.exists:
+ container = self.update_limits(container)
+ container = self.update_networks(container, container_created)
+
+ if state == 'started' and not container.running:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ container = self.container_start(container.Id)
+ elif state == 'started' and self.parameters.restart:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ self.diff_tracker.add('restarted', parameter=True, active=False)
+ container = self.container_restart(container.Id)
+ elif state == 'stopped' and container.running:
+ self.diff_tracker.add('running', parameter=False, active=was_running)
+ self.container_stop(container.Id)
+ container = self._get_container(container.Id)
+
+ if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused:
+ self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
+ if not self.check_mode:
+ try:
+ if self.parameters.paused:
+ self.client.pause(container=container.Id)
+ else:
+ self.client.unpause(container=container.Id)
+ except Exception as exc:
+ self.fail("Error %s container %s: %s" % (
+ "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc)
+ ))
+ container = self._get_container(container.Id)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(set_paused=self.parameters.paused))
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.parameters.name)
+ if container.exists:
+ if container.running:
+ self.diff_tracker.add('running', parameter=False, active=True)
+ self.container_stop(container.Id)
+ self.diff_tracker.add('exists', parameter=False, active=True)
+ self.container_remove(container.Id)
+
+ def fail(self, msg, **kwargs):
+ self.client.fail(msg, **kwargs)
+
+ def _output_logs(self, msg):
+ self.client.module.log(msg=msg)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ return Container(self.client.get_container(container), self.parameters)
+
+ def _get_image(self):
+ if not self.parameters.image:
+ self.log('No image specified')
+ return None
+ if is_image_name_id(self.parameters.image):
+ image = self.client.find_image_by_id(self.parameters.image)
+ else:
+ repository, tag = utils.parse_repository_tag(self.parameters.image)
+ if not tag:
+ tag = "latest"
+ image = self.client.find_image(repository, tag)
+ if not image or self.parameters.pull:
+ if not self.check_mode:
+ self.log("Pull the image.")
+                    image, already_latest = self.client.pull_image(repository, tag)
+                    if already_latest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ elif not image:
+ # If the image isn't there, claim we'll pull.
+ # (Implicitly: if the image is there, claim it already was latest.)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+
+ self.log("image")
+ self.log(image, pretty_print=True)
+ return image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.Image:
+ if image.get('Id') != container.Image:
+ self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
+ return True
+ return False
+
+ def update_limits(self, container):
+ limits_differ, different_limits = container.has_different_resource_limits()
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
+ self.diff_tracker.merge(different_limits)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.Id, self.parameters.update_parameters)
+ return self._get_container(container.Id)
+ return container
+
+ def update_networks(self, container, container_created):
+ updated_container = container
+ if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
+ has_network_differences, network_differences = container.has_network_differences()
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ for netdiff in network_differences:
+ self.diff_tracker.add(
+ 'network.{0}'.format(netdiff['parameter']['name']),
+ parameter=netdiff['parameter'],
+ active=netdiff['container']
+ )
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
+ has_extra_networks, extra_networks = container.has_extra_networks()
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ for extra_network in extra_networks:
+ self.diff_tracker.add(
+ 'network.{0}'.format(extra_network['name']),
+ active=extra_network
+ )
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ to_native(exc)))
+ # connect to the network
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if diff['parameter'].get(para):
+ params[para] = diff['parameter'][para]
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
+ if not self.check_mode:
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
+ return self._get_container(container.Id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ to_native(exc)))
+ return self._get_container(container.Id)
+
+ def container_create(self, image, create_parameters):
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ new_container = self.client.create_container(image, **create_parameters)
+ self.client.report_warnings(new_container)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % to_native(exc))
+ return self._get_container(new_container['Id'])
+ return new_container
+
+ def container_start(self, container_id):
+ self.log("start container %s" % (container_id))
+ self.results['actions'].append(dict(started=container_id))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.client.start(container=container_id)
+ except Exception as exc:
+ self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
+
+ if self.parameters.detach is False:
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ status = self.client.wait(container_id)['StatusCode']
+ else:
+ status = self.client.wait(container_id)
+ if self.parameters.auto_remove:
+ output = "Cannot retrieve result as auto_remove is enabled"
+ if self.parameters.output_logs:
+ self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
+ else:
+ config = self.client.inspect_container(container_id)
+ logging_driver = config['HostConfig']['LogConfig']['Type']
+
+ if logging_driver in ('json-file', 'journald'):
+ output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+ if self.parameters.output_logs:
+ self._output_logs(msg=output)
+ else:
+ output = "Result logged using `%s` driver" % logging_driver
+
+ if status != 0:
+ self.fail(output, status=status)
+ if self.parameters.cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ return insp
+ return self._get_container(container_id)
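+    # Sketch of the non-detached path above: with detach=false and a json-file
+    # or journald log driver, the module waits for the container to exit,
+    # collects its combined stdout/stderr via client.logs(), fails the task on
+    # a non-zero exit status, and removes the container when cleanup=true.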
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.parameters.keep_volumes)
+ self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+ except NotFound as dummy:
+ pass
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
+ pass
+ else:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+            if not self.check_mode and callable(getattr(self.client, 'update_container', None)):
+ try:
+ result = self.client.update_container(container_id, **update_parameters)
+ self.client.report_warnings(result)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.kill_signal:
+ response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+ else:
+ response = self.client.kill(container_id)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, exc))
+ return response
+
+ def container_restart(self, container_id):
+ self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ if self.parameters.stop_timeout:
+ dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ dummy = self.client.restart(container_id)
+ except Exception as exc:
+ self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_stop(self, container_id):
+ if self.parameters.force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ if self.parameters.stop_timeout:
+ response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ response = self.client.stop(container_id)
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+                        # New docker daemon versions do not allow containers to be stopped
+                        # if they are paused. Make sure we don't end up in an infinite loop.
+                        if count == 3:
+                            self.fail("Error stopping container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+
+def detect_ipvX_address_usage(client):
+ '''
+ Helper function to detect whether any specified network uses ipv4_address or ipv6_address
+ '''
+ for network in client.module.params.get("networks") or []:
+ if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None:
+ return True
+ return False
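+# For illustration (hypothetical address): networks=[{'name': 'net1',
+# 'ipv4_address': '172.17.0.10'}] makes this return True, which triggers the
+# ipvX_address_supported minimal-version check registered below.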
+
+
+class AnsibleDockerClientContainer(AnsibleDockerClient):
+ # A list of module options which are not docker container properties
+ __NON_CONTAINER_PROPERTY_OPTIONS = tuple([
+ 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks',
+ 'recreate', 'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal',
+ 'output_logs', 'paused', 'removal_wait_timeout'
+ ] + list(DOCKER_COMMON_ARGS.keys()))
+
+ def _parse_comparisons(self):
+ comparisons = {}
+ comp_aliases = {}
+ # Put in defaults
+ explicit_types = dict(
+ command='list',
+ devices='set(dict)',
+ device_requests='set(dict)',
+ dns_search_domains='list',
+ dns_servers='list',
+ env='set',
+ entrypoint='list',
+ etc_hosts='set',
+ mounts='set(dict)',
+ networks='set(dict)',
+ ulimits='set(dict)',
+ device_read_bps='set(dict)',
+ device_write_bps='set(dict)',
+ device_read_iops='set(dict)',
+ device_write_iops='set(dict)',
+ )
+ all_options = set() # this is for improving user feedback when a wrong option was specified for comparison
+ default_values = dict(
+ stop_timeout='ignore',
+ )
+ for option, data in self.module.argument_spec.items():
+ all_options.add(option)
+ for alias in data.get('aliases', []):
+ all_options.add(alias)
+ # Ignore options which aren't used as container properties
+ if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
+ continue
+ # Determine option type
+ if option in explicit_types:
+ datatype = explicit_types[option]
+ elif data['type'] == 'list':
+ datatype = 'set'
+ elif data['type'] == 'dict':
+ datatype = 'dict'
+ else:
+ datatype = 'value'
+ # Determine comparison type
+ if option in default_values:
+ comparison = default_values[option]
+ elif datatype in ('list', 'value'):
+ comparison = 'strict'
+ else:
+ comparison = 'allow_more_present'
+ comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
+ # Keep track of aliases
+ comp_aliases[option] = option
+ for alias in data.get('aliases', []):
+ comp_aliases[alias] = option
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ comparisons['image']['comparison'] = 'ignore'
+ if self.module.params['purge_networks']:
+ comparisons['networks']['comparison'] = 'strict'
+ # Process options
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option, v in comparisons.items():
+ if option == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ v['comparison'] = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+                    if key in all_options:
+                        self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+                                  "since it does not correspond to the container's state!" % key)
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ comparisons[key_main]['comparison'] = value
+ elif value == 'allow_more_present':
+ if comparisons[key_main]['type'] == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ comparisons[key_main]['comparison'] = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
+ # Add implicit options
+ comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
+ comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
+ comparisons['disable_healthcheck'] = dict(type='value',
+ comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
+ name='disable_healthcheck')
+ # Check legacy values
+ if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
+ self.comparisons = comparisons
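+        # For illustration (hypothetical task values): with
+        #   comparisons: {'*': 'ignore', 'env': 'strict', 'image': 'strict'}
+        # the wildcard first switches every comparison to 'ignore' (skipping
+        # 'networks' unless the networks option is actually given), after which
+        # the explicit 'env' and 'image' entries are set back to 'strict'.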
+
+ def _get_additional_minimal_versions(self):
+ stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
+ stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
+ if stop_timeout_supported:
+ stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
+ "the container's stop_timeout configuration. "
+ "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
+ else:
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
+ "update the container's stop_timeout configuration." % (self.docker_api_version_str,))
+ self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
+
+ def __init__(self, **kwargs):
+ option_minimal_versions = dict(
+ # internal options
+ log_config=dict(),
+ publish_all_ports=dict(),
+ ports=dict(),
+ volume_binds=dict(),
+ name=dict(),
+ # normal options
+ device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'),
+ dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
+ ipc_mode=dict(docker_api_version='1.25'),
+ mac_address=dict(docker_api_version='1.25'),
+ oom_score_adj=dict(docker_api_version='1.22'),
+ shm_size=dict(docker_api_version='1.22'),
+ stop_signal=dict(docker_api_version='1.21'),
+ tmpfs=dict(docker_api_version='1.22'),
+ volume_driver=dict(docker_api_version='1.21'),
+ memory_reservation=dict(docker_api_version='1.21'),
+ kernel_memory=dict(docker_api_version='1.21'),
+ auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
+ init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
+ userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
+ pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
+ # specials
+ ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22',
+ detect_usage=detect_ipvX_address_usage,
+ usage_msg='ipv4_address or ipv6_address in networks'),
+ stop_timeout=dict(), # see _get_additional_minimal_versions()
+ )
+
+ super(AnsibleDockerClientContainer, self).__init__(
+ option_minimal_versions=option_minimal_versions,
+ option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
+ **kwargs
+ )
+
+ self.image_inspect_source = 'Config'
+ if self.docker_api_version < LooseVersion('1.21'):
+ self.image_inspect_source = 'ContainerConfig'
+
+ self._get_additional_minimal_versions()
+ self._parse_comparisons()
+
+ if self.module.params['container_default_behavior'] is None:
+ self.module.params['container_default_behavior'] = 'compatibility'
+ self.module.deprecate(
+ 'The container_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 3.0.0. To remove this warning, please specify an explicit value for it now',
+ version='3.0.0', collection_name='community.general' # was Ansible 2.14
+ )
+ if self.module.params['container_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ auto_remove=False,
+ detach=True,
+ init=False,
+ interactive=False,
+ memory="0",
+ paused=False,
+ privileged=False,
+ read_only=False,
+ tty=False,
+ )
+ for param, value in old_default_values.items():
+ if self.module.params[param] is None:
+ self.module.params[param] = value
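+            # Net effect of 'compatibility' mode (sketch): e.g. an unset detach
+            # becomes True and an unset memory becomes "0", reproducing the
+            # defaults used before container_default_behavior existed.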
+
+
+def main():
+ argument_spec = dict(
+ auto_remove=dict(type='bool'),
+ blkio_weight=dict(type='int'),
+ capabilities=dict(type='list', elements='str'),
+ cap_drop=dict(type='list', elements='str'),
+ cleanup=dict(type='bool', default=False),
+ command=dict(type='raw'),
+ comparisons=dict(type='dict'),
+ container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ cpu_period=dict(type='int'),
+ cpu_quota=dict(type='int'),
+ cpus=dict(type='float'),
+ cpuset_cpus=dict(type='str'),
+ cpuset_mems=dict(type='str'),
+ cpu_shares=dict(type='int'),
+ detach=dict(type='bool'),
+ devices=dict(type='list', elements='str'),
+ device_read_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_write_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_read_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_write_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_requests=dict(type='list', elements='dict', options=dict(
+ capabilities=dict(type='list', elements='list'),
+ count=dict(type='int'),
+ device_ids=dict(type='list', elements='str'),
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ dns_servers=dict(type='list', elements='str'),
+ dns_opts=dict(type='list', elements='str'),
+ dns_search_domains=dict(type='list', elements='str'),
+ domainname=dict(type='str'),
+ entrypoint=dict(type='list', elements='str'),
+ env=dict(type='dict'),
+ env_file=dict(type='path'),
+ etc_hosts=dict(type='dict'),
+ exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
+ force_kill=dict(type='bool', default=False, aliases=['forcekill']),
+ groups=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ ignore_image=dict(type='bool', default=False),
+ image=dict(type='str'),
+ init=dict(type='bool'),
+ interactive=dict(type='bool'),
+ ipc_mode=dict(type='str'),
+ keep_volumes=dict(type='bool', default=True),
+ kernel_memory=dict(type='str'),
+ kill_signal=dict(type='str'),
+ labels=dict(type='dict'),
+ links=dict(type='list', elements='str'),
+ log_driver=dict(type='str'),
+ log_options=dict(type='dict', aliases=['log_opt']),
+ mac_address=dict(type='str'),
+ memory=dict(type='str'),
+ memory_reservation=dict(type='str'),
+ memory_swap=dict(type='str'),
+ memory_swappiness=dict(type='int'),
+ mounts=dict(type='list', elements='dict', options=dict(
+ target=dict(type='str', required=True),
+ source=dict(type='str'),
+ type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
+ read_only=dict(type='bool'),
+ consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
+ propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
+ no_copy=dict(type='bool'),
+ labels=dict(type='dict'),
+ volume_driver=dict(type='str'),
+ volume_options=dict(type='dict'),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='str'),
+ )),
+ name=dict(type='str', required=True),
+ network_mode=dict(type='str'),
+ networks=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str', required=True),
+ ipv4_address=dict(type='str'),
+ ipv6_address=dict(type='str'),
+ aliases=dict(type='list', elements='str'),
+ links=dict(type='list', elements='str'),
+ )),
+ networks_cli_compatible=dict(type='bool'),
+ oom_killer=dict(type='bool'),
+ oom_score_adj=dict(type='int'),
+ output_logs=dict(type='bool', default=False),
+ paused=dict(type='bool'),
+ pid_mode=dict(type='str'),
+ pids_limit=dict(type='int'),
+ privileged=dict(type='bool'),
+ published_ports=dict(type='list', elements='str', aliases=['ports']),
+ pull=dict(type='bool', default=False),
+ purge_networks=dict(type='bool', default=False),
+ read_only=dict(type='bool'),
+ recreate=dict(type='bool', default=False),
+ removal_wait_timeout=dict(type='float'),
+ restart=dict(type='bool', default=False),
+ restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
+ restart_retries=dict(type='int'),
+ runtime=dict(type='str'),
+ security_opts=dict(type='list', elements='str'),
+ shm_size=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
+ stop_signal=dict(type='str'),
+ stop_timeout=dict(type='int'),
+ sysctls=dict(type='dict'),
+ tmpfs=dict(type='list', elements='str'),
+ trust_image_content=dict(type='bool', default=False, removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ ulimits=dict(type='list', elements='str'),
+ user=dict(type='str'),
+ userns_mode=dict(type='str'),
+ uts=dict(type='str'),
+ volume_driver=dict(type='str'),
+ volumes=dict(type='list', elements='str'),
+ volumes_from=dict(type='list', elements='str'),
+ working_dir=dict(type='str'),
+ )
+
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClientContainer(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module.params['networks_cli_compatible'] is None and client.module.params['networks']:
+ client.module.deprecate(
+ 'Please note that docker_container handles networks slightly different than docker CLI. '
+ 'If you specify networks, the default network will still be attached as the first network. '
+ '(You can specify purge_networks to remove all networks not explicitly listed.) '
+ 'This behavior will change in community.general 2.0.0. You can change the behavior now by setting '
+ 'the new `networks_cli_compatible` option to `yes`, and remove this warning by setting '
+ 'it to `no`',
+ version='2.0.0', collection_name='community.general', # was Ansible 2.12
+ )
+ if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None:
+ client.module.deprecate(
+ 'Please note that the default value for `network_mode` will change from not specified '
+ '(which is equal to `default`) to the name of the first network in `networks` if '
+ '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can '
+ 'change the behavior now by explicitly setting `network_mode` to the name of the first '
+ 'network in `networks`, and remove this warning by setting `network_mode` to `default`. '
+ 'Please make sure that the value you set to `network_mode` equals the inspection result '
+ 'for existing containers, otherwise the module will recreate them. You can find out the '
+ 'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
+ version='3.0.0', collection_name='community.general', # was Ansible 2.14
+ )
+
+ try:
+ cm = ContainerManager(client)
+ client.module.exit_json(**sanitize_result(cm.results))
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container_info.py
new file mode 100644
index 00000000..80025067
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about a docker container
+
+description:
+ - Retrieves facts about a docker container.
+ - Essentially returns the output of C(docker inspect <name>), similar to what M(community.general.docker_container)
+ returns for a non-absent container.
+
+
+options:
+ name:
+ description:
+ - The name of the container to inspect.
+      - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Get info on container
+ community.general.docker_container_info:
+ name: mydata
+ register: result
+
+- name: Does container exist?
+ ansible.builtin.debug:
+ msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+ ansible.builtin.debug:
+ var: result.container
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the container exists.
+ type: bool
+ returned: always
+ sample: true
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Will be C(none) if container does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ container = client.get_container(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=(True if container else False),
+ container=container,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_host_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_host_info.py
new file mode 100644
index 00000000..674f8ad0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_host_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about docker host and lists of objects of its services
+
+description:
+ - Retrieves facts about a docker host.
+ - Essentially returns the output of C(docker system info).
+  - The module can also list object names for containers, images, networks and volumes,
+    and query information on disk usage.
+ - The output differs depending on API version of the docker daemon.
+ - If the docker daemon cannot be contacted or does not meet the API version requirements,
+ the module will fail.
+
+
+options:
+ containers:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to list.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to list images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to list.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to list networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to list volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ disk_usage:
+ description:
+ - Summary information on used disk space by all Docker layers.
+ - The output is a sum of images, volumes, containers and build cache.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+      - When set to C(yes), and if I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes),
+        the output will contain verbose information about the objects, matching the full output of the related API method.
+        For details see the documentation of your version of Docker API at L(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the I(_info) module
+        for each type of object.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+ community.general.docker_host_info:
+ register: result
+
+- name: Get info on docker host and list images
+ community.general.docker_host_info:
+ images: yes
+ register: result
+
+- name: Get info on docker host and list images matching the filter
+ community.general.docker_host_info:
+ images: yes
+ images_filters:
+ label: "mylabel"
+ register: result
+
+- name: Get info on docker host and verbose list images
+ community.general.docker_host_info:
+ images: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on docker host and used disk space
+ community.general.docker_host_info:
+ disk_usage: yes
+ register: result
+
+- ansible.builtin.debug:
+ var: result.host_info
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+
+host_info:
+ description:
+ - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+ returned: always
+ type: dict
+volumes:
+ description:
+ - List of dict objects containing the basic information about each volume.
+      Keys match the C(docker volume ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(volumes) is C(yes)
+ type: list
+ elements: dict
+networks:
+ description:
+ - List of dict objects containing the basic information about each network.
+      Keys match the C(docker network ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(networks) is C(yes)
+ type: list
+ elements: dict
+containers:
+ description:
+ - List of dict objects containing the basic information about each container.
+      Keys match the C(docker container ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(containers) is C(yes)
+ type: list
+ elements: dict
+images:
+ description:
+ - List of dict objects containing the basic information about each image.
+      Keys match the C(docker image ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(images) is C(yes)
+ type: list
+ elements: dict
+disk_usage:
+ description:
+    - Summary information on disk usage by images, containers and volumes on the docker host
+ unless I(verbose_output=yes). See description for I(verbose_output).
+ returned: When I(disk_usage) is C(yes)
+ type: dict
+
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # Missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import clean_dict_booleans_for_docker_api
+
+
+class DockerHostManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerHostManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+ self.results['host_info'] = self.get_docker_host_info()
+
+ if self.client.module.params['disk_usage']:
+ self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+ def get_docker_host_info(self):
+ try:
+ return self.client.info()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_disk_usage_facts(self):
+ try:
+ if self.verbose_output:
+ return self.client.df()
+ else:
+ return dict(LayersSize=self.client.df()['LayersSize'])
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+    def get_docker_items_list(self, docker_object=None, filters=None):
+ items = None
+ items_list = []
+
+ header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+ header_volumes = ['Driver', 'Name']
+ header_images = ['Id', 'RepoTags', 'Created', 'Size']
+ header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+ filter_arg = dict()
+ if filters:
+ filter_arg['filters'] = filters
+ try:
+ if docker_object == 'containers':
+ items = self.client.containers(**filter_arg)
+ elif docker_object == 'networks':
+ items = self.client.networks(**filter_arg)
+ elif docker_object == 'images':
+ items = self.client.images(**filter_arg)
+ elif docker_object == 'volumes':
+ items = self.client.volumes(**filter_arg)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ if docker_object != 'volumes':
+ return items
+ else:
+ return items['Volumes']
+
+ if docker_object == 'volumes':
+ items = items['Volumes']
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'containers':
+ for key in header_containers:
+ item_record[key] = item.get(key)
+ elif docker_object == 'networks':
+ for key in header_networks:
+ item_record[key] = item.get(key)
+ elif docker_object == 'images':
+ for key in header_images:
+ item_record[key] = item.get(key)
+ elif docker_object == 'volumes':
+ for key in header_volumes:
+ item_record[key] = item.get(key)
+ items_list.append(item_record)
+
+ return items_list
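+    # For illustration (hypothetical volume name): with docker_object='volumes'
+    # and verbose_output=no, each returned entry is trimmed to the
+    # header_volumes keys only, e.g. {'Driver': 'local', 'Name': 'myvolume'}.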
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ disk_usage=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+
+ option_minimal_versions = dict(
+        networks_filters=dict(docker_py_version='2.0.2'),
+ disk_usage=dict(docker_py_version='2.2.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerHostManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image.py
new file mode 100644
index 00000000..1e2976be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image.py
@@ -0,0 +1,1021 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images
+
+
+description:
+ - Build, load or pull an image, making the image available for creating containers. Also supports tagging an
+ image into a repository and archiving an image to a .tar file.
+ - Since Ansible 2.8, it is recommended to explicitly specify the image's source (I(source) can be C(build),
+ C(load), C(pull) or C(local)). This will be required from community.general 2.0.0 on.
+
+options:
+ source:
+ description:
+ - "Determines where the module will try to retrieve the image from."
+ - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+ be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+ be specified when this value is used."
+ - "Use C(pull) to pull the image from a registry."
+ - "Use C(local) to make sure that the image is already available on the local
+ docker daemon, i.e. do not try to build, pull or load the image."
+ - "Before community.general 2.0.0, the value of this option will be auto-detected
+ to be backwards compatible, but a warning will be issued if it is not
+ explicitly specified. From community.general 2.0.0 on, auto-detection will be disabled
+ and this option will be made mandatory."
+ type: str
+ choices:
+ - build
+ - load
+ - pull
+ - local
+ build:
+ description:
+ - "Specifies options used for building images."
+ type: dict
+ suboptions:
+ cache_from:
+ description:
+ - List of image names to consider as cache source.
+ type: list
+ elements: str
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ type: str
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ type: int
+ path:
+ description:
+          - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ type: path
+ required: yes
+ pull:
+ description:
+          - When building an image, download any updates to the FROM image in the Dockerfile.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ type: bool
+ default: yes
+ network:
+ description:
+ - The network to use for C(RUN) build instructions.
+ type: str
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ type: bool
+ default: no
+ etc_hosts:
+ description:
+ - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+ type: dict
+ args:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_config_proxy:
+ description:
+ - If set to C(yes) and a proxy configuration is specified in the docker client configuration
+ (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+ be set in the container being built.
+ - Needs Docker SDK for Python >= 3.7.0.
+ type: bool
+ target:
+ description:
+ - When building an image specifies an intermediate build stage by
+ name as a final stage for the resulting image.
+ type: str
+ archive_path:
+ description:
+ - Use with state C(present) to archive an image to a .tar file.
+ type: path
+ load_path:
+ description:
+ - Use with state C(present) to load an image from a .tar file.
+ - Set I(source) to C(load) if you want to load the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used (except
+ if I(path) is specified as well, in which case building will take precedence).
+ From community.general 2.0.0 on, you have to set I(source) to C(load).
+ type: path
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ - Please use I(build.dockerfile) instead. This option will be removed in community.general 2.0.0.
+ type: str
+ force:
+ description:
+      - Use with state C(absent) to un-tag and remove all images matching the specified name. Use with state
+ C(present) to build, load or pull an image when the image already exists. Also use with state C(present)
+ to force tagging an image.
+ - Please stop using this option, and use the more specialized force options
+ I(force_source), I(force_absent) and I(force_tag) instead.
+ - This option will be removed in community.general 2.0.0.
+ type: bool
+ force_source:
+ description:
+ - Use with state C(present) to build, load or pull an image (depending on the
+ value of the I(source) option) when the image already exists.
+ type: bool
+ default: false
+ force_absent:
+ description:
+      - Use with state C(absent) to un-tag and remove all images matching the specified name.
+ type: bool
+ default: false
+ force_tag:
+ description:
+ - Use with state C(present) to force tagging an image.
+ type: bool
+ default: false
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ - Please use I(build.http_timeout) instead. This option will be removed in community.general 2.0.0.
+ type: int
+ name:
+ description:
+ - "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
+ When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
+ - Note that image IDs (hashes) are not supported.
+ type: str
+ required: yes
+ path:
+ description:
+      - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ - Set I(source) to C(build) if you want to build the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used. From community.general 2.0.0
+ on, you have to set I(source) to C(build).
+ - Please use I(build.path) instead. This option will be removed in community.general 2.0.0.
+ type: path
+ aliases:
+ - build_path
+ pull:
+ description:
+    - When building an image, download any updates to the FROM image in the Dockerfile.
+ - Please use I(build.pull) instead. This option will be removed in community.general 2.0.0.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ push:
+ description:
+ - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
+ type: bool
+ default: no
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ - Please use I(build.rm) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: yes
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ - Please use I(build.nocache) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: no
+ repository:
+ description:
+    - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
+      format I(repository:tag). If no tag is provided, will use the value of the I(tag) parameter or C(latest).
+ type: str
+ state:
+ description:
+ - Make assertions about the state of an image.
+ - When C(absent) an image will be removed. Use the force option to un-tag and remove all images
+ matching the provided name.
+ - When C(present) check if an image exists using the provided name and tag. If the image is not found or the
+ force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
+ - By default the image will be pulled from Docker Hub, or the registry specified in the image's name. Note that
+ this will change in community.general 2.0.0, so to make sure that you are pulling, set I(source) to C(pull). To build
+ the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source)
+ to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to
+ a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed.
+ - "*Note:* C(state=build) is DEPRECATED and will be removed in community.general 2.0.0. Specifying C(build) will behave the
+ same as C(present)."
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ - build
+ tag:
+ description:
+    - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
+      C(latest).
+ - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
+ type: str
+ default: latest
+ buildargs:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ - Please use I(build.args) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ - Please use I(build.container_limits) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_tls:
+ description:
+ - "DEPRECATED. Whether to use tls to connect to the docker daemon. Set to
+ C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that
+ the server's certificate is valid for the server."
+ - "*Note:* If you specify this option, it will set the value of the I(tls) or
+ I(validate_certs) parameters if not set to C(no)."
+ - Will be removed in community.general 2.0.0.
+ type: str
+ choices:
+ - 'no'
+ - 'encrypt'
+ - 'verify'
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Pavel Antonov (@softzilla)
+ - Chris Houseknecht (@chouseknecht)
+ - Sorin Sbarnea (@ssbarnea)
+
+'''
+
+EXAMPLES = '''
+
+- name: Pull an image
+ community.general.docker_image:
+ name: pacur/centos-7
+ source: pull
+
+- name: Tag and push to docker hub
+ community.general.docker_image:
+ name: pacur/centos-7:56
+ repository: dcoppenhagan/myimage:7.56
+ push: yes
+ source: local
+
+- name: Tag and push to local registry
+ community.general.docker_image:
+ # Image will be centos:7
+ name: centos
+ # Will be pushed to localhost:5000/centos:7
+ repository: localhost:5000/centos
+ tag: 7
+ push: yes
+ source: local
+
+- name: Add tag latest to image
+ community.general.docker_image:
+ name: myimage:7.1.2
+ repository: myimage:latest
+    # As 'latest' is usually already present, we need to enable overwriting of existing tags:
+ force_tag: yes
+ source: local
+
+- name: Remove image
+ community.general.docker_image:
+ state: absent
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+
+- name: Build an image and push it to a private repo
+ community.general.docker_image:
+ build:
+ path: ./sinatra
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ push: yes
+ source: build
+
+- name: Archive image
+ community.general.docker_image:
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ archive_path: my_sinatra.tar
+ source: local
+
+- name: Load image from archive and push to a private registry
+ community.general.docker_image:
+ name: localhost:5000/myimages/sinatra
+ tag: v1
+ push: yes
+ load_path: my_sinatra.tar
+ source: load
+
+- name: Build image with build args
+ community.general.docker_image:
+ name: myimage
+ build:
+ path: /path/to/build/dir
+ args:
+ log_volume: /var/log/myapp
+ listen_port: 8080
+ source: build
+
+- name: Build image using cache source
+ community.general.docker_image:
+ name: myimage:latest
+ build:
+ path: /path/to/build/dir
+ # Use as cache source for building myimage
+ cache_from:
+ - nginx:latest
+ - alpine:3.8
+ source: build
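+
+# A hypothetical sketch, not part of the module's original examples: it exercises
+# the build.target option described above together with build.etc_hosts from the
+# argument spec. All names and addresses are illustrative only.
+- name: Build only the 'builder' stage of a multi-stage Dockerfile
+  community.general.docker_image:
+    name: myimage:builder
+    build:
+      path: /path/to/build/dir
+      # Stop at the intermediate stage named 'builder'
+      target: builder
+      # Extra /etc/hosts entries made available during the build
+      etc_hosts:
+        artifacts.internal: 10.0.0.5
+    source: build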
+'''
+
+RETURN = '''
+image:
+ description: Image inspection results for the affected image.
+ returned: success
+ type: dict
+ sample: {}
+stdout:
+ description: Docker build output when building an image.
+ returned: success
+ type: str
+ sample: ""
+ version_added: 1.3.0
+'''
+
+import errno
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ clean_dict_booleans_for_docker_api,
+ docker_version,
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ is_valid_tag,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+if docker_version is not None:
+ try:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.auth import resolve_repository_name
+ else:
+ from docker.auth.auth import resolve_repository_name
+ from docker.utils.utils import parse_repository_tag
+ from docker.errors import DockerException
+ except ImportError:
+ # missing Docker SDK for Python handled in module_utils.docker.common
+ pass
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.source = parameters['source']
+ build = parameters['build'] or dict()
+ self.archive_path = parameters.get('archive_path')
+ self.cache_from = build.get('cache_from')
+ self.container_limits = build.get('container_limits')
+ self.dockerfile = build.get('dockerfile')
+ self.force_source = parameters.get('force_source')
+ self.force_absent = parameters.get('force_absent')
+ self.force_tag = parameters.get('force_tag')
+ self.load_path = parameters.get('load_path')
+ self.name = parameters.get('name')
+ self.network = build.get('network')
+ self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+ self.nocache = build.get('nocache', False)
+ self.build_path = build.get('path')
+ self.pull = build.get('pull')
+ self.target = build.get('target')
+ self.repository = parameters.get('repository')
+ self.rm = build.get('rm', True)
+ self.state = parameters.get('state')
+ self.tag = parameters.get('tag')
+ self.http_timeout = build.get('http_timeout')
+ self.push = parameters.get('push')
+ self.buildargs = build.get('args')
+ self.use_config_proxy = build.get('use_config_proxy')
+
+ # If name contains a tag, it takes precedence over tag parameter.
+ if not is_image_name_id(self.name):
+ repo, repo_tag = parse_repository_tag(self.name)
+ if repo_tag:
+ self.name = repo
+ self.tag = repo_tag
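+                # For example, name='nginx:1.19' is split into name='nginx' and
+                # tag='1.19', overriding any value passed via the tag parameter.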
+
+ if self.state == 'present':
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def present(self):
+ '''
+ Handles state = 'present', which includes building, loading or pulling an image,
+        depending on user-provided parameters.
+
+ :returns None
+ '''
+ image = self.client.find_image(name=self.name, tag=self.tag)
+
+ if not image or self.force_source:
+ if self.source == 'build':
+ # Build the image
+ if not os.path.isdir(self.build_path):
+ self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.log("Building image %s" % image_name)
+ self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results.update(self.build_image())
+
+ elif self.source == 'load':
+ # Load the image from an archive
+ if not os.path.isfile(self.load_path):
+ self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
+ self.load_path))
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'] = self.load_image()
+ elif self.source == 'pull':
+ # pull the image
+ self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
+ elif self.source == 'local':
+ if image is None:
+ name = self.name
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ self.client.fail('Cannot find the image %s locally.' % name)
+ if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if self.archive_path:
+ self.archive_image(self.name, self.tag)
+
+ if self.push and not self.repository:
+ self.push_image(self.name, self.tag)
+ elif self.repository:
+ self.tag_image(self.name, self.tag, self.repository, push=self.push)
+
+ def absent(self):
+ '''
+ Handles state = 'absent', which removes an image.
+
+ :return None
+ '''
+ name = self.name
+ if is_image_name_id(name):
+ image = self.client.find_image_by_id(name)
+ else:
+ image = self.client.find_image(name, self.tag)
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ if image:
+ if not self.check_mode:
+ try:
+ self.client.remove_image(name, force=self.force_absent)
+ except Exception as exc:
+ self.fail("Error removing image %s - %s" % (name, str(exc)))
+
+ self.results['changed'] = True
+ self.results['actions'].append("Removed image %s" % (name))
+ self.results['image']['state'] = 'Deleted'
+
+ def archive_image(self, name, tag):
+ '''
+ Archive an image to a .tar file. Called when archive_path is passed.
+
+        :param name - name of the image. Type: str
+        :param tag - image tag. Type: str
+        :return None
+ '''
+
+ if not tag:
+ tag = "latest"
+
+ image = self.client.find_image(name=name, tag=tag)
+ if not image:
+ self.log("archive image: image %s:%s not found" % (name, tag))
+ return
+
+ image_name = "%s:%s" % (name, tag)
+ self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.log("Getting archive of image %s" % image_name)
+ try:
+ image = self.client.get_image(image_name)
+ except Exception as exc:
+ self.fail("Error getting image %s - %s" % (image_name, str(exc)))
+
+ try:
+ with open(self.archive_path, 'wb') as fd:
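+                    # In Docker SDK for Python >= 3.0.0, get_image() returns an iterable of
+                    # chunks; older versions return a response object with a stream() method.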
+ if self.client.docker_py_version >= LooseVersion('3.0.0'):
+ for chunk in image:
+ fd.write(chunk)
+ else:
+ for chunk in image.stream(2048, decode_content=False):
+ fd.write(chunk)
+ except Exception as exc:
+ self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
+
+ image = self.client.find_image(name=name, tag=tag)
+ if image:
+ self.results['image'] = image
+
+ def push_image(self, name, tag=None):
+ '''
+ If the name of the image contains a repository path, then push the image.
+
+ :param name Name of the image to push.
+ :param tag Use a specific tag.
+ :return: None
+ '''
+
+ repository = name
+ if not tag:
+ repository, tag = parse_repository_tag(name)
+ registry, repo_name = resolve_repository_name(repository)
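+        # Illustrative example: name='localhost:5000/centos' with tag='7' resolves to
+        # registry='localhost:5000' and repo_name='centos', so the image is pushed
+        # to localhost:5000/centos:7.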
+
+ self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+
+ if registry:
+ self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ status = None
+ try:
+ changed = False
+ for line in self.client.push(repository, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('errorDetail'):
+ raise Exception(line['errorDetail']['message'])
+ status = line.get('status')
+ if status == 'Pushing':
+ changed = True
+ self.results['changed'] = changed
+ except Exception as exc:
+ if re.search('unauthorized', str(exc)):
+ if re.search('authentication required', str(exc)):
+ self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
+ (registry, repo_name, tag, str(exc), registry))
+ else:
+ self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
+ (registry, repo_name, tag, str(exc)))
+ self.fail("Error pushing image %s: %s" % (repository, str(exc)))
+ self.results['image'] = self.client.find_image(name=repository, tag=tag)
+ if not self.results['image']:
+ self.results['image'] = dict()
+ self.results['image']['push_status'] = status
+
+ def tag_image(self, name, tag, repository, push=False):
+ '''
+ Tag an image into a repository.
+
+ :param name: name of the image. required.
+ :param tag: image tag.
+ :param repository: path to the repository. required.
+ :param push: bool. push the image once it's tagged.
+ :return: None
+ '''
+ repo, repo_tag = parse_repository_tag(repository)
+ if not repo_tag:
+ repo_tag = "latest"
+ if tag:
+ repo_tag = tag
+ image = self.client.find_image(name=repo, tag=repo_tag)
+ found = 'found' if image else 'not found'
+ self.log("image %s was %s" % (repo, found))
+
+ if not image or self.force_tag:
+ self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ self.results['changed'] = True
+ self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ if not self.check_mode:
+ try:
+                    # Finding the image does not always work, especially when running a localhost registry. In those
+                    # cases, if we don't set force=True, it errors.
+ image_name = name
+ if tag and not re.search(tag, name):
+ image_name = "%s:%s" % (name, tag)
+ tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
+ if not tag_status:
+ raise Exception("Tag operation failed.")
+ except Exception as exc:
+ self.fail("Error: failed to tag image - %s" % str(exc))
+ self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
+ if image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if push:
+ self.push_image(repo, repo_tag)
+
+ def build_image(self):
+ '''
+ Build an image
+
+ :return: image dict
+ '''
+ params = dict(
+ path=self.build_path,
+ tag=self.name,
+ rm=self.rm,
+ nocache=self.nocache,
+ timeout=self.http_timeout,
+ pull=self.pull,
+ forcerm=self.rm,
+ dockerfile=self.dockerfile,
+ decode=True,
+ )
+ if self.client.docker_py_version < LooseVersion('3.0.0'):
+ params['stream'] = True
+
+ if self.tag:
+ params['tag'] = "%s:%s" % (self.name, self.tag)
+ if self.container_limits:
+ params['container_limits'] = self.container_limits
+ if self.buildargs:
+ for key, value in self.buildargs.items():
+ self.buildargs[key] = to_native(value)
+ params['buildargs'] = self.buildargs
+ if self.cache_from:
+ params['cache_from'] = self.cache_from
+ if self.network:
+ params['network_mode'] = self.network
+ if self.extra_hosts:
+ params['extra_hosts'] = self.extra_hosts
+ if self.use_config_proxy:
+ params['use_config_proxy'] = self.use_config_proxy
+ # Due to a bug in docker-py, it will crash if
+ # use_config_proxy is True and buildargs is None
+ if 'buildargs' not in params:
+ params['buildargs'] = {}
+ if self.target:
+ params['target'] = self.target
+
+ build_output = []
+ for line in self.client.build(**params):
+ # line = json.loads(line)
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ build_line = line.get("stream") or line.get("status") or ''
+ build_output.append(build_line)
+
+ if line.get('error'):
+ if line.get('errorDetail'):
+ errorDetail = line.get('errorDetail')
+ self.fail(
+ "Error building %s - code: %s, message: %s, logs: %s" % (
+ self.name,
+ errorDetail.get('code'),
+ errorDetail.get('message'),
+ build_output))
+ else:
+ self.fail("Error building %s - message: %s, logs: %s" % (
+ self.name, line.get('error'), build_output))
+
+ return {"stdout": "\n".join(build_output),
+ "image": self.client.find_image(name=self.name, tag=self.tag)}
+
+ def load_image(self):
+ '''
+ Load an image from a .tar archive
+
+ :return: image dict
+ '''
+ # Load image(s) from file
+ load_output = []
+ has_output = False
+ try:
+ self.log("Opening image %s" % self.load_path)
+ with open(self.load_path, 'rb') as image_tar:
+ self.log("Loading image from %s" % self.load_path)
+ output = self.client.load_image(image_tar)
+ if output is not None:
+                    # Old versions of the Docker SDK for Python (before version 2.5.0) do not return anything.
+ # (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
+                    # Note that before that commit, something other than None was returned, but that was also
+ # only introduced in a commit that first appeared in 2.5.0 (see
+ # https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
+ # So the above check works for every released version of Docker SDK for Python.
+ has_output = True
+ for line in output:
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ load_line = line.get("stream") or line.get("status") or ''
+ load_output.append(load_line)
+ else:
+ if LooseVersion(docker_version) < LooseVersion('2.5.0'):
+ self.client.module.warn(
+ 'The installed version of the Docker SDK for Python does not return the loading results'
+ ' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
+                        ' loaded, whether multiple images were loaded, or whether the load actually succeeded.'
+ ' If you are not stuck with Python 2.6, *please* upgrade to a version newer than 2.5.0'
+ ' (2.5.0 was released in August 2017).'
+ )
+ else:
+ self.client.module.warn(
+ 'The API version of your Docker daemon is < 1.23, which does not return the image'
+ ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+                        ' expected image was loaded, whether multiple images were loaded, or whether the load'
+ ' actually succeeded. You should consider upgrading your Docker daemon.'
+ )
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ if has_output:
+ # We can only do this when we actually got some output from Docker daemon
+ loaded_images = set()
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.add(line[len('Loaded image:'):].strip())
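+                    # e.g. the output line 'Loaded image: myimage:v1' adds 'myimage:v1'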
+
+ if not loaded_images:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+ expected_image = '%s:%s' % (self.name, self.tag)
+ if expected_image not in loaded_images:
+ self.client.fail(
+ "The archive did not contain image '%s'. Instead, found %s." % (
+ expected_image, ', '.join(["'%s'" % image for image in sorted(loaded_images)])),
+ stdout='\n'.join(load_output))
+ loaded_images.remove(expected_image)
+
+ if loaded_images:
+ self.client.module.warn(
+ "The archive contained more images than specified: %s" % (
+ ', '.join(["'%s'" % image for image in sorted(loaded_images)]), ))
+
+ return self.client.find_image(self.name, self.tag)
+
+
+def main():
+ argument_spec = dict(
+ source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
+ build=dict(type='dict', options=dict(
+ cache_from=dict(type='list', elements='str'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ )),
+ dockerfile=dict(type='str'),
+ http_timeout=dict(type='int'),
+ network=dict(type='str'),
+ nocache=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ pull=dict(type='bool'),
+ rm=dict(type='bool', default=True),
+ args=dict(type='dict'),
+ use_config_proxy=dict(type='bool'),
+ target=dict(type='str'),
+ etc_hosts=dict(type='dict'),
+ )),
+ archive_path=dict(type='path'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ dockerfile=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force_source=dict(type='bool', default=False),
+ force_absent=dict(type='bool', default=False),
+ force_tag=dict(type='bool', default=False),
+ http_timeout=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ load_path=dict(type='path'),
+ name=dict(type='str', required=True),
+ nocache=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ path=dict(type='path', aliases=['build_path'], removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ pull=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ push=dict(type='bool', default=False),
+ repository=dict(type='str'),
+ rm=dict(type='bool', default=True, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ state=dict(type='str', default='present', choices=['absent', 'present', 'build']),
+ tag=dict(type='str', default='latest'),
+ use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ buildargs=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ )
+
+ required_if = [
+ # ('state', 'present', ['source']), -- enable in community.general 2.0.0
+ # ('source', 'build', ['build']), -- enable in community.general 2.0.0
+ ('source', 'load', ['load_path']),
+ ]
+
+ def detect_build_cache_from(client):
+ return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None
+
+ def detect_build_network(client):
+ return client.module.params['build'] and client.module.params['build'].get('network') is not None
+
+ def detect_build_target(client):
+ return client.module.params['build'] and client.module.params['build'].get('target') is not None
+
+ def detect_use_config_proxy(client):
+ return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None
+
+ def detect_etc_hosts(client):
+ return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
+
+ option_minimal_versions = dict()
+ option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
+ option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
+ option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target)
+ option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
+ option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts)
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.20',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ if client.module.params['state'] == 'build':
+ client.module.deprecate('The "build" state has been deprecated for a long time. '
+ 'Please use "present", which has the same meaning as "build".',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+ client.module.params['state'] = 'present'
+ if client.module.params['use_tls']:
+ client.module.deprecate('The "use_tls" option has been deprecated for a long time. '
+ 'Please use the "tls" and "validate_certs" options instead.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+
+ if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+ client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+ build_options = dict(
+ container_limits='container_limits',
+ dockerfile='dockerfile',
+ http_timeout='http_timeout',
+ nocache='nocache',
+ path='path',
+ pull='pull',
+ rm='rm',
+ buildargs='args',
+ )
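+    # Migrate the deprecated top-level options above into the corresponding 'build'
+    # suboptions, failing if an option is specified both at the top level and inside 'build'.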
+ for option, build_option in build_options.items():
+ default_value = None
+ if option in ('rm', ):
+ default_value = True
+ elif option in ('nocache', ):
+ default_value = False
+ if client.module.params[option] != default_value:
+ if client.module.params['build'] is None:
+ client.module.params['build'] = dict()
+ if client.module.params['build'].get(build_option, default_value) != default_value:
+ client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
+ client.module.params['build'][build_option] = client.module.params[option]
+ client.module.deprecate('Please specify build.%s instead of %s. The %s option '
+ 'has been renamed' % (build_option, option, option),
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if client.module.params['source'] == 'build':
+ if (not client.module.params['build'] or not client.module.params['build'].get('path')):
+ client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+ if client.module.params['build'].get('pull') is None:
+ client.module.deprecate("The default for build.pull is currently 'yes', but will be changed to "
+ "'no' in community.general 2.0.0. Please set build.pull explicitly to the value you need",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ client.module.params['build']['pull'] = True # TODO: change to False in community.general 2.0.0
+
+ if client.module.params['state'] == 'present' and client.module.params['source'] is None:
+ # Autodetection. To be removed in community.general 2.0.0.
+ if (client.module.params['build'] or dict()).get('path'):
+ client.module.params['source'] = 'build'
+ elif client.module.params['load_path']:
+ client.module.params['source'] = 'load'
+ else:
+ client.module.params['source'] = 'pull'
+ client.module.deprecate('The value of the "source" option was determined to be "%s". '
+ 'Please set the "source" option explicitly. Autodetection will '
+ 'be removed in community.general 2.0.0.' % client.module.params['source'],
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ if client.module.params['force']:
+ client.module.params['force_source'] = True
+ client.module.params['force_absent'] = True
+ client.module.params['force_tag'] = True
+ client.module.deprecate('The "force" option will be removed in community.general 2.0.0. Please '
+ 'use the "force_source", "force_absent" or "force_tag" option '
+ 'instead, depending on what you want to force.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ image={}
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_facts.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_facts.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+        where C(tag) is optional. If a tag is not provided, C(latest) will be used. Image IDs can be
+        used instead of image names.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
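+
+# A minimal sketch, not part of the original examples: omitting the name option
+# returns inspection results for all images available locally.
+- name: Inspect all locally available images
+  community.general.docker_image_info:
+  register: all_images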
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+        Look up and inspect each image name found in the names parameter.
+
+ :returns array of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_info.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_info.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+        where C(tag) is optional. If a tag is not provided, C(latest) will be used. Image IDs can be
+        used instead of image names.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+        Look up and inspect each image name found in the names parameter.
+
+ :returns array of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_login.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_login.py
new file mode 100644
index 00000000..6522e642
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_login.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+#
+# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+# Chris Houseknecht, <house@redhat.com>
+# James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry.
+description:
+ - Provides functionality similar to the "docker login" command.
+  - Authenticate with a docker registry and add the credentials to your local Docker config file, or to the
+    credential store associated with the registry. Adding the credentials to the config file or the credential
+ store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
+ and Docker SDK for Python without needing to provide credentials.
+ - Running in check mode will perform the authentication without updating the config file.
+options:
+ registry_url:
+ description:
+ - The registry URL.
+ type: str
+ default: "https://index.docker.io/v1/"
+ aliases:
+ - registry
+ - url
+ username:
+ description:
+ - The username for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - The plaintext password for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ email:
+ description:
+ - Does nothing, do not use.
+ - Will be removed in community.general 3.0.0.
+ type: str
+ reauthorize:
+ description:
+ - Refresh existing authentication found in the configuration file.
+ type: bool
+ default: no
+ aliases:
+ - reauth
+ config_path:
+ description:
+ - Custom path to the Docker CLI configuration file.
+ type: path
+ default: ~/.docker/config.json
+ aliases:
+ - dockercfg_path
+ state:
+ description:
+      - This controls the current state of the user. C(present) will log a user in, C(absent) will log them out.
+      - To log out, you only need the registry server, which defaults to DockerHub.
+ - Before 2.1 you could ONLY log in.
+ - Docker does not support 'logout' with a custom config file.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "L(Python bindings for docker credentials store API) >= 0.2.1
+ (use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
+ - "Docker API >= 1.20"
+author:
+ - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+
+- name: Log into private registry and force re-authorization
+ community.general.docker_login:
+ registry_url: your.private.registry.io
+ username: yourself
+ password: secrets3
+ reauthorize: yes
+
+- name: Log into DockerHub using a custom config file
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+ config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+ community.general.docker_login:
+ state: absent
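+
+# An illustrative sketch based on the option documentation above: for state=absent
+# only the registry URL is needed, so logging out of a private registry looks like this.
+- name: Log out of a private registry
+  community.general.docker_login:
+    registry_url: your.private.registry.io
+    state: absent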
+'''
+
+RETURN = '''
+login_results:
+ description: Results from the login.
+ returned: when state='present'
+ type: dict
+ sample: {
+ "serveraddress": "localhost:5000",
+ "username": "testuser"
+ }
+'''
+
+import base64
+import json
+import os
+import re
+import traceback
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from docker.errors import DockerException
+ from docker import auth
+
+ # Earlier versions of docker/docker-py put decode_auth
+ # in docker.auth.auth instead of docker.auth
+ if hasattr(auth, 'decode_auth'):
+ from docker.auth import decode_auth
+ else:
+ from docker.auth.auth import decode_auth
+
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ HAS_DOCKER_PY,
+ DEFAULT_DOCKER_REGISTRY,
+ DockerBaseClass,
+ EMAIL_REGEX,
+ RequestException,
+)
+
+NEEDS_DOCKER_PYCREDS = False
+
+# Early versions of docker/docker-py rely on docker-pycreds for
+# the credential store api.
+if HAS_DOCKER_PY:
+ try:
+ from docker.credentials.errors import StoreError, CredentialsNotFound
+ from docker.credentials import Store
+ except ImportError:
+ try:
+ from dockerpycreds.errors import StoreError, CredentialsNotFound
+ from dockerpycreds.store import Store
+ except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ NEEDS_DOCKER_PYCREDS = True
+
+
+if NEEDS_DOCKER_PYCREDS:
+    # docker-pycreds missing, so we need to create some placeholder classes
+    # to allow instantiation.
+
+ class StoreError(Exception):
+ pass
+
+ class CredentialsNotFound(Exception):
+ pass
+
+
+class DockerFileStore(object):
+ '''
+ A custom credential store class that implements only the functionality we need to
+    update the docker config file when no credential helper is provided.
+ '''
+
+ program = "<legacy config>"
+
+ def __init__(self, config_path):
+ self._config_path = config_path
+
+ # Make sure we have a minimal config if none is available.
+ self._config = dict(
+ auths=dict()
+ )
+
+ try:
+ # Attempt to read the existing config.
+ with open(self._config_path, "r") as f:
+ config = json.load(f)
+ except (ValueError, IOError):
+ # No config found or an invalid config found so we'll ignore it.
+ config = dict()
+
+        # Update our internal config with whatever was loaded.
+ self._config.update(config)
+
+ @property
+ def config_path(self):
+ '''
+ Return the config path configured in this DockerFileStore instance.
+ '''
+
+ return self._config_path
+
+ def get(self, server):
+ '''
+ Retrieve credentials for `server` if there are any in the config file.
+        Otherwise raise a `CredentialsNotFound` error.
+ '''
+
+ server_creds = self._config['auths'].get(server)
+ if not server_creds:
+ raise CredentialsNotFound('No matching credentials')
+
+ (username, password) = decode_auth(server_creds['auth'])
+
+ return dict(
+ Username=username,
+ Secret=password
+ )
+
+ def _write(self):
+ '''
+ Write config back out to disk.
+ '''
+        # Make sure the directory exists
+        config_dir = os.path.dirname(self._config_path)
+        if not os.path.exists(config_dir):
+            os.makedirs(config_dir)
+        # Write config; make sure it has permissions 0o600
+ content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+ f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ try:
+ os.write(f, content)
+ finally:
+ os.close(f)
+
+ def store(self, server, username, password):
+ '''
+        Add credentials for `server` to the current configuration.
+ '''
+
+ b64auth = base64.b64encode(
+ to_bytes(username) + b':' + to_bytes(password)
+ )
+ auth = to_text(b64auth)
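+        # Illustrative: username 'user' and password 'pass' yield the base64
+        # string 'dXNlcjpwYXNz', the format docker stores in config.json.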
+
+ # build up the auth structure
+ if 'auths' not in self._config:
+ self._config['auths'] = dict()
+
+ self._config['auths'][server] = dict(
+ auth=auth
+ )
+
+ self._write()
+
+ def erase(self, server):
+ '''
+ Remove credentials for the given server from the configuration.
+ '''
+
+ if 'auths' in self._config and server in self._config['auths']:
+ self._config['auths'].pop(server)
+ self._write()
+
+
+class LoginManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(LoginManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.registry_url = parameters.get('registry_url')
+ self.username = parameters.get('username')
+ self.password = parameters.get('password')
+ self.email = parameters.get('email')
+ self.reauthorize = parameters.get('reauthorize')
+ self.config_path = parameters.get('config_path')
+ self.state = parameters.get('state')
+
+ def run(self):
+ '''
+        Do the actual work of this task here. This allows instantiation for partial
+ testing.
+ '''
+
+ if self.state == 'present':
+ self.login()
+ else:
+ self.logout()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def login(self):
+ '''
+ Log into the registry with provided username/password. On success update the config
+ file with the new authorization.
+
+ :return: None
+ '''
+
+ if self.email and not re.match(EMAIL_REGEX, self.email):
+ self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
+ "/%s/" % (EMAIL_REGEX))
+
+ self.results['actions'].append("Logged into %s" % (self.registry_url))
+ self.log("Log into %s with username %s" % (self.registry_url, self.username))
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=self.reauthorize,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+
+ # If user is already logged in, then response contains password for user
+ if 'password' in response:
+            # This returns the correct password if the user is logged in and a wrong password is given.
+            # So if it returns a password different from the one we passed, and the user did not
+            # request reauthorization, do it anyway.
+ if not self.reauthorize and response['password'] != self.password:
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=True,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+ response.pop('password', None)
+ self.results['login_result'] = response
+
+ self.update_credentials()
+
+ def logout(self):
+ '''
+ Log out of the registry. On success update the config file.
+
+ :return: None
+ '''
+
+ # Get the configuration store.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
+ self.results['changed'] = False
+ return
+
+ if not self.check_mode:
+ store.erase(self.registry_url)
+ self.results['changed'] = True
+
+ def update_credentials(self):
+ '''
+        If the authorization is not stored, attempt to store authorization values via
+ the appropriate credential helper or to the config file.
+
+ :return: None
+ '''
+
+ # Check to see if credentials already exist.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ current = dict(
+ Username='',
+ Secret=''
+ )
+
+ if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
+ if not self.check_mode:
+ store.store(self.registry_url, self.username, self.password)
+ self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
+ self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
+ store.program, self.registry_url))
+ self.results['changed'] = True
+
+ def get_credential_store_instance(self, registry, dockercfg_path):
+ '''
+ Return an instance of docker.credentials.Store used by the given registry.
+
+        :return: A Store, or a DockerFileStore fallback if no credential helper is configured
+        :rtype: Union[docker.credentials.Store, DockerFileStore]
+ '''
+
+ # Older versions of docker-py don't have this feature.
+ try:
+ credstore_env = self.client.credstore_env
+ except AttributeError:
+ credstore_env = None
+
+ config = auth.load_config(config_path=dockercfg_path)
+
+ if hasattr(auth, 'get_credential_store'):
+ store_name = auth.get_credential_store(config, registry)
+ elif 'credsStore' in config:
+ store_name = config['credsStore']
+ else:
+ store_name = None
+
+ # Make sure that there is a credential helper before trying to instantiate a
+ # Store object.
+ if store_name:
+ self.log("Found credential store %s" % store_name)
+ return Store(store_name, environment=credstore_env)
+
+ return DockerFileStore(dockercfg_path)
+
+
+def main():
+
+ argument_spec = dict(
+ registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ reauthorize=dict(type='bool', default=False, aliases=['reauth']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
+ )
+
+ required_if = [
+ ('state', 'present', ['username', 'password']),
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ login_result={}
+ )
+
+ manager = LoginManager(client, results)
+ manager.run()
+
+ if 'actions' in results:
+ del results['actions']
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py
new file mode 100644
index 00000000..f70cc67d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+ - Create/remove Docker networks and connect containers to them.
+ - Performs largely the same function as the "docker network" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the network to operate on.
+ type: str
+ required: yes
+ aliases:
+ - network_name
+
+ connected:
+ description:
+ - List of container names or container IDs to connect to a network.
+ - Please note that the module only makes sure that these containers are connected to the network,
+ but does not care about connection options. If you rely on specific IP addresses etc., use the
+ M(community.general.docker_container) module to ensure your containers are correctly connected to this network.
+ type: list
+ elements: str
+ aliases:
+ - containers
+
+ driver:
+ description:
+ - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
+ type: str
+ default: bridge
+
+ driver_options:
+ description:
+ - Dictionary of network settings. Consult docker docs for valid options and values.
+ type: dict
+
+ force:
+ description:
+ - With state C(absent) forces disconnecting all containers from the
+ network prior to deleting the network. With state C(present) will
+ disconnect all containers, delete the network and re-create the
+ network.
+ - This option is required if you have changed the IPAM or driver options
+ and want an existing network to be updated to use the new options.
+ type: bool
+ default: no
+
+ appends:
+ description:
+ - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+ - Use I(appends) to leave existing containers connected.
+ type: bool
+ default: no
+ aliases:
+ - incremental
+
+ enable_ipv6:
+ description:
+ - Enable IPv6 networking.
+ type: bool
+
+ ipam_driver:
+ description:
+ - Specify an IPAM driver.
+ type: str
+
+ ipam_driver_options:
+ description:
+ - Dictionary of IPAM driver options.
+ type: dict
+
+ ipam_options:
+ description:
+ - Dictionary of IPAM options.
+      - Deprecated in Ansible 2.8, will be removed in community.general 2.0.0. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
+ options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
+ the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
+ parameter.
+ type: dict
+ suboptions:
+ subnet:
+ description:
+        - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ ipam_config:
+ description:
+ - List of IPAM config blocks. Consult
+ L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+ Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+        - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ state:
+ description:
+ - C(absent) deletes the network. If a network has connected containers, it
+ cannot be deleted. Use the I(force) option to disconnect all containers
+ and delete the network.
+ - C(present) creates the network, if it does not already exist with the
+ specified parameters, and connects the list of containers provided via
+ the connected parameter. Containers not on the list will be disconnected.
+ An empty list will leave no containers connected to the network. Use the
+ I(appends) option to leave existing containers connected. Use the I(force)
+ options to force re-creation of the network.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+ internal:
+ description:
+ - Restrict external access to the network.
+ type: bool
+
+ labels:
+ description:
+ - Dictionary of labels.
+ type: dict
+
+ scope:
+ description:
+ - Specify the network's scope.
+ type: str
+ choices:
+ - local
+ - global
+ - swarm
+
+ attachable:
+ description:
+ - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+ type: bool
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+notes:
+ - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
+    It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+    connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+    network, use the M(community.general.docker_container) module to loop over your containers and make sure they are connected properly.
+ - The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
+ network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+ fail as well.
+
+author:
+ - "Ben Keith (@keitwb)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.10.0"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+ community.general.docker_network:
+ name: network_one
+
+- name: Remove all but selected list of containers
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ - container_b
+ - container_c
+
+- name: Remove a single container
+ community.general.docker_network:
+ name: network_one
+ connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ appends: yes
+
+- name: Create a network with driver options
+ community.general.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+ community.general.docker_network:
+ name: network_three
+ ipam_config:
+ - subnet: 172.3.27.0/24
+ gateway: 172.3.27.2
+ iprange: 172.3.27.0/26
+ aux_addresses:
+ host1: 172.3.27.3
+ host2: 172.3.27.4
+
+- name: Create a network with labels
+ community.general.docker_network:
+ name: network_four
+ labels:
+ key1: value1
+ key2: value2
+
+- name: Create a network with IPv6 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_one
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_two
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+ community.general.docker_network:
+ name: network_one
+ state: absent
+ force: yes
+'''
+
+RETURN = '''
+network:
+ description:
+ - Network inspection results for the affected network.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_network). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ docker_version,
+ DifferenceTracker,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.types import IPAMPool, IPAMConfig
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.name = None
+ self.connected = None
+ self.driver = None
+ self.driver_options = None
+ self.ipam_driver = None
+ self.ipam_driver_options = None
+ self.ipam_options = None
+ self.ipam_config = None
+ self.appends = None
+ self.force = None
+ self.internal = None
+ self.labels = None
+ self.debug = None
+ self.enable_ipv6 = None
+ self.scope = None
+ self.attachable = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def container_names_in_network(network):
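+    """Return the names of the containers attached to a network.
+
+    A sketch of the expected shape (from docker network inspect output):
+    {'Containers': {'<container-id>': {'Name': 'web'}}} yields ['web'];
+    an empty or null 'Containers' entry yields [].
+    """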
+ return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$')
+
+
+def validate_cidr(cidr):
+ """Validate CIDR. Return IP version of a CIDR string on success.
+
+ :param cidr: Valid CIDR
+ :type cidr: str
+ :return: ``ipv4`` or ``ipv6``
+ :rtype: str
+ :raises ValueError: If ``cidr`` is not a valid CIDR
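+
+    A doctest-style sketch (values chosen to match the regexes above):
+
+    >>> validate_cidr('192.168.0.0/16')
+    'ipv4'
+    >>> validate_cidr('fdd1:ac8c:0557:7ce1::/64')
+    'ipv6'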
+ """
+ if CIDR_IPV4.match(cidr):
+ return 'ipv4'
+ elif CIDR_IPV6.match(cidr):
+ return 'ipv6'
+ raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
+
+
+def normalize_ipam_config_key(key):
+ """Normalizes IPAM config keys returned by Docker API to match Ansible keys.
+
+ :param key: Docker API key
+ :type key: str
+    :return: Ansible module key
+    :rtype: str
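+
+    A doctest-style sketch:
+
+    >>> normalize_ipam_config_key('AuxiliaryAddresses')
+    'aux_addresses'
+    >>> normalize_ipam_config_key('Subnet')
+    'subnet'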
+ """
+ special_cases = {
+ 'AuxiliaryAddresses': 'aux_addresses'
+ }
+ return special_cases.get(key, key.lower())
+
+
+def dicts_are_essentially_equal(a, b):
+ """Make sure that a is a subset of b, where None entries of a are ignored."""
+ for k, v in a.items():
+ if v is None:
+ continue
+ if b.get(k) != v:
+ return False
+ return True
+
+
+class DockerNetworkManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_network = self.get_existing_network()
+
+ if not self.parameters.connected and self.existing_network:
+ self.parameters.connected = container_names_in_network(self.existing_network)
+
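+        # The deprecated ipam_options dict is folded into ipam_config as a
+        # single-entry list, so both forms take the same code path below.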
+ if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
+ self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
+ self.parameters.ipam_config = [self.parameters.ipam_options]
+
+ if self.parameters.ipam_config:
+ try:
+ for ipam_config in self.parameters.ipam_config:
+ validate_cidr(ipam_config['subnet'])
+ except ValueError as e:
+ self.client.fail(str(e))
+
+ if self.parameters.driver_options:
+ self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_network(self):
+ return self.client.get_network(name=self.parameters.name)
+
+ def has_different_config(self, net):
+ '''
+ Evaluates an existing network and returns a tuple containing a boolean
+ indicating if the configuration is different and a list of differences.
+
+ :param net: the inspection output for an existing network
+ :return: (bool, list)
+ '''
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != net['Driver']:
+ differences.add('driver',
+ parameter=self.parameters.driver,
+ active=net['Driver'])
+ if self.parameters.driver_options:
+ if not net.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=net.get('Options'))
+ else:
+ for key, value in self.parameters.driver_options.items():
+ if not (key in net['Options']) or value != net['Options'][key]:
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=net['Options'].get(key))
+
+ if self.parameters.ipam_driver:
+ if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
+ differences.add('ipam_driver',
+ parameter=self.parameters.ipam_driver,
+ active=net.get('IPAM'))
+
+ if self.parameters.ipam_driver_options is not None:
+ ipam_driver_options = net['IPAM'].get('Options') or {}
+ if ipam_driver_options != self.parameters.ipam_driver_options:
+ differences.add('ipam_driver_options',
+ parameter=self.parameters.ipam_driver_options,
+ active=ipam_driver_options)
+
+        if self.parameters.ipam_config:
+ if not net.get('IPAM') or not net['IPAM']['Config']:
+ differences.add('ipam_config',
+ parameter=self.parameters.ipam_config,
+ active=net.get('IPAM', {}).get('Config'))
+ else:
+ # Put network's IPAM config into the same format as module's IPAM config
+ net_ipam_configs = []
+ for net_ipam_config in net['IPAM']['Config']:
+ config = dict()
+ for k, v in net_ipam_config.items():
+ config[normalize_ipam_config_key(k)] = v
+ net_ipam_configs.append(config)
+ # Compare lists of dicts as sets of dicts
+ for idx, ipam_config in enumerate(self.parameters.ipam_config):
+ net_config = dict()
+ for net_ipam_config in net_ipam_configs:
+ if dicts_are_essentially_equal(ipam_config, net_ipam_config):
+ net_config = net_ipam_config
+ break
+ for key, value in ipam_config.items():
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value != net_config.get(key):
+ differences.add('ipam_config[%s].%s' % (idx, key),
+ parameter=value,
+ active=net_config.get(key))
+
+ if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
+ differences.add('enable_ipv6',
+ parameter=self.parameters.enable_ipv6,
+ active=net.get('EnableIPv6', False))
+
+ if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
+ differences.add('internal',
+ parameter=self.parameters.internal,
+ active=net.get('Internal'))
+
+ if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
+ differences.add('scope',
+ parameter=self.parameters.scope,
+ active=net.get('Scope'))
+
+ if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
+ differences.add('attachable',
+ parameter=self.parameters.attachable,
+ active=net.get('Attachable'))
+ if self.parameters.labels:
+ if not net.get('Labels'):
+ differences.add('labels',
+ parameter=self.parameters.labels,
+ active=net.get('Labels'))
+ else:
+ for key, value in self.parameters.labels.items():
+ if not (key in net['Labels']) or value != net['Labels'][key]:
+ differences.add('labels.%s' % key,
+ parameter=value,
+ active=net['Labels'].get(key))
+
+ return not differences.empty, differences
+
+ def create_network(self):
+ if not self.existing_network:
+ params = dict(
+ driver=self.parameters.driver,
+ options=self.parameters.driver_options,
+ )
+
+ ipam_pools = []
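+            # Docker SDK for Python >= 2.0.0 provides the typed IPAMPool helper,
+            # while older docker-py releases build pools via utils.create_ipam_pool.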
+ if self.parameters.ipam_config:
+ for ipam_pool in self.parameters.ipam_config:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ ipam_pools.append(IPAMPool(**ipam_pool))
+ else:
+ ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
+
+ if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
+ # Only add ipam parameter if a driver was specified or if IPAM parameters
+                # were specified. Leaving this parameter out can significantly speed up
+                # creation; on my machine, creation with this option takes ~15 seconds,
+                # and without it just a few seconds.
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools,
+ options=self.parameters.ipam_driver_options)
+ else:
+ params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools)
+
+ if self.parameters.enable_ipv6 is not None:
+ params['enable_ipv6'] = self.parameters.enable_ipv6
+ if self.parameters.internal is not None:
+ params['internal'] = self.parameters.internal
+ if self.parameters.scope is not None:
+ params['scope'] = self.parameters.scope
+ if self.parameters.attachable is not None:
+ params['attachable'] = self.parameters.attachable
+ if self.parameters.labels:
+ params['labels'] = self.parameters.labels
+
+ if not self.check_mode:
+ resp = self.client.create_network(self.parameters.name, **params)
+ self.client.report_warnings(resp, ['Warning'])
+ self.existing_network = self.client.get_network(network_id=resp['Id'])
+ self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_network(self):
+ if self.existing_network:
+ self.disconnect_all_containers()
+ if not self.check_mode:
+ self.client.remove_network(self.parameters.name)
+ self.results['actions'].append("Removed network %s" % (self.parameters.name,))
+ self.results['changed'] = True
+
+ def is_container_connected(self, container_name):
+ if not self.existing_network:
+ return False
+ return container_name in container_names_in_network(self.existing_network)
+
+ def connect_containers(self):
+ for name in self.parameters.connected:
+ if not self.is_container_connected(name):
+ if not self.check_mode:
+ self.client.connect_container_to_network(name, self.parameters.name)
+ self.results['actions'].append("Connected container %s" % (name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(name),
+ parameter=True,
+ active=False)
+
+ def disconnect_missing(self):
+ if not self.existing_network:
+ return
+ containers = self.existing_network['Containers']
+ if not containers:
+ return
+ for c in containers.values():
+ name = c['Name']
+ if name not in self.parameters.connected:
+ self.disconnect_container(name)
+
+ def disconnect_all_containers(self):
+ containers = self.client.get_network(name=self.parameters.name)['Containers']
+ if not containers:
+ return
+ for cont in containers.values():
+ self.disconnect_container(cont['Name'])
+
+ def disconnect_container(self, container_name):
+ if not self.check_mode:
+ self.client.disconnect_container_from_network(container_name, self.parameters.name)
+ self.results['actions'].append("Disconnected container %s" % (container_name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(container_name),
+ parameter=False,
+ active=True)
+
+ def present(self):
+ different = False
+ differences = DifferenceTracker()
+ if self.existing_network:
+ different, differences = self.has_different_config(self.existing_network)
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
+ if self.parameters.force or different:
+ self.remove_network()
+ self.existing_network = None
+
+ self.create_network()
+ self.connect_containers()
+ if not self.parameters.appends:
+ self.disconnect_missing()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ network_facts = self.get_existing_network()
+ self.results['ansible_facts'] = {u'docker_network': network_facts}
+ self.results['network'] = network_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
+ self.remove_network()
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['network_name']),
+ connected=dict(type='list', default=[], elements='str', aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str'),
+ ipam_driver_options=dict(type='dict'),
+ ipam_options=dict(type='dict', default={}, options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ ipam_config=dict(type='list', elements='dict', options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ )),
+ enable_ipv6=dict(type='bool'),
+ internal=dict(type='bool'),
+ labels=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ scope=dict(type='str', choices=['local', 'global', 'swarm']),
+ attachable=dict(type='bool'),
+ )
+
+ mutually_exclusive = [
+ ('ipam_config', 'ipam_options')
+ ]
+
+ option_minimal_versions = dict(
+ scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
+ labels=dict(docker_api_version='1.23'),
+ ipam_driver_options=dict(docker_py_version='2.0.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.22',
+ # "The docker server >= 1.10.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerNetworkManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network_info.py
new file mode 100644
index 00000000..feeff6a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_network_info
+
+short_description: Retrieves facts about docker network
+
+description:
+ - Retrieves facts about a docker network.
+ - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.general.docker_network)
+ returns for a non-absent network.
+
+
+options:
+ name:
+ description:
+ - The name of the network to inspect.
+      - When identifying an existing network, the name may be a name or a long or short network ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on network
+ community.general.docker_network_info:
+ name: mydata
+ register: result
+
+- name: Does network exist?
+ ansible.builtin.debug:
+ msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about network
+ ansible.builtin.debug:
+ var: result.network
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the network exists.
+ type: bool
+ returned: always
+ sample: true
+network:
+ description:
+ - Facts representing the current state of the network. Matches the docker inspection output.
+ - Will be C(none) if network does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "Attachable": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {},
+ "Created": "2018-12-07T01:47:51.250835114-06:00",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Config": [
+ {
+ "Gateway": "192.168.96.1",
+ "Subnet": "192.168.96.0/20"
+ }
+ ],
+ "Driver": "default",
+ "Options": null
+ },
+ "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
+ "Ingress": false,
+ "Internal": false,
+ "Labels": {},
+ "Name": "ansible-test-f2700bba",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ network = client.get_network(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(network),
+ network=network,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node.py
new file mode 100644
index 00000000..12980e5f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+ - Manages the Docker nodes via Swarm Manager.
+  - This module allows changing the node's role and availability, and modifying, adding or removing node labels.
+options:
+ hostname:
+ description:
+ - The hostname or ID of node as registered in Swarm.
+      - If more than one node is registered using the same hostname, the ID must be used;
+        otherwise the module will fail.
+ type: str
+ required: yes
+ labels:
+ description:
+ - User-defined key/value metadata that will be assigned as node attribute.
+ - Label operations in this module apply to the docker swarm node specified by I(hostname).
+ Use M(community.general.docker_swarm) module to add/modify/remove swarm cluster labels.
+ - The actual state of labels assigned to the node when module completes its work depends on
+ I(labels_state) and I(labels_to_remove) parameters values. See description below.
+ type: dict
+ labels_state:
+ description:
+      - Defines the operation on the labels assigned to the node and the labels specified in the I(labels) option.
+ - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
+ If no labels are assigned then it will add listed labels. For labels that are already assigned
+ to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
+ If I(labels) is empty then no changes will be made.
+ - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
+ all labels assigned to the node will be removed.
+ type: str
+ default: 'merge'
+ choices:
+ - merge
+ - replace
+ labels_to_remove:
+ description:
+ - List of labels that will be removed from the node configuration. The list has to contain only label
+ names, not their values.
+ - If the label provided on the list is not assigned to the node, the entry is ignored.
+ - If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains
+ assigned to the node.
+ - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to
+ node are removed and I(labels_to_remove) is ignored.
+ type: list
+ elements: str
+ availability:
+ description: Node availability to assign. If not provided then node availability remains unchanged.
+ choices:
+ - active
+ - pause
+ - drain
+ type: str
+ role:
+ description: Node role to assign. If not provided then node role remains unchanged.
+ choices:
+ - manager
+ - worker
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - Docker API >= 1.25
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+ - Thierry Bouvet (@tbouvet)
+
+'''
+
+EXAMPLES = '''
+- name: Set node role
+ community.general.docker_node:
+ hostname: mynode
+ role: manager
+
+- name: Set node availability
+ community.general.docker_node:
+ hostname: mynode
+ availability: drain
+
+- name: Replace node labels with new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+ labels_state: replace
+
+- name: Merge node labels and new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+
+- name: Remove all labels assigned to node
+ community.general.docker_node:
+ hostname: mynode
+ labels_state: replace
+
+- name: Remove selected labels from the node
+ community.general.docker_node:
+ hostname: mynode
+ labels_to_remove:
+ - key1
+ - key2
+'''
+
+RETURN = '''
+node:
+ description: Information about node after 'update' operation
+ returned: success
+ type: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ RequestException,
+)
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+
+ # Spec
+ self.name = None
+ self.labels = None
+ self.labels_state = None
+ self.labels_to_remove = None
+
+ # Node
+ self.availability = None
+ self.role = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+class SwarmNodeManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmNodeManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.parameters = TaskParameters(client)
+
+ self.node_update()
+
+ def node_update(self):
+        if not self.client.check_if_swarm_node(node_id=self.parameters.hostname):
+ self.client.fail("This node is not part of a swarm.")
+ return
+
+ if self.client.check_if_swarm_node_is_down():
+ self.client.fail("Can not update the node. The node is down.")
+
+ try:
+ node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+ except APIError as exc:
+ self.client.fail("Failed to get node information for %s" % to_native(exc))
+
+ changed = False
+ node_spec = dict(
+ Availability=self.parameters.availability,
+ Role=self.parameters.role,
+ Labels=self.parameters.labels,
+ )
+
+ if self.parameters.role is None:
+ node_spec['Role'] = node_info['Spec']['Role']
+ else:
+            if node_info['Spec']['Role'] != self.parameters.role:
+ node_spec['Role'] = self.parameters.role
+ changed = True
+
+ if self.parameters.availability is None:
+ node_spec['Availability'] = node_info['Spec']['Availability']
+ else:
+            if node_info['Spec']['Availability'] != self.parameters.availability:
+                node_spec['Availability'] = self.parameters.availability
+ changed = True
+
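+        # 'replace' treats the provided labels as canonical; 'merge' only adds or
+        # updates the listed keys and leaves other assigned labels untouched.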
+ if self.parameters.labels_state == 'replace':
+ if self.parameters.labels is None:
+ node_spec['Labels'] = {}
+ if node_info['Spec']['Labels']:
+ changed = True
+ else:
+ if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+ node_spec['Labels'] = self.parameters.labels
+ changed = True
+ elif self.parameters.labels_state == 'merge':
+ node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+ if self.parameters.labels is not None:
+ for key, value in self.parameters.labels.items():
+ if node_spec['Labels'].get(key) != value:
+ node_spec['Labels'][key] = value
+ changed = True
+
+ if self.parameters.labels_to_remove is not None:
+ for key in self.parameters.labels_to_remove:
+ if self.parameters.labels is not None:
+ if not self.parameters.labels.get(key):
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+ else:
+ self.client.module.warn(
+ "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+ "Keeping the assigned label value."
+ % to_native(key))
+ else:
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+
+ if changed is True:
+ if not self.check_mode:
+ try:
+ self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+ node_spec=node_spec)
+ except APIError as exc:
+ self.client.fail("Failed to update node : %s" % to_native(exc))
+ self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+ self.results['changed'] = changed
+ else:
+ self.results['node'] = node_info
+ self.results['changed'] = changed
+
+
+def main():
+ argument_spec = dict(
+ hostname=dict(type='str', required=True),
+ labels=dict(type='dict'),
+ labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+ labels_to_remove=dict(type='list', elements='str'),
+ availability=dict(type='str', choices=['active', 'pause', 'drain']),
+ role=dict(type='str', choices=['worker', 'manager']),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ SwarmNodeManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node_info.py
new file mode 100644
index 00000000..c01edadc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about docker swarm node from Swarm Manager
+
+description:
+ - Retrieves facts about a docker node.
+ - Essentially returns the output of C(docker node inspect <name>).
+ - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the node to inspect.
+      - A list of node names may also be given to inspect multiple nodes.
+      - If empty, information about all nodes in the Swarm cluster is returned.
+ - When identifying the node use either the hostname of the node (as registered in Swarm) or node ID.
+ - If I(self) is C(true) then this parameter is ignored.
+ type: list
+ elements: str
+ self:
+ description:
+ - If C(true), queries the node (i.e. the docker daemon) the module communicates with.
+ - If C(true) then I(name) is ignored.
+ - If C(false) then query depends on I(name) presence and value.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on all nodes
+ community.general.docker_node_info:
+ register: result
+
+- name: Get info on node
+ community.general.docker_node_info:
+ name: mynode
+ register: result
+
+- name: Get info on list of nodes
+ community.general.docker_node_info:
+ name:
+ - mynode1
+ - mynode2
+ register: result
+
+- name: Get info on host if it is Swarm Manager
+ community.general.docker_node_info:
+ self: true
+ register: result
+'''
+
+RETURN = '''
+nodes:
+ description:
+ - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
+      - Can contain multiple entries if more than one node is provided in I(name), or if I(name) is not provided.
+      - If I(name) contains a list of nodes, the output will provide information on all nodes registered
+        in the swarm, including nodes that left the swarm but have not been removed from the cluster on swarm
+        managers, and nodes that are unreachable.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_node_facts(client):
+
+ results = []
+
+ if client.module.params['self'] is True:
+ self_node_id = client.get_swarm_node_id()
+ node_info = client.get_node_inspect(node_id=self_node_id)
+ results.append(node_info)
+ return results
+
+ if client.module.params['name'] is None:
+ node_info = client.get_all_nodes_inspect()
+ return node_info
+
+ nodes = client.module.params['name']
+ if not isinstance(nodes, list):
+ nodes = [nodes]
+
+ for next_node_name in nodes:
+ next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True)
+ if next_node_info:
+ results.append(next_node_info)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ self=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ nodes = get_node_facts(client)
+
+ client.module.exit_json(
+ changed=False,
+ nodes=nodes,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_prune.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_prune.py
new file mode 100644
index 00000000..025c6130
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_prune.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Prune various docker objects
+
+description:
+  - Allows running C(docker container prune), C(docker image prune), C(docker network prune)
+ and C(docker volume prune) via the Docker API.
+
+
+options:
+ containers:
+ description:
+ - Whether to prune containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to delete.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to prune images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to delete.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to prune networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to prune volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ builder_cache:
+ description:
+ - Whether to prune the builder cache.
+ - Requires version 3.3.0 of the Docker SDK for Python or newer.
+ type: bool
+ default: no
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+ community.general.docker_prune:
+ containers: yes
+ containers_filters:
+ # only consider containers created more than 24 hours ago
+ until: 24h
+
+- name: Prune everything
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+
+- name: Prune everything (including non-dangling images)
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ images_filters:
+ dangling: false
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+'''
+
+RETURN = '''
+# containers
+containers:
+ description:
+ - List of IDs of deleted containers.
+ returned: I(containers) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+containers_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from container pruning in bytes.
+ returned: I(containers) is C(true)
+ type: int
+ sample: '0'
+
+# images
+images:
+ description:
+ - List of IDs of deleted images.
+ returned: I(images) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+images_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from image pruning in bytes.
+ returned: I(images) is C(true)
+ type: int
+ sample: '0'
+
+# networks
+networks:
+ description:
+ - List of IDs of deleted networks.
+ returned: I(networks) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+
+# volumes
+volumes:
+ description:
+ - List of IDs of deleted volumes.
+ returned: I(volumes) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+volumes_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from volumes pruning in bytes.
+ returned: I(volumes) is C(true)
+ type: int
+ sample: '0'
+
+# builder_cache
+builder_cache_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from builder cache pruning in bytes.
+ returned: I(builder_cache) is C(true)
+ type: int
+ sample: '0'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+try:
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version, clean_dict_booleans_for_docker_api
+except Exception as dummy:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ builder_cache=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ # supports_check_mode=True,
+ min_docker_api_version='1.25',
+ min_docker_version='2.1.0',
+ )
+
+ # Version checks
+ cache_min_version = '3.3.0'
+ if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
+ msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade."
+ client.fail(msg % (docker_version, cache_min_version))
+
+ try:
+ result = dict()
+
+ if client.module.params['containers']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
+ res = client.prune_containers(filters=filters)
+ result['containers'] = res.get('ContainersDeleted') or []
+ result['containers_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['images']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+ res = client.prune_images(filters=filters)
+ result['images'] = res.get('ImagesDeleted') or []
+ result['images_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['networks']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+ res = client.prune_networks(filters=filters)
+ result['networks'] = res.get('NetworksDeleted') or []
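+            # Unlike the other prune calls above, no 'SpaceReclaimed' value is
+            # recorded for networks; only the list of deleted network IDs is returned.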
+
+ if client.module.params['volumes']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+ res = client.prune_volumes(filters=filters)
+ result['volumes'] = res.get('VolumesDeleted') or []
+ result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['builder_cache']:
+ res = client.prune_builds()
+ result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_secret.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_secret.py
new file mode 100644
index 00000000..b6ce7f28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_secret.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets
+
+
+description:
+ - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+  - Adds to the metadata of new secrets C(ansible_key), a hash representation of the data, which is then used
+    in future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated
+ unless the I(force) option is set.
+ - Updates to secrets are performed by removing the secret and creating it again.
+options:
+ data:
+ description:
+ - The value of the secret. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+ - "A map of key:value meta data, where both key and value are expected to be strings."
+ - If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing secret.
+ - If C(true), an existing secret will be replaced, even if it has not changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the secret.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the secret should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+ community.general.docker_secret:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+      # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the secret data
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the secret
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing secret
+ state: present
+
+- name: Update an existing label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the secret
+ one: '1'
+ state: present
+
+- name: Force the removal/creation of the secret
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove secret foo
+ community.general.docker_secret:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+secret_id:
+ description:
+ - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class SecretManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SecretManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_secret(self):
+ ''' Find an existing secret. '''
+ try:
+ secrets = self.client.secrets(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
+
+ for secret in secrets:
+ if secret['Spec']['Name'] == self.name:
+ return secret
+ return None
+
+ def create_secret(self):
+ ''' Create a new secret '''
+ secret_id = None
+        # We can't see the data after creation, so add a label we can use for idempotency checks
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ secret_id = self.client.create_secret(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating secret: %s" % to_native(exc))
+
+ if isinstance(secret_id, dict):
+ secret_id = secret_id['ID']
+
+ return secret_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the secret '''
+ secret = self.get_secret()
+ if secret:
+ self.results['secret_id'] = secret['ID']
+ data_changed = False
+ attrs = secret.get('Spec', {})
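+            # The secret data itself cannot be read back from Docker, so compare
+            # the digest stored in the 'ansible_key' label with the digest of the
+            # requested data to detect changes.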
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'yes'")
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the secret
+ self.absent()
+ secret_id = self.create_secret()
+ self.results['changed'] = True
+ self.results['secret_id'] = secret_id
+ else:
+ self.results['changed'] = True
+ self.results['secret_id'] = self.create_secret()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the secret '''
+ secret = self.get_secret()
+ if secret:
+ try:
+ if not self.check_mode:
+ self.client.remove_secret(secret['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str', no_log=True),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.1.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ secret_id=''
+ )
+
+ SecretManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_service.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_service.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+ - Uses Docker Compose to start, shutdown and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up), or C(docker-compose stop) when I(stopped) is set, or C(docker-compose restart)
+        when I(restarted) is set.
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up) (or C(docker-compose stop) when I(stopped) is set, or C(docker-compose restart) when
+        I(restarted) is set) on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: "not output.changed "
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+          description: Metadata assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+          description: Contains a dictionary for each network of which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+              description: MAC address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
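+# docker-compose writes its progress and error output straight to stdout and
+# stderr. An Ansible module must emit only JSON on stdout, so the helpers below
+# temporarily redirect both streams into tempfiles; on failure the captured
+# output is mined for ERROR:/WARNING: lines (see get_failure_info).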
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+ finally:
+ sys.stderr = old_fh
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+ errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
+ errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
+
+ warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
+ warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
+
+ # assume either the exception body (if present) or the last warning was the 'most'
+ # fatal.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+                    # substring check instead of equality: depending on the
+                    # compose version, build() may return a short image ID while
+                    # image['Id'] carries the full sha256-prefixed form
+                    if new_image_id not in old_image_id:
+                        # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+ if len(containers):
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+ self.client.fail("Error scaling %s - expected int, got %s",
+ service_name, str(type(self.scale[service_name])))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+ project_name=dict(type='str',),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack.py
new file mode 100644
index 00000000..d3089e20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: docker stack module
+description:
+ - Manage docker stacks using the 'docker stack' command
+ on the target node (see examples).
+options:
+ name:
+ description:
+ - Stack name
+ type: str
+ required: yes
+ state:
+ description:
+ - Service state.
+ type: str
+ default: "present"
+ choices:
+ - present
+ - absent
+ compose:
+ description:
+      - List of compose definitions. Any element may be a string
+        referring to the path of the compose file on the target host
+        or the YAML contents of a compose file nested as a dictionary.
+ type: list
+ elements: raw
+ default: []
+ prune:
+ description:
+      - If C(true), will add the C(--prune) option to the C(docker stack deploy) command.
+ This will have docker remove the services not present in the
+ current stack definition.
+ type: bool
+ default: no
+ with_registry_auth:
+ description:
+      - If C(true), will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+ This will have docker send registry authentication details to Swarm agents.
+ type: bool
+ default: no
+ resolve_image:
+ description:
+      - If set, will add the C(--resolve-image) option to the C(docker stack deploy) command.
+        This will have docker query the registry to resolve image digest and
+        supported platforms. If not set, docker uses C(always) by default.
+ type: str
+ choices: ["always", "changed", "never"]
+ absent_retries:
+ description:
+ - If C(>0) and I(state) is C(absent) the module will retry up to
+ I(absent_retries) times to delete the stack until all the
+ resources have been effectively deleted.
+ If the last try still reports the stack as not completely
+ removed the module will fail.
+ type: int
+ default: 0
+ absent_retries_interval:
+ description:
+ - Interval in seconds between consecutive I(absent_retries).
+ type: int
+ default: 1
+
+requirements:
+ - jsondiff
+ - pyyaml
+
+notes:
+ - Return values I(out) and I(err) have been deprecated and will be removed in community.general 3.0.0. Use I(stdout) and I(stderr) instead.
+'''
+
+RETURN = '''
+stack_spec_diff:
+ description: |
+ dictionary containing the differences between the 'Spec' field
+ of the stack services before and after applying the new stack
+ definition.
+ sample: >
+ "stack_spec_diff":
+ {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+ returned: on change
+ type: dict
+'''
+
+EXAMPLES = '''
+ - name: Deploy stack from a compose file
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+
+ - name: Deploy stack from base compose file and override the web service
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+ - version: '3'
+ services:
+ web:
+ image: nginx:latest
+ environment:
+ ENVVAR: envvar
+
+ - name: Remove stack
+ community.general.docker_stack:
+ name: mystack
+ state: absent
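+
+  # A hedged sketch (not part of the original examples): the documented
+  # I(absent_retries) and I(absent_retries_interval) options can be used
+  # to wait until removal has fully completed.
+  - name: Remove stack, retrying while resources are still being deleted
+    community.general.docker_stack:
+      name: mystack
+      state: absent
+      absent_retries: 5
+      absent_retries_interval: 2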
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+ from jsondiff import diff as json_diff
+ HAS_JSONDIFF = True
+except ImportError:
+ HAS_JSONDIFF = False
+
+try:
+ from yaml import dump as yaml_dump
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "stack",
+ "services",
+ stack_name,
+ "--format",
+ "{{.Name}}"])
+ if err == "Nothing found in stack: %s\n" % stack_name:
+ return []
+ return out.strip().split('\n')
+
+
+def docker_service_inspect(module, service_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "service",
+ "inspect",
+ service_name])
+ if rc != 0:
+ return None
+ else:
+ ret = json.loads(out)[0]['Spec']
+ return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "deploy"]
+ if module.params["prune"]:
+ command += ["--prune"]
+ if module.params["with_registry_auth"]:
+ command += ["--with-registry-auth"]
+ if module.params["resolve_image"]:
+ command += ["--resolve-image",
+ module.params["resolve_image"]]
+ for compose_file in compose_files:
+ command += ["--compose-file",
+ compose_file]
+ command += [stack_name]
+ return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+ ret = {}
+ for service_name in docker_stack_services(module, stack_name):
+ ret[service_name] = docker_service_inspect(module, service_name)
+ return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "rm", stack_name]
+
+ rc, out, err = module.run_command(command)
+
+ while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+ sleep(interval)
+ retries = retries - 1
+ rc, out, err = module.run_command(command)
+ return rc, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'compose': dict(type='list', elements='raw', default=[]),
+ 'prune': dict(type='bool', default=False),
+ 'with_registry_auth': dict(type='bool', default=False),
+ 'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+ 'state': dict(type='str', default='present', choices=['present', 'absent']),
+ 'absent_retries': dict(type='int', default=0),
+ 'absent_retries_interval': dict(type='int', default=1)
+ },
+ supports_check_mode=False
+ )
+
+ if not HAS_JSONDIFF:
+ return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+ if not HAS_YAML:
+ return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+ state = module.params['state']
+ compose = module.params['compose']
+ name = module.params['name']
+ absent_retries = module.params['absent_retries']
+ absent_retries_interval = module.params['absent_retries_interval']
+
+ if state == 'present':
+ if not compose:
+ module.fail_json(msg=("compose parameter must be a list "
+ "containing at least one element"))
+
+ compose_files = []
+ for i, compose_def in enumerate(compose):
+ if isinstance(compose_def, dict):
+ compose_file_fd, compose_file = tempfile.mkstemp()
+ module.add_cleanup_file(compose_file)
+ with os.fdopen(compose_file_fd, 'w') as stack_file:
+ compose_files.append(compose_file)
+ stack_file.write(yaml_dump(compose_def))
+ elif isinstance(compose_def, string_types):
+ compose_files.append(compose_def)
+ else:
+ module.fail_json(msg="compose element '%s' must be a " +
+ "string or a dictionary" % compose_def)
+
+ before_stack_services = docker_stack_inspect(module, name)
+
+ rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+ after_stack_services = docker_stack_inspect(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="docker stack up deploy command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+
+ before_after_differences = json_diff(before_stack_services,
+ after_stack_services)
+ for k in before_after_differences.keys():
+ if isinstance(before_after_differences[k], dict):
+ before_after_differences[k].pop('UpdatedAt', None)
+ before_after_differences[k].pop('Version', None)
+ if not list(before_after_differences[k].keys()):
+ before_after_differences.pop(k)
+
+ if not before_after_differences:
+ module.exit_json(
+ changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err)
+ else:
+ module.exit_json(
+ changed=True,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ stack_spec_diff=json_diff(before_stack_services,
+ after_stack_services,
+ dump=True))
+
+ else:
+ if docker_stack_services(module, name):
+ rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+ if rc != 0:
+ module.fail_json(msg="'docker stack down' command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+ else:
+ module.exit_json(changed=True,
+ msg=out, rc=rc,
+ err=err, # Deprecated
+ stdout=out, stderr=err)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_info.py
new file mode 100644
index 00000000..74a3648d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_info.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on a docker stack
+description:
+ - Retrieve information on docker stacks using the C(docker stack) command
+ on the target node (see examples).
+version_added: "1.0.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing the list of stacks or tasks associated
+    with a stack name.
+ sample: >
+ "results": [{"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}]
+ returned: always
+ type: list
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_info:
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_list(module):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ls", "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ },
+ supports_check_mode=False
+ )
+
+ rc, out, err = docker_stack_list(module)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
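+            # "--format={{json .}}" prints one JSON document per line,
+            # so parse each line separately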
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_task_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_task_info.py
new file mode 100644
index 00000000..966a4266
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_task_info.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_task_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on the tasks of a docker stack
+description:
+  - Retrieve information on docker stack tasks using the C(docker stack) command
+ on the target node (see examples).
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: yes
+version_added: "1.1.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing the list of tasks associated
+    with a stack name.
+ sample: >
+ [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_task_info:
+ name: test_stack
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_task(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True)
+ },
+ supports_check_mode=False
+ )
+
+ name = module.params['name']
+
+ rc, out, err = docker_stack_task(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
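+            # "--format={{json .}}" prints one JSON document per line,
+            # so parse each line separately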
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm.py
new file mode 100644
index 00000000..52f37643
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm.py
@@ -0,0 +1,675 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: no
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+      - Set to C(inspect) to display swarm information.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ - inspect
+ node_id:
+ description:
+      - Swarm ID of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+      - Maximum number of task history entries stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+      - Number of log entries between snapshots.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+      - Number of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+      - Number of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+      - Automatic expiry for node certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+        Use the M(community.general.docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(no).
+ - M(community.general.docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: no
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ community.general.docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ community.general.docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ community.general.docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ community.general.docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ community.general.docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ community.general.docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Inspect swarm
+ community.general.docker_swarm:
+ state: inspect
+ register: swarm_info
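+
+# A minimal sketch (not among the original examples): initialise a swarm with
+# manager auto-locking enabled (requires Docker SDK for Python >= 2.6.0);
+# the unlock key is then returned in swarm_facts.UnlockKey.
+- name: Init a new swarm with autolocked managers
+ community.general.docker_swarm:
+ state: present
+ autolock_managers: true
+ register: swarm_info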
+'''
+
+RETURN = '''
+swarm_facts:
+ description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+ description: Token to create a new *worker* node.
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+ description: Token to create a new *manager* node.
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+ returned: success
+ type: list
+ elements: str
+ example: "['This cluster is already a swarm cluster']"
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ DifferenceTracker,
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils._text import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
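+ # Copy every Ansible module parameter whose name matches an attribute
+ # defined in __init__ onto a fresh TaskParameters instance, then let
+ # update_parameters() build the docker swarm spec from it.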
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
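+ # Backfill parameters the user left unset with the values currently
+ # active in the swarm, so a later comparison only flags options the
+ # user explicitly changed.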
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
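+ # Build the docker SwarmSpec from the options that are explicitly set,
+ # skipping any option the installed SDK / daemon API does not support.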
+ params = dict()
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
+ def compare_to_active(self, other, client, differences):
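+ # Join-time and one-shot parameters are excluded from the idempotency
+ # comparison; the rotate_*_token flags always register as a difference
+ # when set.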
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ "inspect": self.inspect_swarm
+ }
+
+ if self.state == 'inspect':
+ self.client.module.deprecate(
+ "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
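+ # True when autolock is requested and either the swarm was just
+ # created or the autolock setting itself changed.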
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Cannot create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm was not created, or another error occurred!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Cannot update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
+ except APIError as exc:
+ self.client.fail("Cannot join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node cannot leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+ self.client.fail("Cannot remove the node: its status is ready, not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Cannot remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str'),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str', no_log=True),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
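+ # Minimal Docker SDK / API versions per option; the client only fails
+ # when an option listed here is actually used with older versions.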
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.25',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_info.py
new file mode 100644
index 00000000..f6d5fad1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_info.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about a Docker Swarm cluster
+
+description:
+ - Retrieves facts about a Docker Swarm.
+ - Returns lists of swarm object names - nodes, services, tasks.
+ - The output differs depending on the API version available on the docker host.
+ - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+ On both error and success it returns boolean flags indicating whether the docker
+ daemon can be communicated with, whether it is in Swarm mode, and whether the
+ current node is a Swarm Manager node.
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+options:
+ nodes:
+ description:
+ - Whether to list swarm nodes.
+ type: bool
+ default: no
+ nodes_filters:
+ description:
+ - A dictionary of filter values used for selecting nodes to list.
+ - "For example, C(name: mynode)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ services:
+ description:
+ - Whether to list swarm services.
+ type: bool
+ default: no
+ services_filters:
+ description:
+ - A dictionary of filter values used for selecting services to list.
+ - "For example, C(name: myservice)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ tasks:
+ description:
+ - Whether to list swarm tasks (containers).
+ type: bool
+ default: no
+ tasks_filters:
+ description:
+ - A dictionary of filter values used for selecting tasks to list.
+ - "For example, C(node: mynode-1)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+ for more information on possible filters.
+ type: dict
+ unlock_key:
+ description:
+ - Whether to retrieve the swarm unlock key.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+ - When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), the module output will
+ contain verbose information about objects matching the full output of the API method.
+ - For details see the documentation of your version of the Docker API at U(https://docs.docker.com/engine/api/).
+ - The verbose output in this module contains only a subset of the information returned by
+ the I(_info) module for each type of object.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+ community.general.docker_swarm_info:
+ ignore_errors: yes
+ register: result
+
+- name: Inform about basic flags
+ ansible.builtin.debug:
+ msg: |
+ Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+ Docker in Swarm mode: {{ result.docker_swarm_active }}
+ This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ nodes_filters:
+ name: mynode
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_facts
+
+- name: Get the swarm unlock key
+ community.general.docker_swarm_info:
+ unlock_key: yes
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_unlock_key
+
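+# A minimal sketch (not among the original examples): list services and the
+# tasks scheduled on a single node in one call.
+- name: Get services and tasks filtered to one node
+ community.general.docker_swarm_info:
+ services: yes
+ tasks: yes
+ tasks_filters:
+ node: mynode-1
+ register: result
+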
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+docker_swarm_active:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ and the docker daemon is in Swarm mode.
+ returned: both on success and on error
+ type: bool
+docker_swarm_manager:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ the docker daemon is in Swarm mode, and the current node is
+ a manager node.
+ - The module will fail unless this is C(true).
+ returned: both on success and on error
+ type: bool
+swarm_facts:
+ description:
+ - Facts representing the basic state of the docker Swarm cluster.
+ - Contains tokens to connect to the Swarm.
+ returned: always
+ type: dict
+swarm_unlock_key:
+ description:
+ - Contains the key needed to unlock the swarm.
+ returned: When I(unlock_key) is C(true).
+ type: str
+nodes:
+ description:
+ - List of dict objects containing the basic information about each node.
+ Keys match the C(docker node ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(nodes) is C(yes)
+ type: list
+ elements: dict
+services:
+ description:
+ - List of dict objects containing the basic information about each service.
+ Keys match the C(docker service ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(services) is C(yes)
+ type: list
+ elements: dict
+tasks:
+ description:
+ - List of dict objects containing the basic information about each task.
+ Keys match the C(docker service ps) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(tasks) is C(yes)
+ type: list
+ elements: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker_common
+ pass
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+
+class DockerSwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerSwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['tasks', 'services', 'nodes']
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.results['swarm_facts'] = self.get_docker_swarm_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+ if self.client.module.params['unlock_key']:
+ self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
+
+ def get_docker_swarm_facts(self):
+ try:
+ return self.client.inspect_swarm()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None):
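+ # With verbose_output the raw API objects are returned unmodified;
+ # otherwise each item is reduced to the essential fields that mirror
+ # the matching docker CLI listing output.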
+ items = None
+ items_list = []
+
+ try:
+ if docker_object == 'nodes':
+ items = self.client.nodes(filters=filters)
+ elif docker_object == 'tasks':
+ items = self.client.tasks(filters=filters)
+ elif docker_object == 'services':
+ items = self.client.services(filters=filters)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'nodes':
+ item_record = self.get_essential_facts_nodes(item)
+ elif docker_object == 'tasks':
+ item_record = self.get_essential_facts_tasks(item)
+ elif docker_object == 'services':
+ item_record = self.get_essential_facts_services(item)
+ if item_record['Mode'] == 'Global':
+ item_record['Replicas'] = len(items)
+ items_list.append(item_record)
+
+ return items_list
+
+ @staticmethod
+ def get_essential_facts_nodes(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item.get('ID')
+ object_essentials['Hostname'] = item['Description']['Hostname']
+ object_essentials['Status'] = item['Status']['State']
+ object_essentials['Availability'] = item['Spec']['Availability']
+ if 'ManagerStatus' in item:
+ object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
+ if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
+ object_essentials['ManagerStatus'] = "Leader"
+ else:
+ object_essentials['ManagerStatus'] = None
+ object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
+
+ return object_essentials
+
+ def get_essential_facts_tasks(self, item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ # Returning container ID to not trigger another connection to host
+ # Container ID is sufficient to get extended info in other tasks
+ object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
+ object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
+ object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
+ object_essentials['DesiredState'] = item['DesiredState']
+ object_essentials['CurrentState'] = item['Status']['State']
+ if 'Err' in item['Status']:
+ object_essentials['Error'] = item['Status']['Err']
+ else:
+ object_essentials['Error'] = None
+
+ return object_essentials
+
+ @staticmethod
+ def get_essential_facts_services(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ object_essentials['Name'] = item['Spec']['Name']
+ if 'Replicated' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Replicated"
+ object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
+ elif 'Global' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Global"
+ # Number of replicas have to be updated in calling method or may be left as None
+ object_essentials['Replicas'] = None
+ object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
+ if 'Ports' in item['Spec']['EndpointSpec']:
+ object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
+ else:
+ object_essentials['Ports'] = []
+
+ return object_essentials
+
+ def get_docker_swarm_unlock_key(self):
+ unlock_key = self.client.get_unlock_key() or {}
+ return unlock_key.get('UnlockKey') or None
+
+
+def main():
+ argument_spec = dict(
+ nodes=dict(type='bool', default=False),
+ nodes_filters=dict(type='dict'),
+ tasks=dict(type='bool', default=False),
+ tasks_filters=dict(type='dict'),
+ services=dict(type='bool', default=False),
+ services_filters=dict(type='dict'),
+ unlock_key=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+ option_minimal_versions = dict(
+ unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ docker_swarm_active=False,
+ docker_swarm_manager=False,
+ ),
+ )
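+ # Refresh the three boolean flags up front so they are accurate both
+ # in the failure payload and in the success result (merged in below).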
+ client.fail_results['can_talk_to_docker'] = True
+ client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
+ client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerSwarmManager(client, results)
+ results.update(client.fail_results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service.py
new file mode 100644
index 00000000..7c6f23a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service.py
@@ -0,0 +1,3004 @@
+#!/usr/bin/python
+#
+# (c) 2017, Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+ - "Dario Zanzico (@dariko)"
+ - "Jason Witkowski (@jwitko)"
+ - "Hannes Ljungberg (@hannseman)"
+short_description: Manage docker swarm services
+description:
+ - Manages docker services via a swarm manager node.
+options:
+ args:
+ description:
+ - List of arguments to be passed to the container.
+ - Corresponds to the C(ARG) parameter of C(docker service create).
+ type: list
+ elements: str
+ command:
+ description:
+ - Command to execute when the container starts.
+ - A command may be either a string or a list of strings.
+ - Corresponds to the C(COMMAND) parameter of C(docker service create).
+ type: raw
+ configs:
+ description:
+ - List of dictionaries describing the service configs.
+ - Corresponds to the C(--config) option of C(docker service create).
+ - Requires API version >= 1.30.
+ type: list
+ elements: dict
+ suboptions:
+ config_id:
+ description:
+ - Config's ID.
+ type: str
+ config_name:
+ description:
+ - Config's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+ type: str
+ uid:
+ description:
+ - UID of the config file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the config file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(placement.constraints) instead.
+ type: list
+ elements: str
+ container_labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--container-label) option of C(docker service create).
+ type: dict
+ dns:
+ description:
+ - List of custom DNS servers.
+ - Corresponds to the C(--dns) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of custom DNS search domains.
+ - Corresponds to the C(--dns-search) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_options:
+ description:
+ - List of custom DNS options.
+ - Corresponds to the C(--dns-option) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ endpoint_mode:
+ description:
+ - Service endpoint mode.
+ - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ choices:
+ - vip
+ - dnsrr
+ env:
+ description:
+ - List or dictionary of the service environment variables.
+ - If passed a list each items need to be in the format of C(KEY=VALUE).
+ - If passed a dictionary values which might be parsed as numbers,
+ booleans or other types by the YAML parser must be quoted (e.g. C("true"))
+ in order to avoid data loss.
+ - Corresponds to the C(--env) option of C(docker service create).
+ type: raw
+ env_files:
+ description:
+ - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+ - The order of the list is significant in determining the value assigned to a
+ variable that shows up more than once.
+ - If variable also present in I(env), then I(env) value will override.
+ type: list
+ elements: path
+ force_update:
+ description:
+ - Force update even if no changes require it.
+ - Corresponds to the C(--force) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: bool
+ default: no
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ - Corresponds to the C(--group) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work.
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept a duration as a string in a format
+ that looks like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Requires API version >= 1.25.
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ type: str
+ retries:
+ description:
+ - Consecutive failures needed to report unhealthy. It accepts an integer value.
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ type: str
+ hostname:
+ description:
+ - Container hostname.
+ - Corresponds to the C(--hostname) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's /etc/hosts file.
+ - Corresponds to the C(--host) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: dict
+ image:
+ description:
+ - Service image path and tag.
+ - Corresponds to the C(IMAGE) parameter of C(docker service create).
+ type: str
+ init:
+ description:
+ - Use an init inside each service container to forward signals and reap processes.
+ - Corresponds to the C(--init) option of C(docker service create).
+ - Requires API version >= 1.37.
+ type: bool
+ version_added: '0.2.0'
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--label) option of C(docker service create).
+ type: dict
+ limits:
+ description:
+ - Configures service resource limits.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ type: str
+ type: dict
+ limit_cpu:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.cpus) instead.
+ type: float
+ limit_memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.memory) instead.
+ type: str
+ logging:
+ description:
+ - "Logging configuration for the service."
+ suboptions:
+ driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ type: str
+ options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ type: dict
+ type: dict
+ log_driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.driver) instead.
+ type: str
+ log_driver_options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.options) instead.
+ type: dict
+ mode:
+ description:
+ - Service replication mode.
+ - Service will be removed and recreated when changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: yes
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: yes
+ networks:
+ description:
+ - List of the service networks names or dictionaries.
+ - When passed as dictionaries, valid sub-options are I(name), which is required, and
+ I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: yes
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: yes
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+ - Mount the container's root filesystem as read only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+ - If set to C(-1), and service is not present, service replicas will be set to C(1).
+ - If set to C(-1), and service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ reserve_cpu:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.cpus) instead.
+ type: float
+ reserve_memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.memory) instead.
+ type: str
+ resolve_image:
+ description:
+ - If the current image digest should be resolved from registry and updated if changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: no
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ restart_policy:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.condition) instead.
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ restart_policy_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.max_attempts) instead.
+ type: int
+ restart_policy_delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.delay) instead.
+ type: raw
+ restart_policy_window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.window) instead.
+ type: raw
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+ - The number of containers to roll back at a time. If set to C(0), all containers roll back simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ update_delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(10).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.delay) instead.
+ type: raw
+ update_parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(1).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.parallelism) instead.
+ type: int
+ update_failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.failure_action) instead.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ update_monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that looks like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.monitor) instead.
+ type: raw
+ update_max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.max_failure_ratio) instead.
+ type: float
+ update_order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.order) instead.
+ type: str
+ choices:
+ - stop-first
+ - start-first
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.24"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+ When using older versions use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but are accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{
+ "args": [
+ "3600"
+ ],
+ "command": [
+ "sleep"
+ ],
+ "configs": null,
+ "constraints": [
+ "node.role == manager",
+ "engine.labels.operatingsystem == ubuntu 14.04"
+ ],
+ "container_labels": null,
+ "dns": null,
+ "dns_options": null,
+ "dns_search": null,
+ "endpoint_mode": null,
+ "env": [
+ "ENVVAR1=envvar1",
+ "ENVVAR2=envvar2"
+ ],
+ "force_update": null,
+ "groups": null,
+ "healthcheck": {
+ "interval": 90000000000,
+ "retries": 3,
+ "start_period": 30000000000,
+ "test": [
+ "CMD",
+ "curl",
+ "--fail",
+ "http://nginx.host.com"
+ ],
+ "timeout": 10000000000
+ },
+ "healthcheck_disabled": false,
+ "hostname": null,
+ "hosts": null,
+ "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
+ "labels": {
+ "com.example.department": "Finance",
+ "com.example.description": "Accounting webapp"
+ },
+ "limit_cpu": 0.5,
+ "limit_memory": 52428800,
+ "log_driver": "fluentd",
+ "log_driver_options": {
+ "fluentd-address": "127.0.0.1:24224",
+ "fluentd-async-connect": "true",
+ "tag": "myservice"
+ },
+ "mode": "replicated",
+ "mounts": [
+ {
+ "readonly": false,
+ "source": "/tmp/",
+ "target": "/remote_tmp/",
+ "type": "bind",
+ "labels": null,
+ "propagation": null,
+ "no_copy": null,
+ "driver_config": null,
+ "tmpfs_size": null,
+ "tmpfs_mode": null
+ }
+ ],
+ "networks": null,
+ "placement_preferences": [
+ {
+ "spread": "node.labels.mylabel"
+ }
+ ],
+ "publish": null,
+ "read_only": null,
+ "replicas": 1,
+ "reserve_cpu": 0.25,
+ "reserve_memory": 20971520,
+ "restart_policy": "on-failure",
+ "restart_policy_attempts": 3,
+ "restart_policy_delay": 5000000000,
+ "restart_policy_window": 120000000000,
+ "secrets": null,
+ "stop_grace_period": null,
+ "stop_signal": null,
+ "tty": null,
+ "update_delay": 10000000000,
+ "update_failure_action": null,
+ "update_max_failure_ratio": null,
+ "update_monitor": null,
+ "update_order": "stop-first",
+ "update_parallelism": 2,
+ "user": null,
+ "working_dir": null
+ }'
+changes:
+ returned: always
+ description:
+ - List of changed service attributes if a service has been altered, [] otherwise.
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+ - True if the service has been recreated (removed and created).
+ type: bool
+ sample: True
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
+- name: Set fluentd logging
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+
+- name: Set configs
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.general.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Configure service resources
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ reservations:
+ cpus: 0.25
+ memory: 20M
+ limits:
+ cpus: 0.50
+ memory: 50M
+
+- name: Remove service
+ community.general.docker_swarm_service:
+ name: myservice
+ state: absent
+'''
+
+import shlex
+import time
+import operator
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ convert_duration_to_nanosecond,
+ parse_healthcheck,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+from ansible.module_utils.basic import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+
+try:
+ from docker import types
+ from docker.utils import (
+ parse_repository_tag,
+ parse_env_file,
+ format_environment,
+ )
+ from docker.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_docker_environment(env, env_files):
+ """
+ Return a list of "KEY=VALUE" items. The supplied env variable can
+ be either a list or a dictionary.
+
+ If environment files are combined with explicit environment variables,
+ the explicit environment variables take precedence.
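+
+ For example (illustrative values), get_docker_environment({'A': '1'}, None)
+ returns ['A=1']; the result is always sorted.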
+ """
+ env_dict = {}
+ if env_files:
+ for env_file in env_files:
+ parsed_env_file = parse_env_file(env_file)
+ for name, value in parsed_env_file.items():
+ env_dict[name] = str(value)
+ if env is not None and isinstance(env, string_types):
+ env = env.split(',')
+ if env is not None and isinstance(env, dict):
+ for name, value in env.items():
+ if not isinstance(value, string_types):
+ raise ValueError(
+ 'Non-string value found for env option. '
+ 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
+ )
+ env_dict[name] = str(value)
+ elif env is not None and isinstance(env, list):
+ for item in env:
+ try:
+ name, value = item.split('=', 1)
+ except ValueError:
+ raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
+ env_dict[name] = value
+ elif env is not None:
+ raise ValueError(
+ 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
+ )
+ env_list = format_environment(env_dict)
+ if not env_list:
+ if env is not None or env_files is not None:
+ return []
+ else:
+ return None
+ return sorted(env_list)
+
+
+def get_docker_networks(networks, network_ids):
+ """
+ Validate a list of network names or a list of network dictionaries.
+ Network names will be resolved to ids by using the network_ids mapping.
+ """
+ if networks is None:
+ return None
+ parsed_networks = []
+ for network in networks:
+ if isinstance(network, string_types):
+ parsed_network = {'name': network}
+ elif isinstance(network, dict):
+ if 'name' not in network:
+ raise TypeError(
+ '"name" is required when networks are passed as dictionaries.'
+ )
+ name = network.pop('name')
+ parsed_network = {'name': name}
+ aliases = network.pop('aliases', None)
+ if aliases is not None:
+ if not isinstance(aliases, list):
+ raise TypeError('"aliases" network option is only allowed as a list')
+ if not all(
+ isinstance(alias, string_types) for alias in aliases
+ ):
+ raise TypeError('Only strings are allowed as network aliases.')
+ parsed_network['aliases'] = aliases
+ options = network.pop('options', None)
+ if options is not None:
+ if not isinstance(options, dict):
+ raise TypeError('Only dict is allowed as network options.')
+ parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+ # Check if any invalid keys are left
+ if network:
+ invalid_keys = ', '.join(network.keys())
+ raise TypeError(
+ '%s are not valid keys for the networks option' % invalid_keys
+ )
+
+ else:
+ raise TypeError(
+ 'Only a list of strings or dictionaries are allowed to be passed as networks.'
+ )
+ network_name = parsed_network.pop('name')
+ try:
+ parsed_network['id'] = network_ids[network_name]
+ except KeyError as e:
+ raise ValueError('Could not find a network named: %s.' % e)
+ parsed_networks.append(parsed_network)
+ return parsed_networks or []
+
+
+def get_nanoseconds_from_raw_option(name, value):
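+ """
+ Convert a raw duration option to nanoseconds: integers are passed
+ through unchanged, while strings are parsed either as an integer
+ number of nanoseconds or as a duration such as '1m30s'.
+ For example, get_nanoseconds_from_raw_option('update_delay', '1m30s')
+ returns 90000000000.
+ """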
+ if value is None:
+ return None
+ elif isinstance(value, int):
+ return value
+ elif isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ return convert_duration_to_nanosecond(value)
+ else:
+ raise ValueError(
+ 'Invalid type for %s %s (%s). Only string or int allowed.'
+ % (name, value, type(value))
+ )
+
+
+def get_value(key, values, default=None):
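+ """Return values[key], or default when the key is missing or its value is None."""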
+ value = values.get(key)
+ return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+ """
+ Check if new_dict has differences compared to old_dict, ignoring
+ keys that are set to None in new_dict (treated as not specified).
+ """
+ if new_dict is None:
+ return False
+ if not new_dict and old_dict:
+ return True
+ if not old_dict and new_dict:
+ return True
+ defined_options = dict(
+ (option, value) for option, value in new_dict.items()
+ if value is not None
+ )
+ for option, value in defined_options.items():
+ old_value = old_dict.get(option)
+ if not value and not old_value:
+ continue
+ if value != old_value:
+ return True
+ return False
+
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+ """
+ Check whether two lists differ. By default, both lists are sorted before comparison.
+ """
+
+ def sort_list(unsorted_list):
+ """
+ Sort a given list.
+ The list may contain dictionaries, so use the sort key to handle them.
+ """
+
+ if unsorted_list and isinstance(unsorted_list[0], dict):
+ if not sort_key:
+ raise Exception(
+ 'A sort key must be specified when sorting a list of dictionaries'
+ )
+ else:
+ return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+ # Either the list is empty or does not contain dictionaries
+ try:
+ return sorted(unsorted_list)
+ except TypeError:
+ return unsorted_list
+
+ if new_list is None:
+ return False
+ old_list = old_list or []
+ if len(new_list) != len(old_list):
+ return True
+
+ if sort_lists:
+ zip_data = zip(sort_list(new_list), sort_list(old_list))
+ else:
+ zip_data = zip(new_list, old_list)
+ for new_item, old_item in zip_data:
+ is_same_type = type(new_item) == type(old_item)
+ if not is_same_type:
+ if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+ # Even though the types are different between these items,
+ # they are both strings. Try matching on the same string type.
+ try:
+ new_item_type = type(new_item)
+ old_item_casted = new_item_type(old_item)
+ if new_item != old_item_casted:
+ return True
+ else:
+ continue
+ except UnicodeEncodeError:
+ # Fallback to assuming the strings are different
+ return True
+ else:
+ return True
+ if isinstance(new_item, dict):
+ if has_dict_changed(new_item, old_item):
+ return True
+ elif new_item != old_item:
+ return True
+
+ return False
+
+
+def have_networks_changed(new_networks, old_networks):
+ """Special case list checking for networks to sort aliases"""
+
+ if new_networks is None:
+ return False
+ old_networks = old_networks or []
+ if len(new_networks) != len(old_networks):
+ return True
+
+ zip_data = zip(
+ sorted(new_networks, key=lambda k: k['id']),
+ sorted(old_networks, key=lambda k: k['id'])
+ )
+
+ for new_item, old_item in zip_data:
+ new_item = dict(new_item)
+ old_item = dict(old_item)
+ # Sort the aliases
+ if 'aliases' in new_item:
+ new_item['aliases'] = sorted(new_item['aliases'] or [])
+ if 'aliases' in old_item:
+ old_item['aliases'] = sorted(old_item['aliases'] or [])
+
+ if has_dict_changed(new_item, old_item):
+ return True
+
+ return False
+
+
+class DockerService(DockerBaseClass):
+ def __init__(self, docker_api_version, docker_py_version):
+ super(DockerService, self).__init__()
+ self.image = ""
+ self.command = None
+ self.args = None
+ self.endpoint_mode = None
+ self.dns = None
+ self.healthcheck = None
+ self.healthcheck_disabled = None
+ self.hostname = None
+ self.hosts = None
+ self.tty = None
+ self.dns_search = None
+ self.dns_options = None
+ self.env = None
+ self.force_update = None
+ self.groups = None
+ self.log_driver = None
+ self.log_driver_options = None
+ self.labels = None
+ self.container_labels = None
+ self.limit_cpu = None
+ self.limit_memory = None
+ self.reserve_cpu = None
+ self.reserve_memory = None
+ self.mode = "replicated"
+ self.user = None
+ self.mounts = None
+ self.configs = None
+ self.secrets = None
+ self.constraints = None
+ self.networks = None
+ self.stop_grace_period = None
+ self.stop_signal = None
+ self.publish = None
+ self.placement_preferences = None
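+ # -1 is a sentinel for "not specified"; from_ansible_params() resolves
+ # it against the existing service (or defaults to 1).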
+ self.replicas = -1
+ self.service_id = False
+ self.service_version = False
+ self.read_only = None
+ self.restart_policy = None
+ self.restart_policy_attempts = None
+ self.restart_policy_delay = None
+ self.restart_policy_window = None
+ self.rollback_config = None
+ self.update_delay = None
+ self.update_parallelism = None
+ self.update_failure_action = None
+ self.update_monitor = None
+ self.update_max_failure_ratio = None
+ self.update_order = None
+ self.working_dir = None
+ self.init = None
+
+ self.docker_api_version = docker_api_version
+ self.docker_py_version = docker_py_version
+
+ def get_facts(self):
+ return {
+ 'image': self.image,
+ 'mounts': self.mounts,
+ 'configs': self.configs,
+ 'networks': self.networks,
+ 'command': self.command,
+ 'args': self.args,
+ 'tty': self.tty,
+ 'dns': self.dns,
+ 'dns_search': self.dns_search,
+ 'dns_options': self.dns_options,
+ 'healthcheck': self.healthcheck,
+ 'healthcheck_disabled': self.healthcheck_disabled,
+ 'hostname': self.hostname,
+ 'hosts': self.hosts,
+ 'env': self.env,
+ 'force_update': self.force_update,
+ 'groups': self.groups,
+ 'log_driver': self.log_driver,
+ 'log_driver_options': self.log_driver_options,
+ 'publish': self.publish,
+ 'constraints': self.constraints,
+ 'placement_preferences': self.placement_preferences,
+ 'labels': self.labels,
+ 'container_labels': self.container_labels,
+ 'mode': self.mode,
+ 'replicas': self.replicas,
+ 'endpoint_mode': self.endpoint_mode,
+ 'restart_policy': self.restart_policy,
+ 'secrets': self.secrets,
+ 'stop_grace_period': self.stop_grace_period,
+ 'stop_signal': self.stop_signal,
+ 'limit_cpu': self.limit_cpu,
+ 'limit_memory': self.limit_memory,
+ 'read_only': self.read_only,
+ 'reserve_cpu': self.reserve_cpu,
+ 'reserve_memory': self.reserve_memory,
+ 'restart_policy_delay': self.restart_policy_delay,
+ 'restart_policy_attempts': self.restart_policy_attempts,
+ 'restart_policy_window': self.restart_policy_window,
+ 'rollback_config': self.rollback_config,
+ 'update_delay': self.update_delay,
+ 'update_parallelism': self.update_parallelism,
+ 'update_failure_action': self.update_failure_action,
+ 'update_monitor': self.update_monitor,
+ 'update_max_failure_ratio': self.update_max_failure_ratio,
+ 'update_order': self.update_order,
+ 'user': self.user,
+ 'working_dir': self.working_dir,
+ 'init': self.init,
+ }
+
+ @property
+ def can_update_networks(self):
+ # Before Docker API 1.29 adding/removing networks was not supported
+ return (
+ self.docker_api_version >= LooseVersion('1.29') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @property
+ def can_use_task_template_networks(self):
+ # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
+ return (
+ self.docker_api_version >= LooseVersion('1.25') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @staticmethod
+ def get_restart_config_from_ansible_params(params):
+ restart_config = params['restart_config'] or {}
+ condition = get_value(
+ 'condition',
+ restart_config,
+ default=params['restart_policy']
+ )
+ delay = get_value(
+ 'delay',
+ restart_config,
+ default=params['restart_policy_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'restart_policy_delay',
+ delay
+ )
+ max_attempts = get_value(
+ 'max_attempts',
+ restart_config,
+ default=params['restart_policy_attempts']
+ )
+ window = get_value(
+ 'window',
+ restart_config,
+ default=params['restart_policy_window']
+ )
+ window = get_nanoseconds_from_raw_option(
+ 'restart_policy_window',
+ window
+ )
+ return {
+ 'restart_policy': condition,
+ 'restart_policy_delay': delay,
+ 'restart_policy_attempts': max_attempts,
+ 'restart_policy_window': window
+ }
+
+ @staticmethod
+ def get_update_config_from_ansible_params(params):
+ update_config = params['update_config'] or {}
+ parallelism = get_value(
+ 'parallelism',
+ update_config,
+ default=params['update_parallelism']
+ )
+ delay = get_value(
+ 'delay',
+ update_config,
+ default=params['update_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'update_delay',
+ delay
+ )
+ failure_action = get_value(
+ 'failure_action',
+ update_config,
+ default=params['update_failure_action']
+ )
+ monitor = get_value(
+ 'monitor',
+ update_config,
+ default=params['update_monitor']
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'update_monitor',
+ monitor
+ )
+ max_failure_ratio = get_value(
+ 'max_failure_ratio',
+ update_config,
+ default=params['update_max_failure_ratio']
+ )
+ order = get_value(
+ 'order',
+ update_config,
+ default=params['update_order']
+ )
+ return {
+ 'update_parallelism': parallelism,
+ 'update_delay': delay,
+ 'update_failure_action': failure_action,
+ 'update_monitor': monitor,
+ 'update_max_failure_ratio': max_failure_ratio,
+ 'update_order': order
+ }
+
+ @staticmethod
+ def get_rollback_config_from_ansible_params(params):
+ if params['rollback_config'] is None:
+ return None
+ rollback_config = params['rollback_config'] or {}
+ delay = get_nanoseconds_from_raw_option(
+ 'rollback_config.delay',
+ rollback_config.get('delay')
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'rollback_config.monitor',
+ rollback_config.get('monitor')
+ )
+ return {
+ 'parallelism': rollback_config.get('parallelism'),
+ 'delay': delay,
+ 'failure_action': rollback_config.get('failure_action'),
+ 'monitor': monitor,
+ 'max_failure_ratio': rollback_config.get('max_failure_ratio'),
+ 'order': rollback_config.get('order'),
+ }
+
+ @staticmethod
+ def get_logging_from_ansible_params(params):
+ logging_config = params['logging'] or {}
+ driver = get_value(
+ 'driver',
+ logging_config,
+ default=params['log_driver']
+ )
+ options = get_value(
+ 'options',
+ logging_config,
+ default=params['log_driver_options']
+ )
+ return {
+ 'log_driver': driver,
+ 'log_driver_options': options,
+ }
+
+ @staticmethod
+ def get_limits_from_ansible_params(params):
+ limits = params['limits'] or {}
+ cpus = get_value(
+ 'cpus',
+ limits,
+ default=params['limit_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ limits,
+ default=params['limit_memory']
+ )
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
+ return {
+ 'limit_cpu': cpus,
+ 'limit_memory': memory,
+ }
+
+ @staticmethod
+ def get_reservations_from_ansible_params(params):
+ reservations = params['reservations'] or {}
+ cpus = get_value(
+ 'cpus',
+ reservations,
+ default=params['reserve_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ reservations,
+ default=params['reserve_memory']
+ )
+
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
+ return {
+ 'reserve_cpu': cpus,
+ 'reserve_memory': memory,
+ }
+
+ @staticmethod
+ def get_placement_from_ansible_params(params):
+ placement = params['placement'] or {}
+ constraints = get_value(
+ 'constraints',
+ placement,
+ default=params['constraints']
+ )
+
+ preferences = placement.get('preferences')
+ return {
+ 'constraints': constraints,
+ 'placement_preferences': preferences,
+ }
+
+ @classmethod
+ def from_ansible_params(
+ cls,
+ ap,
+ old_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ docker_api_version,
+ docker_py_version,
+ ):
+ s = DockerService(docker_api_version, docker_py_version)
+ s.image = image_digest
+ s.args = ap['args']
+ s.endpoint_mode = ap['endpoint_mode']
+ s.dns = ap['dns']
+ s.dns_search = ap['dns_search']
+ s.dns_options = ap['dns_options']
+ s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
+ s.hostname = ap['hostname']
+ s.hosts = ap['hosts']
+ s.tty = ap['tty']
+ s.labels = ap['labels']
+ s.container_labels = ap['container_labels']
+ s.mode = ap['mode']
+ s.stop_signal = ap['stop_signal']
+ s.user = ap['user']
+ s.working_dir = ap['working_dir']
+ s.read_only = ap['read_only']
+ s.init = ap['init']
+
+ s.networks = get_docker_networks(ap['networks'], network_ids)
+
+ s.command = ap['command']
+ if isinstance(s.command, string_types):
+ s.command = shlex.split(s.command)
+ elif isinstance(s.command, list):
+ invalid_items = [
+ (index, item)
+ for index, item in enumerate(s.command)
+ if not isinstance(item, string_types)
+ ]
+ if invalid_items:
+ errors = ', '.join(
+ [
+ '%s (%s) at index %s' % (item, type(item), index)
+ for index, item in invalid_items
+ ]
+ )
+ raise Exception(
+ 'All items in a command list need to be strings. '
+ 'Check quoting. Invalid items: %s.'
+ % errors
+ )
+ s.command = ap['command']
+ elif s.command is not None:
+ raise ValueError(
+ 'Invalid type for command %s (%s). '
+ 'Only string or list allowed. Check quoting.'
+ % (s.command, type(s.command))
+ )
+
+ s.env = get_docker_environment(ap['env'], ap['env_files'])
+ s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
+
+ update_config = cls.get_update_config_from_ansible_params(ap)
+ for key, value in update_config.items():
+ setattr(s, key, value)
+
+ restart_config = cls.get_restart_config_from_ansible_params(ap)
+ for key, value in restart_config.items():
+ setattr(s, key, value)
+
+ logging_config = cls.get_logging_from_ansible_params(ap)
+ for key, value in logging_config.items():
+ setattr(s, key, value)
+
+ limits = cls.get_limits_from_ansible_params(ap)
+ for key, value in limits.items():
+ setattr(s, key, value)
+
+ reservations = cls.get_reservations_from_ansible_params(ap)
+ for key, value in reservations.items():
+ setattr(s, key, value)
+
+ placement = cls.get_placement_from_ansible_params(ap)
+ for key, value in placement.items():
+ setattr(s, key, value)
+
+ if ap['stop_grace_period'] is not None:
+ s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
+
+ if ap['force_update']:
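+ # Use the current time, digits only, as an ever-increasing counter so
+ # that the task template always differs and the swarm forces an update.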
+ s.force_update = int(str(time.time()).replace('.', ''))
+
+ if ap['groups'] is not None:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ s.groups = [str(g) for g in ap['groups']]
+
+ if ap['replicas'] == -1:
+ if old_service:
+ s.replicas = old_service.replicas
+ else:
+ s.replicas = 1
+ else:
+ s.replicas = ap['replicas']
+
+ if ap['publish'] is not None:
+ s.publish = []
+ for param_p in ap['publish']:
+ service_p = {}
+ service_p['protocol'] = param_p['protocol']
+ service_p['mode'] = param_p['mode']
+ service_p['published_port'] = param_p['published_port']
+ service_p['target_port'] = param_p['target_port']
+ s.publish.append(service_p)
+
+ if ap['mounts'] is not None:
+ s.mounts = []
+ for param_m in ap['mounts']:
+ service_m = {}
+ service_m['readonly'] = param_m['readonly']
+ service_m['type'] = param_m['type']
+ if param_m['source'] is None and param_m['type'] != 'tmpfs':
+ raise ValueError('Source must be specified for mounts which are not of type tmpfs')
+ service_m['source'] = param_m['source'] or ''
+ service_m['target'] = param_m['target']
+ service_m['labels'] = param_m['labels']
+ service_m['no_copy'] = param_m['no_copy']
+ service_m['propagation'] = param_m['propagation']
+ service_m['driver_config'] = param_m['driver_config']
+ service_m['tmpfs_mode'] = param_m['tmpfs_mode']
+ tmpfs_size = param_m['tmpfs_size']
+ if tmpfs_size is not None:
+ try:
+ tmpfs_size = human_to_bytes(tmpfs_size)
+ except ValueError as exc:
+ raise ValueError(
+ 'Failed to convert tmpfs_size to bytes: %s' % exc
+ )
+
+ service_m['tmpfs_size'] = tmpfs_size
+ s.mounts.append(service_m)
+
+ if ap['configs'] is not None:
+ s.configs = []
+ for param_m in ap['configs']:
+ service_c = {}
+ config_name = param_m['config_name']
+ service_c['config_id'] = param_m['config_id'] or config_ids[config_name]
+ service_c['config_name'] = config_name
+ service_c['filename'] = param_m['filename'] or config_name
+ service_c['uid'] = param_m['uid']
+ service_c['gid'] = param_m['gid']
+ service_c['mode'] = param_m['mode']
+ s.configs.append(service_c)
+
+ if ap['secrets'] is not None:
+ s.secrets = []
+ for param_m in ap['secrets']:
+ service_s = {}
+ secret_name = param_m['secret_name']
+ service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name]
+ service_s['secret_name'] = secret_name
+ service_s['filename'] = param_m['filename'] or secret_name
+ service_s['uid'] = param_m['uid']
+ service_s['gid'] = param_m['gid']
+ service_s['mode'] = param_m['mode']
+ s.secrets.append(service_s)
+
+ return s
+
+ def compare(self, os):
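+ """
+ Compare this (the desired) service spec against os, the currently
+ deployed service, and return a tuple of
+ (changed, differences, needs_rebuild, force_update).
+ """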
+ differences = DifferenceTracker()
+ needs_rebuild = False
+ force_update = False
+ if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
+ differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
+ if has_list_changed(self.env, os.env):
+ differences.add('env', parameter=self.env, active=os.env)
+ if self.log_driver is not None and self.log_driver != os.log_driver:
+ differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
+ if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
+ differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
+ if self.mode != os.mode:
+ needs_rebuild = True
+ differences.add('mode', parameter=self.mode, active=os.mode)
+ if has_list_changed(self.mounts, os.mounts, sort_key='target'):
+ differences.add('mounts', parameter=self.mounts, active=os.mounts)
+ if has_list_changed(self.configs, os.configs, sort_key='config_name'):
+ differences.add('configs', parameter=self.configs, active=os.configs)
+ if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'):
+ differences.add('secrets', parameter=self.secrets, active=os.secrets)
+ if have_networks_changed(self.networks, os.networks):
+ differences.add('networks', parameter=self.networks, active=os.networks)
+ needs_rebuild = not self.can_update_networks
+ if self.replicas != os.replicas:
+ differences.add('replicas', parameter=self.replicas, active=os.replicas)
+ if has_list_changed(self.command, os.command, sort_lists=False):
+ differences.add('command', parameter=self.command, active=os.command)
+ if has_list_changed(self.args, os.args, sort_lists=False):
+ differences.add('args', parameter=self.args, active=os.args)
+ if has_list_changed(self.constraints, os.constraints):
+ differences.add('constraints', parameter=self.constraints, active=os.constraints)
+ if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False):
+ differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
+ if has_list_changed(self.groups, os.groups):
+ differences.add('groups', parameter=self.groups, active=os.groups)
+ if self.labels is not None and self.labels != (os.labels or {}):
+ differences.add('labels', parameter=self.labels, active=os.labels)
+ if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
+ differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
+ if self.limit_memory is not None and self.limit_memory != os.limit_memory:
+ differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
+ if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
+ differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
+ if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
+ differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
+ if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
+ differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
+ if self.stop_signal is not None and self.stop_signal != os.stop_signal:
+ differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
+ if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
+ differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
+ if self.has_publish_changed(os.publish):
+ differences.add('publish', parameter=self.publish, active=os.publish)
+ if self.read_only is not None and self.read_only != os.read_only:
+ differences.add('read_only', parameter=self.read_only, active=os.read_only)
+ if self.restart_policy is not None and self.restart_policy != os.restart_policy:
+ differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
+ if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
+ differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
+ if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
+ differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
+ if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
+ differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
+ if has_dict_changed(self.rollback_config, os.rollback_config):
+ differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
+ if self.update_delay is not None and self.update_delay != os.update_delay:
+ differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
+ if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
+ differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
+ if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
+ differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
+ if self.update_monitor is not None and self.update_monitor != os.update_monitor:
+ differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
+ if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
+ differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
+ if self.update_order is not None and self.update_order != os.update_order:
+ differences.add('update_order', parameter=self.update_order, active=os.update_order)
+ has_image_changed, change = self.has_image_changed(os.image)
+ if has_image_changed:
+ differences.add('image', parameter=self.image, active=change)
+ if self.user and self.user != os.user:
+ differences.add('user', parameter=self.user, active=os.user)
+ if has_list_changed(self.dns, os.dns, sort_lists=False):
+ differences.add('dns', parameter=self.dns, active=os.dns)
+ if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
+ differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
+ if has_list_changed(self.dns_options, os.dns_options):
+ differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
+ if self.has_healthcheck_changed(os):
+ differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
+ if self.hostname is not None and self.hostname != os.hostname:
+ differences.add('hostname', parameter=self.hostname, active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
+ def has_healthcheck_changed(self, old_service):
+ if self.healthcheck_disabled is False and self.healthcheck is None:
+ return False
+ if self.healthcheck_disabled:
+ if old_service.healthcheck is None:
+ return False
+ if old_service.healthcheck.get('test') == ['NONE']:
+ return False
+ return self.healthcheck != old_service.healthcheck
+
+ def has_publish_changed(self, old_publish):
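+ """
+ Check whether the requested published ports differ from the currently
+ deployed ones. Ports are compared pairwise after sorting; when no
+ 'mode' was requested for a port, the daemon-reported mode is ignored.
+ """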
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+ publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol')
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
+
+ def has_image_changed(self, old_image):
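+ # If the new image is not pinned to a digest, compare by repo:tag only
+ # and strip any digest from the currently deployed image name.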
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
+
+ def build_container_spec(self):
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if filename:
+ secret_args['filename'] = filename
+ uid = secret_config.get('uid')
+ if uid:
+ secret_args['uid'] = uid
+ gid = secret_config.get('gid')
+ if gid:
+ secret_args['gid'] = gid
+ mode = secret_config.get('mode')
+ if mode:
+ secret_args['mode'] = mode
+
+ secrets.append(types.SecretReference(**secret_args))
+
+ dns_config_args = {}
+ if self.dns is not None:
+ dns_config_args['nameservers'] = self.dns
+ if self.dns_search is not None:
+ dns_config_args['search'] = self.dns_search
+ if self.dns_options is not None:
+ dns_config_args['options'] = self.dns_options
+ dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
+
+ container_spec_args = {}
+ if self.command is not None:
+ container_spec_args['command'] = self.command
+ if self.args is not None:
+ container_spec_args['args'] = self.args
+ if self.env is not None:
+ container_spec_args['env'] = self.env
+ if self.user is not None:
+ container_spec_args['user'] = self.user
+ if self.container_labels is not None:
+ container_spec_args['labels'] = self.container_labels
+ if self.healthcheck is not None:
+ container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
+ elif self.healthcheck_disabled:
+ container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE'])
+ if self.hostname is not None:
+ container_spec_args['hostname'] = self.hostname
+ if self.hosts is not None:
+ container_spec_args['hosts'] = self.hosts
+ if self.read_only is not None:
+ container_spec_args['read_only'] = self.read_only
+ if self.stop_grace_period is not None:
+ container_spec_args['stop_grace_period'] = self.stop_grace_period
+ if self.stop_signal is not None:
+ container_spec_args['stop_signal'] = self.stop_signal
+ if self.tty is not None:
+ container_spec_args['tty'] = self.tty
+ if self.groups is not None:
+ container_spec_args['groups'] = self.groups
+ if self.working_dir is not None:
+ container_spec_args['workdir'] = self.working_dir
+ if secrets is not None:
+ container_spec_args['secrets'] = secrets
+ if mounts is not None:
+ container_spec_args['mounts'] = mounts
+ if dns_config is not None:
+ container_spec_args['dns_config'] = dns_config
+ if configs is not None:
+ container_spec_args['configs'] = configs
+ if self.init is not None:
+ container_spec_args['init'] = self.init
+
+ return types.ContainerSpec(self.image, **container_spec_args)
+
+ def build_placement(self):
+ placement_args = {}
+ if self.constraints is not None:
+ placement_args['constraints'] = self.constraints
+ if self.placement_preferences is not None:
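+ # The Docker API expects preferences shaped like
+ # {'Spread': {'SpreadDescriptor': ...}}, while the module accepts
+ # lowercase keys such as 'spread'.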
+ placement_args['preferences'] = [
+ {key.title(): {'SpreadDescriptor': value}}
+ for preference in self.placement_preferences
+ for key, value in preference.items()
+ ]
+ return types.Placement(**placement_args) if placement_args else None
+
+ def build_update_config(self):
+ update_config_args = {}
+ if self.update_parallelism is not None:
+ update_config_args['parallelism'] = self.update_parallelism
+ if self.update_delay is not None:
+ update_config_args['delay'] = self.update_delay
+ if self.update_failure_action is not None:
+ update_config_args['failure_action'] = self.update_failure_action
+ if self.update_monitor is not None:
+ update_config_args['monitor'] = self.update_monitor
+ if self.update_max_failure_ratio is not None:
+ update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
+ if self.update_order is not None:
+ update_config_args['order'] = self.update_order
+ return types.UpdateConfig(**update_config_args) if update_config_args else None
+
+ def build_log_driver(self):
+ log_driver_args = {}
+ if self.log_driver is not None:
+ log_driver_args['name'] = self.log_driver
+ if self.log_driver_options is not None:
+ log_driver_args['options'] = self.log_driver_options
+ return types.DriverConfig(**log_driver_args) if log_driver_args else None
+
+ def build_restart_policy(self):
+ restart_policy_args = {}
+ if self.restart_policy is not None:
+ restart_policy_args['condition'] = self.restart_policy
+ if self.restart_policy_delay is not None:
+ restart_policy_args['delay'] = self.restart_policy_delay
+ if self.restart_policy_attempts is not None:
+ restart_policy_args['max_attempts'] = self.restart_policy_attempts
+ if self.restart_policy_window is not None:
+ restart_policy_args['window'] = self.restart_policy_window
+ return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
+
+ def build_rollback_config(self):
+ if self.rollback_config is None:
+ return None
+ rollback_config_options = [
+ 'parallelism',
+ 'delay',
+ 'failure_action',
+ 'monitor',
+ 'max_failure_ratio',
+ 'order',
+ ]
+ rollback_config_args = {}
+ for option in rollback_config_options:
+ value = self.rollback_config.get(option)
+ if value is not None:
+ rollback_config_args[option] = value
+ return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
+
+ def build_resources(self):
+ resources_args = {}
+ if self.limit_cpu is not None:
+ resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
+ if self.limit_memory is not None:
+ resources_args['mem_limit'] = self.limit_memory
+ if self.reserve_cpu is not None:
+ resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
+ if self.reserve_memory is not None:
+ resources_args['mem_reservation'] = self.reserve_memory
+ return types.Resources(**resources_args) if resources_args else None
+
+ def build_task_template(self, container_spec, placement=None):
+ log_driver = self.build_log_driver()
+ restart_policy = self.build_restart_policy()
+ resources = self.build_resources()
+
+ task_template_args = {}
+ if placement is not None:
+ task_template_args['placement'] = placement
+ if log_driver is not None:
+ task_template_args['log_driver'] = log_driver
+ if restart_policy is not None:
+ task_template_args['restart_policy'] = restart_policy
+ if resources is not None:
+ task_template_args['resources'] = resources
+ if self.force_update:
+ task_template_args['force_update'] = self.force_update
+ if self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ task_template_args['networks'] = networks
+ return types.TaskTemplate(container_spec=container_spec, **task_template_args)
+
+ def build_service_mode(self):
+ if self.mode == 'global':
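+ # Global services do not take a replica count.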
+ self.replicas = None
+ return types.ServiceMode(self.mode, replicas=self.replicas)
+
+ def build_networks(self):
+ networks = None
+ if self.networks is not None:
+ networks = []
+ for network in self.networks:
+ docker_network = {'Target': network['id']}
+ if 'aliases' in network:
+ docker_network['Aliases'] = network['aliases']
+ if 'options' in network:
+ docker_network['DriverOpts'] = network['options']
+ networks.append(docker_network)
+ return networks
+
+ def build_endpoint_spec(self):
+ endpoint_spec_args = {}
+ if self.publish is not None:
+ ports = []
+ for port in self.publish:
+ port_spec = {
+ 'Protocol': port['protocol'],
+ 'PublishedPort': port['published_port'],
+ 'TargetPort': port['target_port']
+ }
+ if port.get('mode'):
+ port_spec['PublishMode'] = port['mode']
+ ports.append(port_spec)
+ endpoint_spec_args['ports'] = ports
+ if self.endpoint_mode is not None:
+ endpoint_spec_args['mode'] = self.endpoint_mode
+ return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
+
+ def build_docker_service(self):
+ container_spec = self.build_container_spec()
+ placement = self.build_placement()
+ task_template = self.build_task_template(container_spec, placement)
+
+ update_config = self.build_update_config()
+ rollback_config = self.build_rollback_config()
+ service_mode = self.build_service_mode()
+ endpoint_spec = self.build_endpoint_spec()
+
+ service = {'task_template': task_template, 'mode': service_mode}
+ if update_config:
+ service['update_config'] = update_config
+ if rollback_config:
+ service['rollback_config'] = rollback_config
+ if endpoint_spec:
+ service['endpoint_spec'] = endpoint_spec
+ if self.labels:
+ service['labels'] = self.labels
+ if not self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ service['networks'] = networks
+ return service
+
+
+class DockerServiceManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.retries = 2
+ self.diff_tracker = None
+
+ def get_service(self, name):
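+ """
+ Inspect the named service and map the raw API data back into a
+ DockerService instance; return None when the service does not exist.
+ """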
+ try:
+ raw_data = self.client.inspect_service(name)
+ except NotFound:
+ return None
+ ds = DockerService(self.client.docker_api_version, self.client.docker_py_version)
+
+ task_template_data = raw_data['Spec']['TaskTemplate']
+ ds.image = task_template_data['ContainerSpec']['Image']
+ ds.user = task_template_data['ContainerSpec'].get('User')
+ ds.env = task_template_data['ContainerSpec'].get('Env')
+ ds.command = task_template_data['ContainerSpec'].get('Command')
+ ds.args = task_template_data['ContainerSpec'].get('Args')
+ ds.groups = task_template_data['ContainerSpec'].get('Groups')
+ ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
+ ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
+ ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
+ ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
+
+ healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
+ if healthcheck_data:
+ options = {
+ 'Test': 'test',
+ 'Interval': 'interval',
+ 'Timeout': 'timeout',
+ 'StartPeriod': 'start_period',
+ 'Retries': 'retries'
+ }
+ healthcheck = dict(
+ (options[key], value) for key, value in healthcheck_data.items()
+ if value is not None and key in options
+ )
+ ds.healthcheck = healthcheck
+
+ update_config_data = raw_data['Spec'].get('UpdateConfig')
+ if update_config_data:
+ ds.update_delay = update_config_data.get('Delay')
+ ds.update_parallelism = update_config_data.get('Parallelism')
+ ds.update_failure_action = update_config_data.get('FailureAction')
+ ds.update_monitor = update_config_data.get('Monitor')
+ ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
+ ds.update_order = update_config_data.get('Order')
+
+ rollback_config_data = raw_data['Spec'].get('RollbackConfig')
+ if rollback_config_data:
+ ds.rollback_config = {
+ 'parallelism': rollback_config_data.get('Parallelism'),
+ 'delay': rollback_config_data.get('Delay'),
+ 'failure_action': rollback_config_data.get('FailureAction'),
+ 'monitor': rollback_config_data.get('Monitor'),
+ 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
+ 'order': rollback_config_data.get('Order'),
+ }
+
+ dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
+ if dns_config:
+ ds.dns = dns_config.get('Nameservers')
+ ds.dns_search = dns_config.get('Search')
+ ds.dns_options = dns_config.get('Options')
+
+ ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
+
+ hosts = task_template_data['ContainerSpec'].get('Hosts')
+ if hosts:
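+ # Entries may be stored as "IP hostname" or as "hostname:IP";
+ # normalize both forms into a {hostname: ip} mapping.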
+ hosts = [
+ list(reversed(host.split(":", 1)))
+ if ":" in host
+ else host.split(" ", 1)
+ for host in hosts
+ ]
+ ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
+ ds.tty = task_template_data['ContainerSpec'].get('TTY')
+
+ placement = task_template_data.get('Placement')
+ if placement:
+ ds.constraints = placement.get('Constraints')
+ placement_preferences = []
+ for preference in placement.get('Preferences', []):
+ placement_preferences.append(
+ dict(
+ (key.lower(), value['SpreadDescriptor'])
+ for key, value in preference.items()
+ )
+ )
+ ds.placement_preferences = placement_preferences or None
+
+ restart_policy_data = task_template_data.get('RestartPolicy')
+ if restart_policy_data:
+ ds.restart_policy = restart_policy_data.get('Condition')
+ ds.restart_policy_delay = restart_policy_data.get('Delay')
+ ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
+ ds.restart_policy_window = restart_policy_data.get('Window')
+
+ raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
+ if raw_data_endpoint_spec:
+ ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
+ raw_data_ports = raw_data_endpoint_spec.get('Ports')
+ if raw_data_ports:
+ ds.publish = []
+ for port in raw_data_ports:
+ ds.publish.append({
+ 'protocol': port['Protocol'],
+ 'mode': port.get('PublishMode', None),
+ 'published_port': int(port['PublishedPort']),
+ 'target_port': int(port['TargetPort'])
+ })
+
+ raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
+ if raw_data_limits:
+ raw_cpu_limits = raw_data_limits.get('NanoCPUs')
+ if raw_cpu_limits:
+ ds.limit_cpu = float(raw_cpu_limits) / 1000000000
+
+ raw_memory_limits = raw_data_limits.get('MemoryBytes')
+ if raw_memory_limits:
+ ds.limit_memory = int(raw_memory_limits)
+
+ raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
+ if raw_data_reservations:
+ raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
+ if raw_cpu_reservations:
+ ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
+
+ raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
+ if raw_memory_reservations:
+ ds.reserve_memory = int(raw_memory_reservations)
+
+ ds.labels = raw_data['Spec'].get('Labels')
+ ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
+ ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
+ ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
+
+ mode = raw_data['Spec']['Mode']
+ if 'Replicated' in mode.keys():
+ ds.mode = to_text('replicated', encoding='utf-8')
+ ds.replicas = mode['Replicated']['Replicas']
+ elif 'Global' in mode.keys():
+ ds.mode = 'global'
+ else:
+ raise Exception('Unknown service mode: %s' % mode)
+
+ raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
+ if raw_data_mounts:
+ ds.mounts = []
+ for mount_data in raw_data_mounts:
+ bind_options = mount_data.get('BindOptions', {})
+ volume_options = mount_data.get('VolumeOptions', {})
+ tmpfs_options = mount_data.get('TmpfsOptions', {})
+ driver_config = volume_options.get('DriverConfig', {})
+ driver_config = dict(
+ (key.lower(), value) for key, value in driver_config.items()
+ ) or None
+ ds.mounts.append({
+ 'source': mount_data.get('Source', ''),
+ 'type': mount_data['Type'],
+ 'target': mount_data['Target'],
+ 'readonly': mount_data.get('ReadOnly'),
+ 'propagation': bind_options.get('Propagation'),
+ 'no_copy': volume_options.get('NoCopy'),
+ 'labels': volume_options.get('Labels'),
+ 'driver_config': driver_config,
+ 'tmpfs_mode': tmpfs_options.get('Mode'),
+ 'tmpfs_size': tmpfs_options.get('SizeBytes'),
+ })
+
+ raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
+ if raw_data_configs:
+ ds.configs = []
+ for config_data in raw_data_configs:
+ ds.configs.append({
+ 'config_id': config_data['ConfigID'],
+ 'config_name': config_data['ConfigName'],
+ 'filename': config_data['File'].get('Name'),
+ 'uid': config_data['File'].get('UID'),
+ 'gid': config_data['File'].get('GID'),
+ 'mode': config_data['File'].get('Mode')
+ })
+
+ raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
+ if raw_data_secrets:
+ ds.secrets = []
+ for secret_data in raw_data_secrets:
+ ds.secrets.append({
+ 'secret_id': secret_data['SecretID'],
+ 'secret_name': secret_data['SecretName'],
+ 'filename': secret_data['File'].get('Name'),
+ 'uid': secret_data['File'].get('UID'),
+ 'gid': secret_data['File'].get('GID'),
+ 'mode': secret_data['File'].get('Mode')
+ })
+
+ raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
+ if raw_networks_data:
+ ds.networks = []
+ for network_data in raw_networks_data:
+ network = {'id': network_data['Target']}
+ if 'Aliases' in network_data:
+ network['aliases'] = network_data['Aliases']
+ if 'DriverOpts' in network_data:
+ network['options'] = network_data['DriverOpts']
+ ds.networks.append(network)
+ ds.service_version = raw_data['Version']['Index']
+ ds.service_id = raw_data['ID']
+
+ ds.init = task_template_data['ContainerSpec'].get('Init', False)
+ return ds
+
+ def update_service(self, name, old_service, new_service):
+ service_data = new_service.build_docker_service()
+ result = self.client.update_service(
+ old_service.service_id,
+ old_service.service_version,
+ name=name,
+ **service_data
+ )
+ # Prior to Docker SDK for Python 4.0.0 no warnings were returned, so this call is a no-op there.
+ # (see https://github.com/docker/docker-py/pull/2272)
+ self.client.report_warnings(result, ['Warning'])
+
+ def create_service(self, name, service):
+ service_data = service.build_docker_service()
+ result = self.client.create_service(name=name, **service_data)
+ self.client.report_warnings(result, ['Warning'])
+
+ def remove_service(self, name):
+ self.client.remove_service(name)
+
+ def get_image_digest(self, name, resolve=False):
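+ """
+ When resolve is True, resolve the image name to its repo:tag@digest
+ form using the registry's distribution data; otherwise return the
+ name unchanged.
+ """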
+ if (
+ not name
+ or not resolve
+ ):
+ return name
+ repo, tag = parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ name = repo + ':' + tag
+ distribution_data = self.client.inspect_distribution(name)
+ digest = distribution_data['Descriptor']['digest']
+ return '%s@%s' % (name, digest)
+
+ def get_networks_names_ids(self):
+ return dict(
+ (network['Name'], network['Id']) for network in self.client.networks()
+ )
+
+ def get_missing_secret_ids(self):
+ """
+ Resolve missing secret ids by looking them up by name
+ """
+ secret_names = [
+ secret['secret_name']
+ for secret in self.client.module.params.get('secrets') or []
+ if secret['secret_id'] is None
+ ]
+ if not secret_names:
+ return {}
+ secrets = self.client.secrets(filters={'name': secret_names})
+ secrets = dict(
+ (secret['Spec']['Name'], secret['ID'])
+ for secret in secrets
+ if secret['Spec']['Name'] in secret_names
+ )
+ for secret_name in secret_names:
+ if secret_name not in secrets:
+ self.client.fail(
+ 'Could not find a secret named "%s"' % secret_name
+ )
+ return secrets
+
+ def get_missing_config_ids(self):
+ """
+ Resolve missing config ids by looking them up by name
+ """
+ config_names = [
+ config['config_name']
+ for config in self.client.module.params.get('configs') or []
+ if config['config_id'] is None
+ ]
+ if not config_names:
+ return {}
+ configs = self.client.configs(filters={'name': config_names})
+ configs = dict(
+ (config['Spec']['Name'], config['ID'])
+ for config in configs
+ if config['Spec']['Name'] in config_names
+ )
+ for config_name in config_names:
+ if config_name not in configs:
+ self.client.fail(
+ 'Could not find a config named "%s"' % config_name
+ )
+ return configs
+
+ def run(self):
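+ """
+ Reconcile the requested service state, creating, updating, rebuilding
+ or removing the service as needed, and return a tuple of
+ (msg, changed, rebuilt, differences, facts).
+ """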
+ self.diff_tracker = DifferenceTracker()
+ module = self.client.module
+
+ image = module.params['image']
+ try:
+ image_digest = self.get_image_digest(
+ name=image,
+ resolve=module.params['resolve_image']
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'Error looking for an image named %s: %s'
+ % (image, e)
+ )
+
+ try:
+ current_service = self.get_service(module.params['name'])
+ except Exception as e:
+ self.client.fail(
+ 'Error looking for service named %s: %s'
+ % (module.params['name'], e)
+ )
+ try:
+ secret_ids = self.get_missing_secret_ids()
+ config_ids = self.get_missing_config_ids()
+ network_ids = self.get_networks_names_ids()
+ new_service = DockerService.from_ansible_params(
+ module.params,
+ current_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ self.client.docker_api_version,
+ self.client.docker_py_version
+ )
+ except Exception as e:
+ return self.client.fail(
+ 'Error parsing module parameters: %s' % e
+ )
+
+ changed = False
+ msg = 'noop'
+ rebuilt = False
+ differences = DifferenceTracker()
+ facts = {}
+
+ if current_service:
+ if module.params['state'] == 'absent':
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ msg = 'Service removed'
+ changed = True
+ else:
+ changed, differences, need_rebuild, force_update = new_service.compare(
+ current_service
+ )
+ if changed:
+ self.diff_tracker.merge(differences)
+ if need_rebuild:
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ self.create_service(
+ module.params['name'],
+ new_service
+ )
+ msg = 'Service rebuilt'
+ rebuilt = True
+ else:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service updated'
+ rebuilt = False
+ else:
+ if force_update:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service forcefully updated'
+ rebuilt = False
+ changed = True
+ else:
+ msg = 'Service unchanged'
+ facts = new_service.get_facts()
+ else:
+ if module.params['state'] == 'absent':
+ msg = 'Service absent'
+ else:
+ if not module.check_mode:
+ self.create_service(module.params['name'], new_service)
+ msg = 'Service created'
+ changed = True
+ facts = new_service.get_facts()
+
+ return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
+
+ def run_safe(self):
+ while True:
+ try:
+ return self.run()
+ except APIError as e:
+ # Sometimes Version.Index will have changed between an inspect and
+ # update. If this is encountered we'll retry the update.
+ if self.retries > 0 and 'update out of sequence' in str(e.explanation):
+ self.retries -= 1
+ time.sleep(1)
+ else:
+ raise
+
+
+def _detect_publish_mode_usage(client):
+ for publish_def in client.module.params['publish'] or []:
+ if publish_def.get('mode'):
+ return True
+ return False
+
+
+def _detect_healthcheck_start_period(client):
+ if client.module.params['healthcheck']:
+ return client.module.params['healthcheck']['start_period'] is not None
+ return False
+
+
+def _detect_mount_tmpfs_usage(client):
+ for mount in client.module.params['mounts'] or []:
+ if mount.get('type') == 'tmpfs':
+ return True
+ if mount.get('tmpfs_size') is not None:
+ return True
+ if mount.get('tmpfs_mode') is not None:
+ return True
+ return False
+
+
+def _detect_update_config_failure_action_rollback(client):
+ rollback_config_failure_action = (
+ (client.module.params['update_config'] or {}).get('failure_action')
+ )
+ update_failure_action = client.module.params['update_failure_action']
+ failure_action = rollback_config_failure_action or update_failure_action
+ return failure_action == 'rollback'
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ image=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mounts=dict(type='list', elements='dict', options=dict(
+ source=dict(type='str'),
+ target=dict(type='str', required=True),
+ type=dict(
+ type='str',
+ default='bind',
+ choices=['bind', 'volume', 'tmpfs', 'npipe'],
+ ),
+ readonly=dict(type='bool'),
+ labels=dict(type='dict'),
+ propagation=dict(
+ type='str',
+ choices=[
+ 'shared',
+ 'slave',
+ 'private',
+ 'rshared',
+ 'rslave',
+ 'rprivate'
+ ]
+ ),
+ no_copy=dict(type='bool'),
+ driver_config=dict(type='dict', options=dict(
+ name=dict(type='str'),
+ options=dict(type='dict')
+ )),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='int')
+ )),
+ configs=dict(type='list', elements='dict', options=dict(
+ config_id=dict(type='str'),
+ config_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ secrets=dict(type='list', elements='dict', options=dict(
+ secret_id=dict(type='str'),
+ secret_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ networks=dict(type='list', elements='raw'),
+ command=dict(type='raw'),
+ args=dict(type='list', elements='str'),
+ env=dict(type='raw'),
+ env_files=dict(type='list', elements='path'),
+ force_update=dict(type='bool', default=False),
+ groups=dict(type='list', elements='str'),
+ logging=dict(type='dict', options=dict(
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ log_driver=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ log_driver_options=dict(type='dict', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ publish=dict(type='list', elements='dict', options=dict(
+ published_port=dict(type='int', required=True),
+ target_port=dict(type='int', required=True),
+ protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
+ mode=dict(type='str', choices=['ingress', 'host']),
+ )),
+ placement=dict(type='dict', options=dict(
+ constraints=dict(type='list', elements='str'),
+ preferences=dict(type='list', elements='dict'),
+ )),
+ constraints=dict(type='list', elements='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ dns=dict(type='list', elements='str'),
+ dns_search=dict(type='list', elements='str'),
+ dns_options=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ hosts=dict(type='dict'),
+ labels=dict(type='dict'),
+ container_labels=dict(type='dict'),
+ mode=dict(
+ type='str',
+ default='replicated',
+ choices=['replicated', 'global']
+ ),
+ replicas=dict(type='int', default=-1),
+ endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
+ stop_grace_period=dict(type='str'),
+ stop_signal=dict(type='str'),
+ limits=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ limit_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ limit_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ read_only=dict(type='bool'),
+ reservations=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ reserve_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ reserve_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ resolve_image=dict(type='bool', default=False),
+ restart_config=dict(type='dict', options=dict(
+ condition=dict(type='str', choices=['none', 'on-failure', 'any']),
+ delay=dict(type='str'),
+ max_attempts=dict(type='int'),
+ window=dict(type='str'),
+ )),
+ restart_policy=dict(
+ type='str',
+ choices=['none', 'on-failure', 'any'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ restart_policy_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_attempts=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_window=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ rollback_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_parallelism=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ update_monitor=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_max_failure_ratio=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_order=dict(
+ type='str',
+ choices=['stop-first', 'start-first'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ user=dict(type='str'),
+ working_dir=dict(type='str'),
+ init=dict(type='bool'),
+ )
+
+ option_minimal_versions = dict(
+ constraints=dict(docker_py_version='2.4.0'),
+ dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ update_max_failure_ratio=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_monitor=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_order=dict(docker_py_version='2.7.0', docker_api_version='1.29'),
+ stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
+ rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
+ init=dict(docker_py_version='4.0.0', docker_api_version='1.37'),
+ # specials
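+ # Each entry below ties a suboption usage to minimum versions: detect_usage
+ # returns True when that combination is actually used, and usage_msg is
+ # included in the error raised on too-old Docker SDK/API versions.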
+ publish_mode=dict(
+ docker_py_version='3.0.0',
+ docker_api_version='1.25',
+ detect_usage=_detect_publish_mode_usage,
+ usage_msg='set publish.mode'
+ ),
+ healthcheck_start_period=dict(
+ docker_py_version='2.6.0',
+ docker_api_version='1.29',
+ detect_usage=_detect_healthcheck_start_period,
+ usage_msg='set healthcheck.start_period'
+ ),
+ update_config_max_failure_ratio=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'max_failure_ratio'
+ ) is not None,
+ usage_msg='set update_config.max_failure_ratio'
+ ),
+ update_config_failure_action=dict(
+ docker_py_version='3.5.0',
+ docker_api_version='1.28',
+ detect_usage=_detect_update_config_failure_action_rollback,
+ usage_msg='set update_config.failure_action.rollback'
+ ),
+ update_config_monitor=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'monitor'
+ ) is not None,
+ usage_msg='set update_config.monitor'
+ ),
+ update_config_order=dict(
+ docker_py_version='2.7.0',
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set update_config.order'
+ ),
+ placement_config_preferences=dict(
+ docker_py_version='2.4.0',
+ docker_api_version='1.27',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'preferences'
+ ) is not None,
+ usage_msg='set placement.preferences'
+ ),
+ placement_config_constraints=dict(
+ docker_py_version='2.4.0',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'constraints'
+ ) is not None,
+ usage_msg='set placement.constraints'
+ ),
+ mounts_tmpfs=dict(
+ docker_py_version='2.6.0',
+ detect_usage=_detect_mount_tmpfs_usage,
+ usage_msg='set mounts.tmpfs'
+ ),
+ rollback_config_order=dict(
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set rollback_config.order'
+ ),
+ )
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='2.0.2',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ dsm = DockerServiceManager(client)
+ msg, changed, rebuilt, changes, facts = dsm.run_safe()
+
+ results = dict(
+ msg=msg,
+ changed=changed,
+ rebuilt=rebuilt,
+ changes=changes,
+ swarm_service=facts,
+ )
+ if client.module._diff:
+ before, after = dsm.diff_tracker.get_before_after()
+ results['diff'] = dict(before=before, after=after)
+
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service_info.py
new file mode 100644
index 00000000..130be7b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service_info.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+#
+# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+ - Retrieves information about a docker service.
+ - Essentially returns the output of C(docker service inspect <name>).
+ - Must be executed on a host running as a Swarm Manager, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the service to inspect.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Hannes Ljungberg (@hannseman)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+ community.general.docker_swarm_service_info:
+ name: myservice
+ register: result
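+
+# A hypothetical follow-up, not part of the original examples; it only uses
+# the C(exists) field documented in RETURN below.
+- name: Report whether the service exists
+ ansible.builtin.debug:
+ msg: "myservice {{ 'exists' if result.exists else 'does not exist' }}"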
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the service exists.
+ type: bool
+ returned: always
+ sample: true
+service:
+ description:
+ - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+ - Will be C(none) if service does not exist.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+def get_service_info(client):
+ service = client.module.params['name']
+ return client.get_service_inspect(
+ service_id=service,
+ skip_missing=True
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.0.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ service = get_service_info(client)
+
+ client.module.exit_json(
+ changed=False,
+ service=service,
+ exists=bool(service)
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume.py
new file mode 100644
index 00000000..dca92df5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+ - Create/remove Docker volumes.
+ - Performs largely the same function as the "docker volume" CLI subcommand.
+options:
+ volume_name:
+ description:
+ - Name of the volume to operate on.
+ type: str
+ required: yes
+ aliases:
+ - name
+
+ driver:
+ description:
+ - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
+ type: str
+ default: local
+
+ driver_options:
+ description:
+ - "Dictionary of volume settings. Consult docker docs for valid options and values:
+ U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
+ type: dict
+
+ labels:
+ description:
+ - Dictionary of label key/values to set for the volume.
+ type: dict
+
+ force:
+ description:
+ - With state C(present) causes the volume to be deleted and recreated if the volume already
+ exists and the driver, driver options or labels differ. This will cause any data in the existing
+ volume to be lost.
+ - Deprecated. Will be removed in community.general 2.0.0. Set I(recreate) to C(options-changed) instead
+ for the same behavior as setting I(force) to C(yes).
+ type: bool
+
+ recreate:
+ description:
+ - Controls when a volume will be recreated when I(state) is C(present). Please
+ note that recreating an existing volume will cause **any data in the existing volume
+ to be lost!** The volume will be deleted and a new volume with the same name will be
+ created.
+ - The value C(always) forces the volume to be always recreated.
+ - The value C(never) makes sure the volume will not be recreated.
+ - The value C(options-changed) makes sure the volume will be recreated if the volume
+ already exists and the driver, driver options or labels differ.
+ type: str
+ default: never
+ choices:
+ - always
+ - never
+ - options-changed
+
+ state:
+ description:
+ - C(absent) deletes the volume.
+ - C(present) creates the volume, if it does not already exist.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Alex Grönholm (@agronholm)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.9.0"
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ community.general.docker_volume:
+ name: volume_one
+
+- name: Remove a volume
+ community.general.docker_volume:
+ name: volume_one
+ state: absent
+
+- name: Create a volume with options
+ community.general.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
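+
+# A hypothetical sketch, not part of the original examples: recreate the
+# volume only when driver options differ (see the recreate option above).
+# Recreating an existing volume destroys any data stored in it.
+- name: Recreate volume_two when its options changed
+ community.general.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
+ recreate: options-changed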
+'''
+
+RETURN = '''
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_volume). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ AnsibleDockerClient,
+ DifferenceTracker,
+ RequestException,
+)
+from ansible.module_utils.six import iteritems, text_type
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.volume_name = None
+ self.driver = None
+ self.driver_options = None
+ self.labels = None
+ self.force = None
+ self.recreate = None
+ self.debug = None
+
+ for key, value in iteritems(client.module.params):
+ setattr(self, key, value)
+
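+ # Map the deprecated "force" option onto "recreate": force=yes behaves
+ # like recreate=options-changed, force=no like recreate=never.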
+ if self.force is not None:
+ if self.recreate != 'never':
+ client.fail('Cannot use the deprecated "force" '
+ 'option when "recreate" is set. Please stop '
+ 'using the force option.')
+ client.module.warn('The "force" option of docker_volume has been deprecated '
+ 'in Ansible 2.8. Please use the "recreate" '
+ 'option, which provides the same functionality as "force".')
+ self.recreate = 'options-changed' if self.force else 'never'
+
+
+class DockerVolumeManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_volume = self.get_existing_volume()
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_volume(self):
+ try:
+ volumes = self.client.volumes()
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ if volumes[u'Volumes'] is None:
+ return None
+
+ for volume in volumes[u'Volumes']:
+ if volume['Name'] == self.parameters.volume_name:
+ return volume
+
+ return None
+
+ def has_different_config(self):
+ """
+ Return the list of differences between the current parameters and the existing volume.
+
+ :return: list of options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
+ differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
+ if self.parameters.driver_options:
+ if not self.existing_volume.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=self.existing_volume.get('Options'))
+ else:
+ for key, value in iteritems(self.parameters.driver_options):
+ if (not self.existing_volume['Options'].get(key) or
+ value != self.existing_volume['Options'][key]):
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=self.existing_volume['Options'].get(key))
+ if self.parameters.labels:
+ existing_labels = self.existing_volume.get('Labels', {})
+ for label in self.parameters.labels:
+ if existing_labels.get(label) != self.parameters.labels.get(label):
+ differences.add('labels.%s' % label,
+ parameter=self.parameters.labels.get(label),
+ active=existing_labels.get(label))
+
+ return differences
+
+ def create_volume(self):
+ if not self.existing_volume:
+ if not self.check_mode:
+ try:
+ params = dict(
+ driver=self.parameters.driver,
+ driver_opts=self.parameters.driver_options,
+ )
+
+ if self.parameters.labels is not None:
+ params['labels'] = self.parameters.labels
+
+ resp = self.client.create_volume(self.parameters.volume_name, **params)
+ self.existing_volume = self.client.inspect_volume(resp['Name'])
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_volume(self):
+ if self.existing_volume:
+ if not self.check_mode:
+ try:
+ self.client.remove_volume(self.parameters.volume_name)
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
+ self.results['changed'] = True
+
+ def present(self):
+ differences = DifferenceTracker()
+ if self.existing_volume:
+ differences = self.has_different_config()
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
+ if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
+ self.remove_volume()
+ self.existing_volume = None
+
+ self.create_volume()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ volume_facts = self.get_existing_volume()
+ self.results['ansible_facts'] = {u'docker_volume': volume_facts}
+ self.results['volume'] = volume_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
+ self.remove_volume()
+
+
+def main():
+ argument_spec = dict(
+ volume_name=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='local'),
+ driver_options=dict(type='dict', default={}),
+ labels=dict(type='dict'),
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
+ debug=dict(type='bool', default=False)
+ )
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ # "The docker server >= 1.9.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerVolumeManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume_info.py
new file mode 100644
index 00000000..c00c2425
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+ - Performs largely the same function as the "docker volume inspect" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the volume to inspect.
+ type: str
+ required: yes
+ aliases:
+ - volume_name
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Felix Fontein (@felixfontein)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on volume
+ community.general.docker_volume_info:
+ name: mydata
+ register: result
+
+- name: Does volume exist?
+ ansible.builtin.debug:
+ msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+ ansible.builtin.debug:
+ var: result.volume
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the volume exists.
+ type: bool
+ returned: always
+ sample: true
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Will be C(none) if volume does not exist.
+ returned: success
+ type: dict
+ sample: '{
+ "CreatedAt": "2018-12-09T17:43:44+01:00",
+ "Driver": "local",
+ "Labels": null,
+ "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
+ "Name": "ansible-test-bd3f6172",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def get_existing_volume(client, volume_name):
+ try:
+ return client.inspect_volume(volume_name)
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ client.fail("Error inspecting volume: %s" % exc)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['volume_name']),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ volume = get_existing_volume(client, client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=bool(volume),
+ volume=volume,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gc_storage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gc_storage.py
new file mode 100644
index 00000000..52ca18fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gc_storage.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gc_storage
+short_description: This module manages objects/buckets in Google Cloud Storage.
+description:
+ - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some
+ canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module
+ requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
+ information about setting the default project.
+
+options:
+ bucket:
+ type: str
+ description:
+ - Bucket name.
+ required: true
+ object:
+ type: path
+ description:
+ - Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples).
+ src:
+ type: str
+ description:
+ - The source file path when performing a PUT operation.
+ dest:
+ type: path
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ overwrite:
+ description:
+ - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ type: bool
+ default: 'yes'
+ aliases: [ 'force' ]
+ permission:
+ type: str
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
+ 'public-read', 'authenticated-read'.
+ default: private
+ choices: ['private', 'public-read', 'authenticated-read']
+ headers:
+ type: dict
+ description:
+ - Headers to attach to object.
+ default: {}
+ expiration:
+ type: int
+ default: 600
+ description:
+ - Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only
+ available when public-read is the ACL for the object.
+ aliases: [expiry]
+ mode:
+ type: str
+ description:
+ - Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and
+ delete (bucket).
+ required: true
+ choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
+ gs_secret_key:
+ type: str
+ description:
+ - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used.
+ required: true
+ gs_access_key:
+ type: str
+ description:
+ - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used.
+ required: true
+ region:
+ type: str
+ description:
+ - The gs region to use. If not defined then the value 'US' will be used. See U(https://cloud.google.com/storage/docs/bucket-locations)
+ default: 'US'
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ default: false
+
+requirements:
+ - "python >= 2.6"
+ - "boto >= 2.9"
+
+author:
+- Benno Joy (@bennojoy)
+- Lukas Beumer (@Nitaco)
+
+'''
+
+EXAMPLES = '''
+- name: Upload some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ permission: public-read
+
+- name: Upload some headers
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ headers: '{"Content-Encoding": "gzip"}'
+
+- name: Download some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Download an object as a string to use elsewhere in your playbook
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_str
+
+- name: Create an empty bucket
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: create
+
+- name: Create a bucket with key as directory
+ community.general.gc_storage:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+
+- name: Delete a bucket and all contents
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: delete
+
+- name: Create a bucket with versioning enabled
+ community.general.gc_storage:
+ bucket: "mybucket"
+ versioning: yes
+ mode: create
+
+- name: Create a bucket located in the eu
+ community.general.gc_storage:
+ bucket: "mybucket"
+ region: "europe-west3"
+ mode: create
+
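+# A hypothetical sketch, not part of the original examples: retrieve a signed
+# download URL with mode=get_url; expiration is in seconds and the object must
+# already exist.
+- name: Get a download url for an object
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_url
+ expiration: 300
+ register: url_result
+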
+'''
+
+import os
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def grant_check(module, gs, obj):
+ try:
+ acp = obj.get_acl()
+ if module.params.get('permission') == 'public-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
+ if not grant:
+ obj.set_acl('public-read')
+ module.exit_json(changed=True, result="The objects permission as been set to public-read")
+ if module.params.get('permission') == 'authenticated-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
+ if not grant:
+ obj.set_acl('authenticated-read')
+ module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ return True
+
+
+def key_check(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if key_check:
+ grant_check(module, gs, key_check)
+ return True
+ else:
+ return False
+
+
+def keysum(module, gs, bucket, obj):
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ if not key_check:
+ return None
+ md5_remote = key_check.etag[1:-1]
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ if etag_multipart is True:
+ module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
+ return md5_remote
+
+
+def bucket_check(module, gs, bucket):
+ try:
+ result = gs.lookup(bucket)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if result:
+ grant_check(module, gs, result)
+ return True
+ else:
+ return False
+
+
+def create_bucket(module, gs, bucket):
+ try:
+ bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region'))
+ bucket.set_acl(module.params.get('permission'))
+ bucket.configure_versioning(module.params.get('versioning'))
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if bucket:
+ return True
+
+
+def delete_bucket(module, gs, bucket):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket_contents = bucket.list()
+ for key in bucket_contents:
+ bucket.delete_key(key.name)
+ bucket.delete()
+ return True
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_key(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket.delete_key(obj)
+ module.exit_json(msg="Object deleted from bucket ", changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def create_dirkey(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_string('')
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+
+def transform_headers(headers):
+ """
+ Boto url-encodes values unless we convert the value to `str`, so doing
+ this prevents 'max-age=100000' from being converted to "max-age%3D100000".
+
+ :param headers: Headers to convert
+ :type headers: dict
+ :rtype: dict
+
+ """
+
+ for key, value in headers.items():
+ headers[key] = str(value)
+ return headers
+
+
+def upload_gsfile(module, gs, bucket, obj, src, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_filename(
+ filename=src,
+ headers=transform_headers(module.params.get('headers'))
+ )
+ key.set_acl(module.params.get('permission'))
+ url = key.generate_url(expiry)
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsfile(module, gs, bucket, obj, dest):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ key.get_contents_to_filename(dest)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsstr(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ contents = key.get_contents_as_string()
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def get_download_url(module, gs, bucket, obj, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ url = key.generate_url(expiry)
+ module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def handle_get(module, gs, bucket, obj, overwrite, dest):
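+ # Compare the remote object's md5 (taken from its etag) with the local
+ # file's md5; download only when they differ and overwrite is allowed.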
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(dest)
+ if md5_local == md5_remote:
+ module.exit_json(changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
+ else:
+ download_gsfile(module, gs, bucket, obj, dest)
+
+
+def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
+ # Let's check whether the bucket exists to get ground truth.
+ bucket_rc = bucket_check(module, gs, bucket)
+ key_rc = key_check(module, gs, bucket, obj)
+
+ # Let's check the key state. Does it exist, and if it does, compute the etag md5sum.
+ if bucket_rc and key_rc:
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(src)
+ if md5_local == md5_remote:
+ module.exit_json(msg="Local and remote object are identical", changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
+ else:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ if not bucket_rc:
+ create_bucket(module, gs, bucket)
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ # If bucket exists but key doesn't, just upload.
+ if bucket_rc and not key_rc:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+
+def handle_delete(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ if bucket and obj:
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, obj):
+ module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
+ else:
+ module.exit_json(msg="Object does not exist.", changed=False)
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ else:
+ module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
+
+
+def handle_create(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ create_dirkey(module, gs, bucket, dirobj)
+ else:
+ create_bucket(module, gs, bucket)
+ create_dirkey(module, gs, bucket, dirobj)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bucket=dict(required=True),
+ object=dict(default=None, type='path'),
+ src=dict(default=None),
+ dest=dict(default=None, type='path'),
+ expiration=dict(type='int', default=600, aliases=['expiry']),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
+ permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
+ headers=dict(type='dict', default={}),
+ gs_secret_key=dict(no_log=True, required=True),
+ gs_access_key=dict(required=True),
+ overwrite=dict(default=True, type='bool', aliases=['force']),
+ region=dict(default='US', type='str'),
+ versioning=dict(default=False, type='bool')
+ ),
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto 2.9+ is required for this module. Try: pip install --upgrade boto')
+
+ bucket = module.params.get('bucket')
+ obj = module.params.get('object')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ mode = module.params.get('mode')
+ expiry = module.params.get('expiration')
+ gs_secret_key = module.params.get('gs_secret_key')
+ gs_access_key = module.params.get('gs_access_key')
+ overwrite = module.params.get('overwrite')
+
+ if mode == 'put':
+ if not src or not obj:
+ module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
+ if mode == 'get':
+ if not dest or not obj:
+ module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
+
+ try:
+ gs = boto.connect_gs(gs_access_key, gs_secret_key)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ if mode == 'get':
+ if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
+ module.fail_json(msg="Target bucket/key cannot be found", failed=True)
+ if not path_check(dest):
+ download_gsfile(module, gs, bucket, obj, dest)
+ else:
+ handle_get(module, gs, bucket, obj, overwrite, dest)
+
+ if mode == 'put':
+ if not path_check(src):
+ module.fail_json(msg="Local object for PUT does not exist", failed=True)
+ handle_put(module, gs, bucket, obj, overwrite, src, expiry)
+
+ # Support for deleting an object if we have both params.
+ if mode == 'delete':
+ handle_delete(module, gs, bucket, obj)
+
+ if mode == 'create':
+ handle_create(module, gs, bucket, obj)
+
+ if mode == 'get_url':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ get_download_url(module, gs, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+ # --------------------------- Get the String contents of an Object -------------------------
+ if mode == 'get_str':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ download_gsstr(module, gs, bucket, obj)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_record.py
new file mode 100644
index 00000000..b97377b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_record.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_record
+short_description: Creates or removes resource records in Google Cloud DNS
+description:
+ - Creates or removes resource records in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_resource_record_set) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given resource record should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ record:
+ type: str
+ description:
+ - The fully-qualified domain name of the resource record.
+ required: true
+ aliases: ['name']
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone (e.g., example.com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ zone_id:
+ type: str
+ description:
+ - The Google Cloud ID of the zone (e.g., example-com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - These usually take the form of domain names with the dots replaced
+ with dashes. A zone ID will never have any dots in it.
+ - I(zone_id) can be faster than I(zone) in projects with a large
+ number of zones.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ type:
+ type: str
+ description:
+ - The type of resource record to add.
+ required: true
+ choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+ record_data:
+ type: list
+ description:
+ - The record_data to use for the resource record.
+ - I(record_data) must be specified if I(state) is C(present) or
+ I(overwrite) is C(True), or the module will fail.
+ - Valid record_data vary based on the record's I(type). In addition,
+ resource records that contain a DNS domain name in the value
+ field (e.g., CNAME, PTR, SRV, etc.) MUST include a trailing dot
+ in the value.
+ - Individual string record_data for TXT records must be enclosed in
+ double quotes.
+ - For resource records that have the same name but different
+ record_data (e.g., multiple A records), they must be defined as
+ multiple list entries in a single record.
+ required: false
+ aliases: ['value']
+ ttl:
+ type: int
+ description:
+ - The amount of time in seconds that a resource record will remain
+ cached by a caching resolver.
+ default: 300
+ overwrite:
+ description:
+ - Whether an attempt to overwrite an existing record should succeed
+ or fail. The behavior of this option depends on I(state).
+ - If I(state) is C(present) and I(overwrite) is C(True), this
+ module will replace an existing resource record of the same name
+ with the provided I(record_data). If I(state) is C(present) and
+ I(overwrite) is C(False), this module will fail if there is an
+ existing resource record with the same name and type, but
+ different resource data.
+ - If I(state) is C(absent) and I(overwrite) is C(True), this
+ module will remove the given resource record unconditionally.
+ If I(state) is C(absent) and I(overwrite) is C(False), this
+ module will fail if the provided record_data do not match exactly
+ with the existing resource record's record_data.
+ type: bool
+ default: 'no'
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_zone).
+ - This module's underlying library does not support in-place updates for
+ DNS resource records. Instead, resource records are quickly deleted and
+ recreated.
+ - SOA records are technically supported, but their functionality is limited
+ to verifying that a zone's existing SOA record matches a pre-determined
+ value. The SOA record cannot be updated.
+ - Root NS records cannot be updated.
+ - NAPTR records are not supported.
+'''
+
+EXAMPLES = '''
+- name: Create an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ value: '1.2.3.4'
+
+- name: Update an existing record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ value: '5.6.7.8'
+
+- name: Remove an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ value: '5.6.7.8'
+
+- name: Create a CNAME record. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'www.example.com'
+ zone_id: 'example-com'
+ type: CNAME
+ value: 'www.example.com.'
+
+- name: Create an MX record with a custom TTL. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone: 'example.com'
+ type: MX
+ ttl: 3600
+ value: '10 mail.example.com.'
+
+- name: Create multiple A records with the same name
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ type: A
+ record_data:
+ - '192.0.2.23'
+ - '10.4.5.6'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Change the value of an existing record with multiple record_data
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ record_data: # WARNING: All values in a record will be replaced
+ - '192.0.2.23'
+ - '192.0.2.42' # The changed record
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Safely remove a multi-line record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ record_data: # NOTE: All of the values must match exactly
+ - '192.0.2.23'
+ - '192.0.2.42'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Unconditionally remove a record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ overwrite: true # overwrite is true, so no values are needed
+ type: A
+
+- name: Create an AAAA record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: AAAA
+ value: 'fd00:db8::1'
+
+- name: Create a PTR record
+ community.general.gcdns_record:
+ record: '10.5.168.192.in-addr.arpa'
+ zone: '5.168.192.in-addr.arpa'
+ type: PTR
+ value: 'api.example.com.' # Note the trailing dot.
+
+- name: Create an NS record
+ community.general.gcdns_record:
+ record: 'subdomain.example.com'
+ zone: 'example.com'
+ type: NS
+ ttl: 21600
+ record_data:
+ - 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values
+ - 'ns-cloud-d2.googledomains.com.'
+ - 'ns-cloud-d3.googledomains.com.'
+ - 'ns-cloud-d4.googledomains.com.'
+
+- name: Create a TXT record
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone_id: 'example-com'
+ type: TXT
+ record_data:
+ - '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value
+ - '"hello " "world"' # A multi-string TXT value
+'''
+
+RETURN = '''
+overwrite:
+ description: Whether the module was allowed to overwrite the record
+ returned: success
+ type: bool
+ sample: True
+record:
+ description: Fully-qualified domain name of the resource record
+ returned: success
+ type: str
+ sample: mail.example.com.
+state:
+ description: Whether the record is present or absent
+ returned: success
+ type: str
+ sample: present
+ttl:
+ description: The time-to-live of the resource record
+ returned: success
+ type: int
+ sample: 300
+type:
+ description: The type of the resource record
+ returned: success
+ type: str
+ sample: A
+record_data:
+ description: The resource record values
+ returned: success
+ type: list
+ sample: ['5.6.7.8', '9.10.11.12']
+zone:
+ description: The DNS name of the zone
+ returned: success
+ type: str
+ sample: example.com.
+zone_id:
+ description: The Google Cloud DNS ID of the zone
+ returned: success
+ type: str
+ sample: example-com
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+import socket
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.types import LibcloudError
+ from libcloud.dns.types import Provider
+ from libcloud.dns.types import RecordDoesNotExistError
+ from libcloud.dns.types import ZoneDoesNotExistError
+ HAS_LIBCLOUD = True
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The records that libcloud's Google Cloud DNS provider supports.
+#
+# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
+# this information and is the authoritative source on which records are
+# supported, but accessing the dictionary requires creating a Google Cloud DNS
+# driver object, which is done in a helper module.
+#
+# I'm hard-coding the supported record types here, because they (hopefully!)
+# shouldn't change much, and it allows me to use it as a "choices" parameter
+# in an AnsibleModule argument_spec.
+SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
+
+
+################################################################################
+# Functions
+################################################################################
+
+def create_record(module, gcdns, zone, record):
+ """Creates or overwrites a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ record_name = module.params['record']
+ record_type = module.params['type']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+ data = dict(ttl=ttl, rrdatas=record_data)
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # If we found a record, we need to check if the values match.
+ if record is not None:
+ # If the record matches, we obviously don't have to change anything.
+ if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ return False
+
+ # The record doesn't match, so we need to check if we can overwrite it.
+ if not overwrite:
+ module.fail_json(
+ msg='cannot overwrite existing record, overwrite protection enabled',
+ changed=False
+ )
+
+ # The record either doesn't exist, or it exists and we can overwrite it.
+ if record is None and not module.check_mode:
+ # There's no existing record, so we'll just create it.
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The resource record name and type are valid by themselves, but
+ # not when combined (e.g., an 'A' record with "www.example.com"
+ # as its value).
+ module.fail_json(
+ msg='value is invalid for the given type: ' +
+ "%s, got value: %s" % (record_type, record_data),
+ changed=False
+ )
+
+ elif error.code == 'cnameResourceRecordSetConflict':
+ # We're attempting to create a CNAME resource record when we
+                # already have another type of resource record with the same
+                # domain name.
+ module.fail_json(
+ msg="non-CNAME resource record already exists: %s" % record_name,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+ elif record is not None and not module.check_mode:
+ # The Google provider in libcloud doesn't support updating a record in
+ # place, so if the record already exists, we need to delete it and
+ # recreate it using the new information.
+ gcdns.delete_record(record)
+
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError:
+ # Something blew up when creating the record. This will usually be a
+ # result of invalid value data in the new record. Unfortunately, we
+ # already changed the state of the record by deleting the old one,
+ # so we'll try to roll back before failing out.
+ try:
+ gcdns.create_record(record.name, record.zone, record.type, record.data)
+ module.fail_json(
+ msg='error updating record, the original record was restored',
+ changed=False
+ )
+ except LibcloudError:
+ # We deleted the old record, couldn't create the new record, and
+ # couldn't roll back. That really sucks. We'll dump the original
+ # record to the failure output so the user can restore it if
+ # necessary.
+ module.fail_json(
+ msg='error updating record, and could not restore original record, ' +
+ "original name: %s " % record.name +
+ "original zone: %s " % record.zone +
+ "original type: %s " % record.type +
+ "original data: %s" % record.data,
+ changed=True)
+
+ return True
+
+
+def remove_record(module, gcdns, record):
+ """Remove a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # If there is no record, we're obviously done.
+ if record is None:
+ return False
+
+ # If there is an existing record, do our values match the values of the
+ # existing record?
+ if not overwrite:
+ if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ module.fail_json(
+ msg='cannot delete due to non-matching ttl or record_data: ' +
+ "ttl: %d, record_data: %s " % (ttl, record_data) +
+ "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+ changed=False
+ )
+
+ # If we got to this point, we're okay to delete the record.
+ if not module.check_mode:
+ gcdns.delete_record(record)
+
+ return True
+
+
+def _get_record(gcdns, zone, record_type, record_name):
+ """Gets the record object for a given FQDN."""
+
+ # The record ID is a combination of its type and FQDN. For example, the
+ # ID of an A record for www.example.com would be 'A:www.example.com.'
+ record_id = "%s:%s" % (record_type, record_name)
+
+ try:
+ return gcdns.get_record(zone.id, record_id)
+ except RecordDoesNotExistError:
+ return None
+
+
+def _get_zone(gcdns, zone_name, zone_id):
+ """Gets the zone object for a given domain name."""
+
+ if zone_id is not None:
+ try:
+ return gcdns.get_zone(zone_id)
+ except ZoneDoesNotExistError:
+ return None
+
+    # The caller may have supplied only the zone's domain name, but libcloud
+    # needs a zone ID to look up a zone directly. Zone IDs are often based on
+    # domain names, but that's not guaranteed, so we'll iterate through the
+    # list of zones to see if we can find a matching domain name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
+ """Checks to see if original and new TTL and values match."""
+
+ matches = True
+
+ if old_ttl != new_ttl:
+ matches = False
+ if old_record_data != new_record_data:
+ matches = False
+
+ return matches
+
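+# A hypothetical illustration of the comparison above (example values only):
+#   _records_match(300, ['192.0.2.1'], 300, ['192.0.2.1'])   # -> True
+#   _records_match(300, ['192.0.2.1'], 600, ['192.0.2.1'])   # -> False (TTL differs)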
+
+def _sanity_check(module):
+ """Run sanity checks that don't depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # A negative TTL is not permitted (how would they even work?!).
+ if ttl < 0:
+ module.fail_json(
+ msg='TTL cannot be less than zero, got: %d' % ttl,
+ changed=False
+ )
+
+ # Deleting SOA records is not permitted.
+ if record_type == 'SOA' and state == 'absent':
+ module.fail_json(msg='cannot delete SOA records', changed=False)
+
+ # Updating SOA records is not permitted.
+ if record_type == 'SOA' and state == 'present' and overwrite:
+ module.fail_json(msg='cannot update SOA records', changed=False)
+
+ # Some sanity checks depend on what value was supplied.
+ if record_data is not None and (state == 'present' or not overwrite):
+ # A records must contain valid IPv4 addresses.
+ if record_type == 'A':
+ for value in record_data:
+ try:
+ socket.inet_aton(value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid A record value, got: %s' % value,
+ changed=False
+ )
+
+ # AAAA records must contain valid IPv6 addresses.
+ if record_type == 'AAAA':
+ for value in record_data:
+ try:
+ socket.inet_pton(socket.AF_INET6, value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid AAAA record value, got: %s' % value,
+ changed=False
+ )
+
+ # CNAME and SOA records can't have multiple values.
+ if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
+ module.fail_json(
+ msg='CNAME or SOA records cannot have more than one value, ' +
+ "got: %s" % record_data,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support wildcard NS records.
+ if record_type == 'NS' and record_name[0] == '*':
+ module.fail_json(
+ msg="wildcard NS records not allowed, got: %s" % record_name,
+ changed=False
+ )
+
+ # Values for txt records must begin and end with a double quote.
+ if record_type == 'TXT':
+ for value in record_data:
+            # Require both the leading and the trailing quote ('or', not 'and').
+            if value[0] != '"' or value[-1] != '"':
+ module.fail_json(
+ msg='TXT record_data must be enclosed in double quotes, ' +
+ 'got: %s' % value,
+ changed=False
+ )
+
+
+def _additional_sanity_checks(module, zone):
+ """Run input sanity checks that depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+
+ # CNAME records are not allowed to have the same name as the root domain.
+ if record_type == 'CNAME' and record_name == zone.domain:
+ module.fail_json(
+ msg='CNAME records cannot match the zone name',
+ changed=False
+ )
+
+ # The root domain must always have an NS record.
+ if record_type == 'NS' and record_name == zone.domain and state == 'absent':
+ module.fail_json(
+ msg='cannot delete root NS records',
+ changed=False
+ )
+
+ # Updating NS records with the name as the root domain is not allowed
+ # because libcloud does not support in-place updates and root domain NS
+ # records cannot be removed.
+ if record_type == 'NS' and record_name == zone.domain and overwrite:
+ module.fail_json(
+ msg='cannot update existing root NS records',
+ changed=False
+ )
+
+ # SOA records with names that don't match the root domain are not permitted
+ # (and wouldn't make sense anyway).
+ if record_type == 'SOA' and record_name != zone.domain:
+ module.fail_json(
+ msg='non-root SOA records are not permitted, got: %s' % record_name,
+ changed=False
+ )
+
+
+################################################################################
+# Main
+################################################################################
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ record=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(type='str'),
+ zone_id=dict(type='str'),
+ type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
+ record_data=dict(aliases=['value'], type='list'),
+ ttl=dict(default=300, type='int'),
+ overwrite=dict(default=False, type='bool'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ required_if=[
+ ('state', 'present', ['record_data']),
+ ('overwrite', False, ['record_data'])
+ ],
+ required_one_of=[['zone', 'zone_id']],
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ zone_name = module.params['zone']
+ zone_id = module.params['zone_id']
+
+ json_output = dict(
+ state=state,
+ record=record_name,
+ zone=zone_name,
+ zone_id=zone_id,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl,
+ overwrite=module.boolean(module.params['overwrite'])
+ )
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if zone_name is not None and zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # Build a connection object that we can use to connect with Google Cloud
+ # DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check that the zone we're creating a record for actually
+ # exists.
+ zone = _get_zone(gcdns, zone_name, zone_id)
+ if zone is None and zone_name is not None:
+ module.fail_json(
+ msg='zone name was not found: %s' % zone_name,
+ changed=False
+ )
+ elif zone is None and zone_id is not None:
+ module.fail_json(
+ msg='zone id was not found: %s' % zone_id,
+ changed=False
+ )
+
+ # Populate the returns with the actual zone information.
+ json_output['zone'] = zone.domain
+ json_output['zone_id'] = zone.id
+
+ # We also need to check if the record we want to create or remove actually
+ # exists.
+ try:
+ record = _get_record(gcdns, zone, record_type, record_name)
+ except InvalidRequestError:
+ # We gave Google Cloud DNS an invalid DNS record name.
+ module.fail_json(
+ msg='record name is invalid: %s' % record_name,
+ changed=False
+ )
+
+ _additional_sanity_checks(module, zone)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if record is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ record=record.data['name'],
+ type=record.data['type'],
+ record_data=record.data['rrdatas'],
+ ttl=record.data['ttl']
+ )
+ diff['before_header'] = "%s:%s" % (record_type, record_name)
+
+ # Create, remove, or modify the record.
+ if state == 'present':
+ diff['after'] = dict(
+ record=record_name,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl
+ )
+ diff['after_header'] = "%s:%s" % (record_type, record_name)
+
+ changed = create_record(module, gcdns, zone, record)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_record(module, gcdns, record)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_zone.py
new file mode 100644
index 00000000..6f66b5fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_zone.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_zone
+short_description: Creates or removes zones in Google Cloud DNS
+description:
+ - Creates or removes managed zones in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_managed_zone) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given zone should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone.
+ - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
+ you attempt to specify a zone ID, this module will attempt to
+ create a TLD and will fail.
+ required: true
+ aliases: ['name']
+ description:
+ type: str
+ description:
+ - An arbitrary text string to use for the zone description.
+ default: ""
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_record).
+ - Zones that are newly created must still be set up with a domain registrar
+ before they can be used.
+'''
+
+EXAMPLES = '''
+# Basic zone creation example.
+- name: Create a basic zone with the minimum number of parameters.
+ community.general.gcdns_zone: zone=example.com
+
+# Zone removal example.
+- name: Remove a zone.
+ community.general.gcdns_zone: zone=example.com state=absent
+
+# Zone creation with description
+- name: Creating a zone with a description
+ community.general.gcdns_zone: zone=example.com description="This is an awesome zone"
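+
+# The same operations can also be written in YAML dictionary form; a hedged
+# sketch using only the parameters documented above:
+- name: Create a zone with a description (YAML dictionary form)
+  community.general.gcdns_zone:
+    zone: example.com
+    description: This is an awesome zone
+    state: present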
+'''
+
+RETURN = '''
+description:
+ description: The zone's description
+ returned: success
+ type: str
+ sample: This is an awesome zone
+state:
+ description: Whether the zone is present or absent
+ returned: success
+ type: str
+ sample: present
+zone:
+ description: The zone's DNS name
+ returned: success
+ type: str
+ sample: example.com.
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ from libcloud.dns.types import Provider
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The URL used to verify ownership of a zone in Google Cloud DNS.
+ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
+
+################################################################################
+# Functions
+################################################################################
+
+
+def create_zone(module, gcdns, zone):
+ """Creates a new Google Cloud DNS zone."""
+
+ description = module.params['description']
+ extra = dict(description=description)
+ zone_name = module.params['zone']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ # If we got a zone back, then the domain exists.
+ if zone is not None:
+ return False
+
+ # The zone doesn't exist yet.
+ try:
+ if not module.check_mode:
+ gcdns.create_zone(domain=zone_name, extra=extra)
+ return True
+
+ except ResourceExistsError:
+ # The zone already exists. We checked for this already, so either
+ # Google is lying, or someone was a ninja and created the zone
+ # within milliseconds of us checking for its existence. In any case,
+ # the zone has already been created, so we have nothing more to do.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The zone name or a parameter might be completely invalid. This is
+ # typically caused by an illegal DNS name (e.g. foo..com).
+ module.fail_json(
+ msg="zone name is not a valid DNS name: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'managedZoneDnsNameNotAvailable':
+ # Google Cloud DNS will refuse to create zones with certain domain
+ # names, such as TLDs, ccTLDs, or special domain names such as
+ # example.com.
+ module.fail_json(
+ msg="zone name is reserved or already in use: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'verifyManagedZoneDnsNameOwnership':
+ # This domain name needs to be verified before Google will create
+ # it. This occurs when a user attempts to create a zone which shares
+ # a domain name with a zone hosted elsewhere in Google Cloud DNS.
+ module.fail_json(
+ msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def remove_zone(module, gcdns, zone):
+ """Removes an existing Google Cloud DNS zone."""
+
+ # If there's no zone, then we're obviously done.
+ if zone is None:
+ return False
+
+ # An empty zone will have two resource records:
+ # 1. An NS record with a list of authoritative name servers
+ # 2. An SOA record
+ # If any additional resource records are present, Google Cloud DNS will
+ # refuse to remove the zone.
+ if len(zone.list_records()) > 2:
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ try:
+ if not module.check_mode:
+ gcdns.delete_zone(zone)
+ return True
+
+ except ResourceNotFoundError:
+ # When we performed our check, the zone existed. It may have been
+ # deleted by something else. It's gone, so whatever.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'containerNotEmpty':
+ # When we performed our check, the zone existed and was empty. In
+ # the milliseconds between the check and the removal command,
+ # records were added to the zone.
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def _get_zone(gcdns, zone_name):
+ """Gets the zone object for a given domain name."""
+
+ # To create a zone, we need to supply a zone name. However, to delete a
+    # zone, we need to supply a zone ID. Zone IDs are often based on zone
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _sanity_check(module):
+ """Run module sanity checks."""
+
+ zone_name = module.params['zone']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support the creation of TLDs.
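+    # For example (hypothetical inputs): 'com' or 'com.' is rejected here,
+    # while 'example.com' passes.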
+ if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
+ module.fail_json(
+ msg='cannot create top-level domain: %s' % zone_name,
+ changed=False
+ )
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ zone=dict(required=True, aliases=['name'], type='str'),
+ description=dict(default='', type='str'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ zone_name = module.params['zone']
+ state = module.params['state']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ json_output = dict(
+ state=state,
+ zone=zone_name,
+ description=module.params['description']
+ )
+
+    # Build a connection object that we can use to connect with Google
+    # Cloud DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check if the zone we're attempting to create already exists.
+ zone = _get_zone(gcdns, zone_name)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if zone is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ zone=zone.domain,
+ description=zone.extra['description']
+ )
+ diff['before_header'] = zone_name
+
+ # Create or remove the zone.
+ if state == 'present':
+ diff['after'] = dict(
+ zone=zone_name,
+ description=module.params['description']
+ )
+ diff['after_header'] = zone_name
+
+ changed = create_zone(module, gcdns, zone)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_zone(module, gcdns, zone)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce.py
new file mode 100644
index 00000000..7e658786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce.py
@@ -0,0 +1,753 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce
+short_description: create or terminate GCE instances
+description:
+ - Creates or terminates Google Compute Engine (GCE) instances. See
+ U(https://cloud.google.com/compute) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_instance) instead.
+options:
+ image:
+ type: str
+ description:
+ - image string to use for the instance (default will follow latest
+ stable debian image)
+ default: "debian-8"
+ image_family:
+ type: str
+ description:
+ - image family from which to select the image. The most recent
+ non-deprecated image in the family will be used.
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+ instance_names:
+ type: str
+ description:
+ - a comma-separated list of instance names to create or destroy
+ machine_type:
+ type: str
+ description:
+ - machine type to use for the instance, use 'n1-standard-1' by default
+ default: "n1-standard-1"
+ metadata:
+ type: str
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ name:
+ type: str
+ description:
+ - either a name of a single instance or when used with 'num_instances',
+ the base name of a cluster of nodes
+ aliases: ['base_name']
+ num_instances:
+ type: int
+ description:
+ - can be used with 'name', specifies
+ the number of nodes to provision using 'name'
+ as a base name
+ network:
+ type: str
+ description:
+ - name of the network, 'default' will be used if not specified
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - name of the subnetwork in which the instance should be created
+ persistent_boot_disk:
+ description:
+ - if set, create the instance with a persistent boot disk
+ type: bool
+ default: 'no'
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ zone:
+ type: str
+ description:
+ - the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
+ default: "us-central1-a"
+ ip_forward:
+ description:
+ - set to C(yes) if the instance can forward ip packets (useful for
+ gateways)
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
+ default: "ephemeral"
+ disk_auto_delete:
+ description:
+ - if set boot disk will be removed after instance destruction
+ type: bool
+ default: 'yes'
+ preemptible:
+ description:
+ - if set to C(yes), instances will be preemptible and time-limited.
+ (requires libcloud >= 0.20.0)
+ type: bool
+ disk_size:
+ type: int
+ description:
+ - The size of the boot disk created for this instance (in GB)
+ default: 10
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - Either I(instance_names) or I(name) is required.
+ - JSON credentials strongly preferred.
+author:
+ - Eric Johnson (@erjohnso) <erjohnso@google.com>
+ - Tom Melendez (@supertom) <supertom@google.com>
+'''
+
+EXAMPLES = '''
+# Basic provisioning example. Create a single Debian 8 instance in the
+# us-central1-a Zone of the n1-standard-1 machine type.
+# Create multiple instances by specifying multiple names, separated by
+# commas in the instance_names field
+# (e.g. my-test-instance1,my-test-instance2)
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single instance of an image from the "my-base-image" image family
+# in the us-central1-a Zone of the n1-standard-1 machine type.
+# This image family is in the "my-other-project" GCP project.
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image_family: my-base-image
+ external_projects:
+ - my-other-project
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single Debian 8 instance in the us-central1-a Zone
+# Use existing disks, custom network/subnetwork, set service account permissions
+# add tags and metadata.
+ - community.general.gce:
+ instance_names: my-test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ state: present
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+ tags:
+ - http-server
+ - my-other-tag
+ disks:
+ - name: disk-2
+ mode: READ_WRITE
+ - name: disk-3
+ mode: READ_ONLY
+ disk_auto_delete: false
+ network: foobar-network
+ subnetwork: foobar-subnetwork-1
+ preemptible: true
+ ip_forward: true
+ service_account_permissions:
+ - storage-full
+ - taskqueue
+ - bigquery
+ - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+
+---
+# Example Playbook
+- name: Compute Engine Instance Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create multiple instances
+ # Basic provisioning example. Create multiple Debian 8 instances in the
+ # us-central1-a Zone of n1-standard-1 machine type.
+ community.general.gce:
+ instance_names: test1,test2,test3
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+        metadata: '{ "startup-script" : "apt-get update" }'
+ register: gce
+
+ - name: Save host data
+ ansible.builtin.add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: gce_instances_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for SSH for instances
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 30
+ with_items: "{{ gce.instance_data }}"
+
+- name: Configure Hosts
+  # This entry sets play-level keywords (hosts, roles), so it is a play of
+  # its own rather than a task of the play above.
+  hosts: gce_instances_ips
+  become: yes
+  become_method: sudo
+  roles:
+    - my-role-one
+    - my-role-two
+  tags:
+    - config
+
+- name: Delete test-instances
+  hosts: localhost
+  vars:
+    service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+    credentials_file: "/path/to/your-key.json"
+    project_id: "your-project-name"
+  tasks:
+    - name: Basic termination of instance
+      community.general.gce:
+        service_account_email: "{{ service_account_email }}"
+        credentials_file: "{{ credentials_file }}"
+        project_id: "{{ project_id }}"
+        instance_names: "{{ gce.instance_names }}"
+        zone: us-central1-a
+        state: absent
+      tags:
+        - delete
+'''
+
+import socket
+import logging
+
+try:
+ from ast import literal_eval
+
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+from ansible_collections.community.general.plugins.module_utils.gcp import get_valid_location
+from ansible.module_utils.six.moves import reduce
+
+
+def get_instance_info(inst):
+ """Retrieves instance information from an instance object and returns it
+ as a dictionary.
+
+ """
+ metadata = {}
+ if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+ for md in inst.extra['metadata']['items']:
+ metadata[md['key']] = md['value']
+
+ try:
+ netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ except Exception:
+ netname = None
+ try:
+ subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
+ except Exception:
+ subnetname = None
+ if 'disks' in inst.extra:
+ disk_names = [disk_info['source'].split('/')[-1]
+ for disk_info
+ in sorted(inst.extra['disks'],
+ key=lambda disk_info: disk_info['index'])]
+ else:
+ disk_names = []
+
+ if len(inst.public_ips) == 0:
+ public_ip = None
+ else:
+ public_ip = inst.public_ips[0]
+
+ return ({
+ 'image': inst.image is not None and inst.image.split('/')[-1] or None,
+ 'disks': disk_names,
+ 'machine_type': inst.size,
+ 'metadata': metadata,
+ 'name': inst.name,
+ 'network': netname,
+ 'subnetwork': subnetname,
+ 'private_ip': inst.private_ips[0],
+ 'public_ip': public_ip,
+ 'status': ('status' in inst.extra) and inst.extra['status'] or None,
+ 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+ 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+ })
+
+
+def create_instances(module, gce, instance_names, number, lc_zone):
+ """Creates new instances. Attributes other than instance_names are picked
+ up from 'module'
+
+ module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+ instance_names: python list of instance names to create
+ number: number of instances to create
+ lc_zone: GCEZone object
+
+    Returns:
+        A tuple of (changed, list of instance-info dictionaries, list of
+        instance names) for the instances that were launched.
+
+ """
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ disks = module.params.get('disks')
+ tags = module.params.get('tags')
+ ip_forward = module.params.get('ip_forward')
+ external_ip = module.params.get('external_ip')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ preemptible = module.params.get('preemptible')
+ disk_size = module.params.get('disk_size')
+ service_account_permissions = module.params.get('service_account_permissions')
+
+ if external_ip == "none":
+ instance_external_ip = None
+ elif external_ip != "ephemeral":
+ instance_external_ip = external_ip
+ try:
+ # check if instance_external_ip is an ip or a name
+ try:
+ socket.inet_aton(instance_external_ip)
+ instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
+ except socket.error:
+ instance_external_ip = gce.ex_get_address(instance_external_ip)
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
+ else:
+ instance_external_ip = external_ip
+
+ new_instances = []
+ changed = False
+
+ lc_disks = []
+ disk_modes = []
+ for i, disk in enumerate(disks or []):
+ if isinstance(disk, dict):
+ lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
+ disk_modes.append(disk['mode'])
+ else:
+ lc_disks.append(gce.ex_get_volume(disk, lc_zone))
+ # boot disk is implicitly READ_WRITE
+ disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+ lc_network = gce.ex_get_network(network)
+ lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
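+    # For example (hypothetical input): metadata '{"db": "postgres"}' becomes
+    # {'items': [{'key': 'db', 'value': 'postgres'}]} on libcloud < 0.15 and
+    # is passed through as {'db': 'postgres'} on newer versions.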
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+            except SyntaxError:
+                module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+
+ lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+
+ # These variables all have default values but check just in case
+ if not lc_network or not lc_machine_type or not lc_zone:
+ module.fail_json(msg='Missing required create instance variable',
+ changed=False)
+
+ gce_args = dict(
+ location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_can_ip_forward=ip_forward,
+ external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ if preemptible is not None:
+ gce_args['ex_preemptible'] = preemptible
+ if subnetwork is not None:
+ gce_args['ex_subnetwork'] = subnetwork
+
+ if isinstance(instance_names, str) and not number:
+ instance_names = [instance_names]
+
+ if isinstance(instance_names, str) and number:
+ instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
+ lc_image(), number, **gce_args)
+ for resp in instance_responses:
+ n = resp
+ if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
+ try:
+ n = gce.ex_get_node(n.name, lc_zone)
+ except ResourceNotFoundError:
+ pass
+ else:
+                # Mark the run as changed once at least one node was created.
+ changed = True
+ new_instances.append(n)
+ else:
+ for instance in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.ex_get_volume("%s" % instance, lc_zone)
+ except ResourceNotFoundError:
+ pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
+ gce_args['ex_boot_disk'] = pd
+
+ inst = None
+ try:
+ inst = gce.ex_get_node(instance, lc_zone)
+ except ResourceNotFoundError:
+ inst = gce.create_node(
+ instance, lc_machine_type, lc_image(), **gce_args
+ )
+ changed = True
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (instance, e.value))
+ if inst:
+ new_instances.append(inst)
+
+ for inst in new_instances:
+ for i, lc_disk in enumerate(lc_disks):
+ # Check whether the disk is already attached
+ if (len(inst.extra['disks']) > i):
+ attached_disk = inst.extra['disks'][i]
+ if attached_disk['source'] != lc_disk.extra['selfLink']:
+ module.fail_json(
+ msg=("Disk at index %d does not match: requested=%s found=%s" % (
+ i, lc_disk.extra['selfLink'], attached_disk['source'])))
+ elif attached_disk['mode'] != disk_modes[i]:
+ module.fail_json(
+ msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+ i, disk_modes[i], attached_disk['mode'])))
+ else:
+ continue
+ gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+ # Work around libcloud bug: attached volumes don't get added
+ # to the instance metadata. get_instance_info() only cares about
+ # source and index.
+ if len(inst.extra['disks']) != i + 1:
+ inst.extra['disks'].append(
+ {'source': lc_disk.extra['selfLink'], 'index': i})
+
+ instance_names = []
+ instance_json_data = []
+ for inst in new_instances:
+ d = get_instance_info(inst)
+ instance_names.append(d['name'])
+ instance_json_data.append(d)
+
+ return (changed, instance_json_data, instance_names)
+
+
+def change_instance_state(module, gce, instance_names, number, zone, state):
+ """Changes the state of a list of instances. For example,
+ change from started to stopped, or started to absent.
+
+ module: Ansible module object
+    gce: authenticated GCE connection object
+ instance_names: a list of instance names to terminate
+ zone: GCEZone object where the instances reside prior to termination
+ state: 'state' parameter passed into module as argument
+
+    Returns a tuple of the changed flag and the list of instance names processed.
+
+ """
+ changed = False
+ nodes = []
+ state_instance_names = []
+
+ if isinstance(instance_names, str) and number:
+ node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
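+        # e.g. (hypothetical) base name 'web' with number=3 yields
+        # ['web-000', 'web-001', 'web-002']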
+ elif isinstance(instance_names, str) and not number:
+ node_names = [instance_names]
+ else:
+ node_names = instance_names
+
+ for name in node_names:
+ inst = None
+ try:
+ inst = gce.ex_get_node(name, zone)
+ except ResourceNotFoundError:
+ state_instance_names.append(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ else:
+ nodes.append(inst)
+ state_instance_names.append(name)
+
+ if state in ['absent', 'deleted'] and number:
+ changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
+ changed = reduce(lambda x, y: x or y, changed_nodes)
+ else:
+ for node in nodes:
+ if state in ['absent', 'deleted']:
+ gce.destroy_node(node)
+ changed = True
+ elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
+ gce.ex_start_node(node)
+ changed = True
+ elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
+ gce.ex_stop_node(node)
+ changed = True
+
+ return (changed, state_instance_names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(default='debian-8'),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ instance_names=dict(),
+ machine_type=dict(default='n1-standard-1'),
+ metadata=dict(),
+ name=dict(aliases=['base_name']),
+ num_instances=dict(type='int'),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ persistent_boot_disk=dict(type='bool', default=False),
+ disks=dict(type='list'),
+ state=dict(choices=['active', 'present', 'absent', 'deleted',
+ 'started', 'stopped', 'terminated'],
+ default='present'),
+ tags=dict(type='list'),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ disk_auto_delete=dict(type='bool', default=True),
+ disk_size=dict(type='int', default=10),
+ preemptible=dict(type='bool', default=None),
+ ),
+ mutually_exclusive=[('instance_names', 'name')]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ instance_names = module.params.get('instance_names')
+ name = module.params.get('name')
+ number = module.params.get('num_instances')
+ subnetwork = module.params.get('subnetwork')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+ preemptible = module.params.get('preemptible')
+ changed = False
+
+ inames = None
+ if isinstance(instance_names, list):
+ inames = instance_names
+ elif isinstance(instance_names, str):
+ inames = instance_names.split(',')
+ if name:
+ inames = name
+ if not inames:
+ module.fail_json(msg='Must specify a "name" or "instance_names"',
+ changed=False)
+ if not zone:
+ module.fail_json(msg='Must specify a "zone"', changed=False)
+
+ lc_zone = get_valid_location(module, gce, zone)
+ if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
+ module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
+ changed=False)
+
+ if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
+ changed=False)
+
+ json_output = {'zone': zone}
+ if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
+ json_output['state'] = state
+ (changed, state_instance_names) = change_instance_state(
+ module, gce, inames, number, lc_zone, state)
+
+        # Based on what the user specified, return the same variable, although
+        # its value could differ if an instance could not be destroyed.
+        if instance_names or (name and number):
+ json_output['instance_names'] = state_instance_names
+ elif name:
+ json_output['name'] = name
+
+ elif state in ['active', 'present']:
+ json_output['state'] = 'present'
+ (changed, instance_data, instance_name_list) = create_instances(
+ module, gce, inames, number, lc_zone)
+ json_output['instance_data'] = instance_data
+ if instance_names:
+ json_output['instance_names'] = instance_name_list
+ elif name:
+ json_output['name'] = name
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+class LazyDiskImage:
+ """
+ Object for lazy instantiation of disk image
+ gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible.
+ """
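+    #
+    # A hypothetical usage sketch, mirroring how create_instances() uses it:
+    #   lc_image = LazyDiskImage(module, gce, 'debian-8', lc_disks)
+    #   image_obj = lc_image()  # ex_get_image only runs on this first call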
+
+ def __init__(self, module, gce, name, has_pd, family=None, projects=None):
+ self.image = None
+ self.was_called = False
+ self.gce = gce
+ self.name = name
+ self.has_pd = has_pd
+ self.module = module
+ self.family = family
+ self.projects = projects
+
+ def __call__(self):
+ if not self.was_called:
+ self.was_called = True
+ if not self.has_pd:
+ if self.family:
+ self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
+ else:
+ self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
+ if not self.image:
+ self.module.fail_json(msg='image or disks missing for create instance', changed=False)
+ return self.image
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_eip.py
new file mode 100644
index 00000000..b5fd4bf3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_eip.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gce_eip
+short_description: Create or Destroy Global or Regional External IP addresses.
+description:
+ - Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
+ U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+notes:
+ - Global addresses can only be used with Global Forwarding Rules.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of Address.
+ required: true
+ region:
+ type: str
+ description:
+ - Region to create the address in. Set to 'global' to create a global address.
+ required: true
+ state:
+ type: str
+ description: The state the address should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Create a Global external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-global-ip
+ region: global
+ state: present
+
+- name: Create a Regional external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+    name: my-regional-ip
+ region: us-east1
+ state: present
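+
+# A hedged sketch of releasing an address again with state: absent; the
+# variables mirror the examples above.
+- name: Release a Regional external IP address
+  community.general.gce_eip:
+    service_account_email: "{{ service_account_email }}"
+    credentials_file: "{{ credentials_file }}"
+    project_id: "{{ project_id }}"
+    name: my-regional-ip
+    region: us-east1
+    state: absent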
+'''
+
+RETURN = '''
+address:
+ description: IP address being operated on
+ returned: always
+ type: str
+ sample: "35.186.222.233"
+name:
+ description: name of the address being operated on
+ returned: always
+ type: str
+ sample: "my-address"
+region:
+  description: The region to which the address belongs.
+ returned: always
+ type: str
+ sample: "global"
+'''
+
+USER_AGENT_VERSION = 'v1'
+USER_AGENT_PRODUCT = 'Ansible-gce_eip'
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+
+
+def get_address(gce, name, region):
+ """
+ Get an Address from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Address.
+ :type name: ``str``
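+
+    :param region: Region of the Address, or 'global' for a global address.
+    :type region: ``str``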
+
+ :return: A GCEAddress object or None.
+ :rtype: :class: `GCEAddress` or None
+ """
+ try:
+ return gce.ex_get_address(name=name, region=region)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_address(gce, params):
+ """
+ Create a new Address.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+
+ address = gce.ex_create_address(
+ name=params['name'], region=params['region'])
+
+ if address:
+ changed = True
+ return_data = address.address
+
+ return (changed, return_data)
+
+
+def delete_address(address):
+ """
+ Delete an Address.
+
+    :param address: The Address object to destroy.
+    :type address: :class: `GCEAddress`
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+ if address.destroy():
+ changed = True
+ return_data = address.address
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ region=dict(required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+        project_id=dict(),
+    ))
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+            msg='libcloud with GCE support (0.19.0+) required for this module.')
+
+ gce = gcp_connect(module, Provider.GCE, get_driver,
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['name'] = module.params.get('name')
+ params['region'] = module.params.get('region')
+
+ changed = False
+ json_output = {'state': params['state']}
+ address = get_address(gce, params['name'], region=params['region'])
+
+ if params['state'] == 'absent':
+ if not address:
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown address: %s" %
+ (params['name']))
+ else:
+ # Delete
+ (changed, json_output['address']) = delete_address(address)
+ else:
+ if not address:
+ # Create
+ (changed, json_output['address']) = create_address(gce,
+ params)
+ else:
+ changed = False
+ json_output['address'] = address.address
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_img.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_img.py
new file mode 100644
index 00000000..c4705098
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_img.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""An Ansible module to utilize GCE image resources."""
+
+DOCUMENTATION = '''
+---
+module: gce_img
+short_description: utilize GCE image resources
+description:
+  - This module can create and delete GCE private images from a gzip-compressed
+    tarball containing raw disk data or from existing detached
+    disks in any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ type: str
+ description:
+ - the name of the image to create or delete
+ required: true
+ description:
+ type: str
+ description:
+ - an optional description
+ family:
+ type: str
+ description:
+ - an optional family name
+ source:
+ type: str
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ state:
+ type: str
+ description:
+ - desired state of the image
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ type: str
+ description:
+ - the zone of the disk specified by source
+ default: "us-central1-a"
+ timeout:
+ type: int
+ description:
+ - timeout for the operation
+ default: 180
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (@supertom)"
+'''
+
+EXAMPLES = '''
+- name: Create an image named test-image from the disk 'test-disk' in zone us-central1-a
+ community.general.gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+- name: Create an image named test-image from a tarball in Google Cloud Storage
+ community.general.gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+- name: Alternatively use the gs scheme
+ community.general.gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+- name: Delete an image named test-image
+ community.general.gce_img:
+ name: test-image
+ state: absent
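+
+# A hypothetical sketch of the optional family grouping; names are placeholders.
+- name: Create an image named test-image in the image family test-family
+  community.general.gce_img:
+    name: test-image
+    source: test-disk
+    family: test-family
+    zone: us-central1-a
+    state: present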
+'''
+
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+GCS_URI = 'https://storage.googleapis.com/'
+
+
+def create_image(gce, name, module):
+ """Create an image with the specified name."""
+ source = module.params.get('source')
+ zone = module.params.get('zone')
+ desc = module.params.get('description')
+ timeout = module.params.get('timeout')
+ family = module.params.get('family')
+
+ if not source:
+ module.fail_json(msg='Must supply a source', changed=False)
+
+ if source.startswith(GCS_URI):
+ # source is a Google Cloud Storage URI
+ volume = source
+ elif source.startswith('gs://'):
+ # libcloud only accepts https URI.
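+        # e.g. 'gs://bucket/path/to/image.tgz'
+        # -> 'https://storage.googleapis.com/bucket/path/to/image.tgz'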
+ volume = source.replace('gs://', GCS_URI)
+ else:
+ try:
+ volume = gce.ex_get_volume(source, zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
+ changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ gce_extra_args = {}
+ if family is not None:
+ gce_extra_args['family'] = family
+
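+    # Temporarily widen the connection timeout for the (potentially slow)
+    # image creation; the original value is restored in the finally block.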
+ old_timeout = gce.connection.timeout
+ try:
+ gce.connection.timeout = timeout
+ gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
+ return True
+ except ResourceExistsError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+ finally:
+ gce.connection.timeout = old_timeout
+
+
+def delete_image(gce, name, module):
+ """Delete a specific image resource by name."""
+ try:
+ gce.ex_delete_image(name)
+ return True
+ except ResourceNotFoundError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ family=dict(),
+ description=dict(),
+ source=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ timeout=dict(type='int', default=180)
+ )
+ )
+
+ if not has_libcloud:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ gce = gce_connect(module)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ family = module.params.get('family')
+ changed = False
+
+ if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
+ changed=False)
+
+ # user wants to create an image.
+ if state == 'present':
+ changed = create_image(gce, name, module)
+
+ # user wants to delete the image.
+ if state == 'absent':
+ changed = delete_image(gce, name, module)
+
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_instance_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_instance_template.py
new file mode 100644
index 00000000..04ddacce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_instance_template.py
@@ -0,0 +1,605 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_instance_template
+short_description: create or destroy Google Compute Engine instance templates.
+description:
+  - Creates or destroys Google Compute Engine instance
+    templates on Google Cloud Platform.
+options:
+ state:
+ type: str
+ description:
+ - The desired state for the instance template.
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ type: str
+ description:
+ - The name of the GCE instance template.
+ required: True
+ aliases: [base_name]
+ size:
+ type: str
+ description:
+ - The desired machine type for the instance template.
+ default: "f1-micro"
+ source:
+ type: str
+ description:
+ - A source disk to attach to the instance.
+ Cannot specify both I(image) and I(source).
+ image:
+ type: str
+ description:
+ - The image to use to create the instance.
+        Cannot specify both I(image) and I(source).
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used I(image_family) is ignored.
+ Cannot specify both I(image) and I(source).
+ default: debian-8
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ choices:
+ - pd-standard
+ - pd-ssd
+ default: pd-standard
+ disk_auto_delete:
+ description:
+ - Indicate that the boot disk should be
+ deleted when the Node is deleted.
+ default: true
+ type: bool
+ network:
+ type: str
+ description:
+ - The network to associate with the instance.
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - The Subnetwork resource name for this instance.
+ can_ip_forward:
+ description:
+ - Set to C(yes) to allow instance to
+ send/receive non-matching src/dst packets.
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - The external IP address to use.
+ If C(ephemeral), a new non-static address will be
+        used. If C(none), then no external address will
+ be used. To use an existing static IP address
+ specify address name.
+ default: "ephemeral"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ automatic_restart:
+ description:
+ - Defines whether the instance should be
+ automatically restarted when it is
+ terminated by Compute Engine.
+ type: bool
+ preemptible:
+ description:
+ - Defines whether the instance is preemptible.
+ type: bool
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ description:
+ type: str
+ description:
+ - description of instance template
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ nic_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted networkInterfaces[] structure.
+ disks_gce_struct:
+ type: list
+ description:
+      - Support passing in the GCE-specific
+        formatted disks[] structure. Case sensitive.
+        See U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ subnetwork_region:
+ type: str
+ description:
+      - Region that the subnetwork resides in. (Required when I(subnetwork) is specified.)
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - JSON credentials strongly preferred.
+author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
+'''
+
+EXAMPLES = '''
+# Usage
+- name: Create instance template named foo
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "your-project-name"
+ credentials_file: "/path/to/your-key.json"
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+
+# Example Playbook
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ - name: Delete instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: absent
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+
+# Example playbook using disks_gce_struct
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ disks_gce_struct:
+ - device_name: /dev/sda
+ boot: true
+ autoDelete: true
+ initializeParams:
+ diskSizeGb: 30
+ diskType: pd-ssd
+ sourceImage: projects/debian-cloud/global/images/family/debian-8
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible.module_utils._text import to_native
+
+
+def get_info(inst):
+ """Retrieves instance template information
+ """
+    return {
+        'name': inst.name,
+        'extra': inst.extra,
+    }
+
+
+def create_instance_template(module, gce):
+ """Create an instance template
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ # get info from module
+ name = module.params.get('name')
+ size = module.params.get('size')
+ source = module.params.get('source')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ disk_type = module.params.get('disk_type')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ subnetwork_region = module.params.get('subnetwork_region')
+ can_ip_forward = module.params.get('can_ip_forward')
+ external_ip = module.params.get('external_ip')
+ service_account_permissions = module.params.get(
+ 'service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+ on_host_maintenance = module.params.get('on_host_maintenance')
+ automatic_restart = module.params.get('automatic_restart')
+ preemptible = module.params.get('preemptible')
+ tags = module.params.get('tags')
+ metadata = module.params.get('metadata')
+ description = module.params.get('description')
+ disks_gce_struct = module.params.get('disks_gce_struct')
+ changed = False
+
+ # args of ex_create_instancetemplate
+ gce_args = dict(
+ name="instance",
+ size="f1-micro",
+ source=None,
+ image=None,
+ disk_type='pd-standard',
+ disk_auto_delete=True,
+ network='default',
+ subnetwork=None,
+ can_ip_forward=None,
+ external_ip='ephemeral',
+ service_accounts=None,
+ on_host_maintenance=None,
+ automatic_restart=None,
+ preemptible=None,
+ tags=None,
+ metadata=None,
+ description=None,
+ disks_gce_struct=None,
+ nic_gce_struct=None
+ )
+
+ gce_args['name'] = name
+ gce_args['size'] = size
+
+ if source is not None:
+ gce_args['source'] = source
+
+ if image:
+ gce_args['image'] = image
+ else:
+ if image_family:
+ image = gce.ex_get_image_from_family(image_family)
+ gce_args['image'] = image
+ else:
+ gce_args['image'] = "debian-8"
+
+ gce_args['disk_type'] = disk_type
+ gce_args['disk_auto_delete'] = disk_auto_delete
+
+ gce_network = gce.ex_get_network(network)
+ gce_args['network'] = gce_network
+
+ if subnetwork is not None:
+ gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)
+
+ if can_ip_forward is not None:
+ gce_args['can_ip_forward'] = can_ip_forward
+
+ if external_ip == "ephemeral":
+ instance_external_ip = external_ip
+ elif external_ip == "none":
+ instance_external_ip = None
+ else:
+ try:
+ instance_external_ip = gce.ex_get_address(external_ip)
+ except GoogleBaseError as err:
+            # Address lookup failed; pass the raw value through unchanged.
+ instance_external_ip = external_ip
+ gce_args['external_ip'] = instance_external_ip
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP:
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email is not None:
+ ex_sa_perms.append({'email': str(service_account_email)})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+ gce_args['service_accounts'] = ex_sa_perms
+
+ if on_host_maintenance is not None:
+ gce_args['on_host_maintenance'] = on_host_maintenance
+
+ if automatic_restart is not None:
+ gce_args['automatic_restart'] = automatic_restart
+
+ if preemptible is not None:
+ gce_args['preemptible'] = preemptible
+
+ if tags is not None:
+ gce_args['tags'] = tags
+
+ if disks_gce_struct is not None:
+ gce_args['disks_gce_struct'] = disks_gce_struct
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
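+    # For example, metadata '{"env": "prod"}' becomes
+    # {'items': [{'key': 'env', 'value': 'prod'}]} on libcloud < 0.15 and is
+    # passed through as {'env': 'prod'} on newer releases.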
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+ gce_args['metadata'] = metadata
+
+ if description is not None:
+ gce_args['description'] = description
+
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ except ResourceNotFoundError:
+ try:
+ instance = gce.ex_create_instancetemplate(**gce_args)
+ changed = True
+ except GoogleBaseError as err:
+ module.fail_json(
+ msg='Unexpected error attempting to create instance {0}, error: {1}'
+ .format(
+ instance,
+ err.value
+ )
+ )
+
+ if instance:
+ json_data = get_info(instance)
+ else:
+ module.fail_json(msg="no instance template!")
+
+ return (changed, json_data, name)
+
+
+def delete_instance_template(module, gce):
+ """ Delete instance template.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ name = module.params.get('name')
+ current_state = "absent"
+ changed = False
+
+ # get instance template
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ current_state = "present"
+ except GoogleBaseError as e:
+        json_data = dict(msg='instance template does not exist: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ if current_state == "present":
+ rc = instance.destroy()
+ if rc:
+ changed = True
+ else:
+ module.fail_json(
+ msg='instance template destroy failed'
+ )
+
+ return (changed, json_data, name)
+
+
+def module_controller(module, gce):
+    ''' Dispatch on the module's state parameter.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ nothing
+ Exit:
+        Exits the module via exit_json with result data.
+ '''
+ json_output = dict()
+ state = module.params.get("state")
+ if state == "present":
+ (changed, output, name) = create_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+ elif state == "absent":
+ (changed, output, name) = delete_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+
+ module.exit_json(**json_output)
+
+
+def check_if_system_state_would_be_changed(module, gce):
+    ''' Determine whether applying the module would change the system state.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+        tuple of (changed, message)
+ '''
+ changed = False
+ current_state = "absent"
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+
+    try:
+        gce.ex_get_instancetemplate(name)
+        current_state = "present"
+    except ResourceNotFoundError:
+        # The template does not exist yet; current state stays "absent".
+        pass
+    except GoogleBaseError as e:
+        module.fail_json(msg='GCE get instancetemplate problem: %s' % to_native(e),
+                         exception=traceback.format_exc())
+
+ if current_state != state:
+ changed = True
+
+ if current_state == "absent":
+ if changed:
+ output = 'instance template {0} will be created'.format(name)
+ else:
+            output = 'nothing to do for instance template {0}'.format(name)
+ if current_state == "present":
+ if changed:
+ output = 'instance template {0} will be destroyed'.format(name)
+ else:
+            output = 'nothing to do for instance template {0}'.format(name)
+
+ return (changed, output)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ name=dict(required=True, aliases=['base_name']),
+ size=dict(default='f1-micro'),
+ source=dict(),
+ image=dict(),
+ image_family=dict(default='debian-8'),
+ disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
+ disk_auto_delete=dict(type='bool', default=True),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ can_ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ automatic_restart=dict(type='bool', default=None),
+ preemptible=dict(type='bool', default=None),
+ tags=dict(type='list'),
+ metadata=dict(),
+ description=dict(),
+ disks=dict(type='list'),
+ nic_gce_struct=dict(type='list'),
+ project_id=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ subnetwork_region=dict(),
+ disks_gce_struct=dict(type='list')
+ ),
+ mutually_exclusive=[['source', 'image']],
+ required_one_of=[['image', 'image_family']],
+ supports_check_mode=True
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ try:
+ gce = gce_connect(module)
+ except GoogleBaseError as e:
+ module.fail_json(msg='GCE Connection failed %s' % to_native(e), exception=traceback.format_exc())
+
+ if module.check_mode:
+ (changed, output) = check_if_system_state_would_be_changed(module, gce)
+ module.exit_json(
+ changed=changed,
+ msg=output
+ )
+ else:
+ module_controller(module, gce)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_labels.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_labels.py
new file mode 100644
index 00000000..dced7599
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_labels.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_labels
+short_description: Create, Update or Destroy GCE Labels.
+description:
+ - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
+ When specifying the GCE resource, users may specify the full URL for
+ the resource (its 'self_link'), or the individual parameters of the
+ resource (type, location, name). Examples for the two options can be
+ seen in the documentation.
+ See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
+ more information about GCE Labels. Labels are gradually being added to
+ more GCE resources, so this module will need to be updated as new
+ resources are added to the GCE (v1) API.
+requirements:
+ - 'python >= 2.6'
+ - 'google-api-python-client >= 1.6.2'
+ - 'google-auth >= 1.0.0'
+ - 'google-auth-httplib2 >= 0.0.2'
+notes:
+ - Labels support resources such as instances, disks, images, etc. See
+ U(https://cloud.google.com/compute/docs/labeling-resources) for the list
+ of resources available in the GCE v1 API (not alpha or beta).
+author:
+ - 'Eric Johnson (@erjohnso) <erjohnso@google.com>'
+options:
+ labels:
+ type: dict
+ description:
+      - A dictionary of labels (key/value pairs) to add or remove for the resource.
+ required: false
+ resource_url:
+ type: str
+ description:
+ - The 'self_link' for the resource (instance, disk, snapshot, etc)
+ required: false
+ resource_type:
+ type: str
+ description:
+ - The type of resource (instances, disks, snapshots, images)
+ required: false
+ resource_location:
+ type: str
+ description:
+ - The location of resource (global, us-central1-f, etc.)
+ required: false
+ resource_name:
+ type: str
+ description:
+ - The name of resource.
+ required: false
+ state:
+ type: str
+ description: The state the labels should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+      - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Add labels on an existing instance (using resource_url)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: present
+- name: Add labels on an image (using resource params)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_type: images
+ resource_location: global
+ resource_name: my-custom-image
+ state: present
+- name: Remove specified labels from the GCE instance
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ environment: prod
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: absent
+'''
+
+RETURN = '''
+labels:
+ description: List of labels that exist on the resource.
+ returned: Always.
+ type: dict
+  sample: { 'webserver-frontend': 'homepage', 'environment': 'test', 'experiment-name': 'kennedy' }
+resource_url:
+ description: The 'self_link' of the GCE resource.
+ returned: Always.
+ type: str
+ sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+resource_type:
+ description: The type of the GCE resource.
+ returned: Always.
+ type: str
+ sample: instances
+resource_location:
+ description: The location of the GCE resource.
+ returned: Always.
+ type: str
+ sample: us-central1-f
+resource_name:
+ description: The name of the GCE resource.
+ returned: Always.
+ type: str
+ sample: my-happy-little-instance
+state:
+ description: state of the labels
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+
+
+UA_PRODUCT = 'ansible-gce_labels'
+UA_VERSION = '0.0.1'
+GCE_API_VERSION = 'v1'
+
+# TODO(all): As Labels are added to more GCE resources, this list will need to
+# be updated (along with some code changes below). The list can *only* include
+# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
+KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
+
+
+def _fetch_resource(client, module):
+ params = module.params
+ if params['resource_url']:
+ if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
+ module.fail_json(
+ msg='Invalid self_link url: %s' % params['resource_url'])
+ else:
+ parts = params['resource_url'].split('/')[8:]
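+            # e.g. '.../projects/myproject/zones/us-central1-f/instances/example-instance'
+            # splits to ['us-central1-f', 'instances', 'example-instance'], while a
+            # global resource such as '.../projects/myproject/global/images/my-image'
+            # splits to ['images', 'my-image'].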
+ if len(parts) == 2:
+ resource_type, resource_name = parts
+ resource_location = 'global'
+ else:
+ resource_location, resource_type, resource_name = parts
+ else:
+ if not params['resource_type'] or not params['resource_location'] \
+ or not params['resource_name']:
+ module.fail_json(msg='Missing required resource params.')
+ resource_type = params['resource_type'].lower()
+ resource_name = params['resource_name'].lower()
+ resource_location = params['resource_location'].lower()
+
+ if resource_type not in KNOWN_RESOURCES:
+ module.fail_json(msg='Unsupported resource_type: %s' % resource_type)
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if resource_type == 'instances':
+ resource = client.instances().get(project=params['project_id'],
+ zone=resource_location,
+ instance=resource_name).execute()
+ elif resource_type == 'disks':
+ resource = client.disks().get(project=params['project_id'],
+ zone=resource_location,
+ disk=resource_name).execute()
+ elif resource_type == 'snapshots':
+ resource = client.snapshots().get(project=params['project_id'],
+ snapshot=resource_name).execute()
+ elif resource_type == 'images':
+ resource = client.images().get(project=params['project_id'],
+ image=resource_name).execute()
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % resource_type)
+
+ return resource.get('labelFingerprint', ''), {
+ 'resource_name': resource.get('name'),
+ 'resource_url': resource.get('selfLink'),
+ 'resource_type': resource_type,
+ 'resource_location': resource_location,
+ 'labels': resource.get('labels', {})
+ }
+
+
+def _set_labels(client, new_labels, module, ri, fingerprint):
+ params = module.params
+ result = err = None
+ labels = {
+ 'labels': new_labels,
+ 'labelFingerprint': fingerprint
+ }
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if ri['resource_type'] == 'instances':
+ req = client.instances().setLabels(project=params['project_id'],
+ instance=ri['resource_name'],
+ zone=ri['resource_location'],
+ body=labels)
+ elif ri['resource_type'] == 'disks':
+ req = client.disks().setLabels(project=params['project_id'],
+ zone=ri['resource_location'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'snapshots':
+ req = client.snapshots().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'images':
+ req = client.images().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])
+
+ # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
+ # method to poll for the async request/operation to complete before
+ # returning. However, during 'beta', we are in an odd state where
+ # API requests must be sent to the 'compute/beta' API, but the python
+ # client library only allows for *Operations.get() requests to be
+ # sent to 'compute/v1' API. The response operation is in the 'beta'
+ # API-scope, but the client library cannot find the operation (404).
+ # result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ # return result, err
+ result = req.execute()
+ return True, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ labels=dict(required=False, type='dict', default={}),
+ resource_url=dict(required=False, type='str'),
+ resource_name=dict(required=False, type='str'),
+ resource_location=dict(required=False, type='str'),
+ resource_type=dict(required=False, type='str'),
+ project_id=dict()
+ ),
+ required_together=[
+ ['resource_name', 'resource_location', 'resource_type']
+ ],
+ mutually_exclusive=[
+ ['resource_url', 'resource_name'],
+ ['resource_url', 'resource_location'],
+ ['resource_url', 'resource_type']
+ ]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ client, cparams = get_google_api_client(module, 'compute',
+ user_agent_product=UA_PRODUCT,
+ user_agent_version=UA_VERSION,
+ api_version=GCE_API_VERSION)
+
+ # Get current resource info including labelFingerprint
+ fingerprint, resource_info = _fetch_resource(client, module)
+ new_labels = resource_info['labels'].copy()
+
+ update_needed = False
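+    # For state=absent, a label is removed only when both key and value match
+    # the existing label; a matching key with a different value is an error.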
+ if module.params['state'] == 'absent':
+ for k, v in module.params['labels'].items():
+ if k in new_labels:
+ if new_labels[k] == v:
+ update_needed = True
+ new_labels.pop(k, None)
+ else:
+ module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
+ else:
+ for k, v in module.params['labels'].items():
+ if k not in new_labels:
+ update_needed = True
+ new_labels[k] = v
+
+ changed = False
+ json_output = {'state': module.params['state']}
+ if update_needed:
+ changed, err = _set_labels(client, new_labels, module, resource_info,
+ fingerprint)
+ json_output['changed'] = changed
+
+ # TODO(erjohnso): probably want to re-fetch the resource to return the
+ # new labelFingerprint, check that desired labels match updated labels.
+ # BUT! Will need to wait for setLabels() to hit v1 API so we can use the
+ # GCPUtils feature to poll for the operation to be complete. For now,
+ # we'll just update the output with what we have from the original
+ # state of the resource.
+ json_output.update(resource_info)
+ json_output.update(module.params)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_lb.py
new file mode 100644
index 00000000..50e26a58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_lb.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_lb
+short_description: create/destroy GCE load-balancer resources
+description:
+ - This module can create and destroy Google Compute Engine C(loadbalancer)
+ and C(httphealthcheck) resources. The primary LB resource is the
+ C(load_balancer) resource and the health check parameters are all
+ prefixed with I(httphealthcheck).
+ The full documentation for Google Compute Engine load balancing is at
+ U(https://developers.google.com/compute/docs/load-balancing/). However,
+    the Ansible module simplifies the configuration by following the
+ libcloud model.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ httphealthcheck_name:
+ type: str
+ description:
+ - the name identifier for the HTTP health check
+ httphealthcheck_port:
+ type: int
+ description:
+ - the TCP port to use for HTTP health checking
+ default: 80
+ httphealthcheck_path:
+ type: str
+ description:
+ - the url path to use for HTTP health checking
+ default: "/"
+ httphealthcheck_interval:
+ type: int
+ description:
+ - the duration in seconds between each health check request
+ default: 5
+ httphealthcheck_timeout:
+ type: int
+ description:
+ - the timeout in seconds before a request is considered a failed check
+ default: 5
+ httphealthcheck_unhealthy_count:
+ type: int
+ description:
+ - number of consecutive failed checks before marking a node unhealthy
+ default: 2
+ httphealthcheck_healthy_count:
+ type: int
+ description:
+ - number of consecutive successful checks before marking a node healthy
+ default: 2
+ httphealthcheck_host:
+ type: str
+ description:
+ - host header to pass through on HTTP check requests
+ name:
+ type: str
+ description:
+ - name of the load-balancer resource
+ protocol:
+ type: str
+ description:
+ - the protocol used for the load-balancer packet forwarding, tcp or udp
+ - "the available choices are: C(tcp) or C(udp)."
+ default: "tcp"
+ region:
+ type: str
+ description:
+ - the GCE region where the load-balancer is defined
+ external_ip:
+ type: str
+ description:
+ - the external static IPv4 (or auto-assigned) address for the LB
+ port_range:
+ type: str
+ description:
+      - the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
+ members:
+ type: list
+ description:
+      - a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
+ state:
+ type: str
+ description:
+ - desired state of the LB
+ - "the available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple example of creating a new LB, adding members, and a health check
+ local_action:
+ module: gce_lb
+ name: testlb
+ region: us-central1
+ members: ["us-central1-a/www-a", "us-central1-b/www-b"]
+ httphealthcheck_name: hc
+ httphealthcheck_port: 80
+ httphealthcheck_path: "/up"
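+
+# A minimal sketch, assuming the LB and health check created above exist.
+- name: Delete the load-balancer and its health check
+  local_action:
+    module: gce_lb
+    name: testlb
+    region: us-central1
+    httphealthcheck_name: hc
+    state: absent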
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.loadbalancer.types import Provider as Provider_lb
+ from libcloud.loadbalancer.providers import get_driver as get_driver_lb
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import USER_AGENT_PRODUCT, USER_AGENT_VERSION, gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ httphealthcheck_name=dict(),
+ httphealthcheck_port=dict(default=80, type='int'),
+ httphealthcheck_path=dict(default='/'),
+ httphealthcheck_interval=dict(default=5, type='int'),
+ httphealthcheck_timeout=dict(default=5, type='int'),
+ httphealthcheck_unhealthy_count=dict(default=2, type='int'),
+ httphealthcheck_healthy_count=dict(default=2, type='int'),
+ httphealthcheck_host=dict(),
+ name=dict(),
+ protocol=dict(default='tcp'),
+ region=dict(),
+ external_ip=dict(),
+ port_range=dict(),
+ members=dict(type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
+
+ gce = gce_connect(module)
+
+ httphealthcheck_name = module.params.get('httphealthcheck_name')
+ httphealthcheck_port = module.params.get('httphealthcheck_port')
+ httphealthcheck_path = module.params.get('httphealthcheck_path')
+ httphealthcheck_interval = module.params.get('httphealthcheck_interval')
+ httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
+ httphealthcheck_unhealthy_count = module.params.get('httphealthcheck_unhealthy_count')
+ httphealthcheck_healthy_count = module.params.get('httphealthcheck_healthy_count')
+ httphealthcheck_host = module.params.get('httphealthcheck_host')
+ name = module.params.get('name')
+ protocol = module.params.get('protocol')
+ region = module.params.get('region')
+ external_ip = module.params.get('external_ip')
+ port_range = module.params.get('port_range')
+ members = module.params.get('members')
+ state = module.params.get('state')
+
+ try:
+ gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
+ gcelb.connection.user_agent_append("%s/%s" % (
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION))
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ changed = False
+ json_output = {'name': name, 'state': state}
+
+ if not name and not httphealthcheck_name:
+        module.fail_json(msg='Nothing to do, please specify a "name" '
+                             'or "httphealthcheck_name" parameter', changed=False)
+
+ if state in ['active', 'present']:
+ # first, create the httphealthcheck if requested
+ hc = None
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
+ host=httphealthcheck_host, path=httphealthcheck_path,
+ port=httphealthcheck_port,
+ interval=httphealthcheck_interval,
+ timeout=httphealthcheck_timeout,
+ unhealthy_threshold=httphealthcheck_unhealthy_count,
+ healthy_threshold=httphealthcheck_healthy_count)
+ changed = True
+ except ResourceExistsError:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if hc is not None:
+ json_output['httphealthcheck_host'] = hc.extra['host']
+ json_output['httphealthcheck_path'] = hc.path
+ json_output['httphealthcheck_port'] = hc.port
+ json_output['httphealthcheck_interval'] = hc.interval
+ json_output['httphealthcheck_timeout'] = hc.timeout
+ json_output['httphealthcheck_unhealthy_count'] = hc.unhealthy_threshold
+ json_output['httphealthcheck_healthy_count'] = hc.healthy_threshold
+
+ # create the forwarding rule (and target pool under the hood)
+ lb = None
+ if name:
+ if not region:
+ module.fail_json(msg='Missing required region name',
+ changed=False)
+ nodes = []
+ output_nodes = []
+ json_output['name'] = name
+ # members is a python list of 'zone/inst' strings
+ if members:
+ for node in members:
+ try:
+ zone, node_name = node.split('/')
+ nodes.append(gce.ex_get_node(node_name, zone))
+ output_nodes.append(node)
+ except Exception:
+ # skip nodes that are badly formatted or don't exist
+ pass
+ try:
+ if hc is not None:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_healthchecks=[hc],
+ ex_address=external_ip)
+ else:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_address=external_ip)
+ changed = True
+ except ResourceExistsError:
+ lb = gcelb.get_balancer(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if lb is not None:
+ json_output['members'] = output_nodes
+ json_output['protocol'] = protocol
+ json_output['region'] = region
+ json_output['external_ip'] = lb.ip
+ json_output['port_range'] = lb.port
+ hc_names = []
+ if 'healthchecks' in lb.extra:
+ for hc in lb.extra['healthchecks']:
+ hc_names.append(hc.name)
+ json_output['httphealthchecks'] = hc_names
+
+ if state in ['absent', 'deleted']:
+ # first, delete the load balancer (forwarding rule and target pool)
+ # if specified.
+ if name:
+ json_output['name'] = name
+ try:
+ lb = gcelb.get_balancer(name)
+ gcelb.destroy_balancer(lb)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # destroy the health check if specified
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ gce.ex_destroy_healthcheck(hc)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_mig.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_mig.py
new file mode 100644
index 00000000..42db08bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_mig.py
@@ -0,0 +1,904 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_mig
+short_description: Create, Update or Destroy a Managed Instance Group (MIG).
+description:
+ - Create, Update or Destroy a Managed Instance Group (MIG). See
+ U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.2.0"
+notes:
+  - Resizing and recreating VMs are also supported.
+ - An existing instance template is required in order to create a
+ Managed Instance Group.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of the Managed Instance Group.
+ required: true
+ template:
+ type: str
+ description:
+ - Instance Template to be used in creating the VMs. See
+ U(https://cloud.google.com/compute/docs/instance-templates) to learn more
+ about Instance Templates. Required for creating MIGs.
+ size:
+ type: int
+ description:
+ - Size of Managed Instance Group. If MIG already exists, it will be
+ resized to the number provided here. Required for creating MIGs.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - GCE project ID
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+ zone:
+ type: str
+ description:
+ - The GCE zone to use for this Managed Instance Group.
+ required: true
+ autoscaling:
+ type: dict
+ description:
+ - A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)'
+ and policy.max_instances (int) are required fields if autoscaling is used. See
+ U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
+ on Autoscaling.
+ named_ports:
+ type: list
+ description:
+      - Define named ports that backend services can forward data to. Format is a list of
+ name:port dictionaries.
+ recreate_instances:
+ type: bool
+ default: no
+ description:
+ - Recreate MIG instances.
+'''
+
+EXAMPLES = '''
+# Following playbook creates, rebuilds instances, resizes and then deletes a MIG.
+# Notes:
+# - Two valid Instance Templates must exist in your GCE project in order to run
+# this playbook. Change the fields to match the templates used in your
+# project.
+# - The use of the 'pause' module is not required, it is just for convenience.
+- name: Managed Instance Group Example
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Create MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 1
+ template: my-instance-template-1
+ named_ports:
+ - name: http
+ port: 80
+ - name: foobar
+ port: 82
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Recreate MIG Instances with Instance Template change.
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ template: my-instance-template-2-small
+ recreate_instances: yes
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Resize MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+
+ - name: Update MIG with Autoscaler
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+ template: my-instance-template-2-small
+ recreate_instances: yes
+ autoscaling:
+ enabled: yes
+ name: my-autoscaler
+ policy:
+ min_instances: 2
+ max_instances: 5
+ cool_down_period: 37
+ cpu_utilization:
+ target: .39
+ load_balancing_utilization:
+ target: 0.4
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Delete MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: absent
+ autoscaling:
+ enabled: no
+ name: my-autoscaler
+'''
+RETURN = '''
+zone:
+ description: Zone in which to launch MIG.
+ returned: always
+ type: str
+ sample: "us-central1-b"
+
+template:
+ description: Instance Template to use for VMs. Must exist prior to using with MIG.
+ returned: changed
+ type: str
+ sample: "my-instance-template"
+
+name:
+ description: Name of the Managed Instance Group.
+ returned: changed
+ type: str
+ sample: "my-managed-instance-group"
+
+named_ports:
+ description: list of named ports acted upon
+ returned: when named_ports are initially set or updated
+ type: list
+ sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
+
+size:
+ description: Number of VMs in Managed Instance Group.
+ returned: changed
+ type: int
+ sample: 4
+
+created_instances:
+ description: Names of instances created.
+ returned: When instances are created.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+deleted_instances:
+ description: Names of instances deleted.
+ returned: When instances are deleted.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_created_instances:
+ description: Names of instances created during resizing.
+ returned: When a resize results in the creation of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_deleted_instances:
+ description: Names of instances deleted during resizing.
+ returned: When a resize results in the deletion of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+recreated_instances:
+ description: Names of instances recreated.
+ returned: When instances are recreated.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+created_autoscaler:
+ description: True if Autoscaler was attempted and created. False otherwise.
+ returned: When the creation of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+updated_autoscaler:
+ description: True if an Autoscaler update was attempted and succeeded.
+ False returned if update failed.
+ returned: When the update of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+deleted_autoscaler:
+ description: True if an Autoscaler delete attempted and succeeded.
+ False returned if delete failed.
+ returned: When the delete of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+set_named_ports:
+ description: True if the named_ports have been set
+ returned: named_ports have been set
+ type: bool
+ sample: true
+
+updated_named_ports:
+ description: True if the named_ports have been updated
+ returned: named_ports have been updated
+ type: bool
+ sample: true
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+                       [{'name': str, 'required': True/False, 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+    :return: Tuple of (bool, str); True with an empty message when the
+             fields are valid, False with an error message otherwise.
+    :rtype: ``(bool, str)``
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if d['required'] is True:
+ return (False, "%s is required and must be of type: %s" %
+ (d['name'], str(d['type'])))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ return (False,
+ "%s must be of type: %s" % (d['name'], str(d['type'])))
+
+ return (True, '')
+
+
+def _validate_autoscaling_params(params):
+ """
+ Validate that the minimum configuration is present for autoscaling.
+
+ :param params: Ansible dictionary containing autoscaling configuration
+ It is expected that autoscaling config will be found at the
+ key 'autoscaling'.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if autoscaler
+ is valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['autoscaling']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['autoscaling'], dict):
+ return (False,
+ 'autoscaling: configuration expected to be a dictionary.')
+
+ # check first-level required fields
+ as_req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'enabled', 'required': True, 'type': bool},
+ {'name': 'policy', 'required': True, 'type': dict}
+ ] # yapf: disable
+
+ (as_req_valid, as_req_msg) = _check_params(params['autoscaling'],
+ as_req_fields)
+ if not as_req_valid:
+ return (False, as_req_msg)
+
+ # check policy configuration
+ as_policy_fields = [
+ {'name': 'max_instances', 'required': True, 'type': int},
+ {'name': 'min_instances', 'required': False, 'type': int},
+ {'name': 'cool_down_period', 'required': False, 'type': int}
+ ] # yapf: disable
+
+ (as_policy_valid, as_policy_msg) = _check_params(
+ params['autoscaling']['policy'], as_policy_fields)
+ if not as_policy_valid:
+ return (False, as_policy_msg)
+
+ # TODO(supertom): check utilization fields
+
+ return (True, '')
+
+
+def _validate_named_port_params(params):
+ """
+ Validate the named ports parameters
+
+ :param params: Ansible dictionary containing named_ports configuration
+                   It is expected that the named_ports config will be found
+                   at the key 'named_ports'. That key should contain a list
+                   of dictionaries with 'name' and 'port' keys.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if params
+ are valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
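+
+    Example of a valid configuration (illustrative)::
+
+        {'named_ports': [{'name': 'http', 'port': 80},
+                         {'name': 'https', 'port': 443}]}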
+ """
+ if not params['named_ports']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['named_ports'], list):
+ return (False, 'named_ports: expected list of name:port dictionaries.')
+ req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'port', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ for np in params['named_ports']:
+ (valid_named_ports, np_msg) = _check_params(np, req_fields)
+ if not valid_named_ports:
+ return (False, np_msg)
+
+ return (True, '')
+
+
+def _get_instance_list(mig, field='name', filter_list=None):
+ """
+ Helper to grab field from instances response.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param field: Field name in list_managed_instances response. Defaults
+ to 'name'.
+ :type field: ``str``
+
+ :param filter_list: list of 'currentAction' strings to filter on. Only
+ items that match a currentAction in this list will
+ be returned. Default is "['NONE']".
+ :type filter_list: ``list`` of ``str``
+
+ :return: List of strings from list_managed_instances response.
+ :rtype: ``list``
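+
+    Example (illustrative; assumes an initialized ``mig``)::
+
+        # names of instances GCE is currently creating
+        _get_instance_list(mig, filter_list=['CREATING'])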
+ """
+ filter_list = ['NONE'] if filter_list is None else filter_list
+
+ return [x[field] for x in mig.list_managed_instances()
+ if x['currentAction'] in filter_list]
+
+
+def _gen_gce_as_policy(as_params):
+ """
+ Take Autoscaler params and generate GCE-compatible policy.
+
+ :param as_params: Dictionary in Ansible-playbook format
+ containing policy arguments.
+ :type as_params: ``dict``
+
+ :return: GCE-compatible policy dictionary
+ :rtype: ``dict``
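+
+    Example (illustrative)::
+
+        _gen_gce_as_policy({'max_instances': 5, 'min_instances': 2,
+                            'cool_down_period': 60,
+                            'cpu_utilization': {'target': 0.6}})
+        # => {'maxNumReplicas': 5, 'minNumReplicas': 2,
+        #     'coolDownPeriodSec': 60,
+        #     'cpuUtilization': {'utilizationTarget': 0.6}}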
+ """
+ asp_data = {}
+ asp_data['maxNumReplicas'] = as_params['max_instances']
+ if 'min_instances' in as_params:
+ asp_data['minNumReplicas'] = as_params['min_instances']
+ if 'cool_down_period' in as_params:
+ asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+ if 'cpu_utilization' in as_params and 'target' in as_params[
+ 'cpu_utilization']:
+ asp_data['cpuUtilization'] = {'utilizationTarget':
+ as_params['cpu_utilization']['target']}
+ if 'load_balancing_utilization' in as_params and 'target' in as_params[
+ 'load_balancing_utilization']:
+ asp_data['loadBalancingUtilization'] = {
+ 'utilizationTarget':
+ as_params['load_balancing_utilization']['target']
+ }
+
+ return asp_data
+
+
+def create_autoscaler(gce, mig, params):
+ """
+ Create a new Autoscaler for a MIG.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param mig: An initialized GCEInstanceGroupManager.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+    :return: True if the Autoscaler was created, False otherwise.
+    :rtype: ``bool``
+ """
+ changed = False
+ as_policy = _gen_gce_as_policy(params['policy'])
+ autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone,
+ instance_group=mig, policy=as_policy)
+ if autoscaler:
+ changed = True
+ return changed
+
+
+def update_autoscaler(gce, autoscaler, params):
+ """
+ Update an Autoscaler.
+
+ Takes an existing Autoscaler object, and updates it with
+ the supplied params before calling libcloud's update method.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param autoscaler: An initialized GCEAutoscaler.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if changes, False otherwise.
+ :rtype: ``bool``
+ """
+ as_policy = _gen_gce_as_policy(params['policy'])
+ if autoscaler.policy != as_policy:
+ autoscaler.policy = as_policy
+ autoscaler = gce.ex_update_autoscaler(autoscaler)
+ if autoscaler:
+ return True
+ return False
+
+
+def delete_autoscaler(autoscaler):
+ """
+ Delete an Autoscaler. Does not affect MIG.
+
+    :param autoscaler: Autoscaler object from libcloud.
+    :type autoscaler: :class: `GCEAutoscaler`
+
+    :return: True if the Autoscaler was deleted, False otherwise.
+    :rtype: ``bool``
+ """
+ changed = False
+ if autoscaler.destroy():
+ changed = True
+ return changed
+
+
+def get_autoscaler(gce, name, zone):
+ """
+ Get an Autoscaler from GCE.
+
+    If the Autoscaler is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Autoscaler.
+ :type name: ``str``
+
+ :param zone: Zone that the Autoscaler is located in.
+ :type zone: ``str``
+
+ :return: A GCEAutoscaler object or None.
+ :rtype: :class: `GCEAutoscaler` or None
+ """
+ try:
+ # Does the Autoscaler already exist?
+ return gce.ex_get_autoscaler(name, zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_mig(gce, params):
+ """
+ Create a new Managed Instance Group.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING']
+
+ mig = gce.ex_create_instancegroupmanager(
+ name=params['name'], size=params['size'], template=params['template'],
+ zone=params['zone'])
+
+ if mig:
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def delete_mig(mig):
+ """
+    Delete a Managed Instance Group. All VMs in that MIG are also deleted.
+
+ :param mig: Managed Instance Group Object from Libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
+ 'ABANDONING', 'RESTARTING', 'REFRESHING']
+ instance_names = _get_instance_list(mig, filter_list=actions_filter)
+ if mig.destroy():
+ changed = True
+ return_data = instance_names
+
+ return (changed, return_data)
+
+
+def recreate_instances_in_mig(mig):
+ """
+ Recreate the instances for a Managed Instance Group.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['RECREATING']
+
+ if mig.recreate_instances():
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def resize_mig(mig, size):
+ """
+ Resize a Managed Instance Group.
+
+ Based on the size provided, GCE will automatically create and delete
+ VMs as needed.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+    :param size: New target size (number of instances) for the MIG.
+    :type size: ``int``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING', 'DELETING']
+
+ if mig.resize(size):
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def get_mig(gce, name, zone):
+ """
+ Get a Managed Instance Group from GCE.
+
+    If the MIG is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Managed Instance Group.
+ :type name: ``str``
+
+ :param zone: Zone that the Managed Instance Group is located in.
+ :type zone: ``str``
+
+ :return: A GCEInstanceGroupManager object or None.
+ :rtype: :class: `GCEInstanceGroupManager` or None
+ """
+ try:
+ # Does the MIG already exist?
+ return gce.ex_get_instancegroupmanager(name=name, zone=zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def update_named_ports(mig, named_ports):
+ """
+ Set the named ports on a Managed Instance Group.
+
+    Sort the existing and new named ports. If they differ, update.
+    This also implicitly allows for the removal of named_ports.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+    :param named_ports: list of dictionaries with 'name' and 'port' keys
+ :type named_ports: ``list`` of ``dict``
+
+ :return: True if successful
+ :rtype: ``bool``
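+
+    Example (illustrative; assumes an initialized ``mig``)::
+
+        update_named_ports(mig, [{'name': 'http', 'port': 80},
+                                 {'name': 'https', 'port': 443}])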
+ """
+ changed = False
+ existing_ports = []
+ new_ports = []
+ if hasattr(mig.instance_group, 'named_ports'):
+ existing_ports = sorted(mig.instance_group.named_ports,
+ key=lambda x: x['name'])
+ if named_ports is not None:
+ new_ports = sorted(named_ports, key=lambda x: x['name'])
+
+ if existing_ports != new_ports:
+ if mig.instance_group.set_named_ports(named_ports):
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ template=dict(),
+ recreate_instances=dict(type='bool', default=False),
+ # Do not set a default size here. For Create and some update
+ # operations, it is required and should be explicitly set.
+ # Below, we set it to the existing value if it has not been set.
+ size=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ zone=dict(required=True),
+ autoscaling=dict(type='dict', default=None),
+ named_ports=dict(type='list', default=None),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+        project_id=dict(),
+    ))
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['zone'] = module.params.get('zone')
+ params['name'] = module.params.get('name')
+ params['size'] = module.params.get('size')
+ params['template'] = module.params.get('template')
+ params['recreate_instances'] = module.params.get('recreate_instances')
+ params['autoscaling'] = module.params.get('autoscaling', None)
+ params['named_ports'] = module.params.get('named_ports', None)
+
+ (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
+ if not valid_autoscaling:
+ module.fail_json(msg=as_msg, changed=False)
+
+ if params['named_ports'] is not None and not hasattr(
+ gce, 'ex_instancegroup_set_named_ports'):
+ module.fail_json(
+ msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
+ changed=False)
+
+ (valid_named_ports, np_msg) = _validate_named_port_params(params)
+ if not valid_named_ports:
+ module.fail_json(msg=np_msg, changed=False)
+
+ changed = False
+ json_output = {'state': params['state'], 'zone': params['zone']}
+ mig = get_mig(gce, params['name'], params['zone'])
+
+ if not mig:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown managed instance group: %s" %
+ (params['name']))
+ else:
+ # Create MIG
+ req_create_fields = [
+ {'name': 'template', 'required': True, 'type': str},
+ {'name': 'size', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ (valid_create_fields, valid_create_msg) = _check_params(
+ params, req_create_fields)
+ if not valid_create_fields:
+ module.fail_json(msg=valid_create_msg, changed=False)
+
+ (changed, json_output['created_instances']) = create_mig(gce,
+ params)
+ if params['autoscaling'] and params['autoscaling'][
+ 'enabled'] is True:
+ # Fetch newly-created MIG and create Autoscaler for it.
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+                    msg='Unable to create autoscaler for MIG %s \
+                    in zone: %s' % (params['name'], params['zone']),
+ changed=False)
+
+ json_output['created_autoscaler'] = True
+ # Add named ports if available
+ if params['named_ports']:
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+                    msg='Unable to fetch created MIG %s to set \
+                    named_ports in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+ json_output['set_named_ports'] = update_named_ports(
+ mig, params['named_ports'])
+ if json_output['set_named_ports']:
+ json_output['named_ports'] = params['named_ports']
+
+ elif params['state'] == 'absent':
+ # Delete MIG
+
+ # First, check and remove the autoscaler, if present.
+ # Note: multiple autoscalers can be associated to a single MIG. We
+ # only handle the one that is named, but we might want to think about this.
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ module.fail_json(msg='Unable to fetch autoscaler %s to delete \
+ in zone: %s' % (params['autoscaling']['name'], params['zone']),
+ changed=False)
+
+ changed = delete_autoscaler(autoscaler)
+ json_output['deleted_autoscaler'] = changed
+
+ # Now, delete the MIG.
+ (changed, json_output['deleted_instances']) = delete_mig(mig)
+
+ else:
+ # Update MIG
+
+        # If we're going to update a MIG, we need size and template values.
+ # If not specified, we use the values from the existing MIG.
+ if not params['size']:
+ params['size'] = mig.size
+
+ if not params['template']:
+ params['template'] = mig.template.name
+
+ if params['template'] != mig.template.name:
+ # Update Instance Template.
+ new_template = gce.ex_get_instancetemplate(params['template'])
+ mig.set_instancetemplate(new_template)
+ json_output['updated_instancetemplate'] = True
+ changed = True
+ if params['recreate_instances'] is True:
+ # Recreate Instances.
+ (changed, json_output['recreated_instances']
+ ) = recreate_instances_in_mig(mig)
+
+ if params['size'] != mig.size:
+ # Resize MIG.
+ keystr = 'created' if params['size'] > mig.size else 'deleted'
+ (changed, json_output['resize_%s_instances' %
+ (keystr)]) = resize_mig(mig, params['size'])
+
+ # Update Autoscaler
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ # Try to create autoscaler.
+ # Note: this isn't perfect, if the autoscaler name has changed
+ # we wouldn't know that here.
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler %s for existing MIG %s\
+ in zone: %s' % (params['autoscaling']['name'],
+ params['name'], params['zone']),
+ changed=False)
+ json_output['created_autoscaler'] = True
+ changed = True
+ else:
+ if params['autoscaling']['enabled'] is False:
+ # Delete autoscaler
+ changed = delete_autoscaler(autoscaler)
+                    json_output['deleted_autoscaler'] = changed
+ else:
+ # Update policy, etc.
+ changed = update_autoscaler(gce, autoscaler,
+ params['autoscaling'])
+ json_output['updated_autoscaler'] = changed
+ named_ports = params['named_ports'] or []
+ json_output['updated_named_ports'] = update_named_ports(mig,
+ named_ports)
+ if json_output['updated_named_ports']:
+ json_output['named_ports'] = named_ports
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_net.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_net.py
new file mode 100644
index 00000000..48971ae7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_net.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_net
+short_description: create/destroy GCE networks and firewall rules
+description:
+ - This module can create and destroy Google Compute Engine networks and
+ firewall rules U(https://cloud.google.com/compute/docs/networking).
+ The I(name) parameter is reserved for referencing a network while the
+ I(fwname) parameter is used to reference firewall rules.
+ IPv4 Address ranges must be specified using the CIDR
+ U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ allowed:
+ type: str
+ description:
+      - the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25)).
+        This parameter is mandatory when creating or updating a firewall rule.
+ ipv4_range:
+ type: str
+ description:
+      - the IPv4 address range in CIDR notation for the network.
+        This parameter is not required when referencing an existing network
+        via the name parameter, but it is mandatory when creating a new
+        network in legacy or custom mode.
+ fwname:
+ type: str
+ description:
+ - name of the firewall rule
+ name:
+ type: str
+ description:
+ - name of the network
+ src_range:
+ type: list
+ description:
+ - the source IPv4 address range in CIDR notation
+ default: []
+ src_tags:
+ type: list
+ description:
+ - the source instance tags for creating a firewall rule
+ default: []
+ target_tags:
+ type: list
+ description:
+ - the target instance tags for creating a firewall rule
+ default: []
+ state:
+ type: str
+ description:
+ - desired state of the network or firewall
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ mode:
+ type: str
+ description:
+      - network mode for Google Cloud.
+        C(legacy) indicates a network with an IP address range;
+        C(auto) automatically generates subnetworks in different regions;
+        C(custom) uses networks to group subnets of user-specified IP address ranges.
+        See U(https://cloud.google.com/compute/docs/networking#network_types).
+ default: "legacy"
+ choices: ["legacy", "auto", "custom"]
+ subnet_name:
+ type: str
+ description:
+ - name of subnet to create
+ subnet_region:
+ type: str
+ description:
+ - region of subnet to create
+ subnet_desc:
+ type: str
+ description:
+ - description of subnet to create
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
+'''
+
+EXAMPLES = '''
+# Create a 'legacy' Network
+- name: Create Legacy Network
+ community.general.gce_net:
+ name: legacynet
+ ipv4_range: '10.24.17.0/24'
+ mode: legacy
+ state: present
+
+# Create an 'auto' Network
+- name: Create Auto Network
+ community.general.gce_net:
+ name: autonet
+ mode: auto
+ state: present
+
+# Create a 'custom' Network
+- name: Create Custom Network
+ community.general.gce_net:
+ name: customnet
+ mode: custom
+ subnet_name: "customsubnet"
+ subnet_region: us-east1
+ ipv4_range: '10.240.16.0/24'
+ state: "present"
+
+# Create Firewall Rule with Source Tags
+- name: Create Firewall Rule w/Source Tags
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_tags: "foo,bar"
+
+# Create Firewall Rule with Source Range
+- name: Create Firewall Rule w/Source Range
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_range: ['10.1.1.1/32']
+
+# Create Custom Subnetwork
+- name: Create Custom Subnetwork
+ community.general.gce_net:
+ name: privatenet
+ mode: custom
+ subnet_name: subnet_example
+ subnet_region: us-central1
+ ipv4_range: '10.0.0.0/16'
+'''
+
+RETURN = '''
+allowed:
+ description: Rules (ports and protocols) specified by this firewall rule.
+ returned: When specified
+ type: str
+ sample: "tcp:80;icmp"
+
+fwname:
+ description: Name of the firewall rule.
+ returned: When specified
+ type: str
+ sample: "my-fwname"
+
+ipv4_range:
+ description: IPv4 range of the specified network or subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "10.0.0.0/16"
+
+name:
+ description: Name of the network.
+ returned: always
+ type: str
+ sample: "my-network"
+
+src_range:
+ description: IP address blocks a firewall rule applies to.
+ returned: when specified
+ type: list
+ sample: [ '10.1.1.12/8' ]
+
+src_tags:
+ description: Instance Tags firewall rule applies to.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+
+state:
+ description: State of the item operated on.
+ returned: always
+ type: str
+ sample: "present"
+
+subnet_name:
+ description: Name of the subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "my-subnetwork"
+
+subnet_region:
+ description: Region of the specified subnet.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "us-east1"
+
+target_tags:
+ description: Instance Tags with these tags receive traffic allowed by firewall rule.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+'''
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def format_allowed_section(allowed):
+ """Format each section of the allowed list"""
+ if allowed.count(":") == 0:
+ protocol = allowed
+ ports = []
+ elif allowed.count(":") == 1:
+ protocol, ports = allowed.split(":")
+ else:
+ return []
+ if ports.count(","):
+ ports = ports.split(",")
+ elif ports:
+ ports = [ports]
+ return_val = {"IPProtocol": protocol}
+ if ports:
+ return_val["ports"] = ports
+ return return_val
+
+
+def format_allowed(allowed):
+ """Format the 'allowed' value so that it is GCE compatible."""
+ return_value = []
+ if allowed.count(";") == 0:
+ return [format_allowed_section(allowed)]
+ else:
+ sections = allowed.split(";")
+ for section in sections:
+ return_value.append(format_allowed_section(section))
+ return return_value
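+
+# Illustrative example (assumed, matching the parsing above):
+#   format_allowed('tcp:80-800;udp:1-25')
+#   => [{'IPProtocol': 'tcp', 'ports': ['80-800']},
+#       {'IPProtocol': 'udp', 'ports': ['1-25']}]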
+
+
+def sorted_allowed_list(allowed_list):
+ """Sort allowed_list (output of format_allowed) by protocol and port."""
+ # sort by protocol
+ allowed_by_protocol = sorted(allowed_list, key=lambda x: x['IPProtocol'])
+ # sort the ports list
+ return sorted(allowed_by_protocol, key=lambda y: sorted(y.get('ports', [])))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ allowed=dict(),
+ ipv4_range=dict(),
+ fwname=dict(),
+ name=dict(),
+ src_range=dict(default=[], type='list'),
+ src_tags=dict(default=[], type='list'),
+ target_tags=dict(default=[], type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']),
+ subnet_name=dict(),
+ subnet_region=dict(),
+ subnet_desc=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ allowed = module.params.get('allowed')
+ ipv4_range = module.params.get('ipv4_range')
+ fwname = module.params.get('fwname')
+ name = module.params.get('name')
+ src_range = module.params.get('src_range')
+ src_tags = module.params.get('src_tags')
+ target_tags = module.params.get('target_tags')
+ state = module.params.get('state')
+ mode = module.params.get('mode')
+ subnet_name = module.params.get('subnet_name')
+ subnet_region = module.params.get('subnet_region')
+ subnet_desc = module.params.get('subnet_desc')
+
+ changed = False
+ json_output = {'state': state}
+
+ if state in ['active', 'present']:
+ network = None
+ subnet = None
+ try:
+ network = gce.ex_get_network(name)
+ json_output['name'] = name
+ if mode == 'legacy':
+ json_output['ipv4_range'] = network.cidr
+ if network and mode == 'custom' and subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = subnet.cidr
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants to create a new network that doesn't yet exist
+ if name and not network:
+ if not ipv4_range and mode != 'auto':
+ module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required",
+ changed=False)
+ args = [ipv4_range if mode == 'legacy' else None]
+ kwargs = {}
+ if mode != 'legacy':
+ kwargs['mode'] = mode
+
+ try:
+ network = gce.ex_create_network(name, *args, **kwargs)
+ json_output['name'] = name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except TypeError:
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if (subnet_name or ipv4_range) and not subnet and mode == 'custom':
+ if not hasattr(gce, 'ex_create_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ if not subnet_name or not ipv4_range or not subnet_region:
+ module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed)
+
+ try:
+ subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=changed)
+
+ if fwname:
+ # user creating a firewall rule
+ if not allowed and not src_range and not src_tags:
+ if changed and network:
+ module.fail_json(
+ msg="Network created, but missing required " + "firewall rule parameter(s)", changed=True)
+ module.fail_json(
+ msg="Missing required firewall rule parameter(s)",
+ changed=False)
+
+ allowed_list = format_allowed(allowed)
+
+ # Fetch existing rule and if it exists, compare attributes
+ # update if attributes changed. Create if doesn't exist.
+ try:
+ fw_changed = False
+ fw = gce.ex_get_firewall(fwname)
+
+ # If old and new attributes are different, we update the firewall rule.
+ # This implicitly lets us clear out attributes as well.
+ # allowed_list is required and must not be None for firewall rules.
+ if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)):
+ fw.allowed = allowed_list
+ fw_changed = True
+
+ # source_ranges might not be set in the project; cast it to an empty list
+ fw.source_ranges = fw.source_ranges or []
+
+ # If these attributes are lists, we sort them first, then compare.
+ # Otherwise, we update if they differ.
+ if fw.source_ranges != src_range:
+ if isinstance(src_range, list):
+ if sorted(fw.source_ranges) != sorted(src_range):
+ fw.source_ranges = src_range
+ fw_changed = True
+ else:
+ fw.source_ranges = src_range
+ fw_changed = True
+
+ # source_tags might not be set in the project; cast it to an empty list
+ fw.source_tags = fw.source_tags or []
+
+ if fw.source_tags != src_tags:
+ if isinstance(src_tags, list):
+ if sorted(fw.source_tags) != sorted(src_tags):
+ fw.source_tags = src_tags
+ fw_changed = True
+ else:
+ fw.source_tags = src_tags
+ fw_changed = True
+
+ # target_tags might not be set in the project; cast it to an empty list
+ fw.target_tags = fw.target_tags or []
+
+ if fw.target_tags != target_tags:
+ if isinstance(target_tags, list):
+ if sorted(fw.target_tags) != sorted(target_tags):
+ fw.target_tags = target_tags
+ fw_changed = True
+ else:
+ fw.target_tags = target_tags
+ fw_changed = True
+
+ if fw_changed is True:
+ try:
+ gce.ex_update_firewall(fw)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # Firewall rule not found so we try to create it.
+ except ResourceNotFoundError:
+ try:
+ gce.ex_create_firewall(fwname, allowed_list, network=name,
+ source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
+ changed = True
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['fwname'] = fwname
+ json_output['allowed'] = allowed
+ json_output['src_range'] = src_range
+ json_output['src_tags'] = src_tags
+ json_output['target_tags'] = target_tags
+
+ if state in ['absent', 'deleted']:
+ if fwname:
+ json_output['fwname'] = fwname
+ fw = None
+ try:
+ fw = gce.ex_get_firewall(fwname)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if fw:
+ gce.ex_destroy_firewall(fw)
+ changed = True
+ elif subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ json_output['name'] = subnet_name
+ subnet = None
+ try:
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if subnet:
+ gce.ex_destroy_subnetwork(subnet)
+ changed = True
+ elif name:
+ json_output['name'] = name
+ network = None
+ try:
+ network = gce.ex_get_network(name)
+
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if network:
+ try:
+ gce.ex_destroy_network(network)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_pd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_pd.py
new file mode 100644
index 00000000..7e60285f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_pd.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_pd
+short_description: utilize GCE persistent disk resources
+description:
+ - This module can create and destroy unformatted GCE persistent disks
+ U(https://developers.google.com/compute/docs/disks#persistentdisks).
+ It also supports attaching and detaching disks from running instances.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ detach_only:
+ description:
+ - do not destroy the disk, merely detach it from an instance
+ type: bool
+ instance_name:
+ type: str
+ description:
+ - instance name if you wish to attach or detach the disk
+ mode:
+ type: str
+ description:
+ - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
+ default: "READ_ONLY"
+ choices: ["READ_WRITE", "READ_ONLY"]
+ name:
+ type: str
+ description:
+ - name of the disk
+ required: true
+ size_gb:
+ type: str
+ description:
+ - whole integer size of disk (in GB) to create, default is 10 GB
+ default: "10"
+ image:
+ type: str
+ description:
+ - the source image to use for the disk
+ snapshot:
+ type: str
+ description:
+ - the source snapshot to use for the disk
+ state:
+ type: str
+ description:
+ - desired state of the persistent disk
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ zone:
+ type: str
+ description:
+ - zone in which to create the disk
+ default: "us-central1-b"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ default: "pd-standard"
+ delete_on_termination:
+ description:
+ - If C(yes), deletes the volume when instance is terminated
+ type: bool
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+        If I(image) has been used, I(image_family) is ignored.
+        Cannot specify both I(image) and I(snapshot).
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple attachment action to an existing instance
+ local_action:
+ module: gce_pd
+ instance_name: notlocalhost
+ size_gb: 5
+ name: pd
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError, ResourceInUseError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ delete_on_termination=dict(type='bool'),
+ detach_only=dict(type='bool'),
+ instance_name=dict(),
+ mode=dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
+ name=dict(required=True),
+ size_gb=dict(default=10),
+ disk_type=dict(default='pd-standard'),
+ image=dict(),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ snapshot=dict(),
+ state=dict(default='present'),
+ zone=dict(default='us-central1-b'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ delete_on_termination = module.params.get('delete_on_termination')
+ detach_only = module.params.get('detach_only')
+ instance_name = module.params.get('instance_name')
+ mode = module.params.get('mode')
+ name = module.params.get('name')
+ size_gb = module.params.get('size_gb')
+ disk_type = module.params.get('disk_type')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+
+ if delete_on_termination and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when requesting delete on termination',
+ changed=False)
+
+ if detach_only and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when detaching a disk',
+ changed=False)
+
+ disk = inst = None
+ changed = is_attached = False
+
+ json_output = {'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type}
+ if detach_only:
+ json_output['detach_only'] = True
+ json_output['detached_from_instance'] = instance_name
+
+ if instance_name:
+ # user wants to attach/detach from an existing instance
+ try:
+ inst = gce.ex_get_node(instance_name, zone)
+ # is the disk attached?
+ for d in inst.extra['disks']:
+ if d['deviceName'] == name:
+ is_attached = True
+ json_output['attached_mode'] = d['mode']
+ json_output['attached_to_instance'] = inst.name
+ except Exception:
+ pass
+
+ # find disk if it already exists
+ try:
+ disk = gce.ex_get_volume(name)
+ json_output['size_gb'] = int(disk.size)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants a disk to exist. If "instance_name" is supplied the user
+ # also wants it attached
+ if state in ['active', 'present']:
+
+ if not size_gb:
+ module.fail_json(msg="Must supply a size_gb", changed=False)
+ try:
+ size_gb = int(round(float(size_gb)))
+ if size_gb < 1:
+ raise Exception
+ except Exception:
+ module.fail_json(msg="Must supply a size_gb larger than 1 GB",
+ changed=False)
+
+ if instance_name and inst is None:
+ module.fail_json(msg='Instance %s does not exist in zone %s' % (
+ instance_name, zone), changed=False)
+
+ if not disk:
+ if image is not None and snapshot is not None:
+ module.fail_json(
+ msg='Cannot give both image (%s) and snapshot (%s)' % (
+ image, snapshot), changed=False)
+ lc_image = None
+ lc_snapshot = None
+ if image_family is not None:
+ lc_image = gce.ex_get_image_from_family(image_family, ex_project_list=external_projects)
+ elif image is not None:
+ lc_image = gce.ex_get_image(image, ex_project_list=external_projects)
+ elif snapshot is not None:
+ lc_snapshot = gce.ex_get_snapshot(snapshot)
+ try:
+ disk = gce.create_volume(
+ size_gb, name, location=zone, image=lc_image,
+ snapshot=lc_snapshot, ex_disk_type=disk_type)
+ except ResourceExistsError:
+ pass
+ except QuotaExceededError:
+ module.fail_json(msg='Requested disk size exceeds quota',
+ changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['size_gb'] = size_gb
+ if image is not None:
+ json_output['image'] = image
+ if snapshot is not None:
+ json_output['snapshot'] = snapshot
+ changed = True
+ if inst and not is_attached:
+ try:
+ gce.attach_volume(inst, disk, device=name, ex_mode=mode,
+ ex_auto_delete=delete_on_termination)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['attached_to_instance'] = inst.name
+ json_output['attached_mode'] = mode
+ if delete_on_termination:
+ json_output['delete_on_termination'] = True
+ changed = True
+
+ # user wants to delete a disk (or perhaps just detach it).
+ if state in ['absent', 'deleted'] and disk:
+
+ if inst and is_attached:
+ try:
+ gce.detach_volume(disk, ex_node=inst)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+ if not detach_only:
+ try:
+ gce.destroy_volume(disk)
+ except ResourceInUseError as e:
+ module.fail_json(msg=str(e.value), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_snapshot.py
new file mode 100644
index 00000000..4fca1b05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_snapshot.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_snapshot
+short_description: Create or destroy snapshots for GCE storage volumes
+description:
+ - Manages snapshots for GCE instances. This module manages snapshots for
+ the storage volumes of a GCE compute instance. If there are multiple
+ volumes, each snapshot will be prepended with the disk name
+options:
+ instance_name:
+ type: str
+ description:
+ - The GCE instance to snapshot
+ required: True
+ snapshot_name:
+ type: str
+ description:
+ - The name of the snapshot to manage
+ required: True
+ disks:
+ type: list
+ description:
+ - A list of disks to create snapshots for. If none is provided,
+ all of the volumes will have snapshots created.
+ required: False
+ state:
+ type: str
+ description:
+ - Whether a snapshot should be C(present) or C(absent)
+ required: false
+ default: present
+ choices: [present, absent]
+ service_account_email:
+ type: str
+ description:
+ - GCP service account email for the project where the instance resides
+ credentials_file:
+ type: path
+ description:
+ - The path to the credentials file associated with the service account
+ project_id:
+ type: str
+ description:
+ - The GCP project ID to use
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+author: Rob Wagner (@robwagner33)
+'''
+
+EXAMPLES = '''
+- name: Create gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+- name: Delete gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: absent
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+# This example creates snapshots for only two of the available disks as
+# disk0-example-snapshot and disk1-example-snapshot
+- name: Create snapshots of specific disks
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ disks:
+ - disk0
+ - disk1
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+'''
+
+RETURN = '''
+snapshots_created:
+ description: List of newly created snapshots
+ returned: When snapshots are created
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_deleted:
+ description: List of destroyed snapshots
+ returned: When snapshots are deleted
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_existing:
+ description: List of snapshots that already existed (no-op)
+ returned: When snapshots were already present
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_absent:
+ description: List of snapshots that were already absent (no-op)
+ returned: When snapshots were already absent
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def find_snapshot(volume, name):
+ '''
+ Check if there is a snapshot already created with the given name for
+ the passed in volume.
+
+ Args:
+ volume: A gce StorageVolume object to manage
+ name: The name of the snapshot to look for
+
+ Returns:
+ The VolumeSnapshot object if one is found
+ '''
+ found_snapshot = None
+ snapshots = volume.list_snapshots()
+ for snapshot in snapshots:
+ if name == snapshot.name:
+ found_snapshot = snapshot
+ return found_snapshot
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(required=True),
+ snapshot_name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ disks=dict(default=None, type='list'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ instance_name = module.params.get('instance_name')
+ snapshot_name = module.params.get('snapshot_name')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+
+ json_output = dict(
+ changed=False,
+ snapshots_created=[],
+ snapshots_deleted=[],
+ snapshots_existing=[],
+ snapshots_absent=[]
+ )
+
+ snapshot = None
+
+ instance = gce.ex_get_node(instance_name, 'all')
+ instance_disks = instance.extra['disks']
+
+ for instance_disk in instance_disks:
+ disk_snapshot_name = snapshot_name
+ disk_info = gce._get_components_from_path(instance_disk['source'])
+ device_name = disk_info['name']
+ device_zone = disk_info['zone']
+ if disks is None or device_name in disks:
+ volume_obj = gce.ex_get_volume(device_name, device_zone)
+
+ # If we have more than one disk to snapshot, prepend the disk name
+ if len(instance_disks) > 1:
+ disk_snapshot_name = device_name + "-" + disk_snapshot_name
+
+ snapshot = find_snapshot(volume_obj, disk_snapshot_name)
+
+ if snapshot and state == 'present':
+ json_output['snapshots_existing'].append(disk_snapshot_name)
+
+ elif snapshot and state == 'absent':
+ snapshot.destroy()
+ json_output['changed'] = True
+ json_output['snapshots_deleted'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'present':
+ volume_obj.snapshot(disk_snapshot_name)
+ json_output['changed'] = True
+ json_output['snapshots_created'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'absent':
+ json_output['snapshots_absent'].append(disk_snapshot_name)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_tag.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_tag.py
new file mode 100644
index 00000000..1e36ed4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_tag.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+short_description: add or remove tag(s) to/from GCE instances
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
+    to/from GCE instances. Use 'instance_pattern' to update multiple instances in a specified zone.
+options:
+ instance_name:
+ type: str
+ description:
+ - The name of the GCE instance to add/remove tags.
+ - Required if C(instance_pattern) is not specified.
+ instance_pattern:
+ type: str
+ description:
+ - The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported.
+ See U(https://docs.python.org/2/library/re.html) for details.
+ - If C(instance_name) is not specified, this field is required.
+ tags:
+ type: list
+ description:
+ - Comma-separated list of tags to add or remove.
+ required: yes
+ state:
+ type: str
+ description:
+ - Desired state of the tags.
+ choices: [ absent, present ]
+ default: present
+ zone:
+ type: str
+ description:
+ - The zone of the disk specified by source.
+ default: us-central1-a
+ service_account_email:
+ type: str
+ description:
+ - Service account email.
+ pem_file:
+ type: path
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - Your GCE project ID.
+requirements:
+ - python >= 2.6
+ - apache-libcloud >= 0.17.0
+notes:
+ - Either I(instance_name) or I(instance_pattern) is required.
+author:
+  - Do Hoang Khiem (@dohoangkhiem) <dohoangkhiem@gmail.com>
+ - Tom Melendez (@supertom)
+'''
+
+EXAMPLES = '''
+- name: Add tags to instance
+ community.general.gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+- name: Remove tags from instance in default zone (us-central1-a)
+ community.general.gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+- name: Add tags to instances in zone that match pattern
+ community.general.gce_tag:
+ instance_pattern: test-server-*
+ tags: foo,bar
+ zone: us-central1-a
+ state: present
+'''
+
+import re
+import traceback
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _union_items(baselist, comparelist):
+ """Combine two lists, removing duplicates."""
+ return list(set(baselist) | set(comparelist))
+
+
+def _intersect_items(baselist, comparelist):
+ """Return matching items in both lists."""
+ return list(set(baselist) & set(comparelist))
+
+
+def _get_changed_items(baselist, comparelist):
+ """Return changed items as they relate to baselist."""
+ return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
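+
+# Illustrative examples (assumed):
+#   _intersect_items(['a', 'b'], ['b', 'c'])    => ['b']
+#   _get_changed_items(['a', 'b'], ['b', 'c'])  => ['a']  (items only in baselist)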
+
+
+def modify_tags(gce, module, node, tags, state='present'):
+ """Modify tags on an instance."""
+
+ existing_tags = node.extra['tags']
+ tags = [x.lower() for x in tags]
+ tags_changed = []
+
+ if state == 'absent':
+ # tags changed are any that intersect
+ tags_changed = _intersect_items(existing_tags, tags)
+ if not tags_changed:
+ return False, None
+ # update instance with tags in existing tags that weren't specified
+ node_tags = _get_changed_items(existing_tags, tags)
+ else:
+        # tags changed are any in the new list that weren't in existing
+ tags_changed = _get_changed_items(tags, existing_tags)
+ if not tags_changed:
+ return False, None
+ # update instance with the combined list
+ node_tags = _union_items(existing_tags, tags)
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(type='str'),
+ instance_pattern=dict(type='str'),
+ tags=dict(type='list', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ zone=dict(type='str', default='us-central1-a'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ project_id=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['instance_name', 'instance_pattern']
+ ],
+ required_one_of=[
+ ['instance_name', 'instance_pattern']
+ ],
+ )
+
+ instance_name = module.params.get('instance_name')
+ instance_pattern = module.params.get('instance_pattern')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ changed = False
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ # Create list of nodes to operate on
+ matching_nodes = []
+ try:
+ if instance_pattern:
+ instances = gce.list_nodes(ex_zone=zone)
+ # no instances in zone
+ if not instances:
+ module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[])
+ try:
+ # Python regex fully supported: https://docs.python.org/2/library/re.html
+ p = re.compile(instance_pattern)
+ matching_nodes = [i for i in instances if p.search(i.name) is not None]
+ except re.error as e:
+ module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False)
+ else:
+ matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
+
+ # Tag nodes
+ instance_pattern_matches = []
+ tags_changed = []
+ for node in matching_nodes:
+ changed, tags_changed = modify_tags(gce, module, node, tags, state)
+ if changed:
+ instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed})
+ if instance_pattern:
+ module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches)
+ else:
+ module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_backend_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_backend_service.py
new file mode 100644
index 00000000..ee564ae0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_backend_service.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gcp_backend_service
+short_description: Create or Destroy a Backend Service.
+description:
+ - Create or Destroy a Backend Service. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service) for an overview.
+ Full install/configuration instructions for the Google Cloud modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.3.0"
+notes:
+ - Update is not currently supported.
+  - Only global backend services are currently supported; regional backend services are not.
+  - Internal load balancing is not currently supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_backend_service) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ backend_service_name:
+ type: str
+ description:
+ - Name of the Backend Service.
+ required: true
+ backends:
+ type: list
+ description:
+ - List of backends that make up the backend service. A backend is made up of
+ an instance group and optionally several other parameters. See
+ U(https://cloud.google.com/compute/docs/reference/latest/backendServices)
+ for details.
+ required: true
+ healthchecks:
+ type: list
+ description:
+ - List of healthchecks. Only one healthcheck is supported.
+ required: true
+ enable_cdn:
+ description:
+ - If true, enable Cloud CDN for this Backend Service.
+ type: bool
+ port_name:
+ type: str
+ description:
+ - Name of the port on the managed instance group (MIG) that backend
+ services can forward data to. Required for external load balancing.
+ protocol:
+ type: str
+ description:
+ - The protocol this Backend Service uses to communicate with backends.
+ Possible values are HTTP, HTTPS, TCP, and SSL. The default is TCP.
+ choices: [HTTP, HTTPS, TCP, SSL]
+ default: TCP
+ required: false
+ timeout:
+ type: int
+ description:
+ - How many seconds to wait for the backend before considering it a failed
+ request. Default is 30 seconds. Valid range is 1-86400.
+ required: false
+ service_account_email:
+ type: str
+ description:
+ - Service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email.
+ pem_file:
+ type: str
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - GCE project ID.
+ state:
+ type: str
+ description:
+ - Desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum Backend Service
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+
+- name: Create BES with extended backend parameters
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ max_utilization: 0.6
+ max_rate: 10
+ - instance_group: managed_instance_group_2
+ max_utilization: 0.5
+ max_rate: 4
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+ timeout: 60
+'''
+
+RETURN = '''
+backend_service_created:
+  description: Indicates whether the Backend Service was created.
+ returned: When a Backend Service is created.
+ type: bool
+ sample: "True"
+backend_service_deleted:
+  description: Indicates whether the Backend Service was deleted.
+ returned: When a Backend Service is deleted.
+ type: bool
+ sample: "True"
+backend_service_name:
+ description: Name of the Backend Service.
+ returned: Always.
+ type: str
+ sample: "my-backend-service"
+backends:
+ description: List of backends (comprised of instance_group) that
+ make up a Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ { 'instance_group': 'mig_one', 'zone': 'us-central1-b'} ]"
+enable_cdn:
+ description: If Cloud CDN is enabled. null if not set.
+ returned: When a backend service exists.
+ type: bool
+ sample: "True"
+healthchecks:
+ description: List of healthchecks applied to the Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ 'my-healthcheck' ]"
+protocol:
+ description: Protocol used to communicate with the Backends.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "HTTP"
+port_name:
+ description: Name of Backend Port.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "myhttpport"
+timeout:
+ description: In seconds, how long before a request sent to a backend is
+ considered failed.
+ returned: If specified.
+ type: int
+ sample: "myhttpport"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params
+
+
+def _validate_params(params):
+ """
+ Validate backend_service params.
+
+ This function calls _validate_backend_params to verify
+ the backend-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'timeout', 'type': int, 'min': 1, 'max': 86400},
+ ]
+ try:
+ check_params(params, fields)
+ _validate_backend_params(params['backends'])
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_backend_params(backends):
+ """
+ Validate configuration for backends.
+
+    :param backends: Ansible list containing backends configuration (only).
+    :type backends: ``list``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'balancing_mode', 'type': str, 'values': ['UTILIZATION', 'RATE', 'CONNECTION']},
+ {'name': 'max_utilization', 'type': float},
+ {'name': 'max_connections', 'type': int},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+
+ if not backends:
+        raise ValueError('backends should be a non-empty list.')
+
+ for backend in backends:
+ try:
+ check_params(backend, fields)
+ except Exception:
+ raise
+
+ if 'max_rate' in backend and 'max_rate_per_instance' in backend:
+            raise ValueError('maxRate and maxRatePerInstance cannot both be set.')
+
+ return (True, '')
+
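+# Illustrative sketch (hypothetical names): a minimal ``backends`` list that
+# passes _validate_backend_params, combining the instance group with optional
+# balancing parameters.
+#
+#   _validate_backend_params([
+#       {'instance_group': 'mig-one', 'balancing_mode': 'RATE', 'max_rate': 100},
+#   ])  # -> (True, '')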
+
+def get_backend_service(gce, name):
+ """
+ Get a Backend Service from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Backend Service.
+ :type name: ``str``
+
+ :return: A GCEBackendService object or None.
+ :rtype: :class: `GCEBackendService` or None
+ """
+ try:
+ # Does the Backend Service already exist?
+ return gce.ex_get_backendservice(name=name)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def get_healthcheck(gce, name):
+ return gce.ex_get_healthcheck(name)
+
+
+def get_instancegroup(gce, name, zone=None):
+ return gce.ex_get_instancegroup(name=name, zone=zone)
+
+
+def create_backend_service(gce, params):
+ """
+ Create a new Backend Service.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats
+ :rtype: tuple in the format of (bool, bool)
+ """
+ from copy import deepcopy
+
+ changed = False
+ return_data = False
+ # only one healthcheck is currently supported
+ hc_name = params['healthchecks'][0]
+ hc = get_healthcheck(gce, hc_name)
+ backends = []
+ for backend in params['backends']:
+ ig = get_instancegroup(gce, backend['instance_group'],
+ backend.get('zone', None))
+ kwargs = deepcopy(backend)
+ kwargs['instance_group'] = ig
+        backends.append(gce.ex_create_backend(**kwargs))
+
+ bes = gce.ex_create_backendservice(
+ name=params['backend_service_name'], healthchecks=[hc], backends=backends,
+ enable_cdn=params['enable_cdn'], port_name=params['port_name'],
+ timeout_sec=params['timeout'], protocol=params['protocol'])
+
+ if bes:
+ changed = True
+ return_data = True
+
+ return (changed, return_data)
+
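+# Illustrative sketch (hypothetical names): each backend dict above is passed
+# through to libcloud with its instance_group name swapped for the resolved
+# object, e.g. {'instance_group': 'mig-one', 'max_rate': 10} ends up as
+# gce.ex_create_backend(instance_group=<GCEInstanceGroup mig-one>, max_rate=10).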
+
+def delete_backend_service(bes):
+ """
+ Delete a Backend Service. The Instance Groups are NOT destroyed.
+ """
+ changed = False
+ return_data = False
+ if bes.destroy():
+ changed = True
+ return_data = True
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ backends=dict(type='list', required=True),
+ backend_service_name=dict(required=True),
+ healthchecks=dict(type='list', required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ enable_cdn=dict(type='bool'),
+ port_name=dict(type='str'),
+ protocol=dict(type='str', default='TCP',
+ choices=['HTTP', 'HTTPS', 'SSL', 'TCP']),
+ timeout=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['backend_service_name'] = module.params.get('backend_service_name')
+ params['backends'] = module.params.get('backends')
+ params['healthchecks'] = module.params.get('healthchecks')
+ params['enable_cdn'] = module.params.get('enable_cdn', None)
+ params['port_name'] = module.params.get('port_name', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['timeout'] = module.params.get('timeout', None)
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+        module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ bes = get_backend_service(gce, params['backend_service_name'])
+
+ if not bes:
+ if params['state'] == 'absent':
+ # Doesn't exist and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown backend service: %s" %
+ (params['backend_service_name']))
+ else:
+ # Create
+ (changed, json_output['backend_service_created']) = create_backend_service(gce,
+ params)
+ elif params['state'] == 'absent':
+ # Delete
+ (changed, json_output['backend_service_deleted']) = delete_backend_service(bes)
+ else:
+ # TODO(supertom): Add update support when it is available in libcloud.
+ changed = False
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_forwarding_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_forwarding_rule.py
new file mode 100644
index 00000000..56dbfa7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_forwarding_rule.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_forwarding_rule
+short_description: Create, Update or Destroy a Forwarding_Rule.
+description:
+ - Create, Update or Destroy a Forwarding_Rule. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+    More details on the Global Forwarding_Rule API can be found at
+    U(https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules).
+    More details on the Forwarding Rules API can be found at
+    U(https://cloud.google.com/compute/docs/reference/latest/forwardingRules).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_forwarding_rule) or M(google.cloud.gcp_compute_global_forwarding_rule) instead.
+notes:
+ - Currently only supports global forwarding rules.
+ As such, Load Balancing Scheme is always EXTERNAL.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ address:
+ type: str
+ description:
+ - IPv4 or named IP address. Must be of the same scope (regional, global).
+ Reserved addresses can (and probably should) be used for global
+ forwarding rules. You may reserve IPs from the console or
+ via the gce_eip module.
+ required: false
+ forwarding_rule_name:
+ type: str
+ description:
+ - Name of the Forwarding_Rule.
+ required: true
+ port_range:
+ type: str
+ description:
+ - For global forwarding rules, must be set to 80 or 8080 for TargetHttpProxy, and
+ 443 for TargetHttpsProxy or TargetSslProxy.
+ required: false
+ protocol:
+ type: str
+ description:
+ - For global forwarding rules, TCP, UDP, ESP, AH, SCTP or ICMP. Default is TCP.
+ required: false
+ choices: [TCP]
+ default: TCP
+ region:
+ type: str
+ description:
+ - The region for this forwarding rule. Currently, only 'global' is supported.
+ required: true
+ state:
+ type: str
+ description:
+      - The state of the Forwarding Rule. C(present) or C(absent) are the only valid options.
+ required: true
+ choices: ["present", "absent"]
+ target:
+ type: str
+ description:
+ - Target resource for forwarding rule. For global proxy, this is a Global
+ TargetProxy resource. Required for external load balancing (including Global load balancing)
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ load_balancing_scheme:
+ type: str
+ choices: [EXTERNAL]
+ default: EXTERNAL
+ description:
+ - Load balancing scheme. At the moment the only choice is EXTERNAL.
+'''
+
+EXAMPLES = '''
+- name: Create Minimum GLOBAL Forwarding_Rule
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ region: global
+ target: my-target-proxy
+ state: present
+
+- name: Create Forwarding_Rule w/reserved static address
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ address: my-reserved-static-address-name
+ region: global
+ target: my-target-proxy
+ state: present
+'''
+
+RETURN = '''
+forwarding_rule_name:
+ description: Name of the Forwarding_Rule
+ returned: Always
+ type: str
+ sample: my-target-proxy
+forwarding_rule:
+ description: GCP Forwarding_Rule dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-forwarding_rule", "target": "..." }
+region:
+ description: Region for Forwarding Rule.
+ returned: Always
+  type: str
+  sample: global
+state:
+ description: state of the Forwarding_Rule
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-forwarding_rule'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_global_forwarding_rule_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP API.
+    :rtype: ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'forwarding_rule_name')
+ if 'target' in gcp_dict:
+ gcp_dict['target'] = '%s/global/targetHttpProxies/%s' % (url,
+ gcp_dict['target'])
+ if 'address' in gcp_dict:
+ gcp_dict['IPAddress'] = '%s/global/addresses/%s' % (url,
+ gcp_dict['address'])
+ del gcp_dict['address']
+ if 'protocol' in gcp_dict:
+ gcp_dict['IPProtocol'] = gcp_dict['protocol']
+ del gcp_dict['protocol']
+ return gcp_dict
+
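+# Illustrative sketch (hypothetical values, URL shape assumed): assuming
+# GCPUtils.params_to_gcp_dict camel-cases keys and maps 'forwarding_rule_name'
+# to 'name', params like
+#   {'forwarding_rule_name': 'my-rule', 'protocol': 'TCP',
+#    'port_range': '80', 'target': 'my-proxy'}
+# become
+#   {'name': 'my-rule', 'portRange': '80', 'IPProtocol': 'TCP',
+#    'target': '.../projects/<project>/global/targetHttpProxies/my-proxy'}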
+
+def get_global_forwarding_rule(client, name, project_id=None):
+ """
+ Get a Global Forwarding Rule from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.globalForwardingRules().get(
+ project=project_id, forwardingRule=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_global_forwarding_rule(client, params, project_id):
+ """
+ Create a new Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+ try:
+ req = client.globalForwardingRules().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_global_forwarding_rule(client, name, project_id):
+ """
+ Delete a Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.globalForwardingRules().delete(
+ project=project_id, forwardingRule=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_global_forwarding_rule(client, forwarding_rule, params, name, project_id):
+ """
+ Update a Global Forwarding_Rule. Currently, only a target can be updated.
+
+ If the forwarding_rule has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param forwarding_rule: The existing forwarding rule, as returned by the API.
+ :type forwarding_rule: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+
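+    # The are_params_equal() result is discarded here; only the 'target'
+    # field is compared below, since setTarget() is the only update performed.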
+ GCPUtils.are_params_equal(forwarding_rule, gcp_dict)
+ if forwarding_rule['target'] == gcp_dict['target']:
+ return (False, 'no update necessary')
+
+ try:
+ req = client.globalForwardingRules().setTarget(project=project_id,
+ forwardingRule=name,
+ body={'target': gcp_dict['target']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ forwarding_rule_name=dict(required=True),
+ region=dict(required=True),
+ target=dict(required=False),
+ address=dict(type='str', required=False),
+ protocol=dict(required=False, default='TCP', choices=['TCP']),
+ port_range=dict(required=False),
+ load_balancing_scheme=dict(
+ required=False, default='EXTERNAL', choices=['EXTERNAL']),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['forwarding_rule_name'] = module.params.get('forwarding_rule_name')
+ params['region'] = module.params.get('region')
+ params['target'] = module.params.get('target', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['port_range'] = module.params.get('port_range')
+ if module.params.get('address', None):
+ params['address'] = module.params.get('address', None)
+
+ if params['region'] != 'global':
+ # This module currently doesn't support regional rules.
+ module.fail_json(
+ msg=("%s - Only global forwarding rules currently supported. "
+ "Be sure to specify 'global' for the region option.") %
+ (params['forwarding_rule_name']))
+
+ changed = False
+ json_output = {'state': params['state']}
+ forwarding_rule = None
+ if params['region'] == 'global':
+ forwarding_rule = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ if not forwarding_rule:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown forwarding_rule: %s" %
+ (params['forwarding_rule_name']))
+ else:
+ # Create
+ changed, json_output['forwarding_rule'] = create_global_forwarding_rule(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['forwarding_rule'] = delete_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['forwarding_rule'] = update_global_forwarding_rule(client,
+ forwarding_rule=forwarding_rule,
+ params=params,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_healthcheck.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_healthcheck.py
new file mode 100644
index 00000000..19b28653
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_healthcheck.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_healthcheck
+short_description: Create, Update or Destroy a Healthcheck.
+description:
+ - Create, Update or Destroy a Healthcheck. Currently only HTTP and
+ HTTPS Healthchecks are supported. Healthchecks are used to monitor
+ individual instances, managed instance groups and/or backend
+    services. Healthchecks are reusable.
+ - Visit
+ U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
+ for an overview of Healthchecks on GCP.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
+ API details on HTTP Healthchecks.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
+    for more details on the HTTPS Healthcheck API.
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports HTTP and HTTPS Healthchecks currently.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: >
+ Use M(google.cloud.gcp_compute_health_check), M(google.cloud.gcp_compute_http_health_check) or
+ M(google.cloud.gcp_compute_https_health_check) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ check_interval:
+ type: int
+ description:
+ - How often (in seconds) to send a health check.
+ default: 5
+ healthcheck_name:
+ type: str
+ description:
+ - Name of the Healthcheck.
+ required: true
+ healthcheck_type:
+ type: str
+ description:
+ - Type of Healthcheck.
+ required: true
+ choices: ["HTTP", "HTTPS"]
+ host_header:
+ type: str
+ description:
+ - The value of the host header in the health check request. If left
+ empty, the public IP on behalf of which this health
+ check is performed will be used.
+ default: ""
+ port:
+ type: int
+ description:
+ - The TCP port number for the health check request. The default value is
+ 443 for HTTPS and 80 for HTTP.
+ request_path:
+ type: str
+ description:
+      - The request path of the health check request.
+ required: false
+ default: "/"
+ state:
+ type: str
+ description: State of the Healthcheck.
+ choices: ["present", "absent"]
+ default: present
+ timeout:
+ type: int
+ description:
+ - How long (in seconds) to wait for a response before claiming
+ failure. It is invalid for timeout
+ to have a greater value than check_interval.
+ default: 5
+ unhealthy_threshold:
+ type: int
+ description:
+ - A so-far healthy instance will be marked unhealthy after this
+ many consecutive failures.
+ default: 2
+ healthy_threshold:
+ type: int
+ description:
+ - A so-far unhealthy instance will be marked healthy after this
+ many consecutive successes.
+ default: 2
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - Your GCP project ID
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+ state: present
+- name: Create HTTP HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+    host_header: my-host
+    request_path: /hc
+    check_interval: 10
+    timeout: 10
+    unhealthy_threshold: 2
+    healthy_threshold: 1
+ state: present
+- name: Create HTTPS HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: "{{ https_healthcheck }}"
+ healthcheck_type: HTTPS
+ host_header: my-host
+ request_path: /hc
+ check_interval: 5
+ timeout: 5
+ unhealthy_threshold: 2
+ healthy_threshold: 1
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Healthcheck
+ returned: Always.
+ type: str
+ sample: present
+healthcheck_name:
+ description: Name of the Healthcheck
+ returned: Always
+ type: str
+ sample: my-url-map
+healthcheck_type:
+ description: Type of the Healthcheck
+ returned: Always
+ type: str
+ sample: HTTP
+healthcheck:
+ description: GCP Healthcheck dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-healthcheck'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_healthcheck_params(params):
+ """
+ Validate healthcheck params.
+
+    Simple validation has already been performed by AnsibleModule.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ if params['timeout'] > params['check_interval']:
+ raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
+ params['timeout'], params['check_interval']))
+
+ return (True, '')
+
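+# Illustrative sketch (hypothetical values): a timeout larger than the check
+# interval is rejected.
+#
+#   _validate_healthcheck_params({'timeout': 10, 'check_interval': 5})
+#   # -> ValueError: timeout (10) is greater than check_interval (5)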
+
+def _build_healthcheck_dict(params):
+ """
+ Reformat services in Ansible Params for GCP.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :return: dictionary suitable for submission to GCP
+ HealthCheck (HTTP/HTTPS) API.
+    :rtype: ``dict``
+ """
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
+ if 'timeout' in gcp_dict:
+ gcp_dict['timeoutSec'] = gcp_dict['timeout']
+ del gcp_dict['timeout']
+
+ if 'checkInterval' in gcp_dict:
+ gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
+ del gcp_dict['checkInterval']
+
+ if 'hostHeader' in gcp_dict:
+ gcp_dict['host'] = gcp_dict['hostHeader']
+ del gcp_dict['hostHeader']
+
+ if 'healthcheckType' in gcp_dict:
+ del gcp_dict['healthcheckType']
+ return gcp_dict
+
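+# Illustrative sketch (hypothetical values): assuming params_to_gcp_dict
+# camel-cases keys and maps 'healthcheck_name' to 'name', the renames above give
+#
+#   {'healthcheck_name': 'my-hc', 'check_interval': 5, 'timeout': 5,
+#    'host_header': 'my-host'}
+#   # -> {'name': 'my-hc', 'checkIntervalSec': 5, 'timeoutSec': 5, 'host': 'my-host'}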
+
+def _get_req_resource(client, resource_type):
+ if resource_type == 'HTTPS':
+ return (client.httpsHealthChecks(), 'httpsHealthCheck')
+ else:
+ return (client.httpHealthChecks(), 'httpHealthCheck')
+
+
+def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
+ """
+ Get a Healthcheck from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.get(**args)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_healthcheck(client, params, project_id, resource_type='HTTP'):
+ """
+ Create a new Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ try:
+ resource, _ = _get_req_resource(client, resource_type)
+ args = {'project': project_id, 'body': gcp_dict}
+ req = resource.insert(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
+ """
+ Delete a Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.delete(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_healthcheck(client, healthcheck, params, name, project_id,
+ resource_type='HTTP'):
+ """
+ Update a Healthcheck.
+
+ If the healthcheck has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param healthcheck: The existing Healthcheck, as returned by the API.
+ :type healthcheck: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+    :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name, 'body': gcp_dict}
+ req = resource.update(**args)
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ healthcheck_name=dict(required=True),
+ healthcheck_type=dict(required=True,
+ choices=['HTTP', 'HTTPS']),
+ request_path=dict(required=False, default='/'),
+ check_interval=dict(required=False, type='int', default=5),
+ healthy_threshold=dict(required=False, type='int', default=2),
+ unhealthy_threshold=dict(required=False, type='int', default=2),
+ host_header=dict(required=False, type='str', default=''),
+ timeout=dict(required=False, type='int', default=5),
+ port=dict(required=False, type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+
+ params['healthcheck_name'] = module.params.get('healthcheck_name')
+ params['healthcheck_type'] = module.params.get('healthcheck_type')
+ params['request_path'] = module.params.get('request_path')
+ params['check_interval'] = module.params.get('check_interval')
+ params['healthy_threshold'] = module.params.get('healthy_threshold')
+ params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
+ params['host_header'] = module.params.get('host_header')
+ params['timeout'] = module.params.get('timeout')
+ params['port'] = module.params.get('port', None)
+ params['state'] = module.params.get('state')
+
+ if not params['port']:
+ params['port'] = 80
+ if params['healthcheck_type'] == 'HTTPS':
+ params['port'] = 443
+ try:
+ _validate_healthcheck_params(params)
+ except Exception as e:
+        module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ healthcheck = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+
+ if not healthcheck:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown healthcheck: %s" %
+ (params['healthcheck_name']))
+ else:
+ # Create
+ changed, json_output['healthcheck'] = create_healthcheck(client,
+ params=params,
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['healthcheck'] = delete_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ else:
+ changed, json_output['healthcheck'] = update_healthcheck(client,
+ healthcheck=healthcheck,
+ params=params,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_target_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_target_proxy.py
new file mode 100644
index 00000000..611cee04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_target_proxy.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_target_proxy
+short_description: Create, Update or Destroy a Target_Proxy.
+description:
+ - Create, Update or Destroy a Target_Proxy. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+ More details on the Target_Proxy API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies#resource-representations).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_target_http_proxy) instead.
+notes:
+ - Currently only supports global HTTP proxy.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ target_proxy_name:
+ type: str
+ description:
+ - Name of the Target_Proxy.
+ required: true
+ target_proxy_type:
+ type: str
+ description:
+ - Type of Target_Proxy. HTTP, HTTPS or SSL. Only HTTP is currently supported.
+ required: true
+ choices: [HTTP]
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url Map. Required if type is HTTP or HTTPS proxy.
+ required: false
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: str
+ description:
+      - Path to the PEM file associated with the service account email.
+      - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the target proxy should be in. C(present) or C(absent) are the only valid options.
+ required: true
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HTTP Target_Proxy
+ community.general.gcp_target_proxy:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ target_proxy_name: my-target_proxy
+ target_proxy_type: HTTP
+ url_map_name: my-url-map
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Target_Proxy
+ returned: Always.
+ type: str
+ sample: present
+updated_target_proxy:
+ description: True if the target_proxy has been updated. Will not appear on
+ initial target_proxy creation.
+ returned: if the target_proxy has been updated.
+ type: bool
+ sample: true
+target_proxy_name:
+ description: Name of the Target_Proxy
+ returned: Always
+ type: str
+ sample: my-target-proxy
+target_proxy_type:
+ description: Type of Target_Proxy. One of HTTP, HTTPS or SSL.
+ returned: Always
+ type: str
+ sample: HTTP
+target_proxy:
+ description: GCP Target_Proxy dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-target-proxy", "urlMap": "..." }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-target_proxy'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_target_proxy_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP UrlMap API.
+    :rtype: ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'target_proxy_name')
+ if 'urlMap' in gcp_dict:
+ gcp_dict['urlMap'] = '%s/global/urlMaps/%s' % (url,
+ gcp_dict['urlMap'])
+ return gcp_dict
+
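+# Illustrative sketch (hypothetical names, URL shape assumed): assuming
+# params_to_gcp_dict maps 'target_proxy_name' to 'name' and camel-cases
+# 'url_map', {'target_proxy_name': 'my-proxy', 'url_map': 'my-map'} becomes
+#   {'name': 'my-proxy',
+#    'urlMap': '.../projects/<project>/global/urlMaps/my-map'}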
+
+def get_target_http_proxy(client, name, project_id=None):
+ """
+ Get a Target HTTP Proxy from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ req = client.targetHttpProxies().get(project=project_id,
+ targetHttpProxy=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+
+
+def create_target_http_proxy(client, params, project_id):
+ """
+ Create a new Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+ try:
+ req = client.targetHttpProxies().insert(project=project_id,
+ body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_target_http_proxy(client, name, project_id):
+ """
+ Delete a Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.targetHttpProxies().delete(
+ project=project_id, targetHttpProxy=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_target_http_proxy(client, target_proxy, params, name, project_id):
+ """
+ Update a HTTP Target_Proxy. Currently only the Url Map can be updated.
+
+ If the target_proxy has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param target_proxy: The existing target proxy, as returned by the API.
+ :type target_proxy: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+
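+    # The are_params_equal() result is discarded here; only the 'urlMap'
+    # field is compared below, since setUrlMap() is the only update performed.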
+ GCPUtils.are_params_equal(target_proxy, gcp_dict)
+ if target_proxy['urlMap'] == gcp_dict['urlMap']:
+ return (False, 'no update necessary')
+
+ try:
+ req = client.targetHttpProxies().setUrlMap(project=project_id,
+ targetHttpProxy=name,
+ body={"urlMap": gcp_dict['urlMap']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ target_proxy_name=dict(required=True),
+ target_proxy_type=dict(required=True, choices=['HTTP']),
+ url_map_name=dict(required=False),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['target_proxy_name'] = module.params.get('target_proxy_name')
+ params['target_proxy_type'] = module.params.get('target_proxy_type')
+ params['url_map'] = module.params.get('url_map_name', None)
+
+ changed = False
+ json_output = {'state': params['state']}
+ target_proxy = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+
+ if not target_proxy:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown target_proxy: %s" %
+ (params['target_proxy_name']))
+ else:
+ # Create
+ changed, json_output['target_proxy'] = create_target_http_proxy(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['target_proxy'] = delete_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['target_proxy'] = update_target_http_proxy(client,
+ target_proxy=target_proxy,
+ params=params,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_target_proxy'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_url_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_url_map.py
new file mode 100644
index 00000000..3fc2c96b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_url_map.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_url_map
+short_description: Create, Update or Destroy a Url_Map.
+description:
+ - Create, Update or Destroy a Url_Map. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/url-map) for an overview.
+ More details on the Url_Map API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/urlMaps#resource).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports global Backend Services.
+ - Url_Map tests are not currently supported.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_url_map) instead.
+options:
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url_Map.
+ required: true
+ default_service:
+ type: str
+ description:
+ - Default Backend Service if no host rules match.
+ required: true
+ host_rules:
+ type: list
+ description:
+ - The list of HostRules to use against the URL. Contains
+ a list of hosts and an associated path_matcher.
+ - The 'hosts' parameter is a list of host patterns to match. They
+ must be valid hostnames, except * will match any string of
+ ([a-z0-9-.]*). In that case, * must be the first character
+        and must be followed in the pattern by either '-' or '.'.
+      - The 'path_matcher' parameter is the name of the PathMatcher to use
+ to match the path portion of the URL if the hostRule matches the URL's
+ host portion.
+ required: false
+ path_matchers:
+ type: list
+ description:
+ - The list of named PathMatchers to use against the URL. Contains
+ path_rules, which is a list of paths and an associated service. A
+ default_service can also be specified for each path_matcher.
+      - The 'name' parameter is the name by which host_rules refer to this
+        path_matcher.
+ - The 'default_service' parameter is the name of the
+ BackendService resource. This will be used if none of the path_rules
+ defined by this path_matcher is matched by the URL's path portion.
+ - The 'path_rules' parameter is a list of dictionaries containing a
+ list of paths and a service to direct traffic to. Each path item must
+ start with / and the only place a * is allowed is at the end following
+ a /. The string fed to the path matcher does not include any text after
+ the first ? or #, and those chars are not allowed here.
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the URL map should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimal Url_Map
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url_map
+ default_service: my-backend-service
+ state: present
+- name: Create UrlMap with pathmatcher
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url-map-pm
+ default_service: default-backend-service
+ path_matchers:
+ - name: 'path-matcher-one'
+ description: 'path matcher one'
+ default_service: 'bes-pathmatcher-one-default'
+ path_rules:
+ - service: 'my-one-bes'
+ paths:
+ - '/data'
+ - '/aboutus'
+ host_rules:
+ - hosts:
+ - '*.'
+ path_matcher: 'path-matcher-one'
+ state: "present"
+'''
+
+RETURN = '''
+host_rules:
+ description: List of HostRules.
+ returned: If specified.
+  type: list
+  sample: [ { "hosts": ["*."], "path_matcher": "my-pm" } ]
+path_matchers:
+ description: The list of named PathMatchers to use against the URL.
+ returned: If specified.
+  type: list
+ sample: [ { "name": "my-pm", "path_rules": [ { "paths": [ "/data" ] } ], "service": "my-service" } ]
+state:
+ description: state of the Url_Map
+ returned: Always.
+ type: str
+ sample: present
+updated_url_map:
+ description: True if the url_map has been updated. Will not appear on
+ initial url_map creation.
+ returned: if the url_map has been updated.
+ type: bool
+ sample: true
+url_map_name:
+ description: Name of the Url_Map
+ returned: Always
+ type: str
+ sample: my-url-map
+url_map:
+ description: GCP Url_Map dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-url-map", "hostRules": [...], "pathMatchers": [...] }
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+from ansible.module_utils.six import string_types
+
+
+USER_AGENT_PRODUCT = 'ansible-url_map'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_params(params):
+ """
+ Validate url_map params.
+
+ This function calls _validate_host_rules_params to verify
+ the host_rules-specific parameters.
+
+ This function calls _validate_path_matchers_params to verify
+ the path_matchers-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'host_rules', 'type': list},
+ {'name': 'path_matchers', 'type': list},
+ ]
+ try:
+ check_params(params, fields)
+ if 'path_matchers' in params and params['path_matchers'] is not None:
+ _validate_path_matcher_params(params['path_matchers'])
+ if 'host_rules' in params and params['host_rules'] is not None:
+ _validate_host_rules_params(params['host_rules'])
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_path_matcher_params(path_matchers):
+ """
+ Validate configuration for path_matchers.
+
+    :param path_matchers: Ansible list containing path_matchers
+        configuration (only).
+    :type path_matchers: ``list``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'name', 'type': str, 'required': True},
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'path_rules', 'type': list, 'required': True},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+ pr_fields = [
+ {'name': 'service', 'type': str, 'required': True},
+ {'name': 'paths', 'type': list, 'required': True},
+ ]
+
+ if not path_matchers:
+ raise ValueError(('path_matchers should be a list. %s (%s) provided'
+ % (path_matchers, type(path_matchers))))
+
+ for pm in path_matchers:
+ try:
+ check_params(pm, fields)
+ for pr in pm['path_rules']:
+ check_params(pr, pr_fields)
+ for path in pr['paths']:
+ if not path.startswith('/'):
+ raise ValueError("path for %s must start with /" % (
+ pm['name']))
+ except Exception:
+ raise
+
+ return (True, '')
+
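+# Illustrative sketch (hypothetical names): a minimal path_matcher that passes
+# the checks above; every path must start with '/'.
+#
+#   _validate_path_matcher_params([
+#       {'name': 'pm-one', 'default_service': 'bes-default',
+#        'path_rules': [{'service': 'my-bes', 'paths': ['/data']}]},
+#   ])  # -> (True, '')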
+
+def _validate_host_rules_params(host_rules):
+ """
+ Validate configuration for host_rules.
+
+    :param host_rules: Ansible list containing host_rules
+        configuration (only).
+    :type host_rules: ``list``
+
+ :return: (True, '') on success; raises ValueError on invalid input
+ :rtype: ``tuple``
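+
+ An example of valid input (illustrative values); note the wildcard
+ rules enforced below, where a leading '*' must be followed by '.'
+ or '-'::
+
+ [{'hosts': ['*.example.com'], 'path_matcher': 'my-pm'}]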
+ """
+ fields = [
+ {'name': 'path_matcher', 'type': str, 'required': True},
+ ]
+
+ if not host_rules:
+ raise ValueError('host_rules should be a list.')
+
+ for hr in host_rules:
+ try:
+ check_params(hr, fields)
+ for host in hr['hosts']:
+ if not isinstance(host, string_types):
+ raise ValueError("host in hostrules must be a string")
+ elif '*' in host:
+ if host.index('*') != 0:
+ raise ValueError("wildcard must be first char in host, %s" % (
+ host))
+ else:
+ if host[1] not in ['.', '-', ]:
+ raise ValueError("wildcard be followed by a '.' or '-', %s" % (
+ host))
+
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _build_path_matchers(path_matcher_list, project_id):
+ """
+ Reformat services in path matchers list.
+
+ Specifically, builds out URLs.
+
+ :param path_matcher_list: List of path matcher dictionaries.
+ :type path_matcher_list: ``list`` of ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: list suitable for submission to GCP
+ UrlMap API Path Matchers list.
+ :rtype: ``list`` of ``dict``
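+
+ For example, a bare service name such as 'web-backend' in
+ 'defaultService' or in a path rule's 'service' is expanded to a
+ full resource URL of the form (where the prefix comes from
+ GCPUtils.build_googleapi_url)::
+
+ <googleapi-url>/global/backendServices/web-backend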
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ for pm in path_matcher_list:
+ if 'defaultService' in pm:
+ pm['defaultService'] = '%s/global/backendServices/%s' % (url,
+ pm['defaultService'])
+ if 'pathRules' in pm:
+ for rule in pm['pathRules']:
+ if 'service' in rule:
+ rule['service'] = '%s/global/backendServices/%s' % (url,
+ rule['service'])
+ return path_matcher_list
+
+
+def _build_url_map_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP UrlMap API.
+ :rtype: ``dict``
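+
+ For example, the 'url_map_name' param is mapped to the GCP 'name'
+ field, snake_case keys such as 'default_service' become camelCase
+ ('defaultService'), and service names are expanded to full
+ backendServices URLs as in _build_path_matchers.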
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'url_map_name')
+ if 'defaultService' in gcp_dict:
+ gcp_dict['defaultService'] = '%s/global/backendServices/%s' % (url,
+ gcp_dict['defaultService'])
+ if 'pathMatchers' in gcp_dict:
+ gcp_dict['pathMatchers'] = _build_path_matchers(gcp_dict['pathMatchers'], project_id)
+
+ return gcp_dict
+
+
+def get_url_map(client, name, project_id=None):
+ """
+ Get a Url_Map from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.urlMaps().get(project=project_id, urlMap=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_url_map(client, params, project_id):
+ """
+ Create a new Url_Map.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+ try:
+ req = client.urlMaps().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_url_map(client, name, project_id):
+ """
+ Delete a Url_Map.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.urlMaps().delete(project=project_id, urlMap=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_url_map(client, url_map, params, name, project_id):
+ """
+ Update a Url_Map.
+
+ If the url_map has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param url_map: Existing Url_Map dictionary, as returned by get_url_map.
+ :type url_map: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+
+ ans = GCPUtils.are_params_equal(url_map, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
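+ # Echo the current fingerprint back to GCP; the API uses it for
+ # optimistic locking to detect concurrent modifications.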
+ gcp_dict['fingerprint'] = url_map['fingerprint']
+ try:
+ req = client.urlMaps().update(project=project_id,
+ urlMap=name, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ url_map_name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ default_service=dict(required=True),
+ path_matchers=dict(type='list', required=False),
+ host_rules=dict(type='list', required=False),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), required_together=[
+ ['path_matchers', 'host_rules'], ])
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['url_map_name'] = module.params.get('url_map_name')
+ params['default_service'] = module.params.get('default_service')
+ if module.params.get('path_matchers'):
+ params['path_matchers'] = module.params.get('path_matchers')
+ if module.params.get('host_rules'):
+ params['host_rules'] = module.params.get('host_rules')
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+ module.fail_json(msg=str(e), changed=False)  # str(e): e.message does not exist on Python 3
+
+ changed = False
+ json_output = {'state': params['state']}
+ url_map = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+
+ if not url_map:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown url_map: %s" %
+ (params['url_map_name']))
+ else:
+ # Create
+ changed, json_output['url_map'] = create_url_map(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['url_map'] = delete_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['url_map'] = update_url_map(client,
+ url_map=url_map,
+ params=params,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_url_map'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub.py
new file mode 100644
index 00000000..de257503
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcpubsub
+short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub
+description:
+ - Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+requirements:
+ - google-auth >= 0.5.0
+ - google-cloud-pubsub >= 0.22.0
+notes:
+ - Subscription pull happens before publish. You cannot publish and pull in the same task.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name.
+ - Only the name, not the full path, is required.
+ required: yes
+ subscription:
+ type: dict
+ description:
+ - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+ For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields.
+ See subfields name, push_endpoint and ack_deadline for more information.
+ suboptions:
+ name:
+ description:
+ - Subfield of subscription. Required if subscription is specified. See examples.
+ ack_deadline:
+ description:
+ - Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
+ pull:
+ description:
+ - Subfield of subscription. Not required. If specified, messages will be retrieved from the topic via the
+ provided subscription name. Available subfields are max_messages (int; default None; max number of
+ messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately
+ (bool; default True; don't wait for messages to appear). If the messages are acknowledged,
+ changed is set to True; otherwise, changed is False.
+ push_endpoint:
+ description:
+ - Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
+ See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
+ publish:
+ type: list
+ description:
+ - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ state:
+ type: str
+ description:
+ - State of the topic or queue.
+ - Applies to the most granular resource.
+ - If subscription is specified we remove it.
+ - If only topic is specified, that is what is removed.
+ - NOTE - A topic can be removed without first removing the subscription.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+# Message will be pushed; there is no check to see if the message was pushed before.
+- name: Create a topic and publish a message to it
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: present
+
+# Subscriptions associated with topic are not deleted.
+- name: Delete Topic
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: absent
+
+# Setting state to absent would keep the messages from being sent.
+- name: Publish multiple messages, with attributes (key:value available with the message)
+ community.general.gcpubsub:
+ topic: '{{ topic_name }}'
+ state: present
+ publish:
+ - message: this is message 1
+ attributes:
+ mykey1: myvalue
+ mykey2: myvalu2
+ mykey3: myvalue3
+ - message: this is message 2
+ attributes:
+ server: prod
+ sla: "99.9999"
+ owner: fred
+
+- name: Create Subscription (pull)
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: present
+
+# pull is default, ack_deadline is not required
+- name: Create Subscription with ack_deadline and push endpoint
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ ack_deadline: "60"
+ push_endpoint: http://pushendpoint.example.com
+ state: present
+
+# Setting push_endpoint to "None" converts subscription to pull.
+- name: Subscription change from push to pull
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: mysub
+ push_endpoint: "None"
+
+# Topic will not be deleted
+- name: Delete subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: absent
+
+# Only the pull keyword is required.
+- name: Pull messages from subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: ansible-topic-example-sub
+ pull:
+ message_ack: yes
+ max_messages: "100"
+'''
+
+RETURN = '''
+publish:
+ description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ returned: Only when specified
+ type: list
+ sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
+
+pulled_messages:
+ description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
+ returned: Only when subscription.pull is specified
+ type: list
+ sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
+
+state:
+ description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+subscription:
+ description: Name of subscription.
+ returned: When subscription fields are specified
+ type: str
+ sample: "mysubscription"
+
+topic:
+ description: Name of topic.
+ returned: Always
+ type: str
+ sample: "mytopic"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+CLOUD_CLIENT = 'google-cloud-pubsub'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
+
+
+def publish_messages(message_list, topic):
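+ """Publish a list of message dicts to the given pubsub topic.
+
+ Each entry has the shape (illustrative values)
+ {'message': 'my message', 'attributes': {'key1': 'value1'}};
+ only 'message' is required.
+ """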
+ with topic.batch() as batch:
+ for message in message_list:
+ msg = message['message']
+ attrs = {}
+ if 'attributes' in message:
+ attrs = message['attributes']
+ # to_bytes handles both Python 2 and 3; bare bytes() needs an encoding on 3.
+ batch.publish(to_bytes(msg), **attrs)
+ return True
+
+
+def pull_messages(pull_params, sub):
+ """
+ :rtype: tuple (output, changed)
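+
+ An example pull_params dict (illustrative values)::
+
+ {'max_messages': 100, 'message_ack': True, 'return_immediately': False}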
+ """
+ changed = False
+ max_messages = pull_params.get('max_messages', None)
+ # Default to False, matching the documented default; the previous default
+ # of the truthy string 'no' would have acknowledged messages unintentionally.
+ message_ack = pull_params.get('message_ack', False)
+ return_immediately = pull_params.get('return_immediately', False)
+
+ output = []
+ pulled = sub.pull(return_immediately=return_immediately, max_messages=max_messages)
+
+ for ack_id, msg in pulled:
+ msg_dict = {'message_id': msg.message_id,
+ 'attributes': msg.attributes,
+ 'data': msg.data,
+ 'ack_id': ack_id}
+ output.append(msg_dict)
+
+ if message_ack:
+ ack_ids = [m['ack_id'] for m in output]
+ if ack_ids:
+ sub.acknowledge(ack_ids)
+ changed = True
+ return (output, changed)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ topic=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ publish=dict(type='list'),
+ subscription=dict(type='dict'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['publish'] = module.params.get('publish')
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['subscription'] = module.params.get('subscription')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
+
+ changed = False
+ json_output = {}
+
+ t = None
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ s = None
+ if mod_params['subscription']:
+ # Note: default ack deadline cannot be changed without deleting/recreating subscription
+ s = t.subscription(mod_params['subscription']['name'],
+ ack_deadline=mod_params['subscription'].get('ack_deadline', None),
+ push_endpoint=mod_params['subscription'].get('push_endpoint', None))
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If subscription is specified
+ # we remove it. If only topic is specified, that is what is removed.
+ # Note that a topic can be removed without first removing the subscription.
+ # TODO(supertom): Enhancement: Provide an option to only delete a topic
+ # if there are no subscriptions associated with it (which the API does not support).
+ if s is not None:
+ if s.exists():
+ s.delete()
+ changed = True
+ else:
+ if t.exists():
+ t.delete()
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not t.exists():
+ t.create()
+ changed = True
+ if s:
+ if not s.exists():
+ s.create()
+ s.reload()
+ changed = True
+ else:
+ # Subscription operations
+ # TODO(supertom): if more 'update' operations arise, turn this into a function.
+ s.reload()
+ push_endpoint = mod_params['subscription'].get('push_endpoint', None)
+ if push_endpoint is not None:
+ if push_endpoint != s.push_endpoint:
+ if push_endpoint == 'None':
+ push_endpoint = None
+ s.modify_push_configuration(push_endpoint=push_endpoint)
+ s.reload()
+ changed = push_endpoint == s.push_endpoint
+
+ if 'pull' in mod_params['subscription']:
+ if s.push_endpoint is not None:
+ module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
+ (json_output['pulled_messages'], changed) = pull_messages(
+ mod_params['subscription']['pull'], s)
+
+ # publish messages to the topic
+ if mod_params['publish'] and len(mod_params['publish']) > 0:
+ changed = publish_messages(mod_params['publish'], t)
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_facts.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_facts.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+ - The list state enables the user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_info.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+ - The list state enables the user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcspanner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcspanner.py
new file mode 100644
index 00000000..e88fc26b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcspanner.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcspanner
+short_description: Create and Delete Instances/Databases on Spanner
+description:
+ - Create and Delete Instances/Databases on Spanner.
+ See U(https://cloud.google.com/spanner/docs) for an overview.
+requirements:
+ - python >= 2.6
+ - google-auth >= 0.5.0
+ - google-cloud-spanner >= 0.23.0
+notes:
+ - Changing the configuration on an existing instance is not supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_spanner_database) and/or M(google.cloud.gcp_spanner_instance) instead.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ configuration:
+ type: str
+ description:
+ - Configuration the instance should use.
+ - Examples are us-central1, asia-east1 and europe-west1.
+ required: yes
+ instance_id:
+ type: str
+ description:
+ - GCP spanner instance name.
+ required: yes
+ database_name:
+ type: str
+ description:
+ - Name of database contained on the instance.
+ force_instance_delete:
+ description:
+ - To delete an instance, this argument must exist and be true (along with state being equal to absent).
+ type: bool
+ default: 'no'
+ instance_display_name:
+ type: str
+ description:
+ - Name of Instance to display.
+ - If not specified, instance_id will be used instead.
+ node_count:
+ type: int
+ description:
+ - Number of nodes in the instance.
+ default: 1
+ state:
+ type: str
+ description:
+ - State of the instance or database. Applies to the most granular resource.
+ - If a C(database_name) is specified we remove it.
+ - If only C(instance_id) is specified, that is what is removed.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: Create instance
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: present
+ node_count: 1
+
+- name: Create database
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ database_name: '{{ database_name }}'
+ state: present
+
+- name: Delete instance (and all databases)
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: absent
+ force_instance_delete: yes
+'''
+
+RETURN = '''
+state:
+ description: The state of the instance or database. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+database_name:
+ description: Name of database.
+ returned: When database name is specified
+ type: str
+ sample: "mydatabase"
+
+instance_id:
+ description: Name of instance.
+ returned: Always
+ type: str
+ sample: "myinstance"
+
+previous_values:
+ description: List of dictionaries containing previous values prior to update.
+ returned: When an instance update has occurred and a field has been modified.
+ type: dict
+ sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"
+
+updated:
+ description: Boolean field to denote an update has occurred.
+ returned: When an update has occurred.
+ type: bool
+ sample: True
+'''
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import spanner
+ from google.gax.errors import GaxError
+ HAS_GOOGLE_CLOUD_SPANNER = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_SPANNER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+from ansible.module_utils.six import string_types
+
+
+CLOUD_CLIENT = 'google-cloud-spanner'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'
+
+
+def get_spanner_configuration_name(config_name, project_name):
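+ """Build the regional instance config path used by Spanner.
+
+ >>> get_spanner_configuration_name('us-central1', 'my-project')
+ 'projects/my-project/instanceConfigs/regional-us-central1'
+ """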
+ config_name = 'projects/%s/instanceConfigs/regional-%s' % (project_name,
+ config_name)
+ return config_name
+
+
+def instance_update(instance):
+ """
+ Call update method on spanner client.
+
+ Note: A ValueError exception is thrown despite the client succeeding.
+ So, we validate the node_count and instance_display_name parameters and then
+ ignore the ValueError exception.
+
+ :param instance: a Spanner instance object
+ :type instance: class `google.cloud.spanner.Instance`
+
+ :returns: True on success, raises ValueError on type error.
+ :rtype: ``bool``
+ """
+ errmsg = ''
+ if not isinstance(instance.node_count, int):
+ errmsg = 'node_count must be an integer %s (%s)' % (
+ instance.node_count, type(instance.node_count))
+ if instance.display_name and not isinstance(instance.display_name,
+ string_types):
+ errmsg = 'instance_display_name must be a string %s (%s)' % (
+ instance.display_name, type(instance.display_name))
+ if errmsg:
+ raise ValueError(errmsg)
+
+ try:
+ instance.update()
+ except ValueError:
+ # The ValueError here is the one we 'expect'.
+ pass
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ database_name=dict(type='str'),
+ configuration=dict(type='str', required=True),
+ node_count=dict(type='int', default=1),
+ instance_display_name=dict(type='str'),
+ force_instance_delete=dict(type='bool', default=False),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_SPANNER:
+ module.fail_json(msg="Please install google-cloud-spanner.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" %
+ (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['instance_id'] = module.params.get('instance_id')
+ mod_params['database_name'] = module.params.get('database_name')
+ mod_params['configuration'] = module.params.get('configuration')
+ mod_params['node_count'] = module.params.get('node_count', None)
+ mod_params['instance_display_name'] = module.params.get('instance_display_name')
+ mod_params['force_instance_delete'] = module.params.get('force_instance_delete')
+
+ creds, params = get_google_cloud_credentials(module)
+ spanner_client = spanner.Client(project=params['project_id'],
+ credentials=creds,
+ user_agent=CLOUD_CLIENT_USER_AGENT)
+ changed = False
+ json_output = {}
+
+ i = None
+ if mod_params['instance_id']:
+ config_name = get_spanner_configuration_name(
+ mod_params['configuration'], params['project_id'])
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name)
+ d = None
+ if mod_params['database_name']:
+ # TODO(supertom): support DDL
+ ddl_statements = ''
+ d = i.database(mod_params['database_name'], ddl_statements)
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If database is specified
+ # we remove it. If only instance is specified, that is what is removed.
+ if d is not None and d.exists():
+ d.drop()
+ changed = True
+ else:
+ if i.exists():
+ if mod_params['force_instance_delete']:
+ i.delete()
+ else:
+ module.fail_json(
+ msg=(("Cannot delete Spanner instance: "
+ "'force_instance_delete' argument not specified")))
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not i.exists():
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name,
+ display_name=mod_params['instance_display_name'],
+ node_count=mod_params['node_count'] or 1)
+ i.create()
+ changed = True
+ else:
+ # update instance
+ i.reload()
+ inst_prev_vals = {}
+ if i.display_name != mod_params['instance_display_name']:
+ inst_prev_vals['instance_display_name'] = i.display_name
+ i.display_name = mod_params['instance_display_name']
+ if mod_params['node_count']:
+ if i.node_count != mod_params['node_count']:
+ inst_prev_vals['node_count'] = i.node_count
+ i.node_count = mod_params['node_count']
+ if inst_prev_vals:
+ changed = instance_update(i)
+ json_output['updated'] = changed
+ json_output['previous_values'] = {'instance': inst_prev_vals}
+ if d:
+ if not d.exists():
+ d.create()
+ d.reload()
+ changed = True
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py
new file mode 100644
index 00000000..276b5b12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: heroku_collaborator
+short_description: "Add or delete app collaborators on Heroku"
+description:
+ - Manages collaborators for Heroku apps.
+ - If set to C(present) and the Heroku user is already a collaborator, then do nothing.
+ - If set to C(present) and the Heroku user is not a collaborator, then add the user to the app.
+ - If set to C(absent) and the Heroku user is a collaborator, then delete the user from the app.
+author:
+ - Marcel Arns (@marns93)
+requirements:
+ - heroku3
+options:
+ api_key:
+ type: str
+ description:
+ - Heroku API key
+ apps:
+ type: list
+ description:
+ - List of Heroku App names
+ required: true
+ suppress_invitation:
+ description:
+ - Suppress email invitation when creating collaborator
+ type: bool
+ default: "no"
+ user:
+ type: str
+ description:
+ - User ID or e-mail
+ required: true
+ state:
+ type: str
+ description:
+ - Create or remove the heroku collaborator
+ choices: ["present", "absent"]
+ default: "present"
+notes:
+ - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
+ - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
+'''
+
+EXAMPLES = '''
+- name: Create a heroku collaborator
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: max.mustermann@example.com
+ apps: heroku-example-app
+ state: present
+
+- name: An example of using the module in loop
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: '{{ item.user }}'
+ apps: '{{ item.apps | default(apps) }}'
+ suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
+ state: '{{ item.state | default("present") }}'
+ with_items:
+ - { user: 'a.b@example.com' }
+ - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
+ - { user: 'x.y@example.com', apps: ["heroku-example-app"] }
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper
+
+
+def add_or_delete_heroku_collaborator(module, client):
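+ """Add or remove the given user on each app, depending on state.
+
+ Returns a (changed, affected_apps) tuple, for example
+ (True, ['heroku-example-app']) when a collaborator was added or
+ removed (the app name here is illustrative, taken from EXAMPLES).
+ """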
+ user = module.params['user']
+ state = module.params['state']
+ affected_apps = []
+ result_state = False
+
+ for app in module.params['apps']:
+ if app not in client.apps():
+ module.fail_json(msg='App {0} does not exist'.format(app))
+
+ heroku_app = client.apps()[app]
+
+ heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()]
+
+ if state == 'absent' and user in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.remove_collaborator(user)
+ affected_apps += [app]
+ result_state = True
+ elif state == 'present' and user not in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
+ affected_apps += [app]
+ result_state = True
+
+ return result_state, affected_apps
+
+
+def main():
+ argument_spec = HerokuHelper.heroku_argument_spec()
+ argument_spec.update(
+ user=dict(required=True, type='str'),
+ apps=dict(required=True, type='list'),
+ suppress_invitation=dict(default=False, type='bool'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HerokuHelper(module).get_heroku_client()
+
+ has_changed, msg = add_or_delete_heroku_collaborator(module, client)
+ module.exit_json(changed=has_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py
new file mode 100644
index 00000000..3d4ba84b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py
@@ -0,0 +1,2135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_ecs_instance
+description:
+ - ECS instance management.
+short_description: Creates a resource of Ecs/Instance in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+ - The timeouts for delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ required: true
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ required: true
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the ECS name, which consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), and periods (.).
+ type: str
+ required: true
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. The
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ elements: dict
+ required: true
+ suboptions:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ required: true
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ required: true
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ required: true
+ suboptions:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - Note that for HANA, HL1, and HL2 ECSs, you should use co-p1 and
+ uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ required: false
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ required: true
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. The password consists of 8 to
+ 26 characters. The password must contain at least three of the
+ following character types 'uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ required: false
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ required: true
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ required: false
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be
+ assigned.
+ type: str
+ required: false
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ required: false
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this
+ parameter is left blank, the default security group is bound to
+ the ECS by default.
+ type: list
+ elements: str
+ required: false
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ required: false
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ required: false
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ required: false
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with
+ base64. The maximum size of the content to be injected (before
+ encoding) is 32 KB. For Linux ECSs, this parameter does not take
+ effect when adminPass is used.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create an ecs instance
+- name: Create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a eip
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ register: eip
+- name: Create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ register: disk
+- name: Create an instance
+ community.general.hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ returned: success
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the ECS name. Value requirements "Consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), periods (.)".
+ type: str
+ returned: success
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. The
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ returned: success
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID corresponding to the IP address.
+ type: str
+ returned: success
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ returned: success
+ contains:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - Note that for HANA, HL1, and HL2 ECSs, you should use co-p1 and
+ uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ returned: success
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. The password consists of 8 to
+ 26 characters. The password must contain at least three of the
+ following character types "uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)". The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ returned: success
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ returned: success
+ contains:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ returned: success
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be assigned.
+ type: str
+ returned: success
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ returned: success
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this parameter is left
+ blank, the default security group is bound to the ECS by default.
+ type: list
+ returned: success
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ returned: success
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ returned: success
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ returned: success
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with base64. The maximum
+ size of the content to be injected (before encoding) is 32 KB. For
+ Linux ECSs, this parameter does not take effect when adminPass is
+ used.
+ type: str
+ returned: success
+ config_drive:
+ description:
+ - Specifies the configuration driver.
+ type: str
+ returned: success
+ created:
+ description:
+ - Specifies the time when an ECS was created.
+ type: str
+ returned: success
+ disk_config_type:
+ description:
+ - Specifies the disk configuration type. MANUAL means the image
+ space is not expanded. AUTO means the image space of the system
+ disk is expanded to match the flavor.
+ type: str
+ returned: success
+ host_name:
+ description:
+ - Specifies the host name of the ECS.
+ type: str
+ returned: success
+ image_name:
+ description:
+ - Specifies the image name of the ECS.
+ type: str
+ returned: success
+ power_state:
+ description:
+ - Specifies the power status of the ECS.
+ type: int
+ returned: success
+ server_alias:
+ description:
+ - Specifies the ECS alias.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
+ REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
+ and DELETED.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ flavor_name=dict(type='str', required=True),
+ image_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ nics=dict(
+ type='list', required=True, elements='dict',
+ options=dict(
+ ip_address=dict(type='str', required=True),
+ subnet_id=dict(type='str', required=True)
+ ),
+ ),
+ root_volume=dict(type='dict', required=True, options=dict(
+ volume_type=dict(type='str', required=True),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ )),
+ vpc_id=dict(type='str', required=True),
+ admin_pass=dict(type='str', no_log=True),
+ data_volumes=dict(type='list', elements='dict', options=dict(
+ volume_id=dict(type='str', required=True),
+ device=dict(type='str')
+ )),
+ description=dict(type='str'),
+ eip_id=dict(type='str'),
+ enable_auto_recovery=dict(type='bool'),
+ enterprise_project_id=dict(type='str'),
+ security_groups=dict(type='list', elements='str'),
+ server_metadata=dict(type='dict'),
+ server_tags=dict(type='dict'),
+ ssh_key_name=dict(type='str'),
+ user_data=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
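+# Reconciliation flow (a sketch of what follows): resolve the resource id by
+# searching on identity attributes, create the ECS when it does not exist,
+# then read the remote state back, diff it against the user input and update
+# until both sides match. Read-only attributes are merged into the input
+# first so the comparison only covers fields this module can manage.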
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "ecs")
+
+ try:
+ _init(config)
+ is_exist = module.params['id']
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
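+# Resolve the resource id: when the user did not supply one, search by the
+# identity attributes and fail if more than one ECS matches.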
+def _init(config):
+ module = config.module
+ if module.params['id']:
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "admin_pass": module.params.get("admin_pass"),
+ "availability_zone": module.params.get("availability_zone"),
+ "data_volumes": module.params.get("data_volumes"),
+ "description": module.params.get("description"),
+ "eip_id": module.params.get("eip_id"),
+ "enable_auto_recovery": module.params.get("enable_auto_recovery"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "flavor_name": module.params.get("flavor_name"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "nics": module.params.get("nics"),
+ "root_volume": module.params.get("root_volume"),
+ "security_groups": module.params.get("security_groups"),
+ "server_metadata": module.params.get("server_metadata"),
+ "server_tags": module.params.get("server_tags"),
+ "ssh_key_name": module.params.get("ssh_key_name"),
+ "user_data": module.params.get("user_data"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait(config, r, client, timeout)
+
+ sub_job_identity = {
+ "job_type": "createSingleServer",
+ }
+ for item in navigate_value(obj, ["entities", "sub_jobs"]):
+ for k, v in sub_job_identity.items():
+ if item[k] != v:
+ break
+ else:
+ obj = item
+ break
+ else:
+ raise Exception("Can't find the sub job")
+ module.params['id'] = navigate_value(obj, ["entities", "server_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ client = config.client(get_region(module), "ecs", "project")
+
+ params = build_delete_nics_parameters(expect_state)
+ params1 = build_delete_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_delete_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ params = build_set_auto_recovery_parameters(expect_state)
+ params1 = build_set_auto_recovery_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_set_auto_recovery_request(module, params, client)
+
+ params = build_attach_nics_parameters(expect_state)
+ params1 = build_attach_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_attach_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ multi_invoke_delete_volume(config, expect_state, client, timeout)
+
+ multi_invoke_attach_data_disk(config, expect_state, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_delete_parameters(opts)
+ if params:
+ r = send_delete_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ preprocess_read_response(r)
+ res["read"] = fill_read_resp_body(r)
+
+ r = send_read_auto_recovery_request(module, client)
+ res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r)
+
+ return res, None
+
+
+def preprocess_read_response(resp):
+ v = resp.get("os-extended-volumes:volumes_attached")
+ if v and isinstance(v, list):
+ for i in range(len(v)):
+ if v[i].get("bootIndex") == "0":
+ root_volume = v[i]
+
+ if (i + 1) != len(v):
+ v[i] = v[-1]
+
+ v.pop()
+
+ resp["root_volume"] = root_volume
+ break
+
+ v = resp.get("addresses")
+ if v:
+ rv = {}
+ eips = []
+ for val in v.values():
+ for item in val:
+ if item["OS-EXT-IPS:type"] == "floating":
+ eips.append(item)
+ else:
+ rv[item["OS-EXT-IPS:port_id"]] = item
+
+ for item in eips:
+ k = item["OS-EXT-IPS:port_id"]
+ if k in rv:
+ rv[k]["eip_address"] = item.get("addr", "")
+ else:
+ rv[k] = item
+ item["eip_address"] = item.get("addr", "")
+ item["addr"] = ""
+
+ resp["address"] = rv.values()
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ adjust_options(opts, states)
+ return states
+
+
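+# Build the paginated list URL. For example (hypothetical values), with
+# name="ecs-1" and no enterprise_project_id this returns
+# "?limit=10&offset={offset}&name=ecs-1"; search_resource() below fills in
+# the {offset} placeholder on each page.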
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "enterprise_project_id=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={offset}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "cloudservers/detail" + query_link
+
+ result = []
+ p = {'offset': 1}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ adjust_list_resp(identity_obj, item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['offset'] += 1
+
+ return result
+
+
+def build_delete_nics_parameters(opts):
+ params = dict()
+
+ v = expand_delete_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
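+# Compute the NICs to detach: every NIC present in the current state whose
+# ip_address is absent from the desired state is selected, identified by its
+# port id.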
+def expand_delete_nics_nics(d, array_index):
+ cv = d["current_state"].get("nics")
+ if not cv:
+ return None
+
+ val = cv
+
+ ev = d.get("nics")
+ if ev:
+ m = [item.get("ip_address") for item in ev]
+ val = [item for item in cv if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("port_id")
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_delete_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics/delete")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_set_auto_recovery_parameters(opts):
+ params = dict()
+
+ v = expand_set_auto_recovery_support_auto_recovery(opts, None)
+ if v is not None:
+ params["support_auto_recovery"] = v
+
+ return params
+
+
+def expand_set_auto_recovery_support_auto_recovery(d, array_index):
+ v = navigate_value(d, ["enable_auto_recovery"], None)
+ return None if v is None else str(v).lower()
+
+
+def send_set_auto_recovery_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(set_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_pass"], None)
+ if not is_empty_value(v):
+ params["adminPass"] = v
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = expand_create_extendparam(opts, None)
+ if not is_empty_value(v):
+ params["extendparam"] = v
+
+ v = navigate_value(opts, ["flavor_name"], None)
+ if not is_empty_value(v):
+ params["flavorRef"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = navigate_value(opts, ["ssh_key_name"], None)
+ if not is_empty_value(v):
+ params["key_name"] = v
+
+ v = navigate_value(opts, ["server_metadata"], None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ v = expand_create_root_volume(opts, None)
+ if not is_empty_value(v):
+ params["root_volume"] = v
+
+ v = expand_create_security_groups(opts, None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ v = expand_create_server_tags(opts, None)
+ if not is_empty_value(v):
+ params["server_tags"] = v
+
+ v = navigate_value(opts, ["user_data"], None)
+ if not is_empty_value(v):
+ params["user_data"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpcid"] = v
+
+ if not params:
+ return params
+
+ params = {"server": params}
+
+ return params
+
+
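+# Extended creation parameters. chargingMode is hard-coded to 0 which, going
+# by the public Huawei Cloud ECS API documentation, selects pay-per-use
+# billing; the module does not expose other billing modes.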
+def expand_create_extendparam(d, array_index):
+ r = dict()
+
+ r["chargingMode"] = 0
+
+ v = navigate_value(d, ["enterprise_project_id"], array_index)
+ if not is_empty_value(v):
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(d, ["enable_auto_recovery"], array_index)
+ if not is_empty_value(v):
+ r["support_auto_recovery"] = v
+
+ return r
+
+
+def expand_create_nics(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ v = navigate_value(
+ d, ["nics"], new_ai)
+
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_ai["nics"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["nics", "ip_address"], new_ai)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["nics", "subnet_id"], new_ai)
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["eip_id"], array_index)
+ if not is_empty_value(v):
+ r["id"] = v
+
+ return r
+
+
+def expand_create_root_volume(d, array_index):
+ r = dict()
+
+ v = expand_create_root_volume_extendparam(d, array_index)
+ if not is_empty_value(v):
+ r["extendparam"] = v
+
+ v = navigate_value(d, ["root_volume", "size"], array_index)
+ if not is_empty_value(v):
+ r["size"] = v
+
+ v = navigate_value(d, ["root_volume", "volume_type"], array_index)
+ if not is_empty_value(v):
+ r["volumetype"] = v
+
+ return r
+
+
+def expand_create_root_volume_extendparam(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["root_volume", "snapshot_id"], array_index)
+ if not is_empty_value(v):
+ r["snapshotId"] = v
+
+ return r
+
+
+def expand_create_security_groups(d, array_index):
+ v = d.get("security_groups")
+ if not v:
+ return None
+
+ return [{"id": i} for i in v]
+
+
+def expand_create_server_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [{"key": k, "value": v1} for k, v1 in v.items()]
+
+
+def send_create_request(module, params, client):
+ url = "cloudservers"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_nics_parameters(opts):
+ params = dict()
+
+ v = expand_attach_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_attach_nics_nics(d, array_index):
+ ev = d.get("nics")
+ if not ev:
+ return None
+
+ val = ev
+
+ cv = d["current_state"].get("nics")
+ if cv:
+ m = [item.get("ip_address") for item in cv]
+ val = [item for item in ev if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("ip_address")
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = item.get("subnet_id")
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_attach_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_volume_request(module, params, client, info):
+ path_parameters = {
+ "volume_id": ["volume_id"],
+ }
+ data = dict((key, navigate_value(info, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data)
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_volume), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_data_disk_parameters(opts, array_index):
+ params = dict()
+
+ v = expand_attach_data_disk_volume_attachment(opts, array_index)
+ if not is_empty_value(v):
+ params["volumeAttachment"] = v
+
+ return params
+
+
+def expand_attach_data_disk_volume_attachment(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["data_volumes", "device"], array_index)
+ if not is_empty_value(v):
+ r["device"] = v
+
+ v = navigate_value(d, ["data_volumes", "volume_id"], array_index)
+ if not is_empty_value(v):
+ r["volumeId"] = v
+
+ return r
+
+
+def send_attach_data_disk_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/attachvolume")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_data_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_delete_parameters(opts):
+ params = dict()
+
+ params["delete_publicip"] = False
+
+ params["delete_volume"] = False
+
+ v = expand_delete_servers(opts, None)
+ if not is_empty_value(v):
+ params["servers"] = v
+
+ return params
+
+
+def expand_delete_servers(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = expand_delete_servers_id(d, new_ai)
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_delete_servers_id(d, array_index):
+ return d["ansible_module"].params.get("id")
+
+
+def send_delete_request(module, params, client):
+ url = "cloudservers/delete"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
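+# Poll the asynchronous job until it finishes. Per the wait_to_finish
+# arguments, SUCCESS is the terminal state and RUNNING/INIT are pending;
+# any other job status is expected to surface as the error handled below.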
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "jobs/{job_id}", result)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_ecs_instance): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
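+# Detach currently attached data volumes whose volume_id no longer appears
+# in the requested set (only evaluated when both lists are non-empty); each
+# detachment is a separate job that is awaited before the next one starts.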
+def multi_invoke_delete_volume(config, opts, client, timeout):
+ module = config.module
+
+ opts1 = None
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in expect]
+ opts1 = {
+ "data_volumes": [
+ i for i in current if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ r = send_delete_volume_request(module, None, client, loop_val[i])
+ async_wait(config, r, client, timeout)
+
+
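+# Attach every requested data volume that is not attached yet, matched by
+# volume_id; like detachment, each attachment job is awaited individually.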
+def multi_invoke_attach_data_disk(config, opts, client, timeout):
+ module = config.module
+
+ opts1 = opts
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in current]
+ opts1 = {
+ "data_volumes": [
+ i for i in expect if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ params = build_attach_data_disk_parameters(opts1, {"data_volumes": i})
+ r = send_attach_data_disk_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def send_read_request(module, client):
+ url = build_path(module, "cloudservers/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["server"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ v = fill_read_resp_address(body.get("address"))
+ result["address"] = v
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_read_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_os_extended_volumes_volumes_attached(
+ body.get("os-extended-volumes:volumes_attached"))
+ result["os-extended-volumes:volumes_attached"] = v
+
+ v = fill_read_resp_root_volume(body.get("root_volume"))
+ result["root_volume"] = v
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_read_resp_address(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id")
+
+ val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type")
+
+ val["addr"] = item.get("addr")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["image_name"] = value.get("image_name")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_os_extended_volumes_volumes_attached(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["bootIndex"] = item.get("bootIndex")
+
+ val["device"] = item.get("device")
+
+ val["id"] = item.get("id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_root_volume(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["device"] = value.get("device")
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def send_read_auto_recovery_request(module, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def fill_read_auto_recovery_resp_body(body):
+ result = dict()
+
+ result["support_auto_recovery"] = body.get("support_auto_recovery")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-AZ:availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "config_drive"], array_index)
+ r["config_drive"] = v
+
+ v = navigate_value(response, ["read", "created"], array_index)
+ r["created"] = v
+
+ v = flatten_data_volumes(response, array_index)
+ r["data_volumes"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index)
+ r["disk_config_type"] = v
+
+ v = flatten_enable_auto_recovery(response, array_index)
+ r["enable_auto_recovery"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "flavor", "id"], array_index)
+ r["flavor_name"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index)
+ r["host_name"] = v
+
+ v = navigate_value(response, ["read", "image", "id"], array_index)
+ r["image_id"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "image_name"], array_index)
+ r["image_name"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = flatten_nics(response, array_index)
+ r["nics"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-STS:power_state"], array_index)
+ r["power_state"] = v
+
+ v = flatten_root_volume(response, array_index)
+ r["root_volume"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index)
+ r["server_alias"] = v
+
+ v = flatten_server_tags(response, array_index)
+ r["server_tags"] = v
+
+ v = navigate_value(response, ["read", "key_name"], array_index)
+ r["ssh_key_name"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index)
+ r["user_data"] = v
+
+ v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def flatten_data_volumes(d, array_index):
+ v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.os-extended-volumes:volumes_attached"] = i
+
+ val = dict()
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai)
+ val["volume_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_auto_recovery(d, array_index):
+ v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"],
+ array_index)
+ return v == "true"
+
+
+def flatten_nics(d, array_index):
+ v = navigate_value(d, ["read", "address"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.address"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "address", "addr"], new_ai)
+ val["ip_address"] = v
+
+ v = navigate_value(
+ d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai)
+ val["port_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_root_volume(d, array_index):
+ result = dict()
+
+ v = navigate_value(d, ["read", "root_volume", "device"], array_index)
+ result["device"] = v
+
+ v = navigate_value(d, ["read", "root_volume", "id"], array_index)
+ result["volume_id"] = v
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return None
+
+
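+# The API returns tags as "key=value" strings; split on the first "=" only
+# so that values which themselves contain "=" are preserved.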
+def flatten_server_tags(d, array_index):
+ v = navigate_value(d, ["read", "tags"], array_index)
+ if not v:
+ return None
+
+ r = dict()
+ for item in v:
+ v1 = item.split("=")
+ if v1:
+ r[v1[0]] = v1[1]
+ return r
+
+
+def adjust_options(opts, states):
+ adjust_data_volumes(opts, states)
+
+ adjust_nics(opts, states)
+
+
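+# Reorder the current data_volumes list so it lines up with the user input:
+# greedily match entries by volume_id, then append any unmatched remainder.
+# This keeps are_different_dicts() from reporting a diff that is due to
+# ordering alone.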
+def adjust_data_volumes(parent_input, parent_cur):
+ iv = parent_input.get("data_volumes")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("data_volumes")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["volume_id"] != icv["volume_id"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(data_volumes) failed, "
+ "the array number is not equal")
+
+ parent_cur["data_volumes"] = result
+
+
+def adjust_nics(parent_input, parent_cur):
+ iv = parent_input.get("nics")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("nics")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["ip_address"] != icv["ip_address"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(nics) failed, "
+ "the array number is not equal")
+
+ parent_cur["nics"] = result
+
+
+def set_unreadable_options(opts, states):
+ states["admin_pass"] = opts.get("admin_pass")
+
+ states["eip_id"] = opts.get("eip_id")
+
+ set_unread_nics(
+ opts.get("nics"), states.get("nics"))
+
+ set_unread_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ states["security_groups"] = opts.get("security_groups")
+
+ states["server_metadata"] = opts.get("server_metadata")
+
+
+def set_unread_nics(inputv, curv):
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ if not (curv and isinstance(curv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ cv["subnet_id"] = iv.get("subnet_id")
+
+
+def set_unread_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ curv["size"] = inputv.get("size")
+
+ curv["snapshot_id"] = inputv.get("snapshot_id")
+
+ curv["volume_type"] = inputv.get("volume_type")
+
+
+def set_readonly_options(opts, states):
+ opts["config_drive"] = states.get("config_drive")
+
+ opts["created"] = states.get("created")
+
+ opts["disk_config_type"] = states.get("disk_config_type")
+
+ opts["host_name"] = states.get("host_name")
+
+ opts["image_name"] = states.get("image_name")
+
+ set_readonly_nics(
+ opts.get("nics"), states.get("nics"))
+
+ opts["power_state"] = states.get("power_state")
+
+ set_readonly_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ opts["server_alias"] = states.get("server_alias")
+
+ opts["status"] = states.get("status")
+
+
+def set_readonly_nics(inputv, curv):
+ if not (curv and isinstance(curv, list)):
+ return
+
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ iv["port_id"] = cv.get("port_id")
+
+
+def set_readonly_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ inputv["device"] = curv.get("device")
+
+ inputv["volume_id"] = curv.get("volume_id")
+
+
+def send_list_request(module, client, url):
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["servers"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = None
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["OS-EXT-AZ:availability_zone"] = v
+
+ result["OS-EXT-SRV-ATTR:hostname"] = None
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = None
+
+ v = navigate_value(all_opts, ["user_data"], None)
+ result["OS-EXT-SRV-ATTR:user_data"] = v
+
+ result["OS-EXT-STS:power_state"] = None
+
+ result["config_drive"] = None
+
+ result["created"] = None
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ v = expand_list_flavor(all_opts, None)
+ result["flavor"] = v
+
+ result["id"] = None
+
+ v = expand_list_image(all_opts, None)
+ result["image"] = v
+
+ v = navigate_value(all_opts, ["ssh_key_name"], None)
+ result["key_name"] = v
+
+ v = expand_list_metadata(all_opts, None)
+ result["metadata"] = v
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["status"] = None
+
+ v = expand_list_tags(all_opts, None)
+ result["tags"] = v
+
+ return result
+
+
+def expand_list_flavor(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["flavor_name"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_image(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [k + "=" + v1 for k, v1 in v.items()]
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_list_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_list_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def adjust_list_resp(opts, resp):
+ adjust_list_api_tags(opts, resp)
+
+
+def adjust_list_api_tags(parent_input, parent_cur):
+ iv = parent_input.get("tags")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("tags")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ result = []
+ for iiv in iv:
+ if iiv not in cv:
+ break
+
+ result.append(iiv)
+
+ j = cv.index(iiv)
+ cv[j] = cv[-1]
+ cv.pop()
+
+ if cv:
+ result.extend(cv)
+ parent_cur["tags"] = result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py
new file mode 100644
index 00000000..4aec1b94
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py
@@ -0,0 +1,1210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_evs_disk
+description:
+ - Block storage management.
+short_description: Creates a resource of Evs/Disk in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+ - The timeout for the update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+ - The timeout for the delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ required: true
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, creating
+ the disk will fail. If the EVS disk is created from a snapshot,
+ the volume_type field must be the same as that of the snapshot's
+ source disk.
+ type: str
+ required: true
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ required: false
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ required: false
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+ media. SCSI reservation command is supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ required: false
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ required: false
+ encryption_id:
+ description:
+ - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ required: false
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ required: false
+ size:
+ description:
+ - Specifies the disk size, in GB. Its value ranges are as follows:
+ system disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# test create disk
+- name: Create a disk
+ community.general.hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ returned: success
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, creating
+ the disk will fail. If the EVS disk is created from a snapshot,
+ the volume_type field must be the same as that of the snapshot's
+ source disk.
+ type: str
+ returned: success
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ returned: success
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ returned: success
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+ media. SCSI reservation command is supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ returned: success
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ returned: success
+ encryption_id:
+ description:
+ - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the disk size, in GB. Its value ranges are as follows:
+ system disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ returned: success
+ attachments:
+ description:
+ - Specifies the disk attachment information.
+ type: complex
+ returned: success
+ contains:
+ attached_at:
+ description:
+ - Specifies the time when the disk was attached. Time
+ format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ attachment_id:
+ description:
+ - Specifies the ID of the attachment information.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the device name.
+ type: str
+ returned: success
+ server_id:
+ description:
+ - Specifies the ID of the server to which the disk is
+ attached.
+ type: str
+ returned: success
+ backup_policy_id:
+ description:
+ - Specifies the backup policy ID.
+ type: str
+ returned: success
+ created_at:
+ description:
+ - Specifies the time when the disk was created. Time format is 'UTC
+ YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ is_bootable:
+ description:
+ - Specifies whether the disk is bootable.
+ type: bool
+ returned: success
+ is_readonly:
+ description:
+ - Specifies whether the disk is read-only or read/write. True
+ indicates that the disk is read-only. False indicates that the
+ disk is read/write.
+ type: bool
+ returned: success
+ source_volume_id:
+ description:
+ - Specifies the source disk ID. This parameter has a value if the
+ disk is created from a source disk.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the disk status.
+ type: str
+ returned: success
+ tags:
+ description:
+ - Specifies the disk tags.
+ type: dict
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ volume_type=dict(type='str', required=True),
+ backup_id=dict(type='str'),
+ description=dict(type='str'),
+ enable_full_clone=dict(type='bool'),
+ enable_scsi=dict(type='bool'),
+ enable_share=dict(type='bool'),
+ encryption_id=dict(type='str'),
+ enterprise_project_id=dict(type='str'),
+ image_id=dict(type='str'),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "evs")
+
+ try:
+ _init(config)
+ is_exist = module.params.get('id')
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params.get('id'):
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("find more than one resources(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "backup_id": module.params.get("backup_id"),
+ "description": module.params.get("description"),
+ "enable_full_clone": module.params.get("enable_full_clone"),
+ "enable_scsi": module.params.get("enable_scsi"),
+ "enable_share": module.params.get("enable_share"),
+ "encryption_id": module.params.get("encryption_id"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "size": module.params.get("size"),
+ "snapshot_id": module.params.get("snapshot_id"),
+ "volume_type": module.params.get("volume_type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ obj = async_wait(config, r, client1, timeout)
+ module.params['id'] = navigate_value(obj, ["entities", "volume_id"])
+
+
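+# Update runs in two steps: name/description changes go through a plain PUT
+# on the cloudvolumes endpoint, while a size change is an "os-extend" action
+# on the v2.1 endpoint followed by a wait on the resulting job. The job API
+# is apparently only served on the v1 path, hence the endpoint rewrite below.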
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+
+ params = build_update_parameters(expect_state)
+ params1 = build_update_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_update_request(module, params, client)
+
+ params = build_extend_disk_parameters(expect_state)
+ params1 = build_extend_disk_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ client1 = config.client(get_region(module), "evsv2.1", "project")
+ r = send_extend_disk_request(module, params, client1)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client1, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ r = send_delete_request(module, None, client)
+
+ client = config.client(get_region(module), "volume", "project")
+ client.endpoint = client.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return res, None
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enable_share"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "multiattach=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["availability_zone"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "availability_zone=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={start}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
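+# Page through os-vendor-volumes/detail, advancing the start offset by the
+# number of items returned, and collect volumes whose name matches exactly;
+# _init() treats more than one match as ambiguous.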
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ opts = user_input_parameters(module)
+ name = module.params.get("name")
+ query_link = _build_query_link(opts)
+ link = "os-vendor-volumes/detail" + query_link
+
+ result = []
+ p = {'start': 0}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ if name == item.get("name"):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['start'] += len(r)
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["backup_id"], None)
+ if not is_empty_value(v):
+ params["backup_id"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = expand_create_metadata(opts, None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["enable_share"], None)
+ if not is_empty_value(v):
+ params["multiattach"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["size"], None)
+ if not is_empty_value(v):
+ params["size"] = v
+
+ v = navigate_value(opts, ["snapshot_id"], None)
+ if not is_empty_value(v):
+ params["snapshot_id"] = v
+
+ v = navigate_value(opts, ["volume_type"], None)
+ if not is_empty_value(v):
+ params["volume_type"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
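+# Map module options onto the metadata keys the EVS create API expects:
+# __system__cmkid/__system__encrypted for encryption, full_clone for linked
+# cloning (enable_full_clone=True is sent as "0", which per the option docs
+# selects linked cloning) and hw:passthrough for the SCSI device type.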
+def expand_create_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ if not is_empty_value(v):
+ r["__system__cmkid"] = v
+
+ v = expand_create_metadata_system_encrypted(d, array_index)
+ if not is_empty_value(v):
+ r["__system__encrypted"] = v
+
+ v = expand_create_metadata_full_clone(d, array_index)
+ if not is_empty_value(v):
+ r["full_clone"] = v
+
+ v = expand_create_metadata_hw_passthrough(d, array_index)
+ if not is_empty_value(v):
+ r["hw:passthrough"] = v
+
+ return r
+
+
+def expand_create_metadata_system_encrypted(d, array_index):
+ v = navigate_value(d, ["encryption_id"], array_index)
+ return "1" if v else ""
+
+
+def expand_create_metadata_full_clone(d, array_index):
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ return "0" if v else ""
+
+
+def expand_create_metadata_hw_passthrough(d, array_index):
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ if v is None:
+ return v
+ return "true" if v else "false"
+
+
+def send_create_request(module, params, client):
+ url = "cloudvolumes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if v is not None:
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_extend_disk_parameters(opts):
+ params = dict()
+
+ v = expand_extend_disk_os_extend(opts, None)
+ if not is_empty_value(v):
+ params["os-extend"] = v
+
+ return params
+
+
+def expand_extend_disk_os_extend(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["size"], array_index)
+ if not is_empty_value(v):
+ r["new_size"] = v
+
+ return r
+
+
+def send_extend_disk_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}/action")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(extend_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
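+    """Polls the jobs/{job_id} endpoint until the job status becomes
+    SUCCESS; RUNNING and INIT are treated as in-progress states.
+    """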
+ module = config.module
+
+ path_parameters = {
+ "job_id": ["job_id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "jobs/{job_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_evs_disk): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
+def send_read_request(module, client):
+ url = build_path(module, "os-vendor-volumes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volume"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_read_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_read_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = flatten_attachments(response, array_index)
+ r["attachments"] = v
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "metadata", "policy"], array_index)
+ r["backup_policy_id"] = v
+
+ v = navigate_value(response, ["read", "created_at"], array_index)
+ r["created_at"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = flatten_enable_full_clone(response, array_index)
+ r["enable_full_clone"] = v
+
+ v = flatten_enable_scsi(response, array_index)
+ r["enable_scsi"] = v
+
+ v = navigate_value(response, ["read", "multiattach"], array_index)
+ r["enable_share"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "__system__cmkid"], array_index)
+ r["encryption_id"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(
+ response, ["read", "volume_image_metadata", "id"], array_index)
+ r["image_id"] = v
+
+ v = flatten_is_bootable(response, array_index)
+ r["is_bootable"] = v
+
+ v = flatten_is_readonly(response, array_index)
+ r["is_readonly"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "size"], array_index)
+ r["size"] = v
+
+ v = navigate_value(response, ["read", "snapshot_id"], array_index)
+ r["snapshot_id"] = v
+
+ v = navigate_value(response, ["read", "source_volid"], array_index)
+ r["source_volume_id"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(response, ["read", "tags"], array_index)
+ r["tags"] = v
+
+ v = navigate_value(response, ["read", "volume_type"], array_index)
+ r["volume_type"] = v
+
+ return r
+
+
+def flatten_attachments(d, array_index):
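+    """Converts the attachments array of the read response into the
+    documented output form, keeping only entries that contain at least
+    one non-None field.
+    """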
+ v = navigate_value(d, ["read", "attachments"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.attachments"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai)
+ val["attached_at"] = v
+
+ v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai)
+ val["attachment_id"] = v
+
+ v = navigate_value(d, ["read", "attachments", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(d, ["read", "attachments", "server_id"], new_ai)
+ val["server_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_full_clone(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "full_clone"],
+ array_index)
+ if v is None:
+ return v
+ return True if v == "0" else False
+
+
+def flatten_enable_scsi(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "hw:passthrough"],
+ array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def flatten_is_bootable(d, array_index):
+ v = navigate_value(d, ["read", "bootable"], array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def flatten_is_readonly(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "readonly"],
+ array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def set_unreadable_options(opts, states):
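+    # backup_id cannot be read back from the API, so the user's input
+    # value is carried over into the returned state.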
+ states["backup_id"] = opts.get("backup_id")
+
+
+def set_readonly_options(opts, states):
+ opts["attachments"] = states.get("attachments")
+
+ opts["backup_policy_id"] = states.get("backup_policy_id")
+
+ opts["created_at"] = states.get("created_at")
+
+ opts["is_bootable"] = states.get("is_bootable")
+
+ opts["is_readonly"] = states.get("is_readonly")
+
+ opts["source_volume_id"] = states.get("source_volume_id")
+
+ opts["status"] = states.get("status")
+
+ opts["tags"] = states.get("tags")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volumes"], None)
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ r["__system__cmkid"] = v
+
+ r["attached_mode"] = None
+
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ r["full_clone"] = v
+
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ r["hw:passthrough"] = v
+
+ r["policy"] = None
+
+ r["readonly"] = None
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_volume_image_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_list_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_list_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py
new file mode 100644
index 00000000..f53369ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2018 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_network_vpc
+description:
+    - Represents a VPC resource.
+short_description: Creates a Huawei Cloud VPC
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '15m'
+ delete:
+ description:
+                    - The timeout for the delete operation.
+ type: str
+ default: '15m'
+ name:
+ description:
+            - The name of the VPC.
+ type: str
+ required: true
+ cidr:
+ description:
+            - The range of available subnets in the VPC.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create a vpc
+ community.general.hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
+'''
+
+RETURN = '''
+ id:
+ description:
+            - The ID of the VPC.
+ type: str
+ returned: success
+ name:
+ description:
+            - The name of the VPC.
+ type: str
+ returned: success
+ cidr:
+ description:
+            - The range of available subnets in the VPC.
+ type: str
+ returned: success
+ status:
+ description:
+            - The status of the VPC.
+ type: str
+ returned: success
+ routes:
+ description:
+            - The route information.
+ type: complex
+ returned: success
+ contains:
+ destination:
+ description:
+                    - The destination network segment of a route.
+ type: str
+ returned: success
+ next_hop:
+ description:
+                    - The next hop of a route. If the route type is peering,
+                      it provides the VPC peering connection ID.
+ type: str
+ returned: success
+ enable_shared_snat:
+ description:
+            - Shows whether shared SNAT is enabled.
+ type: bool
+ returned: success
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcClientException404, HwcModule,
+ are_different_dicts, is_empty_value,
+ wait_to_finish, get_region,
+ build_path, navigate_value)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(
+ default='present', choices=['present', 'absent'], type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ delete=dict(default='15m', type='str'),
+ ), default=dict()),
+ name=dict(required=True, type='str'),
+ cidr=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+ config = Config(module, 'vpc')
+
+ state = module.params['state']
+
+ if (not module.params.get("id")) and module.params.get("name"):
+ module.params['id'] = get_id_by_name(config)
+
+ fetch = None
+ link = self_link(module)
+    # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "vpc", "project")
+ fetch = fetch_resource(module, client, link)
+ if fetch:
+ fetch = fetch.get('vpc')
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {"cidr": current_state["cidr"]}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config, self_link(module))
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config, self_link(module))
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config, "vpcs")
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.post(link, resource_to_create(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_done = wait_for_operation(config, 'create', r)
+ v = ""
+ try:
+ v = navigate_value(wait_done, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, 'vpcs/{op_id}', {'op_id': v})
+ return fetch_resource(module, client, url)
+
+
+def update(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.put(link, resource_to_update(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_operation(config, 'update', r)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_delete(module, client, link)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_id_by_name(config):
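+    """Resolves a VPC name to its ID, paging through the vpcs list with a
+    marker cursor and failing if several VPCs share the same name.
+    """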
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ name = module.params.get("name")
+ link = "vpcs"
+ query_link = "?marker={marker}&limit=10"
+ link += query_link
+ not_format_keys = re.findall("={marker}", link)
+ none_values = re.findall("=None", link)
+
+ if not (not_format_keys or none_values):
+ r = None
+ try:
+ r = client.get(link)
+ except Exception:
+ pass
+ if r is None:
+ return None
+ r = r.get('vpcs', [])
+ ids = [
+ i.get('id') for i in r if i.get('name', '') == name
+ ]
+ if not ids:
+ return None
+ elif len(ids) == 1:
+ return ids[0]
+ else:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+ elif none_values:
+ module.fail_json(
+ msg="Can not find id by name because url includes None.")
+ else:
+ p = {'marker': ''}
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('vpcs', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == name:
+ ids.add(i.get('id'))
+ if len(ids) >= 2:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+
+ p['marker'] = r[-1].get('id')
+
+ return ids.pop() if ids else None
+
+
+def self_link(module):
+ return build_path(module, "vpcs/{id}")
+
+
+def resource_to_create(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def resource_to_update(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def _get_editable_properties(module):
+ return {
+ "cidr": module.params.get("cidr"),
+ }
+
+
+def response_to_hash(module, response):
+ """ Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'id': response.get(u'id'),
+ u'name': response.get(u'name'),
+ u'cidr': response.get(u'cidr'),
+ u'status': response.get(u'status'),
+ u'routes': VpcRoutesArray(
+ response.get(u'routes', []), module).from_response(),
+ u'enable_shared_snat': response.get(u'enable_shared_snat')
+ }
+
+
+def wait_for_operation(config, op_type, op_result):
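+    """Polls the VPC until it reaches the OK status, converting the
+    '15m'-style timeout into seconds (for example, '15m' -> 900).
+    """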
+ module = config.module
+ op_id = ""
+ try:
+ op_id = navigate_value(op_result, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, "vpcs/{op_id}", {'op_id': op_id})
+ timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m'))
+ states = {
+ 'create': {
+            'allowed': ['CREATING', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ },
+ 'update': {
+            'allowed': ['PENDING_UPDATE', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ }
+ }
+
+ return wait_for_completion(url, timeout, states[op_type]['allowed'],
+ states[op_type]['complete'], config)
+
+
+def wait_for_completion(op_uri, timeout, allowed_states,
+ complete_states, config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ def _refresh_status():
+ r = None
+ try:
+ r = fetch_resource(module, client, op_uri)
+ except Exception:
+ return None, ""
+
+ status = ""
+ try:
+ status = navigate_value(r, ['vpc', 'status'])
+ except Exception:
+ return None, ""
+
+ return r, status
+
+ try:
+ return wait_to_finish(complete_states, allowed_states,
+ _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def wait_for_delete(module, client, link):
+
+ def _refresh_status():
+ try:
+ client.get(link)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+ try:
+ return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+class VpcRoutesArray(object):
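+    """Translates route entries between the API field name (nexthop) and
+    the Ansible-facing field name (next_hop).
+    """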
+ def __init__(self, request, module):
+ self.module = module
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return {
+ u'destination': item.get('destination'),
+ u'nexthop': item.get('next_hop')
+ }
+
+ def _response_from_item(self, item):
+ return {
+ u'destination': item.get(u'destination'),
+ u'next_hop': item.get(u'nexthop')
+ }
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py
new file mode 100644
index 00000000..f7fb4fae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py
@@ -0,0 +1,338 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_smn_topic
+description:
+    - Represents an SMN notification topic resource.
+short_description: Creates a resource of SMNTopic in Huawei Cloud
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ type: str
+ required: false
+ name:
+ description:
+            - Name of the topic to be created. The topic name is a string of 1
+              to 256 characters. It can contain only upper- or lowercase
+              letters, digits, hyphens (-), and underscores C(_), and must
+              start with a letter or digit.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create a smn topic
+ community.general.hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user_name: "{{ user_name }}"
+ password: "{{ password }}"
+ domain_name: "{{ domain_name }}"
+ project_name: "{{ project_name }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+'''
+
+RETURN = '''
+create_time:
+ description:
+ - Time when the topic was created.
+ returned: success
+ type: str
+display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ returned: success
+ type: str
+name:
+ description:
+    - Name of the topic to be created. The topic name is a string of 1
+      to 256 characters. It can contain only upper- or lowercase letters,
+      digits, hyphens (-), and underscores C(_), and must start with a
+      letter or digit.
+ returned: success
+ type: str
+push_policy:
+ description:
+ - Message pushing policy. 0 indicates that the message sending
+ fails and the message is cached in the queue. 1 indicates that
+ the failed message is discarded.
+ returned: success
+ type: int
+topic_urn:
+ description:
+ - Resource identifier of a topic, which is unique.
+ returned: success
+ type: str
+update_time:
+ description:
+ - Time when the topic was updated.
+ returned: success
+ type: str
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcModule, navigate_value,
+ are_different_dicts, is_empty_value,
+ build_path, get_region)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ display_name=dict(type='str'),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ config = Config(module, "smn")
+
+ state = module.params['state']
+
+ if not module.params.get("id"):
+ module.params['id'] = get_resource_id(config)
+
+ fetch = None
+ link = self_link(module)
+    # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "smn", "project")
+ fetch = fetch_resource(module, client, link)
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_resource_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {'display_name': current_state['display_name']}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config)
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ r = None
+ try:
+ r = client.post(link, create_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return get_resource(config, r)
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.put(link, update_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_resource(config, result):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ v = ""
+ try:
+ v = navigate_value(result, ['topic_urn'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ d = {'topic_urn': v}
+ url = build_path(module, 'notifications/topics/{topic_urn}', d)
+
+ return fetch_resource(module, client, url)
+
+
+def get_resource_id(config):
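+    """Resolves a topic name to its topic_urn, paging through the topics
+    list and failing if several topics share the same name.
+    """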
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ query_link = "?offset={offset}&limit=10"
+ link += query_link
+
+ p = {'offset': 0}
+ v = module.params.get('name')
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('topics', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == v:
+ ids.add(i.get('topic_urn'))
+ if len(ids) >= 2:
+ module.fail_json(msg="Multiple resources are found")
+
+ p['offset'] += 1
+
+ return ids.pop() if ids else None
+
+
+def self_link(module):
+ return build_path(module, "notifications/topics/{id}")
+
+
+def create_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ return params
+
+
+def update_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ return params
+
+
+def _get_resource_editable_properties(module):
+ return {
+ "display_name": module.params.get("display_name"),
+ }
+
+
+def response_to_hash(module, response):
+ """Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'create_time': response.get(u'create_time'),
+ u'display_name': response.get(u'display_name'),
+ u'name': response.get(u'name'),
+ u'push_policy': _push_policy_convert_from_response(
+ response.get('push_policy')),
+ u'topic_urn': response.get(u'topic_urn'),
+ u'update_time': response.get(u'update_time')
+ }
+
+
+def _push_policy_convert_from_response(value):
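+    """Maps the numeric push_policy returned by the API to the
+    human-readable meaning used in the module documentation.
+    """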
+    # Guard against a missing value: int(None) would raise a TypeError.
+    if value is None:
+        return None
+    return {
+        0: "the message sending fails and is cached in the queue",
+        1: "the failed message is discarded",
+    }.get(int(value))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py
new file mode 100644
index 00000000..b53395f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py
@@ -0,0 +1,877 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_eip
+description:
+    - Elastic IP management.
+short_description: Creates a resource of Vpc/EIP in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '5m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '5m'
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ required: true
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ required: false
+ suboptions:
+ charge_mode:
+ description:
+                    - Specifies whether the bandwidth is billed by traffic or
+                      by bandwidth size. The value can be bandwidth or traffic.
+                      If this parameter is left blank or is a null character
+                      string, the default value bandwidth is used. For IPv6
+                      addresses, the default value is bandwidth outside China
+                      and traffic in China.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+ details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+                      - The minimum unit is 50 Mbit/s if the allowed bandwidth
+                        size ranges from 300 Mbit/s to 1000 Mbit/s (with
+                        1000 Mbit/s included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ required: false
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ required: false
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ required: false
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ required: false
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create an eip and bind it to a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ register: port
+- name: Create an eip and bind it to a port
+ community.general.hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+'''
+
+RETURN = '''
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ returned: success
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ returned: success
+ contains:
+ charge_mode:
+ description:
+                    - Specifies whether the bandwidth is billed by traffic or
+                      by bandwidth size. The value can be bandwidth or traffic.
+                      If this parameter is left blank or is a null character
+                      string, the default value bandwidth is used. For IPv6
+                      addresses, the default value is bandwidth outside China
+                      and traffic in China.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+                      details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+                      - The minimum unit is 50 Mbit/s if the allowed bandwidth
+                        size ranges from 300 Mbit/s to 1000 Mbit/s (with
+                        1000 Mbit/s included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ returned: success
+ id:
+ description:
+ - Specifies the ID of dedicated bandwidth.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ returned: success
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ returned: success
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ returned: success
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ returned: success
+ create_time:
+ description:
+ - Specifies the time (UTC time) when the EIP was assigned.
+ type: str
+ returned: success
+ ipv6_address:
+ description:
+ - Specifies the obtained IPv6 EIP.
+ type: str
+ returned: success
+ private_ip_address:
+ description:
+ - Specifies the private IP address bound with the EIP. This
+ parameter is returned only when a private IP address is bound
+ with the EIP.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='5m', type='str'),
+ update=dict(default='5m', type='str'),
+ ), default=dict()),
+ type=dict(type='str', required=True),
+ dedicated_bandwidth=dict(type='dict', options=dict(
+ charge_mode=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ size=dict(type='int', required=True)
+ )),
+ enterprise_project_id=dict(type='str'),
+ ip_version=dict(type='int'),
+ ipv4_address=dict(type='str'),
+ port_id=dict(type='str'),
+ shared_bandwidth_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "ip_version": module.params.get("ip_version"),
+ "ipv4_address": module.params.get("ipv4_address"),
+ "port_id": module.params.get("port_id"),
+ "shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
+ "type": module.params.get("type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["publicip", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ if module.params["port_id"]:
+ module.params["port_id"] = ""
+ update(config)
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "publicips/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
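+    # The module defines no delete timeout, so the create timeout is
+    # reused while waiting for the EIP to disappear.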
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["ip_version"])
+ if v:
+ query_params.append("ip_version=" + str(v))
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "publicips" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_bandwidth(opts, None)
+ if not is_empty_value(v):
+ params["bandwidth"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ return params
+
+
+def expand_create_bandwidth(d, array_index):
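+    """Builds the bandwidth part of the create request: exactly one of
+    shared_bandwidth_id (share_type WHOLE) or dedicated_bandwidth
+    (share_type PER) must be provided.
+    """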
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ if not (v or sbwid):
+ raise Exception("must input shared_bandwidth_id or "
+ "dedicated_bandwidth")
+
+ if sbwid:
+ return {
+ "id": sbwid,
+ "share_type": "WHOLE"}
+
+ return {
+ "charge_mode": v["charge_mode"],
+ "name": v["name"],
+ "share_type": "PER",
+ "size": v["size"]}
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["ipv4_address"], array_index)
+ if not is_empty_value(v):
+ r["ip_address"] = v
+
+ v = navigate_value(d, ["ip_version"], array_index)
+ if not is_empty_value(v):
+ r["ip_version"] = v
+
+ v = navigate_value(d, ["type"], array_index)
+ if not is_empty_value(v):
+ r["type"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "publicips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "publicip_id": ["publicip", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "publicips/{publicip_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_version"], None)
+ if not is_empty_value(v):
+ params["ip_version"] = v
+
+ v = navigate_value(opts, ["port_id"], None)
+ if v is not None:
+ params["port_id"] = v
+
+ if not params:
+ return params
+
+ params = {"publicip": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "publicips/{id}")
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "publicips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "create_time"], array_index)
+ r["create_time"] = v
+
+ v = r.get("dedicated_bandwidth")
+ v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
+ r["dedicated_bandwidth"] = v
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "ip_version"], array_index)
+ r["ip_version"] = v
+
+ v = navigate_value(response, ["read", "public_ip_address"], array_index)
+ r["ipv4_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "public_ipv6_address"],
+ array_index)
+ r["ipv6_address"] = v
+
+ v = navigate_value(response, ["read", "port_id"], array_index)
+ r["port_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "private_ip_address"],
+ array_index)
+ r["private_ip_address"] = v
+
+ v = r.get("shared_bandwidth_id")
+ v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
+ r["shared_bandwidth_id"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ return r
+
+
+def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
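+    # Only a dedicated bandwidth (share_type "PER") is flattened here;
+    # share_type "WHOLE" indicates a shared bandwidth, which is handled
+    # by flatten_shared_bandwidth_id below.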
+ v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+ if not (v and v == "PER"):
+ return current_value
+
+ result = current_value
+ if not result:
+ result = dict()
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+ if v is not None:
+ result["id"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_name"], array_index)
+ if v is not None:
+ result["name"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_size"], array_index)
+ if v is not None:
+ result["size"] = v
+
+ return result if result else current_value
+
+
+def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+
+ v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+
+ return v if (v1 and v1 == "WHOLE") else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicips"], None)
+
+
+def _build_identity_object(all_opts):
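+    """Builds an object with the same shape as a list-response item from
+    the user's input so the two can be compared with are_different_dicts;
+    server-generated fields are left as None.
+    """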
+ result = dict()
+
+ v = expand_list_bandwidth_id(all_opts, None)
+ result["bandwidth_id"] = v
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None)
+ result["bandwidth_name"] = v
+
+ result["bandwidth_share_type"] = None
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None)
+ result["bandwidth_size"] = v
+
+ result["create_time"] = None
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_version"], None)
+ result["ip_version"] = v
+
+ v = navigate_value(all_opts, ["port_id"], None)
+ result["port_id"] = v
+
+ result["private_ip_address"] = None
+
+ v = navigate_value(all_opts, ["ipv4_address"], None)
+ result["public_ip_address"] = v
+
+ result["public_ipv6_address"] = None
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ return result
+
+
+def expand_list_bandwidth_id(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ return sbwid
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py
new file mode 100644
index 00000000..a4d5921b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_peering_connect
+description:
+    - VPC peering management.
+short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ local_vpc_id:
+ description:
+            - Specifies the ID of the local VPC.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ required: true
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ required: true
+ suboptions:
+ vpc_id:
+ description:
+                    - Specifies the ID of the peering VPC.
+ type: str
+ required: true
+ project_id:
+ description:
+                    - Specifies the ID of the project to which the peering
+                      VPC belongs.
+ type: str
+ required: false
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a peering connect
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ community.general.hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+'''
+
+RETURN = '''
+ local_vpc_id:
+ description:
+            - Specifies the ID of the local VPC.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ returned: success
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ returned: success
+ contains:
+ vpc_id:
+ description:
+                    - Specifies the ID of the peering VPC.
+ type: str
+ returned: success
+ project_id:
+ description:
+                    - Specifies the ID of the project to which the peering
+                      VPC belongs.
+ type: str
+ returned: success
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ local_vpc_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ peering_vpc=dict(type='dict', required=True, options=dict(
+ vpc_id=dict(type='str', required=True),
+ project_id=dict(type='str')
+ )),
+ description=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
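+    # Reconcile desired and actual state: look the resource up (by id or by
+    # matching attributes), then create/update it for state=present or
+    # delete it for state=absent, honouring check mode.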
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "local_vpc_id": module.params.get("local_vpc_id"),
+ "name": module.params.get("name"),
+ "peering_vpc": module.params.get("peering_vpc"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["peering", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
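+    # The create timeout string (e.g. '15m') is reused for delete and
+    # converted to seconds before polling until the peering disappears.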
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["local_vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/peerings" + query_link
+
+ result = []
+ p = {'marker': ''}
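+    # Marker-based pagination: fetch pages of 10 until the API returns an
+    # empty page or more than one matching resource has been found.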
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
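+    # Only options with non-empty values are sent; the payload is wrapped
+    # under the "peering" key expected by the create API.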
+ params = dict()
+
+ v = expand_create_accept_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["accept_vpc_info"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_request_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["request_vpc_info"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def expand_create_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ if not is_empty_value(v):
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def expand_create_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = ""
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/peerings"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "peering_id": ["peering", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data)
+
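+    # Poll the peering until it becomes ACTIVE; PENDING_ACCEPTANCE is
+    # treated as a pending state until the timeout elapses.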
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["peering", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["PENDING_ACCEPTANCE"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peering"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_read_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"],
+ array_index)
+ r["local_vpc_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = r.get("peering_vpc")
+ v = flatten_peering_vpc(response, array_index, v, exclude_output)
+ r["peering_vpc"] = v
+
+ return r
+
+
+def flatten_peering_vpc(d, array_index, current_value, exclude_output):
+ result = current_value
+ has_init_value = True
+ if not result:
+ result = dict()
+ has_init_value = False
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"],
+ array_index)
+ result["project_id"] = v
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index)
+ result["vpc_id"] = v
+
+ if has_init_value:
+ return result
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peerings"], None)
+
+
+def _build_identity_object(all_opts):
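+    # Build the expected resource from the module options; attributes that
+    # cannot be derived from user input are left as None.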
+ result = dict()
+
+ v = expand_list_accept_vpc_info(all_opts, None)
+ result["accept_vpc_info"] = v
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = expand_list_request_vpc_info(all_opts, None)
+ result["request_vpc_info"] = v
+
+ result["status"] = None
+
+ return result
+
+
+def expand_list_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = None
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_list_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py
new file mode 100644
index 00000000..cf0718f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py
@@ -0,0 +1,1160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_port
+description:
+    - VPC port management.
+short_description: Creates a resource of Vpc/Port in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                - The timeout for the create operation.
+ type: str
+ default: '15m'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ required: true
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ required: false
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description:
+                    - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ required: false
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ required: false
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ required: false
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ required: false
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ required: false
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ required: false
+ security_groups:
+ description:
+            - Specifies the IDs of the security groups.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ community.general.hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ returned: success
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ returned: success
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+                    - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ returned: success
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ returned: success
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ returned: success
+ contains:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ returned: success
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ returned: success
+ security_groups:
+ description:
+            - Specifies the IDs of the security groups.
+ type: list
+ returned: success
+ mac_address:
+ description:
+ - Specifies the port MAC address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ subnet_id=dict(type='str', required=True),
+ admin_state_up=dict(type='bool'),
+ allowed_address_pairs=dict(
+ type='list', elements='dict',
+ options=dict(
+ ip_address=dict(type='str'),
+ mac_address=dict(type='str')
+ ),
+ ),
+ extra_dhcp_opts=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ value=dict(type='str')
+ )),
+ ip_address=dict(type='str'),
+ name=dict(type='str'),
+ security_groups=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "admin_state_up": module.params.get("admin_state_up"),
+ "allowed_address_pairs": module.params.get("allowed_address_pairs"),
+ "extra_dhcp_opts": module.params.get("extra_dhcp_opts"),
+ "ip_address": module.params.get("ip_address"),
+ "name": module.params.get("name"),
+ "security_groups": module.params.get("security_groups"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["port", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "ports/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ array_index = {
+ "read.fixed_ips": 0,
+ }
+
+ return update_properties(module, res, array_index, exclude_output)
+
+
+def _build_query_link(opts):
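+    # Turn the filterable options (network_id, name, admin_state_up) into
+    # query parameters; everything else is matched client-side in
+    # search_resource().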
+ query_params = []
+
+ v = navigate_value(opts, ["subnet_id"])
+ if v:
+ query_params.append("network_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ v = navigate_value(opts, ["admin_state_up"])
+ if v:
+ query_params.append("admin_state_up=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "ports" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_state_up"], None)
+ if not is_empty_value(v):
+ params["admin_state_up"] = v
+
+ v = expand_create_allowed_address_pairs(opts, None)
+ if not is_empty_value(v):
+ params["allowed_address_pairs"] = v
+
+ v = expand_create_extra_dhcp_opts(opts, None)
+ if not is_empty_value(v):
+ params["extra_dhcp_opts"] = v
+
+ v = expand_create_fixed_ips(opts, None)
+ if not is_empty_value(v):
+ params["fixed_ips"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["network_id"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_create_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_fixed_ips(d, array_index):
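+    # The create API takes fixed_ips as a list; the single optional
+    # ip_address option is wrapped into a one-element list here.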
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_create_request(module, params, client):
+ url = "ports"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "port_id": ["port", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "ports/{port_id}", data)
+
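+    # Poll the port until it reaches ACTIVE or DOWN; BUILD is treated as a
+    # pending state until the timeout elapses.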
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["port", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ ["BUILD"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = expand_update_allowed_address_pairs(opts, None)
+ if v is not None:
+ params["allowed_address_pairs"] = v
+
+ v = expand_update_extra_dhcp_opts(opts, None)
+ if v is not None:
+ params["extra_dhcp_opts"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_update_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_update_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "ports/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["port"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_read_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_read_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "admin_state_up"], array_index)
+ r["admin_state_up"] = v
+
+ v = r.get("allowed_address_pairs")
+ v = flatten_allowed_address_pairs(response, array_index, v, exclude_output)
+ r["allowed_address_pairs"] = v
+
+ v = r.get("extra_dhcp_opts")
+ v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output)
+ r["extra_dhcp_opts"] = v
+
+ v = navigate_value(response, ["read", "fixed_ips", "ip_address"],
+ array_index)
+ r["ip_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "mac_address"], array_index)
+ r["mac_address"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "security_groups"], array_index)
+ r["security_groups"] = v
+
+ v = navigate_value(response, ["read", "network_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def flatten_allowed_address_pairs(d, array_index,
+ current_value, exclude_output):
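+    # Overlay the allowed address pairs returned by the API onto the
+    # user-supplied list, element by element.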
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "allowed_address_pairs"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.allowed_address_pairs"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"],
+ new_array_index)
+ val["ip_address"] = v
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"],
+ new_array_index)
+ val["mac_address"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "extra_dhcp_opts"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.extra_dhcp_opts"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"],
+ new_array_index)
+ val["name"] = v
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"],
+ new_array_index)
+ val["value"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["ports"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["admin_state_up"], None)
+ result["admin_state_up"] = v
+
+ v = expand_list_allowed_address_pairs(all_opts, None)
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = None
+
+ result["binding_vnic_type"] = None
+
+ result["device_id"] = None
+
+ result["device_owner"] = None
+
+ result["dns_name"] = None
+
+ v = expand_list_extra_dhcp_opts(all_opts, None)
+ result["extra_dhcp_opts"] = v
+
+ v = expand_list_fixed_ips(all_opts, None)
+ result["fixed_ips"] = v
+
+ result["id"] = None
+
+ result["mac_address"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["network_id"] = v
+
+ v = navigate_value(all_opts, ["security_groups"], None)
+ result["security_groups"] = v
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ return result
+
+
+def expand_list_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ transformed["mac_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ transformed["opt_value"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ transformed["ip_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_list_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_list_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py
new file mode 100644
index 00000000..901755f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_private_ip
+description:
+    - VPC private IP management.
+short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
+notes:
+    - If the I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private IP selection.
+    - I(subnet_id) and I(ip_address) are used for private IP selection. If more than one private IP matching these options exists, execution is aborted.
+    - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+              assigned. Cannot be changed after creating the private IP.
+ type: str
+ required: true
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address. Cannot be changed after
+              creating the private IP.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a private ip
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a private ip
+ community.general.hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ subnet_id=dict(type='str', required=True),
+ ip_address=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s)of an"
+ " existing resource.(%s)" % (current, expect, module.params.get('id')))
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "ip_address": module.params.get("ip_address"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
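+    # The create API returns a list of private IPs; take the id of the
+    # first (and only) element that was requested.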
+ module.params['id'] = navigate_value(r, ["privateips", "id"],
+ {"privateips": 0})
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
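+    # No server-side filters are applied here; the list is only paged with
+    # marker/limit and matching happens client-side.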
+ query_link = "?marker={marker}&limit=10"
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = build_path(module, "subnets/{subnet_id}/privateips") + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_address"], None)
+ if not is_empty_value(v):
+ params["ip_address"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["subnet_id"] = v
+
+ if not params:
+ return params
+
+ params = {"privateips": [params]}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "privateips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "privateips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "privateips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "ip_address"], array_index)
+ r["ip_address"] = v
+
+ v = navigate_value(response, ["read", "subnet_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_address"], None)
+ result["ip_address"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["subnet_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py
new file mode 100644
index 00000000..31829dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_route
+description:
+    - VPC route management.
+short_description: Creates a resource of Vpc/Route in Huawei Cloud
+notes:
+    - If the I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection.
+    - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route matching these options exists, execution is aborted.
+    - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ required: true
+ next_hop:
+ description:
+            - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ required: true
+ vpc_id:
+ description:
+            - Specifies the VPC ID to which the route is added.
+ type: str
+ required: true
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ required: false
+ default: 'peering'
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a peering connect
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ register: connect
+- name: Create a route
+ community.general.hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+'''
+
+RETURN = '''
+ id:
+ description:
+ - UUID of the route.
+ type: str
+ returned: success
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ returned: success
+ next_hop:
+ description:
+            - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+            - Specifies the VPC ID to which the route is added.
+ type: str
+ returned: success
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ destination=dict(type='str', required=True),
+ next_hop=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ type=dict(type='str', default='peering'),
+ id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = get_resource_by_id(config)
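+            # An explicit id takes precedence; the other options must then
+            # match the existing route, since routes cannot be updated in
+            # place.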
+ if module.params['state'] == 'present':
+ opts = user_input_parameters(module)
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing route.(%s)" % (resource, opts,
+ config.module.params.get(
+ 'id')))
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "destination": module.params.get("destination"),
+ "next_hop": module.params.get("next_hop"),
+ "type": module.params.get("type"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["route", "id"])
+
+ result = update_properties(module, {"read": fill_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+
+def get_resource_by_id(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_resp_body(r)
+
+ result = update_properties(module, res, None, exclude_output)
+ return result
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["type"])
+ if v:
+ query_params.append("type=" + str(v))
+
+ v = navigate_value(opts, ["destination"])
+ if v:
+ query_params.append("destination=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/routes" + query_link
+
+ result = []
+ p = {'marker': ''}
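+ # Page through the listing 10 items at a time, using the id of the
+ # last item on each page as the next marker; the scan stops early once
+ # more than one match accumulates, since the caller treats that as an
+ # error.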
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["destination"], None)
+ if not is_empty_value(v):
+ params["destination"] = v
+
+ v = navigate_value(opts, ["next_hop"], None)
+ if not is_empty_value(v):
+ params["nexthop"] = v
+
+ v = navigate_value(opts, ["type"], None)
+ if not is_empty_value(v):
+ params["type"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"route": params}
+
+ return params
+
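+# A hedged example of the payload this builds (hypothetical values):
+# build_create_parameters({"destination": "192.168.0.0/24", "next_hop":
+# "peering-id", "type": "peering", "vpc_id": "vpc-id", "id": None})
+# returns {"route": {"destination": "192.168.0.0/24", "nexthop":
+# "peering-id", "type": "peering", "vpc_id": "vpc-id"}}; note that the
+# next_hop option maps to the API field "nexthop".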
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/routes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["route"], None)
+
+
+def fill_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "destination"], array_index)
+ r["destination"] = v
+
+ v = navigate_value(response, ["read", "nexthop"], array_index)
+ r["next_hop"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["routes"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["destination"], None)
+ result["destination"] = v
+
+ v = navigate_value(all_opts, ["id"], None)
+ result["id"] = v
+
+ v = navigate_value(all_opts, ["next_hop"], None)
+ result["nexthop"] = v
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
new file mode 100644
index 00000000..60351815
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
@@ -0,0 +1,645 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group
+description:
+ - vpc security group management.
+short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+notes:
+ - If the I(id) option is provided, it takes precedence over I(name),
+ I(enterprise_project_id) and I(vpc_id) for security group selection.
+ - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
+ group selection. If more than one security group with these options exists,
+ execution is aborted.
+ - No parameter supports updating. If any option is changed, the module
+ will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ required: false
+ default: 0
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group
+- name: Create a security group
+ community.general.hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+'''
+
+RETURN = '''
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ returned: success
+ rules:
+ description:
+ - Specifies the security group rule, which ensures that resources
+ in the security group can communicate with one another.
+ type: complex
+ returned: success
+ contains:
+ description:
+ description:
+ - Provides supplementary information about the security
+ group rule.
+ type: str
+ returned: success
+ direction:
+ description:
+ - Specifies the direction of access control. The value can
+ be egress or ingress.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4
+ or IPv6.
+ type: str
+ returned: success
+ id:
+ description:
+ - Specifies the security group rule ID.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to
+ 65535. If the protocol is not icmp, the value cannot be
+ smaller than the port_range_min value. An empty value
+ indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1
+ to 65535. The value cannot be greater than the
+ port_range_max value. An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp,
+ udp, or others. If the parameter is left blank, the
+ security group supports all protocols.
+ type: str
+ returned: success
+ remote_address_group_id:
+ description:
+ - Specifies the ID of remote IP address group.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control
+ direction is set to egress, the parameter specifies the
+ source IP address. If the access control direction is set
+ to ingress, the parameter specifies the destination IP
+ address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ name=dict(type='str', required=True),
+ enterprise_project_id=dict(type='str'),
+ vpc_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = read_resource(config)
+ if module.params['state'] == 'present':
+ check_resource_option(resource, module)
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def check_resource_option(resource, module):
+ opts = user_input_parameters(module)
+
+ resource = {
+ "enterprise_project_id": resource.get("enterprise_project_id"),
+ "name": resource.get("name"),
+ "vpc_id": resource.get("vpc_id"),
+ "id": resource.get("id"),
+ }
+
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (resource, opts,
+ module.params.get('id')))
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group", "id"])
+
+ result = update_properties(module, {"read": fill_read_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-groups" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "security-groups"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-groups/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-groups/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ if not exclude_output:
+ v = r.get("rules")
+ v = flatten_rules(response, array_index, v, exclude_output)
+ r["rules"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def flatten_rules(d, array_index, current_value, exclude_output):
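+ # Merge the security_group_rules read from the API into the flattened
+ # "rules" list, one entry per rule; entries whose fields are all None
+ # are dropped.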
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "security_group_rules"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.security_group_rules"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "description"],
+ new_array_index)
+ val["description"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "direction"],
+ new_array_index)
+ val["direction"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "ethertype"],
+ new_array_index)
+ val["ethertype"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "id"],
+ new_array_index)
+ val["id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_max"],
+ new_array_index)
+ val["port_range_max"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_min"],
+ new_array_index)
+ val["port_range_min"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "protocol"],
+ new_array_index)
+ val["protocol"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"],
+ new_array_index)
+ val["remote_address_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"],
+ new_array_index)
+ val["remote_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"],
+ new_array_index)
+ val["remote_ip_prefix"] = v
+
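+ # Overwrite an existing entry in place; otherwise append the rule
+ # only if at least one of its fields is set.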
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_groups"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["security_group_rules"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
new file mode 100644
index 00000000..f92c8276
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
@@ -0,0 +1,570 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group_rule
+description:
+ - vpc security group rule management.
+short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+notes:
+ - If the I(id) option is provided, it takes precedence over
+ I(enterprise_project_id) for security group rule selection.
+ - I(security_group_id) is used for security group rule selection. If more
+ than one security group rule with this option exists, execution is
+ aborted.
+ - No parameter supports updating. If any option is changed, the module
+ will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ required: true
+ security_group_id:
+ description:
+ - Specifies the ID of the security group to which the rule
+ belongs.
+ type: str
+ required: true
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ required: false
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ required: false
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ required: false
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ required: false
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ required: false
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ required: false
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group rule
+- name: Create a security group
+ community.general.hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ register: sg
+- name: Create a security group rule
+ community.general.hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 22
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+'''
+
+RETURN = '''
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ returned: success
+ security_group_id:
+ description:
+ - Specifies the ID of the security group to which the rule
+ belongs.
+ type: str
+ returned: success
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ direction=dict(type='str', required=True),
+ security_group_id=dict(type='str', required=True),
+ description=dict(type='str'),
+ ethertype=dict(type='str'),
+ port_range_max=dict(type='int'),
+ port_range_min=dict(type='int'),
+ protocol=dict(type='str'),
+ remote_group_id=dict(type='str'),
+ remote_ip_prefix=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
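+ # An id short-circuits the search; the actual read happens later via
+ # read_resource(), which is also how option drift is detected.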
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (current, expect, module.params.get('id')))
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "direction": module.params.get("direction"),
+ "ethertype": module.params.get("ethertype"),
+ "port_range_max": module.params.get("port_range_max"),
+ "port_range_min": module.params.get("port_range_min"),
+ "protocol": module.params.get("protocol"),
+ "remote_group_id": module.params.get("remote_group_id"),
+ "remote_ip_prefix": module.params.get("remote_ip_prefix"),
+ "security_group_id": module.params.get("security_group_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group_rule", "id"])
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["security_group_id"])
+ if v:
+ query_link += "&security_group_id=" + str(v)
+
+ return query_link
+
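+# Only security_group_id is filtered server-side here; the remaining
+# options are matched client-side in search_resource() against the
+# identity object built by _build_identity_object().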
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-group-rules" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["direction"], None)
+ if not is_empty_value(v):
+ params["direction"] = v
+
+ v = navigate_value(opts, ["ethertype"], None)
+ if not is_empty_value(v):
+ params["ethertype"] = v
+
+ v = navigate_value(opts, ["port_range_max"], None)
+ if not is_empty_value(v):
+ params["port_range_max"] = v
+
+ v = navigate_value(opts, ["port_range_min"], None)
+ if not is_empty_value(v):
+ params["port_range_min"] = v
+
+ v = navigate_value(opts, ["protocol"], None)
+ if not is_empty_value(v):
+ params["protocol"] = v
+
+ v = navigate_value(opts, ["remote_group_id"], None)
+ if not is_empty_value(v):
+ params["remote_group_id"] = v
+
+ v = navigate_value(opts, ["remote_ip_prefix"], None)
+ if not is_empty_value(v):
+ params["remote_ip_prefix"] = v
+
+ v = navigate_value(opts, ["security_group_id"], None)
+ if not is_empty_value(v):
+ params["security_group_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group_rule": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "security-group-rules"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rule"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "direction"], array_index)
+ r["direction"] = v
+
+ v = navigate_value(response, ["read", "ethertype"], array_index)
+ r["ethertype"] = v
+
+ v = navigate_value(response, ["read", "port_range_max"], array_index)
+ r["port_range_max"] = v
+
+ v = navigate_value(response, ["read", "port_range_min"], array_index)
+ r["port_range_min"] = v
+
+ v = navigate_value(response, ["read", "protocol"], array_index)
+ r["protocol"] = v
+
+ v = navigate_value(response, ["read", "remote_group_id"], array_index)
+ r["remote_group_id"] = v
+
+ v = navigate_value(response, ["read", "remote_ip_prefix"], array_index)
+ r["remote_ip_prefix"] = v
+
+ v = navigate_value(response, ["read", "security_group_id"], array_index)
+ r["security_group_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rules"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["direction"], None)
+ result["direction"] = v
+
+ v = navigate_value(all_opts, ["ethertype"], None)
+ result["ethertype"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["port_range_max"], None)
+ result["port_range_max"] = v
+
+ v = navigate_value(all_opts, ["port_range_min"], None)
+ result["port_range_min"] = v
+
+ v = navigate_value(all_opts, ["protocol"], None)
+ result["protocol"] = v
+
+ result["remote_address_group_id"] = None
+
+ v = navigate_value(all_opts, ["remote_group_id"], None)
+ result["remote_group_id"] = v
+
+ v = navigate_value(all_opts, ["remote_ip_prefix"], None)
+ result["remote_ip_prefix"] = v
+
+ v = navigate_value(all_opts, ["security_group_id"], None)
+ result["security_group_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py
new file mode 100644
index 00000000..ccf18050
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py
@@ -0,0 +1,734 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_subnet
+description:
+ - subnet management.
+short_description: Creates a resource of Vpc/Subnet in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+ - The timeout for the update operation.
+ type: str
+ default: '15m'
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs. Cannot
+ be changed after creating the subnet.
+ type: str
+ required: true
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs. Cannot be changed
+ after creating the subnet.
+ type: str
+ required: false
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+ be true (enabled) or false (disabled); the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ required: false
+ dns_address:
+ description:
+ - Specifies the DNS server addresses for the subnet. Addresses
+ at the head of the list take precedence.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create subnet
+- name: Create vpc
+ community.general.hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ community.general.hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+'''
+
+RETURN = '''
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28.
+ type: str
+ returned: success
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs.
+ type: str
+ returned: success
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs.
+ type: str
+ returned: success
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+ be true (enabled) or false (disabled); the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ returned: success
+ dns_address:
+ description:
+ - Specifies the DNS server addresses for the subnet. Addresses
+ at the head of the list take precedence.
+ type: list
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ ), default=dict()),
+ cidr=dict(type='str', required=True),
+ gateway_ip=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ availability_zone=dict(type='str'),
+ dhcp_enable=dict(type='bool'),
+ dns_address=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get('id'):
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "cidr": module.params.get("cidr"),
+ "dhcp_enable": module.params.get("dhcp_enable"),
+ "dns_address": module.params.get("dns_address"),
+ "gateway_ip": module.params.get("gateway_ip"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["subnet", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "subnets/{id}")
+
+ def _refresh_status():
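+ # A 404 on the read URL means the subnet is gone; other errors keep
+ # the poll going, and a successful read means deletion is still
+ # pending.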
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
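+ # There is no delete timeout option, so the create timeout is reused
+ # for the deletion wait.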
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_link += "&vpc_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "subnets" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["cidr"], None)
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_create_dns_list(opts, None)
+ if not is_empty_value(v):
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["gateway_ip"], None)
+ if not is_empty_value(v):
+ params["gateway_ip"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_primary_dns(opts, None)
+ if not is_empty_value(v):
+ params["primary_dns"] = v
+
+ v = expand_create_secondary_dns(opts, None)
+ if not is_empty_value(v):
+ params["secondary_dns"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
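+# A reading of the expand_* helpers below (not an API guarantee): the
+# first DNS address becomes primary_dns, the second secondary_dns, and
+# the full list is sent as dnsList only when more than two addresses
+# are given.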
+def expand_create_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v if (v and len(v) > 2) else []
+
+
+def expand_create_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_create_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_create_request(module, params, client):
+ url = "subnets"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
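+ # Poll the new subnet until its status reaches ACTIVE; UNKNOWN is
+ # tolerated as a transient state while waiting.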
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_update_dns_list(opts, None)
+ if v is not None:
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_update_primary_dns(opts, None)
+ if v is not None:
+ params["primary_dns"] = v
+
+ v = expand_update_secondary_dns(opts, None)
+ if v is not None:
+ params["secondary_dns"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
+def expand_update_dns_list(d, array_index):
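+ # An unset or empty dns_address clears dnsList by returning []; one or
+ # two addresses leave it untouched (None), unlike the create variant.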
+ v = navigate_value(d, ["dns_address"], array_index)
+ if v:
+ if len(v) > 2:
+ return v
+ return None
+ return []
+
+
+def expand_update_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_update_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "subnets/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnet"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "cidr"], array_index)
+ r["cidr"] = v
+
+ v = navigate_value(response, ["read", "dhcp_enable"], array_index)
+ r["dhcp_enable"] = v
+
+ v = navigate_value(response, ["read", "dnsList"], array_index)
+ r["dns_address"] = v
+
+ v = navigate_value(response, ["read", "gateway_ip"], array_index)
+ r["gateway_ip"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnets"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["availability_zone"] = v
+
+ v = navigate_value(all_opts, ["cidr"], None)
+ result["cidr"] = v
+
+ v = navigate_value(all_opts, ["dhcp_enable"], None)
+ result["dhcp_enable"] = v
+
+ v = navigate_value(all_opts, ["dns_address"], None)
+ result["dnsList"] = v
+
+ v = navigate_value(all_opts, ["gateway_ip"], None)
+ result["gateway_ip"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["neutron_network_id"] = None
+
+ result["neutron_subnet_id"] = None
+
+ result["primary_dns"] = None
+
+ result["secondary_dns"] = None
+
+ result["status"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py
new file mode 100644
index 00000000..f25d7d70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_cdi_upload
+
+short_description: Upload local VM images to CDI Upload Proxy
+
+
+author: KubeVirt Team (@kubevirt)
+
+
+description:
+ - Use Openshift Python SDK to create UploadTokenRequest objects.
+ - Transfer contents of local files to the CDI Upload Proxy.
+
+options:
+ pvc_name:
+ description:
+ - Use to specify the name of the target PersistentVolumeClaim.
+ required: true
+ pvc_namespace:
+ description:
+ - Use to specify the namespace of the target PersistentVolumeClaim.
+ required: true
+ upload_host:
+ description:
+ - URL containing the host and port on which the CDI Upload Proxy is available.
+ - "More info: U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/upload.md#expose-cdi-uploadproxy-service)"
+ required: true
+ upload_host_validate_certs:
+ description:
+ - Whether or not to verify the CDI Upload Proxy's SSL certificates against your system's CA trust store.
+ default: true
+ type: bool
+ aliases: [ upload_host_verify_ssl ]
+ path:
+ description:
+ - Path of local image file to transfer.
+ required: true
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge is used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+ - requests >= 2.0.0
+'''
+
+EXAMPLES = '''
+- name: Upload local image to pvc-vm1
+ community.general.kubevirt_cdi_upload:
+ pvc_namespace: default
+ pvc_name: pvc-vm1
+ upload_host: https://localhost:8443
+ upload_host_validate_certs: false
+ path: /tmp/cirros-0.4.0-x86_64-disk.img
+'''
+
+RETURN = '''# '''
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+
+SERVICE_ARG_SPEC = {
+ 'pvc_name': {'required': True},
+ 'pvc_namespace': {'required': True},
+ 'upload_host': {'required': True},
+ 'upload_host_validate_certs': {
+ 'type': 'bool',
+ 'default': True,
+ 'aliases': ['upload_host_verify_ssl']
+ },
+ 'path': {'required': True},
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+}
+
+
+class KubeVirtCDIUpload(KubernetesRawModule):
+ def __init__(self, *args, **kwargs):
+ super(KubeVirtCDIUpload, self).__init__(*args, k8s_kind='UploadTokenRequest', **kwargs)
+
+ if not HAS_REQUESTS:
+ self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(SERVICE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ """ Module execution """
+
+ API = 'v1alpha1'
+ KIND = 'UploadTokenRequest'
+
+ self.client = self.get_api_client()
+
+ api_version = 'upload.cdi.kubevirt.io/{0}'.format(API)
+ pvc_name = self.params.get('pvc_name')
+ pvc_namespace = self.params.get('pvc_namespace')
+ upload_host = self.params.get('upload_host')
+ upload_host_verify_ssl = self.params.get('upload_host_validate_certs')
+ path = self.params.get('path')
+
+ definition = defaultdict(defaultdict)
+
+ definition['kind'] = KIND
+ definition['apiVersion'] = api_version
+
+ def_meta = definition['metadata']
+ def_meta['name'] = pvc_name
+ def_meta['namespace'] = pvc_namespace
+
+ def_spec = definition['spec']
+ def_spec['pvcName'] = pvc_name
+
+        # Open the image file first so a missing path fails before any API
+        # calls; the context manager also ensures the handle is closed once
+        # the upload finishes.
+        with open(path, 'rb') as imgfile:
+            resource = self.find_resource(KIND, api_version, fail=True)
+            definition = self.set_defaults(resource, definition)
+            result = self.perform_action(resource, definition)
+
+            headers = {'Authorization': "Bearer {0}".format(result['result']['status']['token'])}
+            url = "{0}/{1}/upload".format(upload_host, API)
+            ret = requests.post(url, data=imgfile, headers=headers, verify=upload_host_verify_ssl)
+
+ if ret.status_code != 200:
+ self.fail_request("Something went wrong while uploading data", method='POST', url=url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ self.exit_json(changed=True)
+
+ def fail_request(self, msg, **kwargs):
+ req_info = {}
+ for k, v in kwargs.items():
+ req_info['req_' + k] = v
+ self.fail_json(msg=msg, **req_info)
+
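+# Editor's sketch (hypothetical values) of the flow execute_module() automates:
+# an UploadTokenRequest yields a bearer token in its status, and the image is
+# then POSTed to the upload proxy:
+#
+#     import requests
+#     token = '<token from UploadTokenRequest status>'
+#     with open('/tmp/disk.img', 'rb') as f:
+#         requests.post('https://localhost:8443/v1alpha1/upload', data=f,
+#                       headers={'Authorization': 'Bearer ' + token},
+#                       verify=False)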
+
+def main():
+ module = KubeVirtCDIUpload()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_preset.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_preset.py
new file mode 100644
index 00000000..7e0776c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_preset.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_preset
+
+short_description: Manage KubeVirt virtual machine presets
+
+description:
+    - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machine presets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine presets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine preset.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine preset exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine preset."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: present
+ name: vmi-preset-small
+ namespace: vms
+ memory: 64M
+ selector:
+ matchLabels:
+ kubevirt.io/vmPreset: vmi-preset-small
+
+- name: Remove virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: absent
+ name: vmi-preset-small
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_preset:
+ description:
+ - The virtual machine preset managed by the user.
+ - "This dictionary contains all values returned by the KubeVirt API all options
+ are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC
+)
+
+
+KIND = 'VirtualMachineInstancePreset'
+VMP_ARG_SPEC = {
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMPreset(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VMP_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ # FIXME: Devices must be set, but we don't yet support any
+ # attributes there, remove when we do:
+ definition['spec']['domain']['devices'] = dict()
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+        # Execute the CRUD of VM:
+ dummy, definition = self.construct_vm_definition(KIND, definition, definition, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_preset': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMPreset()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_pvc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_pvc.py
new file mode 100644
index 00000000..5687c23d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_pvc.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_pvc
+
+short_description: Manage PVCs on Kubernetes
+
+
+author: KubeVirt Team (@kubevirt)
+
+description:
+    - Use the OpenShift Python SDK to manage PVCs on Kubernetes.
+    - Supports the Containerized Data Importer out of the box.
+
+options:
+ resource_definition:
+ description:
+ - "A partial YAML definition of the PVC object being created/updated. Here you can define Kubernetes
+ PVC Resource parameters not covered by this module's parameters."
+ - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+ I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+ aliases:
+ - definition
+ - inline
+ type: dict
+ state:
+ description:
+ - "Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
+ created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
+ C(present), an existing object will be patched, if its attributes differ from those specified using
+ module options and I(resource_definition)."
+ default: present
+ choices:
+ - present
+ - absent
+ force:
+ description:
+ - If set to C(True), and I(state) is C(present), an existing object will be replaced.
+ default: false
+ type: bool
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type.
+ - "This defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources."
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+            - If more than one merge_type is given, the merge_types will be tried in order.
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ name:
+ description:
+ - Use to specify a PVC object name.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Use to specify a PVC object namespace.
+ required: true
+ type: str
+ annotations:
+ description:
+ - Annotations attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ type: dict
+ labels:
+ description:
+ - Labels attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ selector:
+ description:
+ - A label query over volumes to consider for binding.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ access_modes:
+ description:
+ - Contains the desired access modes the volume should have.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes)"
+ type: list
+ size:
+ description:
+ - How much storage to allocate to the PVC.
+ type: str
+ aliases:
+ - storage
+ storage_class_name:
+ description:
+ - Name of the StorageClass required by the claim.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1)"
+ type: str
+ volume_mode:
+ description:
+ - "This defines what type of volume is required by the claim. Value of Filesystem is implied when not
+ included in claim spec. This is an alpha feature of kubernetes and may change in the future."
+ type: str
+ volume_name:
+ description:
+ - This is the binding reference to the PersistentVolume backing this claim.
+ type: str
+ cdi_source:
+ description:
+ - "If data is to be copied onto the PVC using the Containerized Data Importer you can specify the source of
+ the data (along with any additional configuration) as well as it's format."
+ - "Valid source types are: blank, http, s3, registry, pvc and upload. The last one requires using the
+ M(community.general.kubevirt_cdi_upload) module to actually perform an upload."
+ - "Source data format is specified using the optional I(content_type). Valid options are C(kubevirt)
+ (default; raw image) and C(archive) (tar.gz)."
+ - "This uses the DataVolume source syntax:
+ U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/datavolumes.md#https3registry-source)"
+ type: dict
+ wait:
+ description:
+ - "If set, this module will wait for the PVC to become bound and CDI (if enabled) to finish its operation
+ before returning."
+ - "Used only if I(state) set to C(present)."
+ - "Unless used in conjunction with I(cdi_source), this might result in a timeout, as clusters may be configured
+ to not bind PVCs until first usage."
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - Specifies how much time in seconds to wait for PVC creation to complete if I(wait) option is enabled.
+ - Default value is reasonably high due to an expectation that CDI might take a while to finish its operation.
+ type: int
+ default: 300
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create a PVC and import data from an external source
+ community.general.kubevirt_pvc:
+ name: pvc1
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ http:
+ url: https://www.source.example/path/of/data/vm.img
+ # If the URL points to a tar.gz containing the disk image, uncomment the line below:
+ #content_type: archive
+
+- name: Create a PVC as a clone from a different PVC
+ community.general.kubevirt_pvc:
+ name: pvc2
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ pvc:
+ namespace: source-ns
+ name: source-pvc
+
+- name: Create a PVC ready for data upload
+ community.general.kubevirt_pvc:
+ name: pvc3
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ upload: yes
+ # You need the kubevirt_cdi_upload module to actually upload something
+
+- name: Create a PVC with a blank raw image
+ community.general.kubevirt_pvc:
+ name: pvc4
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ blank: yes
+
+- name: Create a PVC and fill it with data from a container
+ community.general.kubevirt_pvc:
+ name: pvc5
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ registry:
+ url: "docker://kubevirt/fedora-cloud-registry-disk-demo"
+
+'''
+
+RETURN = '''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+        description: Returned only when multiple YAML documents are passed to I(src) or I(resource_definition).
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+        description: Elapsed time of the task in seconds.
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+from ansible_collections.community.general.plugins.module_utils.kubevirt import virtdict, KubeVirtRawModule
+
+
+PVC_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent'
+ ],
+ 'default': 'present'
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'resource_definition': {
+ 'type': 'dict',
+ 'aliases': ['definition', 'inline']
+ },
+ 'labels': {'type': 'dict'},
+ 'annotations': {'type': 'dict'},
+ 'selector': {'type': 'dict'},
+ 'access_modes': {'type': 'list'},
+ 'size': {
+ 'type': 'str',
+ 'aliases': ['storage']
+ },
+ 'storage_class_name': {'type': 'str'},
+ 'volume_mode': {'type': 'str'},
+ 'volume_name': {'type': 'str'},
+ 'cdi_source': {'type': 'dict'},
+ 'wait': {
+ 'type': 'bool',
+ 'default': False
+ },
+ 'wait_timeout': {
+ 'type': 'int',
+ 'default': 300
+ }
+}
+
+
+class CreatePVCFailed(Exception):
+ pass
+
+
+class KubevirtPVC(KubernetesRawModule):
+ def __init__(self):
+ super(KubevirtPVC, self).__init__()
+
+ @property
+ def argspec(self):
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(PVC_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _parse_cdi_source(self, _cdi_src, metadata):
+ cdi_src = copy.deepcopy(_cdi_src)
+ annotations = metadata['annotations']
+ labels = metadata['labels']
+
+ valid_content_types = ('kubevirt', 'archive')
+ valid_sources = ('http', 's3', 'pvc', 'upload', 'blank', 'registry')
+
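+        # Editor's illustration (hypothetical input): for
+        #     cdi_source={'http': {'url': 'https://host/disk.img'}}
+        # this method sets labels['app'] = 'containerized-data-importer' and
+        # the annotations cdi.kubevirt.io/storage.import.source = 'http' and
+        # cdi.kubevirt.io/storage.import.endpoint = 'https://host/disk.img'.
+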
+ if 'content_type' in cdi_src:
+ content_type = cdi_src.pop('content_type')
+ if content_type not in valid_content_types:
+ raise ValueError("cdi_source.content_type must be one of {0}, not: '{1}'".format(
+ valid_content_types, content_type))
+ annotations['cdi.kubevirt.io/storage.contentType'] = content_type
+
+ if len(cdi_src) != 1:
+ raise ValueError("You must specify exactly one valid CDI source, not {0}: {1}".format(len(cdi_src), tuple(cdi_src.keys())))
+
+ src_type = tuple(cdi_src.keys())[0]
+ src_spec = cdi_src[src_type]
+
+ if src_type not in valid_sources:
+ raise ValueError("Got an invalid CDI source type: '{0}', must be one of {1}".format(src_type, valid_sources))
+
+ # True for all cases save one
+ labels['app'] = 'containerized-data-importer'
+
+ if src_type == 'upload':
+ annotations['cdi.kubevirt.io/storage.upload.target'] = ''
+ elif src_type == 'blank':
+ annotations['cdi.kubevirt.io/storage.import.source'] = 'none'
+ elif src_type == 'pvc':
+ if not isinstance(src_spec, dict) or sorted(src_spec.keys()) != ['name', 'namespace']:
+ raise ValueError("CDI Source 'pvc' requires specifying 'name' and 'namespace' (and nothing else)")
+ labels['app'] = 'host-assisted-cloning'
+ annotations['k8s.io/CloneRequest'] = '{0}/{1}'.format(src_spec['namespace'], src_spec['name'])
+ elif src_type in ('http', 's3', 'registry'):
+ if not isinstance(src_spec, dict) or 'url' not in src_spec:
+ raise ValueError("CDI Source '{0}' requires specifying 'url'".format(src_type))
+ unknown_params = set(src_spec.keys()).difference(set(('url', 'secretRef', 'certConfigMap')))
+ if unknown_params:
+ raise ValueError("CDI Source '{0}' does not know recognize params: {1}".format(src_type, tuple(unknown_params)))
+ annotations['cdi.kubevirt.io/storage.import.source'] = src_type
+ annotations['cdi.kubevirt.io/storage.import.endpoint'] = src_spec['url']
+ if 'secretRef' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.secretName'] = src_spec['secretRef']
+ if 'certConfigMap' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.certConfigMap'] = src_spec['certConfigMap']
+
+ def _wait_for_creation(self, resource, uid):
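+        # Watch PVC events until ours (matched by uid) reaches phase 'Bound';
+        # when a CDI source is in play, the cdi.kubevirt.io/storage.pod.phase
+        # annotation must also reach 'Running' (uploads) or 'Succeeded'.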
+ return_obj = None
+ desired_cdi_status = 'Succeeded'
+        use_cdi = bool(self.params.get('cdi_source'))
+ if use_cdi and 'upload' in self.params['cdi_source']:
+ desired_cdi_status = 'Running'
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ metadata = entity.metadata
+ if not hasattr(metadata, 'uid') or metadata.uid != uid:
+ continue
+ if entity.status.phase == 'Bound':
+ if use_cdi and hasattr(metadata, 'annotations'):
+ import_status = metadata.annotations.get('cdi.kubevirt.io/storage.pod.phase')
+ if import_status == desired_cdi_status:
+ return_obj = entity
+ break
+ elif import_status == 'Failed':
+ raise CreatePVCFailed("PVC creation incomplete; importing data failed")
+ else:
+ return_obj = entity
+ break
+ elif entity.status.phase == 'Failed':
+ raise CreatePVCFailed("PVC creation failed")
+
+ if not return_obj:
+ raise CreatePVCFailed("PVC creation timed out")
+
+ return self.fix_serialization(return_obj)
+
+ def execute_module(self):
+ KIND = 'PersistentVolumeClaim'
+ API = 'v1'
+
+ definition = virtdict()
+ definition['kind'] = KIND
+ definition['apiVersion'] = API
+
+ metadata = definition['metadata']
+ metadata['name'] = self.params.get('name')
+ metadata['namespace'] = self.params.get('namespace')
+ if self.params.get('annotations'):
+ metadata['annotations'] = self.params.get('annotations')
+ if self.params.get('labels'):
+ metadata['labels'] = self.params.get('labels')
+ if self.params.get('cdi_source'):
+ self._parse_cdi_source(self.params.get('cdi_source'), metadata)
+
+ spec = definition['spec']
+ if self.params.get('access_modes'):
+ spec['accessModes'] = self.params.get('access_modes')
+ if self.params.get('size'):
+ spec['resources']['requests']['storage'] = self.params.get('size')
+ if self.params.get('storage_class_name'):
+ spec['storageClassName'] = self.params.get('storage_class_name')
+ if self.params.get('selector'):
+ spec['selector'] = self.params.get('selector')
+ if self.params.get('volume_mode'):
+ spec['volumeMode'] = self.params.get('volume_mode')
+ if self.params.get('volume_name'):
+ spec['volumeName'] = self.params.get('volume_name')
+
+ # 'resource_definition:' has lower priority than module parameters
+ definition = dict(KubeVirtRawModule.merge_dicts(definition, self.resource_definitions[0]))
+
+ self.client = self.get_api_client()
+ resource = self.find_resource(KIND, API, fail=True)
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+ if self.params.get('wait') and self.params.get('state') == 'present':
+ result['result'] = self._wait_for_creation(resource, result['result']['metadata']['uid'])
+
+ self.exit_json(**result)
+
+
+def main():
+ module = KubevirtPVC()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_rs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_rs.py
new file mode 100644
index 00000000..d1fdc394
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_rs.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_rs
+
+short_description: Manage KubeVirt virtual machine replica sets
+
+description:
+    - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machine replica sets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine replica sets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine replica set.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine replica set exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine."
+ required: true
+ type: dict
+ replicas:
+ description:
+            - Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
+            - Replicas defaults to 1 for a newly created replica set.
+ type: int
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: present
+ name: myvmir
+ namespace: vms
+ wait: true
+ replicas: 3
+ memory: 64M
+ labels:
+ myvmi: myvmi
+ selector:
+ matchLabels:
+ myvmi: myvmi
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Remove virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: absent
+ name: myvmir
+ namespace: vms
+ wait: true
+'''
+
+RETURN = '''
+kubevirt_rs:
+ description:
+        - The virtual machine replica set managed by the user.
+        - "This dictionary contains all values returned by the KubeVirt API; all options
+           are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+)
+
+
+KIND = 'VirtualMachineInstanceReplicaSet'
+VMIR_ARG_SPEC = {
+ 'replicas': {'type': 'int'},
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMIRS(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
+ argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
+ return argument_spec
+
+ def wait_for_replicas(self, replicas):
+ """ Wait for ready_replicas to equal the requested number of replicas. """
+ resource = self.find_supported_resource(KIND)
+ return_obj = None
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ readyReplicas = status.get('readyReplicas', 0)
+ if readyReplicas == replicas:
+ return_obj = entity
+ break
+
+ if not return_obj:
+ self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas is None:
+ self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas != replicas:
+ self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
+ "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
+ return return_obj.to_dict()
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+ replicas = self.params.get('replicas')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ if replicas is not None:
+ definition['spec']['replicas'] = replicas
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+        # Execute the CRUD of VM:
+ template = definition['spec']['template']
+ dummy, definition = self.construct_vm_definition(KIND, definition, template, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # When creating a new VMIRS object without specifying `replicas`, assume it's '1' to make the
+ # wait logic work correctly
+ if changed and result_crud['method'] == 'create' and replicas is None:
+ replicas = 1
+
+ # Wait for the new number of ready replicas after a CRUD update
+ # Note1: doesn't work correctly when reducing number of replicas due to how VMIRS works (as of kubevirt 1.5.0)
+ # Note2: not the place to wait for the VMIs to get deleted when deleting the VMIRS object; that *might* be
+ # achievable in execute_crud(); keywords: orphanDependents, propagationPolicy, DeleteOptions
+ if self.params.get('wait') and replicas is not None and self.params.get('state') == 'present':
+ result = self.wait_for_replicas(replicas)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_rs': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMIRS()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_template.py
new file mode 100644
index 00000000..3054b1a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_template.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_template
+
+short_description: Manage KubeVirt templates
+
+description:
+    - Use the OpenShift Python SDK to manage the state of KubeVirt templates.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ name:
+ description:
+ - Name of the Template object.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the Template object exists.
+ required: true
+ type: str
+ objects:
+ description:
+ - List of any valid API objects, such as a I(DeploymentConfig), I(Service), etc. The object
+ will be created exactly as defined here, with any parameter values substituted in prior to creation.
+ The definition of these objects can reference parameters defined earlier.
+            - As part of the list the user can also pass the I(VirtualMachine) kind. When passing I(VirtualMachine) the
+              user must use the Ansible structure of the parameters, not the Kubernetes API structure. For more information
+              please take a look at the M(community.general.kubevirt_vm) module and at the EXAMPLES section, where you can see an example.
+ type: list
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+ display_name:
+ description:
+ - "A brief, user-friendly name, which can be employed by user interfaces."
+ type: str
+ description:
+ description:
+ - A description of the template.
+            - Include enough detail that the user will understand what is being deployed
+              and any caveats they need to know before deploying. It should also provide links to additional information,
+              such as a README file.
+ type: str
+ long_description:
+ description:
+ - "Additional template description. This may be displayed by the service catalog, for example."
+ type: str
+ provider_display_name:
+ description:
+ - "The name of the person or organization providing the template."
+ type: str
+ documentation_url:
+ description:
+ - "A URL referencing further documentation for the template."
+ type: str
+ support_url:
+ description:
+ - "A URL where support can be obtained for the template."
+ type: str
+ editable:
+ description:
+ - "Extension for hinting at which elements should be considered editable.
+ List of jsonpath selectors. The jsonpath root is the objects: element of the template."
+            - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: list
+ default_disk:
+ description:
+ - "The goal of default disk is to define what kind of disk is supported by the OS mainly in
+ terms of bus (ide, scsi, sata, virtio, ...)"
+ - The C(default_disk) parameter define configuration overlay for disks that will be applied on top of disks
+ during virtual machine creation to define global compatibility and/or performance defaults defined here.
+ - This is parameter can be used only when kubevirt addon is installed on your openshift cluster.
+ type: dict
+ default_volume:
+ description:
+ - "The goal of default volume is to be able to configure mostly performance parameters like
+ caches if those are exposed by the underlying volume implementation."
+            - The C(default_volume) parameter defines a configuration overlay for volumes that will be applied on top of volumes
+              during virtual machine creation, providing the global compatibility and/or performance defaults defined here.
+            - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: dict
+ default_nic:
+ description:
+ - "The goal of default network is similar to I(default_disk) and should be used as a template
+ to ensure OS compatibility and performance."
+ - The C(default_nic) parameter define configuration overlay for nic that will be applied on top of nics
+ during virtual machine creation to define global compatibility and/or performance defaults defined here.
+ - This is parameter can be used only when kubevirt addon is installed on your openshift cluster.
+ type: dict
+ default_network:
+ description:
+ - "The goal of default network is similar to I(default_volume) and should be used as a template
+ that specifies performance and connection parameters (L2 bridge for example)"
+ - The C(default_network) parameter define configuration overlay for networks that will be applied on top of networks
+ during virtual machine creation to define global compatibility and/or performance defaults defined here.
+ - This is parameter can be used only when kubevirt addon is installed on your openshift cluster.
+ type: dict
+ icon_class:
+ description:
+ - "An icon to be displayed with your template in the web console. Choose from our existing logo
+ icons when possible. You can also use icons from FontAwesome. Alternatively, provide icons through
+ CSS customizations that can be added to an OpenShift Container Platform cluster that uses your template.
+ You must specify an icon class that exists, or it will prevent falling back to the generic icon."
+ type: str
+ parameters:
+ description:
+ - "Parameters allow a value to be supplied by the user or generated when the template is instantiated.
+ Then, that value is substituted wherever the parameter is referenced. References can be defined in any
+ field in the objects list field. This is useful for generating random passwords or allowing the user to
+ supply a host name or other user-specific value that is required to customize the template."
+ - "More information can be found at: U(https://docs.openshift.com/container-platform/3.6/dev_guide/templates.html#writing-parameters)"
+ type: list
+ version:
+ description:
+ - Template structure version.
+            - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: str
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.kubernetes.k8s_state_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create template 'mytemplate'
+ community.general.kubevirt_template:
+ state: present
+ name: myvmtemplate
+ namespace: templates
+ display_name: Generic cirros template
+ description: Basic cirros template
+ long_description: Verbose description of cirros template
+ provider_display_name: Just Be Cool, Inc.
+ documentation_url: http://theverycoolcompany.com
+ support_url: http://support.theverycoolcompany.com
+ icon_class: icon-linux
+ default_disk:
+ disk:
+ bus: virtio
+ default_nic:
+ model: virtio
+ default_network:
+ resource:
+ resourceName: bridge.network.kubevirt.io/cnvmgmt
+ default_volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ objects:
+ - name: ${NAME}
+ kind: VirtualMachine
+ memory: ${MEMORY_SIZE}
+ state: present
+ namespace: vms
+ parameters:
+ - name: NAME
+ description: VM name
+ generate: expression
+ from: 'vm-[A-Za-z0-9]{8}'
+ - name: MEMORY_SIZE
+ description: Memory size
+ value: 1Gi
+
+- name: Remove template 'myvmtemplate'
+ community.general.kubevirt_template:
+ state: absent
+ name: myvmtemplate
+ namespace: templates
+'''
+
+RETURN = '''
+kubevirt_template:
+ description:
+ - The template dictionary specification returned by the API.
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ API_GROUP,
+ MAX_SUPPORTED_API_VERSION
+)
+
+
+TEMPLATE_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'objects': {
+ 'type': 'list',
+ },
+ 'display_name': {
+ 'type': 'str',
+ },
+ 'description': {
+ 'type': 'str',
+ },
+ 'long_description': {
+ 'type': 'str',
+ },
+ 'provider_display_name': {
+ 'type': 'str',
+ },
+ 'documentation_url': {
+ 'type': 'str',
+ },
+ 'support_url': {
+ 'type': 'str',
+ },
+ 'icon_class': {
+ 'type': 'str',
+ },
+ 'version': {
+ 'type': 'str',
+ },
+ 'editable': {
+ 'type': 'list',
+ },
+ 'default_disk': {
+ 'type': 'dict',
+ },
+ 'default_volume': {
+ 'type': 'dict',
+ },
+ 'default_network': {
+ 'type': 'dict',
+ },
+ 'default_nic': {
+ 'type': 'dict',
+ },
+ 'parameters': {
+ 'type': 'list',
+ },
+}
+
+
+class KubeVirtVMTemplate(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(TEMPLATE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+
+ # Execute the CRUD of VM template:
+ kind = 'Template'
+ template_api_version = 'template.openshift.io/v1'
+
+ # Fill in template parameters:
+ definition['parameters'] = self.params.get('parameters')
+
+ # Fill in the default Label
+ labels = definition['metadata']['labels']
+ labels['template.cnv.io/type'] = 'vm'
+
+ # Fill in Openshift/Kubevirt template annotations:
+ annotations = definition['metadata']['annotations']
+ if self.params.get('display_name'):
+ annotations['openshift.io/display-name'] = self.params.get('display_name')
+ if self.params.get('description'):
+ annotations['description'] = self.params.get('description')
+ if self.params.get('long_description'):
+ annotations['openshift.io/long-description'] = self.params.get('long_description')
+ if self.params.get('provider_display_name'):
+ annotations['openshift.io/provider-display-name'] = self.params.get('provider_display_name')
+ if self.params.get('documentation_url'):
+ annotations['openshift.io/documentation-url'] = self.params.get('documentation_url')
+ if self.params.get('support_url'):
+ annotations['openshift.io/support-url'] = self.params.get('support_url')
+ if self.params.get('icon_class'):
+ annotations['iconClass'] = self.params.get('icon_class')
+ if self.params.get('version'):
+ annotations['template.cnv.io/version'] = self.params.get('version')
+
+        # TODO: Make it more Ansible-like, so the user doesn't have to specify the API JSON path, but rather Ansible params:
+ if self.params.get('editable'):
+ annotations['template.cnv.io/editable'] = self.params.get('editable')
+
+ # Set defaults annotations:
+ if self.params.get('default_disk'):
+ annotations['defaults.template.cnv.io/disk'] = self.params.get('default_disk').get('name')
+ if self.params.get('default_volume'):
+ annotations['defaults.template.cnv.io/volume'] = self.params.get('default_volume').get('name')
+ if self.params.get('default_nic'):
+ annotations['defaults.template.cnv.io/nic'] = self.params.get('default_nic').get('name')
+ if self.params.get('default_network'):
+ annotations['defaults.template.cnv.io/network'] = self.params.get('default_network').get('name')
+
+ # Process objects:
+ self.client = self.get_api_client()
+ definition['objects'] = []
+ objects = self.params.get('objects') or []
+ for obj in objects:
+ if obj['kind'] != 'VirtualMachine':
+ definition['objects'].append(obj)
+ else:
+ vm_definition = virtdict()
+
+ # Set VM defaults:
+ if self.params.get('default_disk'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['disks'] = [self.params.get('default_disk')]
+ if self.params.get('default_volume'):
+ vm_definition['spec']['template']['spec']['volumes'] = [self.params.get('default_volume')]
+ if self.params.get('default_nic'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['interfaces'] = [self.params.get('default_nic')]
+ if self.params.get('default_network'):
+ vm_definition['spec']['template']['spec']['networks'] = [self.params.get('default_network')]
+
+ # Set kubevirt API version:
+ vm_definition['apiVersion'] = '%s/%s' % (API_GROUP, MAX_SUPPORTED_API_VERSION)
+
+ # Construct k8s vm API object:
+ vm_template = vm_definition['spec']['template']
+ dummy, vm_def = self.construct_vm_template_definition('VirtualMachine', vm_definition, vm_template, obj)
+
+ definition['objects'].append(vm_def)
+
+ # Create template:
+ resource = self.client.resources.get(api_version=template_api_version, kind=kind, name='templates')
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': result['changed'],
+ 'kubevirt_template': result.pop('result'),
+ 'result': result,
+ })
+
+
+def main():
+ module = KubeVirtVMTemplate()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_vm.py
new file mode 100644
index 00000000..4466bee2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_vm.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_vm
+
+short_description: Manage KubeVirt virtual machine
+
+description:
+    - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machines.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
+ - "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
+ - "I(absent) - Remove a virtual machine."
+ - "I(running) - Create or update a virtual machine and run it."
+ - "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
+ default: "present"
+ choices:
+ - present
+ - absent
+ - running
+ - stopped
+ type: str
+ name:
+ description:
+ - Name of the virtual machine.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine exists.
+ required: true
+ type: str
+ ephemeral:
+ description:
+            - If C(true), an ephemeral virtual machine will be created. When destroyed, it won't be accessible again.
+ - Works only with C(state) I(present) and I(absent).
+ type: bool
+ default: false
+ datavolumes:
+ description:
+ - "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
+ launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
+ it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user."
+ type: list
+ template:
+ description:
+ - "Name of Template to be used in creation of a virtual machine."
+ type: str
+ template_parameters:
+ description:
+ - "New values of parameters from Template."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Start virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+
+- name: Create virtual machine 'myvm' and start it
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64Mi
+ cpu_cores: 1
+ bootloader: efi
+ smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
+ cpu_model: Conroe
+ headless: true
+ hugepage_size: 2Mi
+ tablets:
+ - bus: virtio
+ name: tablet1
+ cpu_limit: 3
+ cpu_shares: 2
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Create virtual machine 'myvm' with multus network interface
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: vms
+ memory: 512M
+ interfaces:
+ - name: default
+ bridge: {}
+ network:
+ pod: {}
+ - name: mynet
+ bridge: {}
+ network:
+ multus:
+ networkName: mynetconf
+
+- name: Combine inline definition with Ansible parameters
+ community.general.kubevirt_vm:
+ # Kubernetes specification:
+ definition:
+ metadata:
+ labels:
+ app: galaxy
+ service: web
+ origin: vmware
+
+ # Ansible parameters:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64M
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start ephemeral virtual machine 'myvm' and wait to be running
+ community.general.kubevirt_vm:
+ ephemeral: true
+ state: running
+ wait: true
+ wait_timeout: 180
+ name: myvm
+ namespace: vms
+ memory: 64M
+ labels:
+ kubevirt.io/vm: myvm
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start fedora vm with cloud init
+ community.general.kubevirt_vm:
+ state: running
+ wait: true
+ name: myvm
+ namespace: vms
+ memory: 1024M
+ cloud_init_nocloud:
+ userData: |-
+ #cloud-config
+ password: fedora
+ chpasswd: { expire: False }
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/fedora-cloud-container-disk-demo:latest
+ path: /disk/fedora.qcow2
+ disk:
+ bus: virtio
+ node_affinity:
+ soft:
+ - weight: 1
+ term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S2
+
+- name: Create virtual machine with datavolume and specify node affinity
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: default
+ memory: 1024Mi
+ datavolumes:
+ - name: mydv
+ source:
+ http:
+ url: https://url/disk.qcow2
+ pvc:
+ accessModes:
+ - ReadWriteOnce
+ storage: 5Gi
+ node_affinity:
+ hard:
+ - term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S1
+
+- name: Remove virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: absent
+ name: myvm
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_vm:
+ description:
+ - The virtual machine dictionary specification returned by the API.
+ - "This dictionary contains all values returned by the KubeVirt API all options
+ are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+ VM_SPEC_DEF_ARG_SPEC
+)
+
+VM_ARG_SPEC = {
+ 'ephemeral': {'type': 'bool', 'default': False},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent', 'running', 'stopped'
+ ],
+ 'default': 'present'
+ },
+ 'datavolumes': {'type': 'list'},
+ 'template': {'type': 'str'},
+ 'template_parameters': {'type': 'dict'},
+}
+
+# Which params (can) modify 'spec:' contents of a VM:
+VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
+
+
+class KubeVirtVM(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VM_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _wait_for_vmi_running(self):
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ phase = status.get('phase', None)
+ if phase == 'Running':
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
+
+ def _wait_for_vm_state(self, new_state):
+ if new_state == 'running':
+ want_created = want_ready = True
+ else:
+ want_created = want_ready = False
+
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ created = status.get('created', False)
+ ready = status.get('ready', False)
+ if (created, ready) == (want_created, want_ready):
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
+ "Maybe try a higher wait_timeout value?".format(new_state))
+
+ def manage_vm_state(self, new_state, already_changed):
+        new_running = (new_state == 'running')
+ changed = False
+ k8s_obj = {}
+
+ if not already_changed:
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not k8s_obj:
+ self.fail("VirtualMachine object disappeared during module operation, aborting.")
+ if k8s_obj.spec.get('running', False) == new_running:
+ return False, k8s_obj
+
+ newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
+ k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
+ self.name, self.namespace, merge_type='merge')
+ if err:
+ self.fail_json(**err)
+ else:
+ changed = True
+
+ if self.params.get('wait'):
+ k8s_obj = self._wait_for_vm_state(new_state)
+
+ return changed, k8s_obj
+
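+    # Editor's note: for each defaults.template.cnv.io/* annotation on the
+    # source template, the helper below moves the entry with the matching name
+    # into `defaults` and keeps the remaining entries on the processed
+    # definition, so construct_vm_definition() can use the matching entries
+    # as parameter defaults.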
+    def _process_template_defaults(self, process_template, processedtemplate, defaults):
+        def set_template_default(default_name, default_name_index, definition_spec):
+            default_value = process_template['metadata']['annotations'][default_name]
+ if default_value:
+ values = definition_spec[default_name_index]
+ default_values = [d for d in values if d.get('name') == default_value]
+ defaults[default_name_index] = default_values
+ if definition_spec[default_name_index] is None:
+ definition_spec[default_name_index] = []
+ definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
+
+ devices = processedtemplate['spec']['template']['spec']['domain']['devices']
+ spec = processedtemplate['spec']['template']['spec']
+
+ set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
+ set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
+ set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
+ set_template_default('defaults.template.cnv.io/network', 'networks', spec)
+
+ def construct_definition(self, kind, our_state, ephemeral):
+ definition = virtdict()
+ processedtemplate = {}
+
+ # Construct the API object definition:
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+ vm_template = self.params.get('template')
+ if vm_template:
+ # Find the template the VM should be created from:
+ template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
+            process_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
+
+ # Set proper template values taken from module option 'template_parameters':
+ for k, v in self.params.get('template_parameters', {}).items():
+                for parameter in process_template.parameters:
+ if parameter.name == k:
+ parameter.value = v
+
+            # Process the template:
+            processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
+            processedtemplate = processedtemplates_res.create(process_template.to_dict()).to_dict()['objects'][0]
+
+ # Process defaults of the template:
+            self._process_template_defaults(process_template, processedtemplate, defaults)
+
+ if not ephemeral:
+ definition['spec']['running'] = our_state == 'running'
+ template = definition if ephemeral else definition['spec']['template']
+ template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
+ dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
+
+ return self.merge_dicts(definition, processedtemplate)
+
+ def execute_module(self):
+ # Parse parameters specific to this module:
+ ephemeral = self.params.get('ephemeral')
+ k8s_state = our_state = self.params.get('state')
+ kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
+ _used_params = [name for name in self.params if self.params[name] is not None]
+ # Is 'spec:' getting changed?
+        vm_spec_change = bool(set(VM_SPEC_PARAMS).intersection(_used_params))
+ changed = False
+ crud_executed = False
+ method = ''
+
+ # Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
+ if ephemeral:
+ # Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
+ if our_state == 'running':
+ self.params['state'] = k8s_state = 'present'
+ elif our_state == 'stopped':
+ self.params['state'] = k8s_state = 'absent'
+ else:
+ if our_state != 'absent':
+ self.params['state'] = k8s_state = 'present'
+
+ # Start with fetching the current object to make sure it exists
+ # If it does, but we end up not performing any operations on it, at least we'll be able to return
+ # its current contents as part of the final json
+ self.client = self.get_api_client()
+ self._kind_resource = self.find_supported_resource(kind)
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
+ self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
+
+ # If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
+ # Also check_mode always warrants a CRUD, as that'll produce a sane result
+ if vm_spec_change or k8s_state == 'absent' or self.check_mode:
+ definition = self.construct_definition(kind, our_state, ephemeral)
+ result = self.execute_crud(kind, definition)
+ changed = result['changed']
+ k8s_obj = result['result']
+ method = result['method']
+ crud_executed = True
+
+ if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
+ # Waiting for k8s_state==absent is handled inside execute_crud()
+ k8s_obj = self._wait_for_vmi_running()
+
+ if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
+ # State==present/absent doesn't involve any additional VMI state management and is fully
+ # handled inside execute_crud() (including wait logic)
+ patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
+ changed = changed or patched
+ if changed:
+ method = method or 'patch'
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_vm': self.fix_serialization(k8s_obj),
+ 'method': method
+ })
+
+
+def main():
+ module = KubeVirtVM()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py
new file mode 100644
index 00000000..a35b25b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py
@@ -0,0 +1,690 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode
+short_description: Manage instances on the Linode Public Cloud
+description:
+ - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: [ absent, active, deleted, present, restarted, started, stopped ]
+ default: present
+ type: str
+ api_key:
+ description:
+ - Linode API key
+ type: str
+ name:
+ description:
+ - Name to give the instance (alphanumeric, dashes, underscore).
+ - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+ required: true
+ type: str
+ displaygroup:
+ description:
+ - Add the instance to a Display Group in Linode Manager.
+ type: str
+ linode_id:
+ description:
+ - Unique ID of a Linode server. This value is read-only in the sense that
+ if you specify it on creation of a Linode it will not be used. The
+ Linode API generates these IDs and we can use those generated values
+ here to reference a Linode more specifically. This is useful for idempotence.
+ aliases: [ lid ]
+ type: int
+ additional_disks:
+ description:
+ - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+ - Dictionary takes Size, Label, Type. Size is in MB.
+ type: list
+ alert_bwin_enabled:
+ description:
+ - Set status of bandwidth in alerts.
+ type: bool
+ alert_bwin_threshold:
+ description:
+ - Set threshold in MB of bandwidth in alerts.
+ type: int
+ alert_bwout_enabled:
+ description:
+ - Set status of bandwidth out alerts.
+ type: bool
+ alert_bwout_threshold:
+ description:
+ - Set threshold in MB of bandwidth out alerts.
+ type: int
+ alert_bwquota_enabled:
+ description:
+ - Set status of bandwidth quota alerts as percentage of network transfer quota.
+ type: bool
+ alert_bwquota_threshold:
+ description:
+ - Set threshold in MB of bandwidth quota alerts.
+ type: int
+ alert_cpu_enabled:
+ description:
+ - Set status of receiving CPU usage alerts.
+ type: bool
+ alert_cpu_threshold:
+ description:
+ - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
+ type: int
+ alert_diskio_enabled:
+ description:
+ - Set status of receiving disk IO alerts.
+ type: bool
+ alert_diskio_threshold:
+ description:
+ - Set threshold for average IO ops/sec over 2 hour period.
+ type: int
+ backupweeklyday:
+ description:
+ - Integer value for what day of the week to store weekly backups.
+ type: int
+ plan:
+ description:
+ - plan to use for the instance (Linode plan)
+ type: int
+ payment_term:
+ description:
+ - payment term to use for the instance (payment term in months)
+ default: 1
+ choices: [ 1, 12, 24 ]
+ type: int
+ password:
+ description:
+ - root password to apply to a new server (auto generated if missing)
+ type: str
+ private_ip:
+ description:
+ - Add private IPv4 address when Linode is created.
+ - Default is C(false).
+ type: bool
+ ssh_pub_key:
+ description:
+ - SSH public key applied to root user
+ type: str
+ swap:
+ description:
+ - swap size in MB
+ default: 512
+ type: int
+ distribution:
+ description:
+ - distribution to use for the instance (Linode Distribution)
+ type: int
+ datacenter:
+ description:
+ - datacenter to create an instance in (Linode Datacenter)
+ type: int
+ kernel_id:
+ description:
+ - kernel to use for the instance (Linode Kernel)
+ type: int
+ wait:
+ description:
+ - wait for the instance to be in state C(running) before returning
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+ type: int
+ watchdog:
+ description:
+ - Set status of Lassie watchdog.
+ type: bool
+ default: "True"
+requirements:
+ - python >= 2.6
+ - linode-python
+author:
+- Vincent Viallet (@zbal)
+notes:
+ - Please note, linode-python does not have python 3 support.
+ - This module uses the now deprecated v3 of the Linode API.
+ - C(LINODE_API_KEY) env variable can be used instead.
+ - Please review U(https://www.linode.com/api/linode) for determining the required parameters.
+'''
+
+EXAMPLES = '''
+
+- name: Create a new Linode
+ community.general.linode:
+ name: linode-test1
+ plan: 1
+ datacenter: 7
+ distribution: 129
+ state: present
+ register: linode_creation
+
+- name: Create a server with a private IP Address
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Fully configure new server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 4
+ datacenter: 2
+ distribution: 99
+ kernel_id: 138
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ alert_bwquota_enabled: True
+ alert_bwquota_threshold: 80
+ alert_bwin_enabled: True
+ alert_bwin_threshold: 10
+ alert_cpu_enabled: True
+ alert_cpu_threshold: 210
+ alert_bwout_enabled: True
+ alert_bwout_threshold: 10
+ alert_diskio_enabled: True
+ alert_diskio_threshold: 10000
+ backupweeklyday: 1
+ backupwindow: 2
+ displaygroup: 'test'
+ additional_disks:
+ - {Label: 'disk1', Size: 2500, Type: 'raw'}
+ - {Label: 'newdisk', Size: 2000}
+ watchdog: True
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Ensure a running server (create if missing)
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Delete a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Stop a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: stopped
+ delegate_to: localhost
+
+- name: Reboot a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: restarted
+ delegate_to: localhost
+'''
+
+import os
+import time
+import traceback
+
+LINODE_IMP_ERR = None
+try:
+ from linode import api as linode_api
+ HAS_LINODE = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def randompass():
+ '''
+ Generate a long random password that complies with Linode requirements
+ '''
+ # Linode API currently requires the following:
+ # It must contain at least two of these four character classes:
+ # lower case letters - upper case letters - numbers - punctuation
+ # we play it safe :)
+ import random
+ import string
+ # as of python 2.4, this reseeds the PRNG from urandom
+ random.seed()
+ lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
+ upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
+ number = ''.join(random.choice(string.digits) for x in range(6))
+ punct = ''.join(random.choice(string.punctuation) for x in range(6))
+ p = lower + upper + number + punct
+ return ''.join(random.sample(p, len(p)))
+
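+# Note: taking six characters from each of the four classes guarantees that
+# at least two character classes are present, which is what the v3 API
+# requires; random.sample() then shuffles the concatenated result.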
+
+def getInstanceDetails(api, server):
+ '''
+ Return the details of an instance, populating IPs, etc.
+ '''
+ instance = {'id': server['LINODEID'],
+ 'name': server['LABEL'],
+ 'public': [],
+ 'private': []}
+
+ # Populate with ips
+ for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
+ if ip['ISPUBLIC'] and 'ipv4' not in instance:
+ instance['ipv4'] = ip['IPADDRESS']
+ instance['fqdn'] = ip['RDNS_NAME']
+ if ip['ISPUBLIC']:
+ instance['public'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ else:
+ instance['private'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ return instance
+
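+# Shape of the returned dict (values hypothetical):
+# {'id': 123456, 'name': 'web1', 'ipv4': '203.0.113.5', 'fqdn': 'li-1.example',
+#  'public': [{'ipv4': '203.0.113.5', 'fqdn': 'li-1.example', 'ip_id': 98}],
+#  'private': []}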
+
+def linodeServers(module, api, state, name,
+ displaygroup, plan, additional_disks, distribution,
+ datacenter, kernel_id, linode_id, payment_term, password,
+ private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
+ instances = []
+ changed = False
+ new_server = False
+ servers = []
+ disks = []
+ configs = []
+ jobs = []
+
+ # See if we can match an existing server details with the provided linode_id
+ if linode_id:
+ # For the moment we only consider linode_id as criteria for match
+ # Later we can use more (size, name, etc.) and update existing
+ servers = api.linode_list(LinodeId=linode_id)
+ # Attempt to fetch details about disks and configs only if servers are
+ # found with linode_id
+ if servers:
+ disks = api.linode_disk_list(LinodeId=linode_id)
+ configs = api.linode_config_list(LinodeId=linode_id)
+
+ # Act on the state
+ if state in ('active', 'present', 'started'):
+ # TODO: validate all the plan / distribution / datacenter are valid
+
+ # Multi step process/validation:
+ # - need linode_id (entity)
+ # - need disk_id for linode_id - create disk from distrib
+ # - need config_id for linode_id - create config (need kernel)
+
+ # Any create step triggers a job that need to be waited for.
+ if not servers:
+ for arg_name, arg in (('name', name), ('plan', plan), ('distribution', distribution), ('datacenter', datacenter)):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg_name, state)) # @TODO use required_if instead
+ # Create linode entity
+ new_server = True
+
+ # Get size of all individually listed disks to subtract from Distribution disk
+ used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
+
+ try:
+ res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
+ PaymentTerm=payment_term)
+ linode_id = res['LinodeID']
+ # Update linode Label to match name
+ api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name))
+ # Update Linode with Ansible configuration options
+ api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
+ # Save server
+ servers = api.linode_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Add private IP to Linode
+ if private_ip:
+ try:
+ res = api.linode_ip_addprivate(LinodeID=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not disks:
+ for arg_name, arg in (('name', name), ('linode_id', linode_id), ('distribution', distribution)):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+ # Create disks (1 from distrib, 1 for SWAP)
+ new_server = True
+ try:
+ if not password:
+ # Password is required on creation, if not provided generate one
+ password = randompass()
+ if not swap:
+ swap = 512
+ # Create data disk
+ size = servers[0]['TOTALHD'] - used_disk_space - swap
+
+ if ssh_pub_key:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password, rootSSHKey=ssh_pub_key,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ else:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ jobs.append(res['JobID'])
+ # Create SWAP disk
+ res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
+ Label='%s swap disk (lid: %s)' % (name, linode_id),
+ Size=swap)
+ # Create individually listed disks at specified size
+ if additional_disks:
+ for disk in additional_disks:
+ # If a disk Type is not passed in, default to ext4
+ if disk.get('Type') is None:
+ disk['Type'] = 'ext4'
+ res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
+
+ jobs.append(res['JobID'])
+ except Exception as e:
+ # TODO: destroy linode ?
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not configs:
+ for arg_name, arg in (('name', name), ('linode_id', linode_id), ('distribution', distribution)):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+
+ # Check architecture
+ for distrib in api.avail_distributions():
+ if distrib['DISTRIBUTIONID'] != distribution:
+ continue
+ arch = '32'
+ if distrib['IS64BIT']:
+ arch = '64'
+ break
+
+ # Get latest kernel matching arch if kernel_id is not specified
+ if not kernel_id:
+ for kernel in api.avail_kernels():
+ if not kernel['LABEL'].startswith('Latest %s' % arch):
+ continue
+ kernel_id = kernel['KERNELID']
+ break
+
+ # Get disk list
+ disks_id = []
+ for disk in api.linode_disk_list(LinodeId=linode_id):
+ if disk['TYPE'] == 'ext3':
+ disks_id.insert(0, str(disk['DISKID']))
+ continue
+ disks_id.append(str(disk['DISKID']))
+ # Pad the list out to the nine disk slots the DiskList parameter expects
+ while len(disks_id) < 9:
+ disks_id.append('')
+ disks_list = ','.join(disks_id)
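+ # Example (hypothetical IDs): ['1001', '1002'] is padded and joined
+ # into '1001,1002,,,,,,,' (nine comma-separated slots in total).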
+
+ # Create config
+ new_server = True
+ try:
+ api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
+ Disklist=disks_list, Label='%s config' % name)
+ configs = api.linode_config_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Start / Ensure servers are running
+ for server in servers:
+ # Refresh server state
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # Ensure existing servers are up and running, boot if necessary
+ if server['STATUS'] != 1:
+ res = api.linode_boot(LinodeId=linode_id)
+ jobs.append(res['JobID'])
+ changed = True
+
+ # wait here until the instances are up
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ # refresh the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # status:
+ # -2: Boot failed
+ # 1: Running
+ if server['STATUS'] in (-2, 1):
+ break
+ time.sleep(5)
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
+ # Get a fresh copy of the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ if server['STATUS'] == -2:
+ module.fail_json(msg='%s (lid: %s) failed to boot' %
+ (server['LABEL'], server['LINODEID']))
+ # From now on we know the task is a success
+ # Build instance report
+ instance = getInstanceDetails(api, server)
+ # depending on wait flag select the status
+ if wait:
+ instance['status'] = 'Running'
+ else:
+ instance['status'] = 'Starting'
+
+ # Return the root password if this is a new box and no SSH key
+ # has been provided
+ if new_server and not ssh_pub_key:
+ instance['password'] = password
+ instances.append(instance)
+
+ elif state in ('stopped',):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for stopped state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ if server['STATUS'] != 2:
+ try:
+ res = api.linode_shutdown(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Stopping'
+ changed = True
+ else:
+ instance['status'] = 'Stopped'
+ instances.append(instance)
+
+ elif state in ('restarted',):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for restarted state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ res = api.linode_reboot(LinodeId=server['LINODEID'])
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Restarting'
+ changed = True
+ instances.append(instance)
+
+ elif state in ('absent', 'deleted'):
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Deleting'
+ changed = True
+ instances.append(instance)
+
+ # Ease parsing if only 1 instance
+ if len(instances) == 1:
+ module.exit_json(changed=changed, instance=instances[0])
+
+ module.exit_json(changed=changed, instances=instances)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']),
+ api_key=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ alert_bwin_enabled=dict(type='bool'),
+ alert_bwin_threshold=dict(type='int'),
+ alert_bwout_enabled=dict(type='bool'),
+ alert_bwout_threshold=dict(type='int'),
+ alert_bwquota_enabled=dict(type='bool'),
+ alert_bwquota_threshold=dict(type='int'),
+ alert_cpu_enabled=dict(type='bool'),
+ alert_cpu_threshold=dict(type='int'),
+ alert_diskio_enabled=dict(type='bool'),
+ alert_diskio_threshold=dict(type='int'),
+ backupsenabled=dict(type='int'),
+ backupweeklyday=dict(type='int'),
+ backupwindow=dict(type='int'),
+ displaygroup=dict(type='str', default=''),
+ plan=dict(type='int'),
+ additional_disks=dict(type='list'),
+ distribution=dict(type='int'),
+ datacenter=dict(type='int'),
+ kernel_id=dict(type='int'),
+ linode_id=dict(type='int', aliases=['lid']),
+ payment_term=dict(type='int', default=1, choices=[1, 12, 24]),
+ password=dict(type='str', no_log=True),
+ private_ip=dict(type='bool'),
+ ssh_pub_key=dict(type='str'),
+ swap=dict(type='int', default=512),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300),
+ watchdog=dict(type='bool', default=True),
+ ),
+ )
+
+ if not HAS_LINODE:
+ module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR)
+
+ state = module.params.get('state')
+ api_key = module.params.get('api_key')
+ name = module.params.get('name')
+ alert_bwin_enabled = module.params.get('alert_bwin_enabled')
+ alert_bwin_threshold = module.params.get('alert_bwin_threshold')
+ alert_bwout_enabled = module.params.get('alert_bwout_enabled')
+ alert_bwout_threshold = module.params.get('alert_bwout_threshold')
+ alert_bwquota_enabled = module.params.get('alert_bwquota_enabled')
+ alert_bwquota_threshold = module.params.get('alert_bwquota_threshold')
+ alert_cpu_enabled = module.params.get('alert_cpu_enabled')
+ alert_cpu_threshold = module.params.get('alert_cpu_threshold')
+ alert_diskio_enabled = module.params.get('alert_diskio_enabled')
+ alert_diskio_threshold = module.params.get('alert_diskio_threshold')
+ backupsenabled = module.params.get('backupsenabled')
+ backupweeklyday = module.params.get('backupweeklyday')
+ backupwindow = module.params.get('backupwindow')
+ displaygroup = module.params.get('displaygroup')
+ plan = module.params.get('plan')
+ additional_disks = module.params.get('additional_disks')
+ distribution = module.params.get('distribution')
+ datacenter = module.params.get('datacenter')
+ kernel_id = module.params.get('kernel_id')
+ linode_id = module.params.get('linode_id')
+ payment_term = module.params.get('payment_term')
+ password = module.params.get('password')
+ private_ip = module.params.get('private_ip')
+ ssh_pub_key = module.params.get('ssh_pub_key')
+ swap = module.params.get('swap')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ watchdog = int(module.params.get('watchdog'))
+
+ kwargs = dict()
+ check_items = dict(
+ alert_bwin_enabled=alert_bwin_enabled,
+ alert_bwin_threshold=alert_bwin_threshold,
+ alert_bwout_enabled=alert_bwout_enabled,
+ alert_bwout_threshold=alert_bwout_threshold,
+ alert_bwquota_enabled=alert_bwquota_enabled,
+ alert_bwquota_threshold=alert_bwquota_threshold,
+ alert_cpu_enabled=alert_cpu_enabled,
+ alert_cpu_threshold=alert_cpu_threshold,
+ alert_diskio_enabled=alert_diskio_enabled,
+ alert_diskio_threshold=alert_diskio_threshold,
+ backupweeklyday=backupweeklyday,
+ backupwindow=backupwindow,
+ )
+
+ for key, value in check_items.items():
+ if value is not None:
+ kwargs[key] = value
+
+ # Setup the api_key
+ if not api_key:
+ try:
+ api_key = os.environ['LINODE_API_KEY']
+ except KeyError as e:
+ module.fail_json(msg='Unable to load %s' % e)
+
+ # setup the auth
+ try:
+ api = linode_api.Api(api_key)
+ api.test_echo()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ linodeServers(module, api, state, name,
+ displaygroup, plan,
+ additional_disks, distribution, datacenter, kernel_id, linode_id,
+ payment_term, password, private_ip, ssh_pub_key, swap, wait,
+ wait_timeout, watchdog, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py
new file mode 100644
index 00000000..17a697b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode_v4
+short_description: Manage instances on the Linode cloud
+description: Manage instances on the Linode cloud.
+requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+author:
+ - Luke Murphy (@decentral1se)
+notes:
+ - No Linode resizing is currently implemented. This module will, in time,
+ replace the current Linode module which uses deprecated API bindings on the
+ Linode side.
+options:
+ region:
+ description:
+ - The region of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/regions/).
+ required: false
+ type: str
+ image:
+ description:
+ - The image of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/images/).
+ type: str
+ required: false
+ type:
+ description:
+ - The type of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/linode-types/).
+ type: str
+ required: false
+ label:
+ description:
+ - The instance label. This label is used as the main determiner for
+ idempotence for the module and is therefore mandatory.
+ type: str
+ required: true
+ group:
+ description:
+ - The group that the instance should be marked under. Please note that
+ group labelling is deprecated but still supported. The encouraged
+ method for marking instances is to use tags.
+ type: str
+ required: false
+ tags:
+ description:
+ - The tags that the instance should be marked under. See
+ U(https://www.linode.com/docs/api/tags/).
+ required: false
+ type: list
+ root_pass:
+ description:
+ - The password for the root user. If not specified, one will be
+ generated. This generated password will be available in the task
+ success JSON.
+ required: false
+ type: str
+ authorized_keys:
+ description:
+ - A list of SSH public key parts to deploy for the root user.
+ required: false
+ type: list
+ state:
+ description:
+ - The desired instance state.
+ type: str
+ choices:
+ - present
+ - absent
+ required: true
+ access_token:
+ description:
+ - The Linode API v4 access token. It may also be specified by exposing
+ the C(LINODE_ACCESS_TOKEN) environment variable. See
+ U(https://www.linode.com/docs/api#access-and-authentication).
+ required: true
+ type: str
+ stackscript_id:
+ description:
+ - The numeric ID of the StackScript to use when creating the instance.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: int
+ version_added: 1.3.0
+ stackscript_data:
+ description:
+ - An object containing arguments to any User Defined Fields present in
+ the StackScript used when creating the instance.
+ Only valid when a stackscript_id is provided.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: dict
+ version_added: 1.3.0
+'''
+
+EXAMPLES = """
+- name: Create a new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ type: g6-nanode-1
+ region: eu-west
+ image: linode/debian9
+ root_pass: passw0rd
+ authorized_keys:
+ - "ssh-rsa ..."
+ stackscript_id: 1337
+ stackscript_data:
+ variable: value
+ state: present
+
+- name: Delete that new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ state: absent
+"""
+
+RETURN = """
+instance:
+ description: The instance description in JSON serialized form.
+ returned: Always.
+ type: dict
+ sample: {
+ "root_pass": "foobar", # if auto-generated
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": false,
+ "schedule": {
+ "day": null,
+ "window": null
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": true
+ }
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
+
+LINODE_IMP_ERR = None
+try:
+ from linode_api4 import Instance, LinodeClient
+ HAS_LINODE_DEPENDENCY = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE_DEPENDENCY = False
+
+
+def create_linode(module, client, **kwargs):
+ """Creates a Linode instance and handles return format."""
+ if kwargs['root_pass'] is None:
+ kwargs.pop('root_pass')
+
+ try:
+ response = client.linode.instance_create(**kwargs)
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
+ try:
+ if isinstance(response, tuple):
+ instance, root_pass = response
+ instance_json = instance._raw_json
+ instance_json.update({'root_pass': root_pass})
+ return instance_json
+ else:
+ return response._raw_json
+ except TypeError:
+ module.fail_json(msg='Unable to parse Linode instance creation'
+ ' response. Please raise a bug against this'
+ ' module on https://github.com/ansible/ansible/issues'
+ )
+
+
+def maybe_instance_from_label(module, client):
+ """Try to retrieve an instance based on a label."""
+ try:
+ label = module.params['label']
+ result = client.linode.instances(Instance.label == label)
+ return result[0]
+ except IndexError:
+ return None
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
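+# Instance labels are unique per Linode account, so result[0] is the only
+# possible match; an IndexError simply means no instance carries the label.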
+
+def initialise_module():
+ """Initialise the module parameter specification."""
+ return AnsibleModule(
+ argument_spec=dict(
+ label=dict(type='str', required=True),
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ access_token=dict(
+ type='str',
+ required=True,
+ no_log=True,
+ fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
+ ),
+ authorized_keys=dict(type='list', required=False),
+ group=dict(type='str', required=False),
+ image=dict(type='str', required=False),
+ region=dict(type='str', required=False),
+ root_pass=dict(type='str', required=False, no_log=True),
+ tags=dict(type='list', required=False),
+ type=dict(type='str', required=False),
+ stackscript_id=dict(type='int', required=False),
+ stackscript_data=dict(type='dict', required=False),
+ ),
+ supports_check_mode=False,
+ required_one_of=(
+ ['state', 'label'],
+ ),
+ required_together=(
+ ['region', 'image', 'type'],
+ )
+ )
+
+
+def build_client(module):
+ """Build a LinodeClient."""
+ return LinodeClient(
+ module.params['access_token'],
+ user_agent=get_user_agent('linode_v4_module')
+ )
+
+
+def main():
+ """Module entrypoint."""
+ module = initialise_module()
+
+ if not HAS_LINODE_DEPENDENCY:
+ module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
+
+ client = build_client(module)
+ instance = maybe_instance_from_label(module, client)
+
+ if module.params['state'] == 'present' and instance is not None:
+ module.exit_json(changed=False, instance=instance._raw_json)
+
+ elif module.params['state'] == 'present' and instance is None:
+ instance_json = create_linode(
+ module, client,
+ authorized_keys=module.params['authorized_keys'],
+ group=module.params['group'],
+ image=module.params['image'],
+ label=module.params['label'],
+ region=module.params['region'],
+ root_pass=module.params['root_pass'],
+ tags=module.params['tags'],
+ ltype=module.params['type'],
+ stackscript=module.params['stackscript_id'],
+ stackscript_data=module.params['stackscript_data'],
+ )
+ module.exit_json(changed=True, instance=instance_json)
+
+ elif module.params['state'] == 'absent' and instance is not None:
+ instance.delete()
+ module.exit_json(changed=True, instance=instance._raw_json)
+
+ elif module.params['state'] == 'absent' and instance is None:
+ module.exit_json(changed=False, instance={})
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py
new file mode 100644
index 00000000..c1a3d1c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py
@@ -0,0 +1,1760 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lxc_container
+short_description: Manage LXC Containers
+description:
+ - Management of LXC containers.
+author: "Kevin Carter (@cloudnull)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ type: str
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ type: str
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ type: str
+ config:
+ description:
+ - Path to the LXC configuration file.
+ type: path
+ lv_name:
+ description:
+ - Name of the logical volume, defaults to the container name.
+ - If not specified, it defaults to C($CONTAINER_NAME).
+ type: str
+ vg_name:
+ description:
+ - If backend store is lvm, specify the name of the volume group.
+ type: str
+ default: lxc
+ thinpool:
+ description:
+ - Use LVM thin pool called TP.
+ type: str
+ fs_type:
+ description:
+ - Create fstype TYPE.
+ type: str
+ default: ext4
+ fs_size:
+ description:
+ - File system size.
+ type: str
+ default: 5G
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ type: path
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ type: str
+ container_command:
+ description:
+ - Run a command within a container.
+ type: str
+ lxc_path:
+ description:
+ - Place container under PATH.
+ type: path
+ container_log:
+ description:
+ - Enable a container log for host actions to the container.
+ type: bool
+ default: 'no'
+ container_log_level:
+ choices:
+ - Info
+ - info
+ - INFO
+ - Error
+ - error
+ - ERROR
+ - Debug
+ - debug
+ - DEBUG
+ description:
+ - Set the log level for a container where *container_log* was set.
+ type: str
+ required: false
+ default: INFO
+ clone_name:
+ description:
+ - Name of the new cloned server.
+ - This is only used when state is clone.
+ type: str
+ clone_snapshot:
+ description:
+ - Create a snapshot a container when cloning.
+ - This is not supported by all container storage backends.
+ - Enabling this may fail if the backing store does not support snapshots.
+ type: bool
+ default: 'no'
+ archive:
+ description:
+ - Create an archive of a container.
+ - This will create a tarball of the running container.
+ type: bool
+ default: 'no'
+ archive_path:
+ description:
+ - Path to save the archived container.
+ - If the path does not exist the archive method will attempt to create it.
+ type: path
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running
+ container.
+ type: str
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ - clone
+ description:
+ - Define the state of a container.
+ - If you clone a container using I(clone_name) the newly cloned
+ container is created in a stopped state.
+ - The running container will be stopped while the clone operation is
+ happening and upon completion of the clone the original container
+ state will be restored.
+ type: str
+ default: started
+ container_config:
+ description:
+ - A list of C(key=value) options to use when configuring a container.
+ type: list
+ elements: str
+requirements:
+ - 'lxc >= 1.0 # OS package'
+ - 'python >= 2.6 # OS Package'
+ - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the user's namespace the module will
+ simply return as "unchanged".
+ - The "container_command" can be used with any state except "absent". If
+ used with state "stopped" the container will be "started", the command
+ executed, and then the container "stopped" again. Likewise if the state
+ is "stopped" and the container does not exist it will be first created,
+ "started", the command executed, and then "stopped". If you use a "|"
+ in the variable you can use common script formatting within the variable
+ itself. The "container_command" option will always execute as BASH.
+ When using "container_command" a log file is created in the /tmp/ directory
+ which contains both stdout and stderr of any command executed.
+ - If "archive" is **true** the system will attempt to create a compressed
+ tarball of the running container. The "archive" option supports LVM backed
+ containers and will create a snapshot of the running container when
+ creating the archive.
+ - If your distro does not have a package for "python2-lxc", which is a
+ requirement for this module, it can be installed from source at
+ "https://github.com/lxc/python2-lxc" or installed via pip using the package
+ name lxc-python2.
+'''
+
+EXAMPLES = r"""
+- name: Create a started container
+ community.general.lxc_container:
+ name: test-container-started
+ container_log: true
+ template: ubuntu
+ state: started
+ template_options: --release trusty
+
+- name: Create a stopped container
+ community.general.lxc_container:
+ name: test-container-stopped
+ container_log: true
+ template: ubuntu
+ state: stopped
+ template_options: --release trusty
+
+- name: Create a frozen container
+ community.general.lxc_container:
+ name: test-container-frozen
+ container_log: true
+ template: ubuntu
+ state: frozen
+ template_options: --release trusty
+ container_command: |
+ echo 'hello world.' | tee /opt/started-frozen
+
+# Create filesystem container, configure it, and archive it, and start it.
+- name: Create filesystem container
+ community.general.lxc_container:
+ name: test-container-config
+ backing_store: dir
+ container_log: true
+ template: ubuntu
+ state: started
+ archive: true
+ archive_compression: none
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2
+- name: Create a frozen lvm container
+ community.general.lxc_container:
+ name: test-container-lvm
+ container_log: true
+ template: ubuntu
+ state: frozen
+ backing_store: lvm
+ template_options: --release trusty
+ container_command: |
+ apt-get update
+ apt-get install -y vim lxc-dev
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ archive: true
+ archive_compression: bzip2
+ register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+ ansible.builtin.debug:
+ var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+ community.general.lxc_container:
+ name: test-container-started
+ state: stopped
+ container_command: |
+ echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: frozen
+ container_command: |
+ echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: started
+
+- name: Run a command in a container and then restart it
+ community.general.lxc_container:
+ name: test-container-started
+ state: restarted
+ container_command: |
+ echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+ community.general.lxc_container:
+ name: test-container-started
+ container_command: |
+ apt-get update
+ apt-get install -y curl wget vim apache2
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+ community.general.lxc_container:
+ name: test-container-started
+ state: absent
+ archive: true
+ archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+ community.general.lxc_container:
+ name: test-container-overlayfs
+ container_log: true
+ template: ubuntu
+ state: started
+ backing_store: overlayfs
+ template_options: --release trusty
+ clone_snapshot: true
+ clone_name: test-container-overlayfs-clone-snapshot
+ archive: true
+ archive_compression: gzip
+ register: clone_container_info
+
+- name: Debug info on container "test-container"
+ ansible.builtin.debug:
+ var: clone_container_info
+
+- name: Clone a container using snapshot
+ community.general.lxc_container:
+ name: test-container-overlayfs-clone-snapshot
+ backing_store: overlayfs
+ clone_name: test-container-overlayfs-clone-snapshot2
+ clone_snapshot: true
+
+- name: Create a new container and clone it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ backing_store: dir
+ clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ state: absent
+ clone_name: test-container-new-archive-destroyed-clone
+ archive: true
+ archive_compression: gzip
+
+- name: Start a cloned container.
+ community.general.lxc_container:
+ name: test-container-new-archive-destroyed-clone
+ state: started
+
+- name: Destroy a container
+ community.general.lxc_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - test-container-stopped
+ - test-container-started
+ - test-container-frozen
+ - test-container-lvm
+ - test-container-config
+ - test-container-overlayfs
+ - test-container-overlayfs-clone
+ - test-container-overlayfs-clone-snapshot
+ - test-container-overlayfs-clone-snapshot2
+ - test-container-new-archive
+ - test-container-new-archive-clone
+ - test-container-new-archive-destroyed-clone
+"""
+
+RETURN = r"""
+lxc_container:
+ description: container information
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: name of the lxc container
+ returned: success
+ type: str
+ sample: test_host
+ init_pid:
+ description: pid of the lxc init process
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: list of the container's network interfaces
+ returned: success
+ type: list
+ sample: [ "eth0", "lo" ]
+ ips:
+ description: list of ips
+ returned: success
+ type: list
+ sample: [ "10.0.3.3" ]
+ state:
+ description: resulting state of the container
+ returned: success
+ type: str
+ sample: "running"
+ archive:
+ description: path to the created archive of the container
+ returned: success, when archive is true
+ type: str
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: if the container was cloned
+ returned: success, when clone_name is specified
+ type: bool
+ sample: True
+"""
+
+import os
+import os.path
+import re
+import shutil
+import subprocess
+import tempfile
+import time
+
+try:
+ import lxc
+except ImportError:
+ HAS_LXC = False
+else:
+ HAS_LXC = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_text, to_bytes
+
+
+# LXC_COMPRESSION_MAP is a map of available compression types when creating
+# an archive of a container.
+LXC_COMPRESSION_MAP = {
+ 'gzip': {
+ 'extension': 'tar.tgz',
+ 'argument': '-czf'
+ },
+ 'bzip2': {
+ 'extension': 'tar.bz2',
+ 'argument': '-cjf'
+ },
+ 'none': {
+ 'extension': 'tar',
+ 'argument': '-cf'
+ }
+}
+
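+# Illustration (hypothetical paths): with 'gzip' selected, the archive step
+# builds roughly `tar -czf /opt/archives/mycontainer.tar.tgz <rootfs>` from
+# the 'argument' and 'extension' values mapped above.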
+
+# LXC_COMMAND_MAP is a map of variables that are available to a method based
+# on the state the container is in.
+LXC_COMMAND_MAP = {
+ 'create': {
+ 'variables': {
+ 'config': '--config',
+ 'template': '--template',
+ 'backing_store': '--bdev',
+ 'lxc_path': '--lxcpath',
+ 'lv_name': '--lvname',
+ 'vg_name': '--vgname',
+ 'thinpool': '--thinpool',
+ 'fs_type': '--fstype',
+ 'fs_size': '--fssize',
+ 'directory': '--dir',
+ 'zfs_root': '--zfsroot'
+ }
+ },
+ 'clone': {
+ 'variables-lxc-copy': {
+ 'backing_store': '--backingstorage',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--name',
+ 'clone_name': '--newname'
+ },
+ # lxc-clone is deprecated in favor of lxc-copy
+ 'variables-lxc-clone': {
+ 'backing_store': '--backingstore',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--orig',
+ 'clone_name': '--new'
+ }
+ }
+}
+
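+# Illustration (hypothetical values): with backing_store='lvm' and
+# vg_name='lxc', _get_vars() resolves module params through this table and
+# _add_variables() extends the command with flags such as
+# '--bdev lvm --vgname lxc'.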
+
+# LXC_BACKING_STORE is a map of available storage backends and options that
+# are incompatible with the given storage backend.
+LXC_BACKING_STORE = {
+ 'dir': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ],
+ 'lvm': [
+ 'zfs_root'
+ ],
+ 'btrfs': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
+ ],
+ 'loop': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root'
+ ],
+ 'overlayfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
+ ],
+ 'zfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ]
+}
+
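+# Example: with backing_store='dir', the options listed above (lv_name,
+# vg_name, fs_type, fs_size, thinpool) are silently dropped by _get_vars()
+# before the lxc-create command is assembled.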
+
+# LXC_LOGGING_LEVELS is a map of available log levels
+LXC_LOGGING_LEVELS = {
+ 'INFO': ['info', 'INFO', 'Info'],
+ 'ERROR': ['error', 'ERROR', 'Error'],
+ 'DEBUG': ['debug', 'DEBUG', 'Debug']
+}
+
+
+# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXC_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen',
+ 'clone': '_clone'
+}
+
+
+# This is used to attach to a running container and execute commands from
+# within the container on the host. This will provide local access to a
+# container without using SSH. The template will attempt to work within the
+# home directory of the user that was attached to the container and source
+# that users environment variables by default.
+ATTACH_TEMPLATE = """#!/usr/bin/env bash
+pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
+ if [[ -f ".bashrc" ]];then
+ source .bashrc
+ unset HOSTNAME
+ fi
+popd
+
+# User defined command
+%(container_command)s
+"""
+
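+# Rendered example (hypothetical): with container_command="echo hi", the
+# generated script sources the attached user's ~/.bashrc (when present) and
+# then runs `echo hi` as the user-defined command.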
+
+def create_script(command):
+ """Write out a script onto a target.
+
+ This method should be backward compatible with Python 2.4+ when executing
+ from within the container.
+
+ :param command: command to run; this can be a script and can use spacing
+ with newlines as separation.
+ :type command: ``str``
+ """
+
+ (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
+ f = os.fdopen(fd, 'wb')
+ try:
+ f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict'))
+ f.flush()
+ finally:
+ f.close()
+
+ # Ensure the script is executable.
+ os.chmod(script_file, int('0700', 8))
+
+ # Output log file.
+ stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
+
+ # Error log file.
+ stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
+
+ # Execute the script command.
+ try:
+ subprocess.Popen(
+ [script_file],
+ stdout=stdout_file,
+ stderr=stderr_file
+ ).communicate()
+ finally:
+ # Close the log files.
+ stderr_file.close()
+ stdout_file.close()
+
+ # Remove the script file upon completion of execution.
+ os.remove(script_file)
+
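+# Note: the command's stdout and stderr are captured in the mkstemp()-created
+# log files above, which live in the system temp directory (typically /tmp),
+# matching the module notes about /tmp logging.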
+
+class LxcContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.state = self.module.params.get('state', None)
+ self.state_change = False
+ self.lxc_vg = None
+ self.lxc_path = self.module.params.get('lxc_path', None)
+ self.container_name = self.module.params['name']
+ self.container = self.get_container_bind()
+ self.archive_info = None
+ self.clone_info = None
+
+ def get_container_bind(self):
+ return lxc.Container(name=self.container_name)
+
+ @staticmethod
+ def _roundup(num):
+ """Return a rounded floating point number.
+
+ :param num: Number to round up.
+ :type: ``float``
+ :returns: Rounded up number.
+ :rtype: ``int``
+ """
+ num, part = str(num).split('.')
+ num = int(num)
+ if int(part) != 0:
+ num += 1
+ return num
+
+ @staticmethod
+ def _container_exists(container_name, lxc_path=None):
+ """Check if a container exists.
+
+ :param container_name: Name of the container.
+ :type: ``str``
+ :returns: True or False if the container is found.
+ :rtype: ``bool``
+ """
+ if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
+ return True
+ else:
+ return False
+
+ @staticmethod
+ def _add_variables(variables_dict, build_command):
+ """Return a command list with all found options.
+
+ :param variables_dict: Pre-parsed optional variables used from a
+ seed command.
+ :type variables_dict: ``dict``
+ :param build_command: Command to run.
+ :type build_command: ``list``
+ :returns: list of command options.
+ :rtype: ``list``
+ """
+
+ for key, value in variables_dict.items():
+ build_command.append(
+ '%s %s' % (key, value)
+ )
+ return build_command
+
+ def _get_vars(self, variables):
+ """Return a dict of all variables as found within the module.
+
+ :param variables: Hash of all variables to find.
+ :type variables: ``dict``
+ """
+
+ # Remove incompatible storage backend options.
+ variables = variables.copy()
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+ variables.pop(v, None)
+
+ return_dict = dict()
+ false_values = BOOLEANS_FALSE.union([None, ''])
+ for k, v in variables.items():
+ _var = self.module.params.get(k)
+ if _var not in false_values:
+ return_dict[v] = _var
+ return return_dict
+
+ def _run_command(self, build_command, unsafe_shell=False):
+ """Return information from running an Ansible Command.
+
+ This will squash the build command list into a string and then
+ execute the command via Ansible. The output is returned to the method.
+ This output is returned as `return_code`, `stdout`, `stderr`.
+
+ :param build_command: Used for the command and all options.
+ :type build_command: ``list``
+ :param unsafe_shell: Enable or disable unsafe shell commands.
+ :type unsafe_shell: ``bool``
+ """
+
+ return self.module.run_command(
+ ' '.join(build_command),
+ use_unsafe_shell=unsafe_shell
+ )
+
+ def _config(self):
+ """Configure an LXC container.
+
+ Write new configuration values to the lxc config file. This will
+ stop the container if it's running, write the new options, and then
+ restart the container upon completion.
+ """
+
+ _container_config = self.module.params.get('container_config')
+ if not _container_config:
+ return False
+
+ container_config_file = self.container.config_file_name
+ with open(container_config_file, 'rb') as f:
+ container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
+
+ parsed_options = [i.split('=', 1) for i in _container_config]
+ config_change = False
+ for key, value in parsed_options:
+ key = key.strip()
+ value = value.strip()
+ new_entry = '%s = %s\n' % (key, value)
+ keyre = re.compile(r'%s(\s+)?=' % key)
+ for option_line in container_config:
+ # Look for key in config
+ if keyre.match(option_line):
+ _, _value = option_line.split('=', 1)
+ config_value = ' '.join(_value.split())
+ line_index = container_config.index(option_line)
+ # If the sanitized values don't match replace them
+ if value != config_value:
+ line_index += 1
+ if new_entry not in container_config:
+ config_change = True
+ container_config.insert(line_index, new_entry)
+ # Break the flow as values are written or not at this point
+ break
+ else:
+ config_change = True
+ container_config.append(new_entry)
+
+ # If the config changed restart the container.
+ if config_change:
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.container.stop()
+
+ with open(container_config_file, 'wb') as f:
+ f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
+
+ self.state_change = True
+ if container_state == 'running':
+ self._container_startup()
+ elif container_state == 'frozen':
+ self._container_startup()
+ self.container.freeze()
+
+ def _container_create_clone(self):
+ """Clone a new LXC container from an existing container.
+
+ This method will clone an existing container to a new container using
+ the `clone_name` variable as the new container name. The method will
+ create a container if the container `name` does not exist.
+
+ Note that cloning a container will ensure that the original container
+ is "stopped" before the clone can be done. Because this operation can
+ require a state change the method will return the original container
+ to its prior state upon completion of the clone.
+
+ Once the clone is complete the new container will be left in a stopped
+ state.
+ """
+
+ # Ensure that the state of the original container is stopped
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ # lxc-clone is deprecated in favor of lxc-copy
+ clone_vars = 'variables-lxc-copy'
+ clone_cmd = self.module.get_bin_path('lxc-copy')
+ if not clone_cmd:
+ clone_vars = 'variables-lxc-clone'
+ clone_cmd = self.module.get_bin_path('lxc-clone', True)
+
+ build_command = [
+ clone_cmd,
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['clone'][clone_vars]
+ ),
+ build_command=build_command
+ )
+
+ # Use a snapshot when cloning if requested.
+ if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
+ build_command.append('--snapshot')
+ # Force the use of a snapshot when backing_store == overlayfs; if
+ # overlayfs is used and snapshot is unset, the clone command will
+ # fail with an unsupported type.
+ elif self.module.params.get('backing_store') == 'overlayfs':
+ build_command.append('--snapshot')
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing %s." % os.path.basename(clone_cmd)
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(
+ build_command
+ )
+ )
+ else:
+ self.state_change = True
+ # Restore the original state of the origin container if it was
+ # not in a stopped state.
+ if container_state == 'running':
+ self.container.start()
+ elif container_state == 'frozen':
+ self.container.start()
+ self.container.freeze()
+
+ return True
+
+ def _create(self):
+ """Create a new LXC container.
+
+ This method will build and execute a shell command to build the
+ container. It would have been nice to simply use the lxc python library
+ however at the time this was written the python library, in both py2
+ and py3 didn't support some of the more advanced container create
+ processes. These missing processes mainly revolve around backing
+ LXC containers with block devices.
+ """
+
+ build_command = [
+ self.module.get_bin_path('lxc-create', True),
+ '--name %s' % self.container_name,
+ '--quiet'
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['create']['variables']
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params.get('container_log') in BOOLEANS_TRUE:
+ # Log to /var/log/lxc/ when running as root; otherwise log to the
+ # home directory of the executing user.
+ try:
+ if os.getuid() != 0:
+ log_path = os.getenv('HOME')
+ else:
+ if not os.path.isdir('/var/log/lxc/'):
+ os.makedirs('/var/log/lxc/')
+ log_path = '/var/log/lxc/'
+ except OSError:
+ log_path = os.getenv('HOME')
+
+ build_command.extend([
+ '--logfile %s' % os.path.join(
+ log_path, 'lxc-%s.log' % self.container_name
+ ),
+ '--logpriority %s' % self.module.params.get(
+ 'container_log_level'
+ ).upper()
+ ])
+
+ # Add the template commands to the end of the command if there are any
+ template_options = self.module.params.get('template_options', None)
+ if template_options:
+ build_command.append('-- %s' % template_options)
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing lxc-create."
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(build_command)
+ )
+ else:
+ self.state_change = True
+
+ def _container_data(self):
+ """Returns a dict of container information.
+
+ :returns: container data
+ :rtype: ``dict``
+ """
+
+ return {
+ 'interfaces': self.container.get_interfaces(),
+ 'ips': self.container.get_ips(),
+ 'state': self._get_state(),
+ 'init_pid': int(self.container.init_pid),
+ 'name': self.container_name,
+ }
+
+ def _unfreeze(self):
+ """Unfreeze a container.
+
+ :returns: True or False based on if the container was unfrozen.
+ :rtype: ``bool``
+ """
+
+ unfreeze = self.container.unfreeze()
+ if unfreeze:
+ self.state_change = True
+ return unfreeze
+
+ def _get_state(self):
+ """Return the state of a container.
+
+ If the container is not found, the state returned is "absent".
+
+ :returns: state of a container as a lower case string.
+ :rtype: ``str``
+ """
+
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ return str(self.container.state).lower()
+        return 'absent'
+
+ def _execute_command(self):
+ """Execute a shell command."""
+
+ container_command = self.module.params.get('container_command')
+ if container_command:
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ self._unfreeze()
+ elif container_state == 'stopped':
+ self._container_startup()
+
+ self.container.attach_wait(create_script, container_command)
+ self.state_change = True
+
+ def _container_startup(self, timeout=60):
+ """Ensure a container is started.
+
+        :param timeout: Time before the startup operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ self.container = self.get_container_bind()
+ for _ in xrange(timeout):
+ if self._get_state() != 'running':
+ self.container.start()
+ self.state_change = True
+ # post startup sleep for 1 second.
+ time.sleep(1)
+ else:
+ return True
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+            msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ def _check_archive(self):
+ """Create a compressed archive of a container.
+
+        This will store the archive information as self.archive_info.
+ """
+
+ if self.module.params.get('archive') in BOOLEANS_TRUE:
+ self.archive_info = {
+ 'archive': self._container_create_tar()
+ }
+
+ def _check_clone(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ clone_name = self.module.params.get('clone_name')
+ if clone_name:
+ if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+ self.clone_info = {
+ 'cloned': self._container_create_clone()
+ }
+ else:
+ self.clone_info = {
+ 'cloned': False
+ }
+
+ def _destroyed(self, timeout=60):
+ """Ensure a container is destroyed.
+
+ :param timeout: Time before the destroy operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ for _ in xrange(timeout):
+ if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ break
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+
+ if self._get_state() != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ if self.container.destroy():
+ self.state_change = True
+
+ # post destroy attempt sleep for 1 second.
+ time.sleep(1)
+ else:
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to destroy container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to be destroyed. Check'
+ ' that lxc is available and that the container is in a'
+ ' functional state.' % self.container_name
+ )
+
+ def _frozen(self, count=0):
+ """Ensure a container is frozen.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='frozen')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ pass
+ elif container_state == 'running':
+ self.container.freeze()
+ self.state_change = True
+ else:
+ self._container_startup()
+ self.container.freeze()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._frozen(count)
+
+ def _restarted(self, count=0):
+ """Ensure a container is restarted.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='restart')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Run container startup
+ self._container_startup()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._restarted(count)
+
+ def _stopped(self, count=0):
+ """Ensure a container is stopped.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='stop')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._stopped(count)
+
+ def _started(self, count=0):
+ """Ensure a container is started.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='start')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ container_state = self._get_state()
+ if container_state == 'running':
+ pass
+ elif container_state == 'frozen':
+ self._unfreeze()
+ elif not self._container_startup():
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+                    msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ # Return data
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._started(count)
+
+ def _get_lxc_vg(self):
+ """Return the name of the Volume Group used in LXC."""
+
+ build_command = [
+ self.module.get_bin_path('lxc-config', True),
+ "lxc.bdev.lvm.vg"
+ ]
+ rc, vg, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to read LVM VG from LXC config',
+ command=' '.join(build_command)
+ )
+ else:
+ return str(vg.strip())
+
+ def _lvm_lv_list(self):
+ """Return a list of all lv in a current vg."""
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvs', True)
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to get list of LVs',
+ command=' '.join(build_command)
+ )
+
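+        # Skip the header row of the lvs output; in each remaining row,
+        # column 0 is the LV name and column 1 its VG.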
+ all_lvms = [i.split() for i in stdout.splitlines()][1:]
+ return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
+
+ def _get_vg_free_pe(self, vg_name):
+ """Return the available size of a given VG.
+
+        :param vg_name: Name of the volume group.
+        :type vg_name: ``str``
+        :returns: free size and unit of measurement of the VG
+        :rtype: ``tuple``
+ """
+
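+        # With '--units g', vgdisplay prints a line such as
+        #   'Free  PE / Size       1280 / 5.00 g'   (illustrative values),
+        # from which the trailing size and unit are parsed below.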
+ build_command = [
+ 'vgdisplay',
+ vg_name,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read vg %s' % vg_name,
+ command=' '.join(build_command)
+ )
+
+ vg_info = [i.strip() for i in stdout.splitlines()][1:]
+ free_pe = [i for i in vg_info if i.startswith('Free')]
+ _free_pe = free_pe[0].split()
+ return float(_free_pe[-2]), _free_pe[-1]
+
+ def _get_lv_size(self, lv_name):
+ """Return the available size of a given LV.
+
+        :param lv_name: Name of the logical volume.
+        :type lv_name: ``str``
+        :returns: size and unit of measurement of the LV
+        :rtype: ``tuple``
+ """
+
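+        # With '--units g', lvdisplay prints a line such as
+        #   'LV Size                5.00 g'   (illustrative values),
+        # from which the trailing size and unit are parsed below.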
+ vg = self._get_lxc_vg()
+ lv = os.path.join(vg, lv_name)
+ build_command = [
+ 'lvdisplay',
+ lv,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read lv %s' % lv,
+ command=' '.join(build_command)
+ )
+
+ lv_info = [i.strip() for i in stdout.splitlines()][1:]
+ _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+ free_pe = _free_pe[0].split()
+ return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+ def _lvm_snapshot_create(self, source_lv, snapshot_name,
+ snapshot_size_gb=5):
+ """Create an LVM snapshot.
+
+ :param source_lv: Name of lv to snapshot
+ :type source_lv: ``str``
+ :param snapshot_name: Name of lv snapshot
+ :type snapshot_name: ``str``
+ :param snapshot_size_gb: Size of snapshot to create
+ :type snapshot_size_gb: ``int``
+ """
+
+ vg = self._get_lxc_vg()
+        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+ if free_space < float(snapshot_size_gb):
+ message = (
+                'Snapshot size [ %s ] is greater than the free space [ %s ] on'
+                ' volume group [ %s ]' % (snapshot_size_gb, free_space, vg)
+ )
+ self.failure(
+ error='Not enough space to create snapshot',
+ rc=2,
+ msg=message
+ )
+
+ # Create LVM Snapshot
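+        # e.g. lvcreate -n web01_lxc_snapshot -s lxc/web01 -L5g (illustrative names).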
+ build_command = [
+ self.module.get_bin_path('lvcreate', True),
+ "-n",
+ snapshot_name,
+ "-s",
+ os.path.join(vg, source_lv),
+ "-L%sg" % snapshot_size_gb
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to Create LVM snapshot %s/%s --> %s'
+ % (vg, source_lv, snapshot_name)
+ )
+
+ def _lvm_lv_mount(self, lv_name, mount_point):
+ """mount an lv.
+
+ :param lv_name: name of the logical volume to mount
+ :type lv_name: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ "/dev/%s/%s" % (vg, lv_name),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+                msg='failed to mount LVM LV %s/%s to %s'
+ % (vg, lv_name, mount_point)
+ )
+
+ def _create_tar(self, source_dir):
+ """Create an archive of a given ``source_dir`` to ``output_path``.
+
+ :param source_dir: Path to the directory to be archived.
+ :type source_dir: ``str``
+ """
+
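+        # Restrict the archive to owner-only permissions (umask 077) while it is written.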
+ old_umask = os.umask(int('0077', 8))
+
+ archive_path = self.module.params.get('archive_path')
+ if not os.path.isdir(archive_path):
+ os.makedirs(archive_path)
+
+ archive_compression = self.module.params.get('archive_compression')
+ compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+        # Build the archive name from the archive path, container name and compression extension.
+ archive_name = '%s.%s' % (
+ os.path.join(
+ archive_path,
+ self.container_name
+ ),
+ compression_type['extension']
+ )
+
+ build_command = [
+ self.module.get_bin_path('tar', True),
+ '--directory=%s' % os.path.realpath(
+ os.path.expanduser(source_dir)
+ ),
+ compression_type['argument'],
+ archive_name,
+ '.'
+ ]
+
+ rc, stdout, err = self._run_command(
+ build_command=build_command,
+ unsafe_shell=True
+ )
+
+ os.umask(old_umask)
+
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to create tar archive',
+ command=' '.join(build_command)
+ )
+
+ return archive_name
+
+ def _lvm_lv_remove(self, lv_name):
+ """Remove an LV.
+
+ :param lv_name: The name of the logical volume
+ :type lv_name: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvremove', True),
+ "-f",
+ "%s/%s" % (vg, lv_name),
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+ command=' '.join(build_command)
+ )
+
+ def _rsync_data(self, container_path, temp_dir):
+ """Sync the container directory to the temp directory.
+
+        :param container_path: path to the container rootfs
+ :type container_path: ``str``
+ :param temp_dir: path to the temporary local working directory
+ :type temp_dir: ``str``
+ """
+ # This loop is created to support overlayfs archives. This should
+ # squash all of the layers into a single archive.
+ fs_paths = container_path.split(':')
+ if 'overlayfs' in fs_paths:
+ fs_paths.pop(fs_paths.index('overlayfs'))
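+        # e.g. a rootfs of 'overlayfs:/srv/lower:/srv/upper' (illustrative)
+        # leaves fs_paths == ['/srv/lower', '/srv/upper'].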
+
+ for fs_path in fs_paths:
+ # Set the path to the container data
+ fs_path = os.path.dirname(fs_path)
+
+ # Run the sync command
+ build_command = [
+ self.module.get_bin_path('rsync', True),
+ '-aHAX',
+ fs_path,
+ temp_dir
+ ]
+ rc, stdout, err = self._run_command(
+ build_command,
+ unsafe_shell=True
+ )
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to perform archive',
+ command=' '.join(build_command)
+ )
+
+ def _unmount(self, mount_point):
+ """Unmount a file system.
+
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('umount', True),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to unmount [ %s ]' % mount_point,
+ command=' '.join(build_command)
+ )
+
+ def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+ """mount an lv.
+
+ :param lowerdir: name/path of the lower directory
+ :type lowerdir: ``str``
+ :param upperdir: name/path of the upper directory
+ :type upperdir: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
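+        # Equivalent to e.g. (illustrative paths):
+        #   mount -t overlayfs -o lowerdir=/srv/lower,upperdir=/srv/upper overlayfs /mnt/rootfs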
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ '-t overlayfs',
+ '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+ 'overlayfs',
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+ % (lowerdir, upperdir, mount_point, build_command)
+ )
+
+ def _container_create_tar(self):
+ """Create a tar archive from an LXC container.
+
+ The process is as follows:
+ * Stop or Freeze the container
+ * Create temporary dir
+ * Copy container and config to temporary directory
+ * If LVM backed:
+ * Create LVM snapshot of LV backing the container
+ * Mount the snapshot to tmpdir/rootfs
+ * Restore the state of the container
+ * Create tar of tmpdir
+ * Clean up
+ """
+
+ # Create a temp dir
+ temp_dir = tempfile.mkdtemp()
+
+ # Set the name of the working dir, temp + container_name
+ work_dir = os.path.join(temp_dir, self.container_name)
+
+ # LXC container rootfs
+ lxc_rootfs = self.container.get_config_item('lxc.rootfs')
+
+ # Test if the containers rootfs is a block device
+ block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
+
+ # Test if the container is using overlayfs
+ overlayfs_backed = lxc_rootfs.startswith('overlayfs')
+
+ mount_point = os.path.join(work_dir, 'rootfs')
+
+ # Set the snapshot name if needed
+ snapshot_name = '%s_lxc_snapshot' % self.container_name
+
+ container_state = self._get_state()
+ try:
+ # Ensure the original container is stopped or frozen
+ if container_state not in ['stopped', 'frozen']:
+ if container_state == 'running':
+ self.container.freeze()
+ else:
+ self.container.stop()
+
+ # Sync the container data from the container_path to work_dir
+ self._rsync_data(lxc_rootfs, temp_dir)
+
+ if block_backed:
+ if snapshot_name not in self._lvm_lv_list():
+ if not os.path.exists(mount_point):
+ os.makedirs(mount_point)
+
+ # Take snapshot
+ size, measurement = self._get_lv_size(
+ lv_name=self.container_name
+ )
+ self._lvm_snapshot_create(
+ source_lv=self.container_name,
+ snapshot_name=snapshot_name,
+ snapshot_size_gb=size
+ )
+
+ # Mount snapshot
+ self._lvm_lv_mount(
+ lv_name=snapshot_name,
+ mount_point=mount_point
+ )
+ else:
+ self.failure(
+ err='snapshot [ %s ] already exists' % snapshot_name,
+ rc=1,
+ msg='The snapshot [ %s ] already exists. Please clean'
+                            ' up old container snapshots before continuing.'
+ % snapshot_name
+ )
+ elif overlayfs_backed:
+ lowerdir, upperdir = lxc_rootfs.split(':')[1:]
+ self._overlayfs_mount(
+ lowerdir=lowerdir,
+ upperdir=upperdir,
+ mount_point=mount_point
+ )
+
+ # Set the state as changed and set a new fact
+ self.state_change = True
+ return self._create_tar(source_dir=work_dir)
+ finally:
+ if block_backed or overlayfs_backed:
+ # unmount snapshot
+ self._unmount(mount_point)
+
+ if block_backed:
+ # Remove snapshot
+ self._lvm_lv_remove(snapshot_name)
+
+ # Restore original state of container
+ if container_state == 'running':
+ if self._get_state() == 'frozen':
+ self.container.unfreeze()
+ else:
+ self.container.start()
+
+ # Remove tmpdir
+ shutil.rmtree(temp_dir)
+
+ def check_count(self, count, method):
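+        # The ensure-state methods retry themselves once after creating a
+        # missing container; count > 1 means the requested state still
+        # cannot be reached.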
+ if count > 1:
+ self.failure(
+ error='Failed to %s container' % method,
+ rc=1,
+                msg='The container [ %s ] failed to %s. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % (self.container_name, method)
+ )
+
+ def failure(self, **kwargs):
+ """Return a Failure when running an Ansible command.
+
+ :param error: ``str`` Error that occurred.
+ :param rc: ``int`` Return code while executing an Ansible command.
+ :param msg: ``str`` Message to report.
+ """
+
+ self.module.fail_json(**kwargs)
+
+ def run(self):
+ """Run the main method."""
+
+ action = getattr(self, LXC_ANSIBLE_STATES[self.state])
+ action()
+
+ outcome = self._container_data()
+ if self.archive_info:
+ outcome.update(self.archive_info)
+
+ if self.clone_info:
+ outcome.update(self.clone_info)
+
+ self.module.exit_json(
+ changed=self.state_change,
+ lxc_container=outcome
+ )
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ template=dict(
+ type='str',
+ default='ubuntu'
+ ),
+ backing_store=dict(
+ type='str',
+ choices=LXC_BACKING_STORE.keys(),
+ default='dir'
+ ),
+ template_options=dict(
+ type='str'
+ ),
+ config=dict(
+ type='path',
+ ),
+ vg_name=dict(
+ type='str',
+ default='lxc'
+ ),
+ thinpool=dict(
+ type='str'
+ ),
+ fs_type=dict(
+ type='str',
+ default='ext4'
+ ),
+ fs_size=dict(
+ type='str',
+ default='5G'
+ ),
+ directory=dict(
+ type='path'
+ ),
+ zfs_root=dict(
+ type='str'
+ ),
+ lv_name=dict(
+ type='str'
+ ),
+ lxc_path=dict(
+ type='path'
+ ),
+ state=dict(
+ choices=LXC_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ container_command=dict(
+ type='str'
+ ),
+ container_config=dict(
+ type='list',
+ elements='str'
+ ),
+ container_log=dict(
+ type='bool',
+ default=False
+ ),
+ container_log_level=dict(
+ choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
+ default='INFO'
+ ),
+ clone_name=dict(
+ type='str',
+ required=False
+ ),
+ clone_snapshot=dict(
+ type='bool',
+            default=False
+ ),
+ archive=dict(
+ type='bool',
+ default=False
+ ),
+ archive_path=dict(
+ type='path',
+ ),
+ archive_compression=dict(
+ choices=LXC_COMPRESSION_MAP.keys(),
+ default='gzip'
+ )
+ ),
+ supports_check_mode=False,
+ required_if=([
+ ('archive', True, ['archive_path'])
+ ]),
+ )
+
+ if not HAS_LXC:
+ module.fail_json(
+ msg='The `lxc` module is not importable. Check the requirements.'
+ )
+
+ lv_name = module.params.get('lv_name')
+ if not lv_name:
+ module.params['lv_name'] = module.params.get('name')
+
+ lxc_manage = LxcContainerManagement(module=module)
+ lxc_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py
new file mode 100644
index 00000000..119387f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py
@@ -0,0 +1,710 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_container
+short_description: Manage LXD Containers
+description:
+ - Management of LXD containers
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ architecture:
+ description:
+ - The architecture for the container (e.g. "x86_64" or "i686").
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ type: str
+ required: false
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.cpu": "2"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ - If the container already exists and its "config" value in metadata
+ obtained from
+ GET /1.0/containers/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
+      differs, this module tries to apply the configurations.
+    - Keys starting with 'volatile.' are ignored for this comparison.
+    - Not all config values can be applied to an existing container;
+      you may need to delete and recreate the container.
+ type: dict
+ required: false
+ profiles:
+ description:
+      - Profiles to be used by the container.
+ type: list
+ devices:
+ description:
+ - 'The devices for the container
+ (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ type: dict
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the container is ephemeral (e.g. true or false).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ type: bool
+ source:
+ description:
+ - 'The source for the container
+ (e.g. { "type": "image",
+ "mode": "pull",
+ "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd",
+ "alias": "ubuntu/xenial/amd64" }).'
+ - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
+ - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
+ required: false
+ type: dict
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container.
+ required: false
+ default: started
+ type: str
+ target:
+ description:
+      - For cluster deployments. Will attempt to create a container on a target node.
+        If the container exists elsewhere in the cluster, it will not be replaced or moved.
+        The name should match the name of the node as shown in C(lxc cluster list).
+ type: str
+ required: false
+ version_added: 1.0.0
+ timeout:
+ description:
+ - A timeout for changing the state of the container.
+      - This is also used as a timeout for waiting until IPv4 addresses
+        are assigned to all network interfaces in the container after
+        starting or restarting.
+ required: false
+ default: 30
+ type: int
+ wait_for_ipv4_addresses:
+ description:
+      - If this is true, the C(lxd_container) waits until IPv4 addresses
+        are assigned to all network interfaces in the container after
+        starting or restarting.
+ required: false
+ default: false
+ type: bool
+ force_stop:
+ description:
+      - If this is true, the C(lxd_container) forces the container to stop
+        when stopping or restarting it.
+ required: false
+ default: false
+ type: bool
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+        running this module using the command
+        C(lxc config set core.trust_password <some random password>).
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+      - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in your namespace, the module will
+ simply return as "unchanged".
+  - There are two ways to run commands inside a container: using the command
+    module, or using the ansible lxd connection plugin bundled with Ansible >=
+    2.1; the latter requires python to be installed in the container, which can
+    be done with the command module.
+ - You can copy a file from the host to the container
+    with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) modules and the C(lxd) connection plugin.
+ See the example below.
+ - You can copy a file in the created container to the localhost
+    with C(command=lxc file pull container_name/dir/filename filename).
+    See the fetch example below.
+'''
+
+EXAMPLES = '''
+# An example for creating an Ubuntu container and installing python
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ protocol: lxd # if you get a 404, try setting protocol: simplestreams
+ alias: ubuntu/xenial/amd64
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+ - name: Check python is installed in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: dpkg -s python
+ register: python_install_check
+ failed_when: python_install_check.rc not in [0, 1]
+ changed_when: false
+
+ - name: Install python in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: apt-get install -y python
+ when: python_install_check.rc == 1
+
+# An example for creating an Ubuntu 14.04 container using an image fingerprint.
+# This requires changing 'server' and 'protocol' key values, replacing the
+# 'alias' key with 'fingerprint' and supplying an appropriate value that
+# matches the container image you wish to use.
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ # Provides current (and older) Ubuntu images with listed fingerprints
+ server: https://cloud-images.ubuntu.com/releases
+ # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
+ protocol: simplestreams
+ # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
+ fingerprint: e9a8bdfab6dc
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+# An example for deleting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: absent
+
+# An example for restarting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: restarted
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: mycontainer
+ state: restarted
+
+# Note your container must be in the inventory for the below example.
+#
+# [containers]
+# mycontainer ansible_connection=lxd
+#
+- hosts:
+ - mycontainer
+ tasks:
+ - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
+ ansible.builtin.fetch:
+ src: /etc/hosts
+ dest: /tmp/mycontainer-hosts
+ flat: true
+
+# An example for LXD cluster deployments. This example will create two new containers on specific
+# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster
+# members that LXD cluster recognizes, not ansible inventory names, see: 'lxc cluster list'.
+# LXD API calls can be made to any LXD member, in this example, we send API requests to
+# 'node01.example.com', which matches an ansible inventory name.
+- hosts: node01.example.com
+ tasks:
+ - name: Create LXD container
+ community.general.lxd_container:
+ name: new-container-1
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node01
+
+ - name: Create container on another node
+ community.general.lxd_container:
+ name: new-container-2
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node02
+'''
+
+RETURN = '''
+addresses:
+ description: Mapping from the network device name to a list of IPv4 addresses in the container
+ returned: when state is started or restarted
+ type: dict
+ sample: {"eth0": ["10.155.92.191"]}
+old_state:
+ description: The old state of the container
+ returned: when state is started or restarted
+ type: str
+ sample: "stopped"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the container.
+ returned: success
+ type: list
+ sample: '["create", "start"]'
+'''
+import datetime
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# LXD_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXD_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen'
+}
+
+# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
+# lxc_container module state parameter value.
+ANSIBLE_LXD_STATES = {
+ 'Running': 'started',
+ 'Stopped': 'stopped',
+ 'Frozen': 'frozen',
+}
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+]
+
+
+class LXDContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+
+ self.state = self.module.params['state']
+
+ self.timeout = self.module.params['timeout']
+ self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+ self.force_stop = self.module.params['force_stop']
+ self.addresses = None
+ self.target = self.module.params['target']
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
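+        # Prefer an explicitly supplied url; otherwise use the snap socket
+        # when it exists on disk, falling back to the default socket.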
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+            self.module.fail_json(msg=str(e))
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_container_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ def _get_container_state_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}/state'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _container_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+ def _change_state(self, action, force_stop=False):
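+        # Builds a state-change body such as {'action': 'stop', 'timeout': 30},
+        # adding 'force': True when force_stop is set.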
+ body_json = {'action': action, 'timeout': self.timeout}
+ if force_stop:
+ body_json['force'] = True
+ return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
+
+ def _create_container(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ if self.target:
+ self.client.do('POST', '/1.0/containers?' + urlencode(dict(target=self.target)), config)
+ else:
+ self.client.do('POST', '/1.0/containers', config)
+ self.actions.append('create')
+
+ def _start_container(self):
+ self._change_state('start')
+ self.actions.append('start')
+
+ def _stop_container(self):
+ self._change_state('stop', self.force_stop)
+ self.actions.append('stop')
+
+ def _restart_container(self):
+ self._change_state('restart', self.force_stop)
+ self.actions.append('restart')
+
+ def _delete_container(self):
+ self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def _freeze_container(self):
+ self._change_state('freeze')
+ self.actions.append('freeze')
+
+ def _unfreeze_container(self):
+ self._change_state('unfreeze')
+        self.actions.append('unfreeze')
+
+ def _container_ipv4_addresses(self, ignore_devices=None):
+ ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
+
+ resp_json = self._get_container_state_json()
+ network = resp_json['metadata']['network'] or {}
+ network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
+ addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
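+        # e.g. {'eth0': ['10.155.92.191']} once the container has addresses
+        # (cf. the RETURN sample above).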
+ return addresses
+
+ @staticmethod
+ def _has_all_ipv4_addresses(addresses):
+ return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values())
+
+ def _get_addresses(self):
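+        # Poll once per second until every interface reports an IPv4 address
+        # or the timeout deadline passes.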
+ try:
+ due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
+ while datetime.datetime.now() < due:
+ time.sleep(1)
+ addresses = self._container_ipv4_addresses()
+ if self._has_all_ipv4_addresses(addresses):
+ self.addresses = addresses
+ return
+ except LXDClientException as e:
+ e.msg = 'timeout for getting IPv4 addresses'
+ raise
+
+ def _started(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ elif self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _stopped(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ else:
+ if self.old_state == 'stopped':
+ if self._needs_to_apply_container_configs():
+ self._start_container()
+ self._apply_container_configs()
+ self._stop_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._stop_container()
+
+ def _restarted(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._restart_container()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _destroyed(self):
+ if self.old_state != 'absent':
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self.old_state != 'stopped':
+ self._stop_container()
+ self._delete_container()
+
+ def _frozen(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ self._freeze_container()
+ else:
+ if self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._freeze_container()
+
+ def _needs_to_change_container_config(self, key):
+ if key not in self.config:
+ return False
+ if key == 'config':
+ old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
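+            # LXD-managed 'volatile.' keys (e.g. volatile.eth0.hwaddr) change
+            # at runtime, so they are excluded from the comparison.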
+ for k, v in self.config['config'].items():
+ if k not in old_configs:
+ return True
+ if old_configs[k] != v:
+ return True
+ return False
+ else:
+ old_configs = self.old_container_json['metadata'][key]
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_container_configs(self):
+ return (
+ self._needs_to_change_container_config('architecture') or
+ self._needs_to_change_container_config('config') or
+ self._needs_to_change_container_config('ephemeral') or
+ self._needs_to_change_container_config('devices') or
+ self._needs_to_change_container_config('profiles')
+ )
+
+ def _apply_container_configs(self):
+ old_metadata = self.old_container_json['metadata']
+ body_json = {
+ 'architecture': old_metadata['architecture'],
+ 'config': old_metadata['config'],
+ 'devices': old_metadata['devices'],
+ 'profiles': old_metadata['profiles']
+ }
+ if self._needs_to_change_container_config('architecture'):
+ body_json['architecture'] = self.config['architecture']
+ if self._needs_to_change_container_config('config'):
+ for k, v in self.config['config'].items():
+ body_json['config'][k] = v
+ if self._needs_to_change_container_config('ephemeral'):
+ body_json['ephemeral'] = self.config['ephemeral']
+ if self._needs_to_change_container_config('devices'):
+ body_json['devices'] = self.config['devices']
+ if self._needs_to_change_container_config('profiles'):
+ body_json['profiles'] = self.config['profiles']
+ self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
+ self.actions.append('apply_container_configs')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_container_json = self._get_container_json()
+ self.old_state = self._container_json_to_module_state(self.old_container_json)
+ action = getattr(self, LXD_ANSIBLE_STATES[self.state])
+ action()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'log_verbosity': self.module._verbosity,
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ if self.addresses is not None:
+ result_json['addresses'] = self.addresses
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ architecture=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ ephemeral=dict(
+ type='bool',
+ ),
+ profiles=dict(
+ type='list',
+ ),
+ source=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=LXD_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ target=dict(
+ type='str',
+ ),
+ timeout=dict(
+ type='int',
+ default=30
+ ),
+ wait_for_ipv4_addresses=dict(
+ type='bool',
+ default=False
+ ),
+ force_stop=dict(
+ type='bool',
+ default=False
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDContainerManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
new file mode 100644
index 00000000..ccd74d42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+description:
+ - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a profile.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the profile.
+ type: str
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.memory": "4GB"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ - If the profile already exists and its "config" value in metadata
+ obtained from
+ GET /1.0/profiles/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+      differs, this module tries to apply the configurations.
+    - Not all config values can be applied to an existing profile;
+      you may need to delete and recreate the profile.
+ required: false
+ type: dict
+ devices:
+ description:
+ - 'The devices for the profile
+ (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ required: false
+ type: dict
+ new_name:
+ description:
+      - The new name for the profile.
+      - If this parameter is specified, the profile will be renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ required: false
+ type: str
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+        running this module using the command
+        C(lxc config set core.trust_password <some random password>).
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+      - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Profiles must have a unique name. If you attempt to create a profile
+    with a name that already exists in your namespace, the module will
+ simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for creating a profile via http connection
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create macvlan profile
+ community.general.lxd_profile:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename a profile
+ community.general.lxd_profile:
+ name: macvlan
+ new_name: macvlan2
+ state: present
+'''
+
+RETURN = '''
+old_state:
+ description: The old state of the profile
+ returned: success
+ type: str
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the profile.
+ returned: success
+ type: list
+ sample: '["create"]'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# PROFILES_STATES is a list of supported states.
+PROFILES_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description', 'devices'
+]
+
+
+class LXDProfileManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+ self.state = self.module.params['state']
+ self.new_name = self.module.params.get('new_name', None)
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+            self.module.fail_json(msg=str(e))
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_profile_json(self):
+ return self.client.do(
+ 'GET', '/1.0/profiles/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _profile_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return 'present'
+
+ def _update_profile(self):
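+        # Reconcile desired vs. current state: for 'present', create the
+        # profile or rename/apply configs as needed; for 'absent', delete it.
+        # Combinations where new_name cannot apply are rejected below.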
+ if self.state == 'present':
+ if self.old_state == 'absent':
+ if self.new_name is None:
+ self._create_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile does not exist and the specified state is present',
+ changed=False)
+ else:
+ if self.new_name is not None and self.new_name != self.name:
+ self._rename_profile()
+ if self._needs_to_apply_profile_configs():
+ self._apply_profile_configs()
+ elif self.state == 'absent':
+ if self.old_state == 'present':
+ if self.new_name is None:
+ self._delete_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile exists and the specified state is absent',
+ changed=False)
+
+ def _create_profile(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', '/1.0/profiles', config)
+ self.actions.append('create')
+
+ def _rename_profile(self):
+ config = {'name': self.new_name}
+ self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('rename')
+ self.name = self.new_name
+
+ def _needs_to_change_profile_config(self, key):
+ if key not in self.config:
+ return False
+ old_configs = self.old_profile_json['metadata'].get(key, None)
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_profile_configs(self):
+ return (
+ self._needs_to_change_profile_config('config') or
+ self._needs_to_change_profile_config('description') or
+ self._needs_to_change_profile_config('devices')
+ )
+
+ def _apply_profile_configs(self):
+ config = self.old_profile_json.copy()
+ for k, v in self.config.items():
+ config[k] = v
+ self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('apply_profile_configs')
+
+ def _delete_profile(self):
+ self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_profile_json = self._get_profile_json()
+ self.old_state = self._profile_json_to_module_state(self.old_profile_json)
+ self._update_profile()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ new_name=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=PROFILES_STATES,
+ default='present'
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDProfileManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py
new file mode 100644
index 00000000..6eefe133
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_dns_reload
+author: "Simon Weald (@glitchcrab)"
+short_description: Request reload of Memset's DNS infrastructure
+notes:
+ - DNS reload requests are a best-effort service provided by Memset; these generally
+ happen every 15 minutes by default, however you can request an immediate reload if
+ later tasks rely on the records being created. An API key generated via the
+ Memset customer control panel is required with the following minimum scope -
+ I(dns.reload). If you wish to poll the job status to wait until the reload has
+ completed, then I(job.status) is also required.
+description:
+ - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ poll:
+ default: false
+ type: bool
+ description:
+ - Boolean value, if set will poll the reload job's status and return
+ when the job has completed (unless the 30 second timeout is reached first).
+ If the timeout is reached then the task will not be marked as failed, but
+ stderr will indicate that the polling failed.
+'''
+
+EXAMPLES = '''
+- name: Submit DNS reload and poll
+ community.general.memset_dns_reload:
+ api_key: 5eb86c9196ab03919abcf03857163741
+    poll: true
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Raw response from the Memset API.
+ returned: always
+ type: complex
+ contains:
+ error:
+ description: Whether the job ended in error state.
+ returned: always
+ type: bool
+ sample: true
+ finished:
+ description: Whether the job completed before the result was returned.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description: Job ID.
+ returned: always
+ type: str
+ sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
+ status:
+ description: Job status.
+ returned: always
+ type: str
+ sample: "DONE"
+ type:
+ description: Job type.
+ returned: always
+ type: str
+ sample: "dns"
+'''
+
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def poll_reload_status(api_key=None, job_id=None, payload=None):
+ '''
+ We poll the `job.status` endpoint every 5 seconds up to a
+ maximum of 6 times. This is a relatively arbitrary choice of
+ timeout, however requests rarely take longer than 15 seconds
+ to complete.
+ '''
+ memset_api, stderr, msg = None, None, None
+ payload['id'] = job_id
+
+ api_method = 'job.status'
+ _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+
+    # Poll every 5 seconds, up to a maximum of 6 times, until the job
+    # reports that it has finished.
+    counter = 0
+    while not response.json()['finished'] and counter < 6:
+        sleep(5)
+        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+        counter += 1
+ if response.json()['error']:
+ # the reload job was submitted but polling failed. Don't return this as an overall task failure.
+ stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
+ else:
+ memset_api = response.json()
+ msg = None
+
+    return memset_api, msg, stderr
+
+
+def reload_dns(args=None):
+ '''
+ DNS reloads are a single API call and therefore there's not much
+ which can go wrong outside of auth errors.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ memset_api, msg, stderr = None, None, None
+
+ api_method = 'dns.reload'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['memset_api'] = response.json()
+ retvals['msg'] = msg
+ return(retvals)
+
+ # set changed to true if the reload request was accepted.
+ has_changed = True
+ memset_api = msg
+ # empty msg var as we don't want to return the API's json response twice.
+ msg = None
+
+ if args['poll']:
+ # hand off to the poll function.
+ job_id = response.json()['id']
+ memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload)
+
+ # assemble return variables.
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+    for val, data in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if data is not None:
+            retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ poll=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = reload_dns(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_facts.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The Memstore product name (for example C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
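+
+# Illustrative follow-up: register the response and read the documented
+# memset_api fields; the 'usage' variable name is ours.
+- name: Get usage for mstestyaa1 and report space used
+  community.general.memset_memstore_info:
+    name: mstestyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: usage
+  delegate_to: localhost
+
+- name: Show bytes used and container count
+  ansible.builtin.debug:
+    msg: "{{ usage.memset_api.bytes }} bytes across {{ usage.memset_api.containers }} containers"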
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+      description: Dictionary of bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for val, data in (('msg', msg), ('memset_api', memset_api)):
+        if data is not None:
+            retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The Memstore product name (for example C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
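+
+# Illustrative follow-up: register the response and read the documented
+# memset_api fields; the 'usage' variable name is ours.
+- name: Get usage for mstestyaa1 and report space used
+  community.general.memset_memstore_info:
+    name: mstestyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: usage
+  delegate_to: localhost
+
+- name: Show bytes used and container count
+  ansible.builtin.debug:
+    msg: "{{ usage.memset_api.bytes }} bytes across {{ usage.memset_api.containers }} containers"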
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+      description: Dictionary of bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for val, data in (('msg', msg), ('memset_api', memset_api)):
+        if data is not None:
+            retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_facts.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_facts.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The server product name (for example C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
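+
+# Illustrative follow-up: register the response and read the documented
+# memset_api fields; the 'server' variable name is ours.
+- name: Get details for testyaa1 and report the primary IP
+  community.general.memset_server_info:
+    name: testyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: server
+  delegate_to: localhost
+
+- name: Show the server's primary IP and OS
+  ansible.builtin.debug:
+    msg: "{{ server.memset_api.primary_ip }} running {{ server.memset_api.os }}"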
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+      description: The control panel the server has, if any (for example C(cpanel)).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+      description: The type of firewall the server has (for example C(self-managed), C(managed)).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+      description: The server's monitoring level (for example C(basic)).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+      description: When true, the server will not be automatically rebooted if monitoring detects it is down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+      description: The server's operating system.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+      description: VAT rate for renewal payments.
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+      description: Current status of the server (for example C(live), C(onhold)).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+      description: The type of this server (for example C(dedicated)).
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for val, data in (('msg', msg), ('memset_api', memset_api)):
+        if data is not None:
+            retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The server product name (for example C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
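+
+# Illustrative follow-up: register the response and read the documented
+# memset_api fields; the 'server' variable name is ours.
+- name: Get details for testyaa1 and report the primary IP
+  community.general.memset_server_info:
+    name: testyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: server
+  delegate_to: localhost
+
+- name: Show the server's primary IP and OS
+  ansible.builtin.debug:
+    msg: "{{ server.memset_api.primary_ip }} running {{ server.memset_api.os }}"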
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+      description: The control panel the server has, if any (for example C(cpanel)).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+      description: The type of firewall the server has (for example C(self-managed), C(managed)).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+      description: The server's monitoring level (for example C(basic)).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+      description: When true, the server will not be automatically rebooted if monitoring detects it is down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+      description: The server's operating system.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+      description: VAT rate for renewal payments.
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+      description: Current status of the server (for example C(live), C(onhold)).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+      description: The type of this server (for example C(dedicated)).
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for val, data in (('msg', msg), ('memset_api', memset_api)):
+        if data is not None:
+            retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py
new file mode 100644
index 00000000..9ef798bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone
+author: "Simon Weald (@glitchcrab)"
+short_description: Creates and deletes Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+description:
+ - Manage DNS zones in a Memset account.
+options:
+ state:
+ required: true
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ name:
+ required: true
+ description:
+ - The zone nickname; usually the same as the main domain. Ensure this
+ value has at most 250 characters.
+ type: str
+ aliases: [ nickname ]
+ ttl:
+ description:
+ - The default TTL for all records created in the zone. This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
+ type: int
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ force:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Forces deletion of a zone and all zone domains/zone records it contains.
+'''
+
+EXAMPLES = '''
+# Create the zone 'test'
+- name: Create zone
+ community.general.memset_zone:
+ name: test
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ ttl: 300
+ delegate_to: localhost
+
+# Force zone deletion
+- name: Force delete zone
+ community.general.memset_zone:
+ name: test
+ state: absent
+ api_key: 5eb86c9196ab03919abcf03857163741
+ force: true
+ delegate_to: localhost
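+
+# The module supports check mode, so a dry run can preview whether a change
+# would occur (illustrative sketch; nothing is changed on the Memset side)
+- name: Preview zone creation without applying it
+  community.general.memset_zone:
+    name: test
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+    ttl: 300
+  check_mode: true
+  delegate_to: localhost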
+'''
+
+RETURN = '''
+memset_api:
+ description: Zone info from the Memset API
+ returned: when state == present
+ type: complex
+ contains:
+ domains:
+ description: List of domains in this zone
+ returned: always
+ type: list
+ sample: []
+ id:
+ description: Zone id
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ nickname:
+ description: Zone name
+ returned: always
+ type: str
+ sample: "example.com"
+ records:
+ description: List of DNS records for domains in this zone
+ returned: always
+ type: list
+ sample: []
+ ttl:
+ description: Default TTL for domains in this zone
+ returned: always
+ type: int
+ sample: 300
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+    https://www.memset.com/apidocs/methods_dns.html#dns.zone_create)
+    '''
+    # zone name length must be less than 250 chars.
+ if len(args['name']) > 250:
+ stderr = 'Zone name must be less than 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr, stderr=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+
+ api_method = 'dns.zone_list'
+ has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, counter = check_zone(data=response, name=args['name'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone(args=None, zone_exists=None, payload=None):
+ '''
+ At this point we already know whether the zone exists, so we
+ just need to make the API reflect the desired state.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if not zone_exists:
+ payload['ttl'] = args['ttl']
+ payload['nickname'] = args['name']
+ api_method = 'dns.zone_create'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ else:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ break
+ if zone['ttl'] != args['ttl']:
+ # update the zone if the desired TTL is different.
+ payload['id'] = zone['id']
+ payload['ttl'] = args['ttl']
+ api_method = 'dns.zone_update'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ # populate return var with zone info.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if zone_exists:
+ payload = dict()
+ payload['id'] = zone_id
+ api_method = 'dns.zone_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ memset_api = response.json()
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def delete_zone(args=None, zone_exists=None, payload=None):
+ '''
+ Deletion requires extra sanity checking as the zone cannot be
+ deleted if it contains domains or records. Setting force=true
+ will override this behaviour.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if zone_exists:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ counter = 0
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ counter += 1
+ if counter == 1:
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ zone_id = zone['id']
+ domain_count = len(zone['domains'])
+ record_count = len(zone['records'])
+ if (domain_count > 0 or record_count > 0) and args['force'] is False:
+ # we need to fail out if force was not explicitly set.
+ stderr = 'Zone contains domains or records and force was not used.'
+ has_failed = True
+ has_changed = False
+ module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1)
+ api_method = 'dns.zone_delete'
+ payload['id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice.
+ memset_api = msg
+ msg = None
+ else:
+ # zone names are not unique, so we cannot safely delete the requested
+ # zone at this time.
+ has_failed = True
+ has_changed = False
+ msg = 'Unable to delete zone as multiple zones with the same name exist.'
+ else:
+ has_failed, has_changed = False, False
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = _msg
+
+ return(retvals)
+
+ zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if args['state'] == 'present':
+ has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ elif args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+    for val, data in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if data is not None:
+            retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, aliases=['nickname'], type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ force=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py
new file mode 100644
index 00000000..4aa0eada
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_domain
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete domains in Memset DNS zones.
+notes:
+ - Zone domains can be thought of as a collection of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
+ - Currently this module can only create one domain at a time. Multiple domains should
+ be created using C(with_items).
+description:
+ - Manage DNS zone domains in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ domain:
+ required: true
+ description:
+ - The zone domain name. Ensure this value has at most 250 characters.
+ type: str
+ aliases: ['name']
+ zone:
+ required: true
+ description:
+ - The zone to add the domain to (this must already exist).
+ type: str
+'''
+
+EXAMPLES = '''
+# Create the zone domain 'test.com'
+- name: Create zone domain
+ community.general.memset_zone_domain:
+ domain: test.com
+ zone: testzone
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ delegate_to: localhost
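+
+# As the notes above say, the module creates one domain per invocation, so
+# multiple domains are handled with a loop (illustrative sketch)
+- name: Create multiple zone domains
+  community.general.memset_zone_domain:
+    domain: "{{ item }}"
+    zone: testzone
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+  delegate_to: localhost
+  with_items:
+    - test1.com
+    - test2.com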
+'''
+
+RETURN = '''
+memset_api:
+ description: Domain info from the Memset API
+ returned: when changed or state == present
+ type: complex
+ contains:
+ domain:
+ description: Domain name
+ returned: always
+ type: str
+ sample: "example.com"
+ id:
+ description: Domain ID
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
+ '''
+ # zone domain length must be less than 250 chars
+ if len(args['domain']) > 250:
+ stderr = 'Zone domain must be less than 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+ has_changed = False
+
+ api_method = 'dns.zone_domain_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
+ '''
+ At this point we already know whether the containing zone exists,
+ so we just need to create the domain (or exit if it already exists).
+ '''
+ has_changed, has_failed = False, False
+ msg = None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ for zone_domain in response.json():
+ if zone_domain['domain'] == args['domain']:
+ # zone domain already exists, nothing to change.
+ has_changed = False
+ break
+ else:
+        # for/else: the loop completed without a break, so no matching
+        # domain exists yet and we need to create it.
+ api_method = 'dns.zone_domain_create'
+ payload['domain'] = args['domain']
+ payload['zone_id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ return(has_failed, has_changed, msg)
+
+
+def delete_zone_domain(args=None, payload=None):
+ '''
+    Deletion is pretty simple: domains are always unique, so we
+    don't need to do any sanity checking to avoid deleting the
+    wrong thing.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ if domain_exists:
+ api_method = 'dns.zone_domain_delete'
+ payload['domain'] = args['domain']
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = response.json()
+ # unset msg as we don't want to return unnecessary info to the user.
+ msg = None
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete_domain(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ # the zone needs to be unique - this isn't a requirement of Memset's API but it
+ # makes sense in the context of this module.
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone'])
+
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ return(retvals)
+
+ if args['state'] == 'present':
+ has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload)
+
+ if args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for val, data in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if data is not None:
+            retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ domain=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(required=True, type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete_domain(args)
+
+ # we would need to populate the return values with the API's response
+ # in several places so it's easier to do it at the end instead.
+ if not retvals['failed']:
+ if args['state'] == 'present' and not module.check_mode:
+ payload = dict()
+ payload['domain'] = args['domain']
+ api_method = 'dns.zone_domain_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ retvals['memset_api'] = response.json()
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py
new file mode 100644
index 00000000..981d2ac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_record
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete records in Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+ - Currently this module can only create one DNS record at a time. Multiple records
+ should be created using C(with_items).
+description:
+ - Manage DNS records in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ address:
+ required: true
+ description:
+ - The address for this record (can be IP or text string depending on record type).
+ type: str
+ aliases: [ ip, data ]
+ priority:
+ description:
+      - C(MX) and C(SRV) record priority, in the range 0 to 999 (inclusive).
+ type: int
+ record:
+ required: false
+ description:
+ - The subdomain to create.
+ type: str
+ type:
+ required: true
+ description:
+ - The type of DNS record to create.
+ choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
+ type: str
+ relative:
+ type: bool
+ default: false
+ description:
+      - If set, the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
+        and C(SRV) record types.
+ ttl:
+ description:
+ - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ type: int
+ zone:
+ required: true
+ description:
+      - The name of the zone to which the record will be added.
+ type: str
+'''
+
+EXAMPLES = '''
+# Create DNS record for www.domain.com
+- name: Create DNS record
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: A
+ record: www
+ address: 1.2.3.4
+ ttl: 300
+ relative: false
+ delegate_to: localhost
+
+# create an SPF record for domain.com
+- name: Create SPF record for domain.com
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all"
+ delegate_to: localhost
+
+# create multiple DNS records
+- name: Create multiple DNS records
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ zone: "{{ item.zone }}"
+ type: "{{ item.type }}"
+ record: "{{ item.record }}"
+ address: "{{ item.address }}"
+ delegate_to: localhost
+ with_items:
+ - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
+ - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
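+
+# create an MX record using priority and relative (illustrative sketch;
+# per the option docs, relative appends the current domain to the address
+# for CNAME, MX, NS and SRV record types)
+- name: Create MX record for domain.com
+  community.general.memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: present
+    zone: domain.com
+    type: MX
+    address: mail
+    priority: 10
+    relative: true
+  delegate_to: localhost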
+'''
+
+RETURN = '''
+memset_api:
+ description: Record info from the Memset API.
+ returned: when state == present
+ type: complex
+ contains:
+ address:
+ description: Record content (may be an IP, string or blank depending on record type).
+ returned: always
+ type: str
+ sample: 1.1.1.1
+ id:
+ description: Record ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ priority:
+ description: Priority for C(MX) and C(SRV) records.
+ returned: always
+ type: int
+ sample: 10
+ record:
+ description: Name of record.
+ returned: always
+ type: str
+ sample: "www"
+ relative:
+ description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
+ returned: always
+ type: bool
+ sample: False
+ ttl:
+ description: Record TTL.
+ returned: always
+ type: int
+ sample: 10
+ type:
+ description: Record type.
+ returned: always
+ type: str
+ sample: AAAA
+ zone_id:
+ description: Zone ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+ failed_validation = False
+
+    # priority must be an integer in the range 0 to 999 (inclusive)
+    if not 0 <= args['priority'] <= 999:
+        failed_validation = True
+        error = 'Priority must be in the range 0 to 999 (inclusive).'
+ # data value must be max 250 chars
+ if len(args['address']) > 250:
+ failed_validation = True
+ error = "Address must be less than 250 characters in length."
+    # record value must be max 63 chars
+ if args['record']:
+ if len(args['record']) > 63:
+ failed_validation = True
+ error = "Record must be less than 63 characters in length."
+ # relative isn't used for all record types
+ if args['relative']:
+ if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
+ failed_validation = True
+ error = "Relative is only valid for CNAME, MX, NS and SRV record types."
+ # if any of the above failed then fail early
+ if failed_validation:
+ module.fail_json(failed=True, msg=error)
+
+
+def create_zone_record(args=None, zone_id=None, records=None, payload=None):
+ '''
+ Sanity checking has already occurred prior to this function being
+ called, so we can go ahead and either create or update the record.
+ As defaults are defined for all values in the argument_spec, this
+ may cause some changes to occur as the defaults are enforced (if
+ the user has only configured required variables).
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # assemble the new record.
+ new_record = dict()
+ new_record['zone_id'] = zone_id
+ for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']:
+ new_record[arg] = args[arg]
+
+ # if we have any matches, update them.
+ if records:
+ for zone_record in records:
+ # record exists, add ID to payload.
+ new_record['id'] = zone_record['id']
+ if zone_record == new_record:
+ # nothing to do; record is already correct so we populate
+ # the return var with the existing record's details.
+ memset_api = zone_record
+ return(has_changed, has_failed, memset_api, msg)
+ else:
+ # merge dicts ensuring we change any updated values
+ payload = zone_record.copy()
+ payload.update(new_record)
+ api_method = 'dns.zone_record_update'
+ if args['check_mode']:
+ has_changed = True
+ # return the new record to the user in the returned var.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+ else:
+ # no record found, so we need to create it
+ api_method = 'dns.zone_record_create'
+ payload = new_record
+ if args['check_mode']:
+ has_changed = True
+ # populate the return var with the new record's details.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def delete_zone_record(args=None, records=None, payload=None):
+ '''
+ Matching records can be cleanly deleted without affecting other
+ resource types, so this is pretty simple to achieve.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # if we have any matches, delete them.
+ if records:
+ for zone_record in records:
+ if args['check_mode']:
+ has_changed = True
+ return(has_changed, has_failed, memset_api, msg)
+ payload['id'] = zone_record['id']
+ api_method = 'dns.zone_record_delete'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = zone_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete functions.
+ Check mode is integrated into the create or delete functions.
+ '''
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+ retvals, payload = dict(), dict()
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone {0} does not exist." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones." . format(args['zone'])
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ retvals['stderr'] = stderr
+ return(retvals)
+
+    # get a list of all records (as we can't limit records by zone)
+ api_method = 'dns.zone_record_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ # find any matching records
+ records = [record for record in response.json() if record['zone_id'] == zone_id
+ and record['record'] == args['record'] and record['type'] == args['type']]
+
+ if args['state'] == 'present':
+ has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
+
+ if args['state'] == 'absent':
+ has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    # look the values up explicitly instead of via eval(), and test the
+    # value (not the key name) so that empty entries are not returned.
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return(retvals)
+
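+# For reference, a sketch of the dict create_or_delete() hands back to main()
+# (illustrative values only; the real contents come from the Memset API):
+#
+#   retvals = {
+#       'changed': True,
+#       'failed': False,
+#       'memset_api': {'type': 'A', 'record': 'www', 'address': '192.0.2.1'},
+#   }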
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ zone=dict(required=True, type='str'),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
+ address=dict(required=True, aliases=['ip', 'data'], type='str'),
+ record=dict(required=False, default='', type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ priority=dict(required=False, default=0, type='int'),
+ relative=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # perform some Memset API-specific validation
+ api_validation(args=args)
+
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py
new file mode 100644
index 00000000..2efb90cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloud_init_data_facts
+short_description: Retrieve facts of cloud-init.
+description:
+ - Gathers facts by reading the status.json and result.json of cloud-init.
+author: René Moser (@resmo)
+options:
+ filter:
+ description:
+ - Filter facts
+ type: str
+ choices: [ status, result ]
+notes:
+ - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
+'''
+
+EXAMPLES = '''
+- name: Gather all facts of cloud init
+ community.general.cloud_init_data_facts:
+ register: result
+
+- ansible.builtin.debug:
+ var: result
+
+- name: Wait for cloud init to finish
+ community.general.cloud_init_data_facts:
+ filter: status
+ register: res
+ until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
+ retries: 50
+ delay: 5
+'''
+
+RETURN = '''
+---
+cloud_init_data_facts:
+ description: Facts of result and status.
+ returned: success
+ type: dict
+ sample: '{
+ "status": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "errors": []
+            }
+        },
+ "result": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "init": {
+ "errors": [],
+ "finished": 1522066377.0185432,
+ "start": 1522066375.2648022
+ },
+ "init-local": {
+ "errors": [],
+ "finished": 1522066373.70919,
+ "start": 1522066373.4726632
+ },
+ "modules-config": {
+ "errors": [],
+ "finished": 1522066380.9097016,
+ "start": 1522066379.0011985
+ },
+ "modules-final": {
+ "errors": [],
+ "finished": 1522066383.56594,
+ "start": 1522066382.3449218
+ },
+ "stage": null
+            }
+        }
+    }'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+CLOUD_INIT_PATH = "/var/lib/cloud/data/"
+
+
+def gather_cloud_init_data_facts(module):
+ res = {
+ 'cloud_init_data_facts': dict()
+ }
+
+ for i in ['result', 'status']:
+ filter = module.params.get('filter')
+ if filter is None or filter == i:
+ res['cloud_init_data_facts'][i] = dict()
+ json_file = CLOUD_INIT_PATH + i + '.json'
+
+ if os.path.exists(json_file):
+                with open(json_file, 'rb') as f:
+                    contents = to_text(f.read(), errors='surrogate_or_strict')
+
+ if contents:
+ res['cloud_init_data_facts'][i] = module.from_json(contents)
+ return res
+
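+# A standalone sketch of the check used by the "Wait for cloud init to finish"
+# example above: cloud-init has finished once status.json reports a null stage
+# (assumes the default CLOUD_INIT_PATH; error handling omitted):
+#
+#   import json
+#   with open(CLOUD_INIT_PATH + 'status.json') as f:
+#       finished = json.load(f)['v1']['stage'] is None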
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filter=dict(choices=['result', 'status']),
+ ),
+ supports_check_mode=True,
+ )
+
+ facts = gather_cloud_init_data_facts(module)
+ result = dict(changed=False, ansible_facts=facts, **facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/helm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/helm.py
new file mode 100644
index 00000000..dd592d6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/helm.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# (c) 2016, Flavio Percoco <flavio@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+  why: For more details, see https://github.com/ansible/ansible/issues/61546.
+ alternative: Use M(community.kubernetes.helm) instead.
+module: helm
+short_description: Manages Kubernetes packages with the Helm package manager
+author: "Flavio Percoco (@flaper87)"
+description:
+ - Install, upgrade, delete and list packages with the Helm package manager.
+requirements:
+ - "pyhelm"
+ - "grpcio"
+options:
+ host:
+ description:
+ - Tiller's server host.
+ type: str
+ default: "localhost"
+ port:
+ description:
+ - Tiller's server port.
+ type: int
+ default: 44134
+ namespace:
+ description:
+ - Kubernetes namespace where the chart should be installed.
+ type: str
+ default: "default"
+ name:
+ description:
+ - Release name to manage.
+ type: str
+ state:
+ description:
+ - Whether to install C(present), remove C(absent), or purge C(purged) a package.
+ choices: ['absent', 'purged', 'present']
+ type: str
+ default: "present"
+ chart:
+ description:
+ - A map describing the chart to install. See examples for available options.
+ type: dict
+ default: {}
+ values:
+ description:
+ - A map of value options for the chart.
+ type: dict
+ default: {}
+ disable_hooks:
+ description:
+ - Whether to disable hooks during the uninstall process.
+ type: bool
+ default: 'no'
+'''
+
+RETURN = ''' # '''
+
+EXAMPLES = '''
+- name: Install helm chart
+ community.general.helm:
+ host: localhost
+ chart:
+ name: memcached
+ version: 0.4.0
+ source:
+ type: repo
+ location: https://kubernetes-charts.storage.googleapis.com
+ state: present
+ name: my-memcached
+ namespace: default
+
+- name: Uninstall helm chart
+ community.general.helm:
+ host: localhost
+ state: absent
+ name: my-memcached
+
+- name: Install helm chart from a git repo
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/user/helm-chart.git
+ state: present
+ name: my-example
+ namespace: default
+ values:
+ foo: "bar"
+
+- name: Install helm chart from a git repo specifying path
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/helm/charts.git
+ path: stable/memcached
+ state: present
+ name: my-memcached
+ namespace: default
+ values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}"
+'''
+
+import traceback
+HELM_IMPORT_ERR = None
+try:
+ import grpc
+ from pyhelm import tiller
+ from pyhelm import chartbuilder
+except ImportError:
+ HELM_IMPORT_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def install(module, tserver):
+ changed = False
+ params = module.params
+ name = params['name']
+ values = params['values']
+ chart = module.params['chart']
+ namespace = module.params['namespace']
+
+ chartb = chartbuilder.ChartBuilder(chart)
+ r_matches = (x for x in tserver.list_releases()
+ if x.name == name and x.namespace == namespace)
+ installed_release = next(r_matches, None)
+ if installed_release:
+ if installed_release.chart.metadata.version != chart['version']:
+ tserver.update_release(chartb.get_helm_chart(), False,
+ namespace, name=name, values=values)
+ changed = True
+ else:
+ tserver.install_release(chartb.get_helm_chart(), namespace,
+ dry_run=False, name=name,
+ values=values)
+ changed = True
+
+ return dict(changed=changed)
+
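+# For reference, the chart mapping consumed by ChartBuilder above; this mirrors
+# the repo-based example in EXAMPLES (values are illustrative):
+#
+#   chart = {
+#       'name': 'memcached',
+#       'version': '0.4.0',
+#       'source': {
+#           'type': 'repo',
+#           'location': 'https://kubernetes-charts.storage.googleapis.com',
+#       },
+#   }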
+
+def delete(module, tserver, purge=False):
+ changed = False
+ params = module.params
+
+ if not module.params['name']:
+ module.fail_json(msg='Missing required field name')
+
+ name = module.params['name']
+ disable_hooks = params['disable_hooks']
+
+ try:
+ tserver.uninstall_release(name, disable_hooks, purge)
+ changed = True
+ except grpc._channel._Rendezvous as exc:
+ if 'not found' not in str(exc):
+ raise exc
+
+ return dict(changed=changed)
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=44134),
+ name=dict(type='str', default=''),
+ chart=dict(type='dict'),
+ state=dict(
+ choices=['absent', 'purged', 'present'],
+ default='present'
+ ),
+ # Install options
+ values=dict(type='dict'),
+ namespace=dict(type='str', default='default'),
+
+ # Uninstall options
+ disable_hooks=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True)
+
+ if HELM_IMPORT_ERR:
+ module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR)
+
+ host = module.params['host']
+ port = module.params['port']
+ state = module.params['state']
+ tserver = tiller.Tiller(host, port)
+
+ if state == 'present':
+ rst = install(module, tserver)
+
+    if state == 'absent':
+ rst = delete(module, tserver)
+
+    if state == 'purged':
+ rst = delete(module, tserver, True)
+
+ module.exit_json(**rst)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/ovirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/ovirt.py
new file mode 100644
index 00000000..25e3081c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/ovirt.py
@@ -0,0 +1,503 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt
+author:
+- Vincent Van der Kussen (@vincentvdk)
+short_description: oVirt/RHEV platform management
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: This module is for deprecated version of ovirt.
+ alternative: Use C(ovirt_vm) from the C(ovirt.ovirt) collection instead
+description:
+ - This module only supports oVirt/RHEV version 3. A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+ - Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform.
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ required: true
+ url:
+ description:
+ - The url of the oVirt instance.
+ type: str
+ required: true
+ instance_name:
+ description:
+ - The name of the instance to use.
+ type: str
+ required: true
+ aliases: [ vmname ]
+ password:
+ description:
+ - Password of the user to authenticate with.
+ type: str
+ required: true
+ image:
+ description:
+ - The template to use for the instance.
+ type: str
+ resource_type:
+ description:
+ - Whether you want to deploy an image or create an instance from scratch.
+ type: str
+ choices: [ new, template ]
+ zone:
+ description:
+ - Deploy the image to this oVirt cluster.
+ type: str
+ instance_disksize:
+ description:
+ - Size of the instance's disk in GB.
+ type: str
+    aliases: [ vm_disksize ]
+ instance_cpus:
+ description:
+ - The instance's number of CPUs.
+ type: str
+ default: 1
+ aliases: [ vmcpus ]
+ instance_nic:
+ description:
+ - The name of the network interface in oVirt/RHEV.
+ type: str
+ aliases: [ vmnic ]
+ instance_network:
+ description:
+ - The logical network the machine should belong to.
+ type: str
+ default: rhevm
+ aliases: [ vmnetwork ]
+ instance_mem:
+ description:
+ - The instance's amount of memory in MB.
+ type: str
+ aliases: [ vmmem ]
+ instance_type:
+ description:
+ - Define whether the instance is a server, desktop or high_performance.
+ - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
+ type: str
+ choices: [ desktop, server, high_performance ]
+ default: server
+ aliases: [ vmtype ]
+ disk_alloc:
+ description:
+ - Define whether disk is thin or preallocated.
+ type: str
+ choices: [ preallocated, thin ]
+ default: thin
+ disk_int:
+ description:
+ - Interface type of the disk.
+ type: str
+ choices: [ ide, virtio ]
+ default: virtio
+ instance_os:
+ description:
+ - Type of Operating System.
+ type: str
+ aliases: [ vmos ]
+ instance_cores:
+ description:
+ - Define the instance's number of cores.
+ type: str
+ default: 1
+ aliases: [ vmcores ]
+ sdomain:
+ description:
+ - The Storage Domain where you want to create the instance's disk on.
+ type: str
+ region:
+ description:
+ - The oVirt/RHEV datacenter where you want to deploy to.
+ type: str
+ instance_dns:
+ description:
+ - Define the instance's Primary DNS server.
+ type: str
+ aliases: [ dns ]
+ instance_domain:
+ description:
+ - Define the instance's Domain.
+ type: str
+ aliases: [ domain ]
+ instance_hostname:
+ description:
+ - Define the instance's Hostname.
+ type: str
+ aliases: [ hostname ]
+ instance_ip:
+ description:
+ - Define the instance's IP.
+ type: str
+ aliases: [ ip ]
+ instance_netmask:
+ description:
+ - Define the instance's Netmask.
+ type: str
+ aliases: [ netmask ]
+ instance_gateway:
+ description:
+ - Define the instance's Gateway.
+ type: str
+ aliases: [ gateway ]
+ instance_rootpw:
+ description:
+ - Define the instance's Root password.
+ type: str
+ aliases: [ rootpw ]
+ instance_key:
+ description:
+ - Define the instance's Authorized key.
+ type: str
+ aliases: [ key ]
+ state:
+ description:
+ - Create, terminate or remove instances.
+ type: str
+ choices: [ absent, present, restart, shutdown, started ]
+ default: present
+requirements:
+ - ovirt-engine-sdk-python
+'''
+
+EXAMPLES = '''
+- name: Basic example to provision from image
+ community.general.ovirt:
+ user: admin@internal
+ url: https://ovirt.example.com
+ instance_name: ansiblevm04
+ password: secret
+ image: centos_64
+ zone: cluster01
+ resource_type: template
+
+- name: Full example to create new instance from scratch
+ community.general.ovirt:
+ instance_name: testansible
+ resource_type: new
+ instance_type: server
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ instance_disksize: 10
+ zone: cluster01
+ region: datacenter1
+ instance_cpus: 1
+ instance_nic: nic1
+ instance_network: rhevm
+ instance_mem: 1000
+ disk_alloc: thin
+ sdomain: FIBER01
+ instance_cores: 1
+ instance_os: rhel_6x64
+ disk_int: virtio
+
+- name: Stopping an existing instance
+ community.general.ovirt:
+ instance_name: testansible
+    state: shutdown
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an existing instance
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an instance with cloud init information
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ hostname: testansible
+ domain: ansible.local
+ ip: 192.0.2.100
+ netmask: 255.255.255.0
+ gateway: 192.0.2.1
+ rootpw: bigsecret
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_OVIRTSDK = True
+except ImportError:
+ HAS_OVIRTSDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.removed import removed_module
+
+
+# ------------------------------------------------------------------- #
+# create connection with API
+#
+def conn(url, user, password):
+ api = API(url=url, username=user, password=password, insecure=True)
+ try:
+ value = api.test()
+ except Exception:
+ raise Exception("error connecting to the oVirt API")
+ return api
+
+
+# ------------------------------------------------------------------- #
+# Create VM from scratch
+def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
+ if vmdisk_alloc == 'thin':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System",
+ format='cow',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
+ elif vmdisk_alloc == 'preallocated':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
+ format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
+
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception("Error creating VM with specified parameters")
+ vm = conn.vms.get(name=vmname)
+ try:
+ vm.disks.add(vmdisk)
+ except Exception:
+ raise Exception("Error attaching disk")
+ try:
+ vm.nics.add(nic_net1)
+ except Exception:
+ raise Exception("Error adding nic")
+
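+# Size arithmetic used above: vmmem is supplied in MB and vmdisk_size in GB,
+# while the API expects bytes, for example:
+#
+#   1024 * 1024 * int('1000')         # 1000 MB -> 1048576000 bytes
+#   1024 * 1024 * 1024 * int('10')    # 10 GB   -> 10737418240 bytes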
+
+# create an instance from a template
+def create_vm_template(conn, vmname, image, zone):
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True))
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception('error adding template %s' % image)
+
+
+# start instance
+def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
+ domain=None, dns=None, rootpw=None, key=None):
+ vm = conn.vms.get(name=vmname)
+ use_cloud_init = False
+ nics = None
+ nic = None
+ if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
+ use_cloud_init = True
+ if ip and netmask and gateway:
+ ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
+ nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
+ nics = params.Nics()
+ nics = params.GuestNicsConfiguration(nic_configuration=[nic])
+ initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
+ root_password=rootpw, nic_configurations=nics, dns_servers=dns,
+ authorized_ssh_keys=key)
+ action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
+ vm.start(action=action)
+
+
+# Stop instance
+def vm_stop(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+
+
+# restart instance
+def vm_restart(conn, vmname):
+    vm = conn.vms.get(name=vmname)
+    vm.stop()
+    while conn.vms.get(name=vmname).get_status().get_state() != 'down':
+ time.sleep(5)
+ vm.start()
+
+
+# remove an instance
+def vm_remove(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.delete()
+
+
+# ------------------------------------------------------------------- #
+# VM statuses
+#
+# Get the VMs status
+def vm_status(conn, vmname):
+ status = conn.vms.get(name=vmname).status.state
+ return status
+
+
+# Get VM object and return its name if the object exists
+def get_vm(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ if vm is None:
+ name = "empty"
+ else:
+ name = vm.get_name()
+ return name
+
+# ------------------------------------------------------------------- #
+# Hypervisor operations
+#
+# not available yet
+# ------------------------------------------------------------------- #
+# Main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']),
+ user=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ instance_name=dict(type='str', required=True, aliases=['vmname']),
+ password=dict(type='str', required=True, no_log=True),
+ image=dict(type='str'),
+ resource_type=dict(type='str', choices=['new', 'template']),
+ zone=dict(type='str'),
+ instance_disksize=dict(type='str', aliases=['vm_disksize']),
+ instance_cpus=dict(type='str', default=1, aliases=['vmcpus']),
+ instance_nic=dict(type='str', aliases=['vmnic']),
+ instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']),
+ instance_mem=dict(type='str', aliases=['vmmem']),
+ instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']),
+ disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']),
+ disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']),
+ instance_os=dict(type='str', aliases=['vmos']),
+ instance_cores=dict(type='str', default=1, aliases=['vmcores']),
+ instance_hostname=dict(type='str', aliases=['hostname']),
+ instance_ip=dict(type='str', aliases=['ip']),
+ instance_netmask=dict(type='str', aliases=['netmask']),
+ instance_gateway=dict(type='str', aliases=['gateway']),
+ instance_domain=dict(type='str', aliases=['domain']),
+ instance_dns=dict(type='str', aliases=['dns']),
+ instance_rootpw=dict(type='str', aliases=['rootpw'], no_log=True),
+ instance_key=dict(type='str', aliases=['key'], no_log=True),
+ sdomain=dict(type='str'),
+ region=dict(type='str'),
+ ),
+ )
+
+ if not HAS_OVIRTSDK:
+ module.fail_json(msg='ovirtsdk required for this module')
+
+ state = module.params['state']
+ user = module.params['user']
+ url = module.params['url']
+ vmname = module.params['instance_name']
+ password = module.params['password']
+ image = module.params['image'] # name of the image to deploy
+ resource_type = module.params['resource_type'] # template or from scratch
+ zone = module.params['zone'] # oVirt cluster
+ vmdisk_size = module.params['instance_disksize'] # disksize
+ vmcpus = module.params['instance_cpus'] # number of cpu
+ vmnic = module.params['instance_nic'] # network interface
+ vmnetwork = module.params['instance_network'] # logical network
+ vmmem = module.params['instance_mem'] # mem size
+ vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
+ vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
+ vmos = module.params['instance_os'] # Operating System
+ vmtype = module.params['instance_type'] # server, desktop or high_performance
+ vmcores = module.params['instance_cores'] # number of cores
+ sdomain = module.params['sdomain'] # storage domain to store disk on
+ region = module.params['region'] # oVirt Datacenter
+ hostname = module.params['instance_hostname']
+ ip = module.params['instance_ip']
+ netmask = module.params['instance_netmask']
+ gateway = module.params['instance_gateway']
+ domain = module.params['instance_domain']
+ dns = module.params['instance_dns']
+ rootpw = module.params['instance_rootpw']
+ key = module.params['instance_key']
+ # initialize connection
+ try:
+ c = conn(url + "/api", user, password)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+ if get_vm(c, vmname) == "empty":
+ if resource_type == 'template':
+ try:
+ create_vm_template(c, vmname, image, zone)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
+ elif resource_type == 'new':
+ # FIXME: refactor, use keyword args.
+ try:
+ create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
+ else:
+ module.exit_json(changed=False, msg="You did not specify a resource type")
+ else:
+ module.exit_json(changed=False, msg="VM %s already exists" % vmname)
+
+ if state == 'started':
+ if vm_status(c, vmname) == 'up':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmname)
+ else:
+ # vm_start(c, vmname)
+ vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
+ module.exit_json(changed=True, msg="VM %s started" % vmname)
+
+ if state == 'shutdown':
+ if vm_status(c, vmname) == 'down':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
+ else:
+ vm_stop(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
+
+ if state == 'restart':
+ if vm_status(c, vmname) == 'up':
+ vm_restart(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
+ else:
+ module.exit_json(changed=False, msg="VM %s is not running" % vmname)
+
+ if state == 'absent':
+ if get_vm(c, vmname) == "empty":
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
+ else:
+ vm_remove(c, vmname)
+ module.exit_json(changed=True, msg="VM %s removed" % vmname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py
new file mode 100644
index 00000000..140d56f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py
@@ -0,0 +1,735 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxmox
+short_description: management of instances in Proxmox VE cluster
+description:
+ - allows you to create/delete/stop instances in Proxmox VE cluster
+ - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
+  - From community.general 4.0.0 on, there will be no default values; see I(proxmox_default_behavior).
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use PROXMOX_PASSWORD environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ vmid:
+ description:
+ - the instance id
+      - if not set and C(state=present), the next available VM ID will be fetched from ProxmoxAPI
+      - if not set and a hostname is given, it will be fetched from ProxmoxAPI based on the hostname
+ type: str
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ type: bool
+ default: 'no'
+ node:
+ description:
+ - Proxmox VE node, when new VM will be created
+ - required only for C(state=present)
+      - for other states it will be autodiscovered
+ type: str
+ pool:
+ description:
+ - Proxmox VE resource pool
+ type: str
+ password:
+ description:
+ - the instance root password
+ - required only for C(state=present)
+ type: str
+ hostname:
+ description:
+ - the instance hostname
+ - required only for C(state=present)
+ - must be unique if vmid is not passed
+ type: str
+ ostemplate:
+ description:
+ - the template for VM creating
+ - required only for C(state=present)
+ type: str
+ disk:
+ description:
+ - hard disk size in GB for instance
+      - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(3). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+      - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpus:
+ description:
+ - numbers of allocated cpus for instance
+      - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ memory:
+ description:
+ - memory size in MB for instance
+      - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ swap:
+ description:
+ - swap memory size in MB for instance
+      - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ netif:
+ description:
+      - specifies network interfaces for the container, as a hash/dictionary defining the interfaces.
+ type: dict
+ mounts:
+ description:
+      - specifies additional mounts (separate disks) for the container, as a hash/dictionary defining the mount points.
+ type: dict
+ ip_address:
+ description:
+ - specifies the address the container will be assigned
+ type: str
+ onboot:
+ description:
+ - specifies whether a VM will be started during system bootup
+      - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ cpuunits:
+ description:
+ - CPU weight for a VM
+      - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ nameserver:
+ description:
+ - sets DNS server IP address for a container
+ type: str
+ searchdomain:
+ description:
+ - sets DNS search domain for a container
+ type: str
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - forcing operations
+ - can be used only with states C(present), C(stopped), C(restarted)
+      - with C(state=present) the force option allows overwriting the existing container
+      - with states C(stopped), C(restarted) it allows force-stopping the instance
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicate desired state of the instance
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ default: present
+ pubkey:
+ description:
+      - Public key to add to /root/.ssh/authorized_keys. This was added in Proxmox 4.2; it is ignored for earlier versions.
+ type: str
+ unprivileged:
+ description:
+ - Indicate if the container should be unprivileged
+ type: bool
+ default: 'no'
+ description:
+ description:
+ - Specify the description for the container. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ version_added: '0.2.0'
+ hookscript:
+ description:
+      - Script that will be executed during various steps in the container's lifetime.
+ type: str
+ version_added: '0.2.0'
+ proxmox_default_behavior:
+ description:
+      - Various module options used to have default values. This caused problems when
+        a user expected different default behavior from Proxmox, or when the filled-in
+        options themselves caused problems once set.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value.
+ - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+notes:
+  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = r'''
+- name: Create new container with minimal options
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with hookscript and description
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ hookscript: 'local:snippets/vm_hook.sh'
+ description: created with ansible
+
+- name: Create new container automatically selecting the next available vmid.
+ community.general.proxmox:
+ node: 'uk-mc02'
+ api_user: 'root@pam'
+ api_password: '1q2w3e'
+ api_host: 'node1'
+ password: '123456'
+ hostname: 'example.org'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options with force (it will overwrite the existing container)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ force: yes
+
+- name: Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it first)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options defining network interface with dhcp
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining network interface with static ip
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining a mount with 8GB
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
+
+- name: Create new container with minimal options defining a cpu core limit
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ cores: 2
+
+- name: Start container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+
+- name: >
+ Start container with mount. You should enter a 90-second timeout because servers
+ with additional disks take longer to boot
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+ timeout: 90
+
+- name: Stop container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: stopped
+
+- name: Stop container with force
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ force: yes
+ state: stopped
+
+- name: Restart container (a stopped or mounted container cannot be restarted)
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: restarted
+
+- name: Remove container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: absent
+'''
+
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VZ_TYPE = None
+
+
+def get_nextvmid(module, proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, hostname):
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname]
+
+
+def get_instance(proxmox, vmid):
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+
+def content_check(proxmox, node, ostemplate, template_store):
+ return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
+
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def proxmox_version(proxmox):
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
+
+def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if VZ_TYPE == 'lxc':
+ kwargs['cpulimit'] = cpus
+ kwargs['rootfs'] = disk
+ if 'netif' in kwargs:
+ kwargs.update(kwargs['netif'])
+ del kwargs['netif']
+ if 'mounts' in kwargs:
+ kwargs.update(kwargs['mounts'])
+ del kwargs['mounts']
+ if 'pubkey' in kwargs:
+ if proxmox_version(proxmox) >= LooseVersion('4.2'):
+ kwargs['ssh-public-keys'] = kwargs['pubkey']
+ del kwargs['pubkey']
+ else:
+ kwargs['cpus'] = cpus
+ kwargs['disk'] = disk
+
+ taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+
+ while timeout:
+ if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
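+# Illustration of the kwargs flattening above: a dict option such as
+#
+#   netif = {'net0': 'name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0'}
+#
+# is merged into kwargs, so the API call receives net0=... as a top-level
+# parameter (and likewise mp0=... for mounts).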
+
+def start_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def stop_instance(module, proxmox, vm, vmid, timeout, force):
+ if force:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+ else:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def umount_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
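+# The four polling loops above share the same structure; a hypothetical helper
+# consolidating them could look like this (sketch only, not used by this module):
+#
+#   def wait_for_task(module, proxmox_node, taskid, timeout, action):
+#       while timeout:
+#           status = proxmox_node.tasks(taskid).status.get()
+#           if status['status'] == 'stopped' and status['exitstatus'] == 'OK':
+#               return True
+#           timeout -= 1
+#           if timeout == 0:
+#               module.fail_json(msg='Reached timeout while waiting for %s' % action)
+#           time.sleep(1)
+#       return False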
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ vmid=dict(required=False),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ pool=dict(),
+ password=dict(no_log=True),
+ hostname=dict(),
+ ostemplate=dict(),
+ disk=dict(type='str'),
+ cores=dict(type='int'),
+ cpus=dict(type='int'),
+ memory=dict(type='int'),
+ swap=dict(type='int'),
+ netif=dict(type='dict'),
+ mounts=dict(type='dict'),
+ ip_address=dict(),
+ onboot=dict(type='bool'),
+ storage=dict(default='local'),
+ cpuunits=dict(type='int'),
+ nameserver=dict(),
+ searchdomain=dict(),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ pubkey=dict(type='str', default=None),
+ unprivileged=dict(type='bool', default=False),
+ description=dict(type='str'),
+ hookscript=dict(type='str'),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ disk = module.params['disk']
+ cpus = module.params['cpus']
+ memory = module.params['memory']
+ swap = module.params['swap']
+ storage = module.params['storage']
+ hostname = module.params['hostname']
+ if module.params['ostemplate'] is not None:
+ template_store = module.params['ostemplate'].split(":")[0]
+ timeout = module.params['timeout']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ disk="3",
+ cores=1,
+ cpus=1,
+ memory=512,
+ swap=0,
+ onboot=False,
+ cpuunits=1000,
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+            except KeyError:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global VZ_TYPE
+ VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc'
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid not set get the Next VM id from ProxmoxAPI
+ # If hostname is set get the VM id from ProxmoxAPI
+ if not vmid and state == 'present':
+ vmid = get_nextvmid(module, proxmox)
+ elif not vmid and hostname:
+ hosts = get_vmid(proxmox, hostname)
+ if len(hosts) == 0:
+ module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state)
+ vmid = hosts[0]
+ elif not vmid:
+ module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+ if state == 'present':
+ try:
+ if get_instance(proxmox, vmid) and not module.params['force']:
+                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
+ # If no vmid was passed, there cannot be another VM named 'hostname'
+ if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
+            elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
+ module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
+ elif not node_check(proxmox, node):
+                module.fail_json(msg="node '%s' does not exist in cluster" % node)
+ elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
+                module.fail_json(msg="ostemplate '%s' does not exist on node %s and storage %s"
+ % (module.params['ostemplate'], node, template_store))
+
+ create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
+ cores=module.params['cores'],
+ pool=module.params['pool'],
+ password=module.params['password'],
+ hostname=module.params['hostname'],
+ ostemplate=module.params['ostemplate'],
+ netif=module.params['netif'],
+ mounts=module.params['mounts'],
+ ip_address=module.params['ip_address'],
+ onboot=int(module.params['onboot']),
+ cpuunits=module.params['cpuunits'],
+ nameserver=module.params['nameserver'],
+ searchdomain=module.params['searchdomain'],
+ force=int(module.params['force']),
+ pubkey=module.params['pubkey'],
+ unprivileged=int(module.params['unprivileged']),
+ description=module.params['description'],
+ hookscript=module.params['hookscript'])
+
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ except Exception as e:
+ module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+ elif state == 'started':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ if module.params['force']:
+ if umount_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ else:
+ module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
+ "You can use force option to umount it.") % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
+
+ if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
+ getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and
+ start_instance(module, proxmox, vm, vmid, timeout)):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py
new file mode 100644
index 00000000..fc7c37c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_domain_info
+short_description: Retrieve information about one or more Proxmox VE domains
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE domains.
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing domains
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_domains
+
+- name: Retrieve information about the pve domain
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_domain_pve
+'''
+
+
+RETURN = '''
+proxmox_domains:
+ description: List of authentication domains.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the realm.
+ returned: on success
+ type: str
+ realm:
+ description: Realm name.
+ returned: on success
+ type: str
+ type:
+ description: Realm type.
+ returned: on success
+ type: str
+ digest:
+ description: Realm hash.
+ returned: on success, can be absent
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
+ def get_domain(self, realm):
+ try:
+ domain = self.proxmox_api.access.domains.get(realm)
+ except Exception:
+ self.module.fail_json(msg="Domain '%s' does not exist" % realm)
+ domain['realm'] = realm
+ return domain
+
+ def get_domains(self):
+ domains = self.proxmox_api.access.domains.get()
+ return domains
+
+
+def proxmox_domain_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ domain_info_args = proxmox_domain_info_argument_spec()
+ module_args.update(domain_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxDomainInfoAnsible(module)
+ domain = module.params['domain']
+
+ if domain:
+ domains = [proxmox.get_domain(realm=domain)]
+ else:
+ domains = proxmox.get_domains()
+ result['proxmox_domains'] = domains
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py
new file mode 100644
index 00000000..063d28e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_group_info
+short_description: Retrieve information about one or more Proxmox VE groups
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE groups.
+options:
+ group:
+ description:
+ - Restrict results to a specific group.
+ aliases: ['groupid', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing groups
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_groups
+
+- name: Retrieve information about the admin group
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ group: admin
+ register: proxmox_group_admin
+'''
+
+
+RETURN = '''
+proxmox_groups:
+ description: List of groups.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the group.
+ returned: on success, can be absent
+ type: str
+ groupid:
+ description: Group name.
+ returned: on success
+ type: str
+ users:
+ description: List of users in the group.
+ returned: on success
+ type: list
+ elements: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
+ def get_group(self, groupid):
+ try:
+ group = self.proxmox_api.access.groups.get(groupid)
+ except Exception:
+ self.module.fail_json(msg="Group '%s' does not exist" % groupid)
+ group['groupid'] = groupid
+ return ProxmoxGroup(group)
+
+ def get_groups(self):
+ groups = self.proxmox_api.access.groups.get()
+ return [ProxmoxGroup(group) for group in groups]
+
+
+class ProxmoxGroup:
+ def __init__(self, group):
+ self.group = dict()
+ # Data representation is not the same depending on API calls
+ for k, v in group.items():
+ if k == 'users' and isinstance(v, str):
+ self.group['users'] = v.split(',')
+ elif k == 'members':
+ self.group['users'] = group['members']
+ else:
+ self.group[k] = v
+
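+# Illustrative sketch (assumption about the PVE API): the group list endpoint
+# returns 'users' as a comma-separated string, while the single-group endpoint
+# returns a 'members' list. Both hypothetical payloads below normalize to the
+# same shape:
+#
+#   ProxmoxGroup({'groupid': 'ops', 'users': 'root@pam,alice@pve'}).group
+#   ProxmoxGroup({'groupid': 'ops', 'members': ['root@pam', 'alice@pve']}).group
+#   # both -> {'groupid': 'ops', 'users': ['root@pam', 'alice@pve']}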
+
+def proxmox_group_info_argument_spec():
+ return dict(
+ group=dict(type='str', aliases=['groupid', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ group_info_args = proxmox_group_info_argument_spec()
+ module_args.update(group_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxGroupInfoAnsible(module)
+ group = module.params['group']
+
+ if group:
+ groups = [proxmox.get_group(groupid=group)]
+ else:
+ groups = proxmox.get_groups()
+ result['proxmox_groups'] = [group.group for group in groups]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py
new file mode 100644
index 00000000..0161fefc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py
@@ -0,0 +1,1449 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Abdoul Bah (@helldorado) <bahabdoul at gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_kvm
+short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+description:
+ - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+ - From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
+author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
+options:
+ acpi:
+ description:
+ - Specify if ACPI should be enabled/disabled.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ agent:
+ description:
+ - Specify if the QEMU Guest Agent should be enabled/disabled.
+ type: bool
+ args:
+ description:
+ - Pass arbitrary arguments to kvm.
+ - This option is for experts only!
+ type: str
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ type: str
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ type: str
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can use C(PROXMOX_PASSWORD) environment variable.
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ autostart:
+ description:
+ - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ balloon:
+ description:
+ - Specify the amount of RAM for the VM in MB.
+ - Using zero disables the balloon driver.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ bios:
+ description:
+ - Specify the BIOS implementation.
+ type: str
+ choices: ['seabios', 'ovmf']
+ boot:
+ description:
+ - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+ - You can combine letters to set the order.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(cnd). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ bootdisk:
+ description:
+ - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
+ type: str
+ cicustom:
+ description:
+ - 'cloud-init: Specify custom files to replace the automatically generated ones at start.'
+ type: str
+ version_added: 1.3.0
+ cipassword:
+ description:
+ - 'cloud-init: password of default user to create.'
+ type: str
+ version_added: 1.3.0
+ citype:
+ description:
+ - 'cloud-init: Specifies the cloud-init configuration format.'
+ - The default depends on the configured operating system type (C(ostype)).
+ - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows.
+ type: str
+ choices: ['nocloud', 'configdrive2']
+ version_added: 1.3.0
+ ciuser:
+ description:
+ - 'cloud-init: username of default user to create.'
+ type: str
+ version_added: 1.3.0
+ clone:
+ description:
+ - Name of VM to be cloned. If C(vmid) is set, C(clone) can take an arbitrary value but is required for initiating the clone.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpu:
+ description:
+ - Specify emulated CPU type.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(kvm64). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cpulimit:
+ description:
+ - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
+ - If the computer has 2 CPUs, it has a total of '2' CPU time.
+ type: int
+ cpuunits:
+ description:
+ - Specify CPU weight for a VM.
+ - You can disable the fair-scheduler configuration by setting this to 0.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ delete:
+ description:
+ - Specify a list of settings you want to delete.
+ type: str
+ description:
+ description:
+ - Specify the description for the VM. Only used on the configuration web interface.
+ - This is saved as comment inside the configuration file.
+ type: str
+ digest:
+ description:
+ - Prevent changes if the current configuration file has a different SHA1 digest.
+ - This can be used to prevent concurrent modifications.
+ type: str
+ force:
+ description:
+ - Allow to force stop the VM.
+ - Can be used with states C(stopped) and C(restarted).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ format:
+ description:
+ - Target drive's backing file's data format.
+ - Used only with clone.
+ - Use I(format=unspecified) and I(full=false) for a linked clone.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(qcow2). If I(proxmox_default_behavior) is set to C(no_defaults),
+ not specifying this option is equivalent to setting it to C(unspecified).
+ Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
+ type: str
+ choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
+ freeze:
+ description:
+ - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
+ type: bool
+ full:
+ description:
+ - Create a full copy of all disks. This is always done when you clone a normal VM.
+ - For VM templates, we try to create a linked clone by default.
+ - Used only with clone.
+ type: bool
+ default: 'yes'
+ hostpci:
+ description:
+ - Specify a hash/dictionary of host PCI devices to map into the guest. C(hostpci='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
+ - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
+ - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
+ - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
+ - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ hotplug:
+ description:
+ - Selectively enable hotplug features.
+ - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
+ - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
+ type: str
+ hugepages:
+ description:
+ - Enable/disable hugepages memory.
+ type: str
+ choices: ['any', '2', '1024']
+ ide:
+ description:
+ - A hash/dictionary of volumes used as IDE hard disks or CD-ROMs. C(ide='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ ipconfig:
+ description:
+ - 'cloud-init: Set the IP configuration.'
+ - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
+ - Values allowed are - C("[gw=<GatewayIPv4>] [,gw6=<GatewayIPv6>] [,ip=<IPv4Format/CIDR>] [,ip6=<IPv6Format/CIDR>]").
+ - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
+ - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address.
+ - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
+ - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.
+ - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
+ type: dict
+ version_added: 1.3.0
+ keyboard:
+ description:
+ - Sets the keyboard layout for VNC server.
+ type: str
+ kvm:
+ description:
+ - Enable/disable KVM hardware virtualization.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ localtime:
+ description:
+ - Sets the real time clock to local time.
+ - This is enabled by default if ostype indicates a Microsoft OS.
+ type: bool
+ lock:
+ description:
+ - Lock/unlock the VM.
+ type: str
+ choices: ['migrate', 'backup', 'snapshot', 'rollback']
+ machine:
+ description:
+ - Specifies the Qemu machine type.
+ - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
+ type: str
+ memory:
+ description:
+ - Memory size in MB for instance.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ migrate_downtime:
+ description:
+ - Sets maximum tolerated downtime (in seconds) for migrations.
+ type: int
+ migrate_speed:
+ description:
+ - Sets maximum speed (in MB/s) for migrations.
+ - A value of 0 is no limit.
+ type: int
+ name:
+ description:
+ - Specifies the VM name. Only used on the configuration web interface.
+ - Required only for C(state=present).
+ type: str
+ nameservers:
+ description:
+ - 'cloud-init: DNS server IP address(es).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ net:
+ description:
+ - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
+ - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
+ - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
+ - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
+ - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as a floating point number; the unit is 'Megabytes per second'.
+ - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
+ type: dict
+ newid:
+ description:
+ - VMID for the clone. Used only with clone.
+ - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ node:
+ description:
+ - Proxmox VE node, where the new VM will be created.
+ - Only required for C(state=present).
+ - For other states, it will be autodiscovered.
+ type: str
+ numa:
+ description:
+ - A hash/dictionary of NUMA topology. C(numa='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
+ - C(cpus) CPUs accessing this NUMA node.
+ - C(hostnodes) Host NUMA nodes to use.
+ - C(memory) Amount of memory this NUMA node provides.
+ - C(policy) NUMA allocation policy.
+ type: dict
+ numa_enabled:
+ description:
+ - Enables NUMA.
+ type: bool
+ onboot:
+ description:
+ - Specifies whether a VM will be started during system bootup.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ ostype:
+ description:
+ - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
+ - The l26 is Linux 2.6/3.X Kernel.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(l26). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
+ parallel:
+ description:
+ - A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(parallel[n]) where 0 ≤ n ≤ 2.
+ - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
+ type: dict
+ pool:
+ description:
+ - Add the new VM to the specified pool.
+ type: str
+ protection:
+ description:
+ - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
+ type: bool
+ reboot:
+ description:
+ - Allow reboot. If set to C(yes), the VM exits on reboot.
+ type: bool
+ revert:
+ description:
+ - Revert a pending change.
+ type: str
+ sata:
+ description:
+ - A hash/dictionary of volumes used as SATA hard disks or CD-ROMs. C(sata='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsi:
+ description:
+ - A hash/dictionary of volumes used as SCSI hard disks or CD-ROMs. C(scsi='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsihw:
+ description:
+ - Specifies the SCSI controller model.
+ type: str
+ choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+ searchdomains:
+ description:
+ - 'cloud-init: Sets DNS search domain(s).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ serial:
+ description:
+ - A hash/dictionary of serial devices to create inside the VM. C('{"key":"value", "key":"value"}').
+ - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
+ - Values allowed are - C((/dev/.+|socket)).
+ - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ shares:
+ description:
+ - Sets the amount of memory shares for auto-ballooning (0 - 50000).
+ - The larger the number is, the more memory this VM gets.
+ - The number is relative to weights of all other running VMs.
+ - Using 0 disables auto-ballooning; this means no limit.
+ type: int
+ skiplock:
+ description:
+ - Ignore locks.
+ - Only root is allowed to use this option.
+ type: bool
+ smbios:
+ description:
+ - Specifies SMBIOS type 1 fields.
+ type: str
+ snapname:
+ description:
+ - The name of the snapshot. Used only with clone.
+ type: str
+ sockets:
+ description:
+ - Sets the number of CPU sockets. (1 - N).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ sshkeys:
+ description:
+ - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
+ type: str
+ version_added: 1.3.0
+ startdate:
+ description:
+ - Sets the initial date of the real time clock.
+ - Valid formats for the date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
+ type: str
+ startup:
+ description:
+ - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+ - Order is a non-negative number defining the general startup order.
+ - Shutdown is done with reverse ordering.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the instance.
+ - If C(current), the current state of the VM will be fetched. You can access it with C(results.status).
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
+ default: present
+ storage:
+ description:
+ - Target storage for full clone.
+ type: str
+ tablet:
+ description:
+ - Enables/disables the USB tablet device.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ target:
+ description:
+ - Target node. Only allowed if the original VM is on shared storage.
+ - Used only with clone
+ type: str
+ tdf:
+ description:
+ - Enables/disables time drift fix.
+ type: bool
+ template:
+ description:
+ - Enables/disables the template.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ timeout:
+ description:
+ - Timeout for operations.
+ type: int
+ default: 30
+ update:
+ description:
+ - If C(yes), the VM will be updated with the new values.
+ - Because of API operations and for security reasons, updating the following parameters is disabled:
+ - C(net, virtio, ide, sata, scsi). For example, updating C(net) updates the MAC address and C(virtio) always creates a new disk...
+ - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'no'
+ vcpus:
+ description:
+ - Sets number of hotplugged vcpus.
+ type: int
+ vga:
+ description:
+ - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(std). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+ virtio:
+ description:
+ - A hash/dictionary of volumes used as VIRTIO hard disks. C(virtio='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ vmid:
+ description:
+ - Specifies the VM ID. The I(name) parameter can be used instead.
+ - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ watchdog:
+ description:
+ - Creates a virtual hardware watchdog device.
+ type: str
+ proxmox_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems when
+ a user expects different behavior from Proxmox by default, or when options which
+ cause problems when set are filled in automatically.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value.
+ - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
+ I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
+ I(tablet), I(template), and I(vga) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+EXAMPLES = '''
+- name: Create new VM with minimal options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+
+- name: Create new VM with minimal options and given vmid
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ vmid: 100
+
+- name: Create new VM with two network interface options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ net1: 'e1000,bridge=vmbr2'
+
+- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ virtio:
+ virtio0: 'VMs_LVM:10'
+ virtio1: 'VMs:2,format=qcow2'
+ virtio2: 'VMs:5,format=raw'
+ cores: 4
+ vcpus: 2
+
+- name: >
+ Clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ format: qcow2
+ timeout: 500
+
+- name: >
+ Create linked clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ full: no
+ format: unspecified
+ timeout: 500
+
+- name: Clone VM with source vmid and target newid and raw format
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: arbitrary_name
+ vmid: 108
+ newid: 152
+ name: zavala
+ node: sabrewulf
+ storage: LVM_STO
+ format: raw
+ timeout: 300
+
+- name: Create new VM and lock it for snapshot
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ lock: snapshot
+
+- name: Create new VM and set protection to disable the remove VM and remove disk operations
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ protection: yes
+
+- name: Create new VM using cloud-init with a username and password
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ ciuser: mylinuxuser
+ cipassword: supersecret
+ searchdomains: 'mydomain.internal'
+ nameservers: 1.1.1.1
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1'
+
+- name: Create new VM using Cloud-Init with an ssh key
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+'
+ searchdomains: 'mydomain.internal'
+ nameservers:
+ - '1.1.1.1'
+ - '8.8.8.8'
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24'
+
+- name: Start VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: started
+
+- name: Stop VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+
+- name: Stop VM with force
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+ force: yes
+
+- name: Restart VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: restarted
+
+- name: Remove VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: absent
+
+- name: Get VM current state
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: current
+
+- name: Update VM configuration
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ cores: 8
+ memory: 16384
+ update: yes
+
+- name: Delete QEMU parameters
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ delete: 'args,template,cpulimit'
+
+- name: Revert a pending change
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ revert: 'template,cpulimit'
+'''
+
+RETURN = '''
+devices:
+ description: The list of devices created or used.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "ide0": "VMS_LVM:vm-115-disk-1",
+ "ide1": "VMs:115/vm-115-disk-3.raw",
+ "virtio0": "VMS_LVM:vm-115-disk-2",
+ "virtio1": "VMs:115/vm-115-disk-1.qcow2",
+ "virtio2": "VMs:115/vm-115-disk-2.raw"
+ }'
+mac:
+ description: List of MAC addresses created and net[n] attached. Useful when you want to use provisioning systems like Foreman via PXE.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "net0": "3E:6E:97:D2:31:9F",
+ "net1": "B6:A1:FC:EF:78:A4"
+ }'
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 115
+status:
+ description:
+ - The current virtual machine status.
+ - Returned only when C(state=current).
+ returned: success
+ type: dict
+ sample: '{
+ "changed": false,
+ "msg": "VM kropta with vmid = 110 is running",
+ "status": "running"
+ }'
+'''
+
+import os
+import re
+import time
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_nextvmid(module, proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, name):
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name]
+
+
+def get_vm(proxmox, vmid):
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
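+# Illustrative note (assumption about the PVE API): each entry returned by
+# proxmox.cluster.resources.get(type='vm') is a dict along the lines of
+#   {'vmid': 100, 'name': 'spynal', 'node': 'sabrewulf', 'status': 'running'}
+# so get_vmid() maps a VM name to its matching vmid(s) and get_vm() finds a VM
+# anywhere in the cluster without knowing its node in advance.
+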
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def get_vminfo(module, proxmox, node, vmid, **kwargs):
+ global results
+ results = {}
+ mac = {}
+ devices = {}
+ try:
+ vm = proxmox.nodes(node).qemu(vmid).config.get()
+ except Exception as e:
+ module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
+
+ # Sanitize kwargs. Remove undefined args.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Split information by type
+ for k, v in kwargs.items():
+ if re.match(r'net[0-9]', k) is not None:
+ interface = k
+ k = vm[k]
+ k = re.search('=(.*?),', k).group(1)
+ mac[interface] = k
+ if (re.match(r'virtio[0-9]', k) is not None or
+ re.match(r'ide[0-9]', k) is not None or
+ re.match(r'scsi[0-9]', k) is not None or
+ re.match(r'sata[0-9]', k) is not None):
+ device = k
+ k = vm[k]
+ k = re.search('(.*?),', k).group(1)
+ devices[device] = k
+
+ results['mac'] = mac
+ results['devices'] = devices
+ results['vmid'] = int(vmid)
+
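+# Illustrative sketch (hypothetical values): if 'net0' and 'virtio0' were among
+# the requested kwargs and the VM config returned by the API contains
+#   {'net0': 'virtio=3E:6E:97:D2:31:9F,bridge=vmbr1',
+#    'virtio0': 'VMS_LVM:vm-115-disk-1,size=10G'}
+# then the regular expressions in get_vminfo() above extract
+#   results['mac']     == {'net0': '3E:6E:97:D2:31:9F'}
+#   results['devices'] == {'virtio0': 'VMS_LVM:vm-115-disk-1'}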
+
+def settings(module, proxmox, vmid, node, name, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove undefined args.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if proxmox_node.qemu(vmid).config.set(**kwargs) is None:
+ return True
+ else:
+ return False
+
+
+def wait_for_task(module, proxmox, node, taskid):
+ timeout = module.params['timeout']
+
+ while timeout:
+ task = proxmox.nodes(node).tasks(taskid).status.get()
+ if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
+ # Wait an extra second as the API can be ahead of the hypervisor
+ time.sleep(1)
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ break
+ time.sleep(1)
+ return False
+
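+# Note on wait_for_task(): it polls the task status roughly once per second,
+# so the module-level 'timeout' option is effectively a number of seconds;
+# False signals that the task did not finish successfully in time.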
+
+def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
+ # Available only in PVE 4
+ only_v4 = ['force', 'protection', 'skiplock']
+ only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig']
+
+ # valid clone parameters
+ valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
+ clone_params = {}
+ # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
+ vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
+
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove undefined args and ensure True and False are converted to int.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+ kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
+
+ # These features work only on PVE 4 and later
+ if PVE_MAJOR_VERSION < 4:
+ for p in only_v4:
+ if p in kwargs:
+ del kwargs[p]
+
+ # These features work only on PVE 6 and later
+ if PVE_MAJOR_VERSION < 6:
+ for p in only_v6:
+ if p in kwargs:
+ del kwargs[p]
+
+ # The 'sshkeys' param expects a URL-encoded string
+ if 'sshkeys' in kwargs:
+ urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
+ kwargs['sshkeys'] = str(urlencoded_ssh_keys)
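+ # Illustrative example (hypothetical key): quote('ssh-rsa AAAA... user@host', safe='')
+ # yields 'ssh-rsa%20AAAA...%20user%40host', the URL-encoded form the PVE API expects.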
+
+ # If update, don't update disk (virtio, ide, sata, scsi) and network interface
+ # pool parameter not supported by qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
+ if update:
+ if 'virtio' in kwargs:
+ del kwargs['virtio']
+ if 'sata' in kwargs:
+ del kwargs['sata']
+ if 'scsi' in kwargs:
+ del kwargs['scsi']
+ if 'ide' in kwargs:
+ del kwargs['ide']
+ if 'net' in kwargs:
+ del kwargs['net']
+ if 'force' in kwargs:
+ del kwargs['force']
+ if 'pool' in kwargs:
+ del kwargs['pool']
+
+ # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Rename numa_enabled to numa, according to the API documentation
+ if 'numa_enabled' in kwargs:
+ kwargs['numa'] = kwargs['numa_enabled']
+ del kwargs['numa_enabled']
+
+ # PVE api expects strings for the following params
+ if 'nameservers' in module.params:
+ nameservers = module.params.pop('nameservers')
+ if nameservers:
+ kwargs['nameserver'] = ' '.join(nameservers)
+ if 'searchdomains' in module.params:
+ searchdomains = module.params.pop('searchdomains')
+ if searchdomains:
+ kwargs['searchdomain'] = ' '.join(searchdomains)
+
+ # -args and skiplock require root@pam user
+ if module.params['api_user'] == "root@pam" and module.params['args'] is None:
+ if not update:
+ kwargs['args'] = vm_args
+ elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
+ kwargs['args'] = module.params['args']
+ elif module.params['api_user'] != "root@pam" and module.params['args'] is not None:
+ module.fail_json(msg='args parameter requires root@pam user.')
+
+ if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None:
+ module.fail_json(msg='skiplock parameter requires root@pam user.')
+
+ if update:
+ if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
+ return True
+ else:
+ return False
+ elif module.params['clone'] is not None:
+ for param in valid_clone_params:
+ if module.params[param] is not None:
+ clone_params[param] = module.params[param]
+ clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
+ taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
+ else:
+ taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
+
+ if not wait_for_task(module, proxmox, node, taskid):
+ module.fail_json(msg='Reached timeout while waiting for VM creation. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def start_vm(module, proxmox, vm):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.start.post()
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for the VM to start. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def stop_vm(module, proxmox, vm, force):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for the VM to stop. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def proxmox_version(proxmox):
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ acpi=dict(type='bool'),
+ agent=dict(type='bool'),
+ args=dict(type='str'),
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ autostart=dict(type='bool'),
+ balloon=dict(type='int'),
+ bios=dict(choices=['seabios', 'ovmf']),
+ boot=dict(type='str'),
+ bootdisk=dict(type='str'),
+ cicustom=dict(type='str'),
+ cipassword=dict(type='str', no_log=True),
+ citype=dict(type='str', choices=['nocloud', 'configdrive2']),
+ ciuser=dict(type='str'),
+ clone=dict(type='str', default=None),
+ cores=dict(type='int'),
+ cpu=dict(type='str'),
+ cpulimit=dict(type='int'),
+ cpuunits=dict(type='int'),
+ delete=dict(type='str', default=None),
+ description=dict(type='str'),
+ digest=dict(type='str'),
+ force=dict(type='bool'),
+ format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
+ freeze=dict(type='bool'),
+ full=dict(type='bool', default=True),
+ hostpci=dict(type='dict'),
+ hotplug=dict(type='str'),
+ hugepages=dict(choices=['any', '2', '1024']),
+ ide=dict(type='dict'),
+ ipconfig=dict(type='dict'),
+ keyboard=dict(type='str'),
+ kvm=dict(type='bool'),
+ localtime=dict(type='bool'),
+ lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
+ machine=dict(type='str'),
+ memory=dict(type='int'),
+ migrate_downtime=dict(type='int'),
+ migrate_speed=dict(type='int'),
+ name=dict(type='str'),
+ nameservers=dict(type='list', elements='str'),
+ net=dict(type='dict'),
+ newid=dict(type='int', default=None),
+ node=dict(),
+ numa=dict(type='dict'),
+ numa_enabled=dict(type='bool'),
+ onboot=dict(type='bool'),
+ ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']),
+ parallel=dict(type='dict'),
+ pool=dict(type='str'),
+ protection=dict(type='bool'),
+ reboot=dict(type='bool'),
+ revert=dict(type='str'),
+ sata=dict(type='dict'),
+ scsi=dict(type='dict'),
+ scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
+ serial=dict(type='dict'),
+ searchdomains=dict(type='list', elements='str'),
+ shares=dict(type='int'),
+ skiplock=dict(type='bool'),
+ smbios=dict(type='str'),
+ snapname=dict(type='str'),
+ sockets=dict(type='int'),
+ sshkeys=dict(type='str'),
+ startdate=dict(type='str'),
+ startup=dict(),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
+ storage=dict(type='str'),
+ tablet=dict(type='bool'),
+ target=dict(type='str'),
+ tdf=dict(type='bool'),
+ template=dict(type='bool'),
+ timeout=dict(type='int', default=30),
+ update=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=False),
+ vcpus=dict(type='int'),
+ vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
+ virtio=dict(type='dict'),
+ vmid=dict(type='int', default=None),
+ watchdog=dict(),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ ),
+ mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
+ required_one_of=[('name', 'vmid',)],
+ required_if=[('state', 'present', ['node'])]
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ clone = module.params['clone']
+ cpu = module.params['cpu']
+ cores = module.params['cores']
+ delete = module.params['delete']
+ memory = module.params['memory']
+ name = module.params['name']
+ newid = module.params['newid']
+ node = module.params['node']
+ revert = module.params['revert']
+ sockets = module.params['sockets']
+ state = module.params['state']
+ update = bool(module.params['update'])
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ acpi=True,
+ autostart=False,
+ balloon=0,
+ boot='cnd',
+ cores=1,
+ cpu='kvm64',
+ cpuunits=1000,
+ force=False,
+ format='qcow2',
+ kvm=True,
+ memory=512,
+ ostype='l26',
+ sockets=1,
+ tablet=False,
+ template=False,
+ vga='std',
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ if module.params['format'] == 'unspecified':
+ module.params['format'] = None
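+ # Note: 'unspecified' is mapped to None so that the format option is skipped
+ # when clone parameters are collected (None values are never sent to the PVE
+ # API), which is what a linked clone requires.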
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global PVE_MAJOR_VERSION
+ version = proxmox_version(proxmox)
+ PVE_MAJOR_VERSION = 3 if version < LooseVersion('4.0') else version.version[0]
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid is not defined then retrieve its value from the vm name,
+ # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
+ if not vmid:
+ if state == 'present' and not update and not clone and not delete and not revert:
+ try:
+ vmid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+ else:
+ clone_target = clone or name
+ try:
+ vmid = get_vmid(proxmox, clone_target)[0]
+ except Exception:
+ vmid = -1
+
+ if clone is not None:
+ # If newid is not defined then retrieve the next free id from ProxmoxAPI
+ if not newid:
+ try:
+ newid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+
+ # Ensure source VM name exists when cloning
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
+
+ # Ensure source VM id exists when cloning
+ if not get_vm(proxmox, vmid):
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ # Ensure the chosen VM name doesn't already exist when cloning
+ if get_vmid(proxmox, name):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+
+ # Ensure the chosen VM id doesn't already exist when cloning
+ if get_vm(proxmox, newid):
+ module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))
+
+ if delete is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, delete=delete)
+ module.exit_json(changed=True, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
+
+ if revert is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, revert=revert)
+ module.exit_json(changed=True, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... '.format(name, vmid) + str(e))
+
+ if state == 'present':
+ try:
+ if get_vm(proxmox, vmid) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
+ elif get_vmid(proxmox, name) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+ elif not (node and name):
+ module.fail_json(msg='node and name are mandatory for creating/updating a VM')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
+ create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update,
+ acpi=module.params['acpi'],
+ agent=module.params['agent'],
+ autostart=module.params['autostart'],
+ balloon=module.params['balloon'],
+ bios=module.params['bios'],
+ boot=module.params['boot'],
+ bootdisk=module.params['bootdisk'],
+ cicustom=module.params['cicustom'],
+ cipassword=module.params['cipassword'],
+ citype=module.params['citype'],
+ ciuser=module.params['ciuser'],
+ cpulimit=module.params['cpulimit'],
+ cpuunits=module.params['cpuunits'],
+ description=module.params['description'],
+ digest=module.params['digest'],
+ force=module.params['force'],
+ freeze=module.params['freeze'],
+ hostpci=module.params['hostpci'],
+ hotplug=module.params['hotplug'],
+ hugepages=module.params['hugepages'],
+ ide=module.params['ide'],
+ ipconfig=module.params['ipconfig'],
+ keyboard=module.params['keyboard'],
+ kvm=module.params['kvm'],
+ localtime=module.params['localtime'],
+ lock=module.params['lock'],
+ machine=module.params['machine'],
+ migrate_downtime=module.params['migrate_downtime'],
+ migrate_speed=module.params['migrate_speed'],
+ net=module.params['net'],
+ numa=module.params['numa'],
+ numa_enabled=module.params['numa_enabled'],
+ onboot=module.params['onboot'],
+ ostype=module.params['ostype'],
+ parallel=module.params['parallel'],
+ pool=module.params['pool'],
+ protection=module.params['protection'],
+ reboot=module.params['reboot'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ scsihw=module.params['scsihw'],
+ serial=module.params['serial'],
+ shares=module.params['shares'],
+ skiplock=module.params['skiplock'],
+ smbios1=module.params['smbios'],
+ snapname=module.params['snapname'],
+ sshkeys=module.params['sshkeys'],
+ startdate=module.params['startdate'],
+ startup=module.params['startup'],
+ tablet=module.params['tablet'],
+ target=module.params['target'],
+ tdf=module.params['tdf'],
+ template=module.params['template'],
+ vcpus=module.params['vcpus'],
+ vga=module.params['vga'],
+ virtio=module.params['virtio'],
+ watchdog=module.params['watchdog'])
+
+ if not clone:
+ get_vminfo(module, proxmox, node, vmid,
+ ide=module.params['ide'],
+ net=module.params['net'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ virtio=module.params['virtio'])
+ if update:
+ module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid))
+ elif clone is not None:
+ module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
+ else:
+ module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+ except Exception as e:
+ if update:
+ module.fail_json(msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
+ elif clone is not None:
+ module.fail_json(msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
+ else:
+ module.fail_json(msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
+
+ elif state == 'started':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']) and start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False)
+
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+ taskid = proxmox_node.qemu.delete(vmid)
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for VM removal. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ else:
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'current':
+ status = {}
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if status:
+ module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py
new file mode 100644
index 00000000..541dc28e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: management of OS templates in a Proxmox VE cluster
+description:
+ - allows you to upload/delete templates in a Proxmox VE cluster
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use the PROXMOX_PASSWORD environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: 'no'
+ type: bool
+ node:
+ description:
+ - Proxmox VE node on which the template will be managed
+ type: str
+ src:
+ description:
+ - path to the file to upload
+ - required only for C(state=present)
+ type: path
+ template:
+ description:
+ - the template name
+ - Required for state C(absent) to delete a template.
+ - Required for state C(present) to download an appliance container template (pveam).
+ type: str
+ content_type:
+ description:
+ - content type
+ - required only for C(state=present)
+ type: str
+ default: 'vztmpl'
+ choices: ['vztmpl', 'iso']
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - can be used only with C(state=present); an existing template will be overwritten
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicate desired state of the template
+ type: str
+ choices: ['present', 'absent']
+ default: present
+notes:
+ - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = '''
+- name: Upload new openvz template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: >
+ Upload new openvz template with minimal options, using the environment
+ PROXMOX_PASSWORD variable (you should export it beforehand)
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: Upload new openvz template with all options and force overwrite
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+ force: yes
+
+- name: Delete template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ template: ubuntu-14.04-x86_64.tar.gz
+ state: absent
+
+- name: Download proxmox appliance container template
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_template(proxmox, node, storage, content_type, template):
+ return [True for tmpl in proxmox.nodes(node).storage(storage).content.get()
+ if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
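+
+# A minimal usage sketch (hypothetical node/template names): with
+# storage='local', content_type='vztmpl' and template='debian-11.tar.gz',
+# the matching volid is 'local:vztmpl/debian-11.tar.gz', so
+# get_template(proxmox, 'pve01', 'local', 'vztmpl', 'debian-11.tar.gz')
+# returns [True] when such a volume exists on the node, and [] otherwise.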
+
+
+def task_status(module, proxmox, node, taskid, timeout):
+ """
+ Check the task status and wait until the task is completed or the timeout is reached.
+ """
+ while timeout:
+ task_status = proxmox.nodes(node).tasks(taskid).status.get()
+ if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for the template upload/download. Last line in task before timeout: %s'
+ % proxmox.nodes(node).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def upload_template(module, proxmox, node, storage, content_type, realpath, timeout):
+ taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def download_template(module, proxmox, node, storage, template, timeout):
+ taskid = proxmox.nodes(node).aplinfo.post(storage=storage, template=template)
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ volid = '%s:%s/%s' % (storage, content_type, template)
+ proxmox.nodes(node).storage(storage).content.delete(volid)
+ while timeout:
+ if not get_template(proxmox, node, storage, content_type, template):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for template deletion.')
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ src=dict(type='path'),
+ template=dict(),
+ content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
+ storage=dict(default='local'),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ storage = module.params['storage']
+ timeout = module.params['timeout']
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError:
+ module.fail_json(msg='You should set the api_password param or use the PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
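+ # Illustrative shapes of auth_args (hypothetical values): password-based
+ # auth yields {'user': 'root@pam', 'password': 's3cret'}, while token-based
+ # auth yields {'user': 'root@pam', 'token_name': 'mytoken', 'token_value': '...'};
+ # either dict is passed straight to ProxmoxAPI below.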
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ # Used to test the validity of the token if given
+ proxmox.version.get()
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ if state == 'present':
+ try:
+ content_type = module.params['content_type']
+ src = module.params['src']
+
+ # download appliance template
+ if content_type == 'vztmpl' and not src:
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param for downloading appliance template is mandatory')
+
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+
+ if download_template(module, proxmox, node, storage, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
+
+ template = os.path.basename(src)
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+ elif not src:
+ module.fail_json(msg='src param for uploading a template file is mandatory')
+ elif not (os.path.exists(src) and os.path.isfile(src)):
+ module.fail_json(msg='template file at path %s does not exist' % src)
+
+ if upload_template(module, proxmox, node, storage, content_type, src, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
+
+ elif state == 'absent':
+ try:
+ content_type = module.params['content_type']
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param is mandatory')
+ elif not get_template(proxmox, node, storage, content_type, template):
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
+
+ if delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py
new file mode 100644
index 00000000..1de93e60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_user_info
+short_description: Retrieve information about one or more Proxmox VE users
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE users
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm']
+ type: str
+ user:
+ description:
+ - Restrict results to a specific user.
+ aliases: ['name']
+ type: str
+ userid:
+ description:
+ - Restrict results to a specific user ID, which is a concatenation of the user and domain parts.
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+EXAMPLES = '''
+- name: List existing users
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_users
+
+- name: List existing users in the pve authentication realm
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_users_pve
+
+- name: Retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ userid: admin@pve
+ register: proxmox_user_admin
+
+- name: Alternative way to retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ user: admin
+ domain: pve
+ register: proxmox_user_admin
+'''
+
+
+RETURN = '''
+proxmox_users:
+ description: List of users.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the user.
+ returned: on success
+ type: str
+ domain:
+ description: User's authentication realm, also the right part of the user ID.
+ returned: on success
+ type: str
+ email:
+ description: User's email address.
+ returned: on success
+ type: str
+ enabled:
+ description: User's account state.
+ returned: on success
+ type: bool
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ firstname:
+ description: User's first name.
+ returned: on success
+ type: str
+ groups:
+ description: List of groups which the user is a member of.
+ returned: on success
+ type: list
+ elements: str
+ keys:
+ description: User's two-factor authentication keys.
+ returned: on success
+ type: str
+ lastname:
+ description: User's last name.
+ returned: on success
+ type: str
+ tokens:
+ description: List of API tokens associated with the user.
+ returned: on success
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the token.
+ returned: on success
+ type: str
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ privsep:
+ description: Describes whether the API token is further restricted with ACLs or is fully privileged.
+ returned: on success
+ type: bool
+ tokenid:
+ description: Token name.
+ returned: on success
+ type: str
+ user:
+ description: User's login name, also the left part of the user ID.
+ returned: on success
+ type: str
+ userid:
+ description: Proxmox user ID, represented as user@realm.
+ returned: on success
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxUserInfoAnsible(ProxmoxAnsible):
+ def get_user(self, userid):
+ try:
+ user = self.proxmox_api.access.users.get(userid)
+ except Exception:
+ self.module.fail_json(msg="User '%s' does not exist" % userid)
+ user['userid'] = userid
+ return ProxmoxUser(user)
+
+ def get_users(self, domain=None):
+ users = self.proxmox_api.access.users.get(full=1)
+ users = [ProxmoxUser(user) for user in users]
+ if domain:
+ return [user for user in users if user.user['domain'] == domain]
+ return users
+
+
+class ProxmoxUser:
+ def __init__(self, user):
+ self.user = dict()
+ # Data representation differs between API calls
+ for k, v in user.items():
+ if k == 'enable':
+ self.user['enabled'] = proxmox_to_ansible_bool(user['enable'])
+ elif k == 'userid':
+ self.user['user'] = user['userid'].split('@')[0]
+ self.user['domain'] = user['userid'].split('@')[1]
+ self.user[k] = v
+ elif k in ['groups', 'tokens'] and (v == '' or v is None):
+ self.user[k] = []
+ elif k == 'groups' and isinstance(v, str):
+ self.user['groups'] = v.split(',')
+ elif k == 'tokens' and isinstance(v, list):
+ for token in v:
+ if 'privsep' in token:
+ token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
+ self.user['tokens'] = v
+ elif k == 'tokens' and isinstance(v, dict):
+ self.user['tokens'] = list()
+ for tokenid, tokenvalues in v.items():
+ t = tokenvalues
+ t['tokenid'] = tokenid
+ if 'privsep' in tokenvalues:
+ t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep'])
+ self.user['tokens'].append(t)
+ else:
+ self.user[k] = v
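+
+# Illustrative normalization (hypothetical values): an API record such as
+# {'userid': 'admin@pve', 'enable': 1, 'groups': 'admins,ops'}
+# becomes
+# {'user': 'admin', 'domain': 'pve', 'userid': 'admin@pve',
+# 'enabled': True, 'groups': ['admins', 'ops']}
+# after passing through ProxmoxUser above.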
+
+
+def proxmox_user_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm']),
+ user=dict(type='str', aliases=['name']),
+ userid=dict(type='str'),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ user_info_args = proxmox_user_info_argument_spec()
+ module_args.update(user_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ mutually_exclusive=[('user', 'userid'), ('domain', 'userid')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxUserInfoAnsible(module)
+ domain = module.params['domain']
+ user = module.params['user']
+ if user and domain:
+ userid = user + '@' + domain
+ else:
+ userid = module.params['userid']
+
+ if userid:
+ users = [proxmox.get_user(userid=userid)]
+ else:
+ users = proxmox.get_users(domain=domain)
+ result['proxmox_users'] = [user.user for user in users]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py
new file mode 100644
index 00000000..2aebc346
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py
@@ -0,0 +1,1516 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhevm
+short_description: RHEV/oVirt automation
+description:
+ - This module only supports oVirt/RHEV version 3.
+ - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+ - Allows you to create/remove/update or power-manage virtual machines on a RHEV/oVirt platform.
+requirements:
+ - ovirtsdk
+author:
+- Timothy Vandenbrande (@TimothyVandenbrande)
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ default: admin@internal
+ password:
+ description:
+ - The password for user authentication.
+ type: str
+ required: true
+ server:
+ description:
+ - The name/IP of your RHEV-m/oVirt instance.
+ type: str
+ default: 127.0.0.1
+ port:
+ description:
+ - The port on which the API is reachable.
+ type: int
+ default: 443
+ insecure_api:
+ description:
+ - A boolean switch to make a secure or insecure connection to the server.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the VM.
+ type: str
+ cluster:
+ description:
+ - The RHEV/oVirt cluster in which you want your VM to start.
+ type: str
+ datacenter:
+ description:
+ - The RHEV/oVirt datacenter in which you want your VM to start.
+ type: str
+ default: Default
+ state:
+ description:
+ - This serves to create/remove/update or power-manage your VM.
+ type: str
+ choices: [ absent, cd, down, info, ping, present, restarted, up ]
+ default: present
+ image:
+ description:
+ - The template to use for the VM.
+ type: str
+ type:
+ description:
+ - Defines whether the VM is a server or a desktop.
+ type: str
+ choices: [ desktop, host, server ]
+ default: server
+ vmhost:
+ description:
+ - The host you wish your VM to run on.
+ type: str
+ vmcpu:
+ description:
+ - The number of CPUs you want in your VM.
+ type: int
+ default: 2
+ cpu_share:
+ description:
+ - This parameter is used to configure the CPU share.
+ type: int
+ default: 0
+ vmmem:
+ description:
+ - The amount of memory you want your VM to use (in GB).
+ type: int
+ default: 1
+ osver:
+ description:
+ - The operating system option in RHEV/oVirt.
+ type: str
+ default: rhel_6x64
+ mempol:
+ description:
+ - The minimum amount of memory you wish to reserve for this system.
+ type: int
+ default: 1
+ vm_ha:
+ description:
+ - Whether to make your VM highly available.
+ type: bool
+ default: yes
+ disks:
+ description:
+ - This option uses complex arguments and is a list of disks with the options name, size and domain.
+ type: list
+ elements: str
+ ifaces:
+ description:
+ - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+ type: list
+ elements: str
+ aliases: [ interfaces, nics ]
+ boot_order:
+ description:
+ - This option uses complex arguments and is a list of items that specify the bootorder.
+ type: list
+ elements: str
+ default: [ hd, network ]
+ del_prot:
+ description:
+ - This option sets the delete protection checkbox.
+ type: bool
+ default: yes
+ cd_drive:
+ description:
+ - The CD you wish to have mounted on the VM when I(state = 'cd').
+ type: str
+ timeout:
+ description:
+ - The timeout you wish to define for power actions.
+ - When I(state = 'up').
+ - When I(state = 'down').
+ - When I(state = 'restarted').
+ type: int
+'''
+
+RETURN = r'''
+vm:
+ description: Returns all of the VMs variables and execution.
+ returned: always
+ type: dict
+ sample: '{
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
+ }'
+'''
+
+EXAMPLES = r'''
+- name: Basic get info from VM
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ state: info
+
+- name: Basic create example from image
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ cluster: centos
+ image: centos7_x64
+ state: present
+
+- name: Power management
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: uptime_server
+ image: centos7_x64
+ state: down
+
+- name: Multi disk, multi nic create example
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: server007
+ type: server
+ vmcpu: 4
+ vmmem: 2
+ ifaces:
+ - name: eth0
+ vlan: vlan2202
+ - name: eth1
+ vlan: vlan36
+ - name: eth2
+ vlan: vlan38
+ - name: eth3
+ vlan: vlan2202
+ disks:
+ - name: root
+ size: 10
+ domain: ssd-san
+ - name: swap
+ size: 10
+ domain: 15kiscsi-san
+ - name: opt
+ size: 10
+ domain: 15kiscsi-san
+ - name: var
+ size: 10
+ domain: 10kiscsi-san
+ - name: home
+ size: 10
+ domain: sata-san
+ boot_order:
+ - network
+ - hd
+ state: present
+
+- name: Add a CD to the disk cd_drive
+ community.general.rhevm:
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: server007
+ cd_drive: rhev-tools-setup.iso
+ state: cd
+
+- name: New host deployment + host network configuration
+ community.general.rhevm:
+ password: '{{ rhevm.admin.pass }}'
+ name: ovirt_node007
+ type: host
+ cluster: rhevm01
+ ifaces:
+ - name: em1
+ - name: em2
+ - name: p3p1
+ ip: 172.31.224.200
+ netmask: 255.255.254.0
+ - name: p3p2
+ ip: 172.31.225.200
+ netmask: 255.255.254.0
+ - name: bond0
+ bond:
+ - em1
+ - em2
+ network: rhevm
+ ip: 172.31.222.200
+ netmask: 255.255.255.0
+ management: yes
+ - name: bond0.36
+ network: vlan36
+ ip: 10.2.36.200
+ netmask: 255.255.254.0
+ gateway: 10.2.36.254
+ - name: bond0.2202
+ network: vlan2202
+ - name: bond0.38
+ network: vlan38
+ state: present
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_SDK = True
+except ImportError:
+ HAS_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+RHEV_FAILED = 1
+RHEV_SUCCESS = 0
+RHEV_UNAVAILABLE = 2
+
+RHEV_TYPE_OPTS = ['desktop', 'host', 'server']
+STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']
+
+msg = []
+changed = False
+failed = False
+
+
+class RHEVConn(object):
+ 'Connection to RHEV-M'
+
+ def __init__(self, module):
+ self.module = module
+
+ user = module.params.get('user')
+ password = module.params.get('password')
+ server = module.params.get('server')
+ port = module.params.get('port')
+ insecure_api = module.params.get('insecure_api')
+
+ url = "https://%s:%s" % (server, port)
+
+ try:
+ api = API(url=url, username=user, password=password, insecure=str(insecure_api))
+ api.test()
+ self.conn = api
+ except Exception:
+ raise Exception("Failed to connect to RHEV-M.")
+
+ def __del__(self):
+ self.conn.disconnect()
+
+ def createVMimage(self, name, cluster, template):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ template=self.conn.templates.get(name=template),
+ disks=params.Disks(clone=True)
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createVM(self, name, cluster, os, actiontype):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ os=params.OperatingSystem(type_=os),
+ template=self.conn.templates.get(name="Blank"),
+ type_=actiontype
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+ VM = self.get_VM(vmname)
+
+ newdisk = params.Disk(
+ name=diskname,
+ size=1024 * 1024 * 1024 * int(disksize),
+ wipe_after_delete=True,
+ sparse=diskallocationtype,
+ interface=diskinterface,
+ format=diskformat,
+ bootable=diskboot,
+ storage_domains=params.StorageDomains(
+ storage_domain=[self.get_domain(diskdomain)]
+ )
+ )
+
+ try:
+ VM.disks.add(newdisk)
+ VM.update()
+ setMsg("Successfully added disk " + diskname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentdisk = VM.disks.get(name=diskname)
+ attempt = 1
+ while currentdisk.status.state != 'ok':
+ currentdisk = VM.disks.get(name=diskname)
+ if attempt == 100:
+ setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The disk " + diskname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + diskname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def createNIC(self, vmname, nicname, vlan, interface):
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ newnic = params.NIC(
+ name=nicname,
+ network=DC.networks.get(name=vlan),
+ interface=interface
+ )
+
+ try:
+ VM.nics.add(newnic)
+ VM.update()
+ setMsg("Successfully added iface " + nicname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentnic = VM.nics.get(name=nicname)
+ attempt = 1
+ while currentnic.active is not True:
+ currentnic = VM.nics.get(name=nicname)
+ if attempt == 100:
+ setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The iface " + nicname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + nicname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def get_DC(self, dc_name):
+ return self.conn.datacenters.get(name=dc_name)
+
+ def get_DC_byid(self, dc_id):
+ return self.conn.datacenters.get(id=dc_id)
+
+ def get_VM(self, vm_name):
+ return self.conn.vms.get(name=vm_name)
+
+ def get_cluster_byid(self, cluster_id):
+ return self.conn.clusters.get(id=cluster_id)
+
+ def get_cluster(self, cluster_name):
+ return self.conn.clusters.get(name=cluster_name)
+
+ def get_domain_byid(self, dom_id):
+ return self.conn.storagedomains.get(id=dom_id)
+
+ def get_domain(self, domain_name):
+ return self.conn.storagedomains.get(name=domain_name)
+
+ def get_disk(self, disk):
+ return self.conn.disks.get(disk)
+
+ def get_network(self, dc_name, network_name):
+ return self.get_DC(dc_name).networks.get(network_name)
+
+ def get_network_byid(self, network_id):
+ return self.conn.networks.get(id=network_id)
+
+ def get_NIC(self, vm_name, nic_name):
+ return self.get_VM(vm_name).nics.get(nic_name)
+
+ def get_Host(self, host_name):
+ return self.conn.hosts.get(name=host_name)
+
+ def get_Host_byid(self, host_id):
+ return self.conn.hosts.get(id=host_id)
+
+ def set_Memory(self, name, memory):
+ VM = self.get_VM(name)
+ VM.memory = int(int(memory) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The Memory has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Memory_Policy(self, name, memory_policy):
+ VM = self.get_VM(name)
+ VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The memory policy has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory policy.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU(self, name, cpu):
+ VM = self.get_VM(name)
+ VM.cpu.topology.cores = int(cpu)
+ try:
+ VM.update()
+ setMsg("The number of CPUs has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the number of CPUs.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU_share(self, name, cpu_share):
+ VM = self.get_VM(name)
+ VM.cpu_shares = int(cpu_share)
+ try:
+ VM.update()
+ setMsg("The CPU share has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the CPU share.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Disk(self, diskname, disksize, diskinterface, diskboot):
+ DISK = self.get_disk(diskname)
+ setMsg("Checking disk " + diskname)
+ if DISK.get_bootable() != diskboot:
+ try:
+ DISK.set_bootable(diskboot)
+ setMsg("Updated the boot option on the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set the boot option on the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The boot option of the disk is correct")
+ if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ try:
+ DISK.size = (1024 * 1024 * 1024 * int(disksize))
+ setMsg("Updated the size of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the size of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
+ setMsg("Shrinking disks is not supported")
+ setFailed()
+ return False
+ else:
+ setMsg("The size of the disk is correct")
+ if str(DISK.interface) != str(diskinterface):
+ try:
+ DISK.interface = diskinterface
+ setMsg("Updated the interface of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the interface of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The interface of the disk is correct")
+ return True
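+
+ # Illustrative call (hypothetical values): set_Disk('web2_Disk0_OS', 50,
+ # 'virtio', True) grows a smaller disk to 50 GB and marks it bootable;
+ # requesting a size below the current one fails, since shrinking is not
+ # supported.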
+
+ def set_NIC(self, vmname, nicname, newname, vlan, interface):
+ NIC = self.get_NIC(vmname, nicname)
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ NETWORK = self.get_network(str(DC.name), vlan)
+ checkFail()
+ if NIC.name != newname:
+ NIC.name = newname
+ setMsg('Updating iface name to ' + newname)
+ setChanged()
+ if str(NIC.network.id) != str(NETWORK.id):
+ NIC.set_network(NETWORK)
+ setMsg('Updating iface network to ' + vlan)
+ setChanged()
+ if NIC.interface != interface:
+ NIC.interface = interface
+ setMsg('Updating iface interface to ' + interface)
+ setChanged()
+ try:
+ NIC.update()
+ setMsg('iface has successfully been updated.')
+ except Exception as e:
+ setMsg("Failed to update the iface.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_DeleteProtection(self, vmname, del_prot):
+ VM = self.get_VM(vmname)
+ VM.delete_protected = del_prot
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update delete protection.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_BootOrder(self, vmname, boot_order):
+ VM = self.get_VM(vmname)
+ bootorder = []
+ for device in boot_order:
+ bootorder.append(params.Boot(dev=device))
+ VM.os.boot = bootorder
+
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the boot order.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_Host(self, host_name, cluster, ifaces):
+ HOST = self.get_Host(host_name)
+ CLUSTER = self.get_cluster(cluster)
+
+ if HOST is None:
+ setMsg("Host does not exist.")
+ ifacelist = dict()
+ networklist = []
+ manageip = ''
+
+ try:
+ for iface in ifaces:
+ try:
+ setMsg('creating host interface ' + iface['name'])
+ if 'management' in iface:
+ manageip = iface['ip']
+ if 'boot_protocol' not in iface:
+ if 'ip' in iface:
+ iface['boot_protocol'] = 'static'
+ else:
+ iface['boot_protocol'] = 'none'
+ if 'ip' not in iface:
+ iface['ip'] = ''
+ if 'netmask' not in iface:
+ iface['netmask'] = ''
+ if 'gateway' not in iface:
+ iface['gateway'] = ''
+
+ if 'network' in iface:
+ if 'bond' in iface:
+ bond = []
+ for slave in iface['bond']:
+ bond.append(ifacelist[slave])
+ try:
+ tmpiface = params.Bonding(
+ slaves=params.Slaves(host_nic=bond),
+ options=params.Options(
+ option=[
+ params.Option(name='miimon', value='100'),
+ params.Option(name='mode', value='4')
+ ]
+ )
+ )
+ except Exception as e:
+ setMsg('Failed to create the bond for ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ try:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ),
+ override_configuration=True,
+ bonding=tmpiface)
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'] + ' as network interface')
+ setFailed()
+ setMsg(str(e))
+ return False
+ else:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ else:
+ tmpiface = params.HostNIC(
+ name=iface['name'],
+ network=params.Network(),
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ ifacelist[iface['name']] = tmpiface
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ setMsg('Failed to set networks')
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ if manageip == '':
+ setMsg('No management network is defined')
+ setFailed()
+ return False
+
+ try:
+ HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
+ if self.conn.hosts.add(HOST):
+ setChanged()
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ while (state != 'non_operational' and state != 'up'):
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to add host to RHEVM')
+ setFailed()
+ return False
+
+ setMsg('status host: up')
+ time.sleep(5)
+
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ setMsg('State before setting to maintenance: ' + str(state))
+ HOST.deactivate()
+ while state != 'maintenance':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ setMsg('status host: maintenance')
+
+ try:
+ HOST.nics.setupnetworks(params.Action(
+ force=True,
+ check_connectivity=False,
+ host_nics=params.HostNics(host_nic=networklist)
+ ))
+ setMsg('nics are set')
+ except Exception as e:
+ setMsg('Failed to apply networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ try:
+ HOST.commitnetconfig()
+ setMsg('Network config is saved')
+ except Exception as e:
+ setMsg('Failed to save networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ if 'The Host name is already in use' in str(e):
+ setMsg("Host already exists")
+ else:
+ setMsg("Failed to add host")
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ HOST.activate()
+ while state != 'up':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to apply networkconfig.')
+ setFailed()
+ return False
+ setMsg('status host: up')
+ else:
+ setMsg("Host exists.")
+
+ return True
+
+ def del_NIC(self, vmname, nicname):
+ return self.get_NIC(vmname, nicname).delete()
+
+ def remove_VM(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.delete()
+ except Exception as e:
+ setMsg("Failed to remove VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def start_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.start()
+ except Exception as e:
+ setMsg("Failed to start VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "up", timeout)
+
+ def wait_VM(self, vmname, state, timeout):
+ VM = self.get_VM(vmname)
+ while VM.status.state != state:
+ VM = self.get_VM(vmname)
+ time.sleep(10)
+ if timeout is not False:
+ timeout -= 10
+ if timeout <= 0:
+ setMsg("Timeout expired")
+ setFailed()
+ return False
+ return True
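+
+ # Illustrative (hypothetical VM name): wait_VM('web2', 'up', 300) polls the
+ # VM state every 10 seconds and gives up after roughly 300 seconds; passing
+ # timeout=False disables the limit and waits indefinitely.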
+
+ def stop_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.stop()
+ except Exception as e:
+ setMsg("Failed to stop VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "down", timeout)
+
+ def set_CD(self, vmname, cd_drive):
+ VM = self.get_VM(vmname)
+ try:
+ if str(VM.status.state) == 'down':
+ cdrom = params.CdRom(file=cd_drive)
+ VM.cdroms.add(cdrom)
+ setMsg("Attached the image.")
+ setChanged()
+ else:
+ cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+ cdrom.set_file(cd_drive)
+ cdrom.update(current=True)
+ setMsg("Attached the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to attach image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_VM_Host(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+ HOST = self.get_Host(vmhost)
+ try:
+ VM.placement_policy.host = HOST
+ VM.update()
+ setMsg("Set startup host to " + vmhost)
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def migrate_VM(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+
+ HOST = self.get_Host_byid(VM.host.id)
+ if str(HOST.name) != vmhost:
+ try:
+ VM.migrate(
+ action=params.Action(
+ host=params.Host(
+ name=vmhost,
+ )
+ ),
+ )
+ setChanged()
+ setMsg("VM migrated to " + vmhost)
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def remove_CD(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+ setMsg("Removed the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to remove the image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+
+class RHEV(object):
+ def __init__(self, module):
+ self.module = module
+
+ def __get_conn(self):
+ self.conn = RHEVConn(self.module)
+ return self.conn
+
+ def test(self):
+ self.__get_conn()
+ return "OK"
+
+ def getVM(self, name):
+ self.__get_conn()
+ VM = self.conn.get_VM(name)
+ if VM:
+ vminfo = dict()
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
+ try:
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ except Exception:
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
+ for boot_dev in VM.os.get_boot():
+ vminfo['boot_order'].append(str(boot_dev.dev))
+ vminfo['disks'] = []
+ for DISK in VM.disks.list():
+ disk = dict()
+ disk['name'] = DISK.name
+ disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
+ disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+ disk['interface'] = DISK.interface
+ vminfo['disks'].append(disk)
+ vminfo['ifaces'] = []
+ for NIC in VM.nics.list():
+ iface = dict()
+ iface['name'] = str(NIC.name)
+ iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+ iface['interface'] = NIC.interface
+ iface['mac'] = NIC.mac.address
+ vminfo['ifaces'].append(iface)
+ vminfo[str(NIC.name)] = NIC.mac.address
+ CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
+ if CLUSTER:
+ vminfo['cluster'] = CLUSTER.name
+ else:
+ vminfo = False
+ return vminfo
+
+ def createVMimage(self, name, cluster, template, disks):
+ self.__get_conn()
+ # RHEVConn.createVMimage() clones the disks from the template itself, so
+ # the disks argument is not forwarded (its signature has no such parameter).
+ return self.conn.createVMimage(name, cluster, template)
+
+ def createVM(self, name, cluster, os, actiontype):
+ self.__get_conn()
+ return self.conn.createVM(name, cluster, os, actiontype)
+
+ def setMemory(self, name, memory):
+ self.__get_conn()
+ return self.conn.set_Memory(name, memory)
+
+ def setMemoryPolicy(self, name, memory_policy):
+ self.__get_conn()
+ return self.conn.set_Memory_Policy(name, memory_policy)
+
+ def setCPU(self, name, cpu):
+ self.__get_conn()
+ return self.conn.set_CPU(name, cpu)
+
+ def setCPUShare(self, name, cpu_share):
+ self.__get_conn()
+ return self.conn.set_CPU_share(name, cpu_share)
+
+ def setDisks(self, name, disks):
+ self.__get_conn()
+ counter = 0
+ bootselect = False
+ for disk in disks:
+ if 'bootable' in disk:
+ if disk['bootable'] is True:
+ bootselect = True
+
+ for disk in disks:
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
+ if diskdomain is None:
+ setMsg("`domain` is a required disk key.")
+ setFailed()
+ return False
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
+ diskallocationtype = disk.get('thin', False)
+ diskboot = disk.get('bootable', False)
+
+ if bootselect is False and counter == 0:
+ diskboot = True
+
+ DISK = self.conn.get_disk(diskname)
+
+ if DISK is None:
+ self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
+ else:
+ self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
+ checkFail()
+ counter += 1
+
+ return True
+
+ def setNetworks(self, vmname, ifaces):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+
+ counter = 0
+ length = len(ifaces)
+
+ for NIC in VM.nics.list():
+ if counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ elif str(name) != str(NIC.name):
+ setMsg("ifaces are in the wrong order, rebuilding everything.")
+ for NIC in VM.nics.list():
+ self.conn.del_NIC(vmname, NIC.name)
+ self.setNetworks(vmname, ifaces)
+ checkFail()
+ return True
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ checkFail()
+ interface = iface.get('interface', 'virtio')
+ self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
+ else:
+ self.conn.del_NIC(vmname, NIC.name)
+ counter += 1
+ checkFail()
+
+ while counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ if failed is True:
+ return False
+ interface = iface.get('interface', 'virtio')
+ self.conn.createNIC(vmname, name, vlan, interface)
+
+ counter += 1
+ checkFail()
+ return True
+
+ def setDeleteProtection(self, vmname, del_prot):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if bool(VM.delete_protected) != bool(del_prot):
+ self.conn.set_DeleteProtection(vmname, del_prot)
+ checkFail()
+ setMsg("`delete protection` has been updated.")
+ else:
+ setMsg("`delete protection` already has the right value.")
+ return True
+
+ def setBootOrder(self, vmname, boot_order):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ bootorder = []
+ for boot_dev in VM.os.get_boot():
+ bootorder.append(str(boot_dev.dev))
+
+ if boot_order != bootorder:
+ self.conn.set_BootOrder(vmname, boot_order)
+ setMsg('The boot order has been set')
+ else:
+ setMsg('The boot order has already been set')
+ return True
+
+ def removeVM(self, vmname):
+ self.__get_conn()
+ self.setPower(vmname, "down", 300)
+ return self.conn.remove_VM(vmname)
+
+ def setPower(self, vmname, state, timeout):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if VM is None:
+ setMsg("VM does not exist.")
+ setFailed()
+ return False
+
+ if state == VM.status.state:
+ setMsg("VM state was already " + state)
+ else:
+ if state == "up":
+ setMsg("VM is going to start")
+ self.conn.start_VM(vmname, timeout)
+ setChanged()
+ elif state == "down":
+ setMsg("VM is going to stop")
+ self.conn.stop_VM(vmname, timeout)
+ setChanged()
+ elif state == "restarted":
+ self.setPower(vmname, "down", timeout)
+ checkFail()
+ self.setPower(vmname, "up", timeout)
+ checkFail()
+ setMsg("the vm state is set to " + state)
+ return True
+
+ def setCD(self, vmname, cd_drive):
+ self.__get_conn()
+ if cd_drive:
+ return self.conn.set_CD(vmname, cd_drive)
+ else:
+ return self.conn.remove_CD(vmname)
+
+ def setVMHost(self, vmname, vmhost):
+ self.__get_conn()
+ return self.conn.set_VM_Host(vmname, vmhost)
+
+ # pylint: disable=unreachable
+ VM = self.conn.get_VM(vmname)
+ HOST = self.conn.get_Host(vmhost)
+
+ if VM.placement_policy.host is None:
+ self.conn.set_VM_Host(vmname, vmhost)
+ elif str(VM.placement_policy.host.id) != str(HOST.id):
+ self.conn.set_VM_Host(vmname, vmhost)
+ else:
+ setMsg("VM's startup host was already set to " + vmhost)
+ checkFail()
+
+ if str(VM.status.state) == "up":
+ self.conn.migrate_VM(vmname, vmhost)
+ checkFail()
+
+ return True
+
+ def setHost(self, hostname, cluster, ifaces):
+ self.__get_conn()
+ return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ return True
+
+
+def setFailed():
+ global failed
+ failed = True
+
+
+def setChanged():
+ global changed
+ changed = True
+
+
+def setMsg(message):
+ global msg
+ msg.append(message)
+
+
+def core(module):
+
+ r = RHEV(module)
+
+ state = module.params.get('state', 'present')
+
+ if state == 'ping':
+ r.test()
+ return RHEV_SUCCESS, {"ping": "pong"}
+ elif state == 'info':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+ elif state == 'present':
+ created = False
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+ else:
+ # Create VM
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ template = module.params.get('image')
+ if template:
+ disks = module.params.get('disks')
+ if disks is None:
+ setMsg("disks is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVMimage(name, cluster, template, disks) is False:
+ return RHEV_FAILED, vminfo
+ else:
+ os = module.params.get('osver')
+ if os is None:
+ setMsg("osver is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVM(name, cluster, os, actiontype) is False:
+ return RHEV_FAILED, vminfo
+ created = True
+
+ # Set MEMORY and MEMORY POLICY
+ vminfo = r.getVM(name)
+ memory = module.params.get('vmmem')
+ if memory is not None:
+ memory_policy = module.params.get('mempol')
+ if memory_policy == 0:
+ memory_policy = memory
+ mem_pol_nok = True
+ if int(vminfo['mem_pol']) == memory_policy:
+ setMsg("Memory is correct")
+ mem_pol_nok = False
+
+ mem_nok = True
+ if int(vminfo['memory']) == memory:
+ setMsg("Memory is correct")
+ mem_nok = False
+
+ if memory_policy > memory:
+ setMsg('memory_policy cannot have a higher value than memory.')
+ return RHEV_FAILED, msg
+
+ if mem_nok and mem_pol_nok:
+ if memory_policy > int(vminfo['memory']):
+ r.setMemory(vminfo['name'], memory)
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ else:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ r.setMemory(vminfo['name'], memory)
+ elif mem_nok:
+ r.setMemory(vminfo['name'], memory)
+ elif mem_pol_nok:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ checkFail()
+
+ # Set CPU
+ cpu = module.params.get('vmcpu')
+ if int(vminfo['cpu_cores']) == cpu:
+ setMsg("Number of CPUs is correct")
+ else:
+ if r.setCPU(vminfo['name'], cpu) is False:
+ return RHEV_FAILED, msg
+
+ # Set CPU SHARE
+ cpu_share = module.params.get('cpu_share')
+ if cpu_share is not None:
+ if int(vminfo['cpu_shares']) == cpu_share:
+ setMsg("CPU share is correct.")
+ else:
+ if r.setCPUShare(vminfo['name'], cpu_share) is False:
+ return RHEV_FAILED, msg
+
+ # Set DISKS
+ disks = module.params.get('disks')
+ if disks is not None:
+ if r.setDisks(vminfo['name'], disks) is False:
+ return RHEV_FAILED, msg
+
+ # Set NETWORKS
+ ifaces = module.params.get('ifaces', None)
+ if ifaces is not None:
+ if r.setNetworks(vminfo['name'], ifaces) is False:
+ return RHEV_FAILED, msg
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Set Boot Order
+ boot_order = module.params.get('boot_order')
+ if r.setBootOrder(vminfo['name'], boot_order) is False:
+ return RHEV_FAILED, msg
+
+ # Set VM Host
+ vmhost = module.params.get('vmhost')
+ if vmhost:
+ if r.setVMHost(vminfo['name'], vmhost) is False:
+ return RHEV_FAILED, msg
+
+ vminfo = r.getVM(name)
+ vminfo['created'] = created
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ if actiontype == 'host':
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ ifaces = module.params.get('ifaces')
+ if ifaces is None:
+ setMsg("ifaces is a required argument.")
+ setFailed()
+ if r.setHost(name, cluster, ifaces) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+ elif state == 'absent':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Remove VM
+ if r.removeVM(vminfo['name']) is False:
+ return RHEV_FAILED, msg
+ setMsg('VM has been removed.')
+ vminfo['state'] = 'DELETED'
+ else:
+ setMsg('VM was already removed.')
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'up' or state == 'down' or state == 'restarted':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ timeout = module.params.get('timeout')
+ if r.setPower(name, state, timeout) is False:
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'cd':
+ name = module.params.get('name')
+ cd_drive = module.params.get('cd_drive')
+ if r.setCD(name, cd_drive) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']),
+ user=dict(type='str', default='admin@internal'),
+ password=dict(type='str', required=True, no_log=True),
+ server=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int', default=443),
+ insecure_api=dict(type='bool', default=False),
+ name=dict(type='str'),
+ image=dict(type='str'),
+ datacenter=dict(type='str', default="Default"),
+ type=dict(type='str', default='server', choices=['desktop', 'host', 'server']),
+ cluster=dict(type='str', default=''),
+ vmhost=dict(type='str'),
+ vmcpu=dict(type='int', default=2),
+ vmmem=dict(type='int', default=1),
+ disks=dict(type='list', elements='str'),
+ osver=dict(type='str', default="rhel_6x64"),
+ ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']),
+ timeout=dict(type='int'),
+ mempol=dict(type='int', default=1),
+ vm_ha=dict(type='bool', default=True),
+ cpu_share=dict(type='int', default=0),
+ boot_order=dict(type='list', elements='str', default=['hd', 'network']),
+ del_prot=dict(type='bool', default=True),
+ cd_drive=dict(type='str'),
+ ),
+ )
+
+ if not HAS_SDK:
+ module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.")
+
+ rc = RHEV_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py
new file mode 100644
index 00000000..912d4226
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+ - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
+options:
+ state:
+ description:
+ - Goal state of given stage/project.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ serverless_bin_path:
+ description:
+ - The path of a serverless framework binary relative to the 'service_path', for example C(node_modules/.bin/serverless).
+ type: path
+ service_path:
+ description:
+ - The path to the root of the Serverless Service to be operated on.
+ type: path
+ required: true
+ stage:
+ description:
+ - The name of the serverless framework project stage to deploy to.
+ - If not provided, the serverless framework default of "dev" is used.
+ type: str
+ functions:
+ description:
+ - A list of specific functions to deploy.
+ - If this is not provided, all functions in the service will be deployed.
+ type: list
+ elements: str
+ default: []
+ region:
+ description:
+ - AWS region to deploy the service to.
+ - This parameter defaults to C(us-east-1).
+ type: str
+ deploy:
+ description:
+ - Whether or not to deploy artifacts after building them.
+ - When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
+ - This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+ type: bool
+ default: yes
+ force:
+ description:
+ - Whether or not to force full deployment, equivalent to serverless C(--force) option.
+ type: bool
+ default: no
+ verbose:
+ description:
+ - Shows all stack events during deployment, and displays any Stack Output.
+ type: bool
+ default: no
+notes:
+ - Currently, the C(serverless) command must be available in the PATH of the node executing the task.
+ In the future this may be a flag.
+requirements:
+- serverless
+- yaml
+author:
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = r'''
+- name: Basic deploy of a service
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ state: present
+
+- name: Deploy specific functions
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ functions:
+ - my_func_one
+ - my_func_two
+
+- name: Deploy a project, then pull its resource list back into Ansible
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ register: sls
+
+# The cloudformation stack is always named the same as the full service, so the
+# cloudformation_info module can get a full list of the stack resources, as
+# well as stack events and outputs
+- cloudformation_info:
+ region: us-east-1
+ stack_name: '{{ sls.service_name }}'
+ stack_resources: true
+
+- name: Deploy a project using a locally installed serverless binary
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ serverless_bin_path: node_modules/.bin/serverless
+'''
+
+RETURN = r'''
+service_name:
+ type: str
+ description: The service name specified in the serverless.yml that was just deployed.
+ returned: always
+ sample: my-fancy-service-dev
+state:
+ type: str
+ description: Whether the stack for the serverless project is present/absent.
+ returned: always
+command:
+ type: str
+ description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
+ returned: always
+ sample: serverless deploy --stage production
+'''
+
+import os
+
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def read_serverless_config(module):
+ path = module.params.get('service_path')
+
+ try:
+ with open(os.path.join(path, 'serverless.yml')) as sls_config:
+ config = yaml.safe_load(sls_config.read())
+ return config
+ except IOError as e:
+ module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(path, str(e)))
+
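+ # Defensive fallback; in practice the try block above either returns the
+ # parsed config or fails the module with the IOError message.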
+ module.fail_json(msg="Failed to open serverless config at {0}".format(
+ os.path.join(path, 'serverless.yml')))
+
+
+def get_service_name(module, stage):
+ config = read_serverless_config(module)
+ if config.get('service') is None:
+ module.fail_json(msg="Could not read `service` key from serverless.yml file")
+
+ if stage:
+ return "{0}-{1}".format(config['service'], stage)
+
+ return "{0}-{1}".format(config['service'], config.get('stage', 'dev'))
+
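+# Illustrative sketch (hypothetical values, not executed by the module): given
+# a minimal serverless.yml of the form
+#
+#   service: my-fancy-service
+#   provider:
+#     name: aws
+#
+# get_service_name(module, 'prod') would return 'my-fancy-service-prod', while
+# get_service_name(module, '') falls back to the configured or default stage,
+# yielding 'my-fancy-service-dev'.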
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ service_path=dict(type='path', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ functions=dict(type='list', elements='str'),
+ region=dict(type='str', default=''),
+ stage=dict(type='str', default=''),
+ deploy=dict(type='bool', default=True),
+ serverless_bin_path=dict(type='path'),
+ force=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ ),
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg='yaml is required for this module')
+
+ service_path = module.params.get('service_path')
+ state = module.params.get('state')
+ functions = module.params.get('functions')
+ region = module.params.get('region')
+ stage = module.params.get('stage')
+ deploy = module.params.get('deploy', True)
+ force = module.params.get('force', False)
+ verbose = module.params.get('verbose', False)
+ serverless_bin_path = module.params.get('serverless_bin_path')
+
+ if serverless_bin_path is not None:
+ command = serverless_bin_path + " "
+ else:
+ command = "serverless "
+
+ if state == 'present':
+ command += 'deploy '
+ elif state == 'absent':
+ command += 'remove '
+ else:
+ module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state))
+
+ if state == 'present':
+ if not deploy:
+ command += '--noDeploy '
+ elif force:
+ command += '--force '
+
+ if region:
+ command += '--region {0} '.format(region)
+ if stage:
+ command += '--stage {0} '.format(stage)
+ if verbose:
+ command += '--verbose '
+
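+ # For illustration: with state=present, region=us-east-1, stage=dev and the
+ # defaults above, the assembled command is
+ # "serverless deploy --region us-east-1 --stage dev" (flags are appended in
+ # the order built here).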
+ rc, out, err = module.run_command(command, cwd=service_path)
+ if rc != 0:
+ if state == 'absent' and "-{0}' does not exist".format(stage) in out:
+ module.exit_json(changed=False, state='absent', command=command,
+ out=out, service_name=get_service_name(module, stage))
+
+ module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err))
+
+ # gather some facts about the deployment
+ module.exit_json(changed=True, state='present', out=out, command=command,
+ service_name=get_service_name(module, stage))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py
new file mode 100644
index 00000000..680bab9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: terraform
+short_description: Manages a Terraform deployment (and plans)
+description:
+ - Provides support for deploying resources with Terraform and pulling
+ resource information back into Ansible.
+options:
+ state:
+ choices: ['planned', 'present', 'absent']
+ description:
+ - Goal state of the given stage/project.
+ type: str
+ default: present
+ binary_path:
+ description:
+ - The path of a terraform binary to use, relative to the 'project_path'
+ unless you supply an absolute path.
+ type: path
+ project_path:
+ description:
+ - The path to the root of the Terraform directory with the
+ vars.tf/main.tf/etc to use.
+ type: path
+ required: true
+ workspace:
+ description:
+ - The terraform workspace to work with.
+ type: str
+ default: default
+ purge_workspace:
+ description:
+ - Only works with C(state=absent).
+ - If true, the workspace will be deleted after the "terraform destroy" action.
+ - The 'default' workspace will not be deleted.
+ default: false
+ type: bool
+ plan_file:
+ description:
+ - The path to an existing Terraform plan file to apply. If this is not
+ specified, Ansible will build a new TF plan and execute it.
+ Note that this option is required if 'state' has the 'planned' value.
+ type: path
+ state_file:
+ description:
+ - The path to an existing Terraform state file to use when building plan.
+ If this is not specified, the default `terraform.tfstate` will be used.
+ - This option is ignored when plan is specified.
+ type: path
+ variables_files:
+ description:
+ - The path to a variables file for Terraform to fill into the TF
+ configurations. This can accept a list of paths to multiple variables files.
+ - Up until Ansible 2.9, this option was usable as I(variables_file).
+ type: list
+ elements: path
+ aliases: [ 'variables_file' ]
+ variables:
+ description:
+ - A group of key-values to override template variables or those in
+ variables files.
+ type: dict
+ targets:
+ description:
+ - A list of specific resources to target in this plan/application. The
+ resources selected here will also auto-include any dependencies.
+ type: list
+ elements: str
+ lock:
+ description:
+ - Enable statefile locking, if you use a service that accepts locks (such
+ as S3+DynamoDB) to store your statefile.
+ type: bool
+ default: true
+ lock_timeout:
+ description:
+ - How long to maintain the lock on the statefile, if you use a service
+ that accepts locks (such as S3+DynamoDB).
+ type: int
+ force_init:
+ description:
+ - To avoid duplicating infra, if a state file can't be found this will
+ force a `terraform init`. Generally, this should be turned off unless
+ you intend to provision an entirely new Terraform deployment.
+ default: false
+ type: bool
+ backend_config:
+ description:
+ - A group of key-values to provide at init stage to the -backend-config parameter.
+ type: dict
+ backend_config_files:
+ description:
+ - The path to a configuration file to provide at init state to the -backend-config parameter.
+ This can accept a list of paths to multiple configuration files.
+ type: list
+ elements: path
+ version_added: '0.2.0'
+ init_reconfigure:
+ description:
+ - Forces backend reconfiguration during init.
+ default: false
+ type: bool
+ version_added: '1.3.0'
+notes:
+ - To just run a `terraform plan`, use check mode.
+requirements: [ "terraform" ]
+author: "Ryan Scott Brown (@ryansb)"
+'''
+
+EXAMPLES = """
+- name: Basic deploy of a service
+ community.general.terraform:
+ project_path: '{{ project_dir }}'
+ state: present
+
+- name: Define the backend configuration at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config:
+ region: "eu-west-1"
+ bucket: "some-bucket"
+ key: "random.tfstate"
+
+- name: Define the backend configuration with one or more files at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config_files:
+ - /path/to/backend_config_file_1
+ - /path/to/backend_config_file_2
+"""
+
+RETURN = """
+outputs:
+ type: complex
+ description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value.
+ returned: on success
+ sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
+ contains:
+ sensitive:
+ type: bool
+ returned: always
+ description: Whether Terraform has marked this value as sensitive
+ type:
+ type: str
+ returned: always
+ description: The type of the value (string, int, etc)
+ value:
+ type: str
+ returned: always
+ description: The value of the output as interpolated by Terraform
+stdout:
+ type: str
+ description: Full `terraform` command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: terraform apply ...
+"""
+
+import os
+import json
+import tempfile
+from ansible.module_utils.six.moves import shlex_quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+DESTROY_ARGS = ('destroy', '-no-color', '-force')
+APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
+module = None
+
+
+def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
+ if project_path in [None, ''] or '/' not in project_path:
+ module.fail_json(msg="Path for Terraform project can not be None or ''.")
+ if not os.path.exists(bin_path):
+ module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again.".format(bin_path))
+ if not os.path.isdir(project_path):
+ module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again.".format(project_path))
+
+ rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path, use_unsafe_shell=True)
+
+
+def _state_args(state_file):
+ if state_file and os.path.exists(state_file):
+ return ['-state', state_file]
+ if state_file and not os.path.exists(state_file):
+ module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
+ return []
+
+
+def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure):
+ command = [bin_path, 'init', '-input=false']
+ if backend_config:
+ for key, val in backend_config.items():
+ command.extend([
+ '-backend-config',
+ shlex_quote('{0}={1}'.format(key, val))
+ ])
+ if backend_config_files:
+ for f in backend_config_files:
+ command.extend(['-backend-config', f])
+ if init_reconfigure:
+ command.extend(['-reconfigure'])
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+
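+# For example (hypothetical values), backend_config={'bucket': 'some-bucket'}
+# turns into the arguments ['-backend-config', 'bucket=some-bucket']
+# (shell-quoted via shlex_quote) on the `terraform init -input=false` call,
+# and each entry of backend_config_files becomes its own -backend-config
+# argument.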
+
+def get_workspace_context(bin_path, project_path):
+ workspace_ctx = {"current": "default", "all": []}
+ command = [bin_path, 'workspace', 'list', '-no-color']
+ rc, out, err = module.run_command(command, cwd=project_path)
+ if rc != 0:
+ module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
+ for item in out.split('\n'):
+ stripped_item = item.strip()
+ if not stripped_item:
+ continue
+ elif stripped_item.startswith('* '):
+ workspace_ctx["current"] = stripped_item.replace('* ', '')
+ else:
+ workspace_ctx["all"].append(stripped_item)
+ return workspace_ctx
+
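+# As an illustration, `terraform workspace list -no-color` prints output like
+#     default
+#   * staging
+#     production
+# which get_workspace_context() parses into
+# {"current": "staging", "all": ["default", "production"]}.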
+
+def _workspace_cmd(bin_path, project_path, action, workspace):
+ command = [bin_path, 'workspace', action, workspace, '-no-color']
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ return rc, out, err
+
+
+def create_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'new', workspace)
+
+
+def select_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'select', workspace)
+
+
+def remove_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'delete', workspace)
+
+
+def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
+ if plan_path is None:
+ f, plan_path = tempfile.mkstemp(suffix='.tfplan')
+
+ plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]
+
+ for t in (module.params.get('targets') or []):
+ plan_command.extend(['-target', t])
+
+ plan_command.extend(_state_args(state_file))
+
+ rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)
+
+ if rc == 0:
+ # no changes
+ return plan_path, False, out, err, (plan_command if state == 'planned' else command)
+ elif rc == 1:
+ # failure to plan
+ module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
+ elif rc == 2:
+ # changes, but successful
+ return plan_path, True, out, err, (plan_command if state == 'planned' else command)
+
+ module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
+
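+# Note: `terraform plan -detailed-exitcode` exits 0 when there is nothing to
+# change, 1 on error, and 2 when changes are pending; build_plan() maps these
+# onto its (plan_path, needs_application, out, err, command) return value.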
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_path=dict(required=True, type='path'),
+ binary_path=dict(type='path'),
+ workspace=dict(required=False, type='str', default='default'),
+ purge_workspace=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'planned']),
+ variables=dict(type='dict'),
+ variables_files=dict(aliases=['variables_file'], type='list', elements='path', default=None),
+ plan_file=dict(type='path'),
+ state_file=dict(type='path'),
+ targets=dict(type='list', elements='str', default=[]),
+ lock=dict(type='bool', default=True),
+ lock_timeout=dict(type='int',),
+ force_init=dict(type='bool', default=False),
+ backend_config=dict(type='dict', default=None),
+ backend_config_files=dict(type='list', elements='path', default=None),
+ init_reconfigure=dict(required=False, type='bool', default=False),
+ ),
+ required_if=[('state', 'planned', ['plan_file'])],
+ supports_check_mode=True,
+ )
+
+ project_path = module.params.get('project_path')
+ bin_path = module.params.get('binary_path')
+ workspace = module.params.get('workspace')
+ purge_workspace = module.params.get('purge_workspace')
+ state = module.params.get('state')
+ variables = module.params.get('variables') or {}
+ variables_files = module.params.get('variables_files')
+ plan_file = module.params.get('plan_file')
+ state_file = module.params.get('state_file')
+ force_init = module.params.get('force_init')
+ backend_config = module.params.get('backend_config')
+ backend_config_files = module.params.get('backend_config_files')
+ init_reconfigure = module.params.get('init_reconfigure')
+
+ if bin_path is not None:
+ command = [bin_path]
+ else:
+ command = [module.get_bin_path('terraform', required=True)]
+
+ if force_init:
+ init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure)
+
+ workspace_ctx = get_workspace_context(command[0], project_path)
+ if workspace_ctx["current"] != workspace:
+ if workspace not in workspace_ctx["all"]:
+ create_workspace(command[0], project_path, workspace)
+ else:
+ select_workspace(command[0], project_path, workspace)
+
+ if state == 'present':
+ command.extend(APPLY_ARGS)
+ elif state == 'absent':
+ command.extend(DESTROY_ARGS)
+
+ variables_args = []
+ for k, v in variables.items():
+ variables_args.extend([
+ '-var',
+ '{0}={1}'.format(k, v)
+ ])
+ if variables_files:
+ for f in variables_files:
+ variables_args.extend(['-var-file', f])
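+ # As an illustration, variables={'env': 'prod'} together with
+ # variables_files=['prod.tfvars'] yields the argument list
+ # ['-var', 'env=prod', '-var-file', 'prod.tfvars'].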
+
+ preflight_validation(command[0], project_path, variables_args)
+
+ if module.params.get('lock') is not None:
+ if module.params.get('lock'):
+ command.append('-lock=true')
+ else:
+ command.append('-lock=false')
+ if module.params.get('lock_timeout') is not None:
+ command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
+
+ for t in (module.params.get('targets') or []):
+ command.extend(['-target', t])
+
+ # we aren't sure if this plan will result in changes, so assume yes
+ needs_application, changed = True, False
+
+ out, err = '', ''
+
+ if state == 'absent':
+ command.extend(variables_args)
+ elif state == 'present' and plan_file:
+ if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
+ command.append(plan_file)
+ else:
+ module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
+ else:
+ plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
+ module.params.get('targets'), state, plan_file)
+ command.append(plan_file)
+
+ if needs_application and not module.check_mode and not state == 'planned':
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ # Inspect stdout to decide whether the run actually made changes: an apply
+ # that reports " 0 added, 0 changed" changed nothing unless something was
+ # destroyed; a destroy made no changes when it reports " 0 destroyed".
+ if (' 0 added, 0 changed' not in out and state != "absent") or ' 0 destroyed' not in out:
+ changed = True
+
+ outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
+ rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
+ if rc == 1:
+ module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
+ outputs = {}
+ elif rc != 0:
+ module.fail_json(
+ msg="Failure when getting Terraform outputs. "
+ "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
+ command=' '.join(outputs_command))
+ else:
+ outputs = json.loads(outputs_text)
+
+ # Restore the Terraform workspace found when running the module
+ if workspace_ctx["current"] != workspace:
+ select_workspace(command[0], project_path, workspace_ctx["current"])
+ if state == 'absent' and workspace != 'default' and purge_workspace is True:
+ remove_workspace(command[0], project_path, workspace)
+
+ module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py
new file mode 100644
index 00000000..25923cb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xenserver_facts
+short_description: Get facts reported on xenserver
+description:
+ - Reads data out of XenAPI, can be used instead of multiple xe commands.
+author:
+ - Andy Hill (@andyhky)
+ - Tim Rupp (@caphrim007)
+ - Robin Lee (@cheese)
+options: {}
+'''
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+ community.general.xenserver_facts:
+
+- name: Print running VMs
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_items: "{{ xs_vms.keys() }}"
+ when: xs_vms[item]['power_state'] == "Running"
+
+# Which will print:
+#
+# TASK: [Print running VMs] ***********************************************************
+# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+# "item": "Control domain on host: 10.0.13.22",
+# "msg": "Control domain on host: 10.0.13.22"
+# }
+'''
+
+
+HAVE_XENAPI = False
+try:
+ import XenAPI
+ HAVE_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+
+
+class XenServerFacts:
+ def __init__(self):
+ self.codes = {
+ '5.5.0': 'george',
+ '5.6.100': 'oxford',
+ '6.0.0': 'boston',
+ '6.1.0': 'tampa',
+ '6.2.0': 'clearwater'
+ }
+
+ @property
+ def version(self):
+ result = distro.linux_distribution()[1]
+ return result
+
+ @property
+ def codename(self):
+ if self.version in self.codes:
+ result = self.codes[self.version]
+ else:
+ result = None
+
+ return result
+
+
+def get_xenapi_session():
+ session = XenAPI.xapi_local()
+ session.xenapi.login_with_password('', '')
+ return session
+
+
+def get_networks(session):
+ recs = session.xenapi.network.get_all_records()
+ networks = change_keys(recs, key='name_label')
+ return networks
+
+
+def get_pifs(session):
+ recs = session.xenapi.PIF.get_all_records()
+ pifs = change_keys(recs, key='uuid')
+ xs_pifs = {}
+ devicenums = range(0, 7)
+ for pif in pifs.values():
+ for eth in devicenums:
+ interface_name = "eth%s" % (eth)
+ bond_name = interface_name.replace('eth', 'bond')
+ if pif['device'] == interface_name:
+ xs_pifs[interface_name] = pif
+ elif pif['device'] == bond_name:
+ xs_pifs[bond_name] = pif
+ return xs_pifs
+
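+# get_pifs() keys the physical interfaces by device name, considering eth0
+# through eth6 (range(0, 7)) plus the matching bond0..bond6 devices.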
+
+def get_vlans(session):
+ recs = session.xenapi.VLAN.get_all_records()
+ return change_keys(recs, key='tag')
+
+
+def change_keys(recs, key='uuid', filter_func=None):
+ """
+ Take a xapi dict, and make the keys the value of recs[ref][key].
+
+ Preserves the ref in rec['ref']
+
+ """
+ new_recs = {}
+
+ for ref, rec in recs.items():
+ if filter_func is not None and not filter_func(rec):
+ continue
+
+ for param_name, param_value in rec.items():
+ # param_value may be of type xmlrpc.client.DateTime,
+ # which is not simply convertible to str.
+ # Use the 'value' attr to get the str value,
+ # following an example in the xmlrpc.client.DateTime documentation.
+ if hasattr(param_value, "value"):
+ rec[param_name] = param_value.value
+ new_recs[rec[key]] = rec
+ new_recs[rec[key]]['ref'] = ref
+
+ return new_recs
+
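+# For example (hypothetical record), with the default key='uuid':
+#   change_keys({'OpaqueRef:1': {'uuid': 'abc', 'device': 'eth0'}})
+# returns
+#   {'abc': {'uuid': 'abc', 'device': 'eth0', 'ref': 'OpaqueRef:1'}}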
+
+def get_host(session):
+ """Get the host"""
+ host_recs = session.xenapi.host.get_all()
+ # We only have one host, so just return its entry
+ return session.xenapi.host.get_record(host_recs[0])
+
+
+def get_vms(session):
+ recs = session.xenapi.VM.get_all_records()
+ if not recs:
+ return None
+ vms = change_keys(recs, key='name_label')
+ return vms
+
+
+def get_srs(session):
+ recs = session.xenapi.SR.get_all_records()
+ if not recs:
+ return None
+ srs = change_keys(recs, key='name_label')
+ return srs
+
+
+def main():
+ module = AnsibleModule({})
+
+ if not HAVE_XENAPI:
+ module.fail_json(changed=False, msg="python xen api required for this module")
+
+ obj = XenServerFacts()
+ try:
+ session = get_xenapi_session()
+ except XenAPI.Failure as e:
+ module.fail_json(msg='%s' % e)
+
+ data = {
+ 'xenserver_version': obj.version,
+ 'xenserver_codename': obj.codename
+ }
+
+ xs_networks = get_networks(session)
+ xs_pifs = get_pifs(session)
+ xs_vlans = get_vlans(session)
+ xs_vms = get_vms(session)
+ xs_srs = get_srs(session)
+
+ if xs_vlans:
+ data['xs_vlans'] = xs_vlans
+ if xs_pifs:
+ data['xs_pifs'] = xs_pifs
+ if xs_networks:
+ data['xs_networks'] = xs_networks
+
+ if xs_vms:
+ data['xs_vms'] = xs_vms
+
+ if xs_srs:
+ data['xs_srs'] = xs_srs
+
+ module.exit_json(ansible_facts=data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
new file mode 100644
index 00000000..90694861
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy.
+description:
+ - Create, remove, reconfigure, update firewall policies.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a firewall policy state to create, remove, or update.
+ required: false
+ type: str
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ firewall_policy:
+ description:
+ - The identifier (id or name) of the firewall policy used with update state.
+ type: str
+ rules:
+ description:
+ - A list of rules that will be set for the firewall policy.
+ Each rule must contain the protocol parameter, in addition to three optional parameters
+ (port_from, port_to, and source).
+ type: list
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a firewall policy.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing firewall policy.
+ Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ description:
+ description:
+ - Firewall policy description. maxLength=256
+ type: str
+ required: false
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible-firewall-policy
+ description: Testing creation of firewall policies with ansible
+ rules:
+ -
+ protocol: TCP
+ port_from: 80
+ port_to: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible-firewall-policy
+
+- name: Update a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: update
+ firewall_policy: ansible-firewall-policy
+ name: ansible-firewall-policy-updated
+ description: Testing creation of firewall policies with ansible - updated
+
+- name: Add server to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ add_server_ips:
+ - server_identifier (id or name)
+ - server_identifier #2 (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ description: Adding rules to an existing firewall policy
+ add_rules:
+ -
+ protocol: TCP
+ port_from: 70
+ port_to: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_from: 60
+ port_to: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+firewall_policy:
+ description: Information about the firewall policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_firewall_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
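+# In check mode, _check_mode(module, result) exits the module immediately,
+# reporting `result` as the changed status; outside check mode it is a no-op.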
+
+def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
+ """
+ Assigns servers to a firewall policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in server_ids:
+ server = get_server(oneandone_conn, _server_id, True)
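+ # Attach via the server's first IP: get_server(..., True) returns the
+ # full server record, including its 'ips' list.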
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.attach_server_firewall_policy(
+ firewall_id=firewall_id,
+ server_ips=attach_servers)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
+ """
+ Unassigns a server/IP from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ firewall_server = oneandone_conn.get_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ if firewall_server:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
+ """
+ Adds new rules to a firewall policy.
+ """
+ try:
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ if module.check_mode:
+ firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
+ if (firewall_rules and firewall_policy_id):
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.add_firewall_policy_rule(
+ firewall_id=firewall_id,
+ firewall_policy_rules=firewall_rules
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
+ """
+ Removes a rule from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_firewall_policy_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_firewall_policy(module, oneandone_conn):
+ """
+ Updates a firewall policy based on input arguments.
+ Firewall rules and server ips can be added/removed to/from
+ firewall policy. Firewall policy name and description can be
+ updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ firewall_policy_id = module.params.get('firewall_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
+ if firewall_policy is None:
+ _check_mode(module, False)
+
+ if name or description:
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.modify_firewall(
+ firewall_id=firewall_policy['id'],
+ name=name,
+ description=description)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_server_ips))
+
+ firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+
+ _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ if add_rules:
+ firewall_policy = _add_firewall_rules(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_rules)
+ _check_mode(module, firewall_policy)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+
+ _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def create_firewall_policy(module, oneandone_conn):
+ """
+ Create a new firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ firewall_policy_obj = oneandone.client.FirewallPolicy(
+ name=name,
+ description=description
+ )
+
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.create_firewall_policy(
+ firewall_policy=firewall_policy_obj,
+ firewall_policy_rules=firewall_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.firewall_policy,
+ firewall_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh
+ changed = True if firewall_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def remove_firewall_policy(module, oneandone_conn):
+ """
+ Removes a firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ fp_id = module.params.get('name')
+ firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
+ if module.check_mode:
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)
+
+ changed = True if firewall_policy else False
+
+ return (changed, {
+ 'id': firewall_policy['id'],
+ 'name': firewall_policy['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ description=dict(type='str'),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a firewall policy.")
+ try:
+ (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'update':
+ if not module.params.get('firewall_policy'):
+ module.fail_json(
+ msg="'firewall_policy' parameter is required to update a firewall policy.")
+ try:
+ (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ for param in ('name', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new firewall policies." % param)
+ try:
+ (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, firewall_policy=firewall_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py
new file mode 100644
index 00000000..62551560
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py
@@ -0,0 +1,677 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_load_balancer
+short_description: Configure 1&1 load balancer.
+description:
+ - Create, remove, update load balancers.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a load balancer state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ load_balancer:
+ description:
+ - The identifier (id or name) of the load balancer used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Load balancer name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ health_check_test:
+ description:
+ - Type of the health check. At the moment, HTTP is not allowed.
+ type: str
+ choices: [ "NONE", "TCP", "HTTP", "ICMP" ]
+ health_check_interval:
+ description:
+ - Health check period in seconds. minimum=5, maximum=300, multipleOf=1
+ type: str
+ health_check_path:
+ description:
+ - URL to call for checking. Required for HTTP health check. maxLength=1000
+ type: str
+ required: false
+ health_check_parse:
+ description:
+ - Regular expression to check. Required for HTTP health check. maxLength=64
+ type: str
+ required: false
+ persistence:
+ description:
+ - Persistence.
+ type: bool
+ persistence_time:
+ description:
+ - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1
+ type: str
+ method:
+ description:
+ - Balancing procedure.
+ type: str
+ choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ]
+ datacenter:
+ description:
+ - ID or country code of the datacenter where the load balancer will be created.
+ - If not specified, it defaults to I(US).
+ type: str
+ choices: [ "US", "ES", "DE", "GB" ]
+ required: false
+ rules:
+ description:
+ - A list of rule objects that will be set for the load balancer. Each rule must contain the protocol,
+ port_balancer, and port_server parameters, in addition to the optional source parameter.
+ type: list
+ description:
+ description:
+ - Description of the load balancer. maxLength=256
+ type: str
+ required: false
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a load balancer.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing load balancer.
+ Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
+ type: list
+ required: false
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ description: Testing creation of load balancer with ansible
+ health_check_test: TCP
+ health_check_interval: 40
+ persistence: true
+ persistence_time: 1200
+ method: ROUND_ROBIN
+ datacenter: US
+ rules:
+ -
+ protocol: TCP
+ port_balancer: 80
+ port_server: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ wait: true
+ wait_timeout: 500
+ state: absent
+
+- name: Update a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer
+ name: ansible load balancer updated
+ description: Testing the update of a load balancer with ansible
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add server to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding server to a load balancer with ansible
+ add_server_ips:
+ - server identifier (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Removing server from a load balancer with ansible
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ add_rules:
+ -
+ protocol: TCP
+ port_balancer: 70
+ port_server: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_balancer: 60
+ port_server: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Removing rules from a load balancer with ansible
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+load_balancer:
+ description: Information about the load balancer that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_load_balancer,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP']
+METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids):
+ """
+ Assigns servers to a load balancer.
+ """
+ try:
+ attach_servers = []
+
+ for server_id in server_ids:
+ server = get_server(oneandone_conn, server_id, True)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.attach_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ips=attach_servers)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id):
+ """
+ Unassigns a server/IP from a load balancer.
+ """
+ try:
+ if module.check_mode:
+ lb_server = oneandone_conn.get_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ if lb_server:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
+ """
+ Adds new rules to a load_balancer.
+ """
+ try:
+ load_balancer_rules = []
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ if module.check_mode:
+ lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
+ if (load_balancer_rules and lb_id):
+ return True
+ return False
+
+ load_balancer = oneandone_conn.add_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id):
+ """
+ Removes a rule from a load_balancer.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id
+ )
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_load_balancer(module, oneandone_conn):
+ """
+ Updates a load_balancer based on input arguments.
+ Load balancer rules and server ips can be added/removed to/from
+ load balancer. Load balancer name, description, health_check_test,
+ health_check_interval, persistence, persistence_time, and method
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ load_balancer_id = module.params.get('load_balancer')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True)
+ if load_balancer is None:
+ _check_mode(module, False)
+
+ if (name or description or health_check_test or health_check_interval or health_check_path or
+ health_check_parse or persistence or persistence_time or method):
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.modify_load_balancer(
+ load_balancer_id=load_balancer['id'],
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_server_ips))
+
+ load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+
+ _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ if add_rules:
+ load_balancer = _add_load_balancer_rules(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_rules)
+ _check_mode(module, load_balancer)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+
+ _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ return (changed, load_balancer)
+
+
+def create_load_balancer(module, oneandone_conn):
+ """
+ Create a new load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ datacenter = module.params.get('datacenter')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ load_balancer_rules = []
+
+ datacenter_id = None
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ _check_mode(module, True)
+ load_balancer_obj = oneandone.client.LoadBalancer(
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method,
+ datacenter_id=datacenter_id
+ )
+
+ load_balancer = oneandone_conn.create_load_balancer(
+ load_balancer=load_balancer_obj,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.load_balancer,
+ load_balancer['id'],
+ wait_timeout,
+ wait_interval)
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh
+ changed = True if load_balancer else False
+
+ _check_mode(module, False)
+
+ return (changed, load_balancer)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_load_balancer(module, oneandone_conn):
+ """
+ Removes a load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ lb_id = module.params.get('name')
+ load_balancer_id = get_load_balancer(oneandone_conn, lb_id)
+ if module.check_mode:
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id)
+
+ changed = True if load_balancer else False
+
+ return (changed, {
+ 'id': load_balancer['id'],
+ 'name': load_balancer['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
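+            # auth_token and api_url fall back to the ONEANDONE_* environment
+            # variables; os.environ.get() is evaluated once, when the spec is built.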
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ load_balancer=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ health_check_test=dict(
+ choices=HEALTH_CHECK_TESTS),
+ health_check_interval=dict(type='str'),
+ health_check_path=dict(type='str'),
+ health_check_parse=dict(type='str'),
+ persistence=dict(type='bool'),
+ persistence_time=dict(type='str'),
+ method=dict(
+ choices=METHODS),
+ datacenter=dict(
+ choices=DATACENTERS),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a load balancer.")
+ try:
+ (changed, load_balancer) = remove_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('load_balancer'):
+ module.fail_json(
+ msg="'load_balancer' parameter is required for updating a load balancer.")
+ try:
+ (changed, load_balancer) = update_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'health_check_test', 'health_check_interval', 'persistence',
+ 'persistence_time', 'method', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new load balancers." % param)
+ try:
+ (changed, load_balancer) = create_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, load_balancer=load_balancer)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py
new file mode 100644
index 00000000..79fed9a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_monitoring_policy
+short_description: Configure 1&1 monitoring policy.
+description:
+  - Create, remove, and update monitoring policies
+ (and add/remove ports, processes, and servers).
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+      - Define a monitoring policy's state to create, remove, or update.
+ type: str
+ required: false
+ default: present
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
+ type: str
+ monitoring_policy:
+ description:
+ - The identifier (id or name) of the monitoring policy used with update state.
+ type: str
+ agent:
+ description:
+      - Set to true to use the agent.
+ type: str
+ email:
+ description:
+ - User's email. maxLength=128
+ type: str
+ description:
+ description:
+ - Monitoring policy description. maxLength=256
+ type: str
+ required: false
+ thresholds:
+ description:
+      - Monitoring policy thresholds. Each of the suboptions has warning and critical,
+        which both have alert and value suboptions. warning sets the limits for
+        warning alerts and critical sets the limits for critical alerts. alert
+        enables the alert, and value defines the value at which the alert is raised.
+ type: list
+ suboptions:
+ cpu:
+ description:
+ - Consumption limits of CPU.
+ required: true
+ ram:
+ description:
+ - Consumption limits of RAM.
+ required: true
+ disk:
+ description:
+ - Consumption limits of hard disk.
+ required: true
+ internal_ping:
+ description:
+ - Response limits of internal ping.
+ required: true
+ transfer:
+ description:
+ - Consumption limits for transfer.
+ required: true
+ ports:
+ description:
+      - Array of ports that will be monitored.
+ type: list
+ suboptions:
+ protocol:
+ description:
+ - Internet protocol.
+ choices: [ "TCP", "UDP" ]
+ required: true
+ port:
+ description:
+ - Port number. minimum=1, maximum=65535
+ required: true
+ alert_if:
+ description:
+          - Condition under which the alert is raised.
+ choices: [ "RESPONDING", "NOT_RESPONDING" ]
+ required: true
+ email_notification:
+ description:
+          - Set to true to send e-mail notifications.
+ required: true
+ processes:
+ description:
+      - Array of processes that will be monitored.
+ type: list
+ suboptions:
+ process:
+ description:
+ - Name of the process. maxLength=50
+ required: true
+ alert_if:
+ description:
+          - Condition under which the alert is raised.
+ choices: [ "RUNNING", "NOT_RUNNING" ]
+ required: true
+ add_ports:
+ description:
+ - Ports to add to the monitoring policy.
+ type: list
+ required: false
+ add_processes:
+ description:
+ - Processes to add to the monitoring policy.
+ type: list
+ required: false
+ add_servers:
+ description:
+ - Servers to add to the monitoring policy.
+ type: list
+ required: false
+ remove_ports:
+ description:
+ - Ports to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_processes:
+ description:
+ - Processes to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_servers:
+ description:
+ - Servers to remove from the monitoring policy.
+ type: list
+ required: false
+ update_ports:
+ description:
+ - Ports to be updated on the monitoring policy.
+ type: list
+ required: false
+ update_processes:
+ description:
+ - Processes to be updated on the monitoring policy.
+ type: list
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible monitoring policy
+ description: Testing creation of a monitoring policy with ansible
+ email: your@emailaddress.com
+ agent: true
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 92
+ alert: false
+ -
+ ram:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ disk:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 50
+ alert: false
+ critical:
+ value: 100
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 1000
+ alert: false
+ critical:
+ value: 2000
+ alert: false
+ ports:
+ -
+ protocol: TCP
+ port: 22
+ alert_if: RESPONDING
+ email_notification: false
+ processes:
+ -
+ process: test
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+
+- name: Destroy a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible monitoring policy
+
+- name: Update a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy
+ name: ansible monitoring policy updated
+ description: Testing creation of a monitoring policy with ansible updated
+ email: another@emailaddress.com
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ ram:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ disk:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 60
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 900
+ alert: false
+ critical:
+ value: 1900
+ alert: false
+ wait: true
+ state: update
+
+- name: Add a port to a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_ports:
+ -
+ protocol: TCP
+ port: 33
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing ports of a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_ports:
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 34
+ alert_if: RESPONDING
+ email_notification: false
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 23
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a port from a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_ports:
+ - port_id
+ state: update
+
+- name: Add a process to a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_processes:
+ -
+ process: test_2
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing processes of a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_processes:
+ -
+ id: process_id
+ process: test_1
+ alert_if: NOT_RUNNING
+ email_notification: false
+ -
+ id: process_id
+ process: test_3
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a process from a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_processes:
+ - process_id
+ wait: true
+ state: update
+
+- name: Add server to a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_servers:
+ - server id or name
+ wait: true
+ state: update
+
+- name: Remove server from a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_servers:
+ - server01
+ wait: true
+ state: update
+'''
+
+RETURN = '''
+monitoring_policy:
+ description: Information about the monitoring policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_monitoring_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
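+# If the import failed, HAS_ONEANDONE_SDK stays False and main() aborts with a
+# readable error instead of the module dying on an ImportError at load time.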
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
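+# _check_mode is the shared early-exit guard: when the task runs with --check
+# it calls module.exit_json(changed=result) and terminates the module;
+# otherwise it does nothing. Illustrative calls (hypothetical values):
+#
+#     _check_mode(module, True)    # --check: exit now, reporting changed=True
+#     _check_mode(module, False)   # --check: exit now, reporting changed=False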
+
+def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
+ """
+ Adds new ports to a monitoring policy.
+ """
+ try:
+ monitoring_policy_ports = []
+
+ for _port in ports:
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=_port['protocol'],
+ port=_port['port'],
+ alert_if=_port['alert_if'],
+ email_notification=_port['email_notification']
+ )
+ monitoring_policy_ports.append(monitoring_policy_port)
+
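+        # Under --check, only report whether any port objects were built;
+        # the add_port API call below is skipped entirely.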
+ if module.check_mode:
+ if monitoring_policy_ports:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_port(
+ monitoring_policy_id=monitoring_policy_id,
+ ports=monitoring_policy_ports)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
+ """
+ Removes a port from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+            # Probe with a read-only lookup so --check never performs the delete.
+            mp_port = oneandone_conn.get_monitoring_policy_port(
+                monitoring_policy_id=monitoring_policy_id,
+                port_id=port_id)
+            if mp_port:
+                return True
+            return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
+ """
+ Modifies a monitoring policy port.
+ """
+ try:
+ if module.check_mode:
+ cm_port = oneandone_conn.get_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if cm_port:
+ return True
+ return False
+
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=port['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id,
+ port=monitoring_policy_port)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
+ """
+ Adds new processes to a monitoring policy.
+ """
+ try:
+ monitoring_policy_processes = []
+
+ for _process in processes:
+ monitoring_policy_process = oneandone.client.Process(
+ process=_process['process'],
+ alert_if=_process['alert_if'],
+ email_notification=_process['email_notification']
+ )
+ monitoring_policy_processes.append(monitoring_policy_process)
+
+ if module.check_mode:
+ mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
+ if (monitoring_policy_processes and mp_id):
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_process(
+ monitoring_policy_id=monitoring_policy_id,
+ processes=monitoring_policy_processes)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
+ """
+ Removes a process from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id
+ )
+ if process:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
+ """
+ Modifies a monitoring policy process.
+ """
+ try:
+ if module.check_mode:
+ cm_process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ if cm_process:
+ return True
+ return False
+
+ monitoring_policy_process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=process['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id,
+ process=monitoring_policy_process)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
+ """
+ Attaches servers to a monitoring policy.
+ """
+ try:
+ attach_servers = []
+
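+        # get_server accepts either a server name or an id and resolves it to
+        # the id expected by AttachServer.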
+ for _server_id in servers:
+ server_id = get_server(oneandone_conn, _server_id)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server_id
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ servers=attach_servers)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
+ """
+ Detaches a server from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ mp_server = oneandone_conn.get_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ if mp_server:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_monitoring_policy(module, oneandone_conn):
+ """
+ Updates a monitoring_policy based on input arguments.
+ Monitoring policy ports, processes and servers can be added/removed to/from
+ a monitoring policy. Monitoring policy name, description, email,
+ thresholds for cpu, ram, disk, transfer and internal_ping
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ monitoring_policy_id = module.params.get('monitoring_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ thresholds = module.params.get('thresholds')
+ add_ports = module.params.get('add_ports')
+ update_ports = module.params.get('update_ports')
+ remove_ports = module.params.get('remove_ports')
+ add_processes = module.params.get('add_processes')
+ update_processes = module.params.get('update_processes')
+ remove_processes = module.params.get('remove_processes')
+ add_servers = module.params.get('add_servers')
+ remove_servers = module.params.get('remove_servers')
+
+ changed = False
+
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
+ if monitoring_policy is None:
+ _check_mode(module, False)
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(
+ name=name,
+ description=description,
+ email=email
+ )
+
+ _thresholds = None
+
+ if thresholds:
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+            for threshold in thresholds:
+                # each item is a single-key dict; list() gets that key on both
+                # Python 2 and 3 (dict.keys() is not indexable on Python 3)
+                key = list(threshold)[0]
+                if key in threshold_entities:
+                    _threshold = oneandone.client.Threshold(
+                        entity=key,
+                        warning_value=threshold[key]['warning']['value'],
+                        warning_alert=str(threshold[key]['warning']['alert']).lower(),
+                        critical_value=threshold[key]['critical']['value'],
+                        critical_alert=str(threshold[key]['critical']['alert']).lower())
+                    _thresholds.append(_threshold)
+
+ if name or description or email or thresholds:
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.modify_monitoring_policy(
+ monitoring_policy_id=monitoring_policy['id'],
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds)
+ changed = True
+
+ if add_ports:
+ if module.check_mode:
+ _check_mode(module, _add_ports(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_ports))
+
+ monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
+ changed = True
+
+ if update_ports:
+ chk_changed = False
+ for update_port in update_ports:
+ if module.check_mode:
+ chk_changed |= _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+
+ _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_ports:
+ chk_changed = False
+ for port_id in remove_ports:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+
+ _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_processes:
+ monitoring_policy = _add_processes(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_processes)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if update_processes:
+ chk_changed = False
+ for update_process in update_processes:
+ if module.check_mode:
+ chk_changed |= _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+
+ _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_processes:
+ chk_changed = False
+ for process_id in remove_processes:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+
+ _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_servers:
+ monitoring_policy = _attach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_servers)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if remove_servers:
+ chk_changed = False
+ for _server_id in remove_servers:
+ server_id = get_server(oneandone_conn, _server_id)
+
+ if module.check_mode:
+ chk_changed |= _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+
+ _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_monitoring_policy(module, oneandone_conn):
+ """
+ Creates a new monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ agent = module.params.get('agent')
+ thresholds = module.params.get('thresholds')
+ ports = module.params.get('ports')
+ processes = module.params.get('processes')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+        _monitoring_policy = oneandone.client.MonitoringPolicy(name,
+                                                               description,
+                                                               email,
+                                                               agent)
+
+ _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
+
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+        for threshold in thresholds:
+            # each item is a single-key dict; list() gets that key on both
+            # Python 2 and 3 (dict.keys() is not indexable on Python 3)
+            key = list(threshold)[0]
+            if key in threshold_entities:
+                _threshold = oneandone.client.Threshold(
+                    entity=key,
+                    warning_value=threshold[key]['warning']['value'],
+                    warning_alert=str(threshold[key]['warning']['alert']).lower(),
+                    critical_value=threshold[key]['critical']['value'],
+                    critical_alert=str(threshold[key]['critical']['alert']).lower())
+                _thresholds.append(_threshold)
+
+ _ports = []
+ for port in ports:
+ _port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=str(port['email_notification']).lower())
+ _ports.append(_port)
+
+ _processes = []
+ for process in processes:
+ _process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=str(process['email_notification']).lower())
+ _processes.append(_process)
+
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.create_monitoring_policy(
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds,
+ ports=_ports,
+ processes=_processes
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.monitoring_policy,
+ monitoring_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if monitoring_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
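+# The thresholds loop in create_monitoring_policy expects each list item to be
+# a single-key dict keyed by the entity name, mirroring the EXAMPLES above
+# (illustrative values):
+#
+#     thresholds = [
+#         {'cpu': {'warning': {'value': 80, 'alert': False},
+#                  'critical': {'value': 92, 'alert': False}}},
+#         {'ram': {'warning': {'value': 80, 'alert': False},
+#                  'critical': {'value': 90, 'alert': False}}},
+#     ]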
+
+def remove_monitoring_policy(module, oneandone_conn):
+ """
+ Removes a monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ mp_id = module.params.get('name')
+ monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
+ if module.check_mode:
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
+
+ changed = True if monitoring_policy else False
+
+ return (changed, {
+ 'id': monitoring_policy['id'],
+ 'name': monitoring_policy['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ agent=dict(type='str'),
+ email=dict(type='str'),
+ description=dict(type='str'),
+ thresholds=dict(type='list', default=[]),
+ ports=dict(type='list', default=[]),
+ processes=dict(type='list', default=[]),
+ add_ports=dict(type='list', default=[]),
+ update_ports=dict(type='list', default=[]),
+ remove_ports=dict(type='list', default=[]),
+ add_processes=dict(type='list', default=[]),
+ update_processes=dict(type='list', default=[]),
+ remove_processes=dict(type='list', default=[]),
+ add_servers=dict(type='list', default=[]),
+ remove_servers=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('monitoring_policy'):
+ module.fail_json(
+ msg="'monitoring_policy' parameter is required to update a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for a new monitoring policy." % param)
+ try:
+ (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py
new file mode 100644
index 00000000..7eae6ea3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py
@@ -0,0 +1,454 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_private_network
+short_description: Configure 1&1 private networking.
+description:
+  - Create, remove, reconfigure, and update a private network.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a network's state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ private_network:
+ description:
+ - The identifier (id or name) of the network used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Private network name used with present state. Used as identifier (id or name) when used with absent state.
+ type: str
+ description:
+ description:
+ - Set a description for the network.
+ type: str
+ datacenter:
+ description:
+ - The identifier of the datacenter where the private network will be created
+ type: str
+ choices: [US, ES, DE, GB]
+ network_address:
+ description:
+      - Set a private network space, e.g. 192.168.1.0
+ type: str
+ subnet_mask:
+ description:
+      - Set the netmask for the private network, e.g. 255.255.255.0
+ type: str
+ add_members:
+ description:
+ - List of server identifiers (name or id) to be added to the private network.
+ type: list
+ remove_members:
+ description:
+ - List of server identifiers (name or id) to be removed from the private network.
+ type: list
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ name: backup_network
+ description: Testing creation of a private network with ansible
+ network_address: 70.35.193.100
+ subnet_mask: 255.0.0.0
+ datacenter: US
+
+- name: Destroy a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: backup_network
+
+- name: Modify the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ network_address: 192.168.2.0
+ subnet_mask: 255.255.255.0
+
+- name: Add members to the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ add_members:
+ - server identifier (id or name)
+
+- name: Remove members from the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ remove_members:
+ - server identifier (id or name)
+'''
+
+RETURN = '''
+private_network:
+ description: Information about the private network.
+ type: dict
+ sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_private_network,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_servers(module, oneandone_conn, name, members):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id and members:
+ return True
+ return False
+
+ network = oneandone_conn.attach_private_network_servers(
+ private_network_id=private_network_id,
+ server_ids=members)
+
+ return network
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_member(module, oneandone_conn, name, member_id):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id:
+ network_member = oneandone_conn.get_private_network_server(
+ private_network_id=private_network_id,
+ server_id=member_id)
+ if network_member:
+ return True
+ return False
+
+ network = oneandone_conn.remove_private_network_server(
+            private_network_id=private_network_id,
+ server_id=member_id)
+
+ return network
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_network(module, oneandone_conn):
+ """
+ Create new private network
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any network was added.
+ """
+ name = module.params.get('name')
+ description = module.params.get('description')
+ network_address = module.params.get('network_address')
+ subnet_mask = module.params.get('subnet_mask')
+ datacenter = module.params.get('datacenter')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+    datacenter_id = None
+    if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ network = oneandone_conn.create_private_network(
+ private_network=oneandone.client.PrivateNetwork(
+ name=name,
+ description=description,
+ network_address=network_address,
+ subnet_mask=subnet_mask,
+ datacenter_id=datacenter_id
+ ))
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.private_network,
+ network['id'],
+ wait_timeout,
+ wait_interval)
+ network = get_private_network(oneandone_conn,
+ network['id'],
+ True)
+
+ changed = True if network else False
+
+ _check_mode(module, False)
+
+ return (changed, network)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_network(module, oneandone_conn):
+ """
+ Modifies a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ _private_network_id = module.params.get('private_network')
+ _name = module.params.get('name')
+ _description = module.params.get('description')
+ _network_address = module.params.get('network_address')
+ _subnet_mask = module.params.get('subnet_mask')
+ _add_members = module.params.get('add_members')
+ _remove_members = module.params.get('remove_members')
+
+ changed = False
+
+ private_network = get_private_network(oneandone_conn,
+ _private_network_id,
+ True)
+ if private_network is None:
+ _check_mode(module, False)
+
+ if _name or _description or _network_address or _subnet_mask:
+ _check_mode(module, True)
+ private_network = oneandone_conn.modify_private_network(
+ private_network_id=private_network['id'],
+ name=_name,
+ description=_description,
+ network_address=_network_address,
+ subnet_mask=_subnet_mask)
+ changed = True
+
+ if _add_members:
+ instances = []
+
+ for member in _add_members:
+ instance_id = get_server(oneandone_conn, member)
+ instance_obj = oneandone.client.AttachServer(server_id=instance_id)
+
+                instances.append(instance_obj)
+ private_network = _add_servers(module, oneandone_conn, private_network['id'], instances)
+ _check_mode(module, private_network)
+ changed = True
+
+ if _remove_members:
+ chk_changed = False
+ for member in _remove_members:
+ instance = get_server(oneandone_conn, member, True)
+
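+                # Under --check this probes only the first member, then exits
+                # through _check_mode; no member is actually removed.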
+ if module.check_mode:
+ chk_changed |= _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ _check_mode(module, instance and chk_changed)
+
+ _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ private_network = get_private_network(oneandone_conn,
+ private_network['id'],
+ True)
+ changed = True
+
+ return (changed, private_network)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_network(module, oneandone_conn):
+ """
+ Removes a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+ """
+ try:
+ pn_id = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ private_network_id = get_private_network(oneandone_conn, pn_id)
+ if module.check_mode:
+ if private_network_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ private_network = oneandone_conn.delete_private_network(private_network_id)
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.private_network,
+ private_network['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if private_network else False
+
+ return (changed, {
+ 'id': private_network['id'],
+ 'name': private_network['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ private_network=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ network_address=dict(type='str'),
+ subnet_mask=dict(type='str'),
+ add_members=dict(type='list', default=[]),
+ remove_members=dict(type='list', default=[]),
+ datacenter=dict(
+ choices=DATACENTERS),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a network.")
+ try:
+ (changed, private_network) = remove_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('private_network'):
+ module.fail_json(
+ msg="'private_network' parameter is required for updating a network.")
+ try:
+ (changed, private_network) = update_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for new networks.")
+ try:
+ (changed, private_network) = create_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, private_network=private_network)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py
new file mode 100644
index 00000000..edefbc93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_public_ip
+short_description: Configure 1&1 public IPs.
+description:
+ - Create, update, and remove public IPs.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a public ip state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ reverse_dns:
+ description:
+ - Reverse DNS name. maxLength=256
+ type: str
+ required: false
+ datacenter:
+ description:
+ - ID of the datacenter where the IP will be created (only for unassigned IPs).
+ type: str
+ choices: [US, ES, DE, GB]
+ default: US
+ required: false
+ type:
+ description:
+ - Type of IP. Currently, only IPV4 is available.
+ type: str
+ choices: ["IPV4", "IPV6"]
+ default: 'IPV4'
+ required: false
+ public_ip_id:
+ description:
+ - The ID of the public IP used with update and delete states.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ reverse_dns: example.com
+ datacenter: US
+ type: IPV4
+
+- name: Update a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ reverse_dns: secondexample.com
+ state: update
+
+- name: Delete a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ state: absent
+'''
+
+RETURN = '''
+public_ip:
+ description: Information about the public ip that was processed
+ type: dict
+ sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_public_ip,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+TYPES = ['IPV4', 'IPV6']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def create_public_ip(module, oneandone_conn):
+ """
+ Create new public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was added.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ datacenter = module.params.get('datacenter')
+ ip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.create_public_ip(
+ reverse_dns=reverse_dns,
+ ip_type=ip_type,
+ datacenter_id=datacenter_id)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
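+            # Re-read the IP so the returned facts reflect its deployed state.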
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_public_ip(module, oneandone_conn):
+ """
+ Update a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was changed.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ public_ip_id = module.params.get('public_ip_id')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.modify_public_ip(
+ ip_id=public_ip['id'],
+ reverse_dns=reverse_dns)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_public_ip(module, oneandone_conn):
+ """
+ Delete a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was deleted.
+ """
+ public_ip_id = module.params.get('public_ip_id')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ deleted_public_ip = oneandone_conn.delete_public_ip(
+ ip_id=public_ip['id'])
+
+ changed = True if deleted_public_ip else False
+
+ return (changed, {
+ 'id': public_ip['id']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ public_ip_id=dict(type='str'),
+ reverse_dns=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ type=dict(
+ choices=TYPES,
+ default='IPV4'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to delete a public ip.")
+ try:
+ (changed, public_ip) = delete_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to update a public ip.")
+ try:
+ (changed, public_ip) = update_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ try:
+ (changed, public_ip) = create_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, public_ip=public_ip)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py
new file mode 100644
index 00000000..1e6caab5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py
@@ -0,0 +1,705 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_server
+short_description: Create, destroy, start, stop, and reboot a 1&1 Host server.
+description:
+ - Create, destroy, update, start, stop, and reboot a 1&1 Host server.
+ When the server is created it can optionally wait for it to be 'running' before returning.
+options:
+ state:
+ description:
+ - Define a server's state to create, remove, start or stop it.
+ type: str
+ default: present
+ choices: [ "present", "absent", "running", "stopped" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1. Overrides the
+ ONEANDONE_AUTH_TOKEN environment variable.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ datacenter:
+ description:
+ - The datacenter location.
+ type: str
+ default: US
+ choices: [ "US", "ES", "DE", "GB" ]
+ hostname:
+ description:
+ - The hostname or ID of the server. Only used when state is 'present'.
+ type: str
+ description:
+ description:
+ - The description of the server.
+ type: str
+ appliance:
+ description:
+ - The operating system name or ID for the server.
+ It is required only for 'present' state.
+ type: str
+ fixed_instance_size:
+ description:
+ - The instance size name or ID of the server.
+ It is required only for 'present' state, and it is mutually exclusive with
+ vcore, cores_per_processor, ram, and hdds parameters.
+ - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
+ type: str
+ vcore:
+ description:
+ - The total number of processors.
+ It must be provided with cores_per_processor, ram, and hdds parameters.
+ type: int
+ cores_per_processor:
+ description:
+ - The number of cores per processor.
+ It must be provided with vcore, ram, and hdds parameters.
+ type: int
+ ram:
+ description:
+      - The amount of RAM.
+        It must be provided with vcore, cores_per_processor, and hdds parameters.
+ type: float
+ hdds:
+ description:
+ - A list of hard disks with nested "size" and "is_main" properties.
+ It must be provided with vcore, cores_per_processor, and ram parameters.
+ type: list
+ private_network:
+ description:
+ - The private network name or ID.
+ type: str
+ firewall_policy:
+ description:
+ - The firewall policy name or ID.
+ type: str
+ load_balancer:
+ description:
+ - The load balancer name or ID.
+ type: str
+ monitoring_policy:
+ description:
+ - The monitoring policy name or ID.
+ type: str
+ server:
+ description:
+ - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'.
+ type: str
+ count:
+ description:
+ - The number of servers to create.
+ type: int
+ default: 1
+ ssh_key:
+ description:
+ - User's public SSH key (contents, not path).
+ type: raw
+ server_type:
+ description:
+ - The type of server to be built.
+ type: str
+ default: "cloud"
+ choices: [ "cloud", "baremetal", "k8s_node" ]
+ wait:
+ description:
+ - Wait for the server to be in state 'running' before returning.
+        Also used for the delete operation (set to 'false' if you do not want
+        to wait for each individual server to be deleted before moving on to
+        other tasks).
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+      - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+      - The number of seconds to sleep between checks when using the wait_for methods.
+ type: int
+ default: 5
+ auto_increment:
+ description:
+ - When creating multiple servers at once, whether to differentiate
+ hostnames by appending a count after them or substituting the count
+ where there is a %02d or %03d in the hostname string.
+ type: bool
+ default: 'yes'
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+
+'''
+
+EXAMPLES = '''
+- name: Create three servers and enumerate their names
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ fixed_instance_size: XL
+ datacenter: US
+ appliance: C5A349786169F140BCBC335675014C08
+ auto_increment: true
+ count: 3
+
+- name: Create three servers, passing in an ssh_key
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ vcore: 2
+ cores_per_processor: 4
+ ram: 8.0
+ hdds:
+ - size: 50
+ is_main: false
+ datacenter: ES
+ appliance: C5A349786169F140BCBC335675014C08
+ count: 3
+ wait: yes
+ wait_timeout: 600
+ wait_interval: 10
+ ssh_key: SSH_PUBLIC_KEY
+
+- name: Removing server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: absent
+ server: 'node01'
+
+- name: Starting server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: running
+ server: 'node01'
+
+- name: Stopping server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: stopped
+ server: 'node01'
+'''
+
+RETURN = '''
+servers:
+ description: Information about each server that was processed
+ type: list
+ sample: '[{"hostname": "my-server", "id": "server-id"}]'
+ returned: always
+'''
+
+import os
+import time
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_fixed_instance_size,
+ get_appliance,
+ get_private_network,
+ get_monitoring_policy,
+ get_firewall_policy,
+ get_load_balancer,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+ONEANDONE_SERVER_STATES = (
+ 'DEPLOYING',
+ 'POWERED_OFF',
+ 'POWERED_ON',
+ 'POWERING_ON',
+ 'POWERING_OFF',
+)
+
+
+def _check_mode(module, result):
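+    # In check mode this helper exits the module immediately via exit_json(),
+    # reporting the supplied value as 'changed'; otherwise it does nothing and
+    # execution continues past the call.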
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _create_server(module, oneandone_conn, hostname, description,
+ fixed_instance_size_id, vcore, cores_per_processor, ram,
+ hdds, datacenter_id, appliance_id, ssh_key,
+ private_network_id, firewall_policy_id, load_balancer_id,
+ monitoring_policy_id, server_type, wait, wait_timeout,
+ wait_interval):
+
+ try:
+ existing_server = get_server(oneandone_conn, hostname)
+
+ if existing_server:
+ if module.check_mode:
+ return False
+ return None
+
+ if module.check_mode:
+ return True
+
+ server = oneandone_conn.create_server(
+ oneandone.client.Server(
+ name=hostname,
+ description=description,
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ appliance_id=appliance_id,
+ datacenter_id=datacenter_id,
+ rsa_key=ssh_key,
+ private_network_id=private_network_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ monitoring_policy_id=monitoring_policy_id,
+ server_type=server_type,), hdds)
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+
+ return server
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _insert_network_data(server):
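+    # Illustrative only (assumed API payload shape): given
+    #   server['ips'] == [{'type': 'IPV4', 'ip': '192.0.2.10'},
+    #                     {'type': 'IPV6', 'ip': '2001:db8::10'}]
+    # this loop adds server['public_ipv4'] = '192.0.2.10' and
+    # server['public_ipv6'] = '2001:db8::10'.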
+ for addr_data in server['ips']:
+ if addr_data['type'] == 'IPV6':
+ server['public_ipv6'] = addr_data['ip']
+ elif addr_data['type'] == 'IPV4':
+ server['public_ipv4'] = addr_data['ip']
+ return server
+
+
+def create_server(module, oneandone_conn):
+ """
+ Create new server
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any server was added, and a 'servers' attribute with the list of the
+ created servers' hostname, id and ip addresses.
+ """
+ hostname = module.params.get('hostname')
+ description = module.params.get('description')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ fixed_instance_size = module.params.get('fixed_instance_size')
+ vcore = module.params.get('vcore')
+ cores_per_processor = module.params.get('cores_per_processor')
+ ram = module.params.get('ram')
+ hdds = module.params.get('hdds')
+ datacenter = module.params.get('datacenter')
+ appliance = module.params.get('appliance')
+ ssh_key = module.params.get('ssh_key')
+ private_network = module.params.get('private_network')
+ monitoring_policy = module.params.get('monitoring_policy')
+ firewall_policy = module.params.get('firewall_policy')
+ load_balancer = module.params.get('load_balancer')
+ server_type = module.params.get('server_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ fixed_instance_size_id = None
+ if fixed_instance_size:
+ fixed_instance_size_id = get_fixed_instance_size(
+ oneandone_conn,
+ fixed_instance_size)
+ if fixed_instance_size_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='fixed_instance_size %s not found.' % fixed_instance_size)
+
+ appliance_id = get_appliance(oneandone_conn, appliance)
+ if appliance_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='appliance %s not found.' % appliance)
+
+ private_network_id = None
+ if private_network:
+ private_network_id = get_private_network(
+ oneandone_conn,
+ private_network)
+ if private_network_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='private network %s not found.' % private_network)
+
+ monitoring_policy_id = None
+ if monitoring_policy:
+ monitoring_policy_id = get_monitoring_policy(
+ oneandone_conn,
+ monitoring_policy)
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='monitoring policy %s not found.' % monitoring_policy)
+
+ firewall_policy_id = None
+ if firewall_policy:
+ firewall_policy_id = get_firewall_policy(
+ oneandone_conn,
+ firewall_policy)
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='firewall policy %s not found.' % firewall_policy)
+
+ load_balancer_id = None
+ if load_balancer:
+ load_balancer_id = get_load_balancer(
+ oneandone_conn,
+ load_balancer)
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='load balancer %s not found.' % load_balancer)
+
+ if auto_increment:
+ hostnames = _auto_increment_hostname(count, hostname)
+ descriptions = _auto_increment_description(count, description)
+ else:
+ hostnames = [hostname] * count
+ descriptions = [description] * count
+
+ hdd_objs = []
+ if hdds:
+ for hdd in hdds:
+ hdd_objs.append(oneandone.client.Hdd(
+ size=hdd['size'],
+ is_main=hdd['is_main']
+ ))
+
+ servers = []
+ for index, name in enumerate(hostnames):
+ server = _create_server(
+ module=module,
+ oneandone_conn=oneandone_conn,
+ hostname=name,
+ description=descriptions[index],
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ hdds=hdd_objs,
+ datacenter_id=datacenter_id,
+ appliance_id=appliance_id,
+ ssh_key=ssh_key,
+ private_network_id=private_network_id,
+ monitoring_policy_id=monitoring_policy_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ server_type=server_type,
+ wait=wait,
+ wait_timeout=wait_timeout,
+ wait_interval=wait_interval)
+ if server:
+ servers.append(server)
+
+ changed = False
+
+ if servers:
+ for server in servers:
+ if server:
+ _check_mode(module, True)
+ _check_mode(module, False)
+ servers = [_insert_network_data(_server) for _server in servers]
+ changed = True
+
+ _check_mode(module, False)
+
+ return (changed, servers)
+
+
+def remove_server(module, oneandone_conn):
+ """
+ Removes a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ the server was removed, and a 'removed_server' attribute with
+ the removed server's hostname and id.
+ """
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+ removed_server = None
+
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ _check_mode(module, True)
+ try:
+ oneandone_conn.delete_server(server_id=server['id'])
+ if wait:
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ changed = True
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to terminate the server: %s" % str(ex))
+
+ removed_server = {
+ 'id': server['id'],
+ 'hostname': server['name']
+ }
+ _check_mode(module, False)
+
+ return (changed, removed_server)
+
+
+def startstop_server(module, oneandone_conn):
+ """
+ Starts or Stops a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a dictionary with a 'changed' attribute indicating whether
+ anything has changed for the server as a result of this function
+ being run, and a 'server' attribute with basic information for
+ the server.
+ """
+ state = module.params.get('state')
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+
+ # Resolve server
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ # Attempt to change the server state, only if it's not already there
+ # or on its way.
+ try:
+ if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_OFF',
+ method='SOFTWARE')
+ elif state == 'running' and server['status']['state'] == 'POWERED_OFF':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_ON',
+ method='SOFTWARE')
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to set server %s to state %s: %s" % (
+ server_id, state, str(ex)))
+
+ _check_mode(module, False)
+
+ # Make sure the server has reached the desired state
+ if wait:
+ operation_completed = False
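+                # From here on, wait_timeout is reused as an absolute deadline.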
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+ server_state = server['status']['state']
+ if state == 'stopped' and server_state == 'POWERED_OFF':
+ operation_completed = True
+ break
+ if state == 'running' and server_state == 'POWERED_ON':
+ operation_completed = True
+ break
+ if not operation_completed:
+ module.fail_json(
+ msg="Timeout waiting for server %s to get to state %s" % (
+ server_id, state))
+
+ changed = True
+ server = _insert_network_data(server)
+
+ _check_mode(module, False)
+
+ return (changed, server)
+
+
+def _auto_increment_hostname(count, hostname):
+ """
+ Allow a custom incremental count in the hostname when defined with the
+    string formatting (%) operator. Otherwise, increment using name-1,
+    name-2, name-3, and so forth.
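+
+    For example: _auto_increment_hostname(3, 'node%02d') returns
+    ['node01', 'node02', 'node03'], while _auto_increment_hostname(3, 'node')
+    falls back to ['node-1', 'node-2', 'node-3'].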
+ """
+ if '%' not in hostname:
+ hostname = "%s-%%01d" % hostname
+
+ return [
+ hostname % i
+ for i in xrange(1, count + 1)
+ ]
+
+
+def _auto_increment_description(count, description):
+ """
+ Allow the incremental count in the description when defined with the
+ string formatting (%) operator. Otherwise, repeat the same description.
+ """
+ if '%' in description:
+ return [
+ description % i
+ for i in xrange(1, count + 1)
+ ]
+ else:
+ return [description] * count
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN'),
+ no_log=True),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ hostname=dict(type='str'),
+ description=dict(type='str'),
+ appliance=dict(type='str'),
+ fixed_instance_size=dict(type='str'),
+ vcore=dict(type='int'),
+ cores_per_processor=dict(type='int'),
+ ram=dict(type='float'),
+ hdds=dict(type='list'),
+ count=dict(type='int', default=1),
+ ssh_key=dict(type='raw'),
+ auto_increment=dict(type='bool', default=True),
+ server=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ private_network=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ load_balancer=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'],
+ ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],),
+ required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],)
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for deleting a server.")
+ try:
+ (changed, servers) = remove_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for starting/stopping a server.")
+ try:
+ (changed, servers) = startstop_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('hostname',
+ 'appliance',
+ 'datacenter'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new server." % param)
+ try:
+ (changed, servers) = create_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, servers=servers)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_facts.py
new file mode 100644
index 00000000..f1e74aa6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_facts.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_server_info) instead.
+short_description: Gather facts about Online servers.
+description:
+ - Gather facts about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server facts
+ community.general.online_server_facts:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+'''
+
+RETURN = r'''
+---
+online_server_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_facts": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineServerFacts, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
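+        # The None filter below is largely defensive: _get_server_detail()
+        # calls fail_json() (which exits the module) on API errors, so in
+        # practice every fetched entry is a server detail dict.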
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_facts = OnlineServerFacts(module).all_detailed_servers()
+ module.exit_json(
+ ansible_facts={'online_server_facts': servers_facts}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py
new file mode 100644
index 00000000..f0e73aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_info
+short_description: Gather information about Online servers.
+description:
+ - Gather information about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server information
+ community.general.online_server_info:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_server_info }}"
+'''
+
+RETURN = r'''
+---
+online_server_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_info": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineServerInfo, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_info = OnlineServerInfo(module).all_detailed_servers()
+ module.exit_json(
+ online_server_info=servers_info
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_facts.py
new file mode 100644
index 00000000..7b78924e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_facts.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_user_info) instead.
+short_description: Gather facts about Online user.
+description:
+ - Gather facts about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user facts
+ community.general.online_user_facts:
+'''
+
+RETURN = r'''
+---
+online_user_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_facts": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineUserFacts, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'online_user_facts': OnlineUserFacts(module).get_resources()}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py
new file mode 100644
index 00000000..093a2c68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_info
+short_description: Gather information about Online user.
+description:
+ - Gather information about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user info
+ community.general.online_user_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_user_info }}"
+'''
+
+RETURN = r'''
+---
+online_user_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_info": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineUserInfo, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ online_user_info=OnlineUserInfo(module).get_resources()
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py
new file mode 100644
index 00000000..efe1ce22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+#
+# Copyright 2018 www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: one_host
+
+short_description: Manages OpenNebula Hosts
+
+
+requirements:
+ - pyone
+
+description:
+ - "Manages OpenNebula Hosts"
+
+options:
+ name:
+ description:
+ - Hostname of the machine to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Takes the host to the desired lifecycle state.
+ - If C(absent) the host will be deleted from the cluster.
+ - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
+ - If C(enabled) the host is fully operational.
+ - C(disabled), e.g. to perform maintenance operations.
+ - C(offline), host is totally offline.
+ choices:
+ - absent
+ - present
+ - enabled
+ - disabled
+ - offline
+ default: present
+ type: str
+ im_mad_name:
+ description:
+            - The name of the information manager; these values are taken from oned.conf with the tag name IM_MAD (name).
+ default: kvm
+ type: str
+ vmm_mad_name:
+ description:
+            - The name of the virtual machine manager mad; these values are taken from oned.conf with the tag name VM_MAD (name).
+ default: kvm
+ type: str
+ cluster_id:
+ description:
+ - The cluster ID.
+ default: 0
+ type: int
+ cluster_name:
+ description:
+ - The cluster specified by name.
+ type: str
+ labels:
+ description:
+ - The labels for this host.
+ type: list
+ template:
+ description:
+ - The template or attribute changes to merge into the host template.
+ aliases:
+ - attributes
+ type: dict
+
+extends_documentation_fragment:
+- community.general.opennebula
+
+
+author:
+ - Rafael del Valle (@rvalle)
+'''
+
+EXAMPLES = '''
+- name: Create a new host in OpenNebula
+ community.general.one_host:
+ name: host1
+ cluster_id: 1
+ api_url: http://127.0.0.1:2633/RPC2
+
+- name: Create a host and adjust its template
+ community.general.one_host:
+ name: host2
+ cluster_name: default
+ template:
+ LABELS:
+ - gold
+ - ssd
+ RESERVED_CPU: -100
+'''
+
+# TODO: pending setting guidelines on returned values
+RETURN = '''
+'''
+
+# TODO: Documentation on valid state transitions is required to properly implement all valid cases
+# TODO: To be coherent with CLI this module should also provide "flush" functionality
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+try:
+ from pyone import HOST_STATES, HOST_STATUS
+except ImportError:
+ pass # handled at module utils
+
+
+# Pseudo definitions...
+
+HOST_ABSENT = -99 # the host is absent (special case defined by this module)
+
+
+class HostModule(OpenNebulaModule):
+
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'),
+ im_mad_name=dict(type='str', default="kvm"),
+ vmm_mad_name=dict(type='str', default="kvm"),
+ cluster_id=dict(type='int', default=0),
+ cluster_name=dict(type='str'),
+ labels=dict(type='list'),
+ template=dict(type='dict', aliases=['attributes']),
+ )
+
+ mutually_exclusive = [
+ ['cluster_id', 'cluster_name']
+ ]
+
+ OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive)
+
+ def allocate_host(self):
+ """
+ Creates a host entry in OpenNebula
+ Returns: True on success, fails otherwise.
+
+ """
+ if not self.one.host.allocate(self.get_parameter('name'),
+ self.get_parameter('vmm_mad_name'),
+ self.get_parameter('im_mad_name'),
+ self.get_parameter('cluster_id')):
+ self.fail(msg="could not allocate host")
+ else:
+ self.result['changed'] = True
+ return True
+
+ def wait_for_host_state(self, host, target_states):
+ """
+ Utility method that waits for a host state.
+ Args:
+            host: the host object whose state is monitored.
+            target_states: list of states to wait for, e.g. [HOST_STATES.MONITORED].
+
+ """
+ return self.wait_for_state('host',
+ lambda: self.one.host.info(host.ID).STATE,
+ lambda s: HOST_STATES(s).name, target_states,
+ invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR])
+
+ def run(self, one, module, result):
+
+ # Get the list of hosts
+ host_name = self.get_parameter("name")
+ host = self.get_host_by_name(host_name)
+
+ # manage host state
+ desired_state = self.get_parameter('state')
+ if bool(host):
+ current_state = host.STATE
+ current_state_name = HOST_STATES(host.STATE).name
+ else:
+ current_state = HOST_ABSENT
+ current_state_name = "ABSENT"
+
+ # apply properties
+ if desired_state == 'present':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]:
+ self.fail(msg="invalid host state %s" % current_state_name)
+
+ elif desired_state == 'enabled':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.ENABLED):
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not enable host")
+ elif current_state in [HOST_STATES.MONITORED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name)
+
+ elif desired_state == 'disabled':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be put in disabled state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.DISABLED):
+ self.wait_for_host_state(host, [HOST_STATES.DISABLED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not disable host")
+ elif current_state in [HOST_STATES.DISABLED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name)
+
+ elif desired_state == 'offline':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be placed in offline state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]:
+ if one.host.status(host.ID, HOST_STATUS.OFFLINE):
+ self.wait_for_host_state(host, [HOST_STATES.OFFLINE])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not set host offline")
+ elif current_state in [HOST_STATES.OFFLINE]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name)
+
+ elif desired_state == 'absent':
+ if current_state != HOST_ABSENT:
+ if one.host.delete(host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="could not delete host from cluster")
+
+ # if we reach this point we can assume that the host was taken to the desired state
+
+ if desired_state != "absent":
+ # manipulate or modify the template
+ desired_template_changes = self.get_parameter('template')
+
+ if desired_template_changes is None:
+ desired_template_changes = dict()
+
+ # complete the template with specific ansible parameters
+ if self.is_parameter('labels'):
+ desired_template_changes['LABELS'] = self.get_parameter('labels')
+
+ if self.requires_template_update(host.TEMPLATE, desired_template_changes):
+ # setup the root element so that pyone will generate XML instead of attribute vector
+ desired_template_changes = {"TEMPLATE": desired_template_changes}
+ if one.host.update(host.ID, desired_template_changes, 1): # merge the template
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host template")
+
+ # the cluster
+ if host.CLUSTER_ID != self.get_parameter('cluster_id'):
+ if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host cluster")
+
+ # return
+ self.exit()
+
+
+def main():
+ HostModule().run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py
new file mode 100644
index 00000000..867bab62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image
+short_description: Manages OpenNebula images
+description:
+ - Manages OpenNebula images
+requirements:
+ - python-oca
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+      - Name of the user to login into the OpenNebula RPC server. If not set then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+      - Password of the user to login into OpenNebula RPC server. If not set then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ id:
+ description:
+ - A C(id) of the image you would like to manage.
+ type: int
+ name:
+ description:
+ - A C(name) of the image you would like to manage.
+ type: str
+ state:
+ description:
+ - C(present) - state that is used to manage the image
+ - C(absent) - delete the image
+ - C(cloned) - clone the image
+ - C(renamed) - rename the image to the C(new_name)
+ choices: ["present", "absent", "cloned", "renamed"]
+ default: present
+ type: str
+ enabled:
+ description:
+ - Whether the image should be enabled or disabled.
+ type: bool
+ new_name:
+ description:
+ - A name that will be assigned to the existing or new image.
+      - In the case of cloning, by default C(new_name) will take the name of the original image with the prefix 'Copy of'.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Fetch the IMAGE by id
+ community.general.one_image:
+ id: 45
+ register: result
+
+- name: Print the IMAGE properties
+ ansible.builtin.debug:
+ msg: result
+
+- name: Rename existing IMAGE
+ community.general.one_image:
+ id: 34
+ state: renamed
+ new_name: bar-image
+
+- name: Disable the IMAGE by id
+ community.general.one_image:
+ id: 37
+ enabled: no
+
+- name: Enable the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ enabled: yes
+
+- name: Clone the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ state: cloned
+ new_name: bar-image-clone
+ register: result
+
+- name: Delete the IMAGE by id
+ community.general.one_image:
+ id: '{{ result.id }}'
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: image id
+ type: int
+ returned: success
+ sample: 153
+name:
+ description: image name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: image's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: image's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: image's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: image's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of image instance
+ type: str
+ returned: success
+ sample: READY
+used:
+ description: is image in use
+ type: bool
+ returned: success
+ sample: true
+running_vms:
+ description: count of running vms that use this image
+ type: int
+ returned: success
+ sample: 7
+'''
+
+try:
+ import oca
+ HAS_OCA = True
+except ImportError:
+ HAS_OCA = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_image(module, client, predicate):
+ pool = oca.ImagePool(client)
+ # Filter -2 means fetch all images user can Use
+ pool.info(filter=-2)
+
+ for image in pool:
+ if predicate(image):
+ return image
+
+ return None
+
+
+def get_image_by_name(module, client, image_name):
+ return get_image(module, client, lambda image: (image.name == image_name))
+
+
+def get_image_by_id(module, client, image_id):
+ return get_image(module, client, lambda image: (image.id == image_id))
+
+
+def get_image_instance(module, client, requested_id, requested_name):
+ if requested_id:
+ return get_image_by_id(module, client, requested_id)
+ else:
+ return get_image_by_name(module, client, requested_name)
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
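+# The list index matches the numeric image state reported by OpenNebula, so
+# IMAGE_STATES[image.state] maps the integer to its symbolic name and
+# IMAGE_STATES.index('READY') converts a name back to the integer.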
+
+
+def get_image_info(image):
+ image.info()
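+    # Note: these keys follow the user_*/group_* naming of the OCA bindings;
+    # the RETURN documentation above describes the same fields as
+    # owner_id/owner_name.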
+
+ info = {
+ 'id': image.id,
+ 'name': image.name,
+ 'state': IMAGE_STATES[image.state],
+ 'running_vms': image.running_vms,
+ 'used': bool(image.running_vms),
+ 'user_name': image.uname,
+ 'user_id': image.uid,
+ 'group_name': image.gname,
+ 'group_id': image.gid,
+ }
+
+ return info
+
+
+def wait_for_state(module, image, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ image.info()
+ state = image.state
+
+ if state_predicate(state):
+ return image
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_ready(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
+
+
+def wait_for_delete(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
+
+
+def enable_image(module, client, image, enable):
+ image.info()
+ changed = False
+
+ state = image.state
+
+ if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
+ if enable:
+ module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
+ else:
+ module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
+
+ if ((enable and state != IMAGE_STATES.index('READY')) or
+ (not enable and state != IMAGE_STATES.index('DISABLED'))):
+ changed = True
+
+ if changed and not module.check_mode:
+ client.call('image.enable', image.id, enable)
+
+ result = get_image_info(image)
+ result['changed'] = changed
+
+ return result
+
+
+def clone_image(module, client, image, new_name):
+ if new_name is None:
+ new_name = "Copy of " + image.name
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ result = get_image_info(tmp_image)
+ result['changed'] = False
+ return result
+
+ if image.state == IMAGE_STATES.index('DISABLED'):
+ module.fail_json(msg="Cannot clone DISABLED image")
+
+ if not module.check_mode:
+ new_id = client.call('image.clone', image.id, new_name)
+ image = get_image_by_id(module, client, new_id)
+ wait_for_ready(module, image)
+
+ result = get_image_info(image)
+ result['changed'] = True
+
+ return result
+
+
+def rename_image(module, client, image, new_name):
+ if new_name is None:
+ module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
+
+ if new_name == image.name:
+ result = get_image_info(image)
+ result['changed'] = False
+ return result
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id))
+
+ if not module.check_mode:
+ client.call('image.rename', image.id, new_name)
+
+ result = get_image_info(image)
+ result['changed'] = True
+ return result
+
+
+def delete_image(module, client, image):
+
+ if not image:
+ return {'changed': False}
+
+ if image.running_vms > 0:
+ module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.")
+
+ if not module.check_mode:
+ client.call('image.delete', image.id)
+ wait_for_delete(module, image)
+
+ return {'changed': True}
+
+
+def get_connection_info(module):
+
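+    # Connection parameters fall back to environment variables when the
+    # module arguments are unset, e.g. (hypothetical values):
+    #   ONE_URL=http://one.example.com:2633/RPC2
+    #   ONE_USERNAME=oneadmin
+    #   ONE_PASSWORD=secret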
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "id": {"required": False, "type": "int"},
+ "name": {"required": False, "type": "str"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'cloned', 'renamed'],
+ "type": "str"
+ },
+ "enabled": {"required": False, "type": "bool"},
+ "new_name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['id', 'name']],
+ supports_check_mode=True)
+
+ if not HAS_OCA:
+ module.fail_json(msg='This module requires python-oca to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ id = params.get('id')
+ name = params.get('name')
+ state = params.get('state')
+ enabled = params.get('enabled')
+ new_name = params.get('new_name')
+ client = oca.Client(auth.username + ':' + auth.password, auth.url)
+
+ result = {}
+
+ if not id and state == 'renamed':
+ module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
+
+ image = get_image_instance(module, client, id, name)
+ if not image and state != 'absent':
+ if id:
+ module.fail_json(msg="There is no image with id=" + str(id))
+ else:
+ module.fail_json(msg="There is no image with name=" + name)
+
+ if state == 'absent':
+ result = delete_image(module, client, image)
+ else:
+ result = get_image_info(image)
+ changed = False
+ result['changed'] = False
+
+ if enabled is not None:
+ result = enable_image(module, client, image, enabled)
+ if state == "cloned":
+ result = clone_image(module, client, image, new_name)
+ elif state == "renamed":
+ result = rename_image(module, client, image, new_name)
+
+ changed = changed or result['changed']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_facts.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_facts.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+      - Name of the user to login into the OpenNebula RPC server. If not set then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+      - Password of the user to login into OpenNebula RPC server. If not set then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+      - A list of image IDs whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+      - If the C(name) begins with '~' the C(name) will be used as a regex pattern which restricts the list of images (whose facts will be returned) whose names match the specified regex.
+      - Also, if the C(name) begins with '~*', case-insensitive matching will be performed.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+ msg: result
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of images info
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ pool = client.imagepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all images user can Use
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+        module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join(str(image_id) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
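+    # Illustrative pattern semantics: name_pattern='app1' matches a single
+    # image by exact name; '~app-image-.*' treats the remainder as a regex;
+    # '~*foo-image-.*' applies the same regex case-insensitively.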
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # if the specific name is indicated
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+  api_url:
+    description:
+      - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not
+        transferred over the network unencrypted.
+      - If not set then the value of the C(ONE_URL) environment variable is used.
+    type: str
+  api_username:
+    description:
+      - Name of the user to login into the OpenNebula RPC server. If not set
+        then the value of the C(ONE_USERNAME) environment variable is used.
+    type: str
+  api_password:
+    description:
+      - Password of the user to login into the OpenNebula RPC server. If not set
+        then the value of the C(ONE_PASSWORD) environment variable is used.
+    type: str
+  ids:
+    description:
+      - A list of image ids whose facts you want to gather.
+    aliases: ['id']
+    type: list
+  name:
+    description:
+      - A C(name) of the image whose facts will be gathered.
+      - If the C(name) begins with '~' the C(name) will be used as a regex pattern
+        which restricts the list of images (whose facts will be returned) to those
+        whose names match the specified regex.
+      - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+      - See examples for more details.
+    type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+    var: result
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of images info
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+    user_id:
+      description: image's user (owner) id
+      type: int
+      sample: 143
+    user_name:
+      description: image's user (owner) name
+      type: str
+      sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ pool = client.imagepool.info(-2, -1, -1, -1)
+    # Filter -2 means fetch all images the user can use
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
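+# image.STATE is an integer that indexes into IMAGE_STATES, e.g. STATE == 1
+# maps to 'READY' and STATE == 2 to 'USED' (see get_image_info below).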
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+        module.fail_json(msg='No IMAGE found with id(s): ' + ', '.join(str(image_id) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
+ if name_pattern.startswith('~'):
+ import re
+        if name_pattern[1:2] == '*':  # slicing avoids an IndexError for a bare '~'
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+    # fail if an exact name (no '~' pattern) was given and no image matched
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+    if not (url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py
new file mode 100644
index 00000000..68f8398f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py
@@ -0,0 +1,768 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_service
+short_description: Deploy and manage OpenNebula services
+description:
+ - Manage OpenNebula services
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula OneFlow API server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+      - If not set then the value of the C(ONEFLOW_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used.
+ type: str
+ template_name:
+ description:
+ - Name of service template to use to create a new instance of a service
+ type: str
+ template_id:
+ description:
+ - ID of a service template to use to create a new instance of a service
+ type: int
+ service_id:
+ description:
+ - ID of a service instance that you would like to manage
+ type: int
+ service_name:
+ description:
+ - Name of a service instance that you would like to manage
+ type: str
+ unique:
+ description:
+      - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when
+        instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below.
+ type: bool
+ default: no
+ state:
+ description:
+ - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name).
+ - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name).
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ mode:
+ description:
+ - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the service
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the service
+ type: int
+ wait:
+ description:
+      - Wait for the service to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING
+ type: bool
+ default: no
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ custom_attrs:
+ description:
+ - Dictionary of key/value custom attributes which will be used when instantiating a new service.
+ default: {}
+ type: dict
+ role:
+ description:
+ - Name of the role whose cardinality should be changed
+ type: str
+ cardinality:
+ description:
+ - Number of VMs for the specified role
+ type: int
+ force:
+ description:
+ - Force the new cardinality even if it is outside the limits
+ type: bool
+ default: no
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Instantiate a new service
+ community.general.one_service:
+ template_id: 90
+ register: result
+
+- name: Print service properties
+ ansible.builtin.debug:
+    var: result
+
+- name: Instantiate a new service with specified service_name, service group and mode
+ community.general.one_service:
+ template_name: 'app1_template'
+ service_name: 'app1'
+ group_id: 1
+ mode: '660'
+
+- name: Instantiate a new service with template_id and pass custom_attrs dict
+ community.general.one_service:
+ template_id: 90
+ custom_attrs:
+ public_network_id: 21
+ private_network_id: 26
+
+- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
+ community.general.one_service:
+ template_id: 53
+ service_name: 'foo'
+ unique: yes
+
+- name: Delete a service by ID
+ community.general.one_service:
+ service_id: 153
+ state: absent
+
+- name: Get service info
+ community.general.one_service:
+ service_id: 153
+ register: service_info
+
+- name: Change service owner, group and mode
+ community.general.one_service:
+ service_name: 'app2'
+ owner_id: 34
+ group_id: 113
+ mode: '600'
+
+- name: Instantiate service and wait for it to become RUNNING
+  community.general.one_service:
+    template_id: 43
+    service_name: 'foo1'
+    wait: yes
+
+- name: Wait for the service to become RUNNING
+ community.general.one_service:
+ service_id: 112
+ wait: yes
+
+- name: Change role cardinality
+ community.general.one_service:
+ service_id: 153
+ role: bar
+ cardinality: 5
+
+- name: Change role cardinality and wait for it to be applied
+ community.general.one_service:
+ service_id: 112
+ role: foo
+ cardinality: 7
+ wait: yes
+'''
+
+RETURN = '''
+service_id:
+ description: service id
+ type: int
+ returned: success
+ sample: 153
+service_name:
+ description: service name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: service's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: service's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: service's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: service's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of service instance
+ type: str
+ returned: success
+ sample: RUNNING
+mode:
+ description: service's mode
+ type: int
+ returned: success
+ sample: 660
+roles:
+  description: list of dictionaries of roles, where each role is described by name, cardinality, state and node ids
+ type: list
+ returned: success
+ sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]},
+ {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]'
+'''
+
+import os
+import sys
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
+ "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
+
+
+def get_all_templates(module, auth):
+ try:
+ all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(all_templates.read())
+
+
+def get_template(module, auth, pred):
+ all_templates_dict = get_all_templates(module, auth)
+
+ found = 0
+ found_template = None
+ template_name = ''
+
+ if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
+ for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(template):
+ found = found + 1
+ found_template = template
+ template_name = template["NAME"]
+
+ if found <= 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg="There is no template with unique name: " + template_name)
+ else:
+ return found_template
+
+
+def get_all_services(module, auth):
+ try:
+ response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(response.read())
+
+
+def get_service(module, auth, pred):
+ all_services_dict = get_all_services(module, auth)
+
+ found = 0
+ found_service = None
+ service_name = ''
+
+ if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
+ for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(service):
+ found = found + 1
+ found_service = service
+ service_name = service["NAME"]
+
+    # fail if there are multiple services with the same name
+    if found > 1:
+        module.fail_json(msg="There are multiple services with the name: '" +
+                         service_name + "'. You have to use a unique service name or use 'service_id' instead.")
+ elif found <= 0:
+ return None
+ else:
+ return found_service
+
+
+def get_service_by_id(module, auth, service_id):
+ return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None
+
+
+def get_service_by_name(module, auth, service_name):
+ return get_service(module, auth, lambda service: (service["NAME"] == service_name))
+
+
+def get_service_info(module, auth, service):
+
+ result = {
+ "service_id": int(service["ID"]),
+ "service_name": service["NAME"],
+ "group_id": int(service["GID"]),
+ "group_name": service["GNAME"],
+ "owner_id": int(service["UID"]),
+ "owner_name": service["UNAME"],
+ "state": STATES[service["TEMPLATE"]["BODY"]["state"]]
+ }
+
+ roles_status = service["TEMPLATE"]["BODY"]["roles"]
+ roles = []
+ for role in roles_status:
+ nodes_ids = []
+ if "nodes" in role:
+ for node in role["nodes"]:
+ nodes_ids.append(node["deploy_id"])
+ roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids})
+
+ result["roles"] = roles
+ result["mode"] = int(parse_service_permissions(service))
+
+ return result
+
+
+def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout):
+ # make sure that the values in custom_attrs dict are strings
+ custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items())
+
+ data = {
+ "action": {
+ "perform": "instantiate",
+ "params": {
+ "merge_template": {
+ "custom_attrs_values": custom_attrs_with_str,
+ "name": service_name
+ }
+ }
+ }
+ }
+
+ try:
+ response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
+ data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ service_result = module.from_json(response.read())["DOCUMENT"]
+
+ return service_result
+
+
+def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id), method="GET",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Request for service status has failed. Error message: " + str(e))
+
+ status_result = module.from_json(status_result.read())
+ service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"]
+
+ if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]:
+ return status_result["DOCUMENT"]
+ elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]:
+ log_message = ''
+ for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]:
+ if log_info["severity"] == "E":
+ log_message = log_message + log_info["message"]
+ break
+
+ module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message)
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired")
+
+
+def change_service_permissions(module, auth, service_id, permissions):
+
+ data = {
+ "action": {
+ "perform": "chmod",
+ "params": {"octet": permissions}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_owner(module, auth, service_id, owner_id):
+ data = {
+ "action": {
+ "perform": "chown",
+ "params": {"owner_id": owner_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_group(module, auth, service_id, group_id):
+
+ data = {
+ "action": {
+ "perform": "chgrp",
+ "params": {"group_id": group_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_role_cardinality(module, auth, service_id, role, cardinality, force):
+
+ data = {
+ "cardinality": cardinality,
+ "force": force
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if status_result.getcode() != 204:
+ module.fail_json(msg="Failed to change cardinality for role: " + role + ". Return code: " + str(status_result.getcode()))
+
+
+def check_change_service_owner(module, service, owner_id):
+ old_owner_id = int(service["UID"])
+
+ return old_owner_id != owner_id
+
+
+def check_change_service_group(module, service, group_id):
+ old_group_id = int(service["GID"])
+
+ return old_group_id != group_id
+
+
+def parse_service_permissions(service):
+ perm_dict = service["PERMISSIONS"]
+ '''
+ This is the structure of the 'PERMISSIONS' dictionary:
+
+ "PERMISSIONS": {
+ "OWNER_U": "1",
+ "OWNER_M": "1",
+ "OWNER_A": "0",
+ "GROUP_U": "0",
+ "GROUP_M": "0",
+ "GROUP_A": "0",
+ "OTHER_U": "0",
+ "OTHER_M": "0",
+ "OTHER_A": "0"
+ }
+ '''
+
+ owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
+ group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
+ other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
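+    # Worked example: OWNER_U=1, OWNER_M=1, GROUP_U=1 and every other bit 0
+    # gives owner 1*4 + 1*2 = 6, group 4, other 0, i.e. the string '640'.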
+
+ return permissions
+
+
+def check_change_service_permissions(module, service, permissions):
+ old_permissions = parse_service_permissions(service)
+
+ return old_permissions != permissions
+
+
+def check_change_role_cardinality(module, service, role_name, cardinality):
+ roles_list = service["TEMPLATE"]["BODY"]["roles"]
+
+ for role in roles_list:
+ if role["name"] == role_name:
+ return int(role["cardinality"]) != cardinality
+
+ module.fail_json(msg="There is no role with name: " + role_name)
+
+
+def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout):
+ if not service_name:
+ service_name = ''
+ changed = False
+ service = None
+
+ if unique:
+ service = get_service_by_name(module, auth, service_name)
+
+ if not service:
+ if not module.check_mode:
+ service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout)
+ changed = True
+
+    # in check mode the service was never actually created, so there is nothing to fetch
+ if module.check_mode and changed:
+ return {"changed": True}
+
+ result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait,
+ wait_timeout=wait_timeout, permissions=permissions, service=service)
+
+ if result["changed"]:
+ changed = True
+
+ result["changed"] = changed
+
+ return result
+
+
+def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None,
+ role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None):
+
+ changed = False
+
+ if not service:
+ service = get_service_by_id(module, auth, service_id)
+ else:
+ service_id = service["ID"]
+
+ if not service:
+ module.fail_json(msg="There is no service with id: " + str(service_id))
+
+ if owner_id:
+ if check_change_service_owner(module, service, owner_id):
+ if not module.check_mode:
+ change_service_owner(module, auth, service_id, owner_id)
+ changed = True
+ if group_id:
+ if check_change_service_group(module, service, group_id):
+ if not module.check_mode:
+ change_service_group(module, auth, service_id, group_id)
+ changed = True
+ if permissions:
+ if check_change_service_permissions(module, service, permissions):
+ if not module.check_mode:
+ change_service_permissions(module, auth, service_id, permissions)
+ changed = True
+
+ if role:
+ if check_change_role_cardinality(module, service, role, cardinality):
+ if not module.check_mode:
+ change_role_cardinality(module, auth, service_id, role, cardinality, force)
+ changed = True
+
+ if wait and not module.check_mode:
+ service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout)
+
+ # if something has changed, fetch service info again
+ if changed:
+ service = get_service_by_id(module, auth, service_id)
+
+ service_info = get_service_info(module, auth, service)
+ service_info["changed"] = changed
+
+ return service_info
+
+
+def delete_service(module, auth, service_id):
+ service = get_service_by_id(module, auth, service_id)
+ if not service:
+ return {"changed": False}
+
+ service_info = get_service_info(module, auth, service)
+
+ service_info["changed"] = True
+
+ if module.check_mode:
+ return service_info
+
+ try:
+ result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Service deletion has failed. Error message: " + str(e))
+
+ return service_info
+
+
+def get_template_by_name(module, auth, template_name):
+ return get_template(module, auth, lambda template: (template["NAME"] == template_name))
+
+
+def get_template_by_id(module, auth, template_id):
+ return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None
+
+
+def get_template_id(module, auth, requested_id, requested_name):
+ template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name)
+
+ if template:
+ return template["ID"]
+
+ return None
+
+
+def get_service_id_by_name(module, auth, service_name):
+ service = get_service_by_name(module, auth, service_name)
+
+ if service:
+ return service["ID"]
+
+ return None
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONEFLOW_URL')
+
+ if not username:
+ username = os.environ.get('ONEFLOW_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONEFLOW_PASSWORD')
+
+    if not (url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'user', 'password'))
+
+ return auth_params(url=url, user=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "service_name": {"required": False, "type": "str"},
+ "service_id": {"required": False, "type": "int"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "unique": {"default": False, "type": "bool"},
+ "wait": {"default": False, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "custom_attrs": {"default": {}, "type": "dict"},
+ "role": {"required": False, "type": "str"},
+ "cardinality": {"required": False, "type": "int"},
+ "force": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'service_id'],
+ ['service_id', 'service_name'],
+ ['template_id', 'template_name', 'role'],
+ ['template_id', 'template_name', 'cardinality'],
+ ['service_id', 'custom_attrs']
+ ],
+ required_together=[['role', 'cardinality']],
+ supports_check_mode=True)
+
+ auth = get_connection_info(module)
+ params = module.params
+ service_name = params.get('service_name')
+ service_id = params.get('service_id')
+
+ requested_template_id = params.get('template_id')
+ requested_template_name = params.get('template_name')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ unique = params.get('unique')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ custom_attrs = params.get('custom_attrs')
+ role = params.get('role')
+ cardinality = params.get('cardinality')
+ force = params.get('force')
+
+ template_id = None
+
+ if requested_template_id or requested_template_name:
+ template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
+ if not template_id:
+ if requested_template_id:
+ module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ if unique and not service_name:
+ module.fail_json(msg="You cannot use unique without passing service_name!")
+
+ if template_id and state == 'absent':
+ module.fail_json(msg="State absent is not valid for template")
+
+ if template_id and state == 'present': # Instantiate a service
+ result = create_service_and_operation(module, auth, template_id, service_name, owner_id,
+ group_id, permissions, custom_attrs, unique, wait, wait_timeout)
+ else:
+ if not (service_id or service_name):
+ module.fail_json(msg="To manage the service at least the service id or service name should be specified!")
+ if custom_attrs:
+ module.fail_json(msg="You can only set custom_attrs when instantiate service!")
+
+ if not service_id:
+ service_id = get_service_id_by_name(module, auth, service_name)
+        # fail when trying to manage a non-existent service identified by its name
+ if not service_id and state == 'present':
+ module.fail_json(msg="There is no service with name: " + service_name)
+
+ if state == 'absent':
+ result = delete_service(module, auth, service_id)
+ else:
+ result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py
new file mode 100644
index 00000000..286514bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py
@@ -0,0 +1,1599 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+(c) 2019, Jan Meerkamp <meerkamp@dvv.de>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_vm
+short_description: Creates or terminates OpenNebula instances
+description:
+ - Manages OpenNebula instances
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+      - URL of the OpenNebula RPC server.
+      - It is recommended to use HTTPS so that the username/password are not
+        transferred over the network unencrypted.
+      - If not set then the value of the C(ONE_URL) environment variable is used.
+    type: str
+  api_username:
+    description:
+      - Name of the user to login into the OpenNebula RPC server. If not set
+        then the value of the C(ONE_USERNAME) environment variable is used.
+    type: str
+  api_password:
+    description:
+      - Password of the user to login into the OpenNebula RPC server. If not set
+        then the value of the C(ONE_PASSWORD) environment variable is used.
+      - If neither I(api_username) nor I(api_password) is set, the module will try to
+        authenticate with the ONE auth file. The default path is C(~/.one/one_auth).
+      - Set the environment variable C(ONE_AUTH) to override this path.
+    type: str
+ template_name:
+ description:
+      - Name of VM template to use to create a new instance
+ type: str
+ template_id:
+ description:
+ - ID of a VM template to use to create a new instance
+ type: int
+ vm_start_on_hold:
+ description:
+      - Set to true to put the VM on hold while creating
+ default: False
+ type: bool
+ instance_ids:
+ description:
+ - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+ aliases: ['ids']
+ type: list
+ state:
+ description:
+ - C(present) - create instances from a template specified with C(template_id)/C(template_name).
+ - C(running) - run instances
+ - C(poweredoff) - power-off instances
+ - C(rebooted) - reboot instances
+ - C(absent) - terminate instances
+ choices: ["present", "absent", "running", "rebooted", "poweredoff"]
+ default: present
+ type: str
+ hard:
+ description:
+ - Reboot, power-off or terminate instances C(hard)
+ default: no
+ type: bool
+ wait:
+ description:
+      - Wait for the instance to reach its desired state before returning. Keep
+        in mind that if you are waiting for the instance to be in the running
+        state it doesn't mean that you will be able to SSH into that machine,
+        only that the boot process has started on that instance. See the
+        'wait_for' example for details.
+ default: yes
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ attributes:
+ description:
+      - A dictionary of key/value attributes to add to new instances, or for
+        setting C(state) of instances with these attributes.
+      - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+      - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
+      - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
+        indexes to the names of VMs.
+      - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
+      - When used with C(count_attributes) and C(exact_count) the module will
+        match the base name without the index part.
+ default: {}
+ type: dict
+ labels:
+ description:
+      - A list of labels to associate with new instances, or for setting
+        C(state) of instances with these labels.
+ default: []
+ type: list
+ count_attributes:
+ description:
+      - A dictionary of key/value attributes that can only be used with
+        C(exact_count) to determine how many nodes based on a specific
+        attributes criteria should be deployed. This can be expressed in
+        multiple ways and is shown in the EXAMPLES section.
+ type: dict
+ count_labels:
+ description:
+      - A list of labels that can only be used with C(exact_count) to determine
+        how many nodes based on a specific labels criteria should be deployed.
+        This can be expressed in multiple ways and is shown in the EXAMPLES
+        section.
+ type: list
+ count:
+ description:
+ - Number of instances to launch
+ default: 1
+ type: int
+ exact_count:
+ description:
+      - Indicates how many instances that match C(count_attributes) and
+        C(count_labels) parameters should be deployed. Instances are either
+        created or terminated based on this value.
+      - NOTE':' Instances with the least IDs will be terminated first.
+ type: int
+ mode:
+ description:
+ - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the instance
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the instance
+ type: int
+ memory:
+ description:
+ - The size of the memory for new instances (in MB, GB, ...)
+ type: str
+ disk_size:
+ description:
+      - The size of the disk created for new instances (in MB, GB, TB,...).
+      - NOTE':' If the template has multiple disks, the order of the sizes is
+        matched against the order specified in C(template_id)/C(template_name).
+ type: list
+ cpu:
+ description:
+      - Percentage of CPU divided by 100 required for the new instance. Half a
+        processor is written as 0.5.
+ type: float
+ vcpu:
+ description:
+ - Number of CPUs (cores) new VM will have.
+ type: int
+ networks:
+ description:
+ - A list of dictionaries with network parameters. See examples for more details.
+ default: []
+ type: list
+ disk_saveas:
+ description:
+ - Creates an image from a VM disk.
+ - It is a dictionary where you have to specify C(name) of the new image.
+ - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
+      - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
+        and the VM has to be in the C(poweredoff) state.
+      - Also this operation will fail if an image with the specified C(name) already exists.
+ type: dict
+ persistent:
+ description:
+ - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
+    default: no
+ type: bool
+ version_added: '0.2.0'
+ datastore_id:
+ description:
+      - ID of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: int
+ datastore_name:
+ description:
+      - Name of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+
+EXAMPLES = '''
+- name: Create a new instance
+ community.general.one_vm:
+ template_id: 90
+ register: result
+
+- name: Print VM properties
+ ansible.builtin.debug:
+    var: result
+
+- name: Deploy a new VM on hold
+ community.general.one_vm:
+ template_name: 'app1_template'
+    vm_start_on_hold: yes
+
+- name: Deploy a new VM and set its name to 'foo'
+ community.general.one_vm:
+ template_name: 'app1_template'
+ attributes:
+ name: foo
+
+- name: Deploy a new VM and set its group_id and mode
+ community.general.one_vm:
+ template_id: 90
+ group_id: 16
+ mode: 660
+
+- name: Deploy a new VM as persistent
+ community.general.one_vm:
+ template_id: 90
+ persistent: yes
+
+- name: Change VM's permissions to 640
+ community.general.one_vm:
+ instance_ids: 5
+ mode: 640
+
+- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+ community.general.one_vm:
+ template_id: 15
+ disk_size: 35.2 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 2
+ networks:
+ - NETWORK_ID: 27
+ - NETWORK: "default-network"
+ NETWORK_UNAME: "app-user"
+ SECURITY_GROUPS: "120,124"
+ - NETWORK_ID: 27
+ SECURITY_GROUPS: "10"
+
+- name: Deploy a new instance which uses a Template with two Disks
+ community.general.one_vm:
+ template_id: 42
+ disk_size:
+ - 35.2 GB
+ - 50 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 1
+ networks:
+ - NETWORK_ID: 27
+
+- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: foo
+ bar: bar1
+
+- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ foo1: app1
+ foo2: app2
+ exact_count: 2
+ count_attributes:
+ foo1: app1
+ foo2: app2
+
+- name: Enforce that 4 instances with an attribute 'bar' are deployed
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: app
+ bar: bar2
+ exact_count: 4
+ count_attributes:
+ bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-##
+ foo: bar
+ labels:
+ - app1
+ - app2
+ count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-###
+ app: app1
+ count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- name: Reboot all instances with names in a certain format
+ community.general.one_vm:
+ attributes:
+ name: fooapp-#
+ state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete oldest instances, so only the 'fooapp-003' will remain
+- name: Enforce that only 1 instance with name in a certain format is deployed
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 1
+ count_attributes:
+ name: fooapp-#
+
+- name: Deploy a new instance with a network
+ community.general.one_vm:
+ template_id: 53
+ networks:
+ - NETWORK_ID: 27
+ register: vm
+
+- name: Wait for SSH to come up
+  ansible.builtin.wait_for_connection:
+  delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+
+- name: Terminate VMs by ids
+ community.general.one_vm:
+ instance_ids:
+ - 153
+ - 160
+ state: absent
+
+- name: Reboot all VMs that have labels 'foo' and 'app1'
+ community.general.one_vm:
+ labels:
+ - foo
+ - app1
+ state: rebooted
+
+- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
+ community.general.one_vm:
+ attributes:
+ name: foo
+ app: bar
+ register: results
+
+- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ - foo2
+ count: 2
+
+- name: Enforce that only 1 instance with label 'foo1' will be running
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ exact_count: 1
+ count_labels:
+ - foo1
+
+- name: Terminate all instances that have attribute foo
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 0
+ count_attributes:
+ foo:
+
+- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ state: poweredoff
+ disk_saveas:
+ name: foo-image
+
+- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ disk_saveas:
+ name: bar-image
+ disk_id: 1
+'''
+
+RETURN = '''
+instances_ids:
+  description: a list of instance ids whose state is changed or which are fetched with the C(instance_ids) option.
+ type: list
+ returned: success
+ sample: [ 1234, 1235 ]
+instances:
+  description: a list of instance info whose state is changed or which are fetched with the C(instance_ids) option.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's owner name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: str
+ sample: 20480 MB
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+tagged_instances:
+ description:
+    - A list of instance info based on the specific attributes and/or labels
+      that are specified with the C(count_attributes) and C(count_labels)
+      options.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's user id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's user name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: list
+ sample: [
+ "20480 MB",
+ "10240 MB"
+ ]
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_template(module, client, predicate):
+
+ pool = client.templatepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all templates user can Use
+ found = 0
+ found_template = None
+ template_name = ''
+
+ for template in pool.VMTEMPLATE:
+ if predicate(template):
+ found = found + 1
+ found_template = template
+ template_name = template.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+        module.fail_json(msg='There are multiple templates with name: ' + template_name)
+ return found_template
+
+
+def get_template_by_name(module, client, template_name):
+ return get_template(module, client, lambda template: (template.NAME == template_name))
+
+
+def get_template_by_id(module, client, template_id):
+ return get_template(module, client, lambda template: (template.ID == template_id))
+
+
+def get_template_id(module, client, requested_id, requested_name):
+ template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
+ if template:
+ return template.ID
+ else:
+ return None
+
+
+def get_datastore(module, client, predicate):
+ pool = client.datastorepool.info()
+ found = 0
+ found_datastore = None
+ datastore_name = ''
+
+ for datastore in pool.DATASTORE:
+ if predicate(datastore):
+ found = found + 1
+ found_datastore = datastore
+ datastore_name = datastore.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+        module.fail_json(msg='There are multiple datastores with name: ' + datastore_name)
+ return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+ return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+ return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
+ datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
+ if datastore:
+ return datastore.ID
+ else:
+ return None
+
+
+def get_vm_by_id(client, vm_id):
+ try:
+ vm = client.vm.info(int(vm_id))
+    except Exception:  # narrowed from BaseException so e.g. KeyboardInterrupt still propagates
+ return None
+ return vm
+
+
+def get_vms_by_ids(module, client, state, ids):
+ vms = []
+
+ for vm_id in ids:
+ vm = get_vm_by_id(client, vm_id)
+ if vm is None and state != 'absent':
+ module.fail_json(msg='There is no VM with id=' + str(vm_id))
+ vms.append(vm)
+
+ return vms
+
+
+def get_vm_info(client, vm):
+
+ vm = client.vm.info(vm.ID)
+
+ networks_info = []
+
+ disk_size = []
+ if 'DISK' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['DISK'], list):
+ for disk in vm.TEMPLATE['DISK']:
+ disk_size.append(disk['SIZE'] + ' MB')
+ else:
+ disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
+
+ if 'NIC' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['NIC'], list):
+ for nic in vm.TEMPLATE['NIC']:
+ networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']})
+ else:
+ networks_info.append(
+ {'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'],
+ 'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']})
+ import time
+
+ current_time = time.localtime()
+ vm_start_time = time.localtime(vm.STIME)
+
+ vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
+ vm_uptime /= (60 * 60)
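+    # vm_uptime is now the wall-clock time since the VM's STIME expressed in
+    # hours (truncated to an int for the 'uptime_h' field below).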
+
+ permissions_str = parse_vm_permissions(client, vm)
+
+ # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
+ vm_lcm_state = None
+ if vm.STATE == VM_STATES.index('ACTIVE'):
+ vm_lcm_state = LCM_STATES[vm.LCM_STATE]
+
+ vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ info = {
+ 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
+ 'vm_id': vm.ID,
+ 'vm_name': vm.NAME,
+ 'state': VM_STATES[vm.STATE],
+ 'lcm_state': vm_lcm_state,
+ 'owner_name': vm.UNAME,
+ 'owner_id': vm.UID,
+ 'networks': networks_info,
+ 'disk_size': disk_size,
+ 'memory': vm.TEMPLATE['MEMORY'] + ' MB',
+ 'vcpu': vm.TEMPLATE['VCPU'],
+ 'cpu': vm.TEMPLATE['CPU'],
+ 'group_name': vm.GNAME,
+ 'group_id': vm.GID,
+ 'uptime_h': int(vm_uptime),
+ 'attributes': vm_attributes,
+ 'mode': permissions_str,
+ 'labels': vm_labels
+ }
+
+ return info
+
+
+def parse_vm_permissions(client, vm):
+ vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
+
+ owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
+ group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
+ other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
+
+
+def set_vm_permissions(module, client, vms, permissions):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ old_permissions = parse_vm_permissions(client, vm)
+ changed = changed or old_permissions != permissions
+
+ if not module.check_mode and old_permissions != permissions:
+ permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
+ mode_bits = [int(d) for d in permissions_str]
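+            # e.g. '640' -> '110100000' -> [1, 1, 0, 1, 0, 0, 0, 0, 0]; the nine
+            # bits feed vm.chmod() below. Note this assumes the owner octet is
+            # non-zero, otherwise bin() yields fewer than nine digits.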
+ try:
+ client.vm.chmod(
+ vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ if owner_id is None:
+ owner_id = vm.UID
+ if group_id is None:
+ group_id = vm.GID
+
+ changed = changed or owner_id != vm.UID or group_id != vm.GID
+
+ if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
+ try:
+ client.vm.chown(vm.ID, owner_id, group_id)
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def get_size_in_MB(module, size_str):
+
+ SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
+
+ s = size_str
+ init = size_str
+ num = ""
+    while s and (s[0:1].isdigit() or s[0:1] == '.'):
+ num += s[0]
+ s = s[1:]
+ num = float(num)
+ symbol = s.strip()
+
+ if symbol not in SYMBOLS:
+ module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
+
+ prefix = {'B': 1}
+
+ for i, s in enumerate(SYMBOLS[1:]):
+ prefix[s] = 1 << (i + 1) * 10
+
+ size_in_bytes = int(num * prefix[symbol])
+ size_in_MB = size_in_bytes / (1024 * 1024)
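+    # Worked example: '2 GB' -> num=2.0 and prefix['GB'] = 1 << 30 bytes, so
+    # size_in_bytes = 2147483648 and size_in_MB = 2048.0.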
+
+ return size_in_MB
+
+
+def create_disk_str(module, client, template_id, disk_size_list):
+
+ if not disk_size_list:
+ return ''
+
+ template = client.template.info(template_id)
+ if isinstance(template.TEMPLATE['DISK'], list):
+ # check if the number of disks is correct
+ if len(template.TEMPLATE['DISK']) != len(disk_size_list):
+ module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
+ result = ''
+ index = 0
+ for DISKS in template.TEMPLATE['DISK']:
+ disk = {}
+ diskresult = ''
+            # Get all info about the existing disk, e.g. IMAGE_ID, ...
+ for key, value in DISKS.items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
+ result += diskresult
+ index += 1
+ else:
+ if len(disk_size_list) > 1:
+ module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
+ disk = {}
+        # Get all info about the existing disk, e.g. IMAGE_ID, ...
+ for key, value in template.TEMPLATE['DISK'].items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
+
+ return result
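+
+# Illustrative only (made-up values): for a template with a single disk whose
+# IMAGE_ID is 10 and disk_size_list=['2 GB'], create_disk_str() returns
+#   DISK = [IMAGE_ID="10", SIZE=2048]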
+
+
+def create_attributes_str(attributes_dict, labels_list):
+
+ attributes_str = ''
+
+ if labels_list:
+ attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n'
+ if attributes_dict:
+ attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n'
+
+ return attributes_str
+
+
+def create_nics_str(network_attrs_list):
+ nics_str = ''
+
+ for network in network_attrs_list:
+ # Packing key-value dict in string with format key="value", key="value"
+ network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items())
+ nics_str = nics_str + 'NIC = [' + network_str + ']\n'
+
+ return nics_str
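+
+# Illustrative only (made-up values):
+#   create_attributes_str({'MEMORY': '2048'}, ['prod']) returns
+#     LABELS="prod"
+#     MEMORY="2048"
+#   create_nics_str([{'NETWORK_ID': 0}]) returns
+#     NIC = [NETWORK_ID="0"]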
+
+
+def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
+
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ disk_str = create_disk_str(module, client, template_id, disk_size)
+ vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
+ try:
+ vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ vm = get_vm_by_id(client, vm_id)
+
+ return get_vm_info(client, vm)
+
+
+def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
+ counter = 0
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ while cnt_str in vm_filled_indexes_list:
+ counter = counter + 1
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ return cnt_str
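+
+# Illustrative only: with indexes '000' and '001' already taken and
+# num_sign_cnt=3, the next free index is '002'.
+# >>> generate_next_index(['000', '001'], 3)
+# '002'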
+
+
+def get_vm_labels_and_attributes_dict(client, vm_id):
+ vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
+
+ attrs_dict = {}
+ labels_list = []
+
+ for key, value in vm_USER_TEMPLATE.items():
+ if key != 'LABELS':
+ attrs_dict[key] = value
+ else:
+            if value is not None:
+ labels_list = value.split(',')
+
+ return labels_list, attrs_dict
+
+
+def get_all_vms_by_attributes(client, attributes_dict, labels_list):
+ pool = client.vmpool.info(-2, -1, -1, -1).VM
+ vm_list = []
+ name = ''
+ if attributes_dict:
+ name = attributes_dict.pop('NAME', '')
+
+ if name != '':
+ base_name = name[:len(name) - name.count('#')]
+        # Check whether the name uses the indexed format (trailing '#' signs)
+ with_hash = name.endswith('#')
+
+ for vm in pool:
+ if vm.NAME.startswith(base_name):
+ if with_hash and vm.NAME[len(base_name):].isdigit():
+                    # Indexed format: match if everything after base_name is digits
+ vm_list.append(vm)
+ elif not with_hash and vm.NAME == name:
+                    # Non-indexed format: the name has to match exactly
+ vm_list.append(vm)
+ pool = vm_list
+
+ import copy
+
+ vm_list = copy.copy(pool)
+
+ for vm in pool:
+ remove_list = []
+ vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ if attributes_dict and len(attributes_dict) > 0:
+ for key, val in attributes_dict.items():
+ if key in vm_attributes_dict:
+ if val and vm_attributes_dict[key] != val:
+ remove_list.append(vm)
+ break
+ else:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ remove_list = []
+ if labels_list and len(labels_list) > 0:
+ for label in labels_list:
+ if label not in vm_labels_list:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ return vm_list
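+
+# Illustrative only (hypothetical names): filtering with {'NAME': 'app-###'}
+# matches app-000, app-001, ... while {'NAME': 'app-000'} matches only that VM.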
+
+
+def create_count_of_vms(
+ module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
+ new_vms_list = []
+
+ vm_name = ''
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ if module.check_mode:
+ return True, [], []
+
+ # Create list of used indexes
+ vm_filled_indexes_list = None
+ num_sign_cnt = vm_name.count('#')
+ if vm_name != '' and num_sign_cnt > 0:
+ vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None)
+ base_name = vm_name[:len(vm_name) - num_sign_cnt]
+ vm_name = base_name
+ # Make list which contains used indexes in format ['000', '001',...]
+ vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
+
+ while count > 0:
+ new_vm_name = vm_name
+ # Create indexed name
+ if vm_filled_indexes_list is not None:
+ next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt)
+ vm_filled_indexes_list.append(next_index)
+ new_vm_name += next_index
+ # Update NAME value in the attributes in case there is index
+ attributes_dict['NAME'] = new_vm_name
+ new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
+ new_vm_id = new_vm_dict.get('vm_id')
+ new_vm = get_vm_by_id(client, new_vm_id)
+ new_vms_list.append(new_vm)
+ count -= 1
+
+ if vm_start_on_hold:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_hold(module, client, vm, wait_timeout)
+ else:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_running(module, client, vm, wait_timeout)
+
+ return True, new_vms_list, []
+
+
+def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
+ labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
+
+ vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
+
+ vm_count_diff = exact_count - len(vm_list)
+ changed = vm_count_diff != 0
+
+ new_vms_list = []
+ instances_list = []
+ tagged_instances_list = vm_list
+
+ if module.check_mode:
+ return changed, instances_list, tagged_instances_list
+
+ if vm_count_diff > 0:
+ # Add more VMs
+ changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
+ labels_list, disk_size, network_attrs_list, wait, wait_timeout,
+ vm_start_on_hold, vm_persistent)
+
+ tagged_instances_list += instances_list
+ elif vm_count_diff < 0:
+ # Delete surplus VMs
+ old_vms_list = []
+
+ while vm_count_diff < 0:
+ old_vm = vm_list.pop(0)
+ old_vms_list.append(old_vm)
+ terminate_vm(module, client, old_vm, hard)
+ vm_count_diff += 1
+
+ if wait:
+ for vm in old_vms_list:
+ wait_for_done(module, client, vm, wait_timeout)
+
+ instances_list = old_vms_list
+ # store only the remaining instances
+ old_vms_set = set(old_vms_list)
+ tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
+
+ return changed, instances_list, tagged_instances_list
+
+
+# VM state names indexed by the numeric OpenNebula state id; the empty string
+# keeps index 7 (a state removed from OpenNebula) so positional lookups below
+# stay aligned.
+VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
+LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
+              'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME',
+              'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF',
+              'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC',
+              'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPLUG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
+
+
+def wait_for_state(module, client, vm, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ vm = client.vm.info(vm.ID)
+ state = vm.STATE
+ lcm_state = vm.LCM_STATE
+
+ if state_predicate(state, lcm_state):
+ return vm
+ elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
+ VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
+ module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state])
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_running(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state,
+ lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
+
+
+def wait_for_done(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
+
+
+def wait_for_hold(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
+
+
+def wait_for_poweroff(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
+
+
+def terminate_vm(module, client, vm, hard=False):
+ changed = False
+
+ if not vm:
+ return changed
+
+ changed = True
+
+ if not module.check_mode:
+ if hard:
+ client.vm.action('terminate-hard', vm.ID)
+ else:
+ client.vm.action('terminate', vm.ID)
+
+ return changed
+
+
+def terminate_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = terminate_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def poweroff_vm(module, client, vm, hard):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ if not hard:
+ client.vm.action('poweroff', vm.ID)
+ else:
+ client.vm.action('poweroff-hard', vm.ID)
+
+ return changed
+
+
+def poweroff_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = poweroff_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def reboot_vms(module, client, vms, wait_timeout, hard):
+
+ if not module.check_mode:
+ # Firstly, power-off all instances
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ poweroff_vm(module, client, vm, hard)
+
+ # Wait for all to be power-off
+ for vm in vms:
+ wait_for_poweroff(module, client, vm, wait_timeout)
+
+ for vm in vms:
+ resume_vm(module, client, vm)
+
+ return True
+
+
+def resume_vm(module, client, vm):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
+ module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
+ "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
+ if lcm_state not in [LCM_STATES.index('RUNNING')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ client.vm.action('resume', vm.ID)
+
+ return changed
+
+
+def resume_vms(module, client, vms):
+ changed = False
+
+ for vm in vms:
+ changed = resume_vm(module, client, vm) or changed
+
+ return changed
+
+
+def check_name_attribute(module, attributes):
+ if attributes.get("NAME"):
+ import re
+ if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+ module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
+ "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+ "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+ "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
+ for key in attributes.keys():
+ if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+ module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+ # Check the format of the name attribute
+ check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
+ if not disk_saveas.get('name'):
+ module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+ image_name = disk_saveas.get('name')
+ disk_id = disk_saveas.get('disk_id', 0)
+
+ if not module.check_mode:
+ if vm.STATE != VM_STATES.index('POWEROFF'):
+ module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
+ try:
+ client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not username:
+ if not password:
+ authfile = os.environ.get('ONE_AUTH')
+ if authfile is None:
+ authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
+ try:
+ with open(authfile, "r") as fp:
+ authstring = fp.read().rstrip()
+ username = authstring.split(":")[0]
+ password = authstring.split(":")[1]
+ except (OSError, IOError):
+ module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
+ except Exception:
+ module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
+ if not url:
+ module.fail_json(msg="Opennebula API url (api_url) is not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
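+
+# Illustrative only: the ONE_AUTH file read above is expected to hold a single
+# 'username:password' line, for example:
+#   oneadmin:secret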
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "vm_start_on_hold": {"default": False, "type": "bool"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "wait": {"default": True, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "hard": {"default": False, "type": "bool"},
+ "memory": {"required": False, "type": "str"},
+ "cpu": {"required": False, "type": "float"},
+ "vcpu": {"required": False, "type": "int"},
+ "disk_size": {"required": False, "type": "list"},
+ "datastore_name": {"required": False, "type": "str"},
+ "datastore_id": {"required": False, "type": "int"},
+ "networks": {"default": [], "type": "list"},
+ "count": {"default": 1, "type": "int"},
+ "exact_count": {"required": False, "type": "int"},
+ "attributes": {"default": {}, "type": "dict"},
+ "count_attributes": {"required": False, "type": "dict"},
+ "labels": {"default": [], "type": "list"},
+ "count_labels": {"required": False, "type": "list"},
+ "disk_saveas": {"type": "dict"},
+ "persistent": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'instance_ids'],
+ ['template_id', 'template_name', 'disk_saveas'],
+ ['instance_ids', 'count_attributes', 'count'],
+ ['instance_ids', 'count_labels', 'count'],
+ ['instance_ids', 'exact_count'],
+ ['instance_ids', 'attributes'],
+ ['instance_ids', 'labels'],
+ ['disk_saveas', 'attributes'],
+ ['disk_saveas', 'labels'],
+ ['exact_count', 'count'],
+ ['count', 'hard'],
+ ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
+ ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
+ ['instance_ids', 'networks'],
+ ['persistent', 'disk_size']
+ ],
+ supports_check_mode=True)
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ instance_ids = params.get('instance_ids')
+ requested_template_name = params.get('template_name')
+ requested_template_id = params.get('template_id')
+ put_vm_on_hold = params.get('vm_start_on_hold')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ hard = params.get('hard')
+ memory = params.get('memory')
+ cpu = params.get('cpu')
+ vcpu = params.get('vcpu')
+ disk_size = params.get('disk_size')
+ requested_datastore_id = params.get('datastore_id')
+ requested_datastore_name = params.get('datastore_name')
+ networks = params.get('networks')
+ count = params.get('count')
+ exact_count = params.get('exact_count')
+ attributes = params.get('attributes')
+ count_attributes = params.get('count_attributes')
+ labels = params.get('labels')
+ count_labels = params.get('count_labels')
+ disk_saveas = params.get('disk_saveas')
+ persistent = params.get('persistent')
+
+    if not (auth.username and auth.password):
+        module.fail_json(msg="Missing credentials: provide api_username/api_password, the "
+                             "ONE_USERNAME/ONE_PASSWORD environment variables, or an ONE_AUTH file.")
+
+    one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ if attributes:
+ attributes = dict((key.upper(), value) for key, value in attributes.items())
+ check_attributes(module, attributes)
+
+ if count_attributes:
+ count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
+ if not attributes:
+ import copy
+            module.warn('`count_attributes` was passed without `attributes`; when deploying, `attributes` implicitly takes the same values.')
+ attributes = copy.copy(count_attributes)
+ check_attributes(module, count_attributes)
+
+ if count_labels and not labels:
+        module.warn('`count_labels` was passed without `labels`; when deploying, `labels` implicitly takes the same values.')
+ labels = count_labels
+
+ # Fetch template
+ template_id = None
+ if requested_template_id is not None or requested_template_name:
+ template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
+ if template_id is None:
+ if requested_template_id is not None:
+ module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ # Fetch datastore
+ datastore_id = None
+ if requested_datastore_id or requested_datastore_name:
+ datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
+ if datastore_id is None:
+ if requested_datastore_id:
+ module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
+ elif requested_datastore_name:
+ module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
+ else:
+ attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
+
+ if exact_count and template_id is None:
+ module.fail_json(msg='Option `exact_count` needs template_id or template_name')
+
+ if exact_count is not None and not (count_attributes or count_labels):
+ module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
+ if (count_attributes or count_labels) and exact_count is None:
+ module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
+ if template_id is not None and state != 'present':
+ module.fail_json(msg="Only state 'present' is valid for the template")
+
+ if memory:
+ attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
+ if cpu:
+ attributes['CPU'] = str(cpu)
+ if vcpu:
+ attributes['VCPU'] = str(vcpu)
+
+ if exact_count is not None and state != 'present':
+ module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
+ if exact_count is not None and exact_count < 0:
+ module.fail_json(msg='`exact_count` cannot be less than 0')
+ if count <= 0:
+ module.fail_json(msg='`count` has to be greater than 0')
+
+ if permissions is not None:
+ import re
+ if re.match("^[0-7]{3}$", permissions) is None:
+ module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
+
+ if exact_count is not None:
+ # Deploy an exact count of VMs
+ changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
+ count_attributes, labels, count_labels, disk_size,
+ networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
+ vms = tagged_instances_list
+ elif template_id is not None and state == 'present':
+ # Deploy count VMs
+ changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
+ attributes, labels, disk_size, networks, wait, wait_timeout,
+ put_vm_on_hold, persistent)
+ # instances_list - new instances
+ # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+ vms = instances_list
+ else:
+ # Fetch data of instances, or change their state
+ if not (instance_ids or attributes or labels):
+ module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")
+
+ if memory or cpu or vcpu or disk_size or networks:
+ module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")
+
+ if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
+ module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
+
+ vms = []
+ tagged = False
+ changed = False
+
+ if instance_ids:
+ vms = get_vms_by_ids(module, one_client, state, instance_ids)
+ else:
+ tagged = True
+ vms = get_all_vms_by_attributes(one_client, attributes, labels)
+
+ if len(vms) == 0 and state != 'absent' and state != 'present':
+ module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
+
+ if len(vms) == 0 and state == 'present' and not tagged:
+ module.fail_json(msg='There are no instances with specified `instance_ids`.')
+
+ if tagged and state == 'absent':
+ module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
+ if state == 'absent':
+ changed = terminate_vms(module, one_client, vms, hard)
+ elif state == 'rebooted':
+ changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
+ elif state == 'poweredoff':
+ changed = poweroff_vms(module, one_client, vms, hard)
+ elif state == 'running':
+ changed = resume_vms(module, one_client, vms)
+
+ instances_list = vms
+ tagged_instances_list = []
+
+ if permissions is not None:
+ changed = set_vm_permissions(module, one_client, vms, permissions) or changed
+
+ if owner_id is not None or group_id is not None:
+ changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
+
+ if wait and not module.check_mode and state != 'present':
+ wait_for = {
+ 'absent': wait_for_done,
+ 'rebooted': wait_for_running,
+ 'poweredoff': wait_for_poweroff,
+ 'running': wait_for_running
+ }
+ for vm in vms:
+ if vm is not None:
+ wait_for[state](module, one_client, vm, wait_timeout)
+
+ if disk_saveas is not None:
+ if len(vms) == 0:
+ module.fail_json(msg="There is no VM whose disk will be saved.")
+ disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
+ changed = True
+
+ # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
+ instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
+ instances_ids = list(vm.ID for vm in instances_list if vm is not None)
+ # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels)
+ tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
+
+ result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py
new file mode 100644
index 00000000..06dc4af0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oci_vcn
+short_description: Manage Virtual Cloud Networks (VCN) in OCI
+description:
+    - This module allows the user to create, delete and update virtual cloud networks (VCNs) in OCI.
+ The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from
+ U(https://github.com/oracle/oci-ansible-modules/releases).
+options:
+ cidr_block:
+ description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present).
+ type: str
+ required: false
+ compartment_id:
+ description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present).
+ This option is mutually exclusive with I(vcn_id).
+ type: str
+ display_name:
+ description: A user-friendly name. Does not have to be unique, and it's changeable.
+ type: str
+ aliases: [ 'name' ]
+ dns_label:
+ description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to
+ form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example,
+ bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice
+ to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins
+ with a letter. The value cannot be changed.
+ type: str
+ state:
+ description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ vcn_id:
+ description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN
+ with I(state=present). This option is mutually exclusive with I(compartment_id).
+ type: str
+ aliases: [ 'id' ]
+author: "Rohit Chaware (@rohitChaware)"
+extends_documentation_fragment:
+- community.general.oracle
+- community.general.oracle_creatable_resource
+- community.general.oracle_wait_options
+- community.general.oracle_tags
+
+'''
+
+EXAMPLES = """
+- name: Create a VCN
+ community.general.oci_vcn:
+ cidr_block: '10.0.0.0/16'
+ compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
+ display_name: my_vcn
+ dns_label: ansiblevcn
+
+- name: Updates the specified VCN's display name
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ display_name: ansible_vcn
+
+- name: Delete the specified VCN
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ state: absent
+"""
+
+RETURN = """
+vcn:
+ description: Information about the VCN
+ returned: On successful create and update operation
+ type: dict
+ sample: {
+ "cidr_block": "10.0.0.0/16",
+ compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
+
+try:
+ from oci.core.virtual_network_client import VirtualNetworkClient
+ from oci.core.models import CreateVcnDetails
+ from oci.core.models import UpdateVcnDetails
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+def delete_vcn(virtual_network_client, module):
+ result = oci_utils.delete_and_wait(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ delete_fn=virtual_network_client.delete_vcn,
+ kwargs_delete={"vcn_id": module.params["vcn_id"]},
+ module=module,
+ )
+ return result
+
+
+def update_vcn(virtual_network_client, module):
+ result = oci_utils.check_and_update_resource(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ update_fn=virtual_network_client.update_vcn,
+ primitive_params_update=["vcn_id"],
+ kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
+ module=module,
+ update_attributes=UpdateVcnDetails().attribute_map.keys(),
+ )
+ return result
+
+
+def create_vcn(virtual_network_client, module):
+ create_vcn_details = CreateVcnDetails()
+ for attribute in create_vcn_details.attribute_map.keys():
+ if attribute in module.params:
+ setattr(create_vcn_details, attribute, module.params[attribute])
+
+ result = oci_utils.create_and_wait(
+ resource_type="vcn",
+ create_fn=virtual_network_client.create_vcn,
+ kwargs_create={"create_vcn_details": create_vcn_details},
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ get_param="vcn_id",
+ module=module,
+ )
+ return result
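+
+# Illustrative only: CreateVcnDetails exposes attributes such as cidr_block,
+# compartment_id, display_name and dns_label through attribute_map, so the
+# loop in create_vcn copies any matching module parameters onto the request.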
+
+
+def main():
+ module_args = oci_utils.get_taggable_arg_spec(
+ supports_create=True, supports_wait=True
+ )
+ module_args.update(
+ dict(
+ cidr_block=dict(type="str", required=False),
+ compartment_id=dict(type="str", required=False),
+ display_name=dict(type="str", required=False, aliases=["name"]),
+ dns_label=dict(type="str", required=False),
+ state=dict(
+ type="str",
+ required=False,
+ default="present",
+ choices=["absent", "present"],
+ ),
+ vcn_id=dict(type="str", required=False, aliases=["id"]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ mutually_exclusive=[["compartment_id", "vcn_id"]],
+ )
+
+ if not HAS_OCI_PY_SDK:
+ module.fail_json(msg=missing_required_lib("oci"))
+
+ virtual_network_client = oci_utils.create_service_client(
+ module, VirtualNetworkClient
+ )
+
+ exclude_attributes = {"display_name": True, "dns_label": True}
+ state = module.params["state"]
+ vcn_id = module.params["vcn_id"]
+
+ if state == "absent":
+ if vcn_id is not None:
+ result = delete_vcn(virtual_network_client, module)
+ else:
+ module.fail_json(
+ msg="Specify vcn_id with state as 'absent' to delete a VCN."
+ )
+
+ else:
+ if vcn_id is not None:
+ result = update_vcn(virtual_network_client, module)
+ else:
+ result = oci_utils.check_and_create_resource(
+ resource_type="vcn",
+ create_fn=create_vcn,
+ kwargs_create={
+ "virtual_network_client": virtual_network_client,
+ "module": module,
+ },
+ list_fn=virtual_network_client.list_vcns,
+ kwargs_list={"compartment_id": module.params["compartment_id"]},
+ module=module,
+ model=CreateVcnDetails(),
+ exclude_attributes=exclude_attributes,
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py
new file mode 100644
index 00000000..7ed3a5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_failover
+short_description: Manage OVH IP failover address
+description:
+    - Manage OVH (French European hosting provider) IP failover addresses. For now, this module can only be used to move
+      an IP failover (or failover block) between services.
+author: "Pascal HERAUD (@pascalheraud)"
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+      You have to create an application (a key and secret) with a consumer
+      key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh >= 0.4.8
+options:
+ name:
+ required: true
+ description:
+ - The IP address to manage (can be a single IP like 1.1.1.1
+          or a block like 1.1.1.1/28)
+ type: str
+ service:
+ required: true
+ description:
+        - The name of the OVH service this IP address should be routed to
+ type: str
+ endpoint:
+ required: true
+ description:
+        - The endpoint to use (for instance ovh-eu)
+ type: str
+ wait_completion:
+ required: false
+ default: true
+ type: bool
+ description:
+ - If true, the module will wait for the IP address to be moved.
+ If false, exit without waiting. The taskId will be returned
+ in module output
+ wait_task_completion:
+ required: false
+ default: 0
+ description:
+ - If not 0, the module will wait for this task id to be
+ completed. Use wait_task_completion if you want to wait for
+ completion of a previously executed task with
+ wait_completion=false. You can execute this module repeatedly on
+ a list of failover IPs using wait_completion=false (see examples)
+ type: int
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ required: false
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed. Default is 120 seconds.
+ type: int
+
+'''
+
+EXAMPLES = '''
+# Route an IP address 1.1.1.1 to the service ns666.ovh.net
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_completion: false
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+ register: moved
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_task_completion: "{{moved.taskId}}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote_plus
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while client.get('/ip/{0}/task'.format(quote_plus(name)),
+ function='genericMoveFloatingIp',
+ status='todo'):
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def waitForTaskDone(client, name, taskId, timeout):
+ currentTimeout = timeout
+ while True:
+ task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
+ if task['status'] == 'done':
+ return True
+        time.sleep(5)  # Delay for 5 sec: completion can take a while, so do not hammer the API
+ currentTimeout -= 5
+ if currentTimeout < 0:
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ service=dict(required=True),
+ endpoint=dict(required=True),
+ wait_completion=dict(default=True, type='bool'),
+ wait_task_completion=dict(default=0, type='int'),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_OVH:
+        module.fail_json(msg='The ovh python module is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ service = module.params.get('service')
+ timeout = module.params.get('timeout')
+ wait_completion = module.params.get('wait_completion')
+ wait_task_completion = module.params.get('wait_task_completion')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+    # Check that the failover IP address exists
+ try:
+ ips = client.get('/ip', ip=name, type='failover')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of ips, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in ips and '{0}/32'.format(name) not in ips:
+ module.fail_json(msg='IP {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the properties '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if ipproperties['routedTo']['serviceName'] != service:
+ if not module.check_mode:
+ if wait_task_completion == 0:
+ # Move the IP and get the created taskId
+ task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
+ taskId = task['taskId']
+ result['moved'] = True
+ else:
+ # Just wait for the given taskId to be completed
+ taskId = wait_task_completion
+ result['moved'] = False
+ result['taskId'] = taskId
+ if wait_completion or wait_task_completion != 0:
+ if not waitForTaskDone(client, name, taskId, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of move ip to service'.format(timeout))
+ result['waited'] = True
+ else:
+ result['waited'] = False
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 00000000..965a499c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends
+author: Pascal Heraud (@pascalheraud)
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh > 0.3.5
+options:
+ name:
+ required: true
+ description:
+        - The internal name of the LoadBalancing IP (ip-X.X.X.X)
+ type: str
+ backend:
+ required: true
+ description:
+ - The IP address of the backend to update / modify / delete
+ type: str
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - Determines whether the backend is to be created/modified
+ or deleted
+ type: str
+ probe:
+ default: 'none'
+ choices: ['none', 'http', 'icmp' , 'oco']
+ description:
+ - Determines the type of probe to use for this backend
+ type: str
+ weight:
+ default: 8
+ description:
+ - Determines the weight for this backend
+ type: int
+ endpoint:
+ required: true
+ description:
+        - The endpoint to use (for instance ovh-eu)
+ type: str
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed.
+ type: int
+
+'''
+
+EXAMPLES = '''
+- name: Add or modify the backend '212.1.1.1' on the loadbalancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ probe: none
+ weight: 8
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+
+- name: Remove the backend '212.1.1.1' from the loadbalancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: absent
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ backend=dict(required=True),
+ weight=dict(default=8, type='int'),
+ probe=dict(default='none',
+ choices=['none', 'http', 'icmp', 'oco']),
+ state=dict(default='present', choices=['present', 'absent']),
+ endpoint=dict(required=True),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ )
+ )
+
+ if not HAS_OVH:
+        module.fail_json(msg='The ovh python module '
+                             'is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ state = module.params.get('state')
+ backend = module.params.get('backend')
+ weight = module.params.get('weight')
+ probe = module.params.get('probe')
+ timeout = module.params.get('timeout')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ loadBalancings = client.get('/ip/loadBalancing')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of loadBalancing, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in loadBalancings:
+ module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of backends '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ backendExists = backend in backends
+ moduleChanged = False
+ if state == "absent":
+ if backendExists:
+ # Remove backend
+ try:
+ client.delete(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of removing backend task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for deleting the backend, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+ else:
+ if backendExists:
+ # Get properties
+ try:
+ backendProperties = client.get(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the backend properties, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if (backendProperties['weight'] != weight):
+ # Change weight
+ try:
+ client.post(
+ '/ip/loadBalancing/{0}/backend/{1}/setWeight'
+ .format(name, backend), weight=weight)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of setWeight to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the weight of the '
+ 'backend, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ if (backendProperties['probe'] != probe):
+ # Change probe
+ backendProperties['probe'] = probe
+ try:
+ client.put(
+ '/ip/loadBalancing/{0}/backend/{1}'
+ .format(name, backend), probe=probe)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'setProbe to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the probe of '
+ 'the backend, check application key, secret, '
+ 'consumerkey and parameters. Error returned by OVH api '
+ 'was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ else:
+ # Creates backend
+ try:
+ try:
+ client.post('/ip/loadBalancing/{0}/backend'.format(name),
+ ipBackend=backend, probe=probe, weight=weight)
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'backend creation task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+ moduleChanged = True
+
+ module.exit_json(changed=moduleChanged)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py
new file mode 100644
index 00000000..75c70a79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Francois Lallart (@fraff)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovh_monthly_billing
+author: Francois Lallart (@fraff)
+version_added: '0.2.0'
+short_description: Manage OVH monthly billing
+description:
+    - Enable monthly billing on OVH cloud instances (be aware OVH does not allow disabling it).
+requirements: [ "ovh" ]
+options:
+ project_id:
+ required: true
+ type: str
+ description:
+ - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
+ instance_id:
+ required: true
+ type: str
+ description:
+ - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
+ endpoint:
+ type: str
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ application_key:
+ type: str
+ description:
+ - The applicationKey to use
+ application_secret:
+ type: str
+ description:
+ - The application secret to use
+ consumer_key:
+ type: str
+ description:
+ - The consumer key to use
+'''
+
+EXAMPLES = '''
+- name: Basic usage, using auth from /etc/ovh.conf
+ community.general.ovh_monthly_billing:
+ project_id: 0c727a20aa144485b70c44dee9123b46
+ instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
+
+# Get openstack cloud ID and instance ID, OVH use them in its API
+- name: Get openstack cloud ID and instance ID
+ os_server_info:
+ cloud: myProjectName
+ region_name: myRegionName
+ server: myServerName
+ register: openstack_servers
+
+- name: Use IDs
+ community.general.ovh_monthly_billing:
+ project_id: "{{ openstack_servers.0.tenant_id }}"
+ instance_id: "{{ openstack_servers.0.id }}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+ OVH_IMPORT_ERROR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_id=dict(required=True),
+ instance_id=dict(required=True),
+ endpoint=dict(required=False),
+ application_key=dict(required=False, no_log=True),
+ application_secret=dict(required=False, no_log=True),
+ consumer_key=dict(required=False, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ # Get parameters
+ project_id = module.params.get('project_id')
+ instance_id = module.params.get('instance_id')
+ endpoint = module.params.get('endpoint')
+ application_key = module.params.get('application_key')
+ application_secret = module.params.get('application_secret')
+ consumer_key = module.params.get('consumer_key')
+ project = ""
+ instance = ""
+ ovh_billing_status = ""
+
+ if not HAS_OVH:
+ module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
+
+ # Connect to OVH API
+ client = ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+    # Check that the project exists
+ try:
+ project = client.get('/cloud/project/{0}'.format(project_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='project {0} does not exist'.format(project_id))
+
+ # Check that the instance exists
+ try:
+ instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
+
+    # Is monthlyBilling already enabled or pending?
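+    # Illustrative only: once enabled, instance['monthlyBilling'] typically
+    # looks like {'since': '2020-01-01T00:00:00+01:00', 'status': 'ok'}.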
+ if instance['monthlyBilling'] is not None:
+ if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
+ module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling'])
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Dry Run!")
+
+ try:
+ ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
+ module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
+ except APIError as apiError:
+ module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
+
+ # We should never reach here
+ module.fail_json(msg='Internal ovh_monthly_billing module error')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py
new file mode 100644
index 00000000..e560e13e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label_facts
+short_description: Retrieve information about one or more oVirt/RHV affinity labels
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_affinity_label_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV affinity labels."
+notes:
+ - "This module returns a variable C(ovirt_affinity_labels), which
+ contains a list of affinity labels. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the affinity labels which should be listed."
+ vm:
+ description:
+ - "Name of the VM, which affinity labels should be listed."
+ host:
+ description:
+ - "Name of the host, which affinity labels should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all affinity labels, which names start with label
+ ovirt_affinity_label_info:
+ name: label*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+ - name: >
+ Gather information about all affinity labels that are assigned to VMs
+ whose names start with postgres
+ ovirt_affinity_label_info:
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+ - name: >
+ Gather information about all affinity labels that are assigned to hosts
+ whose names start with west
+ ovirt_affinity_label_info:
+ host: west*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+ - name: >
+ Gather information about all affinity labels that are assigned to hosts
+ whose names start with west or to VMs whose names start with postgres
+ ovirt_affinity_label_info:
+ host: west*
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+'''
+
+RETURN = '''
+ovirt_affinity_labels:
+ description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys,
+ all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_affinity_label_facts', 'community.general.ovirt_affinity_label_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ labels = []
+ all_labels = affinity_labels_service.list()
+ if module.params['name']:
+ labels.extend([
+ l for l in all_labels
+ if fnmatch.fnmatch(l.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ if search_by_name(hosts_service, module.params['host']) is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ labels.extend([
+ label
+ for label in all_labels
+ for host in connection.follow_link(label.hosts)
+ if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['vm']) is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ labels.extend([
+ label
+ for label in all_labels
+ for vm in connection.follow_link(label.vms)
+ if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ labels = all_labels
+
+ result = dict(
+ ovirt_affinity_labels=[
+ get_dict_of_struct(
+ struct=l,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for l in labels
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
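
All three filters in this module reduce to glob matching with fnmatch, applied to label names directly and to the names of linked hosts and VMs. A self-contained sketch of the same pattern (the sample names are illustrative):

    import fnmatch

    label_names = ['rack1-fast', 'rack2-fast', 'gpu-affinity']
    print([n for n in label_names if fnmatch.fnmatch(n, 'rack*')])
    # ['rack1-fast', 'rack2-fast']
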
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_api_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_api_facts.py
new file mode 100644
index 00000000..4085a702
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_api_facts.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_api_facts
+short_description: Retrieve information about the oVirt/RHV API
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_api_info) instead.
+description:
+ - "Retrieve information about the oVirt/RHV API."
+notes:
+ - "This module returns a variable C(ovirt_api),
+ which contains information about the oVirt/RHV API. You need to register the result with
+ the I(register) keyword to use it."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about the oVirt API
+ ovirt_api_info:
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_api }}"
+'''
+
+RETURN = '''
+ovirt_api:
+ description: "Dictionary describing the oVirt API information.
+ API attributes are mapped to dictionary keys,
+ all API attributes can be found at the following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/api."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec()
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_api_facts', 'community.general.ovirt_api_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_api_facts' module has been renamed to 'ovirt_api_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ api = connection.system_service().get()
+ result = dict(
+ ovirt_api=get_dict_of_struct(
+ struct=api,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ )
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
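
Every deprecated *_facts module in this series shares the same tail: the result dict is exposed through ansible_facts when the module is invoked under its old name, and returned as top-level keys otherwise. A sketch of that tail factored into a helper (the helper itself is illustrative; the modules inline it):

    def exit_with_result(module, is_old_facts, result):
        # Old *_facts behaviour: expose data via ansible_facts.
        if is_old_facts:
            module.exit_json(changed=False, ansible_facts=result)
        # New *_info behaviour: return the same dict as top-level keys.
        else:
            module.exit_json(changed=False, **result)
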
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py
new file mode 100644
index 00000000..e4916a26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster_facts
+short_description: Retrieve information about one or more oVirt/RHV clusters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_cluster_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV clusters."
+notes:
+ - "This module returns a variable C(ovirt_clusters), which
+ contains a list of clusters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search cluster X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about all clusters whose names start with production
+ ovirt_cluster_info:
+ pattern: name=production*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_clusters }}"
+'''
+
+RETURN = '''
+ovirt_clusters:
+ description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
+ all cluster attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_cluster_facts', 'community.general.ovirt_cluster_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_cluster_facts' module has been renamed to 'ovirt_cluster_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters = clusters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_clusters=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in clusters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
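
The pattern option is passed verbatim to clusters_service.list(search=...), so it follows the oVirt search-backend grammar: key=value terms joined with 'and'. A small sketch that builds such a string (the helper is illustrative, not part of the SDK):

    def build_search(**criteria):
        # The oVirt search backend joins terms with 'and'; sorting just
        # makes the output deterministic for this example.
        return ' and '.join('{0}={1}'.format(k, v) for k, v in sorted(criteria.items()))

    print(build_search(name='production*', datacenter='west'))
    # datacenter=west and name=production*
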
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py
new file mode 100644
index 00000000..0de72729
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter_facts
+short_description: Retrieve information about one or more oVirt/RHV datacenters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_datacenter_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV datacenters."
+notes:
+ - "This module returns a variable C(ovirt_datacenters), which
+ contains a list of datacenters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search datacenter I(X) use following pattern: I(name=X)"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about all data centers whose names start with production
+ ovirt_datacenter_info:
+ pattern: name=production*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_datacenters }}"
+'''
+
+RETURN = '''
+ovirt_datacenters:
+ description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
+ all datacenter attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_datacenter_facts', 'community.general.ovirt_datacenter_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_datacenter_facts' module has been renamed to 'ovirt_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ datacenters = datacenters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_datacenters=[
+ get_dict_of_struct(
+ struct=d,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for d in datacenters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_disk_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_disk_facts.py
new file mode 100644
index 00000000..6e0c9f69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_disk_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk_facts
+short_description: Retrieve information about one or more oVirt/RHV disks
+author: "Katerina Koukiou (@KKoukiou)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_disk_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV disks."
+notes:
+ - "This module returns a variable C(ovirt_disks), which
+ contains a list of disks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search Disk X from storage Y use following pattern:
+ name=X and storage.name=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about all disks whose names start with centos
+ ovirt_disk_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_disks }}"
+'''
+
+RETURN = '''
+ovirt_disks:
+ description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys,
+ all disk attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_disk_facts', 'community.general.ovirt_disk_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_disk_facts' module has been renamed to 'ovirt_disk_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks = disks_service.list(
+ search=module.params['pattern'],
+ )
+ result = dict(
+ ovirt_disks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in disks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_event_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_event_facts.py
new file mode 100644
index 00000000..50a20654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_event_facts.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event_facts
+short_description: Retrieve information about one or more oVirt/RHV events
+author: "Chris Keller (@nasx)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_event_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV events."
+options:
+ case_sensitive:
+ description:
+ - "Indicates if the search performed using the search parameter should be performed taking case
+ into account. The default value is true, which means that case is taken into account. If you
+ want to search ignoring case set it to false."
+ required: false
+ default: true
+ type: bool
+
+ from_:
+ description:
+ - "Indicates the event index after which events should be returned. The indexes of events are
+ strictly increasing, so when this parameter is used only the events with greater indexes
+ will be returned."
+ required: false
+ type: int
+
+ max:
+ description:
+ - "Sets the maximum number of events to return. If not specified all the events are returned."
+ required: false
+ type: int
+
+ search:
+ description:
+ - "Search term which is accepted by the oVirt/RHV API."
+ - "For example to search for events of severity alert use the following pattern: severity=alert"
+ required: false
+ type: str
+
+ headers:
+ description:
+ - "Additional HTTP headers."
+ required: false
+ type: str
+
+ query:
+ description:
+ - "Additional URL query parameters."
+ required: false
+ type: str
+
+ wait:
+ description:
+ - "If True wait for the response."
+ required: false
+ default: true
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Return all events
+ ovirt_event_info:
+ register: result
+
+- name: Return the last 10 events
+ ovirt_event_info:
+ max: 10
+ register: result
+
+- name: Return all events of type alert
+ ovirt_event_info:
+ search: "severity=alert"
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_events }}"
+'''
+
+RETURN = '''
+ovirt_events:
+ description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys.
+ All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+ returned: On success."
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ case_sensitive=dict(default=True, type='bool', required=False),
+ from_=dict(default=None, type='int', required=False),
+ max=dict(default=None, type='int', required=False),
+ search=dict(default='', required=False),
+ headers=dict(default='', required=False),
+ query=dict(default='', required=False),
+ wait=dict(default=True, type='bool', required=False)
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_event_facts', 'community.general.ovirt_event_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_event_facts' module has been renamed to 'ovirt_event_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
+ events = events_service.list(
+ case_sensitive=module.params['case_sensitive'],
+ from_=module.params['from_'],
+ max=module.params['max'],
+ search=module.params['search'],
+ headers=module.params['headers'],
+ query=module.params['query'],
+ wait=module.params['wait']
+ )
+
+ result = dict(
+ ovirt_events=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in events
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
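
from_ and max are forwarded to events_service.list() and evaluated server-side: events carry strictly increasing indexes, from_ keeps only events above that index, and max caps the count. A local mimic of those semantics (the dicts stand in for SDK event objects):

    def page_events(events, from_index=None, max_count=None):
        # Keep events with an index greater than from_, then cap the count.
        if from_index is not None:
            events = [e for e in events if e['index'] > from_index]
        if max_count is not None:
            events = events[:max_count]
        return events

    sample = [{'index': i} for i in range(1, 6)]
    print(page_events(sample, from_index=2, max_count=2))
    # [{'index': 3}, {'index': 4}]
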
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py
new file mode 100644
index 00000000..f9ac8b97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider_facts
+short_description: Retrieve information about one or more oVirt/RHV external providers
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_external_provider_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV external providers."
+notes:
+ - "This module returns a variable C(ovirt_external_providers), which
+ contains a list of external_providers. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ required: true
+ type: str
+ name:
+ description:
+ - "Name of the external provider, can be used as glob expression."
+ type: str
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all image external providers named glance
+ ovirt_external_provider_info:
+ type: os_image
+ name: glance
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_external_providers }}"
+'''
+
+RETURN = '''
+ovirt_external_providers:
+ description:
+ - "List of dictionaries. Content depends on I(type)."
+ - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
+ - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
+ - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: On success
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def _external_provider_service(provider_type, system_service):
+ if provider_type == 'os_image':
+ return system_service.openstack_image_providers_service()
+ elif provider_type == 'os_network':
+ return system_service.openstack_network_providers_service()
+ elif provider_type == 'os_volume':
+ return system_service.openstack_volume_providers_service()
+ elif provider_type == 'foreman':
+ return system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None, required=False),
+ type=dict(
+ required=True,
+ choices=['os_image', 'os_network', 'os_volume', 'foreman'],
+ aliases=['provider'],
+ ),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_external_provider_facts', 'community.general.ovirt_external_provider_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_external_provider_facts' module has been renamed to 'ovirt_external_provider_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ external_providers_service = _external_provider_service(
+ provider_type=module.params.pop('type'),
+ system_service=connection.system_service(),
+ )
+ if module.params['name']:
+ external_providers = [
+ e for e in external_providers_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ external_providers = external_providers_service.list()
+
+ result = dict(
+ ovirt_external_providers=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in external_providers
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
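
_external_provider_service() is a plain if/elif dispatch from the type option to the matching system-service accessor. An equivalent table-driven sketch (the method names mirror the calls above; unlike the original, an unknown type raises KeyError instead of silently returning None):

    _PROVIDER_SERVICES = {
        'os_image': 'openstack_image_providers_service',
        'os_network': 'openstack_network_providers_service',
        'os_volume': 'openstack_volume_providers_service',
        'foreman': 'external_host_providers_service',
    }

    def external_provider_service(provider_type, system_service):
        # Look up the accessor name, then call it on the system service.
        return getattr(system_service, _PROVIDER_SERVICES[provider_type])()
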
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_group_facts.py
new file mode 100644
index 00000000..40b037f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_group_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group_facts
+short_description: Retrieve information about one or more oVirt/RHV groups
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_group_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV groups."
+notes:
+ - "This module returns a variable C(ovirt_groups), which
+ contains a list of groups. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search group X use following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about all groups whose names start with admin
+ ovirt_group_info:
+ pattern: name=admin*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_groups }}"
+'''
+
+RETURN = '''
+ovirt_groups:
+ description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
+ all group attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_group_facts', 'community.general.ovirt_group_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_group_facts' module has been renamed to 'ovirt_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups = groups_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_groups=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in groups
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_facts.py
new file mode 100644
index 00000000..ea585e90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_facts.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_facts
+short_description: Retrieve information about one or more oVirt/RHV hosts
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV hosts."
+notes:
+ - "This module returns a variable C(ovirt_hosts), which
+ contains a list of hosts. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search host X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the hosts should be
+ included in the response."
+ default: False
+ type: bool
+ cluster_version:
+ description:
+ - "Filter the hosts based on the cluster version."
+ type: str
+
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about all hosts whose names start with host and belong to data center west
+ ovirt_host_info:
+ pattern: name=host* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+
+- name: Gather information about all hosts with cluster version 4.2
+ ovirt_host_info:
+ pattern: name=host*
+ cluster_version: "4.2"
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+'''
+
+RETURN = '''
+ovirt_hosts:
+ description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
+ all host attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def get_filtered_hosts(cluster_version, hosts, connection):
+ # Keep only the hosts whose cluster reports the same major.minor version as requested
+ filtered_hosts = []
+ for host in hosts:
+ cluster = connection.follow_link(host.cluster)
+ cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor)
+ if cluster_version_host == cluster_version:
+ filtered_hosts.append(host)
+ return filtered_hosts
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ cluster_version=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_facts', 'community.general.ovirt_host_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_facts' module has been renamed to 'ovirt_host_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ hosts = hosts_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content']
+ )
+ cluster_version = module.params.get('cluster_version')
+ if cluster_version is not None:
+ hosts = get_filtered_hosts(cluster_version, hosts, connection)
+ result = dict(
+ ovirt_hosts=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in hosts
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
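
Note that get_filtered_hosts() compares versions as strings, so cluster_version: "4.2" matches exactly that 'major.minor' rendering and nothing else ('4.10' does not match '4.1'). A standalone sketch of that comparison, with namedtuple stubs in place of SDK objects:

    from collections import namedtuple

    Version = namedtuple('Version', 'major minor')
    Cluster = namedtuple('Cluster', 'version')

    def matches_cluster_version(cluster, wanted):
        # Same 'major.minor' string equality as get_filtered_hosts() above.
        return '{0}.{1}'.format(cluster.version.major, cluster.version.minor) == wanted

    print(matches_cluster_version(Cluster(Version(4, 2)), '4.2'))   # True
    print(matches_cluster_version(Cluster(Version(4, 10)), '4.1'))  # False
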
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py
new file mode 100644
index 00000000..62af3e4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_storage_facts
+short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+author: "Daniel Erez (@derez)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_storage_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)."
+options:
+ host:
+ description:
+ - "Host to get device list from."
+ required: true
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ portal:
+ description:
+ - "The portal being used to connect with iscsi."
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the fibre channel storage server."
+ port:
+ description:
+ - "Port of the fibre channel storage server."
+ lun_id:
+ description:
+ - "LUN id."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about HostStorages with specified target and address
+ ovirt_host_storage_info:
+ host: myhost
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ address: 10.34.63.204
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_host_storages }}"
+'''
+
+RETURN = '''
+ovirt_host_storages:
+ description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys,
+ all HostStorage attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name,
+)
+
+
+def _login(host_service, iscsi):
+ host_service.iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=iscsi.get('username'),
+ password=iscsi.get('password'),
+ address=iscsi.get('address'),
+ target=iscsi.get('target'),
+ portal=iscsi.get('portal')
+ ),
+ )
+
+
+def _get_storage_type(params):
+ for sd_type in ['iscsi', 'fcp']:
+ if params.get(sd_type) is not None:
+ return sd_type
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ host=dict(required=True),
+ iscsi=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_storage_facts', 'community.general.ovirt_host_storage_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_storage_facts' module has been renamed to 'ovirt_host_storage_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ # Get Host
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, module.params['host'])
+ storage_type = _get_storage_type(module.params)
+ host_service = hosts_service.host_service(host_id)
+
+ if storage_type == 'iscsi':
+ # Login
+ iscsi = module.params.get('iscsi')
+ _login(host_service, iscsi)
+
+ # Get LUNs exposed from the specified target
+ host_storages = host_service.storage_service().list()
+
+ filtered_host_storages = host_storages
+ if storage_type == 'iscsi':
+ filtered_host_storages = [host_storage for host_storage in host_storages
+ if host_storage.type == otypes.StorageType.ISCSI]
+ if 'target' in iscsi:
+ filtered_host_storages = [host_storage for host_storage in filtered_host_storages
+ if iscsi.get('target') == host_storage.logical_units[0].target]
+ elif storage_type == 'fcp':
+ filtered_host_storages = [host_storage for host_storage in host_storages
+ if host_storage.type == otypes.StorageType.FCP]
+
+ result = dict(
+ ovirt_host_storages=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in filtered_host_storages
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
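
The iSCSI branch above first logs the host into the target, then keeps only block storages whose first LUN reports the requested IQN. A standalone sketch of that filter, with namedtuples standing in for ovirtsdk4.types objects:

    from collections import namedtuple

    LogicalUnit = namedtuple('LogicalUnit', 'target')
    HostStorage = namedtuple('HostStorage', 'type logical_units')

    def filter_by_target(host_storages, target):
        # Keep storages whose first LUN matches the requested IQN.
        return [hs for hs in host_storages
                if hs.logical_units and hs.logical_units[0].target == target]

    stores = [HostStorage('iscsi', [LogicalUnit('iqn.2016-08-09.domain-01:a')]),
              HostStorage('iscsi', [LogicalUnit('iqn.2016-08-09.domain-01:b')])]
    print(len(filter_by_target(stores, 'iqn.2016-08-09.domain-01:a')))  # 1
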
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_network_facts.py
new file mode 100644
index 00000000..781dd858
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_network_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network_facts
+short_description: Retrieve information about one or more oVirt/RHV networks
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_network_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV networks."
+notes:
+ - "This module returns a variable C(ovirt_networks), which
+ contains a list of networks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search network starting with string vlan1 use: name=vlan1*"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about all networks whose names start with vlan1
+ ovirt_network_info:
+ pattern: name=vlan1*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_networks }}"
+'''
+
+
+RETURN = '''
+ovirt_networks:
+ description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+ all network attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_network_facts', 'community.general.ovirt_network_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_network_facts' module has been renamed to 'ovirt_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ networks_service = connection.system_service().networks_service()
+ networks = networks_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_networks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in networks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_nic_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_nic_facts.py
new file mode 100644
index 00000000..2cc1194f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_nic_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_nic_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces."
+notes:
+ - "This module returns a variable C(ovirt_nics), which
+ contains a list of NICs. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM where NIC is attached."
+ required: true
+ name:
+ description:
+ - "Name of the NIC, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+ - name: Gather information about all NICs whose names start with eth for the VM named centos7
+ ovirt_nic_info:
+ vm: centos7
+ name: eth*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_nics }}"
+'''
+
+RETURN = '''
+ovirt_nics:
+ description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+ all NIC attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_nic_facts', 'community.general.ovirt_nic_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_nic_facts' module has been renamed to 'ovirt_nic_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ nics_service = vms_service.service(vm.id).nics_service()
+ if module.params['name']:
+ nics = [
+ e for e in nics_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ nics = nics_service.list()
+
+ result = dict(
+ ovirt_nics=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in nics
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
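
Unlike the pattern-based modules, this one resolves the mandatory vm option first and fails fast when no such VM exists, and only then lists and glob-filters its NICs. A sketch of that guard (search_by_name here is an illustrative stand-in for the module_utils helper of the same name):

    def search_by_name(service, name):
        # Stand-in: return the first entity with a matching name, else None.
        return next((e for e in service.list() if e.name == name), None)

    def require_vm(vms_service, name):
        # Fail fast when the VM is missing, as main() above does.
        vm = search_by_name(vms_service, name)
        if vm is None:
            raise Exception("VM '%s' was not found." % name)
        return vm
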
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_permission_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_permission_facts.py
new file mode 100644
index 00000000..52ba3624
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_permission_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission_facts
+short_description: Retrieve information about one or more oVirt/RHV permissions
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_permission_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV permissions."
+notes:
+ - "This module returns a variable C(ovirt_permissions), which
+ contains a list of permissions. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ user_name:
+ description:
+            - "Username of the user to manage. In most LDAPs it is the I(uid) of the user, but in Active Directory you must specify the I(UPN) of the user."
+ group_name:
+ description:
+ - "Name of the group to manage."
+ authz_name:
+ description:
+            - "Authorization provider of the user/group. In previous versions of oVirt/RHV this was known as the domain."
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+            - "Namespace of the authorization provider, where the user/group resides."
+ required: false
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all permissions of the user with username john
+ ovirt_permission_info:
+ user_name: john
+ authz_name: example.com-authz
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_permissions }}"
+'''
+
+RETURN = '''
+ovirt_permissions:
+    description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys;
+                  all permission attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_link_name,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def _permissions_service(connection, module):
+ if module.params['user_name']:
+ service = connection.system_service().users_service()
+ entity = next(
+ iter(
+ service.list(
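+                    # NB: 'usrname' (not 'username') is the attribute name the
+                    # oVirt search backend expects for user queries.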
+ search='usrname={0}'.format(
+ '{0}@{1}'.format(module.params['user_name'], module.params['authz_name'])
+ )
+ )
+ ),
+ None
+ )
+ else:
+ service = connection.system_service().groups_service()
+ entity = search_by_name(service, module.params['group_name'])
+
+ if entity is None:
+ raise Exception("User/Group wasn't found.")
+
+ return service.service(entity.id).permissions_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ authz_name=dict(required=True, aliases=['domain']),
+ user_name=dict(default=None),
+ group_name=dict(default=None),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_permission_facts', 'community.general.ovirt_permission_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_permission_facts' module has been renamed to 'ovirt_permission_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ permissions_service = _permissions_service(connection, module)
+ permissions = []
+ for p in permissions_service.list():
+ newperm = dict()
+ for key, value in p.__dict__.items():
+ if value and isinstance(value, sdk.Struct):
+ newperm[key[1:]] = get_link_name(connection, value)
+ newperm['%s_id' % key[1:]] = value.id
+ permissions.append(newperm)
+
+ result = dict(ovirt_permissions=permissions)
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
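
The loop above flattens SDK permission objects by iterating over __dict__, whose attribute names carry a leading underscore; key[1:] strips it, and every nested struct is replaced by its link name plus an <attr>_id key. A self-contained sketch of that transformation, with a hypothetical Struct class standing in for ovirtsdk4.Struct and a plain attribute read standing in for get_link_name():

    class Struct:
        # Hypothetical stand-in for ovirtsdk4.Struct.
        def __init__(self, id, name):
            self.id = id
            self.name = name

    class Permission:
        def __init__(self):
            self._role = Struct('r1', 'UserRole')  # SDK attributes are "private"
            self._user = Struct('u1', 'john')

    newperm = {}
    for key, value in Permission().__dict__.items():
        if value and isinstance(value, Struct):
            newperm[key[1:]] = value.name          # '_role' -> 'role'
            newperm['%s_id' % key[1:]] = value.id  # plus 'role_id'
    print(newperm)  # {'role': 'UserRole', 'role_id': 'r1', 'user': 'john', 'user_id': 'u1'}
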
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_quota_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_quota_facts.py
new file mode 100644
index 00000000..b2424305
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_quota_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota_facts
+short_description: Retrieve information about one or more oVirt/RHV quotas
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_quota_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV quotas."
+notes:
+ - "This module returns a variable C(ovirt_quotas), which
+ contains a list of quotas. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ data_center:
+ description:
+            - "Name of the datacenter where the quota resides."
+ required: true
+ name:
+ description:
+            - "Name of the quota; can be used as a glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about the quota named C(myquota) in the Default datacenter
+ ovirt_quota_info:
+ data_center: Default
+ name: myquota
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_quotas }}"
+'''
+
+RETURN = '''
+ovirt_quotas:
+    description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys;
+                  all quota attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ data_center=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_quota_facts', 'community.general.ovirt_quota_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_quota_facts' module has been renamed to 'ovirt_quota_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc = search_by_name(datacenters_service, dc_name)
+ if dc is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc.id).quotas_service()
+ if module.params['name']:
+ quotas = [
+ e for e in quotas_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ quotas = quotas_service.list()
+
+ result = dict(
+ ovirt_quotas=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in quotas
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py
new file mode 100644
index 00000000..eeaeb610
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_scheduling_policy_facts
+short_description: Retrieve information about one or more oVirt scheduling policies
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_scheduling_policy_info) instead.
+description:
+ - "Retrieve information about one or more oVirt scheduling policies."
+notes:
+ - "This module returns a variable C(ovirt_scheduling_policies),
+ which contains a list of scheduling policies. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ id:
+ description:
+ - "ID of the scheduling policy."
+ name:
+ description:
+            - "Name of the scheduling policy; can be used as a glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all scheduling policies with name InClusterUpgrade
+ ovirt_scheduling_policy_info:
+ name: InClusterUpgrade
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_scheduling_policies }}"
+'''
+
+RETURN = '''
+ovirt_scheduling_policies:
+    description: "List of dictionaries describing the scheduling policies.
+                  Scheduling policy attributes are mapped to dictionary keys;
+                  all scheduling policy attributes can be found at the following
+                  URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/scheduling_policy."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ id=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_scheduling_policy_facts', 'community.general.ovirt_scheduling_policy_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_scheduling_policy_facts' module has been renamed to 'ovirt_scheduling_policy_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ system_service = connection.system_service()
+ sched_policies_service = system_service.scheduling_policies_service()
+ if module.params['name']:
+ sched_policies = [
+ e for e in sched_policies_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ elif module.params['id']:
+ sched_policies = [
+ sched_policies_service.service(module.params['id']).get()
+ ]
+ else:
+ sched_policies = sched_policies_service.list()
+
+ result = dict(
+ ovirt_scheduling_policies=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in sched_policies
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
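
As in every module in this series, the ansible_facts/plain-result split above is keyed off module._name, so a single code base serves both the deprecated _facts alias and the _info name. A minimal sketch of that dispatch, with a hypothetical FakeModule class in place of AnsibleModule:

    class FakeModule:
        # Hypothetical stand-in for AnsibleModule; only ._name matters here.
        def __init__(self, name):
            self._name = name

    def payload(module, result):
        is_old_facts = module._name in ('ovirt_scheduling_policy_facts',
                                        'community.general.ovirt_scheduling_policy_facts')
        # _facts callers get ansible_facts; _info callers get top-level keys.
        return {'ansible_facts': result} if is_old_facts else dict(result)

    print(payload(FakeModule('ovirt_scheduling_policy_facts'), {'n': 1}))
    print(payload(FakeModule('community.general.ovirt_scheduling_policy_info'), {'n': 1}))
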
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py
new file mode 100644
index 00000000..73746883
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_snapshot_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine snapshots."
+notes:
+ - "This module returns a variable C(ovirt_snapshots), which
+ contains a list of snapshots. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+            - "Name of the VM with the snapshot."
+ required: true
+ description:
+ description:
+            - "Description of the snapshot; can be used as a glob expression."
+ snapshot_id:
+ description:
+            - "ID of the snapshot to retrieve information about."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all snapshots whose description starts with update for VM named centos7
+ ovirt_snapshot_info:
+ vm: centos7
+ description: update*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_snapshots }}"
+'''
+
+RETURN = '''
+ovirt_snapshots:
+    description: "List of dictionaries describing the snapshots. Snapshot attributes are mapped to dictionary keys;
+                  all snapshot attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success.
+ type: list
+'''
+
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ description=dict(default=None),
+ snapshot_id=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_snapshot_facts', 'community.general.ovirt_snapshot_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_snapshot_facts' module has been renamed to 'ovirt_snapshot_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ snapshots_service = vms_service.service(vm.id).snapshots_service()
+ if module.params['description']:
+ snapshots = [
+ e for e in snapshots_service.list()
+ if fnmatch.fnmatch(e.description, module.params['description'])
+ ]
+ elif module.params['snapshot_id']:
+ snapshots = [
+ snapshots_service.snapshot_service(module.params['snapshot_id']).get()
+ ]
+ else:
+ snapshots = snapshots_service.list()
+
+ result = dict(
+ ovirt_snapshots=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in snapshots
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py
new file mode 100644
index 00000000..b9d814c1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain_facts
+short_description: Retrieve information about one or more oVirt/RHV storage domains
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_domain_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV storage domains."
+notes:
+ - "This module returns a variable C(ovirt_storage_domains), which
+ contains a list of storage domains. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+            - "Search term which is accepted by the oVirt/RHV search backend."
+            - "For example, to search for storage domain X in datacenter Y, use the following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: >
+    Gather information about all storage domains whose names
+ start with data and belong to data center west
+ ovirt_storage_domain_info:
+ pattern: name=data* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_domains }}"
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+    description: "List of dictionaries describing the storage domains. Storage domain attributes are mapped to dictionary keys;
+                  all storage domain attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_domain_facts', 'community.general.ovirt_storage_domain_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_domain_facts' module has been renamed to 'ovirt_storage_domain_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains = storage_domains_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_storage_domains=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in storage_domains
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
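
Every module here shares the same connect/gather/cleanup flow: pop the auth dict, open a connection, and on the way out close it, logging out only when the module opened the session itself (a pre-acquired token from ovirt_auth must stay valid). A sketch of that skeleton in the guarded form used above; module, create_connection, and gather are assumed to behave like their Ansible/ovirtsdk4 counterparts:

    import traceback

    def run(module, create_connection, gather):
        auth = module.params.pop('auth')
        connection = None
        try:
            connection = create_connection(auth)
            module.exit_json(changed=False, **gather(connection))
        except Exception as e:
            module.fail_json(msg=str(e), exception=traceback.format_exc())
        finally:
            # 'connection' stays None if create_connection() raised, so guard it;
            # log out only for sessions this module opened itself.
            if connection is not None:
                connection.close(logout=auth.get('token') is None)
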
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py
new file mode 100644
index 00000000..1c583278
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_template_info) instead.
+description:
+    - "Retrieve information about one or more oVirt/RHV templates related to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+            - "Flag which indicates whether to get unregistered templates, which contain one or more
+               disks which reside on a storage domain, or diskless templates."
+ type: bool
+ default: false
+ max:
+ description:
+            - "Sets the maximum number of templates to return. If not specified, all the templates are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the templates should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all templates that relate to a storage domain and are unregistered
+  ovirt_storage_template_info:
+    unregistered: true
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_templates }}"
+'''
+
+RETURN = '''
+ovirt_storage_templates:
+    description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys;
+                  all template attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_template_facts', 'community.general.ovirt_storage_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_template_facts' module has been renamed to 'ovirt_storage_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # Find the unregistered Template we want to register:
+ if module.params.get('unregistered'):
+ templates = templates_service.list(unregistered=True)
+ else:
+ templates = templates_service.list(max=module.params['max'])
+ result = dict(
+ ovirt_storage_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py
new file mode 100644
index 00000000..d0247948
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_vm_info) instead.
+description:
+    - "Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+            - "Flag which indicates whether to get unregistered virtual machines, which contain one or more
+               disks which reside on a storage domain, or diskless virtual machines."
+ type: bool
+ default: false
+ max:
+ description:
+            - "Sets the maximum number of virtual machines to return. If not specified, all the virtual machines are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the virtual machines should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VMs that relate to a storage domain and are unregistered
+  ovirt_storage_vm_info:
+    unregistered: true
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_vms }}"
+'''
+
+RETURN = '''
+ovirt_storage_vms:
+    description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys;
+                  all VM attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_vm_facts', 'community.general.ovirt_storage_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_vm_facts' module has been renamed to 'ovirt_storage_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # Find the unregistered VM we want to register:
+ if module.params.get('unregistered'):
+ vms = vms_service.list(unregistered=True)
+ else:
+ vms = vms_service.list()
+ result = dict(
+ ovirt_storage_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_tag_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_tag_facts.py
new file mode 100644
index 00000000..c6e9b744
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_tag_facts.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag_facts
+short_description: Retrieve information about one or more oVirt/RHV tags
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_tag_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV tags."
+notes:
+ - "This module returns a variable C(ovirt_tags), which
+ contains a list of tags. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the tag which should be listed."
+ vm:
+ description:
+            - "Name of the VM whose tags should be listed."
+ host:
+ description:
+            - "Name of the host whose tags should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all tags whose names start with tag
+ ovirt_tag_info:
+ name: tag*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags that are assigned to VM postgres
+ ovirt_tag_info:
+ vm: postgres
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags that are assigned to host west
+ ovirt_tag_info:
+ host: west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+'''
+
+RETURN = '''
+ovirt_tags:
+    description: "List of dictionaries describing the tags. Tag attributes are mapped to dictionary keys;
+                  all tag attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_tag_facts', 'community.general.ovirt_tag_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_tag_facts' module has been renamed to 'ovirt_tag_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags = []
+ all_tags = tags_service.list()
+ if module.params['name']:
+ tags.extend([
+ t for t in all_tags
+ if fnmatch.fnmatch(t.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['host'])
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ tags.extend([
+ tag for tag in hosts_service.host_service(host.id).tags_service().list()
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ vm = search_by_name(vms_service, module.params['vm'])
+ if vm is None:
+                raise Exception("VM '%s' was not found." % module.params['vm'])
+ tags.extend([
+ tag for tag in vms_service.vm_service(vm.id).tags_service().list()
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ tags = all_tags
+
+ result = dict(
+ ovirt_tags=[
+ get_dict_of_struct(
+ struct=t,
+ connection=connection,
+ fetch_nested=module.params['fetch_nested'],
+ attributes=module.params['nested_attributes'],
+ ) for t in tags
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
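
Note that the name, host, and vm filters above are not mutually exclusive: each one that is set contributes its matches via extend(), so the result is their union (duplicates included), and only when none is set does the module fall back to all tags. A standalone sketch of that selection logic, using plain strings as stand-ins for tag objects:

    import fnmatch

    def select_tags(all_tags, name=None, host_tags=None, vm_tags=None):
        # Each supplied filter appends its matches; the result is their union.
        tags = []
        if name:
            tags.extend(t for t in all_tags if fnmatch.fnmatch(t, name))
        if host_tags:
            tags.extend(host_tags)
        if vm_tags:
            tags.extend(vm_tags)
        if not (name or host_tags or vm_tags):
            tags = list(all_tags)
        return tags

    print(select_tags(['tag1', 'prod'], name='tag*'))                    # ['tag1']
    print(select_tags(['tag1', 'prod'], name='tag*', vm_tags=['tag1']))  # ['tag1', 'tag1']
    print(select_tags(['tag1', 'prod']))                                 # ['tag1', 'prod']
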
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_template_facts.py
new file mode 100644
index 00000000..7595c64a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_template_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_template_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV templates."
+notes:
+ - "This module returns a variable C(ovirt_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+            - "Search term which is accepted by the oVirt/RHV search backend."
+            - "For example, to search for template X in datacenter Y, use the following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all templates whose names start with centos and belong to data center west
+ ovirt_template_info:
+ pattern: name=centos* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_templates }}"
+'''
+
+RETURN = '''
+ovirt_templates:
+    description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys;
+                  all template attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_template_facts', 'community.general.ovirt_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_template_facts' module has been renamed to 'ovirt_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates = templates_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_user_facts.py
new file mode 100644
index 00000000..ce7ab8d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_user_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user_facts
+short_description: Retrieve information about one or more oVirt/RHV users
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_user_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV users."
+notes:
+ - "This module returns a variable C(ovirt_users), which
+ contains a list of users. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+            - "Search term which is accepted by the oVirt/RHV search backend."
+            - "For example, to search for user X, use the following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all users whose first names start with john
+ ovirt_user_info:
+ pattern: name=john*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_users }}"
+'''
+
+RETURN = '''
+ovirt_users:
+    description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys;
+                  all user attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_user_facts', 'community.general.ovirt_user_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_user_facts' module has been renamed to 'ovirt_user_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users = users_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_users=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in users
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vm_facts.py
new file mode 100644
index 00000000..a5182755
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vm_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vm_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines."
+notes:
+ - "This module returns a variable C(ovirt_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+            - "Search term which is accepted by the oVirt/RHV search backend."
+            - "For example, to search for VM X in cluster Y, use the following pattern:
+ name=X and cluster=Y"
+ all_content:
+ description:
+            - "If I(true), all the attributes of the virtual machines are
+               included in the response."
+ type: bool
+ default: false
+ case_sensitive:
+ description:
+            - "If I(true), the performed search will be case sensitive."
+ type: bool
+ default: true
+ max:
+ description:
+ - "The maximum number of results to return."
+ next_run:
+ description:
+            - "Indicates if the returned result describes the virtual machine as it is currently running, or if it describes
+               the virtual machine with the modifications that have already been performed but that will only come into
+               effect when the virtual machine is restarted. By default the value is set by the engine."
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VMs whose names start with centos and belong to cluster west
+ ovirt_vm_info:
+ pattern: name=centos* and cluster=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms }}"
+
+- name: Gather information about the next-run configuration of the virtual machine named myvm
+ ovirt_vm_info:
+ pattern: name=myvm
+ next_run: true
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms[0] }}"
+'''
+
+RETURN = '''
+ovirt_vms:
+    description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys;
+                  all VM attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ next_run=dict(default=None, type='bool'),
+ case_sensitive=dict(default=True, type='bool'),
+ max=dict(default=None, type='int'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vm_facts', 'community.general.ovirt_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vm_facts' module has been renamed to 'ovirt_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms = vms_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ case_sensitive=module.params['case_sensitive'],
+ max=module.params['max'],
+ )
+ if module.params['next_run']:
+ vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
+
+ result = dict(
+ ovirt_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        # 'connection' stays None when create_connection() fails above.
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
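
The next_run handling above is a two-step pattern: the server-side search returns the current configuration, and each VM is then re-fetched with next_run=True so the struct describes the configuration that takes effect after a restart. A small sketch of that wrapper; vms_service is assumed to be an ovirtsdk4 VmsService as used in the module:

    def fetch_vms(vms_service, pattern, next_run=False):
        # First pass: server-side search with the oVirt query language.
        vms = vms_service.list(search=pattern)
        if next_run:
            # Second pass: re-read each VM so the returned structs carry the
            # pending (next-run) configuration instead of the running one.
            vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
        return vms
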
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py
new file mode 100644
index 00000000..24842be5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool_facts
+short_description: Retrieve information about one or more oVirt/RHV vmpools
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+    why: When migrating to collections, we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vmpool_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV vmpools."
+notes:
+ - "This module returns a variable C(ovirt_vmpools), which
+ contains a list of vmpools. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search vmpool X: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all vm pools which names start with centos
+ ovirt_vmpool_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vm_pools }}"
+'''
+
+RETURN = '''
+ovirt_vm_pools:
+ description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys,
+ all vmpools attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vmpool_facts', 'community.general.ovirt_vmpool_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vmpool_facts' module has been renamed to 'ovirt_vmpool_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+    auth = module.params.pop('auth')
+    connection = None
+    try:
+        connection = create_connection(auth)
+ vmpools_service = connection.system_service().vm_pools_service()
+ vmpools = vmpools_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_vm_pools=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vmpools
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        if connection is not None:
+            connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py
new file mode 100644
index 00000000..c76530f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py
@@ -0,0 +1,651 @@
+#!/usr/bin/python
+# (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com>
+# (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com>
+# (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_device
+
+short_description: Manage a bare metal server in the Packet Host.
+
+description:
+    - Manage a bare metal server in the Packet Host (a "device" in API terms).
+    - When a machine is created, the module can optionally wait for it to be assigned a public IP address, or to reach the active state.
+ - This module has a dependency on packet >= 1.0.
+ - API is documented at U(https://www.packet.net/developers/api/devices).
+
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+ - Thibaud Morel l'Horset (@teebes) <teebes@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+
+ count:
+ description:
+      - The number of devices to create. The count number can be included in the hostname via the %d string formatter.
+ default: 1
+
+ count_offset:
+ description:
+ - From which number to start the count.
+ default: 1
+
+ device_ids:
+ description:
+ - List of device IDs on which to operate.
+
+ tags:
+ description:
+ - List of device tags.
+ - Currently implemented only for device creation.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+
+ facility:
+ description:
+ - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
+
+ features:
+ description:
+ - Dict with "features" for device creation. See Packet API docs for details.
+
+ hostnames:
+ description:
+ - A hostname of a device, or a list of hostnames.
+      - If given a string or a one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
+      - If only one hostname is given, it will be expanded into multiple hostnames when I(count) > 1.
+ aliases: [name]
+
+ locked:
+ description:
+ - Whether to lock a created device.
+ default: false
+ aliases: [lock]
+ type: bool
+
+ operating_system:
+ description:
+ - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
+
+ plan:
+ description:
+ - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+
+ state:
+ description:
+ - Desired state of the device.
+ - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
+      - If set to C(active), the module call will block until all the specified devices are in state active according to the Packet API, or until I(wait_timeout) expires.
+ choices: [present, absent, active, inactive, rebooted]
+ default: present
+
+ user_data:
+ description:
+      - Userdata blob made available to the machine.
+
+ wait_for_public_IPv:
+ description:
+ - Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
+ - If set to 4, it will wait until IPv4 is assigned to the instance.
+ - If set to 6, wait until public IPv6 is assigned to the instance.
+    choices: [4, 6]
+
+ wait_timeout:
+ description:
+ - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
+      - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consecutively, applying the timeout twice.
+ default: 900
+ ipxe_script_url:
+ description:
+ - URL of custom iPXE script for provisioning.
+ - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
+ always_pxe:
+ description:
+ - Persist PXE as the first boot option.
+ - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
+ default: false
+ type: bool
+
+
+requirements:
+ - "packet-python >= 1.35"
+
+notes:
+ - Doesn't support check mode.
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+# Creating devices
+
+- name: Create 1 device
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ tags: ci-xyz
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+# Create the same device and wait until it is in state "active" (when it's
+# ready for other API operations). Fail if the device is not "active" within
+# 10 minutes.
+
+- name: Create device and wait up to 10 minutes for active state
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+ wait_timeout: 600
+
+- name: Create 3 ubuntu devices called server-01, server-02 and server-03
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: server-%02d
+ count: 3
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
+ hosts: localhost
+ tasks:
+ - name: Create 3 devices and register their facts
+ community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_stable
+ plan: baremetal_0
+ facility: ewr1
+ locked: true
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ ssh_authorized_keys:
+ - {{ lookup('file', 'my_packet_sshkey') }}
+ coreos:
+ etcd:
+ discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
+ addr: $private_ipv4:4001
+ peer-addr: $private_ipv4:7001
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
+
+ - name: Wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ with_items: "{{ newhosts.devices }}"
+
+
+# Other states of devices
+
+- name: Remove 3 devices by uuid
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ state: absent
+ device_ids:
+ - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
+ - 2eb4faf8-a638-4ac7-8f47-86fe514c3043
+ - 6bb4faf8-a638-4ac7-8f47-86fe514c301f
+'''
+
+RETURN = '''
+changed:
+ description: True if a device was altered in any way (created, modified or removed)
+ type: bool
+ sample: True
+ returned: success
+
+devices:
+ description: Information about each device that was processed
+ type: list
+ sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
+ "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12",
+ "tags": [], "locked": false, "state": "provisioning",
+ "public_ipv6": ""2604:1380:2:5200::3"}]'
+ returned: success
+''' # NOQA
+
+
+import os
+import re
+import time
+import uuid
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
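+# Illustrative sketch (not part of the module): each hostname label must
+# start and end with an alphanumeric and may contain hyphens in between, so
+#   re.match(HOSTNAME_RE, 'web-01.example.com')  # -> match object
+#   re.match(HOSTNAME_RE, '-bad.example.com')    # -> None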
+MAX_DEVICES = 100
+
+PACKET_DEVICE_STATES = (
+ 'queued',
+ 'provisioning',
+ 'failed',
+ 'powering_on',
+ 'active',
+ 'powering_off',
+ 'inactive',
+ 'rebooting',
+)
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
+
+
+def serialize_device(device):
+ """
+ Standard representation for a device as returned by various tasks::
+
+ {
+ 'id': 'device_id'
+ 'hostname': 'device_hostname',
+ 'tags': [],
+ 'locked': false,
+ 'state': 'provisioning',
+ 'ip_addresses': [
+ {
+ "address": "147.75.194.227",
+ "address_family": 4,
+ "public": true
+ },
+ {
+ "address": "2604:1380:2:5200::3",
+ "address_family": 6,
+ "public": true
+ },
+ {
+ "address": "10.100.11.129",
+ "address_family": 4,
+ "public": false
+ }
+ ],
+ "private_ipv4": "10.100.11.129",
+ "public_ipv4": "147.75.194.227",
+ "public_ipv6": "2604:1380:2:5200::3",
+ }
+
+ """
+ device_data = {}
+ device_data['id'] = device.id
+ device_data['hostname'] = device.hostname
+ device_data['tags'] = device.tags
+ device_data['locked'] = device.locked
+ device_data['state'] = device.state
+ device_data['ip_addresses'] = [
+ {
+ 'address': addr_data['address'],
+ 'address_family': addr_data['address_family'],
+ 'public': addr_data['public'],
+ }
+ for addr_data in device.ip_addresses
+ ]
+    # Also include each IP as a key for easier lookup in roles.
+ # Key names:
+ # - public_ipv4
+ # - public_ipv6
+ # - private_ipv4
+ # - private_ipv6 (if there is one)
+ for ipdata in device_data['ip_addresses']:
+ if ipdata['public']:
+ if ipdata['address_family'] == 6:
+ device_data['public_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['public_ipv4'] = ipdata['address']
+ elif not ipdata['public']:
+ if ipdata['address_family'] == 6:
+                # Packet doesn't assign private IPv6 addresses yet, but
+                # maybe one day it will
+ device_data['private_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['private_ipv4'] = ipdata['address']
+ return device_data
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
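+# Sketch of why the round-trip comparison above matters (hypothetical
+# inputs): uuid.UUID() normalizes its argument, so a canonical lowercase
+# UUID string survives the round trip while brace-wrapped or uppercase
+# variants do not:
+#   is_valid_uuid('2a5122b9-c323-4d5c-b53c-9ad3f54273e7')    # -> True
+#   is_valid_uuid('{2A5122B9-C323-4D5C-B53C-9AD3F54273E7}')  # -> False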
+
+
+def listify_string_name_or_id(s):
+ if ',' in s:
+ return s.split(',')
+ else:
+ return [s]
+
+
+def get_hostname_list(module):
+    # hostnames is a list-typed param, so it should already arrive as a list
+    # (and it does, as of Ansible 2.2.1), but to be defensive we keep the
+    # code that converts a possible string to a list
+ hostnames = module.params.get('hostnames')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ if isinstance(hostnames, str):
+ hostnames = listify_string_name_or_id(hostnames)
+ if not isinstance(hostnames, list):
+ raise Exception("name %s is not convertible to list" % hostnames)
+
+ # at this point, hostnames is a list
+ hostnames = [h.strip() for h in hostnames]
+
+ if (len(hostnames) > 1) and (count > 1):
+ _msg = ("If you set count>1, you should only specify one hostname "
+ "with the %d formatter, not a list of hostnames.")
+ raise Exception(_msg)
+
+ if (len(hostnames) == 1) and (count > 0):
+ hostname_spec = hostnames[0]
+ count_range = range(count_offset, count_offset + count)
+ if re.search(r"%\d{0,2}d", hostname_spec):
+ hostnames = [hostname_spec % i for i in count_range]
+ elif count > 1:
+ hostname_spec = '%s%%02d' % hostname_spec
+ hostnames = [hostname_spec % i for i in count_range]
+
+ for hn in hostnames:
+ if not is_valid_hostname(hn):
+ raise Exception("Hostname '%s' does not seem to be valid" % hn)
+
+ if len(hostnames) > MAX_DEVICES:
+ raise Exception("You specified too many hostnames, max is %d" %
+ MAX_DEVICES)
+ return hostnames
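+# A minimal sketch of the expansion implemented above (hypothetical values):
+#   hostnames=['server-%02d'], count=3, count_offset=1
+#       -> ['server-01', 'server-02', 'server-03']
+#   hostnames=['server'], count=2, count_offset=1  (no '%d' formatter)
+#       -> spec becomes 'server%02d' -> ['server01', 'server02']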
+
+
+def get_device_id_list(module):
+ device_ids = module.params.get('device_ids')
+
+ if isinstance(device_ids, str):
+ device_ids = listify_string_name_or_id(device_ids)
+
+ device_ids = [di.strip() for di in device_ids]
+
+ for di in device_ids:
+ if not is_valid_uuid(di):
+ raise Exception("Device ID '%s' does not seem to be valid" % di)
+
+ if len(device_ids) > MAX_DEVICES:
+ raise Exception("You specified too many devices, max is %d" %
+ MAX_DEVICES)
+ return device_ids
+
+
+def create_single_device(module, packet_conn, hostname):
+
+ for param in ('hostnames', 'operating_system', 'plan'):
+ if not module.params.get(param):
+ raise Exception("%s parameter is required for new device."
+ % param)
+ project_id = module.params.get('project_id')
+ plan = module.params.get('plan')
+ tags = module.params.get('tags')
+ user_data = module.params.get('user_data')
+ facility = module.params.get('facility')
+ operating_system = module.params.get('operating_system')
+ locked = module.params.get('locked')
+ ipxe_script_url = module.params.get('ipxe_script_url')
+ always_pxe = module.params.get('always_pxe')
+ if operating_system != 'custom_ipxe':
+ for param in ('ipxe_script_url', 'always_pxe'):
+ if module.params.get(param):
+ raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param)
+
+ device = packet_conn.create_device(
+ project_id=project_id,
+ hostname=hostname,
+ tags=tags,
+ plan=plan,
+ facility=facility,
+ operating_system=operating_system,
+ userdata=user_data,
+ locked=locked,
+ ipxe_script_url=ipxe_script_url,
+ always_pxe=always_pxe)
+ return device
+
+
+def refresh_device_list(module, packet_conn, devices):
+ device_ids = [d.id for d in devices]
+ new_device_list = get_existing_devices(module, packet_conn)
+ return [d for d in new_device_list if d.id in device_ids]
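+# Note: the project device list is re-fetched in bulk and filtered by id
+# rather than refreshed per device, so each iteration of the polling loops
+# below costs a single API call regardless of how many devices are watched.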
+
+
+def wait_for_devices_active(module, packet_conn, watched_devices):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ refreshed = watched_devices
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, watched_devices)
+ if all(d.state == 'active' for d in refreshed):
+ return refreshed
+ time.sleep(5)
+ raise Exception("Waiting for state \"active\" timed out for devices: %s"
+ % [d.hostname for d in refreshed if d.state != "active"])
+
+
+def wait_for_public_IPv(module, packet_conn, created_devices):
+
+ def has_public_ip(addr_list, ip_v):
+ return any([a['public'] and a['address_family'] == ip_v and
+ a['address'] for a in addr_list])
+
+ def all_have_public_ip(ds, ip_v):
+ return all([has_public_ip(d.ip_addresses, ip_v) for d in ds])
+
+ address_family = module.params.get('wait_for_public_IPv')
+
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, created_devices)
+ if all_have_public_ip(refreshed, address_family):
+ return refreshed
+ time.sleep(5)
+
+ raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
+ % (address_family, [d.hostname for d in created_devices]))
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ return packet_conn.list_devices(
+ project_id, params={
+ 'per_page': MAX_DEVICES})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_ids'):
+ device_id_list = get_device_id_list(module)
+ return {'ids': device_id_list, 'hostnames': []}
+ elif module.params.get('hostnames'):
+ hostname_list = get_hostname_list(module)
+ return {'hostnames': hostname_list, 'ids': []}
+
+
+def act_on_devices(module, packet_conn, target_state):
+ specified_identifiers = get_specified_device_identifiers(module)
+ existing_devices = get_existing_devices(module, packet_conn)
+ changed = False
+ create_hostnames = []
+ if target_state in ['present', 'active', 'rebooted']:
+ # states where we might create non-existing specified devices
+ existing_devices_names = [ed.hostname for ed in existing_devices]
+ create_hostnames = [hn for hn in specified_identifiers['hostnames']
+ if hn not in existing_devices_names]
+
+ process_devices = [d for d in existing_devices
+ if (d.id in specified_identifiers['ids']) or
+ (d.hostname in specified_identifiers['hostnames'])]
+
+ if target_state != 'present':
+ _absent_state_map = {}
+ for s in PACKET_DEVICE_STATES:
+ _absent_state_map[s] = packet.Device.delete
+
+ state_map = {
+ 'absent': _absent_state_map,
+ 'active': {'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ 'inactive': {'active': packet.Device.power_off},
+ 'rebooted': {'active': packet.Device.reboot,
+ 'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ }
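+        # Sketch of the mapping above: for target_state 'rebooted', a device
+        # that is currently 'active' gets packet.Device.reboot, an 'inactive'
+        # one gets packet.Device.power_on, and 'provisioning'/'rebooting' map
+        # to None, i.e. no API call - the device is left to finish on its
+        # own; any other current state raises the exception below.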
+
+    # Do the non-creation actions first; it might be faster
+ for d in process_devices:
+ if d.state == target_state:
+ continue
+ if d.state in state_map[target_state]:
+ api_operation = state_map[target_state].get(d.state)
+ if api_operation is not None:
+ api_operation(d)
+ changed = True
+ else:
+ _msg = (
+ "I don't know how to process existing device %s from state %s "
+ "to state %s" %
+ (d.hostname, d.state, target_state))
+ raise Exception(_msg)
+
+    # Finally, create the missing devices
+ created_devices = []
+ if create_hostnames:
+ created_devices = [create_single_device(module, packet_conn, n)
+ for n in create_hostnames]
+ if module.params.get('wait_for_public_IPv'):
+ created_devices = wait_for_public_IPv(
+ module, packet_conn, created_devices)
+ changed = True
+
+ processed_devices = created_devices + process_devices
+ if target_state == 'active':
+ processed_devices = wait_for_devices_active(
+ module, packet_conn, processed_devices)
+
+ return {
+ 'changed': changed,
+ 'devices': [serialize_device(d) for d in processed_devices]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ count=dict(type='int', default=1),
+ count_offset=dict(type='int', default=1),
+ device_ids=dict(type='list'),
+ facility=dict(),
+ features=dict(type='dict'),
+ hostnames=dict(type='list', aliases=['name']),
+ tags=dict(type='list', elements='str'),
+ locked=dict(type='bool', default=False, aliases=['lock']),
+ operating_system=dict(),
+ plan=dict(),
+ project_id=dict(required=True),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ user_data=dict(default=None),
+ wait_for_public_IPv=dict(type='int', choices=[4, 6]),
+ wait_timeout=dict(type='int', default=900),
+ ipxe_script_url=dict(default=''),
+ always_pxe=dict(type='bool', default=False),
+ ),
+ required_one_of=[('device_ids', 'hostnames',)],
+ mutually_exclusive=[
+ ('hostnames', 'device_ids'),
+ ('count', 'device_ids'),
+ ('count_offset', 'device_ids'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_devices(module, packet_conn, state))
+ except Exception as e:
+ module.fail_json(msg='failed to set device state %s, error: %s' %
+ (state, to_native(e)), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py
new file mode 100644
index 00000000..fbc12698
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_ip_subnet
+
+short_description: Assign an IP subnet to a bare metal server.
+
+description:
+ - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host.
+    - IPv4 subnets must come from an already reserved block.
+    - IPv6 subnets must come from the publicly routable /56 block allocated to your project.
+ - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation.
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ hostname:
+ description:
+ - A hostname of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ device_id:
+ description:
+ - UUID of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ project_id:
+ description:
+ - UUID of a project of the device to/from which to assign/remove a subnet.
+ type: str
+
+ device_count:
+ description:
+ - The number of devices to retrieve from the project. The max allowed value is 1000.
+ - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info.
+ default: 100
+ type: int
+
+ cidr:
+ description:
+ - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host.
+ aliases: [name]
+ type: str
+ required: true
+
+ state:
+ description:
+ - Desired state of the IP subnet on the specified device.
+      - When I(state) is C(present), you must specify either I(hostname) or I(device_id). The subnet with the given CIDR will then be assigned to the specified device.
+      - When I(state) is C(absent), you can specify either I(hostname) or I(device_id). The subnet will be removed from the specified device.
+      - If you leave both I(hostname) and I(device_id) empty, the subnet will be removed from any device it is assigned to.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+requirements:
+ - "packet-python >= 1.35"
+ - "python >= 2.6"
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+- name: Create 1 device and assign an arbitrary public IPv4 subnet to it
+ hosts: localhost
+ tasks:
+
+ - packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+
+# Pick an IPv4 address from a block allocated to your project.
+
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostname: myserver
+ cidr: "147.75.201.78/32"
+
+# Release IP address 147.75.201.78
+
+- name: Unassign IP address from any device in your project
+ hosts: localhost
+ tasks:
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ cidr: "147.75.201.78/32"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+  description: True if an IP address assignment was altered in any way (created or removed).
+ type: bool
+ sample: True
+ returned: success
+
+device_id:
+ type: str
+ description: UUID of the device associated with the specified IP address.
+ returned: success
+
+subnet:
+ description: Dict with data about the handled IP subnet.
+ type: dict
+ sample:
+ address: 147.75.90.241
+ address_family: 4
+ assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 }
+ cidr: 31
+ created_at: '2017-08-07T15:15:30Z'
+ enabled: True
+ gateway: 147.75.90.240
+ href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f
+    id: 31eda960-0a16-4c0f-b196-f3dc4928529f
+ manageable: True
+ management: True
+ netmask: 255.255.255.254
+ network: 147.75.90.240
+ public: True
+ returned: success
+'''
+
+
+import uuid
+import re
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+PROJECT_MAX_DEVICES = 100
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'present']
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ if not is_valid_uuid(project_id):
+ raise Exception("Project ID {0} does not seem to be valid".format(project_id))
+
+ per_page = module.params.get('device_count')
+ return packet_conn.list_devices(
+ project_id, params={'per_page': per_page})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_id'):
+ _d_id = module.params.get('device_id')
+ if not is_valid_uuid(_d_id):
+ raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id))
+ return {'device_id': _d_id, 'hostname': None}
+ elif module.params.get('hostname'):
+ _hn = module.params.get('hostname')
+ if not is_valid_hostname(_hn):
+ raise Exception("Hostname '{0}' does not seem to be valid".format(_hn))
+ return {'hostname': _hn, 'device_id': None}
+ else:
+ return {'hostname': None, 'device_id': None}
+
+
+def parse_subnet_cidr(cidr):
+ if "/" not in cidr:
+ raise Exception("CIDR expression in wrong format, must be address/prefix_len")
+ addr, prefixlen = cidr.split("/")
+ try:
+ prefixlen = int(prefixlen)
+ except ValueError:
+ raise("Wrong prefix length in CIDR expression {0}".format(cidr))
+ return addr, prefixlen
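+# Sketch (hypothetical input): parse_subnet_cidr('147.75.201.78/32') returns
+# ('147.75.201.78', 32), while a bare '147.75.201.78' raises because the
+# prefix length is missing.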
+
+
+def act_on_assignment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ specified_cidr = module.params.get("cidr")
+ address, prefixlen = parse_subnet_cidr(specified_cidr)
+
+ specified_identifier = get_specified_device_identifiers(module)
+
+ if module.check_mode:
+ return return_dict
+
+ if (specified_identifier['hostname'] is None) and (
+ specified_identifier['device_id'] is None):
+ if target_state == 'absent':
+ # The special case to release the IP from any assignment
+ for d in get_existing_devices(module, packet_conn):
+ for ia in d.ip_addresses:
+ if address == ia['address'] and prefixlen == ia['cidr']:
+ packet_conn.call_api(ia['href'], "DELETE")
+ return_dict['changed'] = True
+ return_dict['subnet'] = ia
+ return_dict['device_id'] = d.id
+ return return_dict
+ raise Exception("If you assign an address, you must specify either "
+ "target device ID or target unique hostname.")
+
+ if specified_identifier['device_id'] is not None:
+ device = packet_conn.get_device(specified_identifier['device_id'])
+ else:
+ all_devices = get_existing_devices(module, packet_conn)
+ hn = specified_identifier['hostname']
+ matching_devices = [d for d in all_devices if d.hostname == hn]
+ if len(matching_devices) > 1:
+ raise Exception("There are more than one devices matching given hostname {0}".format(hn))
+ if len(matching_devices) == 0:
+ raise Exception("There is no device matching given hostname {0}".format(hn))
+ device = matching_devices[0]
+
+ return_dict['device_id'] = device.id
+ assignment_dicts = [i for i in device.ip_addresses
+ if i['address'] == address and i['cidr'] == prefixlen]
+ if len(assignment_dicts) > 1:
+ raise Exception("IP address {0} is assigned more than once for device {1}".format(
+ specified_cidr, device.hostname))
+
+ if target_state == "absent":
+ if len(assignment_dicts) == 1:
+ packet_conn.call_api(assignment_dicts[0]['href'], "DELETE")
+ return_dict['subnet'] = assignment_dicts[0]
+ return_dict['changed'] = True
+ elif target_state == "present":
+ if len(assignment_dicts) == 0:
+ new_assignment = packet_conn.call_api(
+ "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)})
+ return_dict['changed'] = True
+ return_dict['subnet'] = new_assignment
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ device_id=dict(type='str'),
+ hostname=dict(type='str'),
+ project_id=dict(type='str'),
+ device_count=dict(type='int', default=PROJECT_MAX_DEVICES),
+ cidr=dict(type='str', required=True, aliases=['name']),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[('hostname', 'device_id')],
+ required_one_of=[['hostname', 'device_id', 'project_id']],
+ required_by=dict(
+ hostname=('project_id',),
+ ),
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_assignment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py
new file mode 100644
index 00000000..38d7ca76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2019, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_project
+
+short_description: Create/delete a project in Packet host.
+
+description:
+ - Create/delete a project in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#projects).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ payment_method:
+ description:
+      - Payment method is the name of one of the payment methods available to your user.
+ - When blank, the API assumes the default payment method.
+ type: str
+
+ auth_token:
+ description:
+      - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+ - Name for/of the project.
+ type: str
+
+ org_id:
+ description:
+ - UUID of the organization to create a project for.
+ - When blank, the API assumes the default organization.
+ type: str
+
+ id:
+ description:
+ - UUID of the project which you want to remove.
+ type: str
+
+ custom_data:
+ description:
+ - Custom data about the project to create.
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.40"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create new project
+  hosts: localhost
+  tasks:
+  - community.general.packet_project:
+      name: "new project"
+
+- name: Create new project within non-default organization
+  hosts: localhost
+  tasks:
+  - community.general.packet_project:
+      name: "my org project"
+      org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0
+
+- name: Remove project by id
+  hosts: localhost
+  tasks:
+  - community.general.packet_project:
+      state: absent
+      id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+
+- name: Create new project with non-default billing method
+  hosts: localhost
+  tasks:
+  - community.general.packet_project:
+      name: "newer project"
+      payment_method: "the other visa"
+'''
+
+RETURN = '''
+changed:
+ description: True if a project was created or removed.
+ type: bool
+ sample: True
+ returned: success
+
+name:
+ description: Name of addressed project.
+ type: str
+ returned: success
+
+id:
+ description: UUID of addressed project.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def act_on_project(target_state, module, packet_conn):
+ result_dict = {'changed': False}
+ given_id = module.params.get('id')
+ given_name = module.params.get('name')
+ if given_id:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_id == p.id]
+ else:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_name == p.name]
+
+ if target_state == 'present':
+ if len(matching_projects) == 0:
+ org_id = module.params.get('org_id')
+ custom_data = module.params.get('custom_data')
+ payment_method = module.params.get('payment_method')
+
+ if not org_id:
+ params = {
+ "name": given_name,
+ "payment_method_id": payment_method,
+ "customdata": custom_data
+ }
+ new_project_data = packet_conn.call_api("projects", "POST", params)
+ new_project = packet.Project(new_project_data, packet_conn)
+ else:
+ new_project = packet_conn.create_organization_project(
+ org_id=org_id,
+ name=given_name,
+ payment_method_id=payment_method,
+ customdata=custom_data
+ )
+
+ result_dict['changed'] = True
+ matching_projects.append(new_project)
+
+ result_dict['name'] = matching_projects[0].name
+ result_dict['id'] = matching_projects[0].id
+ else:
+ if len(matching_projects) > 1:
+ _msg = ("More than projects matched for module call with state = absent: "
+ "{0}".format(to_native(matching_projects)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_projects) == 1:
+ p = matching_projects[0]
+ result_dict['name'] = p.name
+ result_dict['id'] = p.id
+ result_dict['changed'] = True
+ try:
+ p.delete()
+ except Exception as e:
+ _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format(
+ p.name, p.id, to_native(e)))
+ module.fail_json(msg=_msg)
+ return result_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ org_id=dict(type='str'),
+ payment_method=dict(type='str'),
+ custom_data=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id",)],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ]
+ )
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_project(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set project state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py
new file mode 100644
index 00000000..73233d89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# Copyright 2016 Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_sshkey
+short_description: Create/delete an SSH key in Packet host.
+description:
+ - Create/delete an SSH key in Packet host.
+ - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
+author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ label:
+ description:
+      - Label for the key. If you keep it empty, it will be read from the key string.
+ id:
+ description:
+ - UUID of the key which you want to remove.
+ fingerprint:
+ description:
+ - Fingerprint of the key which you want to remove.
+ key:
+ description:
+ - Public Key string ({type} {base64 encoded key} {description}).
+ key_file:
+ description:
+ - File with the public key.
+
+requirements:
+ - "python >= 2.6"
+ - packet-python
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create sshkey from string
+  hosts: localhost
+  tasks:
+  - community.general.packet_sshkey:
+      key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
+
+- name: Create sshkey from file
+  hosts: localhost
+  tasks:
+  - community.general.packet_sshkey:
+      label: key from file
+      key_file: ~/ff.pub
+
+- name: Remove sshkey by id
+  hosts: localhost
+  tasks:
+  - community.general.packet_sshkey:
+      state: absent
+      id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+'''
+
+RETURN = '''
+changed:
+ description: True if a sshkey was created or removed.
+ type: bool
+ sample: True
+ returned: always
+sshkeys:
+ description: Information about sshkeys that were created/removed.
+ type: list
+ sample: [
+ {
+ "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
+ "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
+ "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
+ "label": "mynewkey33"
+ }
+ ]
+ returned: always
+''' # NOQA
+
+import os
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def serialize_sshkey(sshkey):
+ sshkey_data = {}
+ copy_keys = ['id', 'key', 'label', 'fingerprint']
+ for name in copy_keys:
+ sshkey_data[name] = getattr(sshkey, name)
+ return sshkey_data
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def load_key_string(key_str):
+ ret_dict = {}
+ key_str = key_str.strip()
+ ret_dict['key'] = key_str
+ cut_key = key_str.split()
+ if len(cut_key) in [2, 3]:
+ if len(cut_key) == 3:
+ ret_dict['label'] = cut_key[2]
+ else:
+ raise Exception("Public key %s is in wrong format" % key_str)
+ return ret_dict
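+# Sketch (hypothetical key material): a three-field OpenSSH public key such
+# as 'ssh-rsa AAAAB3... me@host' yields {'key': <the whole string>,
+# 'label': 'me@host'}, a two-field '<type> <base64>' string yields only the
+# 'key' entry, and anything else is rejected as malformed.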
+
+
+def get_sshkey_selector(module):
+ key_id = module.params.get('id')
+ if key_id:
+ if not is_valid_uuid(key_id):
+ raise Exception("sshkey ID %s is not valid UUID" % key_id)
+ selecting_fields = ['label', 'fingerprint', 'id', 'key']
+ select_dict = {}
+ for f in selecting_fields:
+ if module.params.get(f) is not None:
+ select_dict[f] = module.params.get(f)
+
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as _file:
+ loaded_key = load_key_string(_file.read())
+ select_dict['key'] = loaded_key['key']
+ if module.params.get('label') is None:
+ if loaded_key.get('label'):
+ select_dict['label'] = loaded_key['label']
+
+ def selector(k):
+ if 'key' in select_dict:
+ # if key string is specified, compare only the key strings
+ return k.key == select_dict['key']
+ else:
+ # if key string not specified, all the fields must match
+ return all([select_dict[f] == getattr(k, f) for f in select_dict])
+ return selector
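+# Sketch of the selector semantics (hypothetical values): if a key string was
+# given (directly or via key_file), only key strings are compared, so the
+# same public key uploaded under a different label still matches; otherwise
+# every field that was specified (label, fingerprint, id) must match the
+# existing key exactly.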
+
+
+def act_on_sshkeys(target_state, module, packet_conn):
+ selector = get_sshkey_selector(module)
+ existing_sshkeys = packet_conn.list_ssh_keys()
+    # list() matters on Python 3: filter() returns an iterator, and the
+    # comparison and .append() below expect a list
+    matching_sshkeys = list(filter(selector, existing_sshkeys))
+ changed = False
+ if target_state == 'present':
+ if matching_sshkeys == []:
+            # there is no key matching the fields from the module call
+            # => create the key with the given label and key string
+ newkey = {}
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as f:
+ newkey = load_key_string(f.read())
+ if module.params.get('key'):
+ newkey = load_key_string(module.params.get('key'))
+ if module.params.get('label'):
+ newkey['label'] = module.params.get('label')
+ for param in ('label', 'key'):
+ if param not in newkey:
+ _msg = ("If you want to ensure a key is present, you must "
+ "supply both a label and a key string, either in "
+ "module params, or in a key file. %s is missing"
+ % param)
+ raise Exception(_msg)
+ matching_sshkeys = []
+ new_key_response = packet_conn.create_ssh_key(
+ newkey['label'], newkey['key'])
+ changed = True
+
+ matching_sshkeys.append(new_key_response)
+ else:
+ # state is 'absent' => delete matching keys
+ for k in matching_sshkeys:
+ try:
+ k.delete()
+ changed = True
+ except Exception as e:
+ _msg = ("while trying to remove sshkey %s, id %s %s, "
+ "got error: %s" %
+ (k.label, k.id, target_state, e))
+ raise Exception(_msg)
+
+ return {
+ 'changed': changed,
+ 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ label=dict(type='str', aliases=['name'], default=None),
+ id=dict(type='str', default=None),
+ fingerprint=dict(type='str', default=None),
+ key=dict(type='str', default=None, no_log=True),
+ key_file=dict(type='path', default=None),
+ ),
+ mutually_exclusive=[
+ ('label', 'id'),
+ ('label', 'fingerprint'),
+ ('id', 'fingerprint'),
+ ('key', 'fingerprint'),
+ ('key', 'id'),
+ ('key_file', 'key'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ try:
+ module.exit_json(**act_on_sshkeys(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(msg='failed to set sshkey state: %s' % str(e))
+ else:
+ module.fail_json(msg='%s is not a valid state for this module' % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py
new file mode 100644
index 00000000..2966139a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_volume
+
+short_description: Create/delete a volume in Packet host.
+
+description:
+ - Create/delete a volume in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#volumes).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Desired state of the volume.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+      - Selector for the API-generated name of the volume.
+ type: str
+
+ description:
+ description:
+ - User-defined description attribute for Packet volume.
+ - "It is used used as idempotent identifier - if volume with given
+ description exists, new one is not created."
+ type: str
+
+ id:
+ description:
+ - UUID of a volume.
+ type: str
+
+ plan:
+ description:
+ - storage_1 for standard tier, storage_2 for premium (performance) tier.
+ - Tiers are described at U(https://www.packet.com/cloud/storage/).
+ choices: ['storage_1', 'storage_2']
+ default: 'storage_1'
+ type: str
+
+ facility:
+ description:
+ - Location of the volume.
+      - Volumes can only be attached to devices in the same location.
+ type: str
+
+ size:
+ description:
+ - Size of the volume in gigabytes.
+ type: int
+
+ locked:
+ description:
+ - Create new volume locked.
+ type: bool
+ default: False
+
+ billing_cycle:
+ description:
+ - Billing cycle for new volume.
+ choices: ['hourly', 'monthly']
+ default: 'hourly'
+ type: str
+
+ snapshot_policy:
+ description:
+ - Snapshot policy for new volume.
+ type: dict
+
+ suboptions:
+ snapshot_count:
+ description:
+ - How many snapshots to keep, a positive integer.
+ required: True
+ type: int
+
+ snapshot_frequency:
+ description:
+ - Frequency of snapshots.
+ required: True
+ choices: ["15min", "1hour", "1day", "1week", "1month", "1year"]
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+ vars:
+ volname: testvol123
+ project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ community.general.packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: 'ewr1'
+ plan: 'storage_1'
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+ register: result_create
+
+ - name: Delete volume
+ community.general.packet_volume:
+ id: "{{ result_create.id }}"
+ project_id: "{{ project_id }}"
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: UUID of specified volume
+ type: str
+ returned: success
+ sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
+name:
+ description: The API-generated name of the volume resource.
+ type: str
+ returned: if volume is attached/detached to/from some device
+ sample: "volume-a91dc506"
+description:
+ description: The user-defined description of the volume resource.
+ type: str
+ returned: success
+ sample: "Just another volume"
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+VOLUME_PLANS = ["storage_1", "storage_2"]
+VOLUME_STATES = ["present", "absent"]
+BILLING = ["hourly", "monthly"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(module):
+ if module.params.get('id'):
+ i = module.params.get('id')
+ if not is_valid_uuid(i):
+ raise Exception("Volume ID '{0}' is not a valid UUID".format(i))
+ return lambda v: v['id'] == i
+ elif module.params.get('name'):
+ n = module.params.get('name')
+ return lambda v: v['name'] == n
+ elif module.params.get('description'):
+ d = module.params.get('description')
+ return lambda v: v['description'] == d
+
+
+def get_or_fail(params, key):
+ item = params.get(key)
+ if item is None:
+ raise Exception("{0} must be specified for new volume".format(key))
+ return item
+
+
+def act_on_volume(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ s = get_volume_selector(module)
+ project_id = module.params.get("project_id")
+ api_method = "projects/{0}/storage".format(project_id)
+ all_volumes = packet_conn.call_api(api_method, "GET")['volumes']
+ matching_volumes = [v for v in all_volumes if s(v)]
+
+ if target_state == "present":
+ if len(matching_volumes) == 0:
+ params = {
+ "description": get_or_fail(module.params, "description"),
+ "size": get_or_fail(module.params, "size"),
+ "plan": get_or_fail(module.params, "plan"),
+ "facility": get_or_fail(module.params, "facility"),
+ "locked": get_or_fail(module.params, "locked"),
+ "billing_cycle": get_or_fail(module.params, "billing_cycle"),
+ "snapshot_policies": module.params.get("snapshot_policy"),
+ }
+
+ new_volume_data = packet_conn.call_api(api_method, "POST", params)
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = new_volume_data[k]
+
+ else:
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = matching_volumes[0][k]
+
+ else:
+ if len(matching_volumes) > 1:
+ _msg = ("More than one volume matches in module call for absent state: {0}".format(
+ to_native(matching_volumes)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_volumes) == 1:
+ volume = matching_volumes[0]
+ packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE")
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = volume[k]
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str', default=None),
+ description=dict(type="str", default=None),
+ name=dict(type='str', default=None),
+ state=dict(choices=VOLUME_STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ project_id=dict(required=True),
+ plan=dict(choices=VOLUME_PLANS, default="storage_1"),
+ facility=dict(type="str"),
+ size=dict(type="int"),
+ locked=dict(type="bool", default=False),
+ snapshot_policy=dict(type='dict', default=None),
+ billing_cycle=dict(type='str', choices=BILLING, default="hourly"),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id", "description")],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ('id', 'description'),
+ ('name', 'description'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in VOLUME_STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_volume(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume state {0}: {1}".format(
+ state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py
new file mode 100644
index 00000000..a1a38bb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_volume_attachment
+
+short_description: Attach/detach a volume to a device in the Packet host.
+
+description:
+ - Attach/detach a volume to a device in the Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/volumes/).
+ - "This module creates the attachment route in the Packet API. In order to discover
+ the block devices on the server, you have to run the Attach Scripts,
+ as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)."
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the attachment.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ project_id:
+ description:
+ - UUID of the project to which the device and volume belong.
+ type: str
+ required: true
+
+ volume:
+ description:
+ - Selector for the volume.
+ - It can be a UUID, an API-generated volume name, or user-defined description string.
+ - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"'
+ type: str
+ required: true
+
+ device:
+ description:
+ - Selector for the device.
+ - It can be a UUID of the device, or a hostname.
+ - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"'
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+
+ vars:
+ volname: testvol
+ devname: testdev
+ project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+      community.general.packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: ewr1
+ plan: storage_1
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+
+ - name: Create a device
+      community.general.packet_device:
+ project_id: "{{ project_id }}"
+ hostnames: "{{ devname }}"
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: ewr1
+ state: present
+
+ - name: Attach testvol to testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+
+ - name: Detach testvol from testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+ state: absent
+'''
+
+RETURN = '''
+volume_id:
+ description: UUID of volume addressed by the module call.
+ type: str
+ returned: success
+
+device_id:
+ description: UUID of device addressed by the module call.
+ type: str
+ returned: success
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+STATES = ["present", "absent"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['name'] == spec or v['description'] == spec
+
+
+def get_device_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['hostname'] == spec
+
+
+def do_attach(packet_conn, vol_id, dev_id):
+ api_method = "storage/{0}/attachments".format(vol_id)
+ packet_conn.call_api(
+ api_method,
+ params={"device_id": dev_id},
+ type="POST")
+
+
+def do_detach(packet_conn, vol, dev_id=None):
+ def dev_match(a):
+ return (dev_id is None) or (a['device']['id'] == dev_id)
+ for a in vol['attachments']:
+ if dev_match(a):
+ packet_conn.call_api(a['href'], type="DELETE")
+
+
+def validate_selected(l, resource_type, spec):
+ if len(l) > 1:
+ _msg = ("more than one {0} matches specification {1}: {2}".format(
+ resource_type, spec, l))
+ raise Exception(_msg)
+ if len(l) == 0:
+ _msg = "no {0} matches specification: {1}".format(resource_type, spec)
+ raise Exception(_msg)
+
+
+def get_attached_dev_ids(volume_dict):
+ if len(volume_dict['attachments']) == 0:
+ return []
+ else:
+ return [a['device']['id'] for a in volume_dict['attachments']]
+
+
+def act_on_volume_attachment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ volspec = module.params.get("volume")
+ devspec = module.params.get("device")
+ if devspec is None and target_state == 'present':
+ raise Exception("If you want to attach a volume, you must specify a device.")
+ project_id = module.params.get("project_id")
+ volumes_api_method = "projects/{0}/storage".format(project_id)
+ volumes = packet_conn.call_api(volumes_api_method,
+ params={'include': 'facility,attachments.device'})['volumes']
+ v_match = get_volume_selector(volspec)
+ matching_volumes = [v for v in volumes if v_match(v)]
+ validate_selected(matching_volumes, "volume", volspec)
+ volume = matching_volumes[0]
+ return_dict['volume_id'] = volume['id']
+
+ device = None
+ if devspec is not None:
+ devices_api_method = "projects/{0}/devices".format(project_id)
+ devices = packet_conn.call_api(devices_api_method)['devices']
+ d_match = get_device_selector(devspec)
+ matching_devices = [d for d in devices if d_match(d)]
+ validate_selected(matching_devices, "device", devspec)
+ device = matching_devices[0]
+ return_dict['device_id'] = device['id']
+
+ attached_device_ids = get_attached_dev_ids(volume)
+
+ if target_state == "present":
+ if len(attached_device_ids) == 0:
+ do_attach(packet_conn, volume['id'], device['id'])
+ return_dict['changed'] = True
+ elif device['id'] not in attached_device_ids:
+ # Don't reattach volume which is attached to a different device.
+ # Rather fail than force remove a device on state == 'present'.
+            raise Exception("volume {0} is already attached to device(s) {1}".format(
+                volume['id'], attached_device_ids))
+ else:
+ if device is None:
+ if len(attached_device_ids) > 0:
+ do_detach(packet_conn, volume)
+ return_dict['changed'] = True
+ elif device['id'] in attached_device_ids:
+ do_detach(packet_conn, volume, device['id'])
+ return_dict['changed'] = True
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ volume=dict(type="str", required=True),
+ project_id=dict(type="str", required=True),
+ device=dict(type="str"),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(
+ **act_on_volume_attachment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
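
The selector helpers in this module decide how to match purely from the shape of the user input: a string that round-trips through uuid.UUID is treated as an ID, anything else as a hostname (or name/description). A self-contained sketch of that dispatch follows; the device list is made-up sample data.

    import uuid

    def is_valid_uuid(value):
        # Round-trip through uuid.UUID; version=4 normalises the version bits,
        # so only a well-formed v4 UUID compares equal to its own input.
        try:
            return str(uuid.UUID(value, version=4)) == value
        except ValueError:
            return False

    def get_device_selector(spec):
        # UUIDs select on 'id'; anything else selects on 'hostname'.
        if is_valid_uuid(spec):
            return lambda d: d['id'] == spec
        return lambda d: d['hostname'] == spec

    devices = [
        {'id': '98a14f7a-3d27-4478-b7cf-35b5670523f3', 'hostname': 'web01'},
        {'id': '4a347482-b546-4f67-8300-fb5018ef0c55', 'hostname': 'db01'},
    ]
    for spec in ('web01', '98a14f7a-3d27-4478-b7cf-35b5670523f3'):
        matches = [d for d in devices if get_device_selector(spec)(d)]
        print(spec, '->', matches[0]['hostname'])  # both resolve to web01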
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py
new file mode 100644
index 00000000..90798672
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py
@@ -0,0 +1,654 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks
+short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
+description:
+  - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created, the module can optionally wait
+    for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0.
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The name of the virtual machine.
+ type: str
+ image:
+ description:
+ - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ datacenter:
+ description:
+ - The datacenter to provision this virtual machine.
+ type: str
+ cores:
+ description:
+ - The number of CPU cores to allocate to the virtual machine.
+ default: 2
+ type: int
+ ram:
+ description:
+ - The amount of memory to allocate to the virtual machine.
+ default: 2048
+ type: int
+ cpu_family:
+ description:
+ - The CPU family type to allocate to the virtual machine.
+ type: str
+ default: AMD_OPTERON
+ choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+ volume_size:
+ description:
+ - The size in GB of the boot volume.
+ type: int
+ default: 10
+ bus:
+ description:
+ - The bus type for the volume.
+ type: str
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ type: list
+ count:
+ description:
+ - The number of virtual machines to create.
+ type: int
+ default: 1
+ location:
+ description:
+ - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
+ type: str
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ assign_public_ip:
+ description:
+ - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
+ type: bool
+ default: 'no'
+ lan:
+ description:
+ - The ID of the LAN you wish to add the servers to.
+ type: int
+ default: 1
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ remove_boot_volume:
+ description:
+ - remove the bootVolume of the virtual machine you're destroying.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - create or terminate instances
+ - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
+ type: str
+ default: 'present'
+ disk_type:
+ description:
+ - the type of disk to be allocated.
+ type: str
+ choices: [SSD, HDD]
+ default: HDD
+
+requirements:
+ - "profitbricks"
+ - "python >= 2.6"
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Provisioning example
+- name: Create three servers and enumerate their names
+ community.general.profitbricks:
+ datacenter: Tardis One
+ name: web%02d.stackpointcloud.com
+ cores: 4
+ ram: 2048
+ volume_size: 50
+ cpu_family: INTEL_XEON
+ image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+ location: us/las
+ count: 3
+ assign_public_ip: true
+
+- name: Remove virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: absent
+
+- name: Start virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: running
+
+- name: Stop virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: stopped
+'''
+
+import re
+import uuid
+import time
+import traceback
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+ cores = module.params.get('cores')
+ ram = module.params.get('ram')
+ cpu_family = module.params.get('cpu_family')
+ volume_size = module.params.get('volume_size')
+ disk_type = module.params.get('disk_type')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ bus = module.params.get('bus')
+ lan = module.params.get('lan')
+ assign_public_ip = module.params.get('assign_public_ip')
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+ location = module.params.get('location')
+ image = module.params.get('image')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if assign_public_ip:
+ public_found = False
+
+ lans = profitbricks.list_lans(datacenter)
+        # Use the first public LAN and stop, so its id is not overwritten
+        # by later iterations of the loop variable.
+        for lan in lans['items']:
+            if lan['properties']['public']:
+                public_found = True
+                lan = lan['id']
+                break
+
+ if not public_found:
+ i = LAN(
+ name='public',
+ public=True)
+
+ lan_response = profitbricks.create_lan(datacenter, i)
+ _wait_for_completion(profitbricks, lan_response,
+ wait_timeout, "_create_machine")
+ lan = lan_response['id']
+
+ v = Volume(
+ name=str(uuid.uuid4()).replace('-', '')[:10],
+ size=volume_size,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ bus=bus)
+
+ n = NIC(
+ lan=int(lan)
+ )
+
+ s = Server(
+ name=name,
+ ram=ram,
+ cores=cores,
+ cpu_family=cpu_family,
+ create_volumes=[v],
+ nics=[n],
+ )
+
+ try:
+ create_server_response = profitbricks.create_server(
+ datacenter_id=datacenter, server=s)
+
+ _wait_for_completion(profitbricks, create_server_response,
+ wait_timeout, "create_virtual_machine")
+
+ server_response = profitbricks.get_server(
+ datacenter_id=datacenter,
+ server_id=create_server_response['id'],
+ depth=3
+ )
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server: %s" % str(e))
+ else:
+ return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+ state = module.params.get('state')
+
+ try:
+ if state == 'running':
+ profitbricks.start_server(datacenter_id, server_id)
+ else:
+ profitbricks.stop_server(datacenter_id, server_id)
+
+ return True
+ except Exception as e:
+ module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+ datacenter = module.params.get('datacenter')
+ location = module.params.get('location')
+ wait_timeout = module.params.get('wait_timeout')
+
+ i = Datacenter(
+ name=datacenter,
+ location=location
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ return datacenter_response
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server(s): %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+ """
+ Create new virtual machine
+
+ module : AnsibleModule object
+    profitbricks: authenticated profitbricks object
+
+ Returns:
+ True if a new virtual machine was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ lan = module.params.get('lan')
+ wait_timeout = module.params.get('wait_timeout')
+ failed = True
+ datacenter_found = False
+
+ virtual_machines = []
+ virtual_machine_ids = []
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if datacenter_id:
+ datacenter_found = True
+
+ if not datacenter_found:
+ datacenter_response = _create_datacenter(module, profitbricks)
+ datacenter_id = datacenter_response['id']
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "create_virtual_machine")
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+            # Python 3 exceptions have no .message attribute; use to_native.
+            if to_native(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name]
+
+ # Prefetch a list of servers for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for name in names:
+ # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name):
+ continue
+
+ create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
+ nics = profitbricks.list_nics(datacenter_id, create_response['id'])
+ for n in nics['items']:
+ if lan == n['properties']['lan']:
+ create_response.update({'public_ip': n['properties']['ips'][0]})
+
+ virtual_machines.append(create_response)
+
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'machines': virtual_machines,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in virtual_machines],
+ }
+ }
+
+ return results
+
+
+def remove_virtual_machine(module, profitbricks):
+ """
+ Removes a virtual machine.
+
+ This will remove the virtual machine along with the bootVolume.
+
+ module : AnsibleModule object
+    profitbricks: authenticated profitbricks object.
+
+ Not yet supported: handle deletion of attached data disks.
+
+ Returns:
+        True if the virtual server was deleted, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+ remove_boot_volume = module.params.get('remove_boot_volume')
+ changed = False
+
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID for server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ # Remove the server's boot volume
+ if remove_boot_volume:
+ _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
+
+ # Remove the server
+ try:
+ server_response = profitbricks.delete_server(datacenter_id, server_id)
+ except Exception as e:
+ module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
+ else:
+ changed = True
+
+ return changed
+
+
+def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
+ """
+ Remove the boot volume from the server
+ """
+ try:
+ server = profitbricks.get_server(datacenter_id, server_id)
+ volume_id = server['properties']['bootVolume']['id']
+ volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
+
+
+def startstop_machine(module, profitbricks, state):
+ """
+ Starts or Stops a virtual machine.
+
+ module : AnsibleModule object
+    profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True when the servers process the action successfully, false otherwise.
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ changed = False
+
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID of server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ _startstop_machine(module, profitbricks, datacenter_id, server_id)
+ changed = True
+
+ if wait:
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ matched_instances = []
+ for res in profitbricks.list_servers(datacenter_id)['items']:
+ if state == 'running':
+ if res['properties']['vmState'].lower() == state:
+ matched_instances.append(res)
+ elif state == 'stopped':
+ if res['properties']['vmState'].lower() == 'shutoff':
+ matched_instances.append(res)
+
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
+
+ return (changed)
+
+
+def _get_datacenter_id(datacenters, identity):
+ """
+ Fetch and return datacenter UUID by datacenter name if found.
+ """
+ for datacenter in datacenters['items']:
+ if identity in (datacenter['properties']['name'], datacenter['id']):
+ return datacenter['id']
+ return None
+
+
+def _get_server_id(servers, identity):
+ """
+ Fetch and return server UUID by server name if found.
+ """
+ for server in servers['items']:
+ if identity in (server['properties']['name'], server['id']):
+ return server['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ name=dict(),
+ image=dict(),
+ cores=dict(type='int', default=2),
+ ram=dict(type='int', default=2048),
+ cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
+ default='AMD_OPTERON'),
+ volume_size=dict(type='int', default=10),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ lan=dict(type='int', default=1),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ assign_public_ip=dict(type='bool', default=False),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ remove_boot_volume=dict(type='bool', default=True),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+            module.fail_json(msg='datacenter parameter is required ' +
+                             'for removing machines.')
+
+ try:
+ (changed) = remove_virtual_machine(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for ' +
+ 'running or stopping machines.')
+ try:
+ (changed) = startstop_machine(module, profitbricks, state)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is ' +
+ 'required for new instance')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is ' +
+ 'required for new instance')
+
+ try:
+ (machine_dict_array) = create_virtual_machine(module, profitbricks)
+ module.exit_json(**machine_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
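
_wait_for_completion above (and its verbatim copies in the sibling modules) is a deadline-based polling loop over the request-status endpoint. Here is a generic, self-contained sketch of the same pattern; the check callable is a hypothetical stand-in for profitbricks.get_request.

    import time

    def wait_until(check, timeout=600, interval=5):
        """Poll check() until it reports DONE; raise on FAILED or deadline."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = check()
            if status == 'DONE':
                return
            if status == 'FAILED':
                raise Exception('request failed')
            time.sleep(interval)
        raise Exception('timed out after %s seconds' % timeout)

    # Stub standing in for the request-status call: DONE on the third poll.
    states = iter(['RUNNING', 'RUNNING', 'DONE'])
    wait_until(lambda: next(states), timeout=30, interval=0)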
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py
new file mode 100644
index 00000000..e3ba1d49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter.
+description:
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
+ on profitbricks >= 1.0.0
+options:
+ name:
+ description:
+ - The name of the virtual datacenter.
+ type: str
+ description:
+ description:
+ - The description of the virtual datacenter.
+ type: str
+ required: false
+ location:
+ description:
+ - The datacenter location.
+ type: str
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Create or terminate datacenters.
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a datacenter
+ community.general.profitbricks_datacenter:
+ datacenter: Tardis One
+ wait_timeout: 500
+
+- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
+ community.general.profitbricks_datacenter:
+ datacenter: Tardis One
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Datacenter
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _remove_datacenter(module, profitbricks, datacenter):
+ try:
+ profitbricks.delete_datacenter(datacenter)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
+
+def create_datacenter(module, profitbricks):
+ """
+ Creates a Datacenter
+
+ This will create a new Datacenter in the specified location.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if a new datacenter was created, false otherwise
+ """
+ name = module.params.get('name')
+ location = module.params.get('location')
+ description = module.params.get('description')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ i = Datacenter(
+ name=name,
+ location=location,
+ description=description
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ if wait:
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ results = {
+ 'datacenter_id': datacenter_response['id']
+ }
+
+ return results
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+
+def remove_datacenter(module, profitbricks):
+ """
+ Removes a Datacenter.
+
+ This will remove a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the datacenter was deleted, false otherwise
+ """
+ name = module.params.get('name')
+ changed = False
+
+    if uuid_match.match(name):
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+ else:
+ datacenters = profitbricks.list_datacenters()
+
+ for d in datacenters['items']:
+ vdc = profitbricks.get_datacenter(d['id'])
+
+ if name == vdc['properties']['name']:
+ name = d['id']
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(),
+ description=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=600, type='int'),
+ state=dict(default='present'), # @TODO add choices
+ )
+ )
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+            module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
+
+ try:
+ (changed) = remove_datacenter(module, profitbricks)
+ module.exit_json(
+ changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new datacenter')
+ if not module.params.get('location'):
+ module.fail_json(msg='location parameter is required for a new datacenter')
+
+ try:
+ (datacenter_dict_array) = create_datacenter(module, profitbricks)
+ module.exit_json(**datacenter_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
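
remove_datacenter resolves a human-readable name to a UUID by listing resources and comparing names, passing the input through untouched when it already looks like a UUID; the same lookup recurs in every module in this family. A condensed, self-contained sketch (the sample datacenter is hypothetical):

    import re

    UUID_RE = re.compile(r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)

    def resolve_id(items, identity):
        """Return the UUID for identity, which may be a name or already a UUID."""
        if UUID_RE.match(identity):
            return identity
        for item in items:
            if item['properties']['name'] == identity:
                return item['id']
        return None

    datacenters = [{'id': '3668f606-9f3d-4c46-8a4b-8f1f641608c8',
                    'properties': {'name': 'Tardis One'}}]
    print(resolve_id(datacenters, 'Tardis One'))  # prints the UUID above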
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py
new file mode 100644
index 00000000..49941241
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or remove a NIC.
+description:
+  - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ required: true
+ server:
+ description:
+ - The server name or ID.
+ type: str
+ required: true
+ name:
+ description:
+ - The name or ID of the NIC. This is only required on deletes, but not on create.
+ - If not specified, it defaults to a value based on UUID4.
+ type: str
+ lan:
+ description:
+ - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: true
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: true
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ wait_timeout: 500
+ state: present
+
+- name: Remove a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ name: 7341c2454f
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, NIC
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _make_default_name():
+ return str(uuid.uuid4()).replace('-', '')[:10]
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def create_nic(module, profitbricks):
+ """
+ Creates a NIC.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+        True if the NIC was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ lan = module.params.get('lan')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+ try:
+ n = NIC(
+ name=name,
+ lan=lan
+ )
+
+ nic_response = profitbricks.create_nic(datacenter, server, n)
+
+ if wait:
+ _wait_for_completion(profitbricks, nic_response,
+ wait_timeout, "create_nic")
+
+ return nic_response
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
+
+def delete_nic(module, profitbricks):
+ """
+ Removes a NIC
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was removed, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+    # A raw UUID is already resolved; otherwise look the server up by name.
+    server_found = bool(uuid_match.match(server))
+    if not server_found:
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server_found = True
+ server = s['id']
+ break
+
+ if not server_found:
+ return False
+
+ # Locate UUID for NIC
+    # Same resolution for the NIC: accept a UUID directly or match by name.
+    nic_found = bool(uuid_match.match(name))
+    if not nic_found:
+ nic_list = profitbricks.list_nics(datacenter, server)
+ for n in nic_list['items']:
+ if name == n['properties']['name']:
+ nic_found = True
+ name = n['id']
+ break
+
+ if not nic_found:
+ return False
+
+ try:
+ nic_response = profitbricks.delete_nic(datacenter, server, name)
+ return nic_response
+ except Exception as e:
+ module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(required=True),
+ server=dict(required=True),
+ name=dict(),
+ lan=dict(),
+ subscription_user=dict(required=True),
+ subscription_password=dict(required=True, no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ ),
+ required_if=(
+ ('state', 'absent', ['name']),
+ ('state', 'present', ['lan']),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = delete_nic(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+ elif state == 'present':
+ try:
+ (nic_dict) = create_nic(module, profitbricks)
+ module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
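
When no NIC name is supplied on create, the module derives a short default from a UUID4 via _make_default_name; deletes always require an explicit name (see required_if), since a freshly generated default could never match an existing NIC. A minimal sketch of the derivation:

    import uuid

    def make_default_name():
        # Mirrors _make_default_name: strip hyphens, keep the first 10 hex chars.
        return str(uuid.uuid4()).replace('-', '')[:10]

    name = make_default_name()
    print(name, len(name))  # e.g. '7341c2454f' 10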
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py
new file mode 100644
index 00000000..a63cbcdd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume
+short_description: Create or destroy a volume.
+description:
+ - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to create the volumes.
+ type: str
+ name:
+ description:
+ - The name of the volumes. You can enumerate the names using auto_increment.
+ type: str
+ size:
+ description:
+ - The size of the volume.
+ type: int
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type.
+ type: str
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ image:
+ description:
+ - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ required: false
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ required: false
+ disk_type:
+ description:
+ - The disk type of the volume.
+ type: str
+ required: false
+ default: HDD
+ choices: [ "HDD", "SSD" ]
+ licence_type:
+ description:
+ - The licence type for the volume. This is used when the image is non-standard.
+ - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
+ type: str
+ required: false
+ default: UNKNOWN
+ count:
+ description:
+ - The number of volumes you wish to create.
+ type: int
+ required: false
+ default: 1
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ default: yes
+ type: bool
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ type: list
+ required: false
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - create or terminate datacenters
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create multiple volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ name: vol%02d
+ count: 5
+ auto_increment: yes
+ wait_timeout: 500
+ state: present
+
+- name: Remove Volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ instance_ids:
+ - 'vol01'
+ - 'vol02'
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+import traceback
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_volume(module, profitbricks, datacenter, name):
+ size = module.params.get('size')
+ bus = module.params.get('bus')
+ image = module.params.get('image')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ disk_type = module.params.get('disk_type')
+ licence_type = module.params.get('licence_type')
+ wait_timeout = module.params.get('wait_timeout')
+ wait = module.params.get('wait')
+
+ try:
+ v = Volume(
+ name=name,
+ size=size,
+ bus=bus,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ licence_type=licence_type
+ )
+
+ volume_response = profitbricks.create_volume(datacenter, v)
+
+ if wait:
+ _wait_for_completion(profitbricks, volume_response,
+ wait_timeout, "_create_volume")
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the volume: %s" % str(e))
+
+ return volume_response
+
+
+def _delete_volume(module, profitbricks, datacenter, volume):
+ try:
+ profitbricks.delete_volume(datacenter, volume)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the volume: %s" % str(e))
+
+
+def create_volume(module, profitbricks):
+ """
+ Creates a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+
+ datacenter_found = False
+ failed = True
+ volumes = []
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ datacenter_found = True
+ break
+
+ if not datacenter_found:
+ module.fail_json(msg='datacenter could not be found.')
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+            # Python 3 exceptions have no .message attribute; use to_native.
+            if to_native(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name] * count
+
+ for name in names:
+ create_response = _create_volume(module, profitbricks, str(datacenter), name)
+ volumes.append(create_response)
+ _attach_volume(module, profitbricks, datacenter, create_response['id'])
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'volumes': volumes,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in volumes],
+ }
+ }
+
+ return results
+
+
+def delete_volume(module, profitbricks):
+ """
+ Removes a volume.
+
+    This will remove a volume from a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was removed, false otherwise
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+        module.fail_json(msg='instance_ids should be a list of volume ids or names, aborting')
+
+ datacenter = module.params.get('datacenter')
+ changed = False
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ for n in instance_ids:
+        if uuid_match.match(n):
+ _delete_volume(module, profitbricks, datacenter, n)
+ changed = True
+ else:
+ volumes = profitbricks.list_volumes(datacenter)
+ for v in volumes['items']:
+ if n == v['properties']['name']:
+ volume_id = v['id']
+ _delete_volume(module, profitbricks, datacenter, volume_id)
+ changed = True
+
+ return changed
+
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ server = module.params.get('server')
+
+ # Locate UUID for Server
+ if server:
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ try:
+ return profitbricks.attach_volume(datacenter, server, volume)
+ except Exception as e:
+ module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(),
+ size=dict(type='int', default=10),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ image=dict(),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ licence_type=dict(default='UNKNOWN'),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+    if not HAS_PB_SDK:
+        module.fail_json(msg='profitbricks required for this module')
+
+    if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+            module.fail_json(msg='datacenter parameter is required for removing volumes.')
+
+ try:
+ (changed) = delete_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for new instance')
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+
+ try:
+ (volume_dict_array) = create_volume(module, profitbricks)
+ module.exit_json(**volume_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
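
create_volume (like create_virtual_machine in profitbricks.py) expands printf-style name patterns when auto_increment is set, appending '%d' when the pattern has no conversion specifier. A self-contained sketch of that enumeration:

    def enumerate_names(pattern, count, start=1):
        """Expand a printf-style pattern into count names."""
        try:
            pattern % 0
        except TypeError:
            # "not all arguments converted": no %d in the pattern, append one.
            pattern = '%s%%d' % pattern
        return [pattern % n for n in range(start, start + count)]

    print(enumerate_names('vol%02d', 3))  # ['vol01', 'vol02', 'vol03']
    print(enumerate_names('web', 2))      # ['web1', 'web2']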
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
new file mode 100644
index 00000000..72f03e67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume_attachments
+short_description: Attach or detach a volume.
+description:
+  - Allows you to attach a volume to, or detach a volume from, a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ server:
+ description:
+      - The name of the server to attach the volume to or detach it from.
+ type: str
+ volume:
+ description:
+ - The volume name or ID.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Attach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: present
+
+- name: Detach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: absent
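+
+# A minimal sketch with explicit credentials; 'pb_user' and 'pb_password'
+# are hypothetical vault-backed variables, not module defaults.
+- name: Attach a volume with explicit credentials
+ community.general.profitbricks_volume_attachments:
+ subscription_user: '{{ pb_user }}'
+ subscription_password: '{{ pb_password }}'
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ state: present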
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
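+# Matches canonical UUIDs such as '3c7161f6-6ae1-4c1d-bce8-3b5b7d531a7e'
+# (case-insensitive); used below to tell IDs apart from names that still
+# need to be resolved against the API.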
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request failed to complete ' + msg + ' "' + str(
+ promise['requestId']) + '" to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def attach_volume(module, profitbricks):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ the API response from the attach volume request
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.attach_volume(datacenter, server, volume)
+
+
+def detach_volume(module, profitbricks):
+ """
+ Detaches a volume.
+
+ This will remove a volume from the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ the API response from the detach volume request
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.detach_volume(datacenter, server, volume)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ volume=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks is required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+ if not module.params.get('volume'):
+ module.fail_json(msg='volume parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ changed = detach_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+ elif state == 'present':
+ try:
+ attach_volume(module, profitbricks)
+ module.exit_json()
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py
new file mode 100644
index 00000000..8d9374a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py
@@ -0,0 +1,626 @@
+#!/usr/bin/python
+#
+# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
+# Frameworks
+# Copyright (C) 2016 PubNub Inc.
+# http://www.pubnub.com/
+# http://www.pubnub.com/terms
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pubnub_blocks
+short_description: PubNub blocks management module.
+description:
+ - "This module allows Ansible to interface with the PubNub BLOCKS
+ infrastructure by providing the following operations: create / remove,
+ start / stop and rename for blocks and create / modify / remove for event
+ handlers"
+author:
+ - PubNub <support@pubnub.com> (@pubnub)
+ - Sergey Mamontov <sergey@pubnub.com> (@parfeon)
+requirements:
+ - "python >= 2.7"
+ - "pubnub_blocks_client >= 1.0"
+options:
+ email:
+ description:
+ - Email of the account for which a new session should be started.
+ - "Not required if C(cache) contains the result of a previous module
+ call (in the same play)."
+ required: false
+ type: str
+ password:
+ description:
+ - Password matching the account to which the specified C(email) belongs.
+ - "Not required if C(cache) contains the result of a previous module
+ call (in the same play)."
+ required: false
+ type: str
+ cache:
+ description: >
+ If a single play uses the blocks management module several times, it is
+ preferable to enable 'caching' by having a previous module call share
+ the gathered artifacts and passing them via this parameter.
+ required: false
+ type: dict
+ default: {}
+ account:
+ description:
+ - "Name of PubNub account for from which C(application) will be used to
+ manage blocks."
+ - "User's account will be used if value not set or empty."
+ type: str
+ required: false
+ application:
+ description:
+ - "Name of target PubNub application for which blocks configuration on
+ specific C(keyset) will be done."
+ type: str
+ required: true
+ keyset:
+ description:
+ - Name of the application's keyset to which the managed blocks are bound.
+ type: str
+ required: true
+ state:
+ description:
+ - "Intended block state after event handlers creation / update process
+ will be completed."
+ required: false
+ default: 'present'
+ choices: ['started', 'stopped', 'present', 'absent']
+ type: str
+ name:
+ description:
+ - Name of the managed block, which will later be visible on admin.pubnub.com.
+ required: true
+ type: str
+ description:
+ description:
+ - Short block description which will later be visible on
+ admin.pubnub.com. Used only if the block doesn't exist; it won't
+ change the description of an existing block.
+ required: false
+ type: str
+ event_handlers:
+ description:
+ - "List of event handlers which should be updated for specified block
+ C(name)."
+ - "Each entry for new event handler should contain: C(name), C(src),
+ C(channels), C(event). C(name) used as event handler name which can be
+ used later to make changes to it."
+ - C(src) is full path to file with event handler code.
+ - "C(channels) is name of channel from which event handler is waiting
+ for events."
+ - "C(event) is type of event which is able to trigger event handler:
+ I(js-before-publish), I(js-after-publish), I(js-after-presence)."
+ - "Each entry for existing handlers should contain C(name) (so target
+ handler can be identified). Rest parameters (C(src), C(channels) and
+ C(event)) can be added if changes required for them."
+ - "It is possible to rename event handler by adding C(changes) key to
+ event handler payload and pass dictionary, which will contain single key
+ C(name), where new name should be passed."
+ - "To remove particular event handler it is possible to set C(state) for
+ it to C(absent) and it will be removed."
+ required: false
+ default: []
+ type: list
+ changes:
+ description:
+ - "List of fields which should be changed by block itself (doesn't
+ affect any event handlers)."
+ - "Possible options for change is: C(name)."
+ required: false
+ default: {}
+ type: dict
+ validate_certs:
+ description:
+ - "This key allow to try skip certificates check when performing REST API
+ calls. Sometimes host may have issues with certificates on it and this
+ will cause problems to call PubNub REST API."
+ - If check should be ignored C(False) should be passed to this parameter.
+ required: false
+ default: true
+ type: bool
+'''
+
+EXAMPLES = '''
+# Event handler create example.
+- name: Create single event handler
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ src: '{{ path_to_handler_source }}'
+ name: '{{ handler_name }}'
+ event: 'js-before-publish'
+ channels: '{{ handler_channel }}'
+
+# Change event handler trigger event type.
+- name: Change event handler 'event'
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ name: '{{ handler_name }}'
+ event: 'js-after-publish'
+
+# Stop block and event handlers.
+- name: Stopping block
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: stopped
+
+# Multiple module calls with cached result passing
+- name: Create '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_1_source }}'
+ name: '{{ event_handler_1_name }}'
+ channels: '{{ event_handler_1_channel }}'
+ event: 'js-before-publish'
+- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_2_source }}'
+ name: '{{ event_handler_2_name }}'
+ channels: '{{ event_handler_2_channel }}'
+ event: 'js-before-publish'
+- name: Start '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: started
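+
+# A minimal sketch of removing a single event handler, per the
+# C(event_handlers) documentation above (all values are placeholders).
+- name: Remove '{{ handler_name }}' handler from '{{ block_name }}'
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ name: '{{ handler_name }}'
+ state: absent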
+'''
+
+RETURN = '''
+module_cache:
+ description: "Cached account information. In case if with single play module
+ used few times it is better to pass cached data to next module calls to speed
+ up process."
+ type: dict
+ returned: always
+'''
+
+import copy
+import os
+
+try:
+ # Import PubNub BLOCKS client.
+ from pubnub_blocks_client import User, Account, Owner, Application, Keyset
+ from pubnub_blocks_client import Block, EventHandler
+ from pubnub_blocks_client import exceptions
+ HAS_PUBNUB_BLOCKS_CLIENT = True
+except ImportError:
+ HAS_PUBNUB_BLOCKS_CLIENT = False
+ User = None
+ Account = None
+ Owner = None
+ Application = None
+ Keyset = None
+ Block = None
+ EventHandler = None
+ exceptions = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+def pubnub_user(module):
+ """Create and configure user model if it possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+
+ :rtype: User
+ :return: Reference on initialized and ready to use user, or 'None' if
+ not all required information has been passed to the module.
+ """
+ user = None
+ params = module.params
+
+ if params.get('cache') and params['cache'].get('module_cache'):
+ cache = params['cache']['module_cache']
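+ # The cached payload stores the serialized user under the 'pnm_user'
+ # key (see how module_cache is assembled in main() below).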
+ user = User()
+ user.restore(cache=copy.deepcopy(cache['pnm_user']))
+ elif params.get('email') and params.get('password'):
+ user = User(email=params.get('email'), password=params.get('password'))
+ else:
+ err_msg = 'It looks like no account credentials have been passed ' \
+ 'or the \'cache\' field doesn\'t contain the result of a ' \
+ 'previous module call.'
+ module.fail_json(msg='Missing account credentials.',
+ description=err_msg, changed=False)
+
+ return user
+
+
+def pubnub_account(module, user):
+ """Create and configure account if it is possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type user: User
+ :param user: Reference on authorized user for which one of accounts
+ should be used during manipulations with block.
+
+ :rtype: Account
+ :return: Reference on initialized and ready to use account, or 'None'
+ if not all required information has been passed to the module.
+ """
+ params = module.params
+ if params.get('account'):
+ account_name = params.get('account')
+ account = user.account(name=params.get('account'))
+ if account is None:
+ err_frmt = 'It looks like there is no \'{0}\' account for the ' \
+ 'authorized user. Please make sure the correct name ' \
+ 'has been passed during module configuration.'
+ module.fail_json(msg='Missing account.',
+ description=err_frmt.format(account_name),
+ changed=False)
+ else:
+ account = user.accounts()[0]
+
+ return account
+
+
+def pubnub_application(module, account):
+ """Retrieve reference on target application from account model.
+
+ NOTE: If account authorization fails or there is no application with
+ the specified name, the module will exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model from which reference
+ on application should be fetched.
+
+ :rtype: Application
+ :return: Reference on initialized and ready to use application model.
+ """
+ application = None
+ params = module.params
+ try:
+ application = account.application(params['application'])
+ except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=dict(account))
+
+ if application is None:
+ err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
+ 'correct application name has been passed. If the application ' \
+ 'doesn\'t exist you can create it on admin.pubnub.com.'
+ email = account.owner.email
+ module.fail_json(msg=err_fmt.format(params['application'], email),
+ changed=account.changed, module_cache=dict(account))
+
+ return application
+
+
+def pubnub_keyset(module, account, application):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: If there is no keyset with the specified name, the module will
+ exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be
+ used in case of error to export cached data.
+ :type application: Application
+ :param application: Reference on PubNub application model from which
+ reference on keyset should be fetched.
+
+ :rtype: Keyset
+ :return: Reference on initialized and ready to use keyset model.
+ """
+ params = module.params
+ keyset = application.keyset(params['keyset'])
+ if keyset is None:
+ err_fmt = 'There is no \'{0}\' keyset for the \'{1}\' application. ' \
+ 'Make sure the correct keyset name has been passed. If the ' \
+ 'keyset doesn\'t exist you can create it on admin.pubnub.com.'
+ module.fail_json(msg=err_fmt.format(params['keyset'],
+ application.name),
+ changed=account.changed, module_cache=dict(account))
+
+ return keyset
+
+
+def pubnub_block(module, account, keyset):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: If there is no block with the specified name and the module is
+ configured to start/stop it, the module will exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be used in
+ case of error to export cached data.
+ :type keyset: Keyset
+ :param keyset: Reference on keyset model from which reference on block
+ should be fetched.
+
+ :rtype: Block
+ :return: Reference on initialized and ready to use block model.
+ """
+ block = None
+ params = module.params
+ try:
+ block = keyset.block(params['name'])
+ except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed, module_cache=dict(account))
+
+ # Report an error if the block doesn't exist but a start/stop was
+ # requested at the same time.
+ if block is None and params['state'] in ['started', 'stopped']:
+ block_name = params.get('name')
+ module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
+ changed=account.changed, module_cache=dict(account))
+
+ if block is None and params['state'] == 'present':
+ block = Block(name=params.get('name'),
+ description=params.get('description'))
+ keyset.add_block(block)
+
+ if block:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+ if params.get('description'):
+ block.description = params.get('description')
+
+ return block
+
+
+def pubnub_event_handler(block, data):
+ """Retrieve reference on target event handler from application model.
+
+ :type block: Block
+ :param block: Reference on block model from which reference on event
+ handlers should be fetched.
+ :type data: dict
+ :param data: Reference on dictionary which contain information about
+ event handler and whether it should be created or not.
+
+ :rtype: EventHandler
+ :return: Reference on initialized and ready to use event handler model.
+ 'None' will be returned if there is no handler with the
+ specified name and no request to create it.
+ """
+ event_handler = block.event_handler(data['name'])
+
+ # Prepare payload for event handler update.
+ changed_name = (data.pop('changes').get('name')
+ if 'changes' in data else None)
+ name = data.get('name') or changed_name
+ channels = data.get('channels')
+ event = data.get('event')
+ code = _content_of_file_at_path(data.get('src'))
+ state = data.get('state') or 'present'
+
+ # Create event handler if required.
+ if event_handler is None and state == 'present':
+ event_handler = EventHandler(name=name, channels=channels, event=event,
+ code=code)
+ block.add_event_handler(event_handler)
+
+ # Update event handler if required.
+ if event_handler is not None and state == 'present':
+ if name is not None:
+ event_handler.name = name
+ if channels is not None:
+ event_handler.channels = channels
+ if event is not None:
+ event_handler.event = event
+ if code is not None:
+ event_handler.code = code
+
+ return event_handler
+
+
+def _failure_title_from_exception(exception):
+ """Compose human-readable title for module error title.
+
+ The title will be based on status codes if they have been provided.
+ :type exception: exceptions.GeneralPubNubError
+ :param exception: Reference on exception for which title should be
+ composed.
+
+ :rtype: str
+ :return: Reference on error title which should be shown on module
+ failure.
+ """
+ title = 'General REST API access error.'
+ if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
+ title = 'Authorization error: missing credentials.'
+ elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
+ title = 'Authorization error: wrong credentials.'
+ elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
+ title = 'API access error: insufficient access rights.'
+ elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
+ title = 'API access error: time token expired.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
+ title = 'Block create did fail: block with the same name already exists.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
+ title = 'Unable to fetch list of blocks for keyset.'
+ elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
+ title = 'Block creation did fail.'
+ elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
+ title = 'Block update did fail.'
+ elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
+ title = 'Block removal did fail.'
+ elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
+ title = 'Block start/stop did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
+ title = 'Event handler creation did fail: missing fields.'
+ elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
+ title = 'Event handler creation did fail: handler with the same name already exists.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
+ title = 'Event handler creation did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
+ title = 'Event handler update did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
+ title = 'Event handler removal did fail.'
+
+ return title
+
+
+def _content_of_file_at_path(path):
+ """Read file content.
+
+ Try to read the content of the file at the specified path.
+ :type path: str
+ :param path: Full path to the location of the file which should be read.
+ :rtype: content
+ :return: File content or 'None'
+ """
+ content = None
+ if path and os.path.exists(path):
+ with open(path, mode="rt") as opened_file:
+ b_content = opened_file.read()
+ try:
+ content = to_text(b_content, errors='surrogate_or_strict')
+ except UnicodeError:
+ pass
+
+ return content
+
+
+def main():
+ fields = dict(
+ email=dict(default='', required=False, type='str'),
+ password=dict(default='', required=False, type='str', no_log=True),
+ account=dict(default='', required=False, type='str'),
+ application=dict(required=True, type='str'),
+ keyset=dict(required=True, type='str'),
+ state=dict(default='present', type='str',
+ choices=['started', 'stopped', 'present', 'absent']),
+ name=dict(required=True, type='str'), description=dict(type='str'),
+ event_handlers=dict(default=list(), type='list'),
+ changes=dict(default=dict(), type='dict'),
+ cache=dict(default=dict(), type='dict'),
+ validate_certs=dict(default=True, type='bool'))
+ module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+
+ if not HAS_PUBNUB_BLOCKS_CLIENT:
+ module.fail_json(msg='pubnub_blocks_client required for this module.')
+
+ params = module.params
+
+ # Authorize user.
+ user = pubnub_user(module)
+ # Initialize PubNub account instance.
+ account = pubnub_account(module, user=user)
+ # Try fetch application with which module should work.
+ application = pubnub_application(module, account=account)
+ # Try fetch keyset with which module should work.
+ keyset = pubnub_keyset(module, account=account, application=application)
+ # Try fetch block with which module should work.
+ block = pubnub_block(module, account=account, keyset=keyset)
+ is_new_block = block is not None and block.uid == -1
+
+ # Check whether block should be removed or not.
+ if block is not None and params['state'] == 'absent':
+ keyset.remove_block(block)
+ block = None
+
+ if block is not None:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+
+ # Process changes to the block's event handlers.
+ for event_handler_data in params.get('event_handlers') or list():
+ state = event_handler_data.get('state') or 'present'
+ event_handler = pubnub_event_handler(data=event_handler_data,
+ block=block)
+ if state == 'absent' and event_handler:
+ block.delete_event_handler(event_handler)
+
+ # Update block operation state if required.
+ if block and not is_new_block:
+ if params['state'] == 'started':
+ block.start()
+ elif params['state'] == 'stopped':
+ block.stop()
+
+ # Save current account state.
+ if not module.check_mode:
+ try:
+ account.save()
+ except (exceptions.APIAccessError, exceptions.KeysetError,
+ exceptions.BlockError, exceptions.EventHandlerError,
+ exceptions.GeneralPubNubError) as exc:
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=module_cache)
+
+ # Report module execution results.
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ changed_will_change = account.changed or account.will_change
+ module.exit_json(changed=changed_will_change, module_cache=module_cache)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py
new file mode 100644
index 00000000..9f7df5c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py
@@ -0,0 +1,897 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax
+short_description: create / delete an instance in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud instance and optionally
+ waits for it to be 'running'.
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number with the name of the
+ created servers. Only applicable when used with the I(group) attribute
+ or meta key.
+ type: bool
+ default: 'yes'
+ boot_from_volume:
+ description:
+ - Whether or not to boot the instance from a Cloud Block Storage volume.
+ If C(yes) and I(image) is specified a new volume will be created at
+ boot time. I(boot_volume_size) is required with I(image) to create a
+ new volume at boot time.
+ type: bool
+ default: 'no'
+ boot_volume:
+ type: str
+ description:
+ - Cloud Block Storage ID or Name to use as the boot volume of the
+ instance
+ boot_volume_size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes. This is only required with
+ I(image) and I(boot_from_volume).
+ default: 100
+ boot_volume_terminate:
+ description:
+ - Whether the I(boot_volume) or newly created volume from I(image) will
+ be terminated when the server is terminated
+ type: bool
+ default: 'no'
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ count:
+ type: int
+ description:
+ - number of instances to launch
+ default: 1
+ count_offset:
+ type: int
+ description:
+ - the number to start counting at when naming servers
+ default: 1
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+ - If not specified it will assume the value C(auto).
+ choices:
+ - auto
+ - manual
+ exact_count:
+ description:
+ - Explicitly ensure an exact count of instances, used with
+ state=present. If specified as C(yes) and I(count) is less than
+ the servers matched, servers will be deleted to match the count. If
+ the number of matched servers is fewer than specified in I(count)
+ additional servers will be added.
+ type: bool
+ default: 'no'
+ extra_client_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating the cloudservers
+ client. This is considered an advanced option, use it wisely and
+ with caution.
+ extra_create_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating a new server.
+ This is considered an advanced option, use it wisely and with caution.
+ files:
+ type: dict
+ description:
+ - Files to insert into the instance, as a mapping of remote file name to local file path.
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ group:
+ type: str
+ description:
+ - host group to assign to the server; also used for idempotent
+ operations to ensure a specific number of instances
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name).
+ With I(boot_from_volume), a Cloud Block Storage volume will be created
+ with this image
+ instance_ids:
+ type: list
+ description:
+ - list of instance ids, currently only used when state='absent' to
+ remove instances
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ aliases:
+ - keypair
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the instance
+ networks:
+ type: list
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+ - Data to be uploaded to the server's config drive. This option implies
+ I(config_drive). Can be a file path or a string
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Jesse Keating (@omgjlk)"
+ - "Matt Martz (@sivel)"
+notes:
+ - I(exact_count) can be "destructive" if the number of running servers in
+ the I(group) is larger than that specified in I(count). In such a case, the
+ I(state) is effectively set to C(absent) and the extra servers are deleted.
+ In the case of deletion, the returned data structure will have C(action)
+ set to C(delete), and the oldest servers in the group will be deleted.
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Server
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: rax-test1
+ flavor: 5
+ image: b11d9567-e412-4255-96b9-bd63ab23bcfe
+ key_name: my_rackspace_key
+ files:
+ /root/test.txt: /home/localuser/test.txt
+ wait: yes
+ state: present
+ networks:
+ - private
+ - public
+ register: rax
+
+- name: Build an exact count of cloud servers with incremented names
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build requests
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: test%03d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ state: present
+ count: 10
+ count_offset: 10
+ exact_count: yes
+ group: test
+ wait: yes
+ register: rax
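+
+# A minimal sketch of deleting servers by ID; '{{ server_id }}' is a
+# hypothetical placeholder for an ID returned by a previous run.
+- name: Remove a Cloud Server by instance id
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server remove request
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ instance_ids:
+ - '{{ server_id }}'
+ wait: yes
+ state: absent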
+'''
+
+import json
+import os
+import re
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume,
+ rax_find_image, rax_find_network, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.six import string_types
+
+
+def rax_find_server_image(module, server, image, boot_volume):
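+ # Resolve the image backing this server, covering both image-backed and
+ # boot-from-volume servers; returns None when the server does not match
+ # the requested image / boot volume.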
+ if not image and boot_volume:
+ vol = rax_find_bootable_volume(module, pyrax, server,
+ exit=False)
+ if not vol:
+ return None
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if vol_image_id:
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if server_image:
+ server.image = dict(id=server_image)
+
+ # Match image IDs taking care of boot from volume
+ if image and not server.image:
+ vol = rax_find_bootable_volume(module, pyrax, server)
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if not vol_image_id:
+ return None
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if image != server_image:
+ return None
+
+ server.image = dict(id=server_image)
+ elif image and server.image['id'] != image:
+ return None
+
+ return server.image
+
+
+def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
+ files=None, wait=True, wait_timeout=300, disk_config=None,
+ group=None, nics=None, extra_create_args=None, user_data=None,
+ config_drive=False, existing=None, block_device_mapping_v2=None):
+ names = [] if names is None else names
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ nics = [] if nics is None else nics
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+ existing = [] if existing is None else existing
+ block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2
+
+ cs = pyrax.cloudservers
+ changed = False
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(os.path.expanduser(user_data)):
+ try:
+ user_data = os.path.expanduser(user_data)
+ f = open(user_data)
+ user_data = f.read()
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % user_data)
+
+ # Handle the file contents
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ fileobj = open(lpath, 'r')
+ files[rpath] = fileobj.read()
+ fileobj.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % lpath)
+ try:
+ servers = []
+ bdmv2 = block_device_mapping_v2
+ for name in names:
+ servers.append(cs.servers.create(name=name, image=image,
+ flavor=flavor, meta=meta,
+ key_name=key_name,
+ files=files, nics=nics,
+ disk_config=disk_config,
+ config_drive=config_drive,
+ userdata=user_data,
+ block_device_mapping_v2=bdmv2,
+ **extra_create_args))
+ except Exception as e:
+ if getattr(e, 'message', None):
+ msg = str(e.message)
+ else:
+ msg = repr(e)
+ module.fail_json(msg=msg)
+ else:
+ changed = True
+
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+
+ if not [s for s in servers
+ if s.status not in FINAL_STATUSES]:
+ break
+ time.sleep(5)
+
+ success = []
+ error = []
+ timeout = []
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+ instance = rax_to_dict(server, 'server')
+ if server.status == 'ACTIVE' or not wait:
+ success.append(instance)
+ elif server.status == 'ERROR':
+ error.append(instance)
+ elif wait:
+ timeout.append(instance)
+
+ untouched = [rax_to_dict(s, 'server') for s in existing]
+ instances = success + untouched
+
+ results = {
+ 'changed': changed,
+ 'action': 'create',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to build'
+ elif error:
+ results['msg'] = 'Failed to build all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
+ instance_ids = [] if instance_ids is None else instance_ids
+ kept = [] if kept is None else kept
+
+ cs = pyrax.cloudservers
+
+ changed = False
+ instances = {}
+ servers = []
+
+ for instance_id in instance_ids:
+ servers.append(cs.servers.get(instance_id))
+
+ for server in servers:
+ try:
+ server.delete()
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ else:
+ changed = True
+
+ instance = rax_to_dict(server, 'server')
+ instances[instance['id']] = instance
+
+ # If requested, wait for server deletion
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ instance_id = server.id
+ try:
+ server.get()
+ except Exception:
+ instances[instance_id]['status'] = 'DELETED'
+ instances[instance_id]['rax_status'] = 'DELETED'
+
+ if not [s for s in instances.values()
+ if s['status'] not in ('', 'DELETED', 'ERROR')]:
+ break
+
+ time.sleep(5)
+
+ timeout = [s for s in instances.values()
+ if s['status'] not in ('', 'DELETED', 'ERROR')]
+ error = [s for s in instances.values()
+ if s['status'] == 'ERROR']
+ success = [s for s in instances.values()
+ if s['status'] in ('', 'DELETED')]
+
+ instances = [rax_to_dict(s, 'server') for s in kept]
+
+ results = {
+ 'changed': changed,
+ 'action': 'delete',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to delete'
+ elif error:
+ results['msg'] = 'Failed to delete all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def cloudservers(module, state=None, name=None, flavor=None, image=None,
+ meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
+ disk_config=None, count=1, group=None, instance_ids=None,
+ exact_count=False, networks=None, count_offset=0,
+ auto_increment=False, extra_create_args=None, user_data=None,
+ config_drive=False, boot_from_volume=False,
+ boot_volume=None, boot_volume_size=None,
+ boot_volume_terminate=False):
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ instance_ids = [] if instance_ids is None else instance_ids
+ networks = [] if networks is None else networks
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+
+ cs = pyrax.cloudservers
+ cnw = pyrax.cloud_networks
+ if not cnw:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present' or (state == 'absent' and instance_ids is None):
+ if not boot_from_volume and not boot_volume and not image:
+ module.fail_json(msg='image is required for the "rax" module')
+
+ for arg, value in dict(name=name, flavor=flavor).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax" module' %
+ arg)
+
+ if boot_from_volume and not image and not boot_volume:
+ module.fail_json(msg='image or boot_volume is required for the '
+ '"rax" module with boot_from_volume')
+
+ if boot_from_volume and image and not boot_volume_size:
+ module.fail_json(msg='boot_volume_size is required for the "rax" '
+ 'module with boot_from_volume and image')
+
+ if boot_from_volume and image and boot_volume:
+ image = None
+
+ servers = []
+
+ # Add the group meta key
+ if group and 'group' not in meta:
+ meta['group'] = group
+ elif 'group' in meta and group is None:
+ group = meta['group']
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ # When using state=absent with group, the absent block won't match the
+ # names properly. Use the exact_count functionality to decrease the count
+ # to the desired level
+ was_absent = False
+ if group is not None and state == 'absent':
+ exact_count = True
+ state = 'present'
+ was_absent = True
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ # act on the state
+ if state == 'present':
+ # Idempotently ensure a specific count of servers
+ if exact_count is not False:
+ # See if we can find servers that match our options
+ if group is None:
+ module.fail_json(msg='"group" must be provided when using '
+ '"exact_count"')
+
+ if auto_increment:
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+ if str(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=str(e))
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
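+ # e.g. name='test%03d' yields pattern r'test(\d+)', so existing
+ # servers 'test001' and 'test003' produce numbers={1, 3}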
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset, count_offset + count)
+ available_numbers = list(set(number_range)
+ .difference(numbers))
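+ # e.g. count_offset=10 and count=3 give the range {10, 11, 12};
+ # if 10 is already taken, 11 and 12 remain available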
+ else: # Not auto incrementing
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ # available_numbers not needed here, we inspect auto_increment
+ # again later
+
+ # If state was absent but the count was changed,
+ # assume we only wanted to remove that number of instances
+ if was_absent:
+ diff = len(servers) - count
+ if diff < 0:
+ count = 0
+ else:
+ count = diff
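+ # e.g. 5 matching servers with count=2 and the original state=absent
+ # leave count=3, so the two extra servers are deleted below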
+
+ if len(servers) > count:
+ # We have more servers than we need, set state='absent'
+ # and delete the extras, this should delete the oldest
+ state = 'absent'
+ kept = servers[:count]
+ del servers[:count]
+ instance_ids = []
+ for server in servers:
+ instance_ids.append(server.id)
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout, kept=kept)
+ elif len(servers) < count:
+ # we have fewer servers than we need
+ if auto_increment:
+ # auto incrementing server numbers
+ names = []
+ name_slice = count - len(servers)
+ numbers_to_use = available_numbers[:name_slice]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # We are not auto incrementing server numbers,
+ # create a list of 'name' that matches how many we need
+ names = [name] * (count - len(servers))
+ else:
+ # we have the right number of servers, just return info
+ # about all of the matched servers
+ instances = []
+ instance_ids = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+ instance_ids.append(server.id)
+ module.exit_json(changed=False, action=None,
+ instances=instances,
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+ else: # not called with exact_count=True
+ if group is not None:
+ if auto_increment:
+ # we are auto incrementing server numbers, but not with
+ # exact_count
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+ if str(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=str(e))
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset,
+ count_offset + count + len(numbers))
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # Not auto incrementing
+ names = [name] * count
+ else:
+ # No group was specified, and not using exact_count
+ # Perform more simplistic matching
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ servers = []
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if server.metadata != meta:
+ continue
+ servers.append(server)
+
+ if len(servers) >= count:
+ # We have more servers than were requested, don't do
+ # anything. Not running with exact_count=True, so we assume
+ # more is OK
+ instances = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+
+ instance_ids = [i['id'] for i in instances]
+ module.exit_json(changed=False, action=None,
+ instances=instances, success=[], error=[],
+ timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ # We need more servers to reach our target; create names for
+ # them (we aren't performing auto_increment here)
+ names = [name] * (count - len(servers))
+
+ block_device_mapping_v2 = []
+ if boot_from_volume:
+ mapping = {
+ 'boot_index': '0',
+ 'delete_on_termination': boot_volume_terminate,
+ 'destination_type': 'volume',
+ }
+ if image:
+ mapping.update({
+ 'uuid': image,
+ 'source_type': 'image',
+ 'volume_size': boot_volume_size,
+ })
+ image = None
+ elif boot_volume:
+ volume = rax_find_volume(module, pyrax, boot_volume)
+ mapping.update({
+ 'uuid': pyrax.utils.get_id(volume),
+ 'source_type': 'volume',
+ })
+ block_device_mapping_v2.append(mapping)
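+ # With image set, this asks the compute service to build a fresh
+ # bootable volume from that image; with boot_volume set, it boots
+ # from the existing volume resolved above.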
+
+ create(module, names=names, flavor=flavor, image=image,
+ meta=meta, key_name=key_name, files=files, wait=wait,
+ wait_timeout=wait_timeout, disk_config=disk_config, group=group,
+ nics=nics, extra_create_args=extra_create_args,
+ user_data=user_data, config_drive=config_drive,
+ existing=servers,
+ block_device_mapping_v2=block_device_mapping_v2)
+
+ elif state == 'absent':
+ if instance_ids is None:
+ # We weren't given an explicit list of server IDs to delete
+ # Let's match instead
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if meta != server.metadata:
+ continue
+
+ servers.append(server)
+
+ # Build a list of server IDs to delete
+ instance_ids = []
+ for server in servers:
+ if len(instance_ids) < count:
+ instance_ids.append(server.id)
+ else:
+ break
+
+ if not instance_ids:
+ # No server IDs were matched for deletion, or no IDs were
+ # explicitly provided, just exit and don't do anything
+ module.exit_json(changed=False, action=None, instances=[],
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': [],
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ auto_increment=dict(default=True, type='bool'),
+ boot_from_volume=dict(default=False, type='bool'),
+ boot_volume=dict(type='str'),
+ boot_volume_size=dict(type='int', default=100),
+ boot_volume_terminate=dict(type='bool', default=False),
+ config_drive=dict(default=False, type='bool'),
+ count=dict(default=1, type='int'),
+ count_offset=dict(default=1, type='int'),
+ disk_config=dict(choices=['auto', 'manual']),
+ exact_count=dict(default=False, type='bool'),
+ extra_client_args=dict(type='dict', default={}),
+ extra_create_args=dict(type='dict', default={}),
+ files=dict(type='dict', default={}),
+ flavor=dict(),
+ group=dict(),
+ image=dict(),
+ instance_ids=dict(type='list'),
+ key_name=dict(aliases=['keypair']),
+ meta=dict(type='dict', default={}),
+ name=dict(),
+ networks=dict(type='list', default=['public', 'private']),
+ service=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ service = module.params.get('service')
+
+ if service is not None:
+ module.fail_json(msg='The "service" attribute has been deprecated, '
+ 'please remove "service: cloudservers" from your '
+ 'playbook pertaining to the "rax" module')
+
+ auto_increment = module.params.get('auto_increment')
+ boot_from_volume = module.params.get('boot_from_volume')
+ boot_volume = module.params.get('boot_volume')
+ boot_volume_size = module.params.get('boot_volume_size')
+ boot_volume_terminate = module.params.get('boot_volume_terminate')
+ config_drive = module.params.get('config_drive')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ exact_count = module.params.get('exact_count', False)
+ extra_client_args = module.params.get('extra_client_args')
+ extra_create_args = module.params.get('extra_create_args')
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ group = module.params.get('group')
+ image = module.params.get('image')
+ instance_ids = module.params.get('instance_ids')
+ key_name = module.params.get('key_name')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ if extra_client_args:
+ pyrax.cloudservers = pyrax.connect_to_cloudservers(
+ region=pyrax.cloudservers.client.region_name,
+ **extra_client_args)
+ client = pyrax.cloudservers.client
+ if 'bypass_url' in extra_client_args:
+ client.management_url = extra_client_args['bypass_url']
+
+ if pyrax.cloudservers is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloudservers(module, state=state, name=name, flavor=flavor,
+ image=image, meta=meta, key_name=key_name, files=files,
+ wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
+ count=count, group=group, instance_ids=instance_ids,
+ exact_count=exact_count, networks=networks,
+ count_offset=count_offset, auto_increment=auto_increment,
+ extra_create_args=extra_create_args, user_data=user_data,
+ config_drive=config_drive, boot_from_volume=boot_from_volume,
+ boot_volume=boot_volume, boot_volume_size=boot_volume_size,
+ boot_volume_terminate=boot_volume_terminate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py
new file mode 100644
index 00000000..a681feff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs
+short_description: Manipulate Rackspace Cloud Block Storage Volumes
+description:
+ - Manipulate Rackspace Cloud Block Storage Volumes
+options:
+ description:
+ type: str
+ description:
+ - Description to give the volume being created
+ image:
+ type: str
+ description:
+ - image to use for bootable volumes. Can be an C(id), C(human_id) or
+ C(name). This option requires C(pyrax>=1.9.3)
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the volume
+ name:
+ type: str
+ description:
+ - Name to give the volume being created
+ required: true
+ size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes
+ default: 100
+ snapshot_id:
+ type: str
+ description:
+ - The id of the snapshot to create the volume from
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ volume_type:
+ type: str
+ description:
+ - Type of the volume being created
+ choices:
+ - SATA
+ - SSD
+ default: SATA
+ wait:
+ description:
+ - wait for the volume to be in state 'available' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume create request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-volume
+ description: My Volume
+ volume_type: SSD
+ size: 150
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_volume
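+
+# A minimal sketch of the matching teardown for the volume created above.
+- name: Delete a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume delete request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-volume
+ region: DFW
+ state: absent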
+'''
+
+from distutils.version import LooseVersion
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+
+
+def cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image):
+ changed = False
+ volume = None
+ instance = {}
+
+ cbs = pyrax.cloud_blockstorage
+
+ if cbs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if image:
+ # pyrax<1.9.3 did not have support for specifying an image when
+ # creating a volume which is required for bootable volumes
+ if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
+ module.fail_json(msg='Creating a bootable volume requires '
+ 'pyrax>=1.9.3')
+ image = rax_find_image(module, pyrax, image)
+
+ volume = rax_find_volume(module, pyrax, name)
+
+ if state == 'present':
+ if not volume:
+ kwargs = dict()
+ if image:
+ kwargs['image'] = image
+ try:
+ volume = cbs.create(name, size=size, volume_type=volume_type,
+ description=description,
+ metadata=meta,
+ snapshot_id=snapshot_id, **kwargs)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(volume, interval=5,
+ attempts=attempts)
+
+ volume.get()
+ instance = rax_to_dict(volume)
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait and volume.status not in VOLUME_STATUS:
+ result['msg'] = 'Timeout waiting on %s' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if volume:
+ instance = rax_to_dict(volume)
+ try:
+ volume.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ description=dict(type='str'),
+ image=dict(type='str'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ size=dict(type='int', default=100),
+ snapshot_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ description = module.params.get('description')
+ image = module.params.get('image')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ size = module.params.get('size')
+ snapshot_id = module.params.get('snapshot_id')
+ state = module.params.get('state')
+ volume_type = module.params.get('volume_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image)
+
+
+if __name__ == '__main__':
+ main()
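
The DOCUMENTATION above describes the image option for bootable volumes (requiring pyrax>=1.9.3), but the EXAMPLES block only builds a plain volume. A minimal sketch of a bootable-volume task, assuming a hypothetical image name and the DFW region:

- name: Build a bootable Block Storage Volume (sketch)
  local_action:
    module: rax_cbs
    credentials: ~/.raxpub
    name: my-boot-volume
    description: Bootable volume for a server
    image: my-saved-image        # hypothetical image name/id; requires pyrax>=1.9.3
    size: 100
    volume_type: SSD
    region: DFW
    wait: yes
    state: present
  register: my_boot_volume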
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py
new file mode 100644
index 00000000..71d01620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs_attachments
+short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
+description:
+ - Manipulate Rackspace Cloud Block Storage Volume Attachments
+options:
+ device:
+ type: str
+ description:
+ - The device path to attach the volume to, e.g. /dev/xvde.
+ - Before 2.4 this was a required field. Now it can be left as null to auto-assign the device name.
+ volume:
+ type: str
+ description:
+ - Name or id of the volume to attach/detach
+ required: true
+ server:
+ type: str
+ description:
+ - Name or id of the server to attach/detach
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ wait:
+ description:
+ - wait for the volume to be in 'in-use'/'available' state before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Attach a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume attach request
+ local_action:
+ module: rax_cbs_attachments
+ credentials: ~/.raxpub
+ volume: my-volume
+ server: my-server
+ device: /dev/xvdd
+ region: DFW
+ wait: yes
+ state: present
+ register: my_volume
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES,
+ rax_argument_spec,
+ rax_find_server,
+ rax_find_volume,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout):
+ cbs = pyrax.cloud_blockstorage
+ cs = pyrax.cloudservers
+
+ if cbs is None or cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ changed = False
+ instance = {}
+
+ volume = rax_find_volume(module, pyrax, volume)
+
+ if not volume:
+ module.fail_json(msg='No matching storage volumes were found')
+
+ if state == 'present':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ changed = False
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+ else:
+ try:
+ volume.attach_to_instance(server, mountpoint=device)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ volume.get()
+
+ for key, value in vars(volume).items():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(volume, 'status', 'in-use',
+ interval=5, attempts=attempts)
+
+ volume.get()
+ result['volume'] = rax_to_dict(volume)
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ try:
+ volume.detach()
+ if wait:
+ pyrax.utils.wait_until(volume, 'status', 'available',
+ interval=3, attempts=0,
+ verbose=False)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ volume.get()
+ changed = True
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+
+ result = dict(changed=changed, volume=rax_to_dict(volume))
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ device=dict(required=False),
+ volume=dict(required=True),
+ server=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ device = module.params.get('device')
+ volume = module.params.get('volume')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
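
The EXAMPLES block above only demonstrates attaching. A minimal sketch of the corresponding detach request, reusing the same hypothetical volume and server names; per the code, detach waits for the 'available' status when wait is enabled:

- name: Storage volume detach request (sketch)
  local_action:
    module: rax_cbs_attachments
    credentials: ~/.raxpub
    volume: my-volume
    server: my-server
    region: DFW
    wait: yes
    state: absent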
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py
new file mode 100644
index 00000000..5b9996cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb
+short_description: create/delete or resize a Rackspace Cloud Databases instance
+description:
+ - creates / deletes or resizes a Rackspace Cloud Databases instance
+ and optionally waits for it to be 'running'. The name option needs to be
+ unique since it's used to identify the instance.
+options:
+ name:
+ type: str
+ description:
+ - Name of the databases server instance
+ required: yes
+ flavor:
+ type: int
+ description:
+ - flavor to use for the instance, from 1 to 6 (i.e. 512MB to 16GB)
+ default: 1
+ volume:
+ type: int
+ description:
+ - Volume size of the database, 1-150GB
+ default: 2
+ cdb_type:
+ type: str
+ description:
+ - type of instance (i.e. MySQL, MariaDB, Percona)
+ default: MySQL
+ aliases: ['type']
+ cdb_version:
+ type: str
+ description:
+ - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
+ - "The available choices are: C(5.1), C(5.6) and C(10)."
+ default: 5.6
+ aliases: ['version']
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Databases
+ gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax_cdb
+ credentials: ~/.raxpub
+ region: IAD
+ name: db-server1
+ flavor: 1
+ volume: 2
+ cdb_type: MySQL
+ cdb_version: 5.6
+ wait: yes
+ state: present
+ register: rax_db_server
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_instance(name):
+
+ cdb = pyrax.cloud_databases
+ instances = cdb.list()
+ if instances:
+ for instance in instances:
+ if instance.name == name:
+ return instance
+ return False
+
+
+def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ for arg, value in dict(name=name, flavor=flavor,
+ volume=volume, type=cdb_type, version=cdb_version
+ ).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb"'
+ ' module' % arg)
+
+ if not 1 <= volume <= 150:
+ module.fail_json(msg='volume is required to be between 1 and 150')
+
+ cdb = pyrax.cloud_databases
+
+ flavors = []
+ for item in cdb.list_flavors():
+ flavors.append(item.id)
+
+ if flavor not in flavors:
+ module.fail_json(msg='nonexistent flavor reference "%s"' % str(flavor))
+
+ changed = False
+
+ instance = find_instance(name)
+
+ if not instance:
+ action = 'create'
+ try:
+ instance = cdb.create(name=name, flavor=flavor, volume=volume,
+ type=cdb_type, version=cdb_version)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ else:
+ action = None
+
+ if instance.volume.size != volume:
+ action = 'resize'
+ if instance.volume.size > volume:
+ module.fail_json(changed=False, action=action,
+ msg='The new volume size must be larger than '
+ 'the current volume size',
+ cdb=rax_to_dict(instance))
+ instance.resize_volume(volume)
+ changed = True
+
+ if int(instance.flavor.id) != flavor:
+ action = 'resize'
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+ instance.resize(flavor)
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'ACTIVE':
+ module.fail_json(changed=changed, action=action,
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" database instance to '
+ 'be created' % name)
+
+ module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
+
+
+def delete_instance(module, name, wait, wait_timeout):
+
+ if not name:
+ module.fail_json(msg='name is required for the "rax_cdb" module')
+
+ changed = False
+
+ instance = find_instance(name)
+ if not instance:
+ module.exit_json(changed=False, action='delete')
+
+ try:
+ instance.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'SHUTDOWN':
+ module.fail_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" database instance to '
+ 'be deleted' % name)
+
+ module.exit_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance))
+
+
+def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ # act on the state
+ if state == 'present':
+ save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout)
+ elif state == 'absent':
+ delete_instance(module, name, wait, wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ flavor=dict(type='int', default=1),
+ volume=dict(type='int', default=2),
+ cdb_type=dict(type='str', default='MySQL', aliases=['type']),
+ cdb_version=dict(type='str', default='5.6', aliases=['version']),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ flavor = module.params.get('flavor')
+ volume = module.params.get('volume')
+ cdb_type = module.params.get('cdb_type')
+ cdb_version = module.params.get('cdb_version')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
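
Since save_instance() resizes an existing instance whenever the requested flavor or volume differs (and refuses to shrink the volume), re-running the module with larger values performs the resize. A sketch against the db-server1 instance from the example above:

- name: Grow an existing Cloud Databases instance (sketch)
  local_action:
    module: rax_cdb
    credentials: ~/.raxpub
    region: IAD
    name: db-server1
    flavor: 2
    volume: 4
    wait: yes
    state: present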
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py
new file mode 100644
index 00000000..6d3435e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_cdb_database
+short_description: 'create / delete a database in the Cloud Databases'
+description:
+ - create / delete a database in the Cloud Databases.
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: yes
+ name:
+ type: str
+ description:
+ - Name to give to the database
+ required: yes
+ character_set:
+ type: str
+ description:
+ - Set of symbols and encodings
+ default: 'utf8'
+ collate:
+ type: str
+ description:
+ - Set of rules for comparing characters in a character set
+ default: 'utf8_general_ci'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a database in Cloud Databases
+ tasks:
+ - name: Database build request
+ local_action:
+ module: rax_cdb_database
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ name: db1
+ state: present
+ register: rax_db_database
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_database(instance, name):
+ try:
+ database = instance.get_database(name)
+ except Exception:
+ return False
+
+ return database
+
+
+def save_database(module, cdb_id, name, character_set, collate):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if not database:
+ try:
+ database = instance.create_database(name=name,
+ character_set=character_set,
+ collate=collate)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='create',
+ database=rax_to_dict(database))
+
+
+def delete_database(module, cdb_id, name):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if database:
+ try:
+ database.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete',
+ database=rax_to_dict(database))
+
+
+def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
+
+ # act on the state
+ if state == 'present':
+ save_database(module, cdb_id, name, character_set, collate)
+ elif state == 'absent':
+ delete_database(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ character_set=dict(type='str', default='utf8'),
+ collate=dict(type='str', default='utf8_general_ci'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('name')
+ character_set = module.params.get('character_set')
+ collate = module.params.get('collate')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_database(module, state, cdb_id, name, character_set, collate)
+
+
+if __name__ == '__main__':
+ main()
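
The character_set and collate options documented above are not exercised in the EXAMPLES block. A sketch that sets both explicitly (here to their documented defaults) and a matching delete request:

- name: Database create request with explicit charset and collation (sketch)
  local_action:
    module: rax_cdb_database
    credentials: ~/.raxpub
    region: IAD
    cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
    name: db2
    character_set: utf8
    collate: utf8_general_ci
    state: present

- name: Database delete request (sketch)
  local_action:
    module: rax_cdb_database
    credentials: ~/.raxpub
    region: IAD
    cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
    name: db2
    state: absent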
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py
new file mode 100644
index 00000000..34be49d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb_user
+short_description: create / delete a user in Rackspace Cloud Databases
+description:
+ - create / delete a user in the Cloud Databases.
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: yes
+ db_username:
+ type: str
+ description:
+ - Name of the database user
+ required: yes
+ db_password:
+ type: str
+ description:
+ - Database user password
+ required: yes
+ databases:
+ type: list
+ description:
+ - Names of the databases that the user can access
+ default: []
+ host:
+ type: str
+ description:
+ - Specifies the host from which a user is allowed to connect to
+ the database. Possible values are a string containing an IPv4 address
+ or "%" to allow connecting from any host
+ default: '%'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a user in Cloud Databases
+ tasks:
+ - name: User build request
+ local_action:
+ module: rax_cdb_user
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ db_username: user1
+ db_password: user1
+ databases: ['db1']
+ state: present
+ register: rax_db_user
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_user(instance, name):
+ try:
+ user = instance.get_user(name)
+ except Exception:
+ return False
+
+ return user
+
+
+def save_user(module, cdb_id, name, password, databases, host):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user" '
+ 'module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if not user:
+ action = 'create'
+ try:
+ user = instance.create_user(name=name,
+ password=password,
+ database_names=databases,
+ host=host)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+ else:
+ action = 'update'
+
+ if user.host != host:
+ changed = True
+
+ user.update(password=password, host=host)
+
+ former_dbs = set([item.name for item in user.list_user_access()])
+ databases = set(databases)
+
+ if databases != former_dbs:
+ try:
+ revoke_dbs = [db for db in former_dbs if db not in databases]
+ user.revoke_user_access(db_names=revoke_dbs)
+
+ new_dbs = [db for db in databases if db not in former_dbs]
+ user.grant_user_access(db_names=new_dbs)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
+
+
+def delete_user(module, cdb_id, name):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user"'
+ ' module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if user:
+ try:
+ user.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete')
+
+
+def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
+
+ # act on the state
+ if state == 'present':
+ save_user(module, cdb_id, name, password, databases, host)
+ elif state == 'absent':
+ delete_user(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ db_username=dict(type='str', required=True),
+ db_password=dict(type='str', required=True, no_log=True),
+ databases=dict(type='list', default=[]),
+ host=dict(type='str', default='%'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('db_username')
+ password = module.params.get('db_password')
+ databases = module.params.get('databases')
+ host = to_text(module.params.get('host'), errors='surrogate_or_strict')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_user(module, state, cdb_id, name, password, databases, host)
+
+
+if __name__ == '__main__':
+ main()
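
save_user() also reconciles an existing user's password, host and database grants, so re-running with changed values updates the user in place. A sketch restricting user1 to a single (hypothetical) IPv4 source address instead of the default '%':

- name: Restrict user1 to connections from one host (sketch)
  local_action:
    module: rax_cdb_user
    credentials: ~/.raxpub
    region: IAD
    cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
    db_username: user1
    db_password: user1
    databases: ['db1']
    host: 10.0.0.12
    state: present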
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py
new file mode 100644
index 00000000..5ff1e314
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb
+short_description: create / delete a load balancer in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud load balancer.
+options:
+ algorithm:
+ type: str
+ description:
+ - algorithm for the balancer being created
+ choices:
+ - RANDOM
+ - LEAST_CONNECTIONS
+ - ROUND_ROBIN
+ - WEIGHTED_LEAST_CONNECTIONS
+ - WEIGHTED_ROUND_ROBIN
+ default: LEAST_CONNECTIONS
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the load balancer
+ required: yes
+ port:
+ type: int
+ description:
+ - Port for the balancer being created
+ default: 80
+ protocol:
+ type: str
+ description:
+ - Protocol for the balancer being created
+ choices:
+ - DNS_TCP
+ - DNS_UDP
+ - FTP
+ - HTTP
+ - HTTPS
+ - IMAPS
+ - IMAPv4
+ - LDAP
+ - LDAPS
+ - MYSQL
+ - POP3
+ - POP3S
+ - SMTP
+ - TCP
+ - TCP_CLIENT_FIRST
+ - UDP
+ - UDP_STREAM
+ - SFTP
+ default: HTTP
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ timeout:
+ type: int
+ description:
+ - timeout for communication between the balancer and the node
+ default: 30
+ type:
+ type: str
+ description:
+ - type of interface for the balancer being created
+ choices:
+ - PUBLIC
+ - SERVICENET
+ default: PUBLIC
+ vip_id:
+ type: str
+ description:
+ - Virtual IP ID to use when creating the load balancer for purposes of
+ sharing an IP with another load balancer of another protocol
+ wait:
+ description:
+ - wait for the balancer to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Load Balancer
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Balancer create request
+ local_action:
+ module: rax_clb
+ credentials: ~/.raxpub
+ name: my-lb
+ port: 8080
+ protocol: HTTP
+ type: SERVICENET
+ timeout: 30
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_lb
+'''
+
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS,
+ CLB_PROTOCOLS,
+ rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id):
+ if int(timeout) < 30:
+ module.fail_json(msg='"timeout" must be greater than or equal to 30')
+
+ changed = False
+ balancers = []
+
+ clb = pyrax.cloud_loadbalancers
+ if not clb:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ balancer_list = clb.list()
+ while balancer_list:
+ retrieved = clb.list(marker=balancer_list.pop().id)
+ balancer_list.extend(retrieved)
+ if len(retrieved) < 2:
+ break
+
+ for balancer in balancer_list:
+ if name != balancer.name and name != balancer.id:
+ continue
+
+ balancers.append(balancer)
+
+ if len(balancers) > 1:
+ module.fail_json(msg='Multiple Load Balancers were matched by name, '
+ 'try using the Load Balancer ID instead')
+
+ if state == 'present':
+ if isinstance(meta, dict):
+ metadata = [dict(key=k, value=v) for k, v in meta.items()]
+
+ if not balancers:
+ try:
+ virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
+ balancer = clb.create(name, metadata=metadata, port=port,
+ algorithm=algorithm, protocol=protocol,
+ timeout=timeout, virtual_ips=virtual_ips)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ balancer = balancers[0]
+ setattr(balancer, 'metadata',
+ [dict(key=k, value=v) for k, v in
+ balancer.get_metadata().items()])
+ atts = {
+ 'name': name,
+ 'algorithm': algorithm,
+ 'port': port,
+ 'protocol': protocol,
+ 'timeout': timeout
+ }
+ for att, value in atts.items():
+ current = getattr(balancer, att)
+ if current != value:
+ changed = True
+
+ if changed:
+ balancer.update(**atts)
+
+ if balancer.metadata != metadata:
+ balancer.set_metadata(meta)
+ changed = True
+
+ virtual_ips = [clb.VirtualIP(type=vip_type)]
+ current_vip_types = set([v.type for v in balancer.virtual_ips])
+ vip_types = set([v.type for v in virtual_ips])
+ if current_vip_types != vip_types:
+ module.fail_json(msg='Load balancer Virtual IP type cannot '
+ 'be changed')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ instance = rax_to_dict(balancer, 'clb')
+
+ result = dict(changed=changed, balancer=instance)
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if balancers:
+ balancer = balancers[0]
+ try:
+ balancer.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ instance = rax_to_dict(balancer, 'clb')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(balancer, 'status', 'DELETED',
+ interval=5, attempts=attempts)
+ else:
+ instance = {}
+
+ module.exit_json(changed=changed, balancer=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ algorithm=dict(choices=CLB_ALGORITHMS,
+ default='LEAST_CONNECTIONS'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ port=dict(type='int', default=80),
+ protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
+ state=dict(default='present', choices=['present', 'absent']),
+ timeout=dict(type='int', default=30),
+ type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
+ vip_id=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ algorithm = module.params.get('algorithm')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ port = module.params.get('port')
+ protocol = module.params.get('protocol')
+ state = module.params.get('state')
+ timeout = int(module.params.get('timeout'))
+ vip_id = module.params.get('vip_id')
+ vip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id)
+
+
+if __name__ == '__main__':
+ main()
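
The vip_id option documented above lets a second balancer share an existing Virtual IP. A sketch creating an HTTPS balancer on the VIP of the my_lb balancer registered in the example; the fact path into the registered result is an assumption:

- name: Load Balancer create request on a shared VIP (sketch)
  local_action:
    module: rax_clb
    credentials: ~/.raxpub
    name: my-lb-https
    port: 443
    protocol: HTTPS
    type: PUBLIC
    region: DFW
    vip_id: "{{ (my_lb.balancer.virtual_ips | first).id }}"
    wait: yes
    state: present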
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py
new file mode 100644
index 00000000..c066ab66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb_nodes
+short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
+description:
+ - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
+options:
+ address:
+ type: str
+ required: false
+ description:
+ - IP address or domain name of the node
+ condition:
+ type: str
+ required: false
+ choices:
+ - enabled
+ - disabled
+ - draining
+ description:
+ - Condition for the node, which determines its role within the load
+ balancer
+ load_balancer_id:
+ type: int
+ required: true
+ description:
+ - Load balancer id
+ node_id:
+ type: int
+ required: false
+ description:
+ - Node id
+ port:
+ type: int
+ required: false
+ description:
+ - Port number of the load balanced service on the node
+ state:
+ type: str
+ required: false
+ default: "present"
+ choices:
+ - present
+ - absent
+ description:
+ - Indicate desired state of the node
+ type:
+ type: str
+ required: false
+ choices:
+ - primary
+ - secondary
+ description:
+ - Type of node
+ wait:
+ required: false
+ default: "no"
+ type: bool
+ description:
+ - Wait for the load balancer to become active before returning
+ wait_timeout:
+ type: int
+ required: false
+ default: 30
+ description:
+ - How long to wait before giving up and returning an error
+ weight:
+ type: int
+ required: false
+ description:
+ - Weight of node
+ virtualenv:
+ type: path
+ description:
+ - Virtualenv to execute this module in
+author: "Lukasz Kawczynski (@neuroid)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Add a new node to the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ address: 10.2.2.3
+ port: 80
+ condition: enabled
+ type: primary
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Drain connections from a node
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ condition: draining
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Remove a node from the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ state: absent
+ wait: yes
+ credentials: /path/to/credentials
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
+
+
+def _activate_virtualenv(path):
+ activate_this = os.path.join(path, 'bin', 'activate_this.py')
+ with open(activate_this) as f:
+ code = compile(f.read(), activate_this, 'exec')
+ exec(code)
+
+
+def _get_node(lb, node_id=None, address=None, port=None):
+ """Return a matching node"""
+ for node in getattr(lb, 'nodes', []):
+ match_list = []
+ if node_id is not None:
+ match_list.append(getattr(node, 'id', None) == node_id)
+ if address is not None:
+ match_list.append(getattr(node, 'address', None) == address)
+ if port is not None:
+ match_list.append(getattr(node, 'port', None) == port)
+
+ if match_list and all(match_list):
+ return node
+
+ return None
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ condition=dict(choices=['enabled', 'disabled', 'draining']),
+ load_balancer_id=dict(required=True, type='int'),
+ node_id=dict(type='int'),
+ port=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ type=dict(choices=['primary', 'secondary']),
+ virtualenv=dict(type='path'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=30, type='int'),
+ weight=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params['address']
+ condition = (module.params['condition'] and
+ module.params['condition'].upper())
+ load_balancer_id = module.params['load_balancer_id']
+ node_id = module.params['node_id']
+ port = module.params['port']
+ state = module.params['state']
+ typ = module.params['type'] and module.params['type'].upper()
+ virtualenv = module.params['virtualenv']
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout'] or 1
+ weight = module.params['weight']
+
+ if virtualenv:
+ try:
+ _activate_virtualenv(virtualenv)
+ except IOError as e:
+ module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
+ virtualenv, e))
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.cloud_loadbalancers:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ node = _get_node(lb, node_id, address, port)
+
+ result = rax_clb_node_to_dict(node)
+
+ if state == 'absent':
+ if not node: # Removing a non-existent node
+ module.exit_json(changed=False, state=state)
+ try:
+ lb.delete_node(node)
+ result = {}
+ except pyrax.exc.NotFound:
+ module.exit_json(changed=False, state=state)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # present
+ if not node:
+ if node_id: # Updating a non-existent node
+ msg = 'Node %d not found' % node_id
+ if lb.nodes:
+ msg += (' (available nodes: %s)' %
+ ', '.join([str(x.id) for x in lb.nodes]))
+ module.fail_json(msg=msg)
+ else: # Creating a new node
+ try:
+ node = pyrax.cloudloadbalancers.Node(
+ address=address, port=port, condition=condition,
+ weight=weight, type=typ)
+ resp, body = lb.add_nodes([node])
+ result.update(body['nodes'][0])
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # Updating an existing node
+ mutable = {
+ 'condition': condition,
+ 'type': typ,
+ 'weight': weight,
+ }
+
+ # Iterate over a copy so entries can be popped during the loop
+ for name, value in list(mutable.items()):
+ if value is None or value == getattr(node, name):
+ mutable.pop(name)
+
+ if not mutable:
+ module.exit_json(changed=False, state=state, node=result)
+
+ try:
+ # The diff has to be set explicitly to update node's weight and
+ # type; this should probably be fixed in pyrax
+ lb.update_node(node, diff=mutable)
+ result.update(mutable)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if wait:
+ pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
+ attempts=wait_timeout)
+ if lb.status != 'ACTIVE':
+ module.fail_json(
+ msg='Load balancer not active after %ds (current status: %s)' %
+ (wait_timeout, lb.status.lower()))
+
+ kwargs = {'node': result} if result else {}
+ module.exit_json(changed=True, state=state, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
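
Updating an existing node goes through the same module: _get_node() matches on node_id (or address/port), and only the changed condition/type/weight fields are sent via lb.update_node(). A sketch that adjusts just a node's weight:

- name: Change the weight of an existing node (sketch)
  local_action:
    module: rax_clb_nodes
    load_balancer_id: 71
    node_id: 410
    weight: 10
    wait: yes
    credentials: /path/to/credentials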
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py
new file mode 100644
index 00000000..114128e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
+description:
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
+options:
+ loadbalancer:
+ type: str
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ type: str
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+ - If set to "false", temporarily disable SSL termination without discarding
+ - existing credentials.
+ default: true
+ type: bool
+ private_key:
+ type: str
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ type: str
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ type: str
+ description:
+ - One or more intermediate certificate authorities as a string in PEM
+ - format, concatenated into a single string.
+ secure_port:
+ type: int
+ description:
+ - The port to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ type: bool
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+ - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
+ - termination is also applied or removed.
+ type: bool
+ wait:
+ description:
+ - Wait for the balancer to be in state "running" before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ type: int
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ community.general.rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ community.general.rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
+ certificate, intermediate_certificate, secure_port,
+ secure_traffic_only, https_redirect,
+ wait, wait_timeout):
+ # Validate arguments.
+
+ if state == 'present':
+ if not private_key:
+ module.fail_json(msg="private_key must be provided.")
+ else:
+ private_key = private_key.strip()
+
+ if not certificate:
+ module.fail_json(msg="certificate must be provided.")
+ else:
+ certificate = certificate.strip()
+
+ attempts = wait_timeout // 5
+
+ # Locate the load balancer.
+
+ balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ existing_ssl = balancer.get_ssl_termination()
+
+ changed = False
+
+ if state == 'present':
+ # Apply or reconfigure SSL termination on the load balancer.
+ ssl_attrs = dict(
+ securePort=secure_port,
+ privatekey=private_key,
+ certificate=certificate,
+ intermediateCertificate=intermediate_certificate,
+ enabled=enabled,
+ secureTrafficOnly=secure_traffic_only
+ )
+
+ needs_change = False
+
+ if existing_ssl:
+ for ssl_attr, value in ssl_attrs.items():
+ if ssl_attr == 'privatekey':
+ # The private key is not included in get_ssl_termination's
+ # output (as it shouldn't be). Also, if you're changing the
+ # private key, you'll also be changing the certificate,
+ # so we don't lose anything by not checking it.
+ continue
+
+ if value is not None and existing_ssl.get(ssl_attr) != value:
+ # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr))
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ try:
+ balancer.add_ssl_termination(**ssl_attrs)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+ elif state == 'absent':
+ # Remove SSL termination if it's already configured.
+ if existing_ssl:
+ try:
+ balancer.delete_ssl_termination()
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if https_redirect is not None and balancer.httpsRedirect != https_redirect:
+ if changed:
+ # This wait is unavoidable because load balancers are immutable
+ # while the SSL termination changes above are being applied.
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ try:
+ balancer.update(httpsRedirect=https_redirect)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if changed and wait:
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ new_ssl_termination = balancer.get_ssl_termination()
+
+ # Intentionally omit the private key from the module output, so you don't
+ # accidentally echo it with `ansible-playbook -v` or `debug`, and the
+ # certificate, which is just long. Convert other attributes to snake_case
+ # and include https_redirect at the top-level.
+ if new_ssl_termination:
+ new_ssl = dict(
+ enabled=new_ssl_termination['enabled'],
+ secure_port=new_ssl_termination['securePort'],
+ secure_traffic_only=new_ssl_termination['secureTrafficOnly']
+ )
+ else:
+ new_ssl = None
+
+ result = dict(
+ changed=changed,
+ https_redirect=balancer.httpsRedirect,
+ ssl_termination=new_ssl,
+ balancer=rax_to_dict(balancer, 'clb')
+ )
+ success = True
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ success = False
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+ success = False
+
+ if success:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(dict(
+ loadbalancer=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(type='bool', default=True),
+ private_key=dict(no_log=True),
+ certificate=dict(),
+ intermediate_certificate=dict(),
+ secure_port=dict(type='int', default=443),
+ secure_traffic_only=dict(type='bool', default=False),
+ https_redirect=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module.')
+
+ loadbalancer = module.params.get('loadbalancer')
+ state = module.params.get('state')
+ enabled = module.boolean(module.params.get('enabled'))
+ private_key = module.params.get('private_key')
+ certificate = module.params.get('certificate')
+ intermediate_certificate = module.params.get('intermediate_certificate')
+ secure_port = module.params.get('secure_port')
+ secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
+ https_redirect = module.boolean(module.params.get('https_redirect'))
+ wait = module.boolean(module.params.get('wait'))
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer_ssl(
+ module, loadbalancer, state, enabled, private_key, certificate,
+ intermediate_certificate, secure_port, secure_traffic_only,
+ https_redirect, wait, wait_timeout
+ )
+
+
+if __name__ == '__main__':
+ main()
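
Per the https_redirect handling above, enabling the redirect alongside an SSL change incurs an implicit wait while the balancer is immutable. A sketch enabling the redirect on a balancer that already terminates SSL; note the documentation requires secure_traffic_only to be true:

- name: Redirect HTTP traffic to HTTPS (sketch)
  community.general.rax_clb_ssl:
    loadbalancer: the_loadbalancer
    state: present
    private_key: "{{ lookup('file', 'credentials/server.key') }}"
    certificate: "{{ lookup('file', 'credentials/server.crt') }}"
    secure_traffic_only: true
    https_redirect: true
    wait: true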
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py
new file mode 100644
index 00000000..e9b7e2be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns
+short_description: Manage domains on Rackspace Cloud DNS
+description:
+ - Manage domains on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+ - Brief description of the domain. Maximum length of 160 characters
+ email:
+ type: str
+ description:
+ - Email address of the domain administrator
+ name:
+ type: str
+ description:
+ - Domain name to create
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of domain in seconds
+ default: 3600
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create domain
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Domain create request
+ local_action:
+ module: rax_dns
+ credentials: ~/.raxpub
+ name: example.org
+ email: admin@example.org
+ register: rax_dns
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns(module, comment, email, name, state, ttl):
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not email:
+ module.fail_json(msg='An "email" attribute is required for '
+ 'creating a domain')
+
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ domain = dns.create(name=name, emailAddress=email, ttl=ttl,
+ comment=comment)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(domain, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(domain, 'ttl', None):
+ update['ttl'] = ttl
+ if email != getattr(domain, 'emailAddress', None):
+ update['emailAddress'] = email
+
+ if update:
+ try:
+ domain.update(**update)
+ changed = True
+ domain.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NotFound:
+ domain = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if domain:
+ try:
+ domain.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, domain=rax_to_dict(domain))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ email=dict(),
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ email = module.params.get('email')
+ name = module.params.get('name')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+
+ setup_rax_module(module, pyrax, False)
+
+ rax_dns(module, comment, email, name, state, ttl)
+
+
+if __name__ == '__main__':
+ main()
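
rax_dns() also updates comment, ttl and emailAddress in place when they differ from the existing domain. A sketch lowering the TTL of the example.org domain created above:

- name: Lower the domain TTL (sketch)
  local_action:
    module: rax_dns
    credentials: ~/.raxpub
    name: example.org
    email: admin@example.org
    ttl: 300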
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py
new file mode 100644
index 00000000..0b60120a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns_record
+short_description: Manage DNS records on Rackspace Cloud DNS
+description:
+ - Manage DNS records on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+ - Brief description of the record. Maximum length of 160 characters
+ data:
+ type: str
+ description:
+ - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
+ SRV/TXT
+ required: True
+ domain:
+ type: str
+ description:
+ - Domain name to create the record in. This is an invalid option when
+ type=PTR
+ loadbalancer:
+ type: str
+ description:
+ - Load Balancer ID to create a PTR record for. Only used with type=PTR
+ name:
+ type: str
+ description:
+ - FQDN record name to create
+ required: True
+ overwrite:
+ description:
+ - Add new records if data doesn't match, instead of updating existing
+ record with matching name. If there are already multiple records with
+ matching name and overwrite=true, this module will fail.
+ default: true
+ type: bool
+ priority:
+ type: int
+ description:
+ - Required for MX and SRV records, but forbidden for other record types.
+ If specified, must be an integer from 0 to 65535.
+ server:
+ type: str
+ description:
+ - Server ID to create a PTR record for. Only used with type=PTR
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of record in seconds
+ default: 3600
+ type:
+ type: str
+ description:
+ - DNS record type
+ choices:
+ - A
+ - AAAA
+ - CNAME
+ - MX
+ - NS
+ - SRV
+ - TXT
+ - PTR
+ required: true
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+ - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
+ supplied
+ - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
+ - C(PTR) record support was added in version 1.7
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
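Editorial aside: the overwrite option documented above changes only how an existing record is looked up; with overwrite=true the module matches on name alone and then updates the data in place, while with overwrite=false it also requires the data to match, adding a new record when no exact match exists. A minimal, pyrax-free sketch of that lookup rule, using a hypothetical find_record helper and plain dicts (none of this is the module's own code):

    # Hedged sketch: illustrates the overwrite lookup rule described above.
    def find_record(records, record_type, name, data=None, overwrite=True):
        """Return the record a lookup would match, or None."""
        matches = [
            r for r in records
            if r['type'] == record_type
            and r['name'] == name
            and (overwrite or r['data'] == data)
        ]
        if len(matches) > 1:
            raise ValueError('multiple matching records; cannot overwrite')
        return matches[0] if matches else None

    records = [{'type': 'A', 'name': 'www.example.org', 'data': '10.0.0.1'}]
    # overwrite=True matches on name alone, so the existing record is updated:
    assert find_record(records, 'A', 'www.example.org',
                       data='10.0.0.2', overwrite=True) is not None
    # overwrite=False also requires the data to match, so no hit -> add new:
    assert find_record(records, 'A', 'www.example.org',
                       data='10.0.0.2', overwrite=False) is None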
+
+EXAMPLES = '''
+- name: Create DNS Records
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Create A record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ domain: example.org
+ name: www.example.org
+ data: "{{ rax_accessipv4 }}"
+ type: A
+ register: a_record
+
+  - name: Create PTR record
+    local_action:
+      module: rax_dns_record
+      credentials: ~/.raxpub
+      server: "{{ rax_id }}"
+      name: "{{ inventory_hostname }}"
+      data: "{{ rax_accessipv4 }}"
+      type: PTR
+      region: DFW
+    register: ptr_record
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_find_server,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
+ name=None, server=None, state='present', ttl=7200):
+ changed = False
+ results = []
+
+ dns = pyrax.cloud_dns
+
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if loadbalancer:
+ item = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ elif server:
+ item = rax_find_server(module, pyrax, server)
+
+ if state == 'present':
+ current = dns.list_ptr_records(item)
+ for record in current:
+ if record.data == data:
+ if record.ttl != ttl or record.name != name:
+ try:
+ dns.update_ptr_record(item, record, name, data, ttl)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ record.ttl = ttl
+ record.name = name
+ results.append(rax_to_dict(record))
+ break
+ else:
+ results.append(rax_to_dict(record))
+ break
+
+ if not results:
+ record = dict(name=name, type='PTR', data=data, ttl=ttl,
+ comment=comment)
+ try:
+ results = dns.add_ptr_records(item, [record])
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+ elif state == 'absent':
+ current = dns.list_ptr_records(item)
+ for record in current:
+ if record.data == data:
+ results.append(rax_to_dict(record))
+ break
+
+ if results:
+ try:
+ dns.delete_ptr_records(item, data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+
+def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
+ overwrite=True, priority=None, record_type='A',
+ state='present', ttl=7200):
+ """Function for manipulating record types other than PTR"""
+
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not priority and record_type in ['MX', 'SRV']:
+ module.fail_json(msg='A "priority" attribute is required for '
+ 'creating a MX or SRV record')
+
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ if overwrite:
+ record = domain.find_record(record_type, name=name)
+ else:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='overwrite=true and there are multiple matching records')
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ try:
+ record_data = {
+ 'type': record_type,
+ 'name': name,
+ 'data': data,
+ 'ttl': ttl
+ }
+ if comment:
+ record_data.update(dict(comment=comment))
+ if priority and record_type.upper() in ['MX', 'SRV']:
+ record_data.update(dict(priority=priority))
+
+ record = domain.add_records([record_data])[0]
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(record, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(record, 'ttl', None):
+ update['ttl'] = ttl
+ if priority != getattr(record, 'priority', None):
+ update['priority'] = priority
+ if data != getattr(record, 'data', None):
+ update['data'] = data
+
+ if update:
+ try:
+ record.update(**update)
+ changed = True
+ record.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ record = {}
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if record:
+ try:
+ record.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, record=rax_to_dict(record))
+
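Editorial aside: the state=present path above stays idempotent by diffing each desired attribute against the live record and sending only the fields that changed. The same pattern in isolation, with a hypothetical Record class standing in for a pyrax record object:

    # Hedged sketch of the diff-then-update pattern used above: compare
    # desired values against the current object and build a minimal update.
    def build_update(record, desired):
        """Return only the fields whose desired value differs."""
        return {
            key: value
            for key, value in desired.items()
            if value != getattr(record, key, None)
        }

    class Record(object):  # hypothetical stand-in for a pyrax record
        ttl, data, comment, priority = 3600, '10.0.0.1', None, None

    update = build_update(Record(), dict(ttl=300, data='10.0.0.1',
                                         comment=None, priority=None))
    assert update == {'ttl': 300}  # changed stays False unless update is non-empty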
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ data=dict(required=True),
+ domain=dict(),
+ loadbalancer=dict(),
+ name=dict(required=True),
+ overwrite=dict(type='bool', default=True),
+ priority=dict(type='int'),
+ server=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
+ 'SRV', 'TXT', 'PTR'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ required_one_of=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ data = module.params.get('data')
+ domain = module.params.get('domain')
+ loadbalancer = module.params.get('loadbalancer')
+ name = module.params.get('name')
+ overwrite = module.params.get('overwrite')
+ priority = module.params.get('priority')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+ record_type = module.params.get('type')
+
+ setup_rax_module(module, pyrax, False)
+
+ if record_type.upper() == 'PTR':
+ if not server and not loadbalancer:
+ module.fail_json(msg='one of the following is required: '
+ 'server,loadbalancer')
+ rax_dns_record_ptr(module, data=data, comment=comment,
+ loadbalancer=loadbalancer, name=name, server=server,
+ state=state, ttl=ttl)
+ else:
+ rax_dns_record(module, comment=comment, data=data, domain=domain,
+ name=name, overwrite=overwrite, priority=priority,
+ record_type=record_type, state=state, ttl=ttl)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py
new file mode 100644
index 00000000..386ca7cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_facts
+short_description: Gather facts for Rackspace Cloud Servers
+description:
+ - Gather facts for Rackspace Cloud Servers.
+options:
+ address:
+ type: str
+ description:
+ - Server IP address to retrieve facts for, will match any IP assigned to
+ the server
+ id:
+ type: str
+ description:
+ - Server ID to retrieve facts for
+ name:
+ type: str
+ description:
+ - Server name to retrieve facts for
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Gather info about servers
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Get facts about servers
+ local_action:
+ module: rax_facts
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ - name: Map some facts
+ ansible.builtin.set_fact:
+ ansible_ssh_host: "{{ rax_accessipv4 }}"
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_facts(module, address, name, server_id):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ ansible_facts = {}
+
+ search_opts = {}
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+        except Exception:
+            pass
+
+ servers[:] = [server for server in servers if server.status != "DELETED"]
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif len(servers) == 1:
+ ansible_facts = rax_to_dict(servers[0], 'server')
+
+ module.exit_json(changed=changed, ansible_facts=ansible_facts)
+
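Editorial aside: rax_facts resolves a server in one of three mutually exclusive ways, an anchored name regex (^name$, so "web" cannot match "web2"), a scan of every IP across the server's networks, or a direct ID get, and then insists on exactly one non-deleted match. A pyrax-free sketch of that selection logic with a hypothetical Server class:

    # Hedged sketch of the lookup-and-uniqueness logic above.
    import re

    class Server(object):  # hypothetical stand-in for a pyrax server
        def __init__(self, name, networks, status='ACTIVE'):
            self.name, self.networks, self.status = name, networks, status

    def find_unique(servers, name=None, address=None):
        if name:
            pattern = re.compile('^%s$' % name)  # anchored: exact name only
            hits = [s for s in servers if pattern.match(s.name)]
        else:
            hits = [s for s in servers
                    if any(address in addrs for addrs in s.networks.values())]
        hits = [s for s in hits if s.status != 'DELETED']
        if len(hits) != 1:
            raise LookupError('expected exactly one match, got %d' % len(hits))
        return hits[0]

    servers = [Server('web', {'public': ['10.0.0.5']}),
               Server('web2', {'public': ['10.0.0.6']})]
    assert find_unique(servers, name='web').name == 'web'      # not 'web2'
    assert find_unique(servers, address='10.0.0.6').name == 'web2'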
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+
+ setup_rax_module(module, pyrax)
+
+ rax_facts(module, address, name, server_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py
new file mode 100644
index 00000000..7080cc2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files
+short_description: Manipulate Rackspace Cloud Files Containers
+description:
+ - Manipulate Rackspace Cloud Files Containers
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing containers.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: "no"
+ container:
+ type: str
+ description:
+ - The container to use for container or metadata operations.
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on a container
+ private:
+ description:
+ - Used to set a container as private, removing it from the CDN. B(Warning!)
+ Private containers, if previously made public, can have live objects
+ available until the TTL on cached objects expires
+ type: bool
+ default: false
+ public:
+ description:
+ - Used to set a container as public, available via the Cloud Files CDN
+ type: bool
+ default: false
+ region:
+ type: str
+ description:
+ - Region to create an instance in
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent', 'list']
+ default: present
+ ttl:
+ type: int
+ description:
+ - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
+ Setting a TTL is only appropriate for containers that are public
+ type:
+ type: str
+ description:
+ - Type of object to do work on, i.e. metadata object or a container object
+ choices:
+ - container
+ - meta
+ default: container
+ web_error:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP error page when accessed by the CDN URL
+ web_index:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Containers"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "List all containers"
+ community.general.rax_files:
+ state: list
+
+ - name: "Create container called 'mycontainer'"
+ community.general.rax_files:
+ container: mycontainer
+
+ - name: "Create container 'mycontainer2' with metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ meta:
+ key: value
+ file_for: someuser@example.com
+
+ - name: "Set a container's web index page"
+ community.general.rax_files:
+ container: mycontainer
+ web_index: index.html
+
+ - name: "Set a container's web error page"
+ community.general.rax_files:
+ container: mycontainer
+ web_error: error.html
+
+ - name: "Make container public"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+
+ - name: "Make container public with a 24 hour TTL"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+ ttl: 86400
+
+ - name: "Make container private"
+ community.general.rax_files:
+ container: mycontainer
+ private: yes
+
+- name: "Test Cloud Files Containers Metadata Storage"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "Get mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+
+ - name: "Set mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+ meta:
+ uploaded_by: someuser@example.com
+
+ - name: "Remove mycontainer2 metadata"
+ community.general.rax_files:
+ container: "mycontainer2"
+ type: meta
+ state: absent
+ meta:
+ key: ""
+ file_for: ""
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError as e:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=True)
+META_PREFIX = 'x-container-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+ module.fail_json(msg=e.message)
+
+
+def _fetch_meta(module, container):
+ EXIT_DICT['meta'] = dict()
+ try:
+ for k, v in container.get_metadata().items():
+ split_key = k.split(META_PREFIX)[-1]
+ EXIT_DICT['meta'][split_key] = v
+ except Exception as e:
+ module.fail_json(msg=e.message)
+
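Editorial aside: Cloud Files returns container metadata with an x-container-meta- header prefix; _fetch_meta strips it with split(META_PREFIX)[-1], which conveniently leaves keys that never had the prefix untouched. A quick illustration with made-up header values:

    # Hedged illustration of the prefix stripping performed by _fetch_meta.
    META_PREFIX = 'x-container-meta-'

    raw = {'x-container-meta-uploaded_by': 'someuser@example.com',
           'content-length': '0'}
    cleaned = {k.split(META_PREFIX)[-1]: v for k, v in raw.items()}
    assert cleaned == {'uploaded_by': 'someuser@example.com',
                       'content-length': '0'}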
+
+def meta(cf, module, container_, state, meta_, clear_meta):
+ c = _get_container(module, cf, container_)
+
+ if meta_ and state == 'present':
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ elif meta_ and state == 'absent':
+ remove_results = []
+ for k, v in meta_.items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+ elif state == 'absent':
+ remove_results = []
+ for k, v in c.get_metadata().items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+
+ _fetch_meta(module, c)
+ _locals = locals().keys()
+
+ EXIT_DICT['container'] = c.name
+ if 'meta_set' in _locals or 'remove_results' in _locals:
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
+ private, web_index, web_error):
+ if public and private:
+ module.fail_json(msg='container cannot be simultaneously '
+ 'set to public and private')
+
+ if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
+        module.fail_json(msg='attributes cannot be set or removed when '
+                             'state is absent')
+
+ if state == 'list':
+ # We don't care if attributes are specified, let's list containers
+ EXIT_DICT['containers'] = cf.list_containers()
+ module.exit_json(**EXIT_DICT)
+
+ try:
+ c = cf.get_container(container_)
+ except pyrax.exc.NoSuchContainer as e:
+ # Make the container if state=present, otherwise bomb out
+ if state == 'present':
+ try:
+ c = cf.create_container(container_)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['created'] = True
+ else:
+ module.fail_json(msg=e.message)
+ else:
+ # Successfully grabbed a container object
+ # Delete if state is absent
+ if state == 'absent':
+ try:
+ cont_deleted = c.delete()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['deleted'] = True
+
+ if meta_:
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ finally:
+ _fetch_meta(module, c)
+
+ if ttl:
+ try:
+ c.cdn_ttl = ttl
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['ttl'] = c.cdn_ttl
+
+ if public:
+ try:
+ cont_public = c.make_public()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
+ ssl_url=c.cdn_ssl_uri,
+ streaming_url=c.cdn_streaming_uri,
+ ios_uri=c.cdn_ios_uri)
+
+ if private:
+ try:
+ cont_private = c.make_private()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_private'] = True
+
+ if web_index:
+ try:
+ cont_web_index = c.set_web_index_page(web_index)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_index'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ if web_error:
+ try:
+ cont_err_index = c.set_web_error_page(web_error)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_error'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['objs_in_container'] = c.object_count
+ EXIT_DICT['total_bytes'] = c.total_bytes
+
+ _locals = locals().keys()
+ if ('cont_deleted' in _locals
+ or 'meta_set' in _locals
+ or 'cont_public' in _locals
+ or 'cont_private' in _locals
+ or 'cont_web_index' in _locals
+ or 'cont_err_index' in _locals):
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
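Editorial aside: container() above detects change by probing locals() for names such as cont_deleted that are only bound when an operation actually ran. An explicit changed flag is the more conventional way to express the same idea; a minimal sketch of that alternative (not the module's own code, just a named alternative technique):

    # Hedged sketch: explicit changed flag instead of probing locals().
    def apply_operations(operations):
        """operations: list of zero-argument callables; returns changed."""
        changed = False
        for op in operations:
            op()            # perform the side effect (delete, make_public, ...)
            changed = True  # any executed operation marks the run as changed
        return changed

    ran = []
    assert apply_operations([lambda: ran.append('make_public')]) is True
    assert apply_operations([]) is False and ran == ['make_public']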
+
+def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "container":
+ container(cf, module, container_, state, meta_, clear_meta, ttl,
+ public, private, web_index, web_error)
+ else:
+ meta(cf, module, container_, state, meta_, clear_meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(),
+ state=dict(choices=['present', 'absent', 'list'],
+ default='present'),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ type=dict(choices=['container', 'meta'], default='container'),
+ ttl=dict(type='int'),
+ public=dict(default=False, type='bool'),
+ private=dict(default=False, type='bool'),
+ web_index=dict(),
+ web_error=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container_ = module.params.get('container')
+ state = module.params.get('state')
+ meta_ = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ typ = module.params.get('type')
+ ttl = module.params.get('ttl')
+ public = module.params.get('public')
+ private = module.params.get('private')
+ web_index = module.params.get('web_index')
+ web_error = module.params.get('web_error')
+
+ if state in ['present', 'absent'] and not container_:
+ module.fail_json(msg='please specify a container name')
+ if clear_meta and not typ == 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting '
+ 'metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py
new file mode 100644
index 00000000..dc445554
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py
@@ -0,0 +1,608 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files_objects
+short_description: Upload, download, and delete objects in Rackspace Cloud Files
+description:
+ - Upload, download, and delete objects in Rackspace Cloud Files
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing objects.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: 'no'
+ container:
+ type: str
+ description:
+ - The container to use for file object operations.
+ required: true
+ dest:
+ type: str
+ description:
+ - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
+ Used to specify the destination of an operation on a remote object; i.e. a file name,
+ "file1", or a comma-separated list of remote objects, "file1,file2,file17"
+ expires:
+ type: int
+ description:
+ - Used to set an expiration on a file or folder uploaded to Cloud Files.
+ Requires an integer, specifying expiration in seconds
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on an uploaded file or folder
+ method:
+ type: str
+ description:
+      - The method of operation to be performed; C(put) to upload files
+        to Cloud Files, C(get) to download files from Cloud Files, or C(delete)
+        to delete remote objects in Cloud Files
+ choices:
+ - get
+ - put
+ - delete
+ default: get
+ src:
+ type: str
+ description:
+ - Source from which to upload files. Used to specify a remote object as a source for
+        an operation, e.g. a file name, "file1", or a comma-separated list of remote objects,
+ "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
+ structure:
+ description:
+ - Used to specify whether to maintain nested directory structure when downloading objects
+ from Cloud Files. Setting to false downloads the contents of a container to a single,
+ flat directory
+ type: bool
+ default: 'yes'
+ type:
+ type: str
+ description:
+ - Type of object to do work on
+ - Metadata object or a file object
+ choices:
+ - file
+ - meta
+ default: file
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Objects"
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: "Get objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ dest: ~/Downloads/testcont
+
+ - name: "Get single object from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1
+ dest: ~/Downloads/testcont
+
+ - name: "Get several objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1,file2,file3
+ dest: ~/Downloads/testcont
+
+ - name: "Delete one object in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file1
+
+ - name: "Delete several objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file2,file3,file4
+
+ - name: "Delete all objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+
+ - name: "Upload all files to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/onehundred
+
+ - name: "Upload one file to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file1
+
+ - name: "Upload one file to test container with metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ src: ~/Downloads/testcont/file2
+ method: put
+ meta:
+ testkey: testdata
+ who_uploaded_this: someuser@example.com
+
+ - name: "Upload one file to test container with TTL of 60 seconds"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file3
+ expires: 60
+
+ - name: "Attempt to get remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: get
+ src: FileThatDoesNotExist.jpg
+ dest: ~/Downloads/testcont
+ ignore_errors: yes
+
+ - name: "Attempt to delete remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: FileThatDoesNotExist.jpg
+ ignore_errors: yes
+
+- name: "Test Cloud Files Objects Metadata"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "Get metadata on one object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file2
+
+ - name: "Get metadata on several objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file2,file1
+
+ - name: "Set metadata on an object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: put
+ meta:
+ key1: value1
+ key2: value2
+ clear_meta: true
+
+ - name: "Verify metadata is set"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file17
+
+ - name: "Delete metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: delete
+ meta:
+ key1: ''
+ key2: ''
+
+ - name: "Get metadata on all objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=False)
+META_PREFIX = 'x-object-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+ module.fail_json(msg=e.message)
+
+
+def _upload_folder(cf, folder, container, ttl=None, headers=None):
+ """ Uploads a folder to Cloud Files.
+ """
+ total_bytes = 0
+ for root, dirs, files in os.walk(folder):
+ for fname in files:
+ full_path = os.path.join(root, fname)
+ obj_name = os.path.relpath(full_path, folder)
+ obj_size = os.path.getsize(full_path)
+ cf.upload_file(container, full_path,
+ obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
+ total_bytes += obj_size
+ return total_bytes
+
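Editorial aside: _upload_folder names each uploaded object by its path relative to the source folder, so a local tree like photos/2013/a.jpg becomes the object 2013/a.jpg inside the container. The naming rule in isolation, with hypothetical paths:

    # Hedged illustration of the obj_name computation used by _upload_folder.
    import os.path

    folder = '/home/user/photos'
    full_path = '/home/user/photos/2013/a.jpg'   # as yielded by os.walk
    obj_name = os.path.relpath(full_path, folder)
    assert obj_name == os.path.join('2013', 'a.jpg')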
+
+def upload(module, cf, container, src, dest, meta, expires):
+ """ Uploads a single object or a folder to Cloud Files Optionally sets an
+ metadata, TTL value (expires), or Content-Disposition and Content-Encoding
+ headers.
+ """
+ if not src:
+ module.fail_json(msg='src must be specified when uploading')
+
+ c = _get_container(module, cf, container)
+ src = os.path.abspath(os.path.expanduser(src))
+ is_dir = os.path.isdir(src)
+
+    if (not is_dir and not os.path.isfile(src)) or not os.path.exists(src):
+ module.fail_json(msg='src must be a file or a directory')
+ if dest and is_dir:
+ module.fail_json(msg='dest cannot be set when whole '
+ 'directories are uploaded')
+
+ cont_obj = None
+ total_bytes = 0
+ if dest and not is_dir:
+ try:
+ cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ elif is_dir:
+ try:
+ total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ try:
+ cont_obj = c.upload_file(src, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+
+ EXIT_DICT['success'] = True
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
+ if cont_obj or total_bytes > 0:
+ EXIT_DICT['changed'] = True
+ if meta:
+ EXIT_DICT['meta'] = dict(updated=True)
+
+ if cont_obj:
+ EXIT_DICT['bytes'] = cont_obj.total_bytes
+ EXIT_DICT['etag'] = cont_obj.etag
+ else:
+ EXIT_DICT['bytes'] = total_bytes
+
+ module.exit_json(**EXIT_DICT)
+
+
+def download(module, cf, container, src, dest, structure):
+ """ Download objects from Cloud Files to a local path specified by "dest".
+    Optionally disable maintaining a directory structure by passing a
+ false value to "structure".
+ """
+ # Looking for an explicit destination
+ if not dest:
+ module.fail_json(msg='dest is a required argument when '
+ 'downloading from Cloud Files')
+
+ # Attempt to fetch the container by name
+ c = _get_container(module, cf, container)
+
+ # Accept a single object name or a comma-separated list of objs
+ # If not specified, get the entire container
+ if src:
+ objs = src.split(',')
+        objs = [obj.strip() for obj in objs]  # concrete list: len() is used below
+ else:
+ objs = c.get_object_names()
+
+ dest = os.path.abspath(os.path.expanduser(dest))
+ is_dir = os.path.isdir(dest)
+
+ if not is_dir:
+ module.fail_json(msg='dest must be a directory')
+
+ results = []
+ for obj in objs:
+ try:
+ c.download_object(obj, dest, structure=structure)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(obj)
+
+ len_results = len(results)
+ len_objs = len(objs)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['requested_downloaded'] = results
+ if results:
+ EXIT_DICT['changed'] = True
+ if len_results == len_objs:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
+ else:
+ EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
+ "downloaded" % (len_results, len_objs)
+ module.exit_json(**EXIT_DICT)
+
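Editorial aside: src accepts either a single object name or a comma-separated list, which the function splits and strips into a concrete list up front (a real list, not a lazy map object, since len() is called on it afterwards). The parsing rule by itself, with hypothetical names:

    # Hedged illustration of the src/dest parsing convention used above.
    def parse_objects(spec):
        """'file1, file2 ,file3' -> ['file1', 'file2', 'file3']"""
        return [name.strip() for name in spec.split(',')]

    objs = parse_objects('file1, file2 ,file3')
    assert objs == ['file1', 'file2', 'file3'] and len(objs) == 3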
+
+def delete(module, cf, container, src, dest):
+ """ Delete specific objects by proving a single file name or a
+ comma-separated list to src OR dest (but not both). Omitting file name(s)
+ assumes the entire container is to be deleted.
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ c = _get_container(module, cf, container)
+
+ if objs:
+ objs = objs.split(',')
+        objs = [obj.strip() for obj in objs]  # concrete list: len() is used below
+ else:
+ objs = c.get_object_names()
+
+ num_objs = len(objs)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.delete_object(obj)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+
+ num_deleted = results.count(True)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['deleted'] = num_deleted
+ EXIT_DICT['requested_deleted'] = objs
+
+ if num_deleted:
+ EXIT_DICT['changed'] = True
+
+ if num_objs == num_deleted:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
+ else:
+ EXIT_DICT['msg'] = ("Error: only %s of %s objects "
+ "deleted" % (num_deleted, num_objs))
+ module.exit_json(**EXIT_DICT)
+
+
+def get_meta(module, cf, container, src, dest):
+ """ Get metadata for a single file, comma-separated list, or entire
+ container
+ """
+ c = _get_container(module, cf, container)
+
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ if objs:
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+ else:
+ objs = c.get_object_names()
+
+ results = dict()
+ for obj in objs:
+ try:
+ meta = c.get_object(obj).get_metadata()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results[obj] = dict()
+ for k, v in meta.items():
+ meta_key = k.split(META_PREFIX)[-1]
+ results[obj][meta_key] = v
+
+ EXIT_DICT['container'] = c.name
+ if results:
+ EXIT_DICT['meta_results'] = results
+ EXIT_DICT['success'] = True
+ module.exit_json(**EXIT_DICT)
+
+
+def put_meta(module, cf, container, src, dest, meta, clear_meta):
+ """ Set metadata on a container, single file, or comma-separated list.
+ Passing a true value to clear_meta clears the metadata stored in Cloud
+ Files before setting the new metadata to the value of "meta".
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to set meta"
+ " have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+
+ c = _get_container(module, cf, container)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+        EXIT_DICT['num_changed'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete_meta(module, cf, container, src, dest, meta):
+ """ Removes metadata keys and values specified in meta, if any. Deletes on
+ all objects specified by src or dest (but not both), if any; otherwise it
+ deletes keys on all objects in the container
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
+ "deleted have been specified on both src and dest"
+ " args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+
+ c = _get_container(module, cf, container)
+
+ results = [] # Num of metadata keys removed, not objects affected
+ for obj in objs:
+ if meta:
+ for k, v in meta.items():
+ try:
+ result = c.get_object(obj).remove_metadata_key(k)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+ else:
+ try:
+ o = c.get_object(obj)
+ except pyrax.exc.NoSuchObject as e:
+ module.fail_json(msg=e.message)
+
+ for k, v in o.get_metadata().items():
+ try:
+ result = o.remove_metadata_key(k)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['num_deleted'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
+ structure, expires):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "file":
+ if method == 'put':
+ upload(module, cf, container, src, dest, meta, expires)
+
+ elif method == 'get':
+ download(module, cf, container, src, dest, structure)
+
+ elif method == 'delete':
+ delete(module, cf, container, src, dest)
+
+ else:
+ if method == 'get':
+ get_meta(module, cf, container, src, dest)
+
+ if method == 'put':
+ put_meta(module, cf, container, src, dest, meta, clear_meta)
+
+ if method == 'delete':
+ delete_meta(module, cf, container, src, dest, meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(required=True),
+ src=dict(),
+ dest=dict(),
+ method=dict(default='get', choices=['put', 'get', 'delete']),
+ type=dict(default='file', choices=['file', 'meta']),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ structure=dict(default=True, type='bool'),
+ expires=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container = module.params.get('container')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ method = module.params.get('method')
+ typ = module.params.get('type')
+ meta = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ structure = module.params.get('structure')
+ expires = module.params.get('expires')
+
+ if clear_meta and not typ == 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py
new file mode 100644
index 00000000..330c510d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_identity
+short_description: Load Rackspace Cloud Identity
+description:
+ - Verifies Rackspace Cloud credentials and returns identity information
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present']
+ default: present
+ required: false
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Load Rackspace Cloud Identity
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Identity
+ local_action:
+ module: rax_identity
+ credentials: ~/.raxpub
+ region: DFW
+ register: rackspace_identity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def cloud_identity(module, state, identity):
+ instance = dict(
+ authenticated=identity.authenticated,
+ credentials=identity._creds_file
+ )
+ changed = False
+
+ instance.update(rax_to_dict(identity))
+    instance['services'] = list(instance.get('services', {}).keys())  # list, not a dict view, so it serializes
+
+ if state == 'present':
+ if not identity.authenticated:
+ module.fail_json(msg='Credentials could not be verified!')
+
+ module.exit_json(changed=changed, identity=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.identity:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloud_identity(module, state, pyrax.identity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py
new file mode 100644
index 00000000..0314883f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_keypair
+short_description: Create a keypair for use with Rackspace Cloud Servers
+description:
+ - Create a keypair for use with Rackspace Cloud Servers
+options:
+ name:
+ type: str
+ description:
+ - Name of keypair
+ required: true
+ public_key:
+ type: str
+ description:
+ - Public Key string to upload. Can be a file path or string
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+notes:
+ - Keypairs cannot be manipulated, only created and deleted. To "update" a
+    keypair you must first delete it and then recreate it.
+ - The ability to specify a file path for the public key was added in 1.7
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ region: DFW
+ register: keypair
+ - name: Create local public key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.public_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
+ - name: Create local private key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.private_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
+
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
+ region: DFW
+ register: keypair
+'''
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_keypair(module, name, public_key, state):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ keypair = {}
+
+ if state == 'present':
+ if public_key and os.path.isfile(public_key):
+ try:
+            with open(public_key) as f:
+                public_key = f.read()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % public_key)
+
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except cs.exceptions.NotFound:
+ try:
+ keypair = cs.keypairs.create(name, public_key)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except Exception:
+ pass
+
+ if keypair:
+ try:
+ keypair.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
+
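Editorial aside: public_key accepts either the literal key material or a path to a key file; when the value names an existing file, its contents replace the value. The same rule as a small hypothetical helper (using a context manager, as in the module code above):

    # Hedged sketch of the path-or-literal convention for public_key.
    import os

    def resolve_public_key(value):
        """Return key material, reading it from disk when value is a path."""
        if value and os.path.isfile(value):
            with open(value) as f:   # context manager closes the file
                return f.read()
        return value                 # already literal key material (or None)

    assert resolve_public_key('ssh-rsa AAAA... user@host').startswith('ssh-rsa')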
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ public_key=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ public_key = module.params.get('public_key')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ rax_keypair(module, name, public_key, state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py
new file mode 100644
index 00000000..b7d172d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_meta
+short_description: Manipulate metadata for Rackspace Cloud Servers
+description:
+ - Manipulate metadata for Rackspace Cloud Servers
+options:
+ address:
+ type: str
+ description:
+ - Server IP address to modify metadata for, will match any IP assigned to
+ the server
+ id:
+ type: str
+ description:
+ - Server ID to modify metadata for
+ name:
+ type: str
+ description:
+ - Server name to modify metadata for
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Set metadata for a server
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Set metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ meta:
+ group: primary_group
+ groups:
+ - group_two
+ - group_three
+ app: my_app
+
+ - name: Clear metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+'''
+
+import json
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+from ansible.module_utils.six import string_types
+
+
+def rax_meta(module, address, name, server_id, meta):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ search_opts = {}
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+        except Exception:
+            pass
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif not servers:
+ module.fail_json(msg='Failed to find a server matching provided '
+ 'search parameters')
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ server = servers[0]
+ if server.metadata == meta:
+ changed = False
+ else:
+ changed = True
+ removed = set(server.metadata.keys()).difference(meta.keys())
+ cs.servers.delete_meta(server, list(removed))
+ cs.servers.set_meta(server, meta)
+ server.get()
+
+ module.exit_json(changed=changed, meta=server.metadata)
+
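Editorial aside: the Cloud Servers metadata API stores only string values, so rax_meta coerces everything first, lists become comma-joined strings, dicts are JSON-encoded, and any other non-string goes through %s formatting. The coercion loop in isolation, mirroring the normalization above:

    # Hedged illustration of the metadata normalization loop above.
    import json

    def normalize_meta(meta):
        out = {}
        for k, v in meta.items():
            if isinstance(v, list):
                out[k] = ','.join('%s' % i for i in v)   # list -> CSV string
            elif isinstance(v, dict):
                out[k] = json.dumps(v)                   # dict -> JSON string
            else:
                out[k] = '%s' % v                        # anything else -> str
        return out

    assert normalize_meta({'groups': ['a', 'b'], 'count': 3}) == \
        {'groups': 'a,b', 'count': '3'}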
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ meta=dict(type='dict', default=dict()),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+ meta = module.params.get('meta')
+
+ setup_rax_module(module, pyrax)
+
+ rax_meta(module, address, name, server_id, meta)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py
new file mode 100644
index 00000000..8de26609
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm.
+description:
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ type: str
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a String
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ type: str
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ type: str
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ type: str
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ type: str
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+ - If yes, create this alarm, but leave it in an inactive state. Defaults to
+ no.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ community.general.rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+ disabled, metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+ module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+ # Coerce attributes.
+
+ changed = False
+ alarm = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+ if existing:
+ alarm = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_update = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing alarms have the label %s.' %
+ (len(existing), label))
+
+ if alarm:
+ if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+ should_delete = should_create = True
+
+            # disabled is always a bool; compare it directly so an alarm can be re-enabled
+            should_update = (disabled != alarm.disabled) or \
+                (metadata and metadata != alarm.metadata) or \
+                (criteria and criteria != alarm.criteria)
+
+ if should_update and not should_delete:
+ cm.update_alarm(entity=entity_id, alarm=alarm,
+ criteria=criteria, disabled=disabled,
+ label=label, metadata=metadata)
+ changed = True
+
+ if should_delete:
+ alarm.delete()
+ changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ alarm = cm.create_alarm(entity=entity_id, check=check_id,
+ notification_plan=notification_plan_id,
+ criteria=criteria, disabled=disabled, label=label,
+ metadata=metadata)
+ changed = True
+ else:
+ for a in existing:
+ a.delete()
+ changed = True
+
+ if alarm:
+ alarm_dict = {
+ "id": alarm.id,
+ "label": alarm.label,
+ "check_id": alarm.check_id,
+ "notification_plan_id": alarm.notification_plan_id,
+ "criteria": alarm.criteria,
+ "disabled": alarm.disabled,
+ "metadata": alarm.metadata
+ }
+ module.exit_json(changed=changed, alarm=alarm_dict)
+ else:
+ module.exit_json(changed=changed)
+
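Editorial aside: alarm() reduces reconciliation to three booleans; a label match whose check or notification plan differs forces delete-and-recreate, a differing criteria/disabled/metadata value triggers an in-place update, and no match at all triggers a create. The decision table on its own, over plain dicts standing in for pyrax alarm objects:

    # Hedged sketch of the reconciliation decisions made by alarm() above.
    def plan_actions(existing, desired):
        if existing is None:
            return {'create': True, 'update': False, 'delete': False}
        recreate = (existing['check_id'] != desired['check_id'] or
                    existing['notification_plan_id'] !=
                    desired['notification_plan_id'])
        update = not recreate and any(
            existing[k] != desired[k]
            for k in ('criteria', 'disabled', 'metadata'))
        return {'create': recreate, 'update': update, 'delete': recreate}

    current = dict(check_id='c1', notification_plan_id='np1',
                   criteria='...', disabled=False, metadata=None)
    assert plan_actions(None, current)['create']
    assert plan_actions(current, dict(current, disabled=True))['update']
    assert plan_actions(current, dict(current, check_id='c2'))['delete']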
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ entity_id=dict(required=True),
+ check_id=dict(required=True),
+ notification_plan_id=dict(required=True),
+ criteria=dict(),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ entity_id = module.params.get('entity_id')
+ check_id = module.params.get('check_id')
+ notification_plan_id = module.params.get('notification_plan_id')
+ criteria = module.params.get('criteria')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ alarm(module, state, label, entity_id, check_id, notification_plan_id,
+ criteria, disabled, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py
new file mode 100644
index 00000000..e04dfc74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ entity_id:
+ type: str
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ type: str
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ type: str
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ - |
+ Choices for this option are:
+ - C(remote.dns)
+ - C(remote.ftp-banner)
+ - C(remote.http)
+ - C(remote.imap-banner)
+ - C(remote.mssql-banner)
+ - C(remote.mysql-banner)
+ - C(remote.ping)
+ - C(remote.pop3-banner)
+ - C(remote.postgresql-banner)
+ - C(remote.smtp-banner)
+ - C(remote.smtp)
+ - C(remote.ssh)
+ - C(remote.tcp)
+ - C(remote.telnet-banner)
+ - C(agent.filesystem)
+ - C(agent.memory)
+ - C(agent.load_average)
+ - C(agent.cpu)
+ - C(agent.disk)
+ - C(agent.network)
+ - C(agent.plugin)
+ required: true
+ monitoring_zones_poll:
+ type: str
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ type: str
+ description:
+    - One of C(target_hostname) and C(target_alias) is required for remote.* checks,
+      but prohibited for agent.* checks. The hostname this check should target.
+      Must be a valid IPv4 or IPv6 address, or a fully qualified domain name.
+ target_alias:
+ type: str
+ description:
+    - One of C(target_alias) and C(target_hostname) is required for remote.* checks,
+      but prohibited for agent.* checks. Use the corresponding key in the entity's
+      C(ip_addresses) hash to resolve an IP address to target.
+ details:
+ type: dict
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+    - If C(true), ensure the check is created, but don't actually use it yet.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ type: int
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ type: int
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ community.general.rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+        metadata:
+ hurf: durf
+ register: the_check
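+    # A hedged additional sketch: an agent-side check. It assumes the entity
+    # was created with a non-null agent_id; agent.* checks take no
+    # monitoring_zones_poll or target options.
+    - name: Associate an agent.memory check with the same entity.
+      community.general.rax_mon_check:
+        credentials: ~/.rax_pub
+        state: present
+        entity_id: "{{ the_entity['entity']['id'] }}"
+        label: the_memory_check
+        check_type: agent.memory
+      register: the_memory_check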
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout):
+
+ # Coerce attributes.
+
+ if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
+ monitoring_zones_poll = [monitoring_zones_poll]
+
+ if period:
+ period = int(period)
+
+ if timeout:
+ timeout = int(timeout)
+
+ changed = False
+ check = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ entity = cm.get_entity(entity_id)
+ if not entity:
+ module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
+ ' a valid entity id.' % entity_id)
+
+ existing = [e for e in entity.list_checks() if e.label == label]
+
+ if existing:
+ check = existing[0]
+
+ if state == 'present':
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing checks have a label of %s.' %
+ (len(existing), label))
+
+ should_delete = False
+ should_create = False
+ should_update = False
+
+ if check:
+ # Details may include keys set to default values that are not
+ # included in the initial creation.
+ #
+ # Only force a recreation of the check if one of the *specified*
+ # keys is missing or has a different value.
+ if details:
+ for (key, value) in details.items():
+ if key not in check.details:
+ should_delete = should_create = True
+ elif value != check.details[key]:
+ should_delete = should_create = True
+
+ should_update = label != check.label or \
+ (target_hostname and target_hostname != check.target_hostname) or \
+ (target_alias and target_alias != check.target_alias) or \
+ (disabled != check.disabled) or \
+ (metadata and metadata != check.metadata) or \
+ (period and period != check.period) or \
+ (timeout and timeout != check.timeout) or \
+ (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
+
+ if should_update and not should_delete:
+ check.update(label=label,
+ disabled=disabled,
+ metadata=metadata,
+ monitoring_zones_poll=monitoring_zones_poll,
+ timeout=timeout,
+ period=period,
+ target_alias=target_alias,
+ target_hostname=target_hostname)
+ changed = True
+ else:
+ # The check doesn't exist yet.
+ should_create = True
+
+ if should_delete:
+ check.delete()
+
+ if should_create:
+ check = cm.create_check(entity,
+ label=label,
+ check_type=check_type,
+ target_hostname=target_hostname,
+ target_alias=target_alias,
+ monitoring_zones_poll=monitoring_zones_poll,
+ details=details,
+ disabled=disabled,
+ metadata=metadata,
+ period=period,
+ timeout=timeout)
+ changed = True
+ elif state == 'absent':
+ if check:
+ check.delete()
+ changed = True
+ else:
+ module.fail_json(msg='state must be either present or absent.')
+
+ if check:
+ check_dict = {
+ "id": check.id,
+ "label": check.label,
+ "type": check.type,
+ "target_hostname": check.target_hostname,
+ "target_alias": check.target_alias,
+ "monitoring_zones_poll": check.monitoring_zones_poll,
+ "details": check.details,
+ "disabled": check.disabled,
+ "metadata": check.metadata,
+ "period": check.period,
+ "timeout": check.timeout
+ }
+ module.exit_json(changed=changed, check=check_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ entity_id=dict(required=True),
+ label=dict(required=True),
+ check_type=dict(required=True),
+ monitoring_zones_poll=dict(),
+ target_hostname=dict(),
+ target_alias=dict(),
+ details=dict(type='dict', default={}),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict', default={}),
+ period=dict(type='int'),
+ timeout=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ entity_id = module.params.get('entity_id')
+ label = module.params.get('label')
+ check_type = module.params.get('check_type')
+ monitoring_zones_poll = module.params.get('monitoring_zones_poll')
+ target_hostname = module.params.get('target_hostname')
+ target_alias = module.params.get('target_alias')
+ details = module.params.get('details')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+ period = module.params.get('period')
+ timeout = module.params.get('timeout')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py
new file mode 100644
index 00000000..69f49cd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ label:
+ type: str
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ type: str
+ description:
+    - Ensure that an entity with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ agent_id:
+ type: str
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ type: dict
+ description:
+    - Hash of IP addresses that may be referenced by name by rax_mon_checks
+      added to this entity. Must be a dictionary with keys that are names
+      between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+      addresses.
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ community.general.rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.0.2.4
+ db_box: 192.0.2.5
+        metadata:
+ hurf: durf
+ register: the_entity
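+    # A hedged follow-up sketch: a rax_mon_check can target one of the
+    # entity's named_ip_addresses by key through target_alias (web_box above).
+    - name: Ping the web box through its named IP address.
+      community.general.rax_mon_check:
+        credentials: ~/.rax_pub
+        state: present
+        entity_id: "{{ the_entity['entity']['id'] }}"
+        label: ping_web_box
+        check_type: remote.ping
+        monitoring_zones_poll: mzdfw,mzord
+        target_alias: web_box
+      register: web_ping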
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
+ metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for entity in cm.list_entities():
+ if label == entity.label:
+ existing.append(entity)
+
+ entity = None
+
+ if existing:
+ entity = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing entities have the label %s.' %
+ (len(existing), label))
+
+ if entity:
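+            # A change to the named IP addresses is handled by recreating the
+            # entity rather than updating it in place.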
+ if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
+ should_delete = should_create = True
+
+ # Change an existing Entity, unless there's nothing to do.
+            should_update = (agent_id and agent_id != entity.agent_id) or \
+                (metadata and metadata != entity.metadata)
+
+ if should_update and not should_delete:
+ entity.update(agent_id, metadata)
+ changed = True
+
+ if should_delete:
+ entity.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ # Create a new Entity.
+ entity = cm.create_entity(label=label, agent=agent_id,
+ ip_addresses=named_ip_addresses,
+ metadata=metadata)
+ changed = True
+ else:
+ # Delete the existing Entities.
+ for e in existing:
+ e.delete()
+ changed = True
+
+ if entity:
+ entity_dict = {
+ "id": entity.id,
+ "name": entity.name,
+ "agent_id": entity.agent_id,
+ }
+ module.exit_json(changed=changed, entity=entity_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ agent_id=dict(),
+ named_ip_addresses=dict(type='dict', default={}),
+ metadata=dict(type='dict', default={})
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ agent_id = module.params.get('agent_id')
+ named_ip_addresses = module.params.get('named_ip_addresses')
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py
new file mode 100644
index 00000000..416d03ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ type: str
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ type: dict
+ description:
+    - Dictionary of key-value pairs used to initialize the notification.
+      Required keys and meanings vary with notification type. See
+      http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/service-notification-types-crud.html
+      for details.
+ required: true
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+      community.general.rax_mon_notification:
+        credentials: ~/.rax_pub
+        label: omg
+        notification_type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
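+    # A hedged additional sketch: a webhook notification. The required details
+    # keys vary by notification type; url here follows the Rackspace API docs.
+    - name: Hit a webhook when something goes wrong.
+      community.general.rax_mon_notification:
+        credentials: ~/.rax_pub
+        label: call_home
+        notification_type: webhook
+        details:
+          url: https://example.com/monitoring-hook
+      register: webhook_notification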
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification(module, state, label, notification_type, details):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notifications():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing notifications are labelled %s.' %
+ (len(existing), label))
+
+ if notification:
+ should_delete = (notification_type != notification.type)
+
+ should_update = (details != notification.details)
+
+ if should_update and not should_delete:
+                notification.update(details=details)
+ changed = True
+
+            if should_delete:
+                # The notification type cannot be changed in place, so delete
+                # the stale notification and recreate it below.
+                notification.delete()
+                should_create = True
+                changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification = cm.create_notification(notification_type,
+ label=label, details=details)
+ changed = True
+ else:
+ for n in existing:
+ n.delete()
+ changed = True
+
+ if notification:
+ notification_dict = {
+ "id": notification.id,
+ "type": notification.type,
+ "label": notification.label,
+ "details": notification.details
+ }
+ module.exit_json(changed=changed, notification=notification_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+ details=dict(required=True, type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ notification_type = module.params.get('notification_type')
+ details = module.params.get('details')
+
+ setup_rax_module(module, pyrax)
+
+ notification(module, state, label, notification_type, details)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
new file mode 100644
index 00000000..a4b8920d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+ plan.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ community.general.rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
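+    # ok_state accepts a notification list in the same way as critical_state
+    # and warning_state.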
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification_plan(module, state, label, critical_state, warning_state, ok_state):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification_plan = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notification_plans():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification_plan = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s notification plans are labelled %s.' %
+ (len(existing), label))
+
+ if notification_plan:
+ should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
+ (warning_state and warning_state != notification_plan.warning_state) or \
+ (ok_state and ok_state != notification_plan.ok_state)
+
+ if should_delete:
+ notification_plan.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification_plan = cm.create_notification_plan(label=label,
+ critical_state=critical_state,
+ warning_state=warning_state,
+ ok_state=ok_state)
+ changed = True
+ else:
+ for np in existing:
+ np.delete()
+ changed = True
+
+ if notification_plan:
+ notification_plan_dict = {
+ "id": notification_plan.id,
+ "critical_state": notification_plan.critical_state,
+ "warning_state": notification_plan.warning_state,
+ "ok_state": notification_plan.ok_state,
+ "metadata": notification_plan.metadata
+ }
+ module.exit_json(changed=changed, notification_plan=notification_plan_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ critical_state=dict(type='list'),
+ warning_state=dict(type='list'),
+ ok_state=dict(type='list')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ critical_state = module.params.get('critical_state')
+ warning_state = module.params.get('warning_state')
+ ok_state = module.params.get('ok_state')
+
+ setup_rax_module(module, pyrax)
+
+ notification_plan(module, state, label, critical_state, warning_state, ok_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py
new file mode 100644
index 00000000..27a793b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_network
+short_description: create / delete an isolated network in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud isolated network.
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ label:
+ type: str
+ description:
+ - Label (name) to give the network
+ required: yes
+ cidr:
+ type: str
+ description:
+ - cidr of the network being created
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Jesse Keating (@omgjlk)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build an Isolated Network
+  gather_facts: False
+  hosts: local
+  connection: local
+ tasks:
+ - name: Network create request
+ local_action:
+ module: rax_network
+ credentials: ~/.raxpub
+ label: my-net
+ cidr: 192.168.3.0/24
+ state: present
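+
+    # A hedged teardown sketch: deleting the same isolated network.
+    - name: Network delete request
+      local_action:
+        module: rax_network
+        credentials: ~/.raxpub
+        label: my-net
+        state: absent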
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_network(module, state, label, cidr):
+ changed = False
+ network = None
+ networks = []
+
+ if not pyrax.cloud_networks:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not cidr:
+ module.fail_json(msg='missing required arguments: cidr')
+
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ except pyrax.exceptions.NetworkNotFound:
+ try:
+ network = pyrax.cloud_networks.create(label, cidr=cidr)
+ changed = True
+            except Exception as e:
+                module.fail_json(msg='%s' % e)
+        except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ elif state == 'absent':
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ network.delete()
+ changed = True
+ except pyrax.exceptions.NetworkNotFound:
+ pass
+        except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ if network:
+ instance = dict(id=network.id,
+ label=network.label,
+ cidr=network.cidr)
+ networks.append(instance)
+
+ module.exit_json(changed=changed, networks=networks)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present',
+ choices=['present', 'absent']),
+ label=dict(required=True),
+ cidr=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ cidr = module.params.get('cidr')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_network(module, state, label, cidr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py
new file mode 100644
index 00000000..dca006da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_queue
+short_description: create / delete a queue in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud queue.
+options:
+ name:
+ type: str
+ description:
+ - Name to give the queue
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Queue
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Queue create request
+ local_action:
+ module: rax_queue
+ credentials: ~/.raxpub
+ name: my-queue
+ region: DFW
+ state: present
+ register: my_queue
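+
+    # A hedged teardown sketch: deleting the same queue.
+    - name: Queue delete request
+      local_action:
+        module: rax_queue
+        credentials: ~/.raxpub
+        name: my-queue
+        region: DFW
+        state: absent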
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_queue(module, state, name):
+    for arg_name, arg in (('state', state), ('name', name)):
+        if not arg:
+            module.fail_json(msg='%s is required for rax_queue' % arg_name)
+
+ changed = False
+ queues = []
+ instance = {}
+
+ cq = pyrax.queues
+ if not cq:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ for queue in cq.list():
+ if name != queue.name:
+ continue
+
+ queues.append(queue)
+
+ if len(queues) > 1:
+ module.fail_json(msg='Multiple Queues were matched by name')
+
+ if state == 'present':
+ if not queues:
+ try:
+ queue = cq.create(name)
+ changed = True
+            except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ queue = queues[0]
+
+ instance = dict(name=queue.name)
+ result = dict(changed=changed, queue=instance)
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if queues:
+ queue = queues[0]
+ try:
+ queue.delete()
+ changed = True
+            except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, queue=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_queue(module, state, name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py
new file mode 100644
index 00000000..7b2b6ace
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_group
+short_description: Manipulate Rackspace Cloud Autoscale Groups
+description:
+ - Manipulate Rackspace Cloud Autoscale Groups
+options:
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+      - If not specified, it will fall back to C(auto).
+ choices:
+ - auto
+ - manual
+ files:
+ type: dict
+ description:
+ - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ required: true
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name)
+ required: true
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ loadbalancers:
+ type: list
+ description:
+ - List of load balancer C(id) and C(port) hashes
+ max_entities:
+ type: int
+ description:
+ - The maximum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ min_entities:
+ type: int
+ description:
+ - The minimum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ name:
+ type: str
+ description:
+ - Name to give the scaling group
+ required: true
+ networks:
+ type: list
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ server_name:
+ type: str
+ description:
+ - The base name for servers created by Autoscale
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+ - Data to be uploaded to the servers config drive. This option implies
+ I(config_drive). Can be a file path or a string
+ wait:
+ description:
+ - wait for the scaling group to finish provisioning the minimum amount of
+ servers
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_group:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ flavor: performance1-1
+ image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+ min_entities: 5
+ max_entities: 10
+ name: ASG Test
+ server_name: asgtest
+ loadbalancers:
+ - id: 228385
+ port: 80
+ register: asg
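+
+    # A hedged variation: the same group with injected files and user_data;
+    # the local paths below are illustrative assumptions.
+    - community.general.rax_scaling_group:
+        credentials: ~/.raxpub
+        region: ORD
+        cooldown: 300
+        flavor: performance1-1
+        image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+        min_entities: 5
+        max_entities: 10
+        name: ASG Test
+        server_name: asgtest
+        files:
+          /root/.ssh/authorized_keys: ~/.ssh/id_rsa.pub
+        user_data: ~/asg-cloud-init.yml
+      register: asg_with_files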
+'''
+
+import base64
+import json
+import os
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six import string_types
+
+
+def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
+ image=None, key_name=None, loadbalancers=None, meta=None,
+ min_entities=0, max_entities=0, name=None, networks=None,
+ server_name=None, state='present', user_data=None,
+ config_drive=False, wait=True, wait_timeout=300):
+ files = {} if files is None else files
+ loadbalancers = [] if loadbalancers is None else loadbalancers
+ meta = {} if meta is None else meta
+ networks = [] if networks is None else networks
+
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate clients. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(user_data):
+        try:
+            with open(user_data) as f:
+                user_data = f.read()
+        except Exception as e:
+            module.fail_json(msg='Failed to load %s: %s' % (user_data, e))
+
+ if state == 'present':
+ # Normalize and ensure all metadata values are strings
+ if meta:
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ for nic in nics:
+ # pyrax is currently returning net-id, but we need uuid
+ # this check makes this forward compatible for a time when
+ # pyrax uses uuid instead
+ if nic.get('net-id'):
+ nic.update(uuid=nic['net-id'])
+ del nic['net-id']
+
+ # Handle the file contents
+ personality = []
+ if files:
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+                try:
+                    with open(lpath, 'r') as f:
+                        personality.append({
+                            'path': rpath,
+                            'contents': f.read()
+                        })
+                except Exception as e:
+                    module.fail_json(msg='Failed to load %s: %s' % (lpath, e))
+
+ lbs = []
+ if loadbalancers:
+ for lb in loadbalancers:
+ try:
+ lb_id = int(lb.get('id'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer ID is not an integer: '
+ '%s' % lb.get('id'))
+ try:
+ port = int(lb.get('port'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer port is not an '
+ 'integer: %s' % lb.get('port'))
+ if not lb_id or not port:
+ continue
+ lbs.append((lb_id, port))
+
+ try:
+ sg = au.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+            module.fail_json(msg='%s' % e)
+ except pyrax.exceptions.NotFound:
+ try:
+ sg = au.create(name, cooldown=cooldown,
+ min_entities=min_entities,
+ max_entities=max_entities,
+ launch_config_type='launch_server',
+ server_name=server_name, image=image,
+ flavor=flavor, disk_config=disk_config,
+ metadata=meta, personality=personality,
+ networks=nics, load_balancers=lbs,
+ key_name=key_name, config_drive=config_drive,
+ user_data=user_data)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ if not changed:
+ # Scaling Group Updates
+ group_args = {}
+ if cooldown != sg.cooldown:
+ group_args['cooldown'] = cooldown
+
+ if min_entities != sg.min_entities:
+ group_args['min_entities'] = min_entities
+
+ if max_entities != sg.max_entities:
+ group_args['max_entities'] = max_entities
+
+ if group_args:
+ changed = True
+ sg.update(**group_args)
+
+ # Launch Configuration Updates
+ lc = sg.get_launch_config()
+ lc_args = {}
+ if server_name != lc.get('name'):
+ lc_args['server_name'] = server_name
+
+ if image != lc.get('image'):
+ lc_args['image'] = image
+
+ if flavor != lc.get('flavor'):
+ lc_args['flavor'] = flavor
+
+ disk_config = disk_config or 'AUTO'
+            if disk_config != lc.get('disk_config', 'AUTO'):
+                lc_args['disk_config'] = disk_config
+
+ if (meta or lc.get('meta')) and meta != lc.get('metadata'):
+ lc_args['metadata'] = meta
+
+ test_personality = []
+ for p in personality:
+ test_personality.append({
+ 'path': p['path'],
+                    'contents': base64.b64encode(p['contents'].encode('utf-8')).decode('ascii')
+ })
+ if ((test_personality or lc.get('personality')) and
+ test_personality != lc.get('personality')):
+ lc_args['personality'] = personality
+
+ if nics != lc.get('networks'):
+ lc_args['networks'] = nics
+
+ if lbs != lc.get('load_balancers'):
+ # Work around for https://github.com/rackspace/pyrax/pull/393
+ lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
+
+ if key_name != lc.get('key_name'):
+ lc_args['key_name'] = key_name
+
+ if config_drive != lc.get('config_drive', False):
+ lc_args['config_drive'] = config_drive
+
+ if (user_data and
+                    base64.b64encode(user_data.encode('utf-8')).decode('ascii') != lc.get('user_data')):
+ lc_args['user_data'] = user_data
+
+ if lc_args:
+ # Work around for https://github.com/rackspace/pyrax/pull/389
+ if 'flavor' not in lc_args:
+ lc_args['flavor'] = lc.get('flavor')
+ changed = True
+ sg.update_launch_config(**lc_args)
+
+ sg.get()
+
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+                group_state = sg.get_state()
+                if group_state["pending_capacity"] == 0:
+ break
+
+ time.sleep(5)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+ else:
+ try:
+ sg = au.find(name=name)
+ sg.delete()
+ changed = True
+        except pyrax.exceptions.NotFound:
+            sg = {}
+        except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ config_drive=dict(default=False, type='bool'),
+ cooldown=dict(type='int', default=300),
+ disk_config=dict(choices=['auto', 'manual']),
+ files=dict(type='dict', default={}),
+ flavor=dict(required=True),
+ image=dict(required=True),
+ key_name=dict(),
+ loadbalancers=dict(type='list'),
+ meta=dict(type='dict', default={}),
+ min_entities=dict(type='int', required=True),
+ max_entities=dict(type='int', required=True),
+ name=dict(required=True),
+ networks=dict(type='list', default=['public', 'private']),
+ server_name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ config_drive = module.params.get('config_drive')
+ cooldown = module.params.get('cooldown')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ image = module.params.get('image')
+ key_name = module.params.get('key_name')
+ loadbalancers = module.params.get('loadbalancers')
+ meta = module.params.get('meta')
+ min_entities = module.params.get('min_entities')
+ max_entities = module.params.get('max_entities')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ server_name = module.params.get('server_name')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+
+ if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
+ module.fail_json(msg='min_entities and max_entities must be an '
+ 'integer between 0 and 1000')
+
+ if not 0 <= cooldown <= 86400:
+ module.fail_json(msg='cooldown must be an integer between 0 and 86400')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asg(module, cooldown=cooldown, disk_config=disk_config,
+ files=files, flavor=flavor, image=image, meta=meta,
+ key_name=key_name, loadbalancers=loadbalancers,
+ min_entities=min_entities, max_entities=max_entities,
+ name=name, networks=networks, server_name=server_name,
+ state=state, config_drive=config_drive, user_data=user_data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py
new file mode 100644
index 00000000..384825f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_policy
+short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
+description:
+ - Manipulate Rackspace Cloud Autoscale Scaling Policy
+options:
+ at:
+ type: str
+ description:
+ - The UTC time when this policy will be executed. The time must be
+ formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
+ C(2013-05-19T08:07:08Z)
+ change:
+ type: int
+ description:
+ - The change, either as a number of servers or as a percentage, to make
+ in the scaling group. If this is a percentage, you must set
+ I(is_percent) to C(true) also.
+ cron:
+ type: str
+ description:
+    - The time when the policy will be executed, as a cron entry. For
+      example, setting this parameter to C(1 0 * * *) runs the policy every
+      day at 00:01.
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ desired_capacity:
+ type: int
+ description:
+      - The desired server capacity of the scaling group; that is, how
+        many servers should be in the scaling group.
+ is_percent:
+ description:
+ - Whether the value in I(change) is a percent value
+ default: false
+ type: bool
+ name:
+ type: str
+ description:
+ - Name to give the policy
+ required: true
+ policy_type:
+ type: str
+ description:
+    - The type of policy that will be executed.
+ choices:
+ - webhook
+ - schedule
+ required: true
+ scaling_group:
+ type: str
+ description:
+ - Name of the scaling group that this policy will be added to
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ at: '2013-05-19T08:07:08Z'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - at
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asps_at
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cron: '1 0 * * *'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - cron
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asp_cron
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ desired_capacity: 5
+ name: ASG Test Policy - webhook
+ policy_type: webhook
+ scaling_group: ASG Test
+ register: asp_webhook
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
+ desired_capacity=0, is_percent=False, name=None,
+ policy_type=None, scaling_group=None, state='present'):
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ UUID(scaling_group)
+ except ValueError:
+ try:
+ sg = au.find(name=scaling_group)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ else:
+ try:
+ sg = au.get(scaling_group)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+        policies = [p for p in sg.list_policies() if name == p.name]
+ if len(policies) > 1:
+ module.fail_json(msg='No unique policy match found by name')
+ if at:
+ args = dict(at=at)
+ elif cron:
+ args = dict(cron=cron)
+ else:
+ args = None
+
+ if not policies:
+ try:
+ policy = sg.add_policy(name, policy_type=policy_type,
+ cooldown=cooldown, change=change,
+ is_percent=is_percent,
+ desired_capacity=desired_capacity,
+ args=args)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ else:
+ policy = policies[0]
+ kwargs = {}
+ if policy_type != policy.type:
+ kwargs['policy_type'] = policy_type
+
+ if cooldown != policy.cooldown:
+ kwargs['cooldown'] = cooldown
+
+ if hasattr(policy, 'change') and change != policy.change:
+ kwargs['change'] = change
+
+ if hasattr(policy, 'changePercent') and is_percent is False:
+ kwargs['change'] = change
+ kwargs['is_percent'] = False
+ elif hasattr(policy, 'change') and is_percent is True:
+ kwargs['change'] = change
+ kwargs['is_percent'] = True
+
+ if hasattr(policy, 'desiredCapacity') and change:
+ kwargs['change'] = change
+ elif ((hasattr(policy, 'change') or
+ hasattr(policy, 'changePercent')) and desired_capacity):
+ kwargs['desired_capacity'] = desired_capacity
+
+ if hasattr(policy, 'args') and args != policy.args:
+ kwargs['args'] = args
+
+ if kwargs:
+ policy.update(**kwargs)
+ changed = True
+
+ policy.get()
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+ else:
+ try:
+            policies = [p for p in sg.list_policies() if name == p.name]
+            if len(policies) > 1:
+                module.fail_json(msg='No unique policy match found by name')
+            elif not policies:
+                policy = {}
+            else:
+                policy = policies[0]
+                policy.delete()
+                changed = True
+        except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ at=dict(),
+ change=dict(type='int'),
+ cron=dict(),
+ cooldown=dict(type='int', default=300),
+ desired_capacity=dict(type='int'),
+ is_percent=dict(type='bool', default=False),
+ name=dict(required=True),
+ policy_type=dict(required=True, choices=['webhook', 'schedule']),
+ scaling_group=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['cron', 'at'],
+ ['change', 'desired_capacity'],
+ ]
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ at = module.params.get('at')
+ change = module.params.get('change')
+ cron = module.params.get('cron')
+ cooldown = module.params.get('cooldown')
+ desired_capacity = module.params.get('desired_capacity')
+ is_percent = module.params.get('is_percent')
+ name = module.params.get('name')
+ policy_type = module.params.get('policy_type')
+ scaling_group = module.params.get('scaling_group')
+ state = module.params.get('state')
+
+ if (at or cron) and policy_type == 'webhook':
+ module.fail_json(msg='policy_type=schedule is required for a time '
+ 'based policy')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
+ desired_capacity=desired_capacity, is_percent=is_percent,
+ name=name, policy_type=policy_type, scaling_group=scaling_group,
+ state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py
new file mode 100644
index 00000000..8df9a5e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py
@@ -0,0 +1,671 @@
+#!/usr/bin/python
+#
+# Scaleway Compute management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute
+short_description: Scaleway compute management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages compute instances on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ public_ip:
+ type: str
+ description:
+      - Manage the public IP attached to a Scaleway server
+      - Can be a Scaleway IP address UUID
+      - C(dynamic) means the IP is destroyed at the same time the host is destroyed
+      - C(absent) means no public IP at all
+ default: absent
+
+ enable_ipv6:
+ description:
+ - Enable public IPv6 connectivity on the instance
+ default: false
+ type: bool
+
+ image:
+ type: str
+ description:
+      - Image identifier used to start the instance
+ required: true
+
+ name:
+ type: str
+ description:
+ - Name of the instance
+
+ organization:
+ type: str
+ description:
+ - Organization identifier
+ required: true
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+ - running
+ - restarted
+ - stopped
+
+ tags:
+ type: list
+ description:
+ - List of tags to apply to the instance (5 max)
+ required: false
+ default: []
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ commercial_type:
+ type: str
+ description:
+ - Commercial name of the compute node
+ required: true
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: 'no'
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the server to reach the expected state
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the server
+ required: false
+ default: 3
+
+ security_group:
+ type: str
+ description:
+ - Security group unique identifier
+      - If no value is provided, the default security group or the current security group will be used
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a server
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ tags:
+ - test
+ - www
+
+- name: Create a server attached to a security group
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
+ tags:
+ - test
+ - www
+
+- name: Destroy it right after
+ community.general.scaleway_compute:
+ name: foobar
+ state: absent
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
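+
+# Illustrative only: combines the documented state, public_ip and wait options;
+# all identifiers are placeholders.
+- name: Ensure the server is running with a dynamic public IP
+  community.general.scaleway_compute:
+    name: foobar
+    state: running
+    image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+    organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+    region: ams1
+    commercial_type: VC1S
+    public_ip: dynamic
+    wait: true
+    wait_timeout: 600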
+'''
+
+RETURN = '''
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+SCALEWAY_SERVER_STATES = (
+ 'stopped',
+ 'stopping',
+ 'starting',
+ 'running',
+ 'locked'
+)
+
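+# Transient states: a server reporting one of these is still moving toward a stable state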
+SCALEWAY_TRANSITIONS_STATES = (
+ "stopping",
+ "starting",
+ "pending"
+)
+
+
+def check_image_id(compute_api, image_id):
+ response = compute_api.get(path="images/%s" % image_id)
+
+ if not response.ok:
+        msg = 'Error getting image %s on %s: %s' % (image_id, compute_api.module.params.get('api_url'), response.json)
+ compute_api.module.fail_json(msg=msg)
+
+
+def fetch_state(compute_api, server):
+ compute_api.module.debug("fetch_state of server: %s" % server["id"])
+ response = compute_api.get(path="servers/%s" % server["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
+ return response.json["server"]["state"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(compute_api, server, wait=None):
+ if wait is None:
+ wait = compute_api.module.params["wait"]
+ if not wait:
+ return
+
+ wait_timeout = compute_api.module.params["wait_timeout"]
+ wait_sleep_time = compute_api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
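+    # Poll until the server leaves the transition states; the 'else' branch of
+    # this 'while' runs only if the timeout expires without a break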
+ while datetime.datetime.utcnow() < end:
+ compute_api.module.debug("We are going to wait for the server to finish its transition")
+ if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
+ compute_api.module.debug("It seems that the server is not in transition anymore.")
+ compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ compute_api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def public_ip_payload(compute_api, public_ip):
+ # We don't want a public ip
+ if public_ip in ("absent",):
+ return {"dynamic_ip_required": False}
+
+ # IP is only attached to the instance and is released as soon as the instance terminates
+ if public_ip in ("dynamic", "allocated"):
+ return {"dynamic_ip_required": True}
+
+ # We check that the IP we want to attach exists, if so its ID is returned
+ response = compute_api.get("ips")
+ if not response.ok:
+ msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ ip_list = []
+ try:
+ ip_list = response.json["ips"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
+
+    lookup = [ip["id"] for ip in ip_list]
+    if public_ip in lookup:
+        return {"public_ip": public_ip}
+
+    # Fail explicitly here: falling through would return None, and core() would
+    # then crash when merging the payload into the wished server
+    compute_api.module.fail_json(msg="Public IP %s does not exist on this account" % public_ip)
+
+
+def create_server(compute_api, server):
+ compute_api.module.debug("Starting a create_server")
+ target_server = None
+ data = {"enable_ipv6": server["enable_ipv6"],
+ "tags": server["tags"],
+ "commercial_type": server["commercial_type"],
+ "image": server["image"],
+ "dynamic_ip_required": server["dynamic_ip_required"],
+ "name": server["name"],
+ "organization": server["organization"]
+ }
+
+ if server["security_group"]:
+ data["security_group"] = server["security_group"]
+
+ response = compute_api.post(path="servers", data=data)
+
+ if not response.ok:
+ msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def restart_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="reboot")
+
+
+def stop_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweroff")
+
+
+def start_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweron")
+
+
+def perform_action(compute_api, server, action):
+ response = compute_api.post(path="servers/%s/action" % server["id"],
+ data={"action": action})
+ if not response.ok:
+ msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def remove_server(compute_api, server):
+ compute_api.module.debug("Starting remove server strategy")
+ response = compute_api.delete(path="servers/%s" % server["id"])
+ if not response.ok:
+ msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def present_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting present strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ return changed, target_server
+
+
+def absent_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting absent strategy")
+ changed = False
+ target_server = None
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ return changed, {"status": "Server already absent."}
+ else:
+ target_server = query_results[0]
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be made absent." % target_server["id"]}
+
+ # A server MUST be stopped to be deleted.
+ while fetch_state(compute_api=compute_api, server=target_server) != "stopped":
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+ response = stop_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+
+ response = remove_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ return changed, {"status": "Server %s deleted" % target_server["id"]}
+
+
+def running_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting running strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being run."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("running", "starting"):
+ compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ response = start_server(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def stop_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting stop strategy")
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ changed = False
+
+ if not query_results:
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being stopped."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ changed = True
+ else:
+ target_server = query_results[0]
+
+ compute_api.module.debug("stop_strategy: Servers are found.")
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("stopped",):
+ compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be stopped." % target_server["id"]}
+
+ response = stop_server(compute_api=compute_api, server=target_server)
+ compute_api.module.debug(response.json)
+ compute_api.module.debug(response.ok)
+
+ if not response.ok:
+ msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def restart_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting restart strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being rebooted."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api,
+ target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
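+# Dispatch table: the module's 'state' parameter selects one of the strategies above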
+state_strategy = {
+ "present": present_strategy,
+ "restarted": restart_strategy,
+ "stopped": stop_strategy,
+ "running": running_strategy,
+ "absent": absent_strategy
+}
+
+
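+# Look a server up by name; callers pass per_page=1 and only consider the first match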
+def find(compute_api, wished_server, per_page=1):
+ compute_api.module.debug("Getting inside find")
+ # Only the name attribute is accepted in the Compute query API
+ response = compute_api.get("servers", params={"name": wished_server["name"],
+ "per_page": per_page})
+
+ if not response.ok:
+ msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ search_results = response.json["servers"]
+
+ return search_results
+
+
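+# Server attributes the module can update in place with a PATCH request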
+PATCH_MUTABLE_SERVER_ATTRIBUTES = (
+ "ipv6",
+ "tags",
+ "name",
+ "dynamic_ip_required",
+ "security_group",
+)
+
+
+def server_attributes_should_be_changed(compute_api, target_server, wished_server):
+ compute_api.module.debug("Checking if server attributes should be changed")
+ compute_api.module.debug("Current Server: %s" % target_server)
+ compute_api.module.debug("Wished Server: %s" % wished_server)
+ debug_dict = dict((x, (target_server[x], wished_server[x]))
+ for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
+ if x in target_server and x in wished_server)
+ compute_api.module.debug("Debug dict %s" % debug_dict)
+ try:
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+                # When working with a dict, only the ID matters: users are asked to put only the resource ID in the playbook
+                if (isinstance(target_server[key], dict) and wished_server[key]
+                        and "id" in target_server[key]
+                        and target_server[key]["id"] != wished_server[key]):
+                    return True
+                # For any other structure, simply compare the two values directly
+                elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
+ return True
+ return False
+ except AttributeError:
+ compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
+
+
+def server_change_attributes(compute_api, target_server, wished_server):
+ compute_api.module.debug("Starting patching server attributes")
+ patch_payload = dict()
+
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+            # When working with a dict, only the ID matters: users are asked to put only the resource ID in the playbook
+            if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
+                # Keep every key at its current value, except the ID
+                key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
+                # Set the ID to the user-specified value
+ key_dict["id"] = wished_server[key]
+ patch_payload[key] = key_dict
+ elif not isinstance(target_server[key], dict):
+ patch_payload[key] = wished_server[key]
+
+ response = compute_api.patch(path="servers/%s" % target_server["id"],
+ data=patch_payload)
+ if not response.ok:
+ msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def core(module):
+ region = module.params["region"]
+ wished_server = {
+ "state": module.params["state"],
+ "image": module.params["image"],
+ "name": module.params["name"],
+ "commercial_type": module.params["commercial_type"],
+ "enable_ipv6": module.params["enable_ipv6"],
+ "tags": module.params["tags"],
+ "organization": module.params["organization"],
+ "security_group": module.params["security_group"]
+ }
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ compute_api = Scaleway(module=module)
+
+ check_image_id(compute_api, wished_server["image"])
+
+    # The IP parameters of the wished server depend on the configuration
+ ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
+ wished_server.update(ip_payload)
+
+ changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
+ module.exit_json(changed=changed, msg=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ image=dict(required=True),
+ name=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ commercial_type=dict(required=True),
+ enable_ipv6=dict(default=False, type="bool"),
+ public_ip=dict(default="absent"),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ security_group=dict(),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py
new file mode 100644
index 00000000..57803245
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+#
+# Scaleway database backups management module
+#
+# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_database_backup
+short_description: Scaleway database backups management module
+version_added: 1.2.0
+author: Guillaume Rodriguez (@guillaume_ro_fr)
+description:
+  - This module manages database backups on a Scaleway account U(https://developer.scaleway.com).
+extends_documentation_fragment:
+ - community.general.scaleway
+options:
+ state:
+ description:
+ - Indicate desired state of the database backup.
+ - C(present) creates a backup.
+ - C(absent) deletes the backup.
+ - C(exported) creates a download link for the backup.
+ - C(restored) restores the backup to a new database.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ - exported
+ - restored
+
+ region:
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ type: str
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ id:
+ description:
+ - UUID used to identify the database backup.
+ - Required for C(absent), C(exported) and C(restored) states.
+ type: str
+
+ name:
+ description:
+ - Name used to identify the database backup.
+ - Required for C(present) state.
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ database_name:
+ description:
+ - Name used to identify the database.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ instance_id:
+ description:
+ - UUID of the instance associated to the database backup.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ expires_at:
+ description:
+ - Expiration datetime of the database backup (ISO 8601 format).
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - Time to wait for the backup to reach the expected state.
+ type: int
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ description:
+ - Time to wait before every attempt to check the state of the backup.
+ type: int
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+ - name: Create a backup
+ community.general.scaleway_database_backup:
+ name: 'my_backup'
+ state: present
+ region: 'fr-par'
+ database_name: 'my-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Export a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: exported
+ region: 'fr-par'
+
+ - name: Restore a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: restored
+ region: 'fr-par'
+ database_name: 'my-new-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Remove a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: absent
+ region: 'fr-par'
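+
+  # Illustrative only: combines the documented expires_at and wait options;
+  # identifiers and the timestamp are placeholders
+  - name: Create a backup that expires, and wait for it to be ready
+    community.general.scaleway_database_backup:
+      name: 'my_backup'
+      state: present
+      region: 'fr-par'
+      database_name: 'my-database'
+      instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+      expires_at: '2021-01-01T00:00:00Z'
+      wait: true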
+'''
+
+RETURN = '''
+metadata:
+ description: Backup metadata.
+ returned: when C(state=present), C(state=exported) or C(state=restored)
+ type: dict
+ sample: {
+ "metadata": {
+ "created_at": "2020-08-06T12:42:05.631049Z",
+ "database_name": "my-database",
+ "download_url": null,
+ "download_url_expires_at": null,
+ "expires_at": null,
+ "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
+ "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
+ "instance_name": "my-instance",
+ "name": "backup_name",
+ "region": "fr-par",
+ "size": 600000,
+ "status": "ready",
+ "updated_at": "2020-08-06T12:42:10.581649Z"
+ }
+ }
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ scaleway_argument_spec,
+ SCALEWAY_REGIONS,
+)
+
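+# Statuses at which the wait loop below stops polling; 'deleting' counts as
+# settled, so a wait right after a delete returns as soon as deletion starts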
+stable_states = (
+ 'ready',
+ 'deleting',
+)
+
+
+def wait_to_complete_state_transition(module, account_api, backup=None):
+ wait_timeout = module.params['wait_timeout']
+ wait_sleep_time = module.params['wait_sleep_time']
+
+ if backup is None or backup['status'] in stable_states:
+ return backup
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ module.debug('We are going to wait for the backup to finish its transition')
+
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if not response.ok:
+ module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json))
+ response_json = response.json
+
+ if response_json['status'] in stable_states:
+ module.debug('It seems that the backup is not in transition anymore.')
+ module.debug('Backup in state: %s' % response_json['status'])
+ return response_json
+ time.sleep(wait_sleep_time)
+ else:
+        module.fail_json(msg='Backup took too long to finish its transition')
+
+
+def present_strategy(module, account_api, backup):
+ name = module.params['name']
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+ expiration_date = module.params['expires_at']
+
+ if backup is not None:
+ if (backup['name'] == name or name is None) and (
+ backup['expires_at'] == expiration_date or expiration_date is None):
+ wait_to_complete_state_transition(module, account_api, backup)
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {}
+ if name is not None:
+ payload['name'] = name
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']),
+ payload)
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def absent_strategy(module, account_api, backup):
+ if backup is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def exported_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ if backup['download_url'] is not None:
+ module.exit_json(changed=False, metadata=backup)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+ response = account_api.post(
+ '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {})
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def restored_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+
+ payload = {'database_name': database_name, 'instance_id': instance_id}
+ response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']),
+ payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+state_strategy = {
+ 'present': present_strategy,
+ 'absent': absent_strategy,
+ 'exported': exported_strategy,
+ 'restored': restored_strategy,
+}
+
+
+def core(module):
+ state = module.params['state']
+ backup_id = module.params['id']
+
+ account_api = Scaleway(module)
+
+ if backup_id is None:
+ backup_by_id = None
+ else:
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id))
+ status_code = response.status_code
+ backup_json = response.json
+        if status_code == 404:
+            backup_by_id = None
+ elif response.ok:
+ backup_by_id = backup_json
+ else:
+ module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message']))
+
+ state_strategy[state](module, account_api, backup_by_id)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ id=dict(),
+ name=dict(type='str'),
+ database_name=dict(required=False),
+ instance_id=dict(required=False),
+ expires_at=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ wait_sleep_time=dict(type='int', default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=[
+ ['database_name', 'instance_id'],
+ ],
+ required_if=[
+ ['state', 'present', ['name', 'database_name', 'instance_id']],
+ ['state', 'absent', ['id']],
+ ['state', 'exported', ['id']],
+ ['state', 'restored', ['id', 'database_name', 'instance_id']],
+ ],
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_facts.py
new file mode 100644
index 00000000..31bbfa76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_image_info) instead.
+short_description: Gather facts about the Scaleway images available.
+description:
+ - Gather facts about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ region:
+ type: str
+ description:
+      - Scaleway compute zone.
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images facts
+ community.general.scaleway_image_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_image_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_facts": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageFacts, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_image_facts': ScalewayImageFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py
new file mode 100644
index 00000000..3fad216e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_info
+short_description: Gather information about the Scaleway images available.
+description:
+ - Gather information about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ region:
+ type: str
+ description:
+      - Scaleway compute zone.
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images information
+ community.general.scaleway_image_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_image_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_image_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_info": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageInfo, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_image_info=ScalewayImageInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py
new file mode 100644
index 00000000..26da122e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+#
+# Scaleway IP management module
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_ip
+short_description: Scaleway IP management module
+author: Remy Leone (@sieben)
+description:
+  - This module manages IPs on a Scaleway account
+    U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the IP.
+ default: present
+ choices:
+ - present
+ - absent
+
+  organization:
+    type: str
+    description:
+      - Scaleway organization identifier.
+    required: true
+
+  region:
+    type: str
+    description:
+      - Scaleway region to use (for example C(par1)).
+    required: true
+    choices:
+      - ams1
+      - EMEA-NL-EVS
+      - par1
+      - EMEA-FR-PAR1
+      - par2
+      - EMEA-FR-PAR2
+      - waw1
+      - EMEA-PL-WAW1
+
+  id:
+    type: str
+    description:
+      - ID of the Scaleway IP (UUID).
+
+  server:
+    type: str
+    description:
+      - ID of the server you want to attach the IP to.
+      - To detach the IP, do not specify this option.
+
+  reverse:
+    type: str
+    description:
+      - Reverse DNS to assign to the IP.
+'''
+
+EXAMPLES = '''
+- name: Create an IP
+ community.general.scaleway_ip:
+ organization: '{{ scw_org }}'
+ state: present
+ region: par1
+ register: ip_creation_task
+
+- name: Make sure IP deleted
+ community.general.scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ state: absent
+ region: par1
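+
+# Illustrative only: attaches an existing IP to a server and sets its reverse;
+# both UUIDs are placeholders
+- name: Attach an IP to a server and set its reverse
+  community.general.scaleway_ip:
+    id: '{{ ip_creation_task.scaleway_ip.id }}'
+    organization: '{{ scw_org }}'
+    server: 3f1568ca-b1a2-4e98-b6f7-31a0588157f1
+    reverse: my-server.example.com
+    state: present
+    region: par1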
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ips": [
+ {
+ "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
+ "reverse": null,
+ "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
+ "server": {
+ "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
+ "name": "ansible_tuto-1"
+ },
+ "address": "212.47.232.136"
+ }
+ ]
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def ip_attributes_should_be_changed(api, target_ip, wished_ip):
+ patch_payload = {}
+
+ if target_ip["reverse"] != wished_ip["reverse"]:
+ patch_payload["reverse"] = wished_ip["reverse"]
+
+ # IP is assigned to a server
+ if target_ip["server"] is None and wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+
+    # IP is detached from a server
+ try:
+ if target_ip["server"]["id"] and wished_ip["server"] is None:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+    # IP is migrated between two different servers
+ try:
+ if target_ip["server"]["id"] != wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+ return patch_payload
+
+
+def payload_from_wished_ip(wished_ip):
+ return dict(
+ (k, v)
+ for k, v in wished_ip.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, wished_ip):
+ changed = False
+
+ response = api.get('ips')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ ips_list = response.json["ips"]
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+
+ if wished_ip["id"] not in ip_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "An IP would be created."}
+
+ # Create IP
+ creation_response = api.post('/ips',
+ data=payload_from_wished_ip(wished_ip))
+
+ if not creation_response.ok:
+ msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+ return changed, creation_response.json["ip"]
+
+ target_ip = ip_lookup[wished_ip["id"]]
+ patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
+
+ if not patch_payload:
+ return changed, target_ip
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP attributes would be changed."}
+
+ ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
+ data=patch_payload)
+
+ if not ip_patch_response.ok:
+ api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
+ ip_patch_response.status_code, ip_patch_response.json['message']))
+
+ return changed, ip_patch_response.json["ip"]
+
+
+def absent_strategy(api, wished_ip):
+ response = api.get('ips')
+ changed = False
+
+ status_code = response.status_code
+ ips_json = response.json
+ ips_list = ips_json["ips"]
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+ if wished_ip["id"] not in ip_lookup.keys():
+ return changed, {}
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP would be destroyed"}
+
+ response = api.delete('/ips/' + wished_ip["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ return changed, response.json
+
+
+def core(module):
+ wished_ip = {
+ "organization": module.params['organization'],
+ "reverse": module.params["reverse"],
+ "id": module.params["id"],
+ "server": module.params["server"]
+ }
+
+ region = module.params["region"]
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ api = Scaleway(module=module)
+ if module.params["state"] == "absent":
+ changed, summary = absent_strategy(api=api, wished_ip=wished_ip)
+ else:
+ changed, summary = present_strategy(api=api, wished_ip=wished_ip)
+ module.exit_json(changed=changed, scaleway_ip=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ organization=dict(required=True),
+ server=dict(),
+ reverse=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ id=dict()
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_facts.py
new file mode 100644
index 00000000..4227f360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_ip_info) instead.
+short_description: Gather facts about the Scaleway ips available.
+description:
+ - Gather facts about the Scaleway ips available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+      - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips facts
+ community.general.scaleway_ip_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_ip_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_facts": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpFacts, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_ip_facts': ScalewayIpFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py
new file mode 100644
index 00000000..145fb203
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_info
+short_description: Gather information about the Scaleway ips available.
+description:
+ - Gather information about the Scaleway ips available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips information
+ community.general.scaleway_ip_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_ip_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_ip_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_info": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpInfo, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_ip_info=ScalewayIpInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py
new file mode 100644
index 00000000..a9358188
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+#
+# Scaleway Load-balancer management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_lb
+short_description: Scaleway load-balancer management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages load-balancers on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+  name:
+    type: str
+    description:
+      - Name of the load-balancer.
+    required: true
+
+  description:
+    type: str
+    description:
+      - Description of the load-balancer.
+    required: true
+
+  organization_id:
+    type: str
+    description:
+      - Organization identifier.
+    required: true
+
+  state:
+    type: str
+    description:
+      - Indicate desired state of the load-balancer.
+    default: present
+    choices:
+      - present
+      - absent
+
+  region:
+    type: str
+    description:
+      - Scaleway zone.
+    required: true
+    choices:
+      - nl-ams
+      - fr-par
+      - pl-waw
+
+  tags:
+    type: list
+    description:
+      - List of tags to apply to the load-balancer.
+
+  wait:
+    description:
+      - Wait for the load-balancer to reach its desired state before returning.
+    type: bool
+    default: 'no'
+
+  wait_timeout:
+    type: int
+    description:
+      - Time to wait for the load-balancer to reach the expected state.
+    required: false
+    default: 300
+
+  wait_sleep_time:
+    type: int
+    description:
+      - Time to wait before every attempt to check the state of the load-balancer.
+    required: false
+    default: 3
+'''
+
+EXAMPLES = '''
+- name: Create a load-balancer
+ community.general.scaleway_lb:
+    name: foobar
+    description: Load-balancer for foobar
+ state: present
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+ tags:
+ - hello
+
+- name: Delete a load-balancer
+ community.general.scaleway_lb:
+    name: foobar
+    description: Load-balancer for foobar
+ state: absent
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
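+
+# Illustrative only: with state=present, changing the description (or name) of
+# an existing load-balancer goes through the module's update path
+- name: Update the description of a load-balancer
+  community.general.scaleway_lb:
+    name: foobar
+    description: New description for foobar
+    state: present
+    organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+    region: fr-par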
+'''
+
+RETURN = '''
+scaleway_lb:
+  description: Information about the load-balancer.
+  returned: success
+  type: dict
+  sample: {
+    "backend_count": 0,
+    "frontend_count": 0,
+    "description": "Description of my load-balancer",
+    "id": "00000000-0000-0000-0000-000000000000",
+    "instances": [
+      {
+        "id": "00000000-0000-0000-0000-000000000000",
+        "ip_address": "10.0.0.1",
+        "region": "fr-par",
+        "status": "ready"
+      },
+      {
+        "id": "00000000-0000-0000-0000-000000000000",
+        "ip_address": "10.0.0.2",
+        "region": "fr-par",
+        "status": "ready"
+      }
+    ],
+    "ip": [
+      {
+        "id": "00000000-0000-0000-0000-000000000000",
+        "ip_address": "192.168.0.1",
+        "lb_id": "00000000-0000-0000-0000-000000000000",
+        "region": "fr-par",
+        "organization_id": "00000000-0000-0000-0000-000000000000",
+        "reverse": ""
+      }
+    ],
+    "name": "lb_ansible_test",
+    "organization_id": "00000000-0000-0000-0000-000000000000",
+    "region": "fr-par",
+    "status": "ready",
+    "tags": [
+      "first_tag",
+      "second_tag"
+    ]
+  }
+'''
+
+import datetime
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway
+
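+# States at which the wait loop stops polling; "absent" (a 404 from the API) is
+# terminal as well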
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "name",
+ "description"
+)
+
+
+def payload_from_wished_lb(wished_lb):
+ return {
+ "organization_id": wished_lb["organization_id"],
+ "name": wished_lb["name"],
+ "tags": wished_lb["tags"],
+ "description": wished_lb["description"]
+ }
+
+
+def fetch_state(api, lb):
+ api.module.debug("fetch_state of load-balancer: %s" % lb["id"])
+ response = api.get(path=api.api_path + "/%s" % lb["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ api.module.fail_json(msg=msg)
+
+ try:
+ api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"]))
+ return response.json["status"]
+ except KeyError:
+ api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(api, lb, force_wait=False):
+ wait = api.module.params["wait"]
+ if not (wait or force_wait):
+ return
+ wait_timeout = api.module.params["wait_timeout"]
+ wait_sleep_time = api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ api.module.debug("We are going to wait for the load-balancer to finish its transition")
+ state = fetch_state(api, lb)
+ if state in STABLE_STATES:
+ api.module.debug("It seems that the load-balancer is not in transition anymore.")
+ api.module.debug("load-balancer in state: %s" % fetch_state(api, lb))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
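+# If any mutable attribute differs, return a payload containing every mutable
+# attribute (the update below is sent as a PUT of the whole object); otherwise
+# return an empty dict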
+def lb_attributes_should_be_changed(target_lb, wished_lb):
+ diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr])
+
+ if diff:
+ return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES)
+ else:
+ return diff
+
+
+def present_strategy(api, wished_lb):
+ changed = False
+
+ response = api.get(path=api.api_path)
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ lbs_list = response.json["lbs"]
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+
+ if wished_lb["name"] not in lb_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A load-balancer would be created."}
+
+ # Create Load-balancer
+ api.warn(payload_from_wished_lb(wished_lb))
+ creation_response = api.post(path=api.api_path,
+ data=payload_from_wished_lb(wished_lb))
+
+ if not creation_response.ok:
+ msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(api=api, lb=creation_response.json)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ patch_payload = lb_attributes_should_be_changed(target_lb=target_lb,
+ wished_lb=wished_lb)
+
+ if not patch_payload:
+ return changed, target_lb
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer attributes would be changed."}
+
+ lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"],
+ data=patch_payload)
+
+ if not lb_patch_response.ok:
+ api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format(
+ lb_patch_response.status_code, lb_patch_response.json['message']))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, lb_patch_response.json
+
+
+def absent_strategy(api, wished_lb):
+    changed = False
+
+    response = api.get(path=api.api_path)
+    if not response.ok:
+        api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+            response.status_code, response.json['message']))
+
+    # Index into the payload only once the request is known to have succeeded.
+    lbs_list = response.json["lbs"]
+
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+ if wished_lb["name"] not in lb_lookup.keys():
+ return changed, {}
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer would be destroyed"}
+
+ wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_lb["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ region = module.params["region"]
+ wished_load_balancer = {
+ "state": module.params["state"],
+ "name": module.params["name"],
+ "description": module.params["description"],
+ "tags": module.params["tags"],
+ "organization_id": module.params["organization_id"]
+ }
+ module.params['api_url'] = SCALEWAY_ENDPOINT
+ api = Scaleway(module=module)
+ api.api_path = "lb/v1/regions/%s/lbs" % region
+
+ changed, summary = state_strategy[wished_load_balancer["state"]](api=api,
+ wished_lb=wished_load_balancer)
+ module.exit_json(changed=changed, scaleway_lb=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ description=dict(required=True),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization_id=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
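
The wait helper above leans on Python's while/else: the else branch runs only when the deadline expires without the break firing. A minimal standalone sketch of that polling pattern, with STABLE_STATES and the API plumbing replaced by stand-ins:

    import datetime
    import time

    STABLE_STATES = ("ready", "absent")  # stand-in for the module's constant

    def wait_for_stable_state(fetch_state, wait_timeout=300, wait_sleep_time=3):
        end = datetime.datetime.utcnow() + datetime.timedelta(seconds=wait_timeout)
        while datetime.datetime.utcnow() < end:
            if fetch_state() in STABLE_STATES:
                break  # stable state reached: leave the loop early
            time.sleep(wait_sleep_time)
        else:
            # Runs only if the while condition went false, i.e. the deadline
            # expired without ever hitting the break above.
            raise TimeoutError("resource did not reach a stable state in time")

    # Usage: poll a fake state source that stabilises on the third call.
    states = iter(["pending", "pending", "ready"])
    wait_for_stable_state(lambda: next(states), wait_timeout=10, wait_sleep_time=0)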
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_facts.py
new file mode 100644
index 00000000..ee571cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_organization_info) instead.
+short_description: Gather facts about the Scaleway organizations available.
+description:
+ - Gather facts about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations facts
+ community.general.scaleway_organization_facts:
+'''
+
+RETURN = r'''
+---
+scaleway_organization_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_facts": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationFacts, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_organization_facts': ScalewayOrganizationFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py
new file mode 100644
index 00000000..f530dcb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_info
+short_description: Gather information about the Scaleway organizations available.
+description:
+ - Gather information about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations information
+ community.general.scaleway_organization_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_organization_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_organization_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_info": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationInfo, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
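
Both the facts and the info variants delegate all fetching to Scaleway.get_resources() from the collection's module_utils, which this diff does not include. A hedged sketch of the contract these subclasses appear to rely on, assuming get_resources() GETs the collection named by self.name and unwraps the same key from the JSON body (the real implementation may differ):

    # Hedged sketch only -- the real Scaleway base class lives in
    # plugins/module_utils/scaleway.py and is not part of this diff.
    class ScalewaySketch(object):
        name = None  # subclasses set 'organizations', 'servers', ...

        def get(self, path):
            # Stand-in for the real HTTP helper; returns a canned response.
            return type('Response', (), {
                'ok': True,
                'json': {self.name: [{'id': 'dummy'}]},
            })

        def get_resources(self):
            response = self.get('/%s' % self.name)
            if not response.ok:
                raise RuntimeError('Error fetching %s' % self.name)
            # The API appears to wrap each listing under a key matching the
            # resource name, which is why subclasses only set self.name.
            return response.json[self.name]

    class OrganizationSketch(ScalewaySketch):
        name = 'organizations'

    assert OrganizationSketch().get_resources() == [{'id': 'dummy'}]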
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py
new file mode 100644
index 00000000..9303e06e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group
+short_description: Scaleway Security Group management module
+author: Antoine Barbare (@abarbare)
+description:
+    - This module manages Security Groups on a Scaleway account
+      U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ description:
+ - Indicate desired state of the Security Group.
+ type: str
+ choices: [ absent, present ]
+ default: present
+
+ organization:
+ description:
+ - Organization identifier.
+ type: str
+ required: true
+
+ region:
+ description:
+ - Scaleway region to use (for example C(par1)).
+ type: str
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ name:
+ description:
+ - Name of the Security Group.
+ type: str
+ required: true
+
+ description:
+ description:
+ - Description of the Security Group.
+ type: str
+
+ stateful:
+ description:
+ - Create a stateful security group which allows established connections in and out.
+ type: bool
+ required: true
+
+ inbound_default_policy:
+ description:
+ - Default policy for incoming traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ outbound_default_policy:
+ description:
+      - Default policy for outgoing traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ organization_default:
+ description:
+ - Create security group to be the default one.
+ type: bool
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group
+ community.general.scaleway_security_group:
+ state: present
+ region: par1
+ name: security_group
+ description: "my security group description"
+ organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation_task
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group": {
+ "description": "my security group description",
+ "enable_default_security": true,
+ "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
+ "inbound_default_policy": "accept",
+ "name": "security_group",
+ "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
+ "organization_default": false,
+ "outbound_default_policy": "accept",
+ "servers": [],
+ "stateful": false
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+from uuid import uuid4
+
+
+def payload_from_security_group(security_group):
+ return dict(
+ (k, v)
+ for k, v in security_group.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, security_group):
+ ret = {'changed': False}
+
+ response = api.get('security_groups')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+
+ if security_group['name'] not in security_group_lookup.keys():
+ ret['changed'] = True
+ if api.module.check_mode:
+            # In check mode, return a placeholder id so callers can still reference one
+ ret['scaleway_security_group'] = {'id': str(uuid4())}
+ return ret
+
+ # Create Security Group
+ response = api.post('/security_groups',
+ data=payload_from_security_group(security_group))
+
+ if not response.ok:
+ msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)
+ api.module.fail_json(msg=msg)
+ ret['scaleway_security_group'] = response.json['security_group']
+
+ else:
+ ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
+
+ return ret
+
+
+def absent_strategy(api, security_group):
+ response = api.get('security_groups')
+ ret = {'changed': False}
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+ if security_group['name'] not in security_group_lookup.keys():
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id'])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ security_group = {
+ 'organization': module.params['organization'],
+ 'name': module.params['name'],
+ 'description': module.params['description'],
+ 'stateful': module.params['stateful'],
+ 'inbound_default_policy': module.params['inbound_default_policy'],
+ 'outbound_default_policy': module.params['outbound_default_policy'],
+ 'organization_default': module.params['organization_default'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ api = Scaleway(module=module)
+ if module.params['state'] == 'present':
+ summary = present_strategy(api=api, security_group=security_group)
+ else:
+ summary = absent_strategy(api=api, security_group=security_group)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ organization=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ stateful=dict(type='bool', required=True),
+ inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ organization_default=dict(type='bool'),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
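
Note how payload_from_security_group() strips both the read-only id and any unset optional fields before the POST, so the API only receives keys the user actually provided. A quick, runnable illustration of that filtering:

    security_group = {
        'id': '0168fb1f-cc46-4f69-b4be-c95d2a19bcae',  # read-only, dropped
        'name': 'security_group',
        'description': None,                           # unset option, dropped
        'stateful': False,                             # falsy but set, kept
    }
    payload = dict(
        (k, v) for k, v in security_group.items()
        if k != 'id' and v is not None
    )
    assert payload == {'name': 'security_group', 'stateful': False}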
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py
new file mode 100644
index 00000000..a43bfedb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_security_group_info) instead.
+short_description: Gather facts about the Scaleway security groups available.
+description:
+ - Gather facts about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups facts
+ community.general.scaleway_security_group_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_facts": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupFacts, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_security_group_facts': ScalewaySecurityGroupFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py
new file mode 100644
index 00000000..d3488f0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_info
+short_description: Gather information about the Scaleway security groups available.
+description:
+ - Gather information about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups information
+ community.general.scaleway_security_group_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_security_group_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_info": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupInfo, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py
new file mode 100644
index 00000000..054a4d47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group Rule management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group_rule
+short_description: Scaleway Security Group Rule management module
+author: Antoine Barbare (@abarbare)
+description:
+    - This module manages Security Group Rules on a Scaleway account
+      U(https://developer.scaleway.com)
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the Security Group Rule.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ protocol:
+ type: str
+ description:
+ - Network protocol to use
+ choices:
+ - TCP
+ - UDP
+ - ICMP
+ required: true
+
+ port:
+ description:
+            - Port related to the rule. Use a null value to apply the rule to all ports.
+ required: true
+ type: int
+
+ ip_range:
+ type: str
+ description:
+ - IPV4 CIDR notation to apply to the rule
+ default: 0.0.0.0/0
+
+ direction:
+ type: str
+ description:
+ - Rule direction
+ choices:
+ - inbound
+ - outbound
+ required: true
+
+ action:
+ type: str
+ description:
+ - Rule action
+ choices:
+ - accept
+ - drop
+ required: true
+
+ security_group:
+ type: str
+ description:
+ - Security Group unique identifier
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group Rule
+  community.general.scaleway_security_group_rule:
+    state: present
+    region: par1
+    protocol: TCP
+    port: 80
+    ip_range: 0.0.0.0/0
+    direction: inbound
+    action: accept
+    security_group: b57210ee-1281-4820-a6db-329f78596ecb
+  register: security_group_rule_creation_task
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group_rule": {
+ "direction": "inbound",
+ "protocol": "TCP",
+ "ip_range": "0.0.0.0/0",
+ "dest_port_from": 80,
+ "action": "accept",
+ "position": 2,
+ "dest_port_to": null,
+ "editable": null,
+ "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
+from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_sgr_from_api(security_group_rules, security_group_rule):
+ """ Check if a security_group_rule specs are present in security_group_rules
+ Return None if no rules match the specs
+ Return the rule if found
+ """
+ for sgr in security_group_rules:
+ if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and
+ sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and
+ sgr['protocol'] == security_group_rule['protocol']):
+ return sgr
+
+ return None
+
+
+def present_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ # Create Security Group Rule
+ response = api.post('/security_groups/%s/rules' % security_group_id,
+ data=payload_from_object(security_group_rule))
+
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error during security group rule creation: "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+ ret['scaleway_security_group_rule'] = response.json['rule']
+
+ else:
+ ret['scaleway_security_group_rule'] = existing_rule
+
+ return ret
+
+
+def absent_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete(
+ '/security_groups/%s/rules/%s' %
+ (security_group_id, existing_rule['id']))
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error deleting security group rule "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ api = Scaleway(module=module)
+
+ security_group_rule = {
+ 'protocol': module.params['protocol'],
+ 'dest_port_from': module.params['port'],
+ 'ip_range': module.params['ip_range'],
+ 'direction': module.params['direction'],
+ 'action': module.params['action'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ if module.params['state'] == 'present':
+ summary = present_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ else:
+ summary = absent_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']),
+ port=dict(type='int', required=True),
+ ip_range=dict(type='str', default='0.0.0.0/0'),
+ direction=dict(type='str', required=True, choices=['inbound', 'outbound']),
+ action=dict(type='str', required=True, choices=['accept', 'drop']),
+ security_group=dict(type='str', required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
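
get_sgr_from_api() gives the module its idempotency by comparing rules by value: a rule already "exists" only when all five identifying fields match, while server-assigned metadata such as id and position is deliberately ignored. The matching logic in isolation:

    # Rule matching compares five fields; id/position are ignored on purpose.
    def rules_match(sgr, wanted):
        return (sgr['ip_range'] == wanted['ip_range']
                and sgr['dest_port_from'] == wanted['dest_port_from']
                and sgr['direction'] == wanted['direction']
                and sgr['action'] == wanted['action']
                and sgr['protocol'] == wanted['protocol'])

    existing = {'id': '10cb0b9a', 'position': 2, 'ip_range': '0.0.0.0/0',
                'dest_port_from': 80, 'direction': 'inbound',
                'action': 'accept', 'protocol': 'TCP'}
    wanted = {'ip_range': '0.0.0.0/0', 'dest_port_from': 80,
              'direction': 'inbound', 'action': 'accept', 'protocol': 'TCP'}
    assert rules_match(existing, wanted)  # same rule despite extra metadata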
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_facts.py
new file mode 100644
index 00000000..d3e73669
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_facts.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_server_info) instead.
+short_description: Gather facts about the Scaleway servers available.
+description:
+ - Gather facts about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers facts
+ community.general.scaleway_server_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_server_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_facts": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerFacts, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
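
Unlike the account-scoped modules earlier in this diff, the compute-scoped modules resolve their API endpoint from the region before issuing any request, via the SCALEWAY_LOCATION mapping in module_utils. A sketch of that lookup with illustrative placeholder entries (the real mapping and its URLs live in plugins/module_utils/scaleway.py and are not shown here):

    # Illustrative placeholders only -- the real SCALEWAY_LOCATION lives in
    # plugins/module_utils/scaleway.py and is not part of this diff.
    SCALEWAY_LOCATION = {
        "par1": {"api_endpoint": "https://cp-par1.scaleway.com"},
        "EMEA-FR-PAR1": {"api_endpoint": "https://cp-par1.scaleway.com"},
    }

    def api_url_for(region):
        # Short region names and their EMEA aliases resolve identically,
        # which is presumably why both appear in the modules' choices lists.
        return SCALEWAY_LOCATION[region]["api_endpoint"]

    assert api_url_for("par1") == api_url_for("EMEA-FR-PAR1")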
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py
new file mode 100644
index 00000000..43b0badc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_info
+short_description: Gather information about the Scaleway servers available.
+description:
+ - Gather information about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers information
+ community.general.scaleway_server_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_server_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_server_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_info": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerInfo, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_server_info=ScalewayServerInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py
new file mode 100644
index 00000000..25f99e72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_snapshot_info) instead.
+short_description: Gather facts about the Scaleway snapshots available.
+description:
+    - Gather facts about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots facts
+ community.general.scaleway_snapshot_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_facts": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotFacts, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_snapshot_facts': ScalewaySnapshotFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py
new file mode 100644
index 00000000..f31b74b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_info
+short_description: Gather information about the Scaleway snapshots available.
+description:
+    - Gather information about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots information
+ community.general.scaleway_snapshot_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_snapshot_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_info": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotInfo, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py
new file mode 100644
index 00000000..08555b23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+#
+# Scaleway SSH keys management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_sshkey
+short_description: Scaleway SSH keys management module
+author: Remy Leone (@sieben)
+description:
+    - This module manages SSH keys on a Scaleway account
+ U(https://developer.scaleway.com)
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the SSH key.
+ default: present
+ choices:
+ - present
+ - absent
+ ssh_pub_key:
+ type: str
+ description:
+ - The public SSH key as a string to add.
+ required: true
+ api_url:
+ type: str
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+'''
+
+EXAMPLES = '''
+- name: "Add SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+
+- name: "Delete SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "absent"
+
+- name: "Add SSH key with explicit token"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+ oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ssh_public_keys": [
+ {"key": "ssh-rsa AAAA...."}
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway
+
+
+def extract_present_sshkeys(raw_organization_dict):
+ ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
+ ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
+ return ssh_key_lookup
+
+
+def extract_user_id(raw_organization_dict):
+ return raw_organization_dict["organizations"][0]["users"][0]["id"]
+
+
+def sshkey_user_patch(ssh_lookup):
+ ssh_list = {"ssh_public_keys": [{"key": key}
+ for key in ssh_lookup]}
+ return ssh_list
+
+
+def core(module):
+ ssh_pub_key = module.params['ssh_pub_key']
+ state = module.params["state"]
+ account_api = Scaleway(module)
+ response = account_api.get('organizations')
+
+ status_code = response.status_code
+ organization_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ user_id = extract_user_id(organization_json)
+ present_sshkeys = []
+ try:
+ present_sshkeys = extract_present_sshkeys(organization_json)
+    except (KeyError, IndexError):
+        module.fail_json(msg="Error while extracting present SSH keys from API")
+
+ if state in ('present',):
+ if ssh_pub_key in present_sshkeys:
+ module.exit_json(changed=False)
+
+ # If key not found create it!
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.append(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if ssh_pub_key not in present_sshkeys:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.remove(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ ssh_pub_key=dict(required=True),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
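
Because the account API exposes SSH keys only as a whole list on the user object, both adding and removing a key are expressed as a full PATCH of that list; sshkey_user_patch() simply rebuilds the payload shape the API expects:

    # sshkey_user_patch wraps each key string back into the API's list-of-dicts.
    def sshkey_user_patch(ssh_lookup):
        return {"ssh_public_keys": [{"key": key} for key in ssh_lookup]}

    keys = ["ssh-rsa AAAA...one", "ssh-ed25519 AAAA...two"]
    assert sshkey_user_patch(keys) == {
        "ssh_public_keys": [
            {"key": "ssh-rsa AAAA...one"},
            {"key": "ssh-ed25519 AAAA...two"},
        ]
    }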
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py
new file mode 100644
index 00000000..4a38e76d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+#
+# Scaleway user data management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_user_data
+short_description: Scaleway user_data management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages user_data on compute instances on Scaleway."
+ - "It can be used to configure cloud-init for instance"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ server_id:
+ type: str
+ description:
+ - Scaleway Compute instance ID of the server
+ required: true
+
+ user_data:
+ type: dict
+ description:
+      - User defined data. Typically used with C(cloud-init).
+      - Pass your cloud-init script here as a string.
+ required: false
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = '''
+- name: Update the cloud-init
+ community.general.scaleway_user_data:
+ server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+ region: ams1
+ user_data:
+ cloud-init: 'final_message: "Hello World!"'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+
+def patch_user_data(compute_api, server_id, key, value):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"})
+ if not response.ok:
+ msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def delete_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Deleting user_data attribute: %s" % key)
+
+    response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
+
+    if not response.ok:
+        # Parenthesize the tuple so both values feed the format string.
+        msg = 'Error during user_data deleting: (%s) %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def get_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Fetching user_data attribute: %s" % key)
+
+    path = "servers/%s/user_data/%s" % (server_id, key)
+    response = compute_api.get(path=path)
+    if not response.ok:
+        msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+        compute_api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ server_id = module.params["server_id"]
+ user_data = module.params["user_data"]
+ changed = False
+
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+ compute_api = Scaleway(module=module)
+
+ user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+    if not user_data_list.ok:
+        msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+        compute_api.module.fail_json(msg=msg)
+
+ present_user_data_keys = user_data_list.json["user_data"]
+ present_user_data = dict(
+ (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key))
+ for key in present_user_data_keys
+ )
+
+ if present_user_data == user_data:
+ module.exit_json(changed=changed, msg=user_data_list.json)
+
+ # First we remove keys that are not defined in the wished user_data
+ for key in present_user_data:
+ if key not in user_data:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
+
+ # Then we patch keys that are different
+ for key, value in user_data.items():
+ if key not in present_user_data or user_data[key] != present_user_data[key]:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value)
+
+ module.exit_json(changed=changed, msg=user_data)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ user_data=dict(type="dict"),
+ server_id=dict(required=True),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py
new file mode 100644
index 00000000..e879d3c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+#
+# Scaleway volumes management module
+#
+# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_volume
+short_description: Scaleway volumes management module
+author: Henryk Konsek (@hekonsek)
+description:
+  - This module manages volumes on a Scaleway account,
+    U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the volume.
+ default: present
+ choices:
+ - present
+ - absent
+ region:
+ type: str
+ description:
+      - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+ name:
+ type: str
+ description:
+ - Name used to identify the volume.
+ required: true
+ organization:
+ type: str
+ description:
+ - ScaleWay organization ID to which volume belongs.
+ size:
+ type: int
+ description:
+ - Size of the volume in bytes.
+ volume_type:
+ type: str
+ description:
+      - Type of the volume (for example C(l_ssd)).
+'''
+
+EXAMPLES = '''
+- name: Create 10GB volume
+ community.general.scaleway_volume:
+ name: my-volume
+ state: present
+ region: par1
+ organization: "{{ scw_org }}"
+ "size": 10000000000
+ volume_type: l_ssd
+ register: server_creation_check_task
+
+- name: Make sure volume deleted
+ community.general.scaleway_volume:
+ name: my-volume
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "volume": {
+ "export_uri": null,
+ "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
+ "name": "volume-0-3",
+ "organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
+ "server": null,
+ "size": 10000000000,
+ "volume_type": "l_ssd"
+ }
+}
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def core(module):
+ state = module.params['state']
+ name = module.params['name']
+ organization = module.params['organization']
+ size = module.params['size']
+ volume_type = module.params['volume_type']
+
+ account_api = Scaleway(module)
+ response = account_api.get('volumes')
+ status_code = response.status_code
+ volumes_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
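+    # Look up an existing volume by scanning the returned list for a matching
+    # organization and name.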
+ volumeByName = None
+ for volume in volumes_json['volumes']:
+ if volume['organization'] == organization and volume['name'] == name:
+ volumeByName = volume
+
+ if state in ('present',):
+ if volumeByName is not None:
+ module.exit_json(changed=False)
+
+ payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type}
+
+ response = account_api.post('/volumes', payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if volumeByName is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/volumes/' + volumeByName['id'])
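+        # HTTP 204 (No Content) indicates the volume was deleted successfully.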
+ if response.status_code == 204:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ name=dict(required=True),
+ size=dict(type='int'),
+ organization=dict(),
+ volume_type=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_facts.py
new file mode 100644
index 00000000..e894f965
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_volume_info) instead.
+short_description: Gather facts about the Scaleway volumes available.
+description:
+ - Gather facts about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+    - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes facts
+ community.general.scaleway_volume_facts:
+ region: par1
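+
+# Gathered facts are injected into the play's variables; a minimal sketch of
+# reading them back (the key name comes from this module's exit_json call):
+- ansible.builtin.debug:
+    msg: "{{ scaleway_volume_facts }}"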
+'''
+
+RETURN = r'''
+---
+scaleway_volume_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_facts": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeFacts, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_volume_facts': ScalewayVolumeFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py
new file mode 100644
index 00000000..ff6093e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_info
+short_description: Gather information about the Scaleway volumes available.
+description:
+ - Gather information about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes information
+ community.general.scaleway_volume_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_volume_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_volume_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_info": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeInfo, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_volume_info=ScalewayVolumeInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py
new file mode 100644
index 00000000..18a67d01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, 2017 Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: imgadm
+short_description: Manage SmartOS images
+description:
+    - Manage SmartOS virtual machine images through imgadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ force:
+ required: false
+ type: bool
+ description:
+ - Force a given operation (where supported by imgadm(1M)).
+ pool:
+ required: false
+ default: zones
+ description:
+ - zpool to import to or delete images from.
+ type: str
+ source:
+ required: false
+ description:
+ - URI for the image source.
+ type: str
+ state:
+ required: true
+ choices: [ present, absent, deleted, imported, updated, vacuumed ]
+ description:
+        - State the object operated on should be in. C(imported) is an alias
+          for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
+          and C(uuid) to C(*), it will remove all unused images.
+ type: str
+
+ type:
+ required: false
+ choices: [ imgapi, docker, dsapi ]
+ default: imgapi
+ description:
+ - Type for image sources.
+ type: str
+
+ uuid:
+ required: false
+ description:
+ - Image UUID. Can either be a full UUID or C(*) for all images.
+ type: str
+
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Import an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: imported
+
+- name: Delete an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: deleted
+
+- name: Update all images
+ community.general.imgadm:
+ uuid: '*'
+ state: updated
+
+- name: Update a single image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: updated
+
+- name: Add a source
+ community.general.imgadm:
+ source: 'https://datasets.project-fifo.net'
+ state: present
+
+- name: Add a Docker source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ type: docker
+ state: present
+
+- name: Remove a source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ state: absent
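+
+# Based on the documented C(vacuumed) state: with uuid set to '*', all images
+# not used by any VM are removed.
+- name: Remove all unused images
+  community.general.imgadm:
+    uuid: '*'
+    state: vacuumed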
+'''
+
+RETURN = '''
+source:
+ description: Source that is managed.
+ returned: When not managing an image.
+ type: str
+ sample: https://datasets.project-fifo.net
+uuid:
+ description: UUID for an image operated on.
+ returned: When not managing an image source.
+ type: str
+ sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'present'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# While imgadm(1M) supports a -E option to return any errors in JSON, the
+# generated JSON does not play well with the JSON parsers of Python: the
+# returned message contains '\n' as part of the stacktrace, which breaks the
+# parsers. Errors are therefore extracted from stderr with a regex instead.
+
+
+class Imgadm(object):
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.cmd = module.get_bin_path('imgadm', required=True)
+ self.changed = False
+ self.uuid = module.params['uuid']
+
+        # Since there are a number of (natural) aliases, prevent having to look
+        # them up every time we operate on `state`.
+ if self.params['state'] in ['present', 'imported', 'updated']:
+ self.present = True
+ else:
+ self.present = False
+
+ # Perform basic UUID validation upfront.
+ if self.uuid and self.uuid != '*':
+ if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
+ module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
+
+ # Helper method to massage stderr
+ def errmsg(self, stderr):
+ match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+ if match:
+ return match.groups()[0]
+ else:
+ return 'Unexpected failure'
+
+ def update_images(self):
+ if self.uuid == '*':
+ cmd = '{0} update'.format(self.cmd)
+ else:
+ cmd = '{0} update {1}'.format(self.cmd, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
+
+ # There is no feedback from imgadm(1M) to determine if anything
+ # was actually changed. So treat this as an 'always-changes' operation.
+ # Note that 'imgadm -v' produces unparseable JSON...
+ self.changed = True
+
+ def manage_sources(self):
+ force = self.params['force']
+ source = self.params['source']
+ imgtype = self.params['type']
+
+ cmd = '{0} sources'.format(self.cmd)
+
+ if force:
+ cmd += ' -f'
+
+ if self.present:
+ cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
+
+ # Check the various responses.
+ # Note that trying to add a source with the wrong type is handled
+ # above as it results in a non-zero status.
+
+ regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Added "%s" image source "%s"' % (imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = True
+ else:
+ # Type is ignored by imgadm(1M) here
+ cmd += ' -d %s' % source
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
+
+ regex = 'Do not have image source "%s", no change' % source
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Deleted ".*" image source "%s"' % source
+ if re.match(regex, stdout):
+ self.changed = True
+
+ def manage_images(self):
+ pool = self.params['pool']
+ state = self.params['state']
+
+ if state == 'vacuumed':
+            # Unconditionally pass '-f', otherwise we're prompted with 'y/N'
+ cmd = '{0} vacuum -f'.format(self.cmd)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
+ else:
+ if stdout == '':
+ self.changed = False
+ else:
+ self.changed = True
+ if self.present:
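+            # -P selects the target zpool; -q disables the progress bar so the
+            # stdout matched by the regexes below only contains status lines.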
+ cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
+
+ regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = '.*ActiveImageNotFound.*'
+ if re.match(regex, stderr):
+ self.changed = False
+
+ regex = 'Imported image {0}.*'.format(self.uuid)
+ if re.match(regex, stdout.splitlines()[-1]):
+ self.changed = True
+ else:
+ cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ regex = '.*ImageNotInstalled.*'
+ if re.match(regex, stderr):
+ # Even if the 'rc' was non-zero (3), we handled the situation
+ # in order to determine if there was a change.
+ self.changed = False
+
+ regex = 'Deleted image {0}'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool'),
+ pool=dict(default='zones'),
+ source=dict(),
+ state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
+ type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
+ uuid=dict()
+ ),
+ # This module relies largely on imgadm(1M) to enforce idempotency, which does not
+ # provide a "noop" (or equivalent) mode to do a dry-run.
+ supports_check_mode=False,
+ )
+
+ imgadm = Imgadm(module)
+
+ uuid = module.params['uuid']
+ source = module.params['source']
+ state = module.params['state']
+
+ result = {'state': state}
+
+ # Either manage sources or images.
+ if source:
+ result['source'] = source
+ imgadm.manage_sources()
+ else:
+ result['uuid'] = uuid
+
+ if state == 'updated':
+ imgadm.update_images()
+ else:
+            # Make sure we operate on a single image for the following actions.
+ if (uuid == '*') and (state != 'vacuumed'):
+ module.fail_json(msg='Can only specify uuid as "*" when updating image(s)')
+ imgadm.manage_images()
+
+ result['changed'] = imgadm.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py
new file mode 100644
index 00000000..7db7c5ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Bruce Smith <Bruce.Smith.IT@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nictagadm
+short_description: Manage nic tags on SmartOS systems
+description:
+ - Create or delete nic tags on SmartOS systems.
+author:
+- Bruce Smith (@SmithX10)
+options:
+ name:
+ description:
+ - Name of the nic tag.
+ required: true
+ type: str
+ mac:
+ description:
+ - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
+ - Parameters I(mac) and I(etherstub) are mutually exclusive.
+ type: str
+ etherstub:
+ description:
+ - Specifies that the nic tag will be attached to a created I(etherstub).
+ - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
+ type: bool
+ default: no
+ mtu:
+ description:
+ - Specifies the size of the I(mtu) of the desired nic tag.
+ - Parameters I(mtu) and I(etherstub) are mutually exclusive.
+ type: int
+ force:
+ description:
+    - When I(state) is set to C(absent), this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
+ type: bool
+ default: no
+ state:
+ description:
+ - Create or delete a SmartOS nic tag.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
+ community.general.nictagadm:
+ name: storage0
+ mac: 00:1b:21:a3:f5:4d
+ mtu: 9000
+ state: present
+
+- name: Remove 'storage0' nic tag
+ community.general.nictagadm:
+ name: storage0
+ state: absent
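+
+# A sketch using the documented etherstub option; I(mac) is omitted because
+# I(mac) and I(etherstub) are mutually exclusive.
+- name: Create 'internal0' on a new etherstub
+  community.general.nictagadm:
+    name: internal0
+    etherstub: true
+    state: present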
+'''
+
+RETURN = r'''
+name:
+  description: The nic tag name.
+ returned: always
+ type: str
+ sample: storage0
+mac:
+ description: MAC Address that the nic tag was attached to.
+ returned: always
+ type: str
+ sample: 00:1b:21:a3:f5:4d
+etherstub:
+  description: Specifies if the nic tag was created and attached to an etherstub.
+ returned: always
+ type: bool
+ sample: False
+mtu:
+  description: Specifies which MTU size was passed during the nictagadm add command. I(mtu) and I(etherstub) are mutually exclusive.
+ returned: always
+ type: int
+ sample: 1500
+force:
+  description: Shows if C(-f) was used during the deletion of a nic tag.
+ returned: always
+ type: bool
+ sample: False
+state:
+  description: State of the target.
+ returned: always
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+
+
+class NicTag(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.mac = module.params['mac']
+ self.etherstub = module.params['etherstub']
+ self.mtu = module.params['mtu']
+ self.force = module.params['force']
+ self.state = module.params['state']
+
+ self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
+
+ def is_valid_mac(self):
+ return is_mac(self.mac.lower())
+
+ def nictag_exists(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('exists')
+ cmd.append(self.name)
+
+ (rc, dummy, dummy) = self.module.run_command(cmd)
+
+ return rc == 0
+
+ def add_nictag(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('add')
+
+ if self.etherstub:
+ cmd.append('-l')
+
+ if self.mtu:
+ cmd.append('-p')
+ cmd.append('mtu=' + str(self.mtu))
+
+ if self.mac:
+ cmd.append('-p')
+ cmd.append('mac=' + str(self.mac))
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_nictag(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('delete')
+
+ if self.force:
+ cmd.append('-f')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ mac=dict(type='str'),
+ etherstub=dict(type='bool', default=False),
+ mtu=dict(type='int'),
+ force=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ mutually_exclusive=[
+ ['etherstub', 'mac'],
+ ['etherstub', 'mtu'],
+ ],
+ required_if=[
+ ['etherstub', False, ['name', 'mac']],
+ ['state', 'absent', ['name', 'force']],
+ ],
+ supports_check_mode=True
+ )
+
+ nictag = NicTag(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ changed=False,
+ etherstub=nictag.etherstub,
+ force=nictag.force,
+ name=nictag.name,
+ mac=nictag.mac,
+ mtu=nictag.mtu,
+ state=nictag.state,
+ )
+
+    if nictag.mac and not nictag.is_valid_mac():
+ module.fail_json(msg='Invalid MAC Address Value',
+ name=nictag.name,
+ mac=nictag.mac,
+ etherstub=nictag.etherstub)
+
+ if nictag.state == 'absent':
+ if nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.delete_nictag()
+ if rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+ elif nictag.state == 'present':
+ if not nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.add_nictag()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+
+ if rc is not None:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_facts.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Å tevko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Å tevko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting image. Can be any value from image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+- name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+        cmd = [self.module.get_bin_path('imgadm', required=True)]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Å tevko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Å tevko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting image. Can be any value from image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+- name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+        cmd = [self.module.get_bin_path('imgadm', required=True)]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py
new file mode 100644
index 00000000..553e6efc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py
@@ -0,0 +1,796 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vmadm
+short_description: Manage SmartOS virtual machines and zones.
+description:
+ - Manage SmartOS virtual machines through vmadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ archive_on_delete:
+ required: false
+ description:
+ - When enabled, the zone dataset will be mounted on C(/zones/archive)
+ upon removal.
+ type: bool
+ autoboot:
+ required: false
+ description:
+ - Whether or not a VM is booted when the system is rebooted.
+ type: bool
+ brand:
+ choices: [ joyent, joyent-minimal, lx, kvm, bhyve ]
+ default: joyent
+ description:
+ - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0.
+ type: str
+ boot:
+ required: false
+ description:
+ - Set the boot order for KVM VMs.
+ type: str
+ cpu_cap:
+ required: false
+ description:
+ - Sets a limit on the amount of CPU time that can be used by a VM.
+ Use C(0) for no cap.
+ type: int
+ cpu_shares:
+ required: false
+ description:
+ - Sets a limit on the number of fair share scheduler (FSS) CPU shares for
+ a VM. This limit is relative to all other VMs on the system.
+ type: int
+ cpu_type:
+ required: false
+ choices: [ qemu64, host ]
+ default: qemu64
+ description:
+ - Control the type of virtual CPU exposed to KVM VMs.
+ type: str
+ customer_metadata:
+ required: false
+ description:
+      - Metadata to be set and associated with this VM; this contains
+        customer-modifiable keys.
+ type: dict
+ delegate_dataset:
+ required: false
+ description:
+ - Whether to delegate a ZFS dataset to an OS VM.
+ type: bool
+ disk_driver:
+ required: false
+ description:
+ - Default value for a virtual disk model for KVM guests.
+ type: str
+ disks:
+ required: false
+ description:
+ - A list of disks to add, valid properties are documented in vmadm(1M).
+ type: list
+ dns_domain:
+ required: false
+ description:
+ - Domain value for C(/etc/hosts).
+ type: str
+ docker:
+ required: false
+ description:
+ - Docker images need this flag enabled along with the I(brand) set to C(lx).
+ type: bool
+ filesystems:
+ required: false
+ description:
+ - Mount additional filesystems into an OS VM.
+ type: list
+ firewall_enabled:
+ required: false
+ description:
+ - Enables the firewall, allowing fwadm(1M) rules to be applied.
+ type: bool
+ force:
+ required: false
+ description:
+ - Force a particular action (i.e. stop or delete a VM).
+ type: bool
+ fs_allowed:
+ required: false
+ description:
+ - Comma separated list of filesystem types this zone is allowed to mount.
+ type: str
+ hostname:
+ required: false
+ description:
+ - Zone/VM hostname.
+ type: str
+ image_uuid:
+ required: false
+ description:
+ - Image UUID.
+ type: str
+ indestructible_delegated:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to delegated datasets.
+ type: bool
+ indestructible_zoneroot:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to zoneroot.
+ type: bool
+ internal_metadata:
+ required: false
+ description:
+      - Metadata to be set and associated with this VM; this contains
+        operator-generated keys.
+ type: dict
+ internal_metadata_namespace:
+ required: false
+ description:
+ - List of namespaces to be set as I(internal_metadata-only); these namespaces
+ will come from I(internal_metadata) rather than I(customer_metadata).
+ type: str
+ kernel_version:
+ required: false
+ description:
+ - Kernel version to emulate for LX VMs.
+ type: str
+ limit_priv:
+ required: false
+ description:
+ - Set (comma separated) list of privileges the zone is allowed to use.
+ type: str
+ maintain_resolvers:
+ required: false
+ description:
+ - Resolvers in C(/etc/resolv.conf) will be updated when updating
+ the I(resolvers) property.
+ type: bool
+ max_locked_memory:
+ required: false
+ description:
+ - Total amount of memory (in MiBs) on the host that can be locked by this VM.
+ type: int
+ max_lwps:
+ required: false
+ description:
+ - Maximum number of lightweight processes this VM is allowed to have running.
+ type: int
+ max_physical_memory:
+ required: false
+ description:
+ - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
+ type: int
+ max_swap:
+ required: false
+ description:
+ - Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
+ type: int
+ mdata_exec_timeout:
+ required: false
+ description:
+ - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
+ that runs user-scripts in the zone.
+ type: int
+ name:
+ required: false
+ aliases: [ alias ]
+ description:
+ - Name of the VM. vmadm(1M) uses this as an optional name.
+ type: str
+ nic_driver:
+ required: false
+ description:
+ - Default value for a virtual NIC model for KVM guests.
+ type: str
+ nics:
+ required: false
+ description:
+ - A list of nics to add, valid properties are documented in vmadm(1M).
+ type: list
+ nowait:
+ required: false
+ description:
+ - Consider the provisioning complete when the VM first starts, rather than
+ when the VM has rebooted.
+ type: bool
+ qemu_opts:
+ required: false
+ description:
+ - Additional qemu arguments for KVM guests. This overwrites the default arguments
+ provided by vmadm(1M) and should only be used for debugging.
+ type: str
+ qemu_extra_opts:
+ required: false
+ description:
+ - Additional qemu cmdline arguments for KVM guests.
+ type: str
+ quota:
+ required: false
+ description:
+ - Quota on zone filesystems (in MiBs).
+ type: int
+ ram:
+ required: false
+ description:
+ - Amount of virtual RAM for a KVM guest (in MiBs).
+ type: int
+ resolvers:
+ required: false
+ description:
+ - List of resolvers to be put into C(/etc/resolv.conf).
+ type: list
+ routes:
+ required: false
+ description:
+ - Dictionary that maps destinations to gateways, these will be set as static
+ routes in the VM.
+ type: dict
+ spice_opts:
+ required: false
+ description:
+ - Addition options for SPICE-enabled KVM VMs.
+ type: str
+ spice_password:
+ required: false
+ description:
+ - Password required to connect to SPICE. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ state:
+ choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
+ default: running
+ description:
+ - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
+ operate on a VM that is currently provisioned. C(present) means that the VM will be
+ created if it was absent, and that it will be in a running state. C(absent) will
+        shut down the zone before removing it.
+ C(stopped) means the zone will be created if it doesn't exist already, before shutting
+ it down.
+ type: str
+ tmpfs:
+ required: false
+ description:
+ - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
+ type: int
+ uuid:
+ required: false
+ description:
+ - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
+ type: str
+ vcpus:
+ required: false
+ description:
+ - Number of virtual CPUs for a KVM guest.
+ type: int
+ vga:
+ required: false
+ description:
+ - Specify VGA emulation used by KVM VMs.
+ type: str
+ virtio_txburst:
+ required: false
+ description:
+ - Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
+ type: int
+ virtio_txtimer:
+ required: false
+ description:
+ - Timeout (in nanoseconds) for the TX timer of virtio NICs.
+ type: int
+ vnc_password:
+ required: false
+ description:
+ - Password required to connect to VNC. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ vnc_port:
+ required: false
+ description:
+      - TCP port for the VNC server to listen on. Set C(0) for a random port,
+        or C(-1) to disable.
+ type: int
+ zfs_data_compression:
+ required: false
+ description:
+      - Specifies the compression algorithm used for this VM's data dataset. This
+        option only has effect on delegated datasets.
+ type: str
+ zfs_data_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the delegated dataset's filesystem.
+ type: int
+ zfs_filesystem_limit:
+ required: false
+ description:
+ - Maximum number of filesystems the VM can have.
+ type: int
+ zfs_io_priority:
+ required: false
+ description:
+ - IO throttle priority value relative to other VMs.
+ type: int
+ zfs_root_compression:
+ required: false
+ description:
+      - Specifies the compression algorithm used for this VM's root dataset. This
+        option only has effect on the zoneroot dataset.
+ type: str
+ zfs_root_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
+ type: int
+ zfs_snapshot_limit:
+ required: false
+ description:
+ - Number of snapshots the VM can have.
+ type: int
+ zpool:
+ required: false
+ description:
+ - ZFS pool the VM's zone dataset will be created in.
+ type: str
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Create SmartOS zone
+ community.general.vmadm:
+ brand: joyent
+ state: present
+ alias: fw_zone
+ image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
+ firewall_enabled: yes
+ indestructible_zoneroot: yes
+ nics:
+ - nic_tag: admin
+ ip: dhcp
+ primary: true
+ internal_metadata:
+ root_pw: 'secret'
+ quota: 1
+
+- name: Delete a zone
+ community.general.vmadm:
+ alias: test_zone
+ state: deleted
+
+- name: Stop all zones
+ community.general.vmadm:
+ uuid: '*'
+ state: stopped
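+
+# A sketch of a plain state transition on an existing VM; 'restarted' and
+# 'rebooted' are treated identically by this module.
+- name: Reboot a zone by alias
+  community.general.vmadm:
+    alias: fw_zone
+    state: rebooted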
+'''
+
+RETURN = '''
+uuid:
+ description: UUID of the managed VM.
+ returned: always
+ type: str
+ sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
+alias:
+ description: Alias of the managed VM.
+ returned: When addressing a VM by alias.
+ type: str
+ sample: 'dns-zone'
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'running'
+'''
+
+import json
+import os
+import re
+import tempfile
+import traceback
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+# While vmadm(1M) supports a -E option to return any errors in JSON, the
+# generated JSON does not play well with the JSON parsers of Python.
+# The returned message contains '\n' as part of the stacktrace,
+# which breaks the parsers.
+
+
+def get_vm_prop(module, uuid, prop):
+ # Lookup a property for the given VM.
+ # Returns the property, or None if not found.
+ cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and prop in stdout_json[0]:
+ return stdout_json[0][prop]
+ else:
+ return None
+
+
+def get_vm_uuid(module, alias):
+ # Lookup the uuid that goes with the given alias.
+    # Returns the uuid or None if not found.
+ cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
+
+ # If no VM was found matching the given alias, we get back an empty array.
+    # That is not an error condition as we might be explicitly checking its
+    # absence.
+ if stdout.strip() == '[]':
+ return None
+ else:
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
+ return stdout_json[0]['uuid']
+
+
+def get_all_vm_uuids(module):
+ # Retrieve the UUIDs for all VMs.
+ cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg='Failed to get VMs list', exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ return [v['uuid'] for v in stdout_json]
+ except Exception as e:
+ module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e),
+ exception=traceback.format_exc())
+
+
+def new_vm(module, uuid, vm_state):
+ payload_file = create_payload(module, uuid)
+
+ (rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
+
+ if rc != 0:
+ changed = False
+ module.fail_json(msg='Could not create VM', exception=stderr)
+ else:
+ changed = True
+ # 'vmadm create' returns all output to stderr...
+ match = re.match('Successfully created VM (.*)', stderr)
+ if match:
+ vm_uuid = match.groups()[0]
+ if not is_valid_uuid(vm_uuid):
+ module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
+ else:
+ module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
+
+ # Now that the VM is created, ensure it is in the desired state (if not 'running')
+ if vm_state != 'running':
+ ret = set_vm_state(module, vm_uuid, vm_state)
+ if not ret:
+ module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+
+ try:
+ os.unlink(payload_file)
+ except Exception as e:
+ # Since the payload may contain sensitive information, fail hard
+ # if we cannot remove the file so the operator knows about it.
+ module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, vm_uuid
+
+
+def vmadm_create_vm(module, payload_file):
+ # Create a new VM using the provided payload.
+ cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
+
+ return module.run_command(cmd)
+
+
+def set_vm_state(module, vm_uuid, vm_state):
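+    # Returns None when the VM is already in the requested state, True when
+    # the transition succeeded, and False when it failed; the caller
+    # (vm_state_transition) maps these onto the task's changed/failed status.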
+ p = module.params
+
+ # Check if the VM is already in the desired state.
+ state = get_vm_prop(module, vm_uuid, 'state')
+ if state and (state == vm_state):
+ return None
+
+ # Lookup table for the state to be in, and which command to use for that.
+    # vm_state: [vmadm command, forceable?]
+ cmds = {
+ 'stopped': ['stop', True],
+ 'running': ['start', False],
+ 'deleted': ['delete', True],
+ 'rebooted': ['reboot', False]
+ }
+
+ if p['force'] and cmds[vm_state][1]:
+ force = '-F'
+ else:
+ force = ''
+
+    cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ match = re.match('^Successfully.*', stderr)
+ if match:
+ return True
+ else:
+ return False
+
+
+def create_payload(module, uuid):
+ # Create the JSON payload (vmdef) and return the filename.
+
+ # Filter out the few options that are not valid VM properties.
+ module_options = ['debug', 'force', 'state']
+ # @TODO make this a simple {} comprehension as soon as py2 is ditched
+ # @TODO {k: v for k, v in p.items() if k not in module_options}
+ vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v])
+
+    try:
+        vmdef_json = json.dumps(vmdef)
+    except Exception as e:
+        module.fail_json(
+            msg='Could not create valid JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+    # Create the temporary file that contains our payload, and set tight
+    # permissions on it, as it may contain sensitive information.
+ try:
+ # XXX: When there's a way to get the current ansible temporary directory
+ # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
+ # the payload (thus removing the `save_payload` option).
+        fd, fname = tempfile.mkstemp()
+        os.chmod(fname, 0o400)
+        with os.fdopen(fd, 'w') as fh:
+            fh.write(vmdef_json)
+ except Exception as e:
+ module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+ return fname
+
+
+def vm_state_transition(module, uuid, vm_state):
+ ret = set_vm_state(module, uuid, vm_state)
+
+ # Whether the VM changed state.
+ if ret is None:
+ return False
+ elif ret:
+ return True
+ else:
+ module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
+
+
+def is_valid_uuid(uuid):
+ if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
+ return True
+ else:
+ return False
+
+
+def validate_uuids(module):
+ # Perform basic UUID validation.
+ failed = []
+
+ for u in [['uuid', module.params['uuid']],
+ ['image_uuid', module.params['image_uuid']]]:
+ if u[1] and u[1] != '*':
+ if not is_valid_uuid(u[1]):
+ failed.append(u[0])
+
+ if len(failed) > 0:
+ module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
+
+
+def manage_all_vms(module, vm_state):
+ # Handle operations for all VMs, which can by definition only
+ # be state transitions.
+ state = module.params['state']
+
+ if state == 'created':
+ module.fail_json(msg='State "created" is only valid for tasks with a single VM')
+
+ # If any of the VMs has a change, the task as a whole has a change.
+ any_changed = False
+
+ # First get all VM uuids and for each check their state, and adjust it if needed.
+ for uuid in get_all_vm_uuids(module):
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+        if not current_vm_state and vm_state == 'deleted':
+            # Already absent; do not reset any_changed set by an earlier VM.
+            continue
+ else:
+ if module.check_mode:
+                if (not current_vm_state) or (current_vm_state != vm_state):
+ any_changed = True
+ else:
+ any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
+
+ return any_changed
+
+
+def main():
+ # In order to reduce the clutter and boilerplate for trivial options,
+ # abstract the vmadm properties and build the dict of arguments later.
+ # Dict of all options that are simple to define based on their type.
+ # They're not required and have a default of None.
+ properties = {
+ 'str': [
+ 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
+ 'image_uuid', 'internal_metadata_namespace', 'kernel_version',
+ 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
+ 'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
+ 'zfs_root_compression', 'zpool'
+ ],
+ 'bool': [
+ 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
+ 'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
+ 'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
+ ],
+ 'int': [
+ 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
+ 'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
+ 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
+ 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
+ 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
+ 'zfs_snapshot_limit'
+ ],
+ 'dict': ['customer_metadata', 'internal_metadata', 'routes'],
+ 'list': ['disks', 'nics', 'resolvers', 'filesystems']
+ }
+
+ # Start with the options that are not as trivial as those above.
+ options = dict(
+ state=dict(
+ default='running',
+ type='str',
+ choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
+ ),
+ name=dict(
+ default=None, type='str',
+ aliases=['alias']
+ ),
+ brand=dict(
+ default='joyent',
+ type='str',
+ choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve']
+ ),
+ cpu_type=dict(
+ default='qemu64',
+ type='str',
+ choices=['host', 'qemu64']
+ ),
+ # Regular strings, however these require additional options.
+ spice_password=dict(type='str', no_log=True),
+ vnc_password=dict(type='str', no_log=True),
+ )
+
+ # Add our 'simple' options to options dict.
+    for prop_type in properties:
+        for p in properties[prop_type]:
+            options[p] = dict(default=None, type=prop_type)
+
+ module = AnsibleModule(
+ argument_spec=options,
+ supports_check_mode=True,
+ required_one_of=[['name', 'uuid']]
+ )
+
+ module.vmadm = module.get_bin_path('vmadm', required=True)
+
+ p = module.params
+ uuid = p['uuid']
+ state = p['state']
+
+ # Translate the state parameter into something we can use later on.
+ if state in ['present', 'running']:
+ vm_state = 'running'
+ elif state in ['stopped', 'created']:
+ vm_state = 'stopped'
+ elif state in ['absent', 'deleted']:
+ vm_state = 'deleted'
+ elif state in ['restarted', 'rebooted']:
+ vm_state = 'rebooted'
+
+ result = {'state': state}
+
+    # While it's possible to refer to a given VM by its `alias`, it's easier
+ # to operate on VMs by their UUID. So if we're not given a `uuid`, look
+ # it up.
+ if not uuid:
+ uuid = get_vm_uuid(module, p['name'])
+ # Bit of a chicken and egg problem here for VMs with state == deleted.
+ # If they're going to be removed in this play, we have to lookup the
+ # uuid. If they're already deleted there's nothing to lookup.
+ # So if state == deleted and get_vm_uuid() returned '', the VM is already
+ # deleted and there's nothing else to do.
+ if uuid is None and vm_state == 'deleted':
+ result['name'] = p['name']
+ module.exit_json(**result)
+
+ validate_uuids(module)
+
+ if p['name']:
+ result['name'] = p['name']
+ result['uuid'] = uuid
+
+ if uuid == '*':
+ result['changed'] = manage_all_vms(module, vm_state)
+ module.exit_json(**result)
+
+ # The general flow is as follows:
+    # - First, the current state of the VM is obtained by its UUID.
+ # - If the state was not found and the desired state is 'deleted', return.
+ # - If the state was not found, it means the VM has to be created.
+ # Subsequently the VM will be set to the desired state (i.e. stopped)
+    # - Otherwise, it means the VM exists already and we operate on its
+    #   state (i.e. reboot it).
+ #
+ # In the future it should be possible to query the VM for a particular
+ # property as a valid state (i.e. queried) so the result can be
+ # registered.
+ # Also, VMs should be able to get their properties updated.
+ # Managing VM snapshots should be part of a standalone module.
+
+ # First obtain the VM state to determine what needs to be done with it.
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+
+ # First handle the case where the VM should be deleted and is not present.
+ if not current_vm_state and vm_state == 'deleted':
+ result['changed'] = False
+ elif module.check_mode:
+ # Shortcut for check mode, if there is no VM yet, it will need to be created.
+ # Or, if the VM is not in the desired state yet, it needs to transition.
+ if (not current_vm_state) or (current_vm_state != vm_state):
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+ # No VM was found that matched the given ID (alias or uuid), so we create it.
+ elif not current_vm_state:
+ result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
+ else:
+ # VM was found, operate on its state directly.
+ result['changed'] = vm_state_transition(module, uuid, vm_state)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py
new file mode 100644
index 00000000..22556d91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: create or cancel a virtual instance in SoftLayer
+description:
+ - Creates or cancels SoftLayer instances.
+ - When creating an instance, the module can optionally wait for it to be 'running'.
+options:
+ instance_id:
+ description:
+ - Instance ID of the virtual instance on which to perform the action.
+ type: str
+ hostname:
+ description:
+ - Hostname to be provided to a virtual instance.
+ type: str
+ domain:
+ description:
+ - Domain name to be provided to a virtual instance.
+ type: str
+ datacenter:
+ description:
+ - Datacenter in which the virtual instance will be deployed.
+ type: str
+ choices:
+ - ams01
+ - ams03
+ - che01
+ - dal01
+ - dal05
+ - dal06
+ - dal09
+ - dal10
+ - dal12
+ - dal13
+ - fra02
+ - fra04
+ - fra05
+ - hkg02
+ - hou02
+ - lon02
+ - lon04
+ - lon06
+ - mel01
+ - mex01
+ - mil01
+ - mon01
+ - osl01
+ - par01
+ - sao01
+ - sea01
+ - seo01
+ - sjc01
+ - sjc03
+ - sjc04
+ - sng01
+ - syd01
+ - syd04
+ - tok02
+ - tor01
+ - wdc01
+ - wdc04
+ - wdc06
+ - wdc07
+ tags:
+ description:
+ - Tag or list of tags to be provided to a virtual instance.
+ type: str
+ hourly:
+ description:
+ - Flag to determine if the instance should be hourly billed.
+ type: bool
+ default: 'yes'
+ private:
+ description:
+ - Flag to determine if the instance should be private only.
+ type: bool
+ default: 'no'
+ dedicated:
+ description:
+ - Flag to determine if the instance should be deployed in dedicated space.
+ type: bool
+ default: 'no'
+ local_disk:
+ description:
+ - Flag to determine if local disk should be used for the new instance.
+ type: bool
+ default: 'yes'
+ cpus:
+ description:
+ - Count of cpus to be assigned to new virtual instance.
+ type: int
+ choices: [1, 2, 4, 8, 16, 32, 56]
+ memory:
+ description:
+ - Amount of memory to be assigned to new virtual instance.
+ type: int
+ choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+ flavor:
+ description:
+ - Specify which SoftLayer flavor template to use instead of cpus and memory.
+ version_added: '0.2.0'
+ type: str
+ disks:
+ description:
+ - List of disk sizes to be assigned to new virtual instance.
+ default: [ 25 ]
+ type: list
+ os_code:
+ description:
+ - OS Code to be used for new virtual instance.
+ type: str
+ image_id:
+ description:
+ - Image Template to be used for new virtual instance.
+ type: str
+ nic_speed:
+ description:
+ - NIC Speed to be assigned to new virtual instance.
+ choices: [10, 100, 1000]
+ type: int
+ public_vlan:
+ description:
+ - VLAN by its Id to be assigned to the public NIC.
+ type: str
+ private_vlan:
+ description:
+ - VLAN by its Id to be assigned to the private NIC.
+ type: str
+ ssh_keys:
+ description:
+ - List of ssh keys by their Id to be assigned to a virtual instance.
+ type: list
+ post_uri:
+ description:
+ - URL of a post provisioning script to be loaded and executed on virtual instance.
+ type: str
+ state:
+ description:
+ - Create, or cancel a virtual instance.
+ - Specify C(present) for create, C(absent) to cancel.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ wait:
+ description:
+ - Flag used to wait for active status before returning.
+ type: bool
+ default: 'yes'
+ wait_time:
+ description:
+ - Maximum time, in seconds, to wait for the instance to become active before returning.
+ default: 600
+ type: int
+requirements:
+ - python >= 2.6
+ - softlayer >= 4.1.1
+author:
+- Matt Colton (@mcltn)
+'''
+
+EXAMPLES = '''
+- name: Build instance
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instance request
+ community.general.sl_vm:
+ hostname: instance-1
+ domain: anydomain.com
+ datacenter: dal09
+ tags: ansible-module-test
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks: [25]
+ os_code: UBUNTU_LATEST
+ wait: no
+
+- name: Build additional instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instances request
+ community.general.sl_vm:
+ hostname: "{{ item.hostname }}"
+ domain: "{{ item.domain }}"
+ datacenter: "{{ item.datacenter }}"
+ tags: "{{ item.tags }}"
+ hourly: "{{ item.hourly }}"
+ private: "{{ item.private }}"
+ dedicated: "{{ item.dedicated }}"
+ local_disk: "{{ item.local_disk }}"
+ cpus: "{{ item.cpus }}"
+ memory: "{{ item.memory }}"
+ disks: "{{ item.disks }}"
+ os_code: "{{ item.os_code }}"
+ ssh_keys: "{{ item.ssh_keys }}"
+ wait: "{{ item.wait }}"
+ with_items:
+ - hostname: instance-2
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: True
+ - hostname: instance-3
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: yes
+
+- name: Cancel instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Cancel by tag
+ community.general.sl_vm:
+ state: absent
+ tags: ansible-module-test
+'''
+
+# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
+RETURN = '''# '''
+
+import json
+import time
+
+try:
+ import SoftLayer
+ from SoftLayer import VSManager
+
+ HAS_SL = True
+ vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+ HAS_SL = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+
+
+# TODO: get this info from API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02',
+ 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01',
+ 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04',
+ 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07']
+CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
+MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+INITIALDISK_SIZES = [25, 100]
+LOCALDISK_SIZES = [25, 100, 150, 200, 300]
+SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
+NIC_SPEEDS = [10, 100, 1000]
+
+
+def create_virtual_instance(module):
+
+ instances = vsManager.list_instances(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ datacenter=module.params.get('datacenter')
+ )
+
+ if instances:
+ return False, None
+
+ # Check if OS or Image Template is provided (Can't be both, defaults to OS)
+ if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
+ module.params['image_id'] = ''
+ elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
+ module.params['os_code'] = ''
+ module.params['disks'] = [] # Blank out disks since it will use the template
+ else:
+ return False, None
+
+ tags = module.params.get('tags')
+ if isinstance(tags, list):
+ tags = ','.join(map(str, module.params.get('tags')))
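+ # e.g. tags=['web', 'prod'] becomes 'web,prod' (hypothetical values);
+ # the create_instance() call below takes the tags as a single string.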
+
+ instance = vsManager.create_instance(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ cpus=module.params.get('cpus'),
+ memory=module.params.get('memory'),
+ flavor=module.params.get('flavor'),
+ hourly=module.params.get('hourly'),
+ datacenter=module.params.get('datacenter'),
+ os_code=module.params.get('os_code'),
+ image_id=module.params.get('image_id'),
+ local_disk=module.params.get('local_disk'),
+ disks=module.params.get('disks'),
+ ssh_keys=module.params.get('ssh_keys'),
+ nic_speed=module.params.get('nic_speed'),
+ private=module.params.get('private'),
+ public_vlan=module.params.get('public_vlan'),
+ private_vlan=module.params.get('private_vlan'),
+ dedicated=module.params.get('dedicated'),
+ post_uri=module.params.get('post_uri'),
+ tags=tags,
+ )
+
+ if instance is not None and instance['id'] > 0:
+ return True, instance
+ else:
+ return False, None
+
+
+def wait_for_instance(module, instance_id):
+ instance = None
+ completed = False
+ wait_timeout = time.time() + module.params.get('wait_time')
+ while not completed and wait_timeout > time.time():
+ try:
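+ # Our reading of the SoftLayer SDK: wait_for_ready(id, 10, 2) waits up
+ # to 10 seconds per call, polling every 2 seconds; the outer while loop
+ # keeps retrying until wait_time has elapsed.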
+ completed = vsManager.wait_for_ready(instance_id, 10, 2)
+ if completed:
+ instance = vsManager.get_instance(instance_id)
+ except Exception:
+ completed = False
+
+ return completed, instance
+
+
+def cancel_instance(module):
+ canceled = True
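+ # Two cancellation modes: look up instances by tags/hostname/domain, or
+ # cancel a single explicitly given instance_id. Anything else is a no-op.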
+ if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
+ tags = module.params.get('tags')
+ if isinstance(tags, string_types):
+ tags = [module.params.get('tags')]
+ instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
+ for instance in instances:
+ try:
+ vsManager.cancel_instance(instance['id'])
+ except Exception:
+ canceled = False
+ elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+ try:
+ vsManager.cancel_instance(module.params.get('instance_id'))
+ except Exception:
+ canceled = False
+ else:
+ return False, None
+
+ return canceled, None
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str'),
+ hostname=dict(type='str'),
+ domain=dict(type='str'),
+ datacenter=dict(type='str', choices=DATACENTERS),
+ tags=dict(type='str'),
+ hourly=dict(type='bool', default=True),
+ private=dict(type='bool', default=False),
+ dedicated=dict(type='bool', default=False),
+ local_disk=dict(type='bool', default=True),
+ cpus=dict(type='int', choices=CPU_SIZES),
+ memory=dict(type='int', choices=MEMORY_SIZES),
+ flavor=dict(type='str'),
+ disks=dict(type='list', default=[25]),
+ os_code=dict(type='str'),
+ image_id=dict(type='str'),
+ nic_speed=dict(type='int', choices=NIC_SPEEDS),
+ public_vlan=dict(type='str'),
+ private_vlan=dict(type='str'),
+ ssh_keys=dict(type='list', default=[]),
+ post_uri=dict(type='str'),
+ state=dict(type='str', default='present', choices=STATES),
+ wait=dict(type='bool', default=True),
+ wait_time=dict(type='int', default=600),
+ )
+ )
+
+ if not HAS_SL:
+ module.fail_json(msg='softlayer python library required for this module')
+
+ if module.params.get('state') == 'absent':
+ (changed, instance) = cancel_instance(module)
+
+ elif module.params.get('state') == 'present':
+ (changed, instance) = create_virtual_instance(module)
+ if module.params.get('wait') is True and instance:
+ (changed, instance) = wait_for_instance(module, instance['id'])
+
+ module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
new file mode 100644
index 00000000..8f05da7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
@@ -0,0 +1,1543 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: spotinst_aws_elastigroup
+short_description: Create, update or delete Spotinst AWS Elastigroups
+author: Spotinst (@talzur)
+description:
+ - Can create, update, or delete Spotinst AWS Elastigroups.
+ - Launch configuration is part of the elastigroup configuration,
+ so no additional modules are necessary for handling the launch configuration.
+ - You will have to have a credentials file in this location - C(<home>/.spotinst/credentials).
+ The credentials file must contain a row that looks like this -
+ C(token = <YOUR TOKEN>).
+ - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-).
+requirements:
+ - python >= 2.7
+ - spotinst_sdk >= 1.0.38
+options:
+
+ credentials_path:
+ description:
+ - (Path) Optional parameter that allows setting a non-default credentials path.
+ default: ~/.spotinst/credentials
+ type: path
+
+ account_id:
+ description:
+ - (String) Optional parameter that allows setting an account ID inside the module configuration.
+ By default this is retrieved from the credentials path.
+ type: str
+
+ availability_vs_cost:
+ description:
+ - (String) The strategy orientation.
+ - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
+ required: true
+ type: str
+
+ availability_zones:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ name (String),
+ subnet_id (String),
+ placement_group_name (String),
+ required: true
+ type: list
+
+ block_device_mappings:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
+ You can specify virtual devices and EBS volumes.;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ device_name (List of Strings),
+ virtual_name (String),
+ no_device (String),
+ ebs (Object, expects the following keys-
+ delete_on_termination(Boolean),
+ encrypted(Boolean),
+ iops (Integer),
+ snapshot_id(Integer),
+ volume_type(String),
+ volume_size(Integer))
+ type: list
+
+ chef:
+ description:
+ - (Object) The Chef integration configuration.;
+ Expects the following keys - chef_server (String),
+ organization (String),
+ user (String),
+ pem_key (String),
+ chef_version (String)
+ type: dict
+
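+ do_not_update:
+ description:
+ - (List of Strings) A list of group fields to leave untouched when an
+ existing group is updated (for example C(image_id) or C(target), as
+ shown in the examples below).
+ default: []
+ type: list
+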
+ draining_timeout:
+ description:
+ - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+ type: int
+
+ ebs_optimized:
+ description:
+ - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.;
+ Note - additional charges will be applied.
+ type: bool
+
+ ebs_volume_pool:
+ description:
+ - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ volume_ids (List of Strings),
+ device_name (String)
+ type: list
+
+ ecs:
+ description:
+ - (Object) The ECS integration configuration.;
+ Expects the following key -
+ cluster_name (String)
+ type: dict
+
+ elastic_ips:
+ description:
+ - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
+ type: list
+
+ fallback_to_od:
+ description:
+ - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead
+ type: bool
+
+ health_check_grace_period:
+ description:
+ - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health.
+ - If not specified, it defaults to C(300).
+ type: int
+
+ health_check_unhealthy_duration_before_replacement:
+ description:
+ - (Integer) Minimal amount of time an instance should be unhealthy for us to consider it unhealthy.
+ type: int
+
+ health_check_type:
+ description:
+ - (String) The service to use for the health check.
+ - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
+ type: str
+
+ iam_role_name:
+ description:
+ - (String) The instance profile iamRole name
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ iam_role_arn:
+ description:
+ - (String) The instance profile iamRole arn
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ id:
+ description:
+ - (String) The group id if it already exists and you want to update or delete it.
+ This will not work unless the uniqueness_by field is set to C(id).
+ When both are set, the group will either be updated or deleted, but not created.
+ type: str
+
+ image_id:
+ description:
+ - (String) The image Id used to launch the instance.;
+ In case of conflict between Instance type and image type, an error will be returned
+ required: true
+ type: str
+
+ key_pair:
+ description:
+ - (String) Specify a Key Pair to attach to the instances
+ type: str
+
+ kubernetes:
+ description:
+ - (Object) The Kubernetes integration configuration.
+ Expects the following keys -
+ api_server (String),
+ token (String)
+ type: dict
+
+ lifetime_period:
+ description:
+ - (Integer) lifetime period
+ type: int
+
+ load_balancers:
+ description:
+ - (List of Strings) List of classic ELB names
+ type: list
+
+ max_size:
+ description:
+ - (Integer) The upper limit number of instances that you can scale up to
+ required: true
+ type: int
+
+ mesosphere:
+ description:
+ - (Object) The Mesosphere integration configuration.
+ Expects the following key -
+ api_server (String)
+ type: dict
+
+ min_size:
+ description:
+ - (Integer) The lower limit number of instances that you can scale down to
+ required: true
+ type: int
+
+ monitoring:
+ description:
+ - (String) Describes whether instance Enhanced Monitoring is enabled
+ type: str
+
+ name:
+ description:
+ - (String) Unique name for elastigroup to be created, updated or deleted
+ required: true
+ type: str
+
+ network_interfaces:
+ description:
+ - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ description (String),
+ device_index (Integer),
+ secondary_private_ip_address_count (Integer),
+ associate_public_ip_address (Boolean),
+ delete_on_termination (Boolean),
+ groups (List of Strings),
+ network_interface_id (String),
+ private_ip_address (String),
+ subnet_id (String),
+ associate_ipv6_address (Boolean),
+ private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
+ type: list
+
+ on_demand_count:
+ description:
+ - (Integer) Required if risk is not set
+ - Number of on demand instances to launch. All other instances will be spot instances.;
+ Either set this parameter or the risk parameter
+ type: int
+
+ on_demand_instance_type:
+ description:
+ - (String) On-demand instance type that will be provisioned
+ type: str
+
+ opsworks:
+ description:
+ - (Object) The elastigroup OpsWorks integration configuration.;
+ Expects the following key -
+ layer_id (String)
+ type: dict
+
+ persistence:
+ description:
+ - (Object) The Stateful elastigroup configuration.;
+ Accepts the following keys -
+ should_persist_root_device (Boolean),
+ should_persist_block_devices (Boolean),
+ should_persist_private_ip (Boolean)
+ type: dict
+
+ product:
+ description:
+ - (String) Operation system type.
+ - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))."
+ required: true
+ type: str
+
+ rancher:
+ description:
+ - (Object) The Rancher integration configuration.;
+ Expects the following keys -
+ version (String),
+ access_key (String),
+ secret_key (String),
+ master_host (String)
+ type: dict
+
+ right_scale:
+ description:
+ - (Object) The Rightscale integration configuration.;
+ Expects the following keys -
+ account_id (String),
+ refresh_token (String)
+ type: dict
+
+ risk:
+ description:
+ - (Integer) Required if on_demand_count is not set. The percentage of Spot instances to launch (0 - 100).
+ type: int
+
+ roll_config:
+ description:
+ - (Object) Roll configuration.;
+ If you would like the group to roll after updating, please use this feature.
+ Accepts the following keys -
+ batch_size_percentage(Integer, Required),
+ grace_period - (Integer, Required),
+ health_check_type(String, Optional)
+ type: dict
+
+ scheduled_tasks:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ adjustment (Integer),
+ scale_target_capacity (Integer),
+ scale_min_capacity (Integer),
+ scale_max_capacity (Integer),
+ adjustment_percentage (Integer),
+ batch_size_percentage (Integer),
+ cron_expression (String),
+ frequency (String),
+ grace_period (Integer),
+ task_type (String, required),
+ is_enabled (Boolean)
+ type: list
+
+ security_group_ids:
+ description:
+ - (List of Strings) One or more security group IDs. ;
+ In case of update it will override the existing Security Group with the new given array
+ required: true
+ type: list
+
+ shutdown_script:
+ description:
+ - (String) The Base64-encoded shutdown script that executes prior to instance termination.
+ Encode before setting.
+ type: str
+
+ signals:
+ description:
+ - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup;
+ keys allowed are -
+ name (String, required),
+ timeout (Integer)
+ type: list
+
+ spin_up_time:
+ description:
+ - (Integer) spin up time, in seconds, for the instance
+ type: int
+
+ spot_instance_types:
+ description:
+ - (List of Strings) Spot instance type that will be provisioned.
+ required: true
+ type: list
+
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - (String) create or delete the elastigroup
+ default: present
+ type: str
+
+ tags:
+ description:
+ - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
+ type: list
+
+ target:
+ description:
+ - (Integer) The number of instances to launch
+ required: true
+ type: int
+
+ target_group_arns:
+ description:
+ - (List of Strings) List of target group arns instances should be registered to
+ type: list
+
+ tenancy:
+ description:
+ - (String) dedicated vs shared tenancy.
+ - "The available choices are: C(default), C(dedicated)."
+ type: str
+
+ terminate_at_end_of_billing_hour:
+ description:
+ - (Boolean) terminate at the end of billing hour
+ type: bool
+
+ unit:
+ description:
+ - (String) The capacity unit to launch instances by.
+ - "The available choices are: C(instance), C(weight)."
+ type: str
+
+ up_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ min_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ down_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ max_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ target_tracking_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ source (String, required),
+ metric_name (String, required),
+ statistic (String, required),
+ unit (String, required),
+ cooldown (String, required),
+ target (String, required)
+ type: list
+
+ uniqueness_by:
+ choices:
+ - id
+ - name
+ description:
+ - (String) If your group names are not unique, you may use this feature to update or delete a specific group.
+ Whenever this property is set, you must also set the C(id) option in order to update or delete a group, otherwise a group will be created.
+ default: name
+ type: str
+
+ user_data:
+ description:
+ - (String) Base64-encoded MIME user data. Encode before setting the value.
+ type: str
+
+ utilize_reserved_instances:
+ description:
+ - (Boolean) In case of any available Reserved Instances,
+ Elastigroup will utilize your reservations before purchasing Spot instances.
+ type: bool
+
+ wait_for_instances:
+ description:
+ - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin up.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - (Integer) How long the module should wait for instances before failing the action.;
+ Only works if wait_for_instances is True.
+ type: int
+
+'''
+EXAMPLES = '''
+# Basic configuration YAML example
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/sda1'
+ ebs:
+ volume_size: 100
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
+# In organizations with more than one account, it is required to specify an account_id
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ ebs:
+ volume_size: 60
+ volume_type: gp2
+ - device_name: '/dev/xvdb'
+ ebs:
+ volume_size: 120
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example we have set up block device mapping with ephemeral devices
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ virtual_name: ephemeral0
+ - device_name: '/dev/xvdb/'
+ virtual_name: ephemeral1
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example we create a basic group configuration with a network interface defined.
+# Each network interface must have a device index
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ network_interfaces:
+ - associate_public_ip_address: true
+ device_index: 0
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+
+# In this example we create a basic group configuration with a target tracking scaling policy defined
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ account_id: act-92d45673
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-79da021e
+ image_id: ami-f173cc91
+ fallback_to_od: true
+ tags:
+ - Creator: ValueOfCreatorTag
+ - Environment: ValueOfEnvironmentTag
+ key_pair: spotinst-labs-oregon
+ max_size: 10
+ min_size: 0
+ target: 2
+ unit: instance
+ monitoring: True
+ name: ansible-group-1
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-46cdc13d
+ spot_instance_types:
+ - c3.large
+ target_tracking_policies:
+ - policy_name: target-tracking-1
+ namespace: AWS/EC2
+ metric_name: CPUUtilization
+ statistic: average
+ unit: percent
+ target: 50
+ cooldown: 120
+ do_not_update:
+ - image_id
+ register: result
+ - ansible.builtin.debug: var=result
+'''
+
+RETURN = '''
+---
+instances:
+ description: List of active elastigroup instances and their details.
+ returned: success
+ type: list
+ sample: [
+ {
+ "spotInstanceRequestId": "sir-regs25zp",
+ "instanceId": "i-09640ad8678234c",
+ "instanceType": "m4.large",
+ "product": "Linux/UNIX",
+ "availabilityZone": "us-west-2b",
+ "privateIp": "180.0.2.244",
+ "createdAt": "2017-07-17T12:46:18.000Z",
+ "status": "fulfilled"
+ }
+ ]
+group_id:
+ description: Created / Updated group's ID.
+ returned: success
+ type: str
+ sample: "sig-12345"
+
+'''
+
+HAS_SPOTINST_SDK = False
+
+import os
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import spotinst_sdk as spotinst
+ from spotinst_sdk import SpotinstClientException
+
+ HAS_SPOTINST_SDK = True
+
+except ImportError:
+ pass
+
+eni_fields = ('description',
+ 'device_index',
+ 'secondary_private_ip_address_count',
+ 'associate_public_ip_address',
+ 'delete_on_termination',
+ 'groups',
+ 'network_interface_id',
+ 'private_ip_address',
+ 'subnet_id',
+ 'associate_ipv6_address')
+
+private_ip_fields = ('private_ip_address',
+ 'primary')
+
+capacity_fields = (dict(ansible_field_name='min_size',
+ spotinst_field_name='minimum'),
+ dict(ansible_field_name='max_size',
+ spotinst_field_name='maximum'),
+ 'target',
+ 'unit')
+
+lspec_fields = ('user_data',
+ 'key_pair',
+ 'tenancy',
+ 'shutdown_script',
+ 'monitoring',
+ 'ebs_optimized',
+ 'image_id',
+ 'health_check_type',
+ 'health_check_grace_period',
+ 'health_check_unhealthy_duration_before_replacement',
+ 'security_group_ids')
+
+iam_fields = (dict(ansible_field_name='iam_role_name',
+ spotinst_field_name='name'),
+ dict(ansible_field_name='iam_role_arn',
+ spotinst_field_name='arn'))
+
+scheduled_task_fields = ('adjustment',
+ 'adjustment_percentage',
+ 'batch_size_percentage',
+ 'cron_expression',
+ 'frequency',
+ 'grace_period',
+ 'task_type',
+ 'is_enabled',
+ 'scale_target_capacity',
+ 'scale_min_capacity',
+ 'scale_max_capacity')
+
+scaling_policy_fields = ('policy_name',
+ 'namespace',
+ 'metric_name',
+ 'dimensions',
+ 'statistic',
+ 'evaluation_periods',
+ 'period',
+ 'threshold',
+ 'cooldown',
+ 'unit',
+ 'operator')
+
+tracking_policy_fields = ('policy_name',
+ 'namespace',
+ 'source',
+ 'metric_name',
+ 'statistic',
+ 'unit',
+ 'cooldown',
+ 'target',
+ 'threshold')
+
+action_fields = (dict(ansible_field_name='action_type',
+ spotinst_field_name='type'),
+ 'adjustment',
+ 'min_target_capacity',
+ 'max_target_capacity',
+ 'target',
+ 'minimum',
+ 'maximum')
+
+signal_fields = ('name',
+ 'timeout')
+
+multai_lb_fields = ('balancer_id',
+ 'project_id',
+ 'target_set_id',
+ 'az_awareness',
+ 'auto_weight')
+
+persistence_fields = ('should_persist_root_device',
+ 'should_persist_block_devices',
+ 'should_persist_private_ip')
+
+strategy_fields = ('risk',
+ 'utilize_reserved_instances',
+ 'fallback_to_od',
+ 'on_demand_count',
+ 'availability_vs_cost',
+ 'draining_timeout',
+ 'spin_up_time',
+ 'lifetime_period')
+
+ebs_fields = ('delete_on_termination',
+ 'encrypted',
+ 'iops',
+ 'snapshot_id',
+ 'volume_type',
+ 'volume_size')
+
+bdm_fields = ('device_name',
+ 'virtual_name',
+ 'no_device')
+
+kubernetes_fields = ('api_server',
+ 'token')
+
+right_scale_fields = ('account_id',
+ 'refresh_token')
+
+rancher_fields = ('access_key',
+ 'secret_key',
+ 'master_host',
+ 'version')
+
+chef_fields = ('chef_server',
+ 'organization',
+ 'user',
+ 'pem_key',
+ 'chef_version')
+
+az_fields = ('name',
+ 'subnet_id',
+ 'placement_group_name')
+
+opsworks_fields = ('layer_id',)
+
+scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
+
+mesosphere_fields = ('api_server',)
+
+ecs_fields = ('cluster_name',)
+
+multai_fields = ('multai_token',)
+
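+# Each *_fields tuple above maps Ansible module parameters onto attributes of
+# the corresponding spotinst_sdk class via expand_fields() further below;
+# plain strings map one-to-one, while dict entries rename an Ansible field to
+# its SDK attribute name.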
+
+def handle_elastigroup(client, module):
+ has_changed = False
+ group_id = None
+ message = 'None'
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ uniqueness_by = module.params.get('uniqueness_by')
+ external_group_id = module.params.get('id')
+
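+ # uniqueness_by == 'id' trusts the caller-supplied group id; otherwise we
+ # scan the full group list for a name match to decide create vs. update.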
+ if uniqueness_by == 'id':
+ if external_group_id is None:
+ should_create = True
+ else:
+ should_create = False
+ group_id = external_group_id
+ else:
+ groups = client.get_elastigroups()
+ should_create, group_id = find_group_with_same_name(groups, name)
+
+ if should_create is True:
+ if state == 'present':
+ eg = expand_elastigroup(module, is_update=False)
+ group = client.create_elastigroup(group=eg)
+ group_id = group['id']
+ message = 'Created group successfully.'
+ module.debug(str(" [INFO] " + message + "\n"))
+ has_changed = True
+
+ elif state == 'absent':
+ message = 'Cannot delete non-existent group.'
+ has_changed = False
+ else:
+ eg = expand_elastigroup(module, is_update=True)
+
+ if state == 'present':
+ group = client.update_elastigroup(group_update=eg, group_id=group_id)
+ message = 'Updated group successfully.'
+
+ try:
+ roll_config = module.params.get('roll_config')
+ if roll_config:
+ eg_roll = spotinst.aws_elastigroup.Roll(
+ batch_size_percentage=roll_config.get('batch_size_percentage'),
+ grace_period=roll_config.get('grace_period'),
+ health_check_type=roll_config.get('health_check_type')
+ )
+ roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
+ message = 'Updated and started rolling the group successfully.'
+
+ except SpotinstClientException as exc:
+ message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc)
+ has_changed = True
+
+ elif state == 'absent':
+ try:
+ client.delete_elastigroup(group_id=group_id)
+ except SpotinstClientException as exc:
+ if "GROUP_DOESNT_EXIST" in exc.message:
+ pass
+ else:
+ module.fail_json(msg="Error while attempting to delete group : " + exc.message)
+
+ message = 'Deleted group successfully.'
+ has_changed = True
+
+ return group_id, message, has_changed
+
+
+def retrieve_group_instances(client, module, group_id):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_for_instances = module.params.get('wait_for_instances')
+
+ health_check_type = module.params.get('health_check_type')
+
+ if wait_timeout is None:
+ wait_timeout = 300
+
+ wait_timeout = time.time() + wait_timeout
+ target = module.params.get('target')
+ state = module.params.get('state')
+ instances = list()
+
+ if state == 'present' and group_id is not None and wait_for_instances is True:
+
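+ # Poll until `target` instances are healthy (when health_check_type is
+ # set) or report a private IP (otherwise), or until wait_timeout expires.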
+ is_amount_fulfilled = False
+ while is_amount_fulfilled is False and wait_timeout > time.time():
+ instances = list()
+ amount_of_fulfilled_instances = 0
+
+ if health_check_type is not None:
+ healthy_instances = client.get_instance_healthiness(group_id=group_id)
+
+ for healthy_instance in healthy_instances:
+ if healthy_instance.get('healthStatus') == 'HEALTHY':
+ amount_of_fulfilled_instances += 1
+ instances.append(healthy_instance)
+
+ else:
+ active_instances = client.get_elastigroup_active_instances(group_id=group_id)
+
+ for active_instance in active_instances:
+ if active_instance.get('private_ip') is not None:
+ amount_of_fulfilled_instances += 1
+ instances.append(active_instance)
+
+ if amount_of_fulfilled_instances >= target:
+ is_amount_fulfilled = True
+
+ time.sleep(10)
+
+ return instances
+
+
+def find_group_with_same_name(groups, name):
+ for group in groups:
+ if group['name'] == name:
+ return False, group.get('id')
+
+ return True, None
+
+
+def expand_elastigroup(module, is_update):
+ do_not_update = module.params['do_not_update']
+ name = module.params.get('name')
+
+ eg = spotinst.aws_elastigroup.Elastigroup()
+ description = module.params.get('description')
+
+ if name is not None:
+ eg.name = name
+ if description is not None:
+ eg.description = description
+
+ # Capacity
+ expand_capacity(eg, module, is_update, do_not_update)
+ # Strategy
+ expand_strategy(eg, module)
+ # Scaling
+ expand_scaling(eg, module)
+ # Third party integrations
+ expand_integrations(eg, module)
+ # Compute
+ expand_compute(eg, module, is_update, do_not_update)
+ # Multai
+ expand_multai(eg, module)
+ # Scheduling
+ expand_scheduled_tasks(eg, module)
+
+ return eg
+
+
+def expand_compute(eg, module, is_update, do_not_update):
+ elastic_ips = module.params['elastic_ips']
+ on_demand_instance_type = module.params.get('on_demand_instance_type')
+ spot_instance_types = module.params['spot_instance_types']
+ ebs_volume_pool = module.params['ebs_volume_pool']
+ availability_zones_list = module.params['availability_zones']
+ product = module.params.get('product')
+
+ eg_compute = spotinst.aws_elastigroup.Compute()
+
+ if product is not None:
+ # Only put product on group creation
+ if is_update is not True:
+ eg_compute.product = product
+
+ if elastic_ips is not None:
+ eg_compute.elastic_ips = elastic_ips
+
+ if on_demand_instance_type is not None or spot_instance_types is not None:
+ eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
+
+ if spot_instance_types is not None:
+ eg_instance_types.spot = spot_instance_types
+ if on_demand_instance_type is not None:
+ eg_instance_types.ondemand = on_demand_instance_type
+
+ if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
+ eg_compute.instance_types = eg_instance_types
+
+ expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
+
+ eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+
+ expand_launch_spec(eg_compute, module, is_update, do_not_update)
+
+ eg.compute = eg_compute
+
+
+def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
+ if ebs_volumes_list is not None:
+ eg_volumes = []
+
+ for volume in ebs_volumes_list:
+ eg_volume = spotinst.aws_elastigroup.EbsVolume()
+
+ if volume.get('device_name') is not None:
+ eg_volume.device_name = volume.get('device_name')
+ if volume.get('volume_ids') is not None:
+ eg_volume.volume_ids = volume.get('volume_ids')
+
+ if eg_volume.device_name is not None:
+ eg_volumes.append(eg_volume)
+
+ if len(eg_volumes) > 0:
+ eg_compute.ebs_volume_pool = eg_volumes
+
+
+def expand_launch_spec(eg_compute, module, is_update, do_not_update):
+ eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+
+ if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
+ eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+
+ tags = module.params['tags']
+ load_balancers = module.params['load_balancers']
+ target_group_arns = module.params['target_group_arns']
+ block_device_mappings = module.params['block_device_mappings']
+ network_interfaces = module.params['network_interfaces']
+
+ if is_update is True:
+ if 'image_id' in do_not_update:
+ delattr(eg_launch_spec, 'image_id')
+
+ expand_tags(eg_launch_spec, tags)
+
+ expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
+
+ expand_block_device_mappings(eg_launch_spec, block_device_mappings)
+
+ expand_network_interfaces(eg_launch_spec, network_interfaces)
+
+ eg_compute.launch_specification = eg_launch_spec
+
+
+def expand_integrations(eg, module):
+ rancher = module.params.get('rancher')
+ mesosphere = module.params.get('mesosphere')
+ ecs = module.params.get('ecs')
+ kubernetes = module.params.get('kubernetes')
+ right_scale = module.params.get('right_scale')
+ opsworks = module.params.get('opsworks')
+ chef = module.params.get('chef')
+
+ integration_exists = False
+
+ eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
+
+ if mesosphere is not None:
+ eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
+ integration_exists = True
+
+ if ecs is not None:
+ eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
+ integration_exists = True
+
+ if kubernetes is not None:
+ eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
+ integration_exists = True
+
+ if right_scale is not None:
+ eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
+ integration_exists = True
+
+ if opsworks is not None:
+ eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
+ integration_exists = True
+
+ if rancher is not None:
+ eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
+ integration_exists = True
+
+ if chef is not None:
+ eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
+ integration_exists = True
+
+ if integration_exists:
+ eg.third_parties_integration = eg_integrations
+
+
+def expand_capacity(eg, module, is_update, do_not_update):
+ eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
+
+ if is_update is True:
+ delattr(eg_capacity, 'unit')
+
+ if 'target' in do_not_update:
+ delattr(eg_capacity, 'target')
+
+ eg.capacity = eg_capacity
+
+
+def expand_strategy(eg, module):
+ persistence = module.params.get('persistence')
+ signals = module.params.get('signals')
+
+ eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
+
+ terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
+
+ if terminate_at_end_of_billing_hour is not None:
+ eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
+ module.params, 'ScalingStrategy')
+
+ if persistence is not None:
+ eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
+
+ if signals is not None:
+ eg_signals = expand_list(signals, signal_fields, 'Signal')
+
+ if len(eg_signals) > 0:
+ eg_strategy.signals = eg_signals
+
+ eg.strategy = eg_strategy
+
+
+def expand_multai(eg, module):
+ multai_load_balancers = module.params.get('multai_load_balancers')
+
+ eg_multai = expand_fields(multai_fields, module.params, 'Multai')
+
+ if multai_load_balancers is not None:
+ eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
+
+ if len(eg_multai_load_balancers) > 0:
+ eg_multai.balancers = eg_multai_load_balancers
+ eg.multai = eg_multai
+
+
+def expand_scheduled_tasks(eg, module):
+ scheduled_tasks = module.params.get('scheduled_tasks')
+
+ if scheduled_tasks is not None:
+ eg_scheduling = spotinst.aws_elastigroup.Scheduling()
+
+ eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
+
+ if len(eg_tasks) > 0:
+ eg_scheduling.tasks = eg_tasks
+ eg.scheduling = eg_scheduling
+
+
+def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
+ if load_balancers is not None or target_group_arns is not None:
+ eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
+ eg_total_lbs = []
+
+ if load_balancers is not None:
+ for elb_name in load_balancers:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if elb_name is not None:
+ eg_elb.name = elb_name
+ eg_elb.type = 'CLASSIC'
+ eg_total_lbs.append(eg_elb)
+
+ if target_group_arns is not None:
+ for target_arn in target_group_arns:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if target_arn is not None:
+ eg_elb.arn = target_arn
+ eg_elb.type = 'TARGET_GROUP'
+ eg_total_lbs.append(eg_elb)
+
+ if len(eg_total_lbs) > 0:
+ eg_load_balancers_config.load_balancers = eg_total_lbs
+ eg_launchspec.load_balancers_config = eg_load_balancers_config
+
+
+def expand_tags(eg_launchspec, tags):
+ if tags is not None:
+ eg_tags = []
+
+ for tag in tags:
+ eg_tag = spotinst.aws_elastigroup.Tag()
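+ # Each tag is expected to be a single-entry dict, e.g.
+ # {'Environment': 'prod'} (hypothetical value).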
+ if tag.keys():
+ eg_tag.tag_key = list(tag.keys())[0]
+ if tag.values():
+ eg_tag.tag_value = list(tag.values())[0]
+
+ eg_tags.append(eg_tag)
+
+ if len(eg_tags) > 0:
+ eg_launchspec.tags = eg_tags
+
+
+def expand_block_device_mappings(eg_launchspec, bdms):
+ if bdms is not None:
+ eg_bdms = []
+
+ for bdm in bdms:
+ eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
+
+ if bdm.get('ebs') is not None:
+ eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
+
+ eg_bdms.append(eg_bdm)
+
+ if len(eg_bdms) > 0:
+ eg_launchspec.block_device_mappings = eg_bdms
+
+
+def expand_network_interfaces(eg_launchspec, enis):
+ if enis is not None:
+ eg_enis = []
+
+ for eni in enis:
+ eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
+
+ eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
+
+ if eg_pias is not None:
+ eg_eni.private_ip_addresses = eg_pias
+
+ eg_enis.append(eg_eni)
+
+ if len(eg_enis) > 0:
+ eg_launchspec.network_interfaces = eg_enis
+
+
+def expand_scaling(eg, module):
+ up_scaling_policies = module.params['up_scaling_policies']
+ down_scaling_policies = module.params['down_scaling_policies']
+ target_tracking_policies = module.params['target_tracking_policies']
+
+ eg_scaling = spotinst.aws_elastigroup.Scaling()
+
+ if up_scaling_policies is not None:
+ eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
+ if len(eg_up_scaling_policies) > 0:
+ eg_scaling.up = eg_up_scaling_policies
+
+ if down_scaling_policies is not None:
+ eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
+ if len(eg_down_scaling_policies) > 0:
+ eg_scaling.down = eg_down_scaling_policies
+
+ if target_tracking_policies is not None:
+ eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
+ if len(eg_target_tracking_policies) > 0:
+ eg_scaling.target = eg_target_tracking_policies
+
+ if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
+ eg.scaling = eg_scaling
+
+
+def expand_list(items, fields, class_name):
+ if items is not None:
+ new_objects_list = []
+ for item in items:
+ new_obj = expand_fields(fields, item, class_name)
+ new_objects_list.append(new_obj)
+
+ return new_objects_list
+
+
+def expand_fields(fields, item, class_name):
+ class_ = getattr(spotinst.aws_elastigroup, class_name)
+ new_obj = class_()
+
+ # Handle primitive fields
+ if item is not None:
+ for field in fields:
+ if isinstance(field, dict):
+ ansible_field_name = field['ansible_field_name']
+ spotinst_field_name = field['spotinst_field_name']
+ else:
+ ansible_field_name = field
+ spotinst_field_name = field
+ if item.get(ansible_field_name) is not None:
+ setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
+
+ return new_obj
+
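+# A minimal usage sketch for expand_fields() (hypothetical values):
+#
+# params = {'min_size': 0, 'max_size': 5, 'target': 2, 'unit': 'instance'}
+# capacity = expand_fields(capacity_fields, params, 'Capacity')
+# # -> Capacity with minimum=0, maximum=5, target=2, unit='instance'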
+
+def expand_scaling_policies(scaling_policies):
+ eg_scaling_policies = []
+
+ for policy in scaling_policies:
+ eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
+ eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
+ eg_scaling_policies.append(eg_policy)
+
+ return eg_scaling_policies
+
+
+def expand_target_tracking_policies(tracking_policies):
+ eg_tracking_policies = []
+
+ for policy in tracking_policies:
+ eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
+ eg_tracking_policies.append(eg_policy)
+
+ return eg_tracking_policies
+
+
+def main():
+ fields = dict(
+ account_id=dict(type='str'),
+ availability_vs_cost=dict(type='str', required=True),
+ availability_zones=dict(type='list', required=True),
+ block_device_mappings=dict(type='list'),
+ chef=dict(type='dict'),
+ credentials_path=dict(type='path', default="~/.spotinst/credentials"),
+ do_not_update=dict(default=[], type='list'),
+ down_scaling_policies=dict(type='list'),
+ draining_timeout=dict(type='int'),
+ ebs_optimized=dict(type='bool'),
+ ebs_volume_pool=dict(type='list'),
+ ecs=dict(type='dict'),
+ elastic_beanstalk=dict(type='dict'),
+ elastic_ips=dict(type='list'),
+ fallback_to_od=dict(type='bool'),
+ id=dict(type='str'),
+ health_check_grace_period=dict(type='int'),
+ health_check_type=dict(type='str'),
+ health_check_unhealthy_duration_before_replacement=dict(type='int'),
+ iam_role_arn=dict(type='str'),
+ iam_role_name=dict(type='str'),
+ image_id=dict(type='str', required=True),
+ key_pair=dict(type='str'),
+ kubernetes=dict(type='dict'),
+ lifetime_period=dict(type='int'),
+ load_balancers=dict(type='list'),
+ max_size=dict(type='int', required=True),
+ mesosphere=dict(type='dict'),
+ min_size=dict(type='int', required=True),
+ monitoring=dict(type='str'),
+ multai_load_balancers=dict(type='list'),
+ multai_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ network_interfaces=dict(type='list'),
+ on_demand_count=dict(type='int'),
+ on_demand_instance_type=dict(type='str'),
+ opsworks=dict(type='dict'),
+ persistence=dict(type='dict'),
+ product=dict(type='str', required=True),
+ rancher=dict(type='dict'),
+ right_scale=dict(type='dict'),
+ risk=dict(type='int'),
+ roll_config=dict(type='dict'),
+ scheduled_tasks=dict(type='list'),
+ security_group_ids=dict(type='list', required=True),
+ shutdown_script=dict(type='str'),
+ signals=dict(type='list'),
+ spin_up_time=dict(type='int'),
+ spot_instance_types=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='list'),
+ target=dict(type='int', required=True),
+ target_group_arns=dict(type='list'),
+ tenancy=dict(type='str'),
+ terminate_at_end_of_billing_hour=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ unit=dict(type='str'),
+ user_data=dict(type='str'),
+ utilize_reserved_instances=dict(type='bool'),
+ uniqueness_by=dict(default='name', choices=['name', 'id']),
+ up_scaling_policies=dict(type='list'),
+ target_tracking_policies=dict(type='list'),
+ wait_for_instances=dict(type='bool', default=False),
+ wait_timeout=dict(type='int')
+ )
+
+ module = AnsibleModule(argument_spec=fields)
+
+ if not HAS_SPOTINST_SDK:
+ module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)")
+
+ # Retrieve creds file variables
+ creds_file_loaded_vars = dict()
+
+ credentials_path = module.params.get('credentials_path')
+
+ try:
+ with open(credentials_path, "r") as creds:
+ for line in creds:
+ eq_index = line.find('=')
+ var_name = line[:eq_index].strip()
+ string_value = line[eq_index + 1:].strip()
+ creds_file_loaded_vars[var_name] = string_value
+ except IOError:
+ pass
+ # End of creds file retrieval
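+ # Example credentials file content (hypothetical values):
+ #
+ # token = abcd-1234-your-token
+ # account = act-1a9dd2b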
+
+ token = module.params.get('token')
+ if not token:
+ token = os.environ.get('SPOTINST_TOKEN')
+ if not token:
+ token = creds_file_loaded_vars.get("token")
+
+ account = module.params.get('account_id')
+ if not account:
+ account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+ if not account:
+ account = creds_file_loaded_vars.get("account")
+
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False)
+
+ if account is not None:
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
+
+ group_id, message, has_changed = handle_elastigroup(client=client, module=module)
+
+ instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
+
+ module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py
new file mode 100644
index 00000000..db89bd46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage DNS entries on a Univention Corporate Server
+description:
+ - "This module allows managing DNS records on a Univention Corporate Server (UCS).
+ It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+ - Univention
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the DNS record is present or not.
+ name:
+ required: true
+ description:
+ - "Name of the record, this is also the DNS record. E.g. www for
+ www.example.com."
+ zone:
+ required: true
+ description:
+ - Corresponding DNS zone for this record, e.g. example.com.
+ type:
+ required: true
+ description:
+ - "Define the record type. C(host_record) is a A or AAAA record,
+ C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+ is a SRV record and C(txt_record) is a TXT record."
+ - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)."
+ data:
+ required: false
+ default: []
+ description:
+ - "Additional data for this record, e.g. ['a': '192.0.2.1'].
+ Required if C(state=present)."
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS record on a UCS
+ community.general.udm_dns_record:
+ name: www
+ zone: example.com
+ type: host_record
+ data:
+ a:
+ - 192.0.2.1
+ - 2001:0db8::42
+'''
+
+
+RETURN = '''#'''
+
+HAVE_UNIVENTION = False
+try:
+ from univention.admin.handlers.dns import (
+ forward_zone,
+ reverse_zone,
+ )
+ HAVE_UNIVENTION = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+ config,
+ uldap,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ type='str'),
+ name=dict(required=True,
+ type='str'),
+ data=dict(default=[],
+ type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['data'])
+ ])
+ )
+
+ if not HAVE_UNIVENTION:
+ module.fail_json(msg="This module requires univention python bindings")
+
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
+ dn = 'relativeDomainName={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ so = forward_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ ) or reverse_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ )
+ obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['name'] = name
+ for k, v in data.items():
+ obj[k] = v
+ diff = obj.diff()
+ changed = obj.diff() != []
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ else:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
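
For C(state=absent) the module looks the record up by its DN and calls obj.remove(), so deletion needs only the identifying fields; C(data) is enforced via required_if only for C(state=present). A removal task matching the example above:

    - name: Remove the DNS record again
      community.general.udm_dns_record:
        name: www
        zone: example.com
        type: host_record
        state: absent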
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py
new file mode 100644
index 00000000..2428650e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py
@@ -0,0 +1,231 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage DNS zones on a Univention Corporate Server
+description:
+ - "This module allows managing DNS zones on a Univention Corporate Server (UCS).
+ It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the DNS zone is present or not.
+ type:
+ required: true
+ description:
+ - Define if the zone is a forward or reverse DNS zone.
+ - "The available choices are: C(forward_zone), C(reverse_zone)."
+ zone:
+ required: true
+ description:
+ - DNS zone name, e.g. C(example.com).
+ nameserver:
+ required: false
+ description:
+ - List of appropriate name servers. Required if C(state=present).
+ interfaces:
+ required: false
+ description:
+ - List of interface IP addresses, on which the server should
+ respond for this zone. Required if C(state=present).
+
+ refresh:
+ required: false
+ default: 3600
+ description:
+ - Interval before the zone should be refreshed.
+ retry:
+ required: false
+ default: 1800
+ description:
+ - Interval that should elapse before a failed refresh should be retried.
+ expire:
+ required: false
+ default: 604800
+ description:
+ - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+ ttl:
+ required: false
+ default: 600
+ description:
+ - Minimum TTL field that should be exported with any RR from this zone.
+
+ contact:
+ required: false
+ default: ''
+ description:
+ - Contact person in the SOA record.
+ mx:
+ required: false
+ default: []
+ description:
+ - List of MX servers. (Must be declared as A or AAAA records.)
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS zone on a UCS
+ community.general.udm_dns_zone:
+ zone: example.com
+ type: forward_zone
+ nameserver:
+ - ucs.example.com
+ interfaces:
+ - 192.0.2.1
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def convert_time(time):
+ """Convert a time in seconds into the biggest unit"""
+ units = [
+ (24 * 60 * 60, 'days'),
+ (60 * 60, 'hours'),
+ (60, 'minutes'),
+ (1, 'seconds'),
+ ]
+
+ if time == 0:
+ return ('0', 'seconds')
+ for unit in units:
+ if time >= unit[0]:
+ return ('{0}'.format(time // unit[0]), unit[1])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver=dict(default=[],
+ type='list'),
+ interfaces=dict(default=[],
+ type='list'),
+ refresh=dict(default=3600,
+ type='int'),
+ retry=dict(default=1800,
+ type='int'),
+ expire=dict(default=604800,
+ type='int'),
+ ttl=dict(default=600,
+ type='int'),
+ contact=dict(default='',
+ type='str'),
+ mx=dict(default=[],
+ type='list'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['nameserver', 'interfaces'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=dns,{0}'.format(base_dn())
+ dn = 'zoneName={0},{1}'.format(zone, container)
+ if contact == '':
+ contact = 'root@{0}.'.format(zone)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('dns/{0}'.format(type), container)
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ diff=diff,
+ zone=zone
+ )
+
+
+if __name__ == '__main__':
+ main()
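
convert_time() normalizes each SOA interval to the largest whole unit before it is written to UDM, so 86400 becomes ('1', 'days') and 7200 becomes ('2', 'hours'). A sketch of a reverse zone with custom timers — the in-addr.arpa zone name is an assumption about what UDM accepts here:

    - name: Create a reverse zone with custom SOA timers
      community.general.udm_dns_zone:
        zone: 2.0.192.in-addr.arpa
        type: reverse_zone
        nameserver:
          - ucs.example.com
        interfaces:
          - 192.0.2.1
        refresh: 86400   # stored as ('1', 'days') by convert_time()
        retry: 7200      # stored as ('2', 'hours')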
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
new file mode 100644
index 00000000..d2cf2aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_group
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage POSIX groups on a Univention Corporate Server
+description:
+ - "This module allows managing user groups on a Univention Corporate Server (UCS).
+ It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the group is present or not.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the posix group.
+ type: str
+ description:
+ required: false
+ description:
+ - Group description.
+ type: str
+ position:
+ required: false
+ description:
+ - Define the full LDAP position of the group, e.g.
+ C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+ type: str
+ ou:
+ required: false
+ description:
+ - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+ type: str
+ subpath:
+ required: false
+ description:
+ - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+ type: str
+ default: "cn=groups"
+'''
+
+
+EXAMPLES = '''
+- name: Create a POSIX group
+ community.general.udm_group:
+ name: g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ subpath: 'cn=classes,cn=students,cn=groups'
+ ou: school
+
+# or
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ description=dict(default=None,
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=groups',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ description = module.params['description']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ groups = list(ldap_search(
+ '(&(objectClass=posixGroup)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ group_dn = 'cn={0},{1}'.format(name, container)
+
+ exists = bool(len(groups))
+
+ if state == 'present':
+ try:
+ if not exists:
+ grp = umc_module_for_add('groups/group', container)
+ else:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ grp['name'] = name
+ grp['description'] = description
+ diff = grp.diff()
+ changed = grp.diff() != []
+ if not module.check_mode:
+ if not exists:
+ grp.create()
+ else:
+ grp.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing group {0} in {1} failed".format(name, container)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ if not module.check_mode:
+ grp.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing group {0} failed".format(name)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
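
When C(position) is set it is used verbatim as the container; otherwise the container is assembled from C(subpath), C(ou) and the LDAP base DN. Removal again needs only the name:

    - name: Remove the POSIX group again
      community.general.udm_group:
        name: g123m-1A
        state: absent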
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py
new file mode 100644
index 00000000..3e8fb207
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_share
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage Samba shares on a Univention Corporate Server
+description:
+ - "This module allows managing Samba shares on a Univention Corporate
+ Server (UCS).
+ It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the share is present or not.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the share.
+ type: str
+ host:
+ required: false
+ description:
+ - Host FQDN (server which provides the share), e.g. C({{
+ ansible_fqdn }}). Required if C(state=present).
+ type: str
+ path:
+ required: false
+ description:
+ - Directory on the providing server, e.g. C(/home). Required if C(state=present).
+ type: path
+ sambaName:
+ required: false
+ description:
+ - Windows name. Required if C(state=present).
+ type: str
+ aliases: [ samba_name ]
+ ou:
+ required: true
+ description:
+ - Organisational unit, inside the LDAP Base DN.
+ type: str
+ owner:
+ default: '0'
+ description:
+ - Directory owner of the share's root directory.
+ type: str
+ group:
+ default: '0'
+ description:
+ - Directory owner group of the share's root directory.
+ type: str
+ directorymode:
+ default: '00755'
+ description:
+ - Permissions for the share's root directory.
+ type: str
+ root_squash:
+ default: true
+ description:
+ - Modify user ID for root user (root squashing).
+ type: bool
+ subtree_checking:
+ default: true
+ description:
+ - Subtree checking.
+ type: bool
+ sync:
+ default: 'sync'
+ description:
+ - NFS synchronisation.
+ type: str
+ writeable:
+ default: true
+ description:
+ - NFS write access.
+ type: bool
+ sambaBlockSize:
+ description:
+ - Block size.
+ type: str
+ aliases: [ samba_block_size ]
+ sambaBlockingLocks:
+ default: true
+ description:
+ - Blocking locks.
+ type: bool
+ aliases: [ samba_blocking_locks ]
+ sambaBrowseable:
+ description:
+ - Show in Windows network environment.
+ type: bool
+ default: True
+ aliases: [ samba_browsable ]
+ sambaCreateMode:
+ default: '0744'
+ description:
+ - File mode.
+ type: str
+ aliases: [ samba_create_mode ]
+ sambaCscPolicy:
+ default: 'manual'
+ description:
+ - Client-side caching policy.
+ type: str
+ aliases: [ samba_csc_policy ]
+ sambaCustomSettings:
+ default: []
+ description:
+ - Option name in smb.conf and its value.
+ type: list
+ aliases: [ samba_custom_settings ]
+ sambaDirectoryMode:
+ default: '0755'
+ description:
+ - Directory mode.
+ type: str
+ aliases: [ samba_directory_mode ]
+ sambaDirectorySecurityMode:
+ default: '0777'
+ description:
+ - Directory security mode.
+ type: str
+ aliases: [ samba_directory_security_mode ]
+ sambaDosFilemode:
+ default: false
+ description:
+ - Users with write access may modify permissions.
+ type: bool
+ aliases: [ samba_dos_filemode ]
+ sambaFakeOplocks:
+ default: false
+ description:
+ - Fake oplocks.
+ type: bool
+ aliases: [ samba_fake_oplocks ]
+ sambaForceCreateMode:
+ default: false
+ description:
+ - Force file mode.
+ type: bool
+ aliases: [ samba_force_create_mode ]
+ sambaForceDirectoryMode:
+ default: false
+ description:
+ - Force directory mode.
+ type: bool
+ aliases: [ samba_force_directory_mode ]
+ sambaForceDirectorySecurityMode:
+ default: false
+ description:
+ - Force directory security mode.
+ type: bool
+ aliases: [ samba_force_directory_security_mode ]
+ sambaForceGroup:
+ description:
+ - Force group.
+ type: str
+ aliases: [ samba_force_group ]
+ sambaForceSecurityMode:
+ default: false
+ description:
+ - Force security mode.
+ type: bool
+ aliases: [ samba_force_security_mode ]
+ sambaForceUser:
+ description:
+ - Force user.
+ type: str
+ aliases: [ samba_force_user ]
+ sambaHideFiles:
+ description:
+ - Hide files.
+ type: str
+ aliases: [ samba_hide_files ]
+ sambaHideUnreadable:
+ default: false
+ description:
+ - Hide unreadable files/directories.
+ type: bool
+ aliases: [ samba_hide_unreadable ]
+ sambaHostsAllow:
+ default: []
+ description:
+ - Allowed host/network.
+ type: list
+ aliases: [ samba_hosts_allow ]
+ sambaHostsDeny:
+ default: []
+ description:
+ - Denied host/network.
+ type: list
+ aliases: [ samba_hosts_deny ]
+ sambaInheritAcls:
+ default: true
+ description:
+ - Inherit ACLs.
+ type: bool
+ aliases: [ samba_inherit_acls ]
+ sambaInheritOwner:
+ default: false
+ description:
+ - Create files/directories with the owner of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_owner ]
+ sambaInheritPermissions:
+ default: false
+ description:
+ - Create files/directories with permissions of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_permissions ]
+ sambaInvalidUsers:
+ description:
+ - Invalid users or groups.
+ type: str
+ aliases: [ samba_invalid_users ]
+ sambaLevel2Oplocks:
+ default: true
+ description:
+ - Level 2 oplocks.
+ type: bool
+ aliases: [ samba_level_2_oplocks ]
+ sambaLocking:
+ default: true
+ description:
+ - Locking.
+ type: bool
+ aliases: [ samba_locking ]
+ sambaMSDFSRoot:
+ default: false
+ description:
+ - MSDFS root.
+ type: bool
+ aliases: [ samba_msdfs_root ]
+ sambaNtAclSupport:
+ default: true
+ description:
+ - NT ACL support.
+ type: bool
+ aliases: [ samba_nt_acl_support ]
+ sambaOplocks:
+ default: true
+ description:
+ - Oplocks.
+ type: bool
+ aliases: [ samba_oplocks ]
+ sambaPostexec:
+ description:
+ - Postexec script.
+ type: str
+ aliases: [ samba_postexec ]
+ sambaPreexec:
+ description:
+ - Preexec script.
+ type: str
+ aliases: [ samba_preexec ]
+ sambaPublic:
+ default: false
+ description:
+ - Allow anonymous read-only access with a guest user.
+ type: bool
+ aliases: [ samba_public ]
+ sambaSecurityMode:
+ default: '0777'
+ description:
+ - Security mode.
+ type: str
+ aliases: [ samba_security_mode ]
+ sambaStrictLocking:
+ default: 'Auto'
+ description:
+ - Strict locking.
+ type: str
+ aliases: [ samba_strict_locking ]
+ sambaVFSObjects:
+ description:
+ - VFS objects.
+ type: str
+ aliases: [ samba_vfs_objects ]
+ sambaValidUsers:
+ description:
+ - Valid users or groups.
+ type: str
+ aliases: [ samba_valid_users ]
+ sambaWriteList:
+ description:
+ - Restrict write access to these users/groups.
+ type: str
+ aliases: [ samba_write_list ]
+ sambaWriteable:
+ default: true
+ description:
+ - Samba write access.
+ type: bool
+ aliases: [ samba_writeable ]
+ nfs_hosts:
+ default: []
+ description:
+ - Only allow access for this host, IP address or network.
+ type: list
+ nfsCustomSettings:
+ default: []
+ description:
+ - Option name in exports file.
+ type: list
+ aliases: [ nfs_custom_settings ]
+'''
+
+
+EXAMPLES = '''
+- name: Create a share named home on the server ucs.example.com with the path /home
+ community.general.udm_share:
+ name: home
+ path: /home
+ host: ucs.example.com
+ sambaName: Home
+ ou: school  # ou is required by the module's argument spec
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ ou=dict(required=True,
+ type='str'),
+ owner=dict(type='str',
+ default='0'),
+ group=dict(type='str',
+ default='0'),
+ path=dict(type='path',
+ default=None),
+ directorymode=dict(type='str',
+ default='00755'),
+ host=dict(type='str',
+ default=None),
+ root_squash=dict(type='bool',
+ default=True),
+ subtree_checking=dict(type='bool',
+ default=True),
+ sync=dict(type='str',
+ default='sync'),
+ writeable=dict(type='bool',
+ default=True),
+ sambaBlockSize=dict(type='str',
+ aliases=['samba_block_size'],
+ default=None),
+ sambaBlockingLocks=dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable=dict(type='bool',
+ aliases=['samba_browsable'],
+ default=True),
+ sambaCreateMode=dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy=dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings=dict(type='list',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode=dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode=dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode=dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks=dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode=dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode=dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode=dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup=dict(type='str',
+ aliases=['samba_force_group'],
+ default=None),
+ sambaForceSecurityMode=dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser=dict(type='str',
+ aliases=['samba_force_user'],
+ default=None),
+ sambaHideFiles=dict(type='str',
+ aliases=['samba_hide_files'],
+ default=None),
+ sambaHideUnreadable=dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow=dict(type='list',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny=dict(type='list',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls=dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner=dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions=dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers=dict(type='str',
+ aliases=['samba_invalid_users'],
+ default=None),
+ sambaLevel2Oplocks=dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking=dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot=dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName=dict(type='str',
+ aliases=['samba_name'],
+ default=None),
+ sambaNtAclSupport=dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks=dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec=dict(type='str',
+ aliases=['samba_postexec'],
+ default=None),
+ sambaPreexec=dict(type='str',
+ aliases=['samba_preexec'],
+ default=None),
+ sambaPublic=dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode=dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking=dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects=dict(type='str',
+ aliases=['samba_vfs_objects'],
+ default=None),
+ sambaValidUsers=dict(type='str',
+ aliases=['samba_valid_users'],
+ default=None),
+ sambaWriteList=dict(type='str',
+ aliases=['samba_write_list'],
+ default=None),
+ sambaWriteable=dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts=dict(type='list',
+ default=[]),
+ nfsCustomSettings=dict(type='list',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['path', 'host', 'sambaName'])
+ ])
+ )
+ name = module.params['name']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=univentionShare)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn())
+ dn = 'cn={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('shares/share', container)
+ else:
+ obj = umc_module_for_edit('shares/share', dn)
+
+ module.params['printablename'] = '{0} ({1})'.format(name, module.params['host'])
+ for k in obj.keys():
+ if module.params[k] is True:
+ module.params[k] = '1'
+ elif module.params[k] is False:
+ module.params[k] = '0'
+ obj[k] = module.params[k]
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as err:
+ module.fail_json(
+ msg='Creating/editing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('shares/share', dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as err:
+ module.fail_json(
+ msg='Removing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
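
Before writing, boolean parameters are mapped to the '1'/'0' strings UDM expects, and required_if enforces C(path), C(host) and C(sambaName) only for C(state=present). Removing a share therefore needs just the name and the always-required C(ou) — the value school is illustrative:

    - name: Remove the home share again
      community.general.udm_share:
        name: home
        ou: school
        state: absent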
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py
new file mode 100644
index 00000000..efbd95f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py
@@ -0,0 +1,542 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: udm_user
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage POSIX users on a Univention Corporate Server
+description:
+ - "This module allows managing POSIX users on a Univention Corporate
+ Server (UCS).
+ It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the user is present or not.
+ type: str
+ username:
+ required: true
+ description:
+ - User name
+ aliases: ['name']
+ type: str
+ firstname:
+ description:
+ - First name. Required if C(state=present).
+ type: str
+ lastname:
+ description:
+ - Last name. Required if C(state=present).
+ type: str
+ password:
+ description:
+ - Password. Required if C(state=present).
+ type: str
+ birthday:
+ description:
+ - Birthday
+ type: str
+ city:
+ description:
+ - City of the user's business address.
+ type: str
+ country:
+ description:
+ - Country of the user's business address.
+ type: str
+ department_number:
+ description:
+ - Department number of the user's business address.
+ aliases: [ departmentNumber ]
+ type: str
+ description:
+ description:
+ - Description (not gecos)
+ type: str
+ display_name:
+ description:
+ - Display name (not gecos)
+ aliases: [ displayName ]
+ type: str
+ email:
+ default: ['']
+ description:
+ - A list of e-mail addresses.
+ type: list
+ employee_number:
+ description:
+ - Employee number
+ aliases: [ employeeNumber ]
+ type: str
+ employee_type:
+ description:
+ - Employee type
+ aliases: [ employeeType ]
+ type: str
+ gecos:
+ description:
+ - GECOS
+ type: str
+ groups:
+ default: []
+ description:
+ - "POSIX groups, the LDAP DNs of the groups will be found with the
+ LDAP filter for each group as $GROUP:
+ C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ type: list
+ home_share:
+ description:
+ - "Home NFS share. Must be a LDAP DN, e.g.
+ C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ aliases: [ homeShare ]
+ type: str
+ home_share_path:
+ description:
+ - Path to home NFS share, inside the homeShare.
+ aliases: [ homeSharePath ]
+ type: str
+ home_telephone_number:
+ default: []
+ description:
+ - List of private telephone numbers.
+ aliases: [ homeTelephoneNumber ]
+ type: list
+ homedrive:
+ description:
+ - Windows home drive, e.g. C("H:").
+ type: str
+ mail_alternative_address:
+ default: []
+ description:
+ - List of alternative e-mail addresses.
+ aliases: [ mailAlternativeAddress ]
+ type: list
+ mail_home_server:
+ description:
+ - FQDN of mail server
+ aliases: [ mailHomeServer ]
+ type: str
+ mail_primary_address:
+ description:
+ - Primary e-mail address
+ aliases: [ mailPrimaryAddress ]
+ type: str
+ mobile_telephone_number:
+ default: []
+ description:
+ - List of mobile telephone numbers.
+ aliases: [ mobileTelephoneNumber ]
+ type: list
+ organisation:
+ description:
+ - Organisation
+ aliases: [ organization ]
+ type: str
+ overridePWHistory:
+ type: bool
+ default: 'no'
+ description:
+ - Override password history
+ aliases: [ override_pw_history ]
+ overridePWLength:
+ type: bool
+ default: 'no'
+ description:
+ - Override password check
+ aliases: [ override_pw_length ]
+ pager_telephonenumber:
+ default: []
+ description:
+ - List of pager telephone numbers.
+ aliases: [ pagerTelephonenumber ]
+ type: list
+ phone:
+ description:
+ - List of telephone numbers.
+ type: list
+ postcode:
+ description:
+ - Postal code of the user's business address.
+ type: str
+ primary_group:
+ description:
+ - Primary group. This must be the group LDAP DN.
+ - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
+ aliases: [ primaryGroup ]
+ type: str
+ profilepath:
+ description:
+ - Windows profile directory
+ type: str
+ pwd_change_next_login:
+ choices: [ '0', '1' ]
+ description:
+ - Change password on next login.
+ aliases: [ pwdChangeNextLogin ]
+ type: str
+ room_number:
+ description:
+ - Room number of the user's business address.
+ aliases: [ roomNumber ]
+ type: str
+ samba_privileges:
+ description:
+ - "Samba privilege, like allow printer administration, do domain
+ join."
+ aliases: [ sambaPrivileges ]
+ type: list
+ samba_user_workstations:
+ description:
+ - Allow authentication only on these Microsoft Windows hosts.
+ aliases: [ sambaUserWorkstations ]
+ type: list
+ sambahome:
+ description:
+ - Windows home path, e.g. C('\\$FQDN\$USERNAME').
+ type: str
+ scriptpath:
+ description:
+ - Windows logon script.
+ type: str
+ secretary:
+ default: []
+ description:
+ - A list of superiors as LDAP DNs.
+ type: list
+ serviceprovider:
+ default: ['']
+ description:
+ - Enable user for the following service providers.
+ type: list
+ shell:
+ default: '/bin/bash'
+ description:
+ - Login shell
+ type: str
+ street:
+ description:
+ - Street of the user's business address.
+ type: str
+ title:
+ description:
+ - Title, e.g. C(Prof.).
+ type: str
+ unixhome:
+ description:
+ - Unix home directory
+ - If not specified, it defaults to C(/home/$USERNAME).
+ type: str
+ userexpiry:
+ description:
+ - Account expiry date, e.g. C(1999-12-31).
+ - If not specified, it defaults to the current day plus one year.
+ type: str
+ position:
+ default: ''
+ description:
+ - "Define the whole position of users object inside the LDAP tree,
+ e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ type: str
+ update_password:
+ default: always
+ choices: [ always, on_create ]
+ description:
+ - "C(always) will update passwords if they differ.
+ C(on_create) will only set the password for newly created users."
+ type: str
+ ou:
+ default: ''
+ description:
+ - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ LDAP OU C(ou=school,dc=example,dc=com)."
+ type: str
+ subpath:
+ default: 'cn=users'
+ description:
+ - "LDAP subpath inside the organizational unit, e.g.
+ C(cn=teachers,cn=users) for LDAP container
+ C(cn=teachers,cn=users,dc=example,dc=com)."
+ type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create a user on a UCS
+ community.general.udm_user:
+ name: FooBar
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ ou: school
+ subpath: 'cn=teachers,cn=users'
+
+# or define the position
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+import crypt
+from datetime import date, timedelta
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
+ module = AnsibleModule(
+ argument_spec=dict(
+ birthday=dict(type='str'),
+ city=dict(type='str'),
+ country=dict(type='str'),
+ department_number=dict(type='str',
+ aliases=['departmentNumber']),
+ description=dict(type='str'),
+ display_name=dict(type='str',
+ aliases=['displayName']),
+ email=dict(default=[''],
+ type='list'),
+ employee_number=dict(type='str',
+ aliases=['employeeNumber']),
+ employee_type=dict(type='str',
+ aliases=['employeeType']),
+ firstname=dict(type='str'),
+ gecos=dict(type='str'),
+ groups=dict(default=[],
+ type='list'),
+ home_share=dict(type='str',
+ aliases=['homeShare']),
+ home_share_path=dict(type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number=dict(default=[],
+ type='list',
+ aliases=['homeTelephoneNumber']),
+ homedrive=dict(type='str'),
+ lastname=dict(type='str'),
+ mail_alternative_address=dict(default=[],
+ type='list',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server=dict(type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address=dict(type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number=dict(default=[],
+ type='list',
+ aliases=['mobileTelephoneNumber']),
+ organisation=dict(type='str',
+ aliases=['organization']),
+ overridePWHistory=dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength=dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber=dict(default=[],
+ type='list',
+ aliases=['pagerTelephonenumber']),
+ password=dict(type='str',
+ no_log=True),
+ phone=dict(default=[],
+ type='list'),
+ postcode=dict(type='str'),
+ primary_group=dict(type='str',
+ aliases=['primaryGroup']),
+ profilepath=dict(type='str'),
+ pwd_change_next_login=dict(type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number=dict(type='str',
+ aliases=['roomNumber']),
+ samba_privileges=dict(default=[],
+ type='list',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations=dict(default=[],
+ type='list',
+ aliases=['sambaUserWorkstations']),
+ sambahome=dict(type='str'),
+ scriptpath=dict(type='str'),
+ secretary=dict(default=[],
+ type='list'),
+ serviceprovider=dict(default=[''],
+ type='list'),
+ shell=dict(default='/bin/bash',
+ type='str'),
+ street=dict(type='str'),
+ title=dict(type='str'),
+ unixhome=dict(type='str'),
+ userexpiry=dict(type='str'),
+ username=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ update_password=dict(default='always',
+ choices=['always', 'on_create'],
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=users',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['firstname', 'lastname', 'password'])
+ ])
+ )
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ users = list(ldap_search(
+ '(&(objectClass=posixAccount)(uid={0}))'.format(username),
+ attr=['uid']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ user_dn = 'uid={0},{1}'.format(username, container)
+
+ exists = bool(len(users))
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('users/user', container)
+ else:
+ obj = umc_module_for_edit('users/user', user_dn)
+
+ if module.params['displayName'] is None:
+ module.params['displayName'] = '{0} {1}'.format(
+ module.params['firstname'],
+ module.params['lastname']
+ )
+ if module.params['unixhome'] is None:
+ module.params['unixhome'] = '/home/{0}'.format(
+ module.params['username']
+ )
+ for k in obj.keys():
+ if (k != 'password' and
+ k != 'groups' and
+ k != 'overridePWHistory' and
+ k in module.params and
+ module.params[k] is not None):
+ obj[k] = module.params[k]
+ # handle some special values
+ obj['e-mail'] = module.params['email']
+ if 'userexpiry' in obj and obj.get('userexpiry') is None:
+ obj['userexpiry'] = expiry
+ password = module.params['password']
+ if obj['password'] is None:
+ obj['password'] = password
+ if module.params['update_password'] == 'always':
+ old_password = obj['password'].split('}', 2)[1]
+ if crypt.crypt(password, old_password) != old_password:
+ obj['overridePWHistory'] = module.params['overridePWHistory']
+ obj['overridePWLength'] = module.params['overridePWLength']
+ obj['password'] = password
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing user {0} in {1} failed".format(
+ username,
+ container
+ )
+ )
+ try:
+ groups = module.params['groups']
+ if groups:
+ filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
+ ')(cn='.join(groups)
+ )
+ group_dns = list(ldap_search(filter, attr=['dn']))
+ for dn in group_dns:
+ grp = umc_module_for_edit('groups/group', dn[0])
+ if user_dn not in grp['users']:
+ grp['users'].append(user_dn)
+ if not module.check_mode:
+ grp.modify()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Adding groups to user {0} failed".format(username)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('users/user', user_dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing user {0} failed".format(username)
+ )
+
+ module.exit_json(
+ changed=changed,
+ username=username,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
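
With C(update_password=always) the stored crypt hash is recomputed from the supplied password and only rewritten on mismatch, while C(on_create) leaves existing passwords untouched. A sketch of the latter:

    - name: Create a user whose password is only set at creation time
      community.general.udm_user:
        name: foo
        password: secure_password
        firstname: Foo
        lastname: Bar
        update_password: on_create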
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py
new file mode 100644
index 00000000..9a69ce54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a Webfaction application using Ansible and the Webfaction API
+#
+# Valid application types can be found by looking here:
+# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_app
+short_description: Add or remove applications on a Webfaction host
+description:
+ - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See U(https://docs.webfaction.com/xmlrpc-api/) for more info.
+
+options:
+ name:
+ description:
+ - The name of the application
+ required: true
+
+ state:
+ description:
+ - Whether the application should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
+ required: true
+
+ autostart:
+ description:
+ - Whether the app should restart with an C(autostart.cgi) script
+ type: bool
+ default: 'no'
+
+ extra_info:
+ description:
+ - Any extra parameters required by the app
+ default: ''
+
+ port_open:
+ description:
+ - If the port should be opened
+ type: bool
+ default: 'no'
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a test app
+ community.general.webfaction_app:
+ name: "my_wsgi_app1"
+ state: present
+ type: mod_wsgi35-python27
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=True),
+ autostart=dict(required=False, type='bool', default=False),
+ extra_info=dict(required=False, default=""),
+ port_open=dict(required=False, type='bool', default=False),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ app_name = module.params['name']
+ app_type = module.params['type']
+ app_state = module.params['state']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ app_list = webfaction.list_apps(session_id)
+ app_map = dict([(i['name'], i) for i in app_list])
+ existing_app = app_map.get(app_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if app_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_app:
+ if existing_app['type'] != app_type:
+ module.fail_json(msg="App already exists with different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change it
+ # Should check other parameters.
+ module.exit_json(
+ changed=False,
+ result=existing_app,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ result.update(
+ webfaction.create_app(
+ session_id, app_name, app_type,
+ module.boolean(module.params['autostart']),
+ module.params['extra_info'],
+ module.boolean(module.params['port_open'])
+ )
+ )
+
+ elif app_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_app:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_app(session_id, app_name)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(app_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
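
If an app of the same name and type already exists, the module exits with changed=False, and a type mismatch fails hard, so re-runs are idempotent. Removal mirrors creation:

    - name: Remove the test app again
      community.general.webfaction_app:
        name: my_wsgi_app1
        state: absent
        login_name: "{{webfaction_user}}"
        login_password: "{{webfaction_passwd}}"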
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py
new file mode 100644
index 00000000..19bc6ea2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a webfaction database using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+ - Add or remove a database on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See U(https://docs.webfaction.com/xmlrpc-api/) for more info.
+options:
+
+ name:
+ description:
+ - The name of the database
+ required: true
+
+ state:
+ description:
+ - Whether the database should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of database to create.
+ required: true
+ choices: ['mysql', 'postgresql']
+
+ password:
+ description:
+ - The password for the new database user.
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+'''
+
+EXAMPLES = '''
+ # This will also create a default DB user with the same
+ # name as the database, and the specified password.
+
+ - name: Create a database
+ community.general.webfaction_db:
+ name: "{{webfaction_user}}_db1"
+ password: mytestsql
+ type: mysql
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+
+ # Note that, for symmetry's sake, deleting a database using
+ # 'state: absent' will also delete the matching user.
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=True, choices=['mysql', 'postgresql']),
+ password=dict(required=False, default=None, no_log=True),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ db_name = module.params['name']
+ db_state = module.params['state']
+ db_type = module.params['type']
+ db_passwd = module.params['password']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ db_list = webfaction.list_dbs(session_id)
+ db_map = dict([(i['name'], i) for i in db_list])
+ existing_db = db_map.get(db_name)
+
+ user_list = webfaction.list_db_users(session_id)
+ user_map = dict([(i['username'], i) for i in user_list])
+ existing_user = user_map.get(db_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if db_state == 'present':
+
+ # Does a database with this name already exist?
+ if existing_db:
+ # Yes, but of a different type - fail
+ if existing_db['db_type'] != db_type:
+ module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the db
+ # and default user.
+ result.update(
+ webfaction.create_db(
+ session_id, db_name, db_type, db_passwd
+ )
+ )
+
+ elif db_state == 'absent':
+
+ # If this isn't a dry run...
+ if not module.check_mode:
+
+ if not (existing_db or existing_user):
+ module.exit_json(changed=False,)
+
+ if existing_db:
+ # Delete the db if it exists
+ result.update(
+ webfaction.delete_db(session_id, db_name, db_type)
+ )
+
+ if existing_user:
+ # Delete the default db user if it exists
+ result.update(
+ webfaction.delete_db_user(session_id, db_name, db_type)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(db_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
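
As the code above shows, C(state=absent) deletes the database and the identically named default user independently, each only if it still exists. For example:

    - name: Delete the database and its default user
      community.general.webfaction_db:
        name: "{{webfaction_user}}_db1"
        type: mysql
        state: absent
        login_name: "{{webfaction_user}}"
        login_password: "{{webfaction_passwd}}"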
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py
new file mode 100644
index 00000000..a348ef51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+ - Add or remove domains or subdomains on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+ If you don't specify subdomains, the domain will be deleted.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See U(https://docs.webfaction.com/xmlrpc-api/) for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the domain
+ required: true
+
+ state:
+ description:
+ - Whether the domain should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ subdomains:
+ description:
+ - Any subdomains to create.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a test domain
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: present
+ subdomains:
+ - www
+ - blog
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+ - name: Delete test domain and any subdomains
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: absent
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ subdomains=dict(required=False, default=[], type='list'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ domain_name = module.params['name']
+ domain_state = module.params['state']
+ domain_subdomains = module.params['subdomains']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ domain_list = webfaction.list_domains(session_id)
+ domain_map = dict([(i['domain'], i) for i in domain_list])
+ existing_domain = domain_map.get(domain_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if domain_state == 'present':
+
+ # Does a domain with this name already exist?
+ if existing_domain:
+
+ if set(existing_domain['subdomains']) >= set(domain_subdomains):
+ # If it exists with the right subdomains, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the domain
+ result.update(
+ webfaction.create_domain(
+ *positional_args
+ )
+ )
+
+ elif domain_state == 'absent':
+
+ # If the domain's already not there, nothing changed.
+ if not existing_domain:
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the domain
+ result.update(
+ webfaction.delete_domain(*positional_args)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py
new file mode 100644
index 00000000..144fad29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create webfaction mailbox using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+ - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ mailbox_name:
+ description:
+ - The name of the mailbox
+ required: true
+
+ mailbox_password:
+ description:
+ - The password for the mailbox
+ required: true
+
+ state:
+ description:
+ - Whether the mailbox should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a mailbox
+ community.general.webfaction_mailbox:
+ mailbox_name: mybox
+ mailbox_password: myboxpw
+ state: present
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
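+
+ # Removal is analogous; a sketch based on the module's C(state) choices
+ # rather than an example from the original documentation:
+ - name: Delete the mailbox
+   community.general.webfaction_mailbox:
+     mailbox_name: mybox
+     mailbox_password: myboxpw
+     state: absent
+     login_name: "{{webfaction_user}}"
+     login_password: "{{webfaction_passwd}}"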
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ mailbox_name=dict(required=True),
+ mailbox_password=dict(required=True, no_log=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ mailbox_name = module.params['mailbox_name']
+ site_state = module.params['state']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
+ existing_mailbox = mailbox_name in mailbox_list
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a mailbox with this name already exist?
+ if existing_mailbox:
+ module.exit_json(changed=False)
+
+ positional_args = [session_id, mailbox_name]
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the mailbox
+ result.update(webfaction.create_mailbox(*positional_args))
+
+ elif site_state == 'absent':
+
+ # If the mailbox is already not there, nothing changed.
+ if not existing_mailbox:
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the mailbox
+ result.update(webfaction.delete_mailbox(session_id, mailbox_name))
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(changed=True, result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py
new file mode 100644
index 00000000..8ae98280
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction website using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+ - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+ address. You can use a DNS name.
+ - If a site of the same name exists in the account but on a different host, the operation will exit.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the website
+ required: true
+
+ state:
+ description:
+ - Whether the website should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ host:
+ description:
+ - The webfaction host on which the site should be created.
+ required: true
+
+ https:
+ description:
+ - Whether or not to use HTTPS
+ type: bool
+ default: 'no'
+
+ site_apps:
+ description:
+ - A mapping of URLs to apps
+ default: []
+
+ subdomains:
+ description:
+ - A list of subdomains associated with this site.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create website
+ community.general.webfaction_site:
+ name: testsite1
+ state: present
+ host: myhost.webfaction.com
+ subdomains:
+ - 'testsite1.my_domain.org'
+ site_apps:
+ - ['testapp1', '/']
+ https: no
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ # You can specify an IP address or hostname.
+ host=dict(required=True),
+ https=dict(required=False, type='bool', default=False),
+ subdomains=dict(required=False, type='list', default=[]),
+ site_apps=dict(required=False, type='list', default=[]),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ site_name = module.params['name']
+ site_state = module.params['state']
+ site_host = module.params['host']
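+ # The Webfaction API identifies sites by IP address rather than by
+ # hostname, so resolve the host parameter to an IP up front.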
+ site_ip = socket.gethostbyname(site_host)
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ site_list = webfaction.list_websites(session_id)
+ site_map = dict([(i['name'], i) for i in site_list])
+ existing_site = site_map.get(site_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a site with this name already exist?
+ if existing_site:
+
+ # If yes, but it's on a different IP address, then fail.
+ # If we wanted to allow relocation, we could add a 'relocate=true' option
+ # which would get the existing IP address, delete the site there, and create it
+ # at the new address. A bit dangerous, perhaps, so for now we'll require manual
+ # deletion if it's on another host.
+
+ if existing_site['ip'] != site_ip:
+ module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
+
+ # If it's on this host and the key parameters are the same, nothing needs to be done.
+
+ if (existing_site['https'] == module.boolean(module.params['https'])) and \
+ (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
+ (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
+ module.exit_json(
+ changed=False
+ )
+
+ positional_args = [
+ session_id, site_name, site_ip,
+ module.boolean(module.params['https']),
+ module.params['subdomains'],
+ ]
+ for a in module.params['site_apps']:
+ positional_args.append((a[0], a[1]))
+
+ if not module.check_mode:
+ # If this isn't a dry run, create or modify the site
+ result.update(
+ webfaction.create_website(
+ *positional_args
+ ) if not existing_site else webfaction.update_website(
+ *positional_args
+ )
+ )
+
+ elif site_state == 'absent':
+
+ # If the site's already not there, nothing changed.
+ if not existing_site:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the site
+ result.update(
+ webfaction.delete_website(session_id, site_name, site_ip)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py
new file mode 100644
index 00000000..a9a5fb4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py
@@ -0,0 +1,1933 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest
+short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ modify various virtual machine components like network and disk, rename a virtual machine and
+ remove a virtual machine with associated components.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+ Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. parameters, is supported on
+ XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
+ detect if such support is available and utilize it, otherwise it will fall back to a custom method of configuration via xenstore. Since the XenServer Guest
+ agent only supports None and Static types of network configuration, where None means a DHCP configured interface, C(networks.type) and C(networks.type6)
+ values C(none) and C(dhcp) have the same effect. More info here:
+ U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
+- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore
+ C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through the
+ WMI interface on Windows guests. They can also be found in the VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
+ to implement boot time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters.
+ Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any
+ parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
+ useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
+ U(https://support.citrix.com/article/CTX226713)'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+ - If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
+ - If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
+ - If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
+ - If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
+ type: str
+ default: present
+ choices: [ present, absent, poweredon ]
+ name:
+ description:
+ - Name of the VM to work with.
+ - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+ - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ required: yes
+ aliases: [ name_label ]
+ name_desc:
+ description:
+ - VM description.
+ type: str
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
+ type: str
+ template:
+ description:
+ - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
+ - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with the same name are found.
+ - In case of multiple templates/VMs/snapshots with the same name, use C(template_uuid) to uniquely specify the source template.
+ - If VM already exists, this setting will be ignored.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ template_src ]
+ template_uuid:
+ description:
+ - UUID of a template, an existing VM or a snapshot that should be used to create VM.
+ - It is required if template name is not unique.
+ type: str
+ is_template:
+ description:
+ - Convert VM to template.
+ type: bool
+ default: no
+ folder:
+ description:
+ - Destination folder for VM.
+ - This parameter is case sensitive.
+ - 'Example:'
+ - ' folder: /folder1/folder2'
+ type: str
+ hardware:
+ description:
+ - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
+ - 'Valid parameters are:'
+ - ' - C(num_cpus) (integer): Number of CPUs.'
+ - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).'
+ - ' - C(memory_mb) (integer): Amount of memory in MB.'
+ type: dict
+ disks:
+ description:
+ - A list of disks to add to VM.
+ - All parameters are case sensitive.
+ - Removing or detaching existing disks of VM is not supported.
+ - 'Required parameters per entry:'
+ - ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.'
+ - 'Optional parameters per entry:'
+ - ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.'
+ - ' - C(name_desc) (string): Disk description.'
+ - ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.'
+ - ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.'
+ type: list
+ elements: dict
+ aliases: [ disk ]
+ cdrom:
+ description:
+ - A CD-ROM configuration for the VM.
+ - All parameters are case sensitive.
+ - 'Valid parameters are:'
+ - ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.'
+ - ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)).
+ Required if C(type) is set to C(iso).'
+ type: dict
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - All parameters are case sensitive.
+ - 'Required parameters per entry:'
+ - ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.'
+ - 'Optional parameters per entry (used for VM hardware):'
+ - ' - C(mac) (string): Customize MAC address of the interface.'
+ - 'Optional parameters per entry (used for OS customization):'
+ - ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format <IPv4 address>/<prefix> instead of using C(netmask).'
+ - ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.'
+ - ' - C(gateway) (string): Static IPv4 gateway.'
+ - ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format <IPv6 address>/<prefix>.'
+ - ' - C(gateway6) (string): Static IPv6 gateway.'
+ type: list
+ elements: dict
+ aliases: [ network ]
+ home_server:
+ description:
+ - Name of a XenServer host that will be a Home Server for the VM.
+ - This parameter is case sensitive.
+ type: str
+ custom_params:
+ description:
+ - Define a list of custom VM params to set on VM.
+ - Useful for advanced users familiar with managing VM params through the xe CLI.
+ - A custom value object takes two fields C(key) and C(value) (see example below).
+ type: list
+ elements: dict
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+ - 'By default, the module will wait indefinitely for the VM to acquire an IP address if C(wait_for_ip_address: yes).'
+ - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+ - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+ linked_clone:
+ description:
+ - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy.
+ - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter.
+ type: bool
+ default: no
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+ - This parameter is useful for removing a VM in running state or for reconfiguring VM params that require the VM to be shut down.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a VM from a template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: CentOS 7
+ disks:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ memory_mb: 512
+ cdrom:
+ type: iso
+ iso_name: guest-tools.iso
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: yes
+ delegate_to: localhost
+ register: deploy
+
+- name: Create a VM template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_6
+ is_template: yes
+ disk:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ memory_mb: 512
+ num_cpus: 1
+ delegate_to: localhost
+ register: deploy
+
+- name: Rename a VM (requires the VM's UUID)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a VM by UUID
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ delegate_to: localhost
+
+- name: Modify custom params (boot order)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_8
+ state: present
+ custom_params:
+ - key: HVM_boot_params
+ value: { "order": "ndc" }
+ delegate_to: localhost
+
+- name: Customize network parameters
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_10
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100/24
+ gateway: 192.168.1.1
+ - type: dhcp
+ delegate_to: localhost
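+
+ # Once deployed with the custom (xenstore-based) configuration method, the
+ # written parameters could be inspected inside a *nix guest, e.g. (device
+ # number 0 assumed for the first interface):
+ #
+ # $ xenstore read vm-data/networks/0/ip
+ # $ xenstore ls vm-data/networks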
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+changes:
+ description: Detected or made changes to VM
+ returned: always
+ type: list
+ sample: [
+ {
+ "hardware": [
+ "num_cpus"
+ ]
+ },
+ {
+ "disks_changed": [
+ [],
+ [
+ "size"
+ ]
+ ]
+ },
+ {
+ "disks_new": [
+ {
+ "name": "new-disk",
+ "name_desc": "",
+ "position": 2,
+ "size_gb": "4",
+ "vbd_userdevice": "2"
+ }
+ ]
+ },
+ {
+ "cdrom": [
+ "type",
+ "iso_name"
+ ]
+ },
+ {
+ "networks_changed": [
+ [
+ "mac"
+ ],
+ ]
+ },
+ {
+ "networks_new": [
+ {
+ "name": "Pool-wide network associated with eth2",
+ "position": 1,
+ "vif_device": "1"
+ }
+ ]
+ },
+ "need_poweredoff"
+ ]
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
+ is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
+ is_valid_ip6_addr, is_valid_ip6_prefix)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def exists(self):
+ """Returns True if VM exists, else False."""
+ return self.vm_ref is not None
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+ def deploy(self):
+ """Deploys new VM from template."""
+ # Safety check.
+ if self.exists():
+ self.module.fail_json(msg="Called deploy on existing VM!")
+
+ try:
+ templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
+ msg_prefix="VM deploy: ")
+
+ # Is this an existing running VM?
+ if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
+ self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")
+
+ # Find an SR we can use for VM.copy(). We use the SR of the
+ # first disk if specified, or the default SR if not.
+ disk_params_list = self.module.params['disks']
+
+ sr_ref = None
+
+ if disk_params_list:
+ disk_params = disk_params_list[0]
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+ sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM deploy disks[0]: ")
+
+ if not sr_ref:
+ if self.default_sr_ref != "OpaqueRef:NULL":
+ sr_ref = self.default_sr_ref
+ else:
+ self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")
+
+ # VM name could be an empty string which is bad.
+ if self.module.params['name'] is not None and not self.module.params['name']:
+ self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Now we can instantiate the VM. We use VM.clone for a linked
+ # clone and VM.copy for a full copy.
+ if self.module.params['linked_clone']:
+ self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
+ else:
+ self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)
+
+ # Description is copied over from template so we reset it.
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")
+
+ # If the template is one of the built-in XenServer templates,
+ # we have to take some additional steps.
+ # Note: VM.get_is_default_template() is supported from XenServer 7.2
+ # onward so we use an alternative way.
+ templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)
+
+ if "default_template" in templ_other_config and templ_other_config['default_template']:
+ # The other_config of built-in XenServer templates has a key called
+ # 'disks' with the following content:
+ # disks: <provision><disk bootable="true" device="0" size="10737418240" sr="" type="system"/></provision>
+ # This value of other_config is copied to the cloned or copied VM
+ # and prevents provisioning of the VM because sr is not specified
+ # and XAPI returns an error. To get around this, we remove the
+ # 'disks' key and add disks to the VM later ourselves.
+ vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)
+
+ if "disks" in vm_other_config:
+ del vm_other_config['disks']
+
+ self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)
+
+ # At this point we have VM ready for provisioning.
+ self.xapi_session.xenapi.VM.provision(self.vm_ref)
+
+ # After provisioning we can prepare vm_params for reconfigure().
+ self.gather_params()
+
+ # VM is almost ready. We just need to reconfigure it...
+ self.reconfigure()
+
+ # Power on VM if needed.
+ if self.module.params['state'] == "poweredon":
+ self.set_power_state("poweredon")
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def reconfigure(self):
+ """Reconfigures an existing VM.
+
+ Returns:
+ list: parameters that were reconfigured.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called reconfigure on non existing VM!")
+
+ config_changes = self.get_changes()
+
+ vm_power_state_save = self.vm_params['power_state'].lower()
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return config_changes
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
+ self.set_power_state("shutdownguest")
+
+ try:
+ for change in config_changes:
+ if isinstance(change, six.string_types):
+ if change == "name":
+ self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
+ elif change == "name_desc":
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
+ elif change == "folder":
+ self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
+
+ if self.module.params['folder']:
+ self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
+ elif change == "home_server":
+ if self.module.params['home_server']:
+ host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
+ else:
+ host_ref = "OpaqueRef:NULL"
+
+ self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
+ elif isinstance(change, dict):
+ if change.get('hardware'):
+ for hardware_change in change['hardware']:
+ if hardware_change == "num_cpus":
+ num_cpus = int(self.module.params['hardware']['num_cpus'])
+
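+ # XAPI requires VCPUs_at_startup <= VCPUs_max at all
+ # times, hence the order of the two calls below depends
+ # on whether the CPU count is lowered or raised.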
+ if num_cpus < int(self.vm_params['VCPUs_at_startup']):
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ else:
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ elif hardware_change == "num_cpu_cores_per_socket":
+ self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
+ num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
+
+ if num_cpu_cores_per_socket > 1:
+ self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
+ elif hardware_change == "memory_mb":
+ memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
+ vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
+
+ self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
+ elif change.get('disks_changed'):
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+ position = 0
+
+ for disk_change_list in change['disks_changed']:
+ for disk_change in disk_change_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
+
+ if disk_change == "name":
+ self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
+ elif disk_change == "name_desc":
+ self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
+ elif disk_change == "size":
+ self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
+ "VM reconfigure disks[%s]: " % position)))
+
+ position += 1
+ elif change.get('disks_new'):
+ for position, disk_userdevice in change['disks_new']:
+ disk_params = self.module.params['disks'][position]
+
+ disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
+ disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
+
+ if disk_params.get('sr_uuid'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
+ elif disk_params.get('sr'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
+ else:
+ sr_ref = self.default_sr_ref
+
+ disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
+
+ new_disk_vdi = {
+ "name_label": disk_name,
+ "name_description": disk_name_desc,
+ "SR": sr_ref,
+ "virtual_size": disk_size,
+ "type": "user",
+ "sharable": False,
+ "read_only": False,
+ "other_config": {},
+ }
+
+ new_disk_vbd = {
+ "VM": self.vm_ref,
+ "VDI": None,
+ "userdevice": disk_userdevice,
+ "bootable": False,
+ "mode": "RW",
+ "type": "Disk",
+ "empty": False,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
+ vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VBD.plug(vbd_ref_new)
+
+ elif change.get('cdrom'):
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If there is no CD present, we have to create one.
+ if not vm_cdrom_params_list:
+ # We will try to place the CD-ROM at userdevice position
+ # 3 (which is the default) if it is not already occupied,
+ # else we will place it at the first allowed position.
+ cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if "3" in cdrom_userdevices_allowed:
+ cdrom_userdevice = "3"
+ else:
+ cdrom_userdevice = cdrom_userdevices_allowed[0]
+
+ cdrom_vbd = {
+ "VM": self.vm_ref,
+ "VDI": "OpaqueRef:NULL",
+ "userdevice": cdrom_userdevice,
+ "bootable": False,
+ "mode": "RO",
+ "type": "CD",
+ "empty": True,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
+ else:
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])
+
+ cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)
+
+ for cdrom_change in change['cdrom']:
+ if cdrom_change == "type":
+ cdrom_type = self.module.params['cdrom']['type']
+
+ if cdrom_type == "none" and not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+ elif cdrom_type == "host":
+ # Unimplemented!
+ pass
+
+ elif cdrom_change == "iso_name":
+ if not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+
+ cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
+ self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
+ elif change.get('networks_changed'):
+ position = 0
+
+ for network_change_list in change['networks_changed']:
+ if network_change_list:
+ vm_vif_params = self.vm_params['VIFs'][position]
+ network_params = self.module.params['networks'][position]
+
+ vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
+ network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])
+
+ vif_recreated = False
+
+ if "name" in network_change_list or "mac" in network_change_list:
+ # To change network or MAC, we destroy old
+ # VIF and then create a new one with changed
+ # parameters. That's how XenCenter does it.
+
+ # Copy all old parameters to new VIF record.
+ vif = {
+ "device": vm_vif_params['device'],
+ "network": network_ref,
+ "VM": vm_vif_params['VM'],
+ "MAC": vm_vif_params['MAC'],
+ "MTU": vm_vif_params['MTU'],
+ "other_config": vm_vif_params['other_config'],
+ "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
+ "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
+ "locking_mode": vm_vif_params['locking_mode'],
+ "ipv4_allowed": vm_vif_params['ipv4_allowed'],
+ "ipv6_allowed": vm_vif_params['ipv6_allowed'],
+ }
+
+ if "name" in network_change_list:
+ network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+ vif['network'] = network_ref_new
+ vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
+
+ if "mac" in network_change_list:
+ vif['MAC'] = network_params['mac'].lower()
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.unplug(vif_ref)
+
+ self.xapi_session.xenapi.VIF.destroy(vif_ref)
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ vif_ref = vif_ref_new
+ vif_recreated = True
+
+ if self.vm_params['customization_agent'] == "native":
+ vif_reconfigure_needed = False
+
+ if "type" in network_change_list:
+ network_type = network_params['type'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type = vm_vif_params['ipv4_configuration_mode']
+
+ if "ip" in network_change_list:
+ network_ip = network_params['ip']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses']:
+ network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
+ else:
+ network_ip = ""
+
+ if "prefix" in network_change_list:
+ network_prefix = "/%s" % network_params['prefix']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ else:
+ network_prefix = ""
+
+ if "gateway" in network_change_list:
+ network_gateway = network_params['gateway']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway = vm_vif_params['ipv4_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
+ "%s%s" % (network_ip, network_prefix), network_gateway)
+
+ vif_reconfigure_needed = False
+
+ if "type6" in network_change_list:
+ network_type6 = network_params['type6'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type6 = vm_vif_params['ipv6_configuration_mode']
+
+ if "ip6" in network_change_list:
+ network_ip6 = network_params['ip6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses']:
+ network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
+ else:
+ network_ip6 = ""
+
+ if "prefix6" in network_change_list:
+ network_prefix6 = "/%s" % network_params['prefix6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
+ else:
+ network_prefix6 = ""
+
+ if "gateway6" in network_change_list:
+ network_gateway6 = network_params['gateway6']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway6 = vm_vif_params['ipv6_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
+ "%s%s" % (network_ip6, network_prefix6), network_gateway6)
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vif_device = vm_vif_params['device']
+
+ # A user could have manually changed the network
+ # or mac, e.g. through XenCenter, and then also
+ # made those changes in the playbook manually.
+ # In that case, the module will not detect any
+ # changes and the info in xenstore_data will
+ # become stale. For that reason we always
+ # update name and mac in xenstore_data.
+
+ # Since we handle name and mac differently,
+ # we have to remove them from
+ # network_change_list.
+ network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
+
+ for network_change in network_change_list_tmp + ['name', 'mac']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change))
+
+ if network_params.get('name'):
+ network_name = network_params['name']
+ else:
+ network_name = vm_vif_params['network']['name_label']
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
+
+ if network_params.get('mac'):
+ network_mac = network_params['mac'].lower()
+ else:
+ network_mac = vm_vif_params['MAC'].lower()
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
+
+ for network_change in network_change_list_tmp:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change),
+ network_params[network_change])
+
+ position += 1
+ elif change.get('networks_new'):
+ for position, vif_device in change['networks_new']:
+ network_params = self.module.params['networks'][position]
+
+ network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+
+ network_name = network_params['name']
+ network_mac = network_params['mac'] if network_params.get('mac') else ""
+ network_type = network_params.get('type')
+ network_ip = network_params['ip'] if network_params.get('ip') else ""
+ network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
+ network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
+ network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
+ network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
+ network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
+
+ vif = {
+ "device": vif_device,
+ "network": network_ref,
+ "VM": self.vm_ref,
+ "MAC": network_mac,
+ "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
+ "%s/%s" % (network_ip, network_prefix), network_gateway)
+
+ if network_type6 and network_type6 == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
+ "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
+ elif self.vm_params['customization_agent'] == "custom":
+ # We first have to remove any existing data
+ # from xenstore_data because there could be
+ # some old leftover data from some interface
+ # that once occupied the same device location as
+ # our new interface.
+ for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
+
+ # We get MAC from VIF itself instead of
+ # networks.mac because it could be
+ # autogenerated.
+ vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
+
+ if network_type:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
+
+ if network_type == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip" % vif_device, network_ip)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix" % vif_device, network_prefix)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/netmask" % vif_device, network_netmask)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway" % vif_device, network_gateway)
+
+ if network_type6:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
+
+ if network_type6 == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip6" % vif_device, network_ip6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
+
+ elif change.get('custom_params'):
+ for position in change['custom_params']:
+ custom_param_key = self.module.params['custom_params'][position]['key']
+ custom_param_value = self.module.params['custom_params'][position]['value']
+ self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
+
+ if self.module.params['is_template']:
+ self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
+ elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
+ self.set_power_state("poweredon")
+
+ # Gather new params after reconfiguration.
+ self.gather_params()
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return config_changes
+
+ def destroy(self):
+ """Removes an existing VM with associated disks"""
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called destroy on non existing VM!")
+
+ if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Make sure that VM is poweredoff before we can destroy it.
+ self.set_power_state("poweredoff")
+
+ try:
+ # Destroy VM!
+ self.xapi_session.xenapi.VM.destroy(self.vm_ref)
+
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # Destroy all VDIs associated with VM!
+ for vm_disk_params in vm_disk_params_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
+
+ self.xapi_session.xenapi.VDI.destroy(vdi_ref)
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_changes(self):
+ """Finds VM parameters that differ from specified ones.
+
+ This method builds a dictionary with hierarchy of VM parameters
+ that differ from those specified in module parameters.
+
+ Returns:
+ list: VM parameters that differ from those specified in
+ module parameters.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called get_changes on non existing VM!")
+
+ need_poweredoff = False
+
+ if self.module.params['is_template']:
+ need_poweredoff = True
+
+ try:
+ # This VM could be a template or a snapshot. In that case we fail
+ # because we can't reconfigure them or it would just be too
+ # dangerous.
+ if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
+
+ if self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
+
+ # Let's build a list of parameters that changed.
+ config_changes = []
+
+ # Name could only differ if we found an existing VM by uuid.
+ if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
+ if self.module.params['name']:
+ config_changes.append('name')
+ else:
+ self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
+
+ if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
+ config_changes.append('name_desc')
+
+ # Folder parameter is found in other_config.
+ vm_other_config = self.vm_params['other_config']
+ vm_folder = vm_other_config.get('folder', '')
+
+ if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
+ config_changes.append('folder')
+
+ if self.module.params['home_server'] is not None:
+ if (self.module.params['home_server'] and
+ (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
+
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
+ msg_prefix="VM check home_server: ")
+
+ config_changes.append('home_server')
+ elif not self.module.params['home_server'] and self.vm_params['affinity']:
+ config_changes.append('home_server')
+
+ config_changes_hardware = []
+
+ if self.module.params['hardware']:
+ num_cpus = self.module.params['hardware'].get('num_cpus')
+
+ if num_cpus is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpus = int(num_cpus)
+ except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
+
+ if num_cpus < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
+
+ # We can use VCPUs_at_startup or VCPUs_max parameter. I'd
+ # say the former is the way to go but this needs
+ # confirmation and testing.
+ if num_cpus != int(self.vm_params['VCPUs_at_startup']):
+ config_changes_hardware.append('num_cpus')
+ # For now, we don't support hotplugging so the VM has to
+ # be in the powered off state to reconfigure.
+ need_poweredoff = True
+
+ num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
+
+ if num_cpu_cores_per_socket is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
+ except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
+
+ if num_cpu_cores_per_socket < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
+
+ if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
+
+ vm_platform = self.vm_params['platform']
+ vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
+
+ if num_cpu_cores_per_socket != vm_cores_per_socket:
+ config_changes_hardware.append('num_cpu_cores_per_socket')
+ # For now, we don't support hotplugging so the VM has
+ # to be in the powered off state to reconfigure.
+ need_poweredoff = True
+
+ memory_mb = self.module.params['hardware'].get('memory_mb')
+
+ if memory_mb is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ memory_mb = int(memory_mb)
+ except ValueError:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
+
+ if memory_mb < 1:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
+
+ # There are multiple memory parameters:
+ # - memory_dynamic_max
+ # - memory_dynamic_min
+ # - memory_static_max
+ # - memory_static_min
+ # - memory_target
+ #
+ # memory_target seems like a good candidate but it returns 0 for
+ # halted VMs so we can't use it.
+ #
+ # I decided to use memory_dynamic_max and memory_static_max
+ # and use whichever is larger. This strategy needs validation
+ # and testing.
+ #
+ # XenServer stores memory size in bytes so we need to divide
+ # it by 1024*1024 = 1048576.
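+ # For example, memory_mb: 8192 corresponds to
+ # 8192 * 1048576 = 8589934592 bytes in XAPI.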
+ if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
+ config_changes_hardware.append('memory_mb')
+ # For now, we don't support hotplugging so the VM has to
+ # be in the powered off state to reconfigure.
+ need_poweredoff = True
+
+ if config_changes_hardware:
+ config_changes.append({"hardware": config_changes_hardware})
+
+ config_changes_disks = []
+ config_new_disks = []
+
+ # Find allowed userdevices.
+ vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if self.module.params['disks']:
+ # Get the list of all disks. Filter out any CDs found.
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # The number of disks defined in module params has to be the same
+ # as or higher than the number of existing disks attached to the VM.
+ # We don't support removal or detachment of disks.
+ if len(self.module.params['disks']) < len(vm_disk_params_list):
+ self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" %
+ (len(self.module.params['disks']), len(vm_disk_params_list)))
+
+            # Find the highest userdevice occupied by a disk.
+ if not vm_disk_params_list:
+ vm_disk_userdevice_highest = "-1"
+ else:
+ vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
+
+ for position in range(len(self.module.params['disks'])):
+ if position < len(vm_disk_params_list):
+ vm_disk_params = vm_disk_params_list[position]
+ else:
+ vm_disk_params = None
+
+ disk_params = self.module.params['disks'][position]
+
+                disk_size = self.get_normalized_disk_size(disk_params, "VM check disks[%s]: " % position)
+
+ disk_name = disk_params.get('name')
+
+ if disk_name is not None and not disk_name:
+ self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
+
+ # If this is an existing disk.
+ if vm_disk_params and vm_disk_params['VDI']:
+ disk_changes = []
+
+ if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
+ disk_changes.append('name')
+
+ disk_name_desc = disk_params.get('name_desc')
+
+ if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
+ disk_changes.append('name_desc')
+
+ if disk_size:
+ if disk_size > int(vm_disk_params['VDI']['virtual_size']):
+ disk_changes.append('size')
+ need_poweredoff = True
+ elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
+ self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
+ "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
+
+ config_changes_disks.append(disk_changes)
+ # If this is a new disk.
+ else:
+ if not disk_size:
+ self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+                        # Check existence only. Ignore return value.
+ get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM check disks[%s]: " % position)
+ elif self.default_sr_ref == 'OpaqueRef:NULL':
+ self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
+
+ if not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
+
+ disk_userdevice = None
+
+                    # We need to place a new disk right above the highest
+                    # placed existing disk to maintain relative disk
+                    # positions pairable with the disk specifications in
+                    # module params. That place must not be occupied by
+                    # some other device such as a CD-ROM.
+ for userdevice in vbd_userdevices_allowed:
+ if int(userdevice) > int(vm_disk_userdevice_highest):
+ disk_userdevice = userdevice
+ vbd_userdevices_allowed.remove(userdevice)
+ vm_disk_userdevice_highest = userdevice
+ break
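+                    # For example, with existing disks at userdevices 0 and 1
+                    # and a CD-ROM at 3, allowed devices ['2', '4', ...] would
+                    # yield userdevice '2' for the new disk.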
+
+ # If no place was found.
+ if disk_userdevice is None:
+                        # The highest occupied place could be a CD-ROM device
+                        # so we have to include all devices regardless of
+                        # type when calculating the out-of-bounds position.
+ disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
+ self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
+
+ # For new disks we only track their position.
+ config_new_disks.append((position, disk_userdevice))
+
+            # We should append config_changes_disks to config_changes only
+            # if there is at least one changed disk; otherwise we skip it.
+ for disk_change in config_changes_disks:
+ if disk_change:
+ config_changes.append({"disks_changed": config_changes_disks})
+ break
+
+ if config_new_disks:
+ config_changes.append({"disks_new": config_new_disks})
+
+ config_changes_cdrom = []
+
+ if self.module.params['cdrom']:
+            # Get the list of all CD-ROMs, filtering out any regular disks
+            # found. If no existing CD-ROM is found, we will create one
+            # later; otherwise we take the first one found.
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If no existing CD-ROM is found, we will need to add one.
+ # We need to check if there is any userdevice allowed.
+ if not vm_cdrom_params_list and not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
+
+ cdrom_type = self.module.params['cdrom'].get('type')
+ cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
+
+            # If cdrom.iso_name is specified but cdrom.type is not,
+            # then set cdrom.type to 'iso', unless cdrom.iso_name is
+            # an empty string, in which case set cdrom.type to 'none'.
+ if not cdrom_type:
+ if cdrom_iso_name:
+ cdrom_type = "iso"
+ elif cdrom_iso_name is not None:
+ cdrom_type = "none"
+
+ self.module.params['cdrom']['type'] = cdrom_type
+
+ # If type changed.
+ if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
+ config_changes_cdrom.append('type')
+
+ if cdrom_type == "iso":
+ # Check if ISO exists.
+                # Check existence only. Ignore return value.
+ get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
+ msg_prefix="VM check cdrom.iso_name: ")
+
+ # Is ISO image changed?
+ if (cdrom_iso_name and
+ (not vm_cdrom_params_list or
+ not vm_cdrom_params_list[0]['VDI'] or
+ cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
+ config_changes_cdrom.append('iso_name')
+
+ if config_changes_cdrom:
+ config_changes.append({"cdrom": config_changes_cdrom})
+
+ config_changes_networks = []
+ config_new_networks = []
+
+ # Find allowed devices.
+ vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
+
+ if self.module.params['networks']:
+            # The number of VIFs defined in module params has to be equal
+            # to or higher than the number of existing VIFs attached to the
+            # VM. We don't support removal of VIFs.
+ if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
+ self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" %
+ (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
+
+ # Find the highest occupied device.
+ if not self.vm_params['VIFs']:
+ vif_device_highest = "-1"
+ else:
+ vif_device_highest = self.vm_params['VIFs'][-1]['device']
+
+ for position in range(len(self.module.params['networks'])):
+ if position < len(self.vm_params['VIFs']):
+ vm_vif_params = self.vm_params['VIFs'][position]
+ else:
+ vm_vif_params = None
+
+ network_params = self.module.params['networks'][position]
+
+ network_name = network_params.get('name')
+
+ if network_name is not None and not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
+
+ if network_name:
+                    # Check existence only. Ignore return value.
+ get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
+ msg_prefix="VM check networks[%s]: " % position)
+
+ network_mac = network_params.get('mac')
+
+ if network_mac is not None:
+ network_mac = network_mac.lower()
+
+ if not is_mac(network_mac):
+ self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
+
+ # IPv4 reconfiguration.
+ network_type = network_params.get('type')
+ network_ip = network_params.get('ip')
+ network_netmask = network_params.get('netmask')
+ network_prefix = None
+
+ # If networks.ip is specified and networks.type is not,
+ # then set networks.type to 'static'.
+ if not network_type and network_ip:
+ network_type = "static"
+
+                # XenServer natively supports only the 'none' and 'static'
+                # types, with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
+ network_type = "none"
+
+ if network_type and network_type == "static":
+ if network_ip is not None:
+ network_ip_split = network_ip.split('/')
+ network_ip = network_ip_split[0]
+
+ if network_ip and not is_valid_ip_addr(network_ip):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
+
+ if len(network_ip_split) > 1:
+ network_prefix = network_ip_split[1]
+
+ if not is_valid_ip_prefix(network_prefix):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
+
+ if network_netmask is not None:
+ if not is_valid_ip_netmask(network_netmask):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
+
+ network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
+ elif network_prefix is not None:
+ network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
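+                    # For example, ip "192.168.0.10/24" yields network_ip
+                    # "192.168.0.10", network_prefix "24" and a derived
+                    # network_netmask of "255.255.255.0".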
+
+ # If any parameter is overridden at this point, update it.
+ if network_type:
+ network_params['type'] = network_type
+
+ if network_ip:
+ network_params['ip'] = network_ip
+
+ if network_netmask:
+ network_params['netmask'] = network_netmask
+
+ if network_prefix:
+ network_params['prefix'] = network_prefix
+
+ network_gateway = network_params.get('gateway')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway and not is_valid_ip_addr(network_gateway):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+
+ # IPv6 reconfiguration.
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params.get('ip6')
+ network_prefix6 = None
+
+ # If networks.ip6 is specified and networks.type6 is not,
+ # then set networks.type6 to 'static'.
+ if not network_type6 and network_ip6:
+ network_type6 = "static"
+
+                # XenServer natively supports only the 'none' and 'static'
+                # types, with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
+ network_type6 = "none"
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 is not None:
+ network_ip6_split = network_ip6.split('/')
+ network_ip6 = network_ip6_split[0]
+
+ if network_ip6 and not is_valid_ip6_addr(network_ip6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
+
+ if len(network_ip6_split) > 1:
+ network_prefix6 = network_ip6_split[1]
+
+ if not is_valid_ip6_prefix(network_prefix6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
+
+ # If any parameter is overridden at this point, update it.
+ if network_type6:
+ network_params['type6'] = network_type6
+
+ if network_ip6:
+ network_params['ip6'] = network_ip6
+
+ if network_prefix6:
+ network_params['prefix6'] = network_prefix6
+
+ network_gateway6 = network_params.get('gateway6')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
+
+ # If this is an existing VIF.
+ if vm_vif_params and vm_vif_params['network']:
+ network_changes = []
+
+ if network_name and network_name != vm_vif_params['network']['name_label']:
+ network_changes.append('name')
+
+ if network_mac and network_mac != vm_vif_params['MAC'].lower():
+ network_changes.append('mac')
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
+ network_changes.append('type')
+
+ if network_type and network_type == "static":
+ if network_ip and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
+ network_changes.append('ip')
+
+ if network_prefix and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+
+ if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
+ network_changes.append('gateway')
+
+ if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
+ network_changes.append('type6')
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
+ network_changes.append('ip6')
+
+ if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
+ network_changes.append('prefix6')
+
+ if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
+ network_changes.append('gateway6')
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = self.vm_params['xenstore_data']
+
+ if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
+ network_changes.append('type')
+ need_poweredoff = True
+
+ if network_type and network_type == "static":
+ if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
+ network_changes.append('ip')
+ need_poweredoff = True
+
+ if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+ need_poweredoff = True
+
+ if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
+ network_changes.append('type6')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
+ network_changes.append('ip6')
+ need_poweredoff = True
+
+ if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
+ network_changes.append('prefix6')
+ need_poweredoff = True
+
+ if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway6')
+ need_poweredoff = True
+
+ config_changes_networks.append(network_changes)
+ # If this is a new VIF.
+ else:
+ if not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
+
+ if network_type and network_type == "static" and network_ip and not network_netmask:
+ self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
+
+ if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
+ self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
+
+                    # Restart is needed if we are adding a new network
+                    # interface with IP/gateway parameters specified
+                    # and the custom agent is used.
+ if self.vm_params['customization_agent'] == "custom":
+ for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ if network_params.get(parameter):
+ need_poweredoff = True
+ break
+
+ if not vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
+
+ # We need to place a new network interface right above the
+ # highest placed existing interface to maintain relative
+ # positions pairable with network interface specifications
+ # in module params.
+ vif_device = str(int(vif_device_highest) + 1)
+
+ if vif_device not in vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
+
+ vif_devices_allowed.remove(vif_device)
+ vif_device_highest = vif_device
+
+ # For new VIFs we only track their position.
+ config_new_networks.append((position, vif_device))
+
+        # We should append config_changes_networks to config_changes only
+        # if there is at least one changed network; otherwise we skip it.
+ for network_change in config_changes_networks:
+ if network_change:
+ config_changes.append({"networks_changed": config_changes_networks})
+ break
+
+ if config_new_networks:
+ config_changes.append({"networks_new": config_new_networks})
+
+ config_changes_custom_params = []
+
+ if self.module.params['custom_params']:
+ for position in range(len(self.module.params['custom_params'])):
+ custom_param = self.module.params['custom_params'][position]
+
+ custom_param_key = custom_param['key']
+ custom_param_value = custom_param['value']
+
+ if custom_param_key not in self.vm_params:
+ self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
+
+ if custom_param_value != self.vm_params[custom_param_key]:
+ # We only need to track custom param position.
+ config_changes_custom_params.append(position)
+
+ if config_changes_custom_params:
+ config_changes.append({"custom_params": config_changes_custom_params})
+
+ if need_poweredoff:
+ config_changes.append('need_poweredoff')
+
+ return config_changes
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_normalized_disk_size(self, disk_params, msg_prefix=""):
+ """Parses disk size parameters and returns disk size in bytes.
+
+ This method tries to parse disk size module parameters. It fails
+ with an error message if size cannot be parsed.
+
+ Args:
+            disk_params (dict): A dictionary with disk parameters.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ int: disk size in bytes if disk size is successfully parsed or
+ None if no disk size parameters were found.
+ """
+        # There should be only a single size spec but we make a list of all
+        # size specs just in case. Priority is given to 'size' but if it is
+        # not found, we check for 'size_tb', 'size_gb', 'size_mb' etc. and
+        # use the first one found.
+ disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
+
+ if disk_size_spec:
+ try:
+ # size
+ if "size" in disk_size_spec:
+ size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
+ disk_size_m = size_regex.match(disk_params['size'])
+
+ if disk_size_m:
+ size = disk_size_m.group(1)
+ unit = disk_size_m.group(2)
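+                    # For example, size "10.5 GB" is split into size "10.5"
+                    # and unit "GB" by the regex above.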
+ else:
+ raise ValueError
+ # size_tb, size_gb, size_mb, size_kb, size_b
+ else:
+ size = disk_params[disk_size_spec[0]]
+ unit = disk_size_spec[0].split('_')[-1]
+
+ if not unit:
+ unit = "b"
+ else:
+ unit = unit.lower()
+
+                if re.match(r'\d+\.\d+', size):
+                    # We found a float value in the string, so typecast it.
+                    if unit == "b":
+                        # If we found a float but the unit is bytes, take the integer part only.
+                        size = int(float(size))
+                    else:
+                        size = float(size)
+                else:
+                    # We found an int value in the string, so typecast it.
+                    size = int(size)
+
+ if not size or size < 0:
+ raise ValueError
+
+ except (TypeError, ValueError, NameError):
+ # Common failure
+ self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
+
+ disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
+
+ if unit in disk_units:
+ return int(size * (1024 ** disk_units[unit]))
+ else:
+ self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
+ (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
+ else:
+ return None
+
+ @staticmethod
+ def get_cdrom_type(vm_cdrom_params):
+ """Returns VM CD-ROM type."""
+        # TODO: implement support for detecting type 'host'. No server to
+        # test this on at the moment.
+ if vm_cdrom_params['empty']:
+ return "none"
+ else:
+ return "iso"
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'poweredon']),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ uuid=dict(type='str'),
+ template=dict(type='str', aliases=['template_src']),
+ template_uuid=dict(type='str'),
+ is_template=dict(type='bool', default=False),
+ folder=dict(type='str'),
+ hardware=dict(
+ type='dict',
+ options=dict(
+ num_cpus=dict(type='int'),
+ num_cpu_cores_per_socket=dict(type='int'),
+ memory_mb=dict(type='int'),
+ ),
+ ),
+ disks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ size=dict(type='str'),
+ size_tb=dict(type='str'),
+ size_gb=dict(type='str'),
+ size_mb=dict(type='str'),
+ size_kb=dict(type='str'),
+ size_b=dict(type='str'),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ sr=dict(type='str'),
+ sr_uuid=dict(type='str'),
+ ),
+ aliases=['disk'],
+ mutually_exclusive=[
+ ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
+ ['sr', 'sr_uuid'],
+ ],
+ ),
+ cdrom=dict(
+ type='dict',
+ options=dict(
+ type=dict(type='str', choices=['none', 'iso']),
+ iso_name=dict(type='str'),
+ ),
+ required_if=[
+ ['type', 'iso', ['iso_name']],
+ ],
+ ),
+ networks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', aliases=['name_label']),
+ mac=dict(type='str'),
+ type=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip=dict(type='str'),
+ netmask=dict(type='str'),
+ gateway=dict(type='str'),
+ type6=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip6=dict(type='str'),
+ gateway6=dict(type='str'),
+ ),
+ aliases=['network'],
+ required_if=[
+ ['type', 'static', ['ip']],
+ ['type6', 'static', ['ip6']],
+ ],
+ ),
+ home_server=dict(type='str'),
+ custom_params=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ key=dict(type='str', required=True),
+ value=dict(type='raw', required=True),
+ ),
+ ),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ linked_clone=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ mutually_exclusive=[
+ ['template', 'template_uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ vm = XenServerVM(module)
+
+ # Find existing VM
+ if vm.exists():
+ if module.params['state'] == "absent":
+ vm.destroy()
+ result['changed'] = True
+ elif module.params['state'] == "present":
+ config_changes = vm.reconfigure()
+
+ if config_changes:
+ result['changed'] = True
+
+ # Make new disk and network changes more user friendly
+ # and informative.
+ for change in config_changes:
+ if isinstance(change, dict):
+ if change.get('disks_new'):
+ disks_new = []
+
+ for position, userdevice in change['disks_new']:
+ disk_new_params = {"position": position, "vbd_userdevice": userdevice}
+ disk_params = module.params['disks'][position]
+
+ for k in disk_params.keys():
+ if disk_params[k] is not None:
+ disk_new_params[k] = disk_params[k]
+
+ disks_new.append(disk_new_params)
+
+ if disks_new:
+ change['disks_new'] = disks_new
+
+ elif change.get('networks_new'):
+ networks_new = []
+
+ for position, device in change['networks_new']:
+ network_new_params = {"position": position, "vif_device": device}
+ network_params = module.params['networks'][position]
+
+ for k in network_params.keys():
+ if network_params[k] is not None:
+ network_new_params[k] = network_params[k]
+
+ networks_new.append(network_new_params)
+
+ if networks_new:
+ change['networks_new'] = networks_new
+
+ result['changes'] = config_changes
+
+ elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
+ result['changed'] = vm.set_power_state(module.params['state'])
+ elif module.params['state'] != "absent":
+ vm.deploy()
+ result['changed'] = True
+
+ if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
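+
+
+# Illustrative note (not part of the module): on reconfiguration, the
+# 'changes' value returned by this module is a list mixing per-subsystem
+# dicts with a bare 'need_poweredoff' marker, e.g. (hypothetical values):
+#
+#     [{"hardware": ["num_cpus", "memory_mb"]},
+#      {"disks_changed": [["size"], []]},
+#      {"disks_new": [{"position": 1, "vbd_userdevice": "1"}]},
+#      {"cdrom": ["type", "iso_name"]},
+#      "need_poweredoff"]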
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- The minimal supported version of XenServer is 5.6.
+- The module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+  U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+    - UUID of the VM to gather facts of. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- The minimal supported version of XenServer is 5.6.
+- The module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+  U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+    - UUID of the VM to gather facts of. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
new file mode 100644
index 00000000..4a195ff5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_powerstate
+short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+  This module can be used to power on, power off, restart or suspend a virtual machine and gracefully reboot or shut down the guest OS of a virtual machine.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- The minimal supported version of XenServer is 5.6.
+- The module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+  U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+    - Specify the state the VM should be in.
+    - If C(state) is set to a value other than C(present), then the VM is transitioned into the required state and facts are returned.
+    - If C(state) is set to C(present), then the VM is just checked for existence and facts are returned.
+ type: str
+ default: present
+ choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
+ name:
+ description:
+ - Name of the VM to manage.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+    - 'By default, the module will wait indefinitely for the VM to change state or acquire an IP address if C(wait_for_ip_address: yes).'
+    - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+    - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Power on VM
+ community.general.xenserver_guest_powerstate:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ state: powered-on
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "windows-template-testing-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "windows-template-testing-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "windows-template-testing",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
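+            # XAPI reports power states capitalized (e.g. "Halted",
+            # "Running"), hence the capitalize() above.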
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']),
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Set VM power state.
+ if module.params['state'] != "present":
+ result['changed'] = vm.set_power_state(module.params['state'])
+
+ if module.params['wait_for_ip_address']:
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
new file mode 100644
index 00000000..2efb90cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloud_init_data_facts
+short_description: Retrieve facts of cloud-init.
+description:
+ - Gathers facts by reading the status.json and result.json of cloud-init.
+author: René Moser (@resmo)
+options:
+ filter:
+ description:
+      - Filter facts, either C(status) or C(result).
+ type: str
+ choices: [ status, result ]
+notes:
+ - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
+'''
+
+EXAMPLES = '''
+- name: Gather all facts of cloud init
+ community.general.cloud_init_data_facts:
+ register: result
+
+- ansible.builtin.debug:
+ var: result
+
+- name: Wait for cloud init to finish
+ community.general.cloud_init_data_facts:
+ filter: status
+ register: res
+ until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
+ retries: 50
+ delay: 5
+'''
+
+RETURN = '''
+---
+cloud_init_data_facts:
+ description: Facts of result and status.
+ returned: success
+ type: dict
+  sample: '{
+    "status": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "errors": []
+        }
+    },
+    "result": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "init": {
+                "errors": [],
+                "finished": 1522066377.0185432,
+                "start": 1522066375.2648022
+            },
+            "init-local": {
+                "errors": [],
+                "finished": 1522066373.70919,
+                "start": 1522066373.4726632
+            },
+            "modules-config": {
+                "errors": [],
+                "finished": 1522066380.9097016,
+                "start": 1522066379.0011985
+            },
+            "modules-final": {
+                "errors": [],
+                "finished": 1522066383.56594,
+                "start": 1522066382.3449218
+            },
+            "stage": null
+        }
+    }
+  }'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+CLOUD_INIT_PATH = "/var/lib/cloud/data/"
+
+
+def gather_cloud_init_data_facts(module):
+ res = {
+ 'cloud_init_data_facts': dict()
+ }
+
+    filter_param = module.params.get('filter')
+
+    for i in ['result', 'status']:
+        if filter_param is None or filter_param == i:
+            res['cloud_init_data_facts'][i] = dict()
+            json_file = CLOUD_INIT_PATH + i + '.json'
+
+            if os.path.exists(json_file):
+                with open(json_file, 'rb') as f:
+                    contents = to_text(f.read(), errors='surrogate_or_strict')
+
+ if contents:
+ res['cloud_init_data_facts'][i] = module.from_json(contents)
+ return res
+
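+# Illustrative sketch (not part of the module): given the facts returned by
+# gather_cloud_init_data_facts(), cloud-init is considered finished when the
+# reported stage is empty. The function name is hypothetical.
+def example_cloud_init_finished(facts):
+    status = facts['cloud_init_data_facts'].get('status') or {}
+    return not (status.get('v1') or {}).get('stage')
+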
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filter=dict(choices=['result', 'status']),
+ ),
+ supports_check_mode=True,
+ )
+
+ facts = gather_cloud_init_data_facts(module)
+ result = dict(changed=False, ansible_facts=facts, **facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
new file mode 100644
index 00000000..fc62aa70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
@@ -0,0 +1,878 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cloudflare_dns
+author:
+- Michael Gruener (@mgruener)
+requirements:
+ - python >= 2.6
+short_description: Manage Cloudflare DNS records
+description:
+ - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
+options:
+ api_token:
+ description:
+ - API token.
+      - Required for API token authentication.
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ version_added: '0.2.0'
+ account_api_key:
+ description:
+ - Account API key.
+      - Required for API key authentication.
+ - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ aliases: [ account_api_token ]
+ account_email:
+ description:
+      - Account email. Required for API key authentication.
+ type: str
+ required: false
+ algorithm:
+ description:
+ - Algorithm number.
+ - Required for C(type=DS) and C(type=SSHFP) when C(state=present).
+ type: int
+ cert_usage:
+ description:
+ - Certificate usage number.
+ - Required for C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ hash_type:
+ description:
+ - Hash type number.
+ - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 1, 2 ]
+ key_tag:
+ description:
+ - DNSSEC key tag.
+      - Required for C(type=DS) when C(state=present).
+ type: int
+ port:
+ description:
+ - Service port.
+ - Required for C(type=SRV) and C(type=TLSA).
+ type: int
+ priority:
+ description:
+ - Record priority.
+      - Required for C(type=MX) and C(type=SRV).
+ default: 1
+ type: int
+ proto:
+ description:
+ - Service protocol. Required for C(type=SRV) and C(type=TLSA).
+ - Common values are TCP and UDP.
+ - Before Ansible 2.6 only TCP and UDP were available.
+ type: str
+ proxied:
+ description:
+ - Proxy through Cloudflare network or just use DNS.
+ type: bool
+ default: no
+ record:
+ description:
+ - Record to add.
+ - Required if C(state=present).
+      - Default is C(@) (i.e. the zone name).
+ type: str
+ default: '@'
+ aliases: [ name ]
+ selector:
+ description:
+ - Selector number.
+ - Required for C(type=TLSA) when C(state=present).
+ choices: [ 0, 1 ]
+ type: int
+ service:
+ description:
+ - Record service.
+      - Required for C(type=SRV).
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+ - Only use with C(state=present).
+ - This will delete all other records with the same record name and type.
+ type: bool
+ state:
+ description:
+ - Whether the record(s) should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ timeout:
+ description:
+ - Timeout for Cloudflare API calls.
+ type: int
+ default: 30
+ ttl:
+ description:
+ - The TTL to give the new record.
+ - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ type: int
+ default: 1
+ type:
+ description:
+ - The type of DNS record to create. Required if C(state=present).
+ - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7.
+ type: str
+ choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ]
+ value:
+ description:
+ - The record value.
+ - Required for C(state=present).
+ type: str
+ aliases: [ content ]
+ weight:
+ description:
+ - Service weight.
+ - Required for C(type=SRV).
+ type: int
+ default: 1
+ zone:
+ description:
+ - The name of the Zone to work with (e.g. "example.com").
+ - The Zone must already exist.
+ type: str
+ required: true
+ aliases: [ domain ]
+'''
+
+EXAMPLES = r'''
+- name: Create a test.example.net A record to point to 127.0.0.1
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ register: record
+
+- name: Create a record using api token
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ api_token: dummyapitoken
+
+- name: Create a example.net CNAME record to example.com
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Change its TTL
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ ttl: 600
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Delete the record
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: absent
+
+- name: Create an example.net CNAME record to example.com and proxy through Cloudflare's network
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ proxied: yes
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+# This deletes all other TXT records named "test.example.net"
+- name: Create TXT record "test.example.net" with value "unique value"
+ community.general.cloudflare_dns:
+ domain: example.net
+ record: test
+ type: TXT
+ value: unique value
+ solo: true
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Create an SRV record _foo._tcp.example.net
+ community.general.cloudflare_dns:
+ domain: example.net
+ service: foo
+ proto: tcp
+ port: 3500
+ priority: 10
+ weight: 20
+ type: SRV
+ value: fooserver.example.net
+
+- name: Create an SSHFP record login.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: login
+ type: SSHFP
+ algorithm: 4
+ hash_type: 2
+ value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1
+
+- name: Create a TLSA record _25._tcp.mail.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: mail
+ port: 25
+ proto: tcp
+ type: TLSA
+ cert_usage: 3
+ selector: 1
+ hash_type: 1
+ value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
+
+- name: Create a DS record for subdomain.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: subdomain
+ type: DS
+ key_tag: 5464
+ algorithm: 8
+ hash_type: 2
+ value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
+'''
+
+RETURN = r'''
+record:
+ description: A dictionary containing the record data.
+ returned: success, except on record deletion
+ type: complex
+ contains:
+ content:
+ description: The record content (details depend on record type).
+ returned: success
+ type: str
+ sample: 192.0.2.91
+ created_on:
+ description: The record creation date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ data:
+ description: Additional record data.
+ returned: success, if type is SRV, DS, SSHFP or TLSA
+ type: dict
+ sample: {
+ name: "jabber",
+ port: 8080,
+ priority: 10,
+ proto: "_tcp",
+ service: "_xmpp",
+ target: "jabberhost.sample.com",
+ weight: 5,
+ }
+ id:
+ description: The record ID.
+ returned: success
+ type: str
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available.
+ returned: success
+ type: bool
+ sample: False
+ meta:
+ description: No documentation available.
+ returned: success
+ type: dict
+ sample: { auto_added: false }
+ modified_on:
+ description: Record modification date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ name:
+ description: The record name as FQDN (including _service and _proto for SRV).
+ returned: success
+ type: str
+ sample: www.sample.com
+ priority:
+ description: Priority of the MX record.
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: Whether this record can be proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ proxied:
+ description: Whether the record is proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ ttl:
+ description: The time-to-live for the record.
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ zone_id:
+ description: The ID of the zone containing the record.
+ returned: success
+ type: str
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: The name of the zone containing the record.
+ returned: success
+ type: str
+ sample: sample.com
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+
+
+def lowercase_string(param):
+ if not isinstance(param, str):
+ return param
+ return param.lower()
+
+
+class CloudflareAPI(object):
+
+ cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
+ changed = False
+
+ def __init__(self, module):
+ self.module = module
+ self.api_token = module.params['api_token']
+ self.account_api_key = module.params['account_api_key']
+ self.account_email = module.params['account_email']
+ self.algorithm = module.params['algorithm']
+ self.cert_usage = module.params['cert_usage']
+ self.hash_type = module.params['hash_type']
+ self.key_tag = module.params['key_tag']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = lowercase_string(module.params['proto'])
+ self.proxied = module.params['proxied']
+ self.selector = module.params['selector']
+ self.record = lowercase_string(module.params['record'])
+ self.service = lowercase_string(module.params['service'])
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = lowercase_string(module.params['zone'])
+
+ if self.record == '@':
+ self.record = self.zone
+
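+ # Normalize values the way Cloudflare stores them: hostname targets are
+ # compared case-insensitively and without a trailing dot, and SRV/TLSA
+ # owner names carry leading underscores (_service._proto, _port._proto).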
+ if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
+ self.value = self.value.rstrip('.').lower()
+
+ if (self.type == 'AAAA') and (self.value is not None):
+ self.value = self.value.lower()
+
+ if (self.type == 'SRV'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.service is not None) and (not self.service.startswith('_')):
+ self.service = '_' + self.service
+
+ if (self.type == 'TLSA'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.port is not None):
+ self.port = '_' + str(self.port)
+
+ if not self.record.endswith(self.zone):
+ self.record = self.record + '.' + self.zone
+
+ if (self.type == 'DS'):
+ if self.record == self.zone:
+ self.module.fail_json(msg="DS records only apply to subdomains.")
+
+ def _cf_simple_api_call(self, api_call, method='GET', payload=None):
+ if self.api_token:
+ headers = {
+ 'Authorization': 'Bearer ' + self.api_token,
+ 'Content-Type': 'application/json',
+ }
+ else:
+ headers = {
+ 'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_key,
+ 'Content-Type': 'application/json',
+ }
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
+
+ resp, info = fetch_url(self.module,
+ self.cf_api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method,
+ timeout=self.timeout)
+
+ if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))
+
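+ # Map well-known Cloudflare API error statuses to readable messages; the
+ # JSON body parsed below may append more detailed error information.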
+ error_msg = ''
+ if info['status'] == 401:
+ # Unauthorized: the request was not authenticated
+ error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 403:
+ # Forbidden: the authenticated user lacks permission
+ error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 429:
+ # Too many requests
+ error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 405:
+ # Method not allowed
+ error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 415:
+ # Unsupported Media Type
+ error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 400:
+ # Bad Request
+ error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+
+ result = None
+ content = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+ else:
+ error_msg += "; The API response was empty"
+
+ if content:
+ try:
+ result = json.loads(to_text(content, errors='surrogate_or_strict'))
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
+
+ # Without a valid/parsed JSON response no more error processing can be done
+ if result is None:
+ self.module.fail_json(msg=error_msg)
+
+ if 'success' not in result:
+ error_msg += "; Unexpected error details: {0}".format(result.get('error'))
+ self.module.fail_json(msg=error_msg)
+
+ if not result['success']:
+ error_msg += "; Error details: "
+ for error in result['errors']:
+ error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
+ if 'error_chain' in error:
+ for chain_error in error['error_chain']:
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
+ def _cf_api_call(self, api_call, method='GET', payload=None):
+ result, status = self._cf_simple_api_call(api_call, method, payload)
+
+ data = result['result']
+
+ if 'result_info' in result:
+ pagination = result['result_info']
+ if pagination['total_pages'] > 1:
+ next_page = int(pagination['page']) + 1
+ parameters = ['page={0}'.format(next_page)]
+ # strip "page" parameter from call parameters (if there are any)
+ if '?' in api_call:
+ raw_api_call, query = api_call.split('?', 1)
+ parameters += [param for param in query.split('&') if not param.startswith('page')]
+ else:
+ raw_api_call = api_call
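+ # request the remaining pages one by one and concatenate their results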
+ while next_page <= pagination['total_pages']:
+ raw_api_call += '?' + '&'.join(parameters)
+ result, status = self._cf_simple_api_call(raw_api_call, method, payload)
+ data += result['result']
+ next_page += 1
+
+ return data, status
+
+ def _get_zone_id(self, zone=None):
+ if not zone:
+ zone = self.zone
+
+ zones = self.get_zones(zone)
+ if len(zones) > 1:
+ self.module.fail_json(msg="More than one zone matches {0}".format(zone))
+
+ if len(zones) < 1:
+ self.module.fail_json(msg="No zone found with name {0}".format(zone))
+
+ return zones[0]['id']
+
+ def get_zones(self, name=None):
+ if not name:
+ name = self.zone
+ param = ''
+ if name:
+ param = '?' + urlencode({'name': name})
+ zones, status = self._cf_api_call('/zones' + param)
+ return zones
+
+ def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
+ if not zone_name:
+ zone_name = self.zone
+ if not type:
+ type = self.type
+ if not record:
+ record = self.record
+ # necessary because None as value means to override user
+ # set module value
+ if (not value) and (value is not None):
+ value = self.value
+
+ zone_id = self._get_zone_id()
+ api_call = '/zones/{0}/dns_records'.format(zone_id)
+ query = {}
+ if type:
+ query['type'] = type
+ if record:
+ query['name'] = record
+ if value:
+ query['content'] = value
+ if query:
+ api_call += '?' + urlencode(query)
+
+ records, status = self._cf_api_call(api_call)
+ return records
+
+ def delete_dns_records(self, **kwargs):
+ params = {}
+ for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ records = []
+ content = params['value']
+ search_record = params['record']
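+ # Cloudflare returns SRV/DS/SSHFP/TLSA record content as tab-separated
+ # fields, so rebuild the same representation to match existing records.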
+ if params['type'] == 'SRV':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ elif params['type'] == 'DS':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'SSHFP':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'TLSA':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ if params['solo']:
+ search_value = None
+ else:
+ search_value = content
+
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+
+ for rr in records:
+ # in solo mode, keep the one record that matches the desired state
+ if params['solo'] and (rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content):
+ continue
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ return self.changed
+
+ def ensure_dns_record(self, **kwargs):
+ params = {}
+ for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ search_value = params['value']
+ search_record = params['record']
+ new_record = None
+ if (params['type'] is None) or (params['record'] is None):
+ self.module.fail_json(msg="You must provide a type and a record to create a new record")
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
+ if not params['value']:
+ self.module.fail_json(msg="You must provide a non-empty value to create this record type")
+
+ # there can only be one CNAME per record
+ # ignoring the value when searching for existing
+ # CNAME records allows us to update the value if it
+ # changes
+ if params['type'] == 'CNAME':
+ search_value = None
+
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "ttl": params['ttl']
+ }
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME']):
+ new_record["proxied"] = params["proxied"]
+
+ if params['type'] == 'MX':
+ for attr in [params['priority'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide priority and a value to create this record type")
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "priority": params['priority'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'SRV':
+ for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
+ srv_data = {
+ "target": params['value'],
+ "port": params['port'],
+ "weight": params['weight'],
+ "priority": params['priority'],
+ "name": params['record'][:-len('.' + params['zone'])],
+ "proto": params['proto'],
+ "service": params['service']
+ }
+ new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
+ search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+
+ if params['type'] == 'DS':
+ for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
+ ds_data = {
+ "key_tag": params['key_tag'],
+ "algorithm": params['algorithm'],
+ "digest_type": params['hash_type'],
+ "digest": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': ds_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'SSHFP':
+ for attr in [params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
+ sshfp_data = {
+ "fingerprint": params['value'],
+ "type": params['hash_type'],
+ "algorithm": params['algorithm'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': sshfp_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'TLSA':
+ for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ tlsa_data = {
+ "usage": params['cert_usage'],
+ "selector": params['selector'],
+ "matching_type": params['hash_type'],
+ "certificate": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": search_record,
+ 'data': tlsa_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ zone_id = self._get_zone_id(params['zone'])
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+ # in theory this should be impossible, as Cloudflare does not allow
+ # the creation of duplicate records, but let's cover it anyway
+ if len(records) > 1:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # record already exists, check if it must be updated
+ if len(records) == 1:
+ cur_record = records[0]
+ do_update = False
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
+ do_update = True
+ if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ do_update = True
+ if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+ do_update = True
+ if ('data' in new_record) and ('data' in cur_record):
+ if (cur_record['data'] != new_record['data']):
+ do_update = True
+ if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ do_update = True
+ if do_update:
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
+ self.changed = True
+ return result, self.changed
+ else:
+ return records, self.changed
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
+ self.changed = True
+ return result, self.changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_token=dict(type='str', required=False, no_log=True),
+ account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
+ account_email=dict(type='str', required=False),
+ algorithm=dict(type='int'),
+ cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
+ hash_type=dict(type='int', choices=[1, 2]),
+ key_tag=dict(type='int'),
+ port=dict(type='int'),
+ priority=dict(type='int', default=1),
+ proto=dict(type='str'),
+ proxied=dict(type='bool', default=False),
+ record=dict(type='str', default='@', aliases=['name']),
+ selector=dict(type='int', choices=[0, 1]),
+ service=dict(type='str'),
+ solo=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ timeout=dict(type='int', default=30),
+ ttl=dict(type='int', default=1),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
+ value=dict(type='str', aliases=['content']),
+ weight=dict(type='int', default=1),
+ zone=dict(type='str', required=True, aliases=['domain']),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['record', 'type', 'value']),
+ ('state', 'absent', ['record']),
+ ('type', 'SRV', ['proto', 'service']),
+ ('type', 'TLSA', ['proto', 'port']),
+ ],
+ )
+
+ if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
+ module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
+ if module.params['type'] == 'SRV':
+ if not ((module.params['weight'] is not None and module.params['port'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['weight'] is None and module.params['port'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'SSHFP':
+ if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'TLSA':
+ if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'DS':
+ if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")
+
+ changed = False
+ cf_api = CloudflareAPI(module)
+
+ # sanity checks
+ if cf_api.is_solo and cf_api.state == 'absent':
+ module.fail_json(msg="solo=true can only be used with state=present")
+
+ # perform add, delete or update (only the TTL can be updated) of one or
+ # more records
+ if cf_api.state == 'present':
+ # delete all records matching record name + type
+ if cf_api.is_solo:
+ changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
+ result, changed = cf_api.ensure_dns_record()
+ if isinstance(result, list):
+ module.exit_json(changed=changed, result={'record': result[0]})
+
+ module.exit_json(changed=changed, result={'record': result})
+ else:
+ # force solo to False, just to be sure
+ changed = cf_api.delete_dns_records(solo=False)
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
new file mode 100644
index 00000000..dd8a5f50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
@@ -0,0 +1,603 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul
+short_description: Add, modify and delete services within a consul cluster
+description:
+ - Registers services and checks for an agent with a consul cluster.
+ A service is some process running on the agent node that should be advertised by
+ consul's discovery mechanism. It may optionally supply a check definition,
+ a periodic service test to notify the consul cluster of the service's health.
+ - "Checks may also be registered per node, e.g. for disk or cpu usage, and
+ report the health of the entire node to the cluster.
+ Service level checks do not require a check name or id, as these are derived
+ by Consul from the Service name and id respectively by appending 'service:'.
+ Node level checks require a I(check_name) and optionally a I(check_id)."
+ - Currently, there is no complete way to retrieve the script, interval or ttl
+ metadata for a registered check. Without this metadata it is not possible to
+ tell if the data supplied with ansible represents a change to a check. As a
+ result this module does not attempt to determine changes and will always
+ report that a change occurred. An API method is planned to supply this
+ metadata; change management will be added once it is available.
+ - "See U(https://www.consul.io) for more details."
+requirements:
+ - python-consul
+ - requests
+author: "Steve Gargan (@sgargan)"
+options:
+ state:
+ description:
+ - register or deregister the consul service; defaults to present
+ default: present
+ choices: ['present', 'absent']
+ service_name:
+ type: str
+ description:
+ - Name of the service; must be unique per node. Required if
+ registering a service. May be omitted if registering a node level
+ check.
+ service_id:
+ type: str
+ description:
+ - the ID for the service, must be unique per node. If I(state=absent),
+ defaults to the service name if supplied.
+ host:
+ type: str
+ description:
+ - host of the consul agent; defaults to localhost
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ default: 8500
+ scheme:
+ type: str
+ description:
+ - the protocol scheme on which the consul agent is running
+ default: http
+ validate_certs:
+ description:
+ - whether to verify the TLS certificate of the consul agent
+ type: bool
+ default: 'yes'
+ notes:
+ type: str
+ description:
+ - Notes to attach to check when registering it.
+ service_port:
+ type: int
+ description:
+ - the port on which the service is listening. Can optionally be supplied for
+ registration of a service, i.e. if I(service_name) or I(service_id) is set
+ service_address:
+ type: str
+ description:
+ - the address to advertise that the service will be listening on.
+ This value will be passed as the I(address) parameter to Consul's
+ U(/v1/agent/service/register) API method, so refer to the Consul API
+ documentation for further details.
+ tags:
+ type: list
+ description:
+ - tags that will be attached to the service registration.
+ script:
+ type: str
+ description:
+ - the script/command that will be run periodically to check the health
+ of the service. Scripts require I(interval) and vice versa.
+ interval:
+ type: str
+ description:
+ - the interval at which the service check will be run. This is a number
+ with a C(s) or C(m) suffix to signify the units of seconds or minutes, e.g.
+ C(15s) or C(1m). If no suffix is supplied, C(s) will be used by default,
+ e.g. C(10) will be C(10s). Required if the I(script) parameter is specified.
+ check_id:
+ type: str
+ description:
+ - an ID for the service check. If I(state=absent), defaults to
+ I(check_name). Ignored if part of a service definition.
+ check_name:
+ type: str
+ description:
+ - a name for the service check. Required if standalone, ignored if
+ part of service definition.
+ ttl:
+ type: str
+ description:
+ - checks can be registered with a ttl instead of a I(script) and I(interval);
+ this means that the service will check in with the agent before the
+ ttl expires. If it doesn't, the check will be considered failed.
+ Required if registering a check when I(script) and I(interval) are missing.
+ Similar to the interval, this is a number with a C(s) or C(m) suffix to
+ signify the units of seconds or minutes, e.g. C(15s) or C(1m). If no suffix
+ is supplied, C(s) will be used by default, e.g. C(1) will be C(1s)
+ tcp:
+ type: str
+ description:
+ - Checks can be registered with a TCP port. This means that consul
+ will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
+ The format is C(host:port), for example C(localhost:80).
+ I(interval) must also be provided with this option.
+ version_added: '1.3.0'
+ http:
+ type: str
+ description:
+ - checks can be registered with an HTTP endpoint. This means that consul
+ will check that the http endpoint returns a successful HTTP status.
+ I(interval) must also be provided with this option.
+ timeout:
+ type: str
+ description:
+ - A custom HTTP check timeout. The consul default is 10 seconds.
+ Similar to the interval this is a number with a C(s) or C(m) suffix to
+ signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+ token:
+ type: str
+ description:
+ - the token key identifying an ACL rule set. May be required to register services.
+'''
+
+EXAMPLES = '''
+- name: Register nginx service with the local consul agent
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+
+- name: Register nginx service with curl check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ script: curl http://localhost
+ interval: 60s
+
+- name: Register nginx with a tcp check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ tcp: localhost:80
+
+- name: Register nginx with an http check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ http: http://localhost:80/status
+
+- name: Register external service nginx available at 10.1.5.23
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ service_address: 10.1.5.23
+
+- name: Register nginx with some service tags
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ tags:
+ - prod
+ - webservers
+
+- name: Remove nginx service
+ community.general.consul:
+ service_name: nginx
+ state: absent
+
+- name: Register celery worker service
+ community.general.consul:
+ service_name: celery-worker
+ tags:
+ - prod
+ - worker
+
+- name: Create a node level check to test disk usage
+ community.general.consul:
+ check_name: Disk usage
+ check_id: disk_usage
+ script: /opt/disk_usage.py
+ interval: 5m
+
+- name: Register an http check against a service that's already registered
+ community.general.consul:
+ check_name: nginx-check2
+ check_id: nginx-check2
+ service_id: nginx
+ interval: 60s
+ http: http://localhost:80/morestatus
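+
+# An illustrative TTL check: the service itself must check in with the agent
+# before the ttl expires, otherwise consul considers the check failed.
+- name: Register a celery worker with a TTL check
+  community.general.consul:
+    service_name: celery-worker
+    ttl: 30s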
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+
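+ # python-consul's stock Agent.Service.deregister() does not forward an ACL
+ # token, so this patched subclass passes one through as a query parameter.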
+ class PatchedConsulAgentService(consul.Consul.Agent.Service):
+ def deregister(self, service_id, token=None):
+ params = {}
+ if token:
+ params['token'] = token
+ return self.agent.http.put(consul.base.CB.bool(),
+ '/v1/agent/service/deregister/%s' % service_id,
+ params=params)
+
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def register_with_consul(module):
+ state = module.params.get('state')
+
+ if state == 'present':
+ add(module)
+ else:
+ remove(module)
+
+
+def add(module):
+ ''' adds a service or a check depending on supplied configuration'''
+ check = parse_check(module)
+ service = parse_service(module)
+
+ if not service and not check:
+ module.fail_json(msg='a name and port are required to register a service')
+
+ if service:
+ if check:
+ service.add_check(check)
+ add_service(module, service)
+ elif check:
+ add_check(module, check)
+
+
+def remove(module):
+ ''' removes a service or a check '''
+ service_id = module.params.get('service_id') or module.params.get('service_name')
+ check_id = module.params.get('check_id') or module.params.get('check_name')
+ if not (service_id or check_id):
+ module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name')
+ if service_id:
+ remove_service(module, service_id)
+ else:
+ remove_check(module, check_id)
+
+
+def add_check(module, check):
+ ''' registers a check with the given agent. currently there is no way to
+ retrieve the full metadata of an existing check through the consul api.
+ Without this we can't compare to the supplied check and so we must assume
+ a change. '''
+ if not check.name and not check.service_id:
+ module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
+
+ consul_api = get_consul_api(module)
+ check.register(consul_api)
+
+ module.exit_json(changed=True,
+ check_id=check.check_id,
+ check_name=check.name,
+ script=check.script,
+ interval=check.interval,
+ ttl=check.ttl,
+ tcp=check.tcp,
+ http=check.http,
+ timeout=check.timeout,
+ service_id=check.service_id)
+
+
+def remove_check(module, check_id):
+ ''' removes a check using its id '''
+ consul_api = get_consul_api(module)
+
+ if check_id in consul_api.agent.checks():
+ consul_api.agent.check.deregister(check_id)
+ module.exit_json(changed=True, id=check_id)
+
+ module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+ ''' registers a service with the current agent '''
+ result = service
+ changed = False
+
+ consul_api = get_consul_api(module)
+ existing = get_service_by_id_or_name(consul_api, service.id)
+
+ # there is no way to retrieve the details of checks so if a check is present
+ # in the service it must be re-registered
+ if service.has_checks() or not existing or existing != service:
+
+ service.register(consul_api)
+ # check that it registered correctly
+ registered = get_service_by_id_or_name(consul_api, service.id)
+ if registered:
+ result = registered
+ changed = True
+
+ module.exit_json(changed=changed,
+ service_id=result.id,
+ service_name=result.name,
+ service_port=result.port,
+ checks=[check.to_dict() for check in service.checks],
+ tags=result.tags)
+
+
+def remove_service(module, service_id):
+ ''' deregister a service from the given agent using its service id '''
+ consul_api = get_consul_api(module)
+ service = get_service_by_id_or_name(consul_api, service_id)
+ if service:
+ consul_api.agent.service.deregister(service_id, token=module.params.get('token'))
+ module.exit_json(changed=True, id=service_id)
+
+ module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module):
+ consul_client = consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+ consul_client.agent.service = PatchedConsulAgentService(consul_client)
+ return consul_client
+
+
+def get_service_by_id_or_name(consul_api, service_id_or_name):
+ ''' iterate the registered services and find one with the given id '''
+ for name, service in consul_api.agent.services().items():
+ if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name:
+ return ConsulService(loaded=service)
+
+
+def parse_check(module):
+ if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1:
+ module.fail_json(
+ msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense')
+
+ if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'):
+
+ return ConsulCheck(
+ module.params.get('check_id'),
+ module.params.get('check_name'),
+ module.params.get('check_node'),
+ module.params.get('check_host'),
+ module.params.get('script'),
+ module.params.get('interval'),
+ module.params.get('ttl'),
+ module.params.get('notes'),
+ module.params.get('tcp'),
+ module.params.get('http'),
+ module.params.get('timeout'),
+ module.params.get('service_id'),
+ )
+
+
+def parse_service(module):
+ if module.params.get('service_name'):
+ return ConsulService(
+ module.params.get('service_id'),
+ module.params.get('service_name'),
+ module.params.get('service_address'),
+ module.params.get('service_port'),
+ module.params.get('tags'),
+ )
+ else:
+ module.fail_json(msg="service_name is required to configure a service.")
+
+
+class ConsulService():
+
+ def __init__(self, service_id=None, name=None, address=None, port=-1,
+ tags=None, loaded=None):
+ self.id = self.name = name
+ if service_id:
+ self.id = service_id
+ self.address = address
+ self.port = port
+ self.tags = tags
+ self.checks = []
+ if loaded:
+ self.id = loaded['ID']
+ self.name = loaded['Service']
+ self.port = loaded['Port']
+ self.tags = loaded['Tags']
+
+ def register(self, consul_api):
+ optional = {}
+
+ if self.port:
+ optional['port'] = self.port
+
+ if len(self.checks) > 0:
+ optional['check'] = self.checks[0].check
+
+ consul_api.agent.service.register(
+ self.name,
+ service_id=self.id,
+ address=self.address,
+ tags=self.tags,
+ **optional)
+
+ def add_check(self, check):
+ self.checks.append(check)
+
+ def has_checks(self):
+ return len(self.checks) > 0
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.id == other.id and
+ self.name == other.name and
+ self.port == other.port and
+ self.tags == other.tags)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {'id': self.id, "name": self.name}
+ if self.port:
+ data['port'] = self.port
+ if self.tags and len(self.tags) > 0:
+ data['tags'] = self.tags
+ if len(self.checks) > 0:
+ data['check'] = self.checks[0].to_dict()
+ return data
+
+
+class ConsulCheck(object):
+
+ def __init__(self, check_id, name, node=None, host='localhost',
+ script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
+ self.check_id = self.name = name
+ if check_id:
+ self.check_id = check_id
+ self.service_id = service_id
+ self.notes = notes
+ self.node = node
+ self.host = host
+
+ self.interval = self.validate_duration('interval', interval)
+ self.ttl = self.validate_duration('ttl', ttl)
+ self.script = script
+ self.tcp = tcp
+ self.http = http
+ self.timeout = self.validate_duration('timeout', timeout)
+
+ self.check = None
+
+ if script:
+ self.check = consul.Check.script(script, self.interval)
+
+ if ttl:
+ self.check = consul.Check.ttl(self.ttl)
+
+ if http:
+ if interval is None:
+ raise Exception('http check must specify interval')
+
+ self.check = consul.Check.http(http, self.interval, self.timeout)
+
+ if tcp:
+ if interval is None:
+ raise Exception('tcp check must specify interval')
+
+ regex = r"(?P<host>.*)(?::)(?P<port>(?:[0-9]+))$"
+ match = re.match(regex, tcp)
+
+ if match is None:
+ raise Exception('tcp check must be in host:port format')
+
+ self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
+
+ def validate_duration(self, name, duration):
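+ # consul expects durations to carry a unit suffix; bare numbers default to seconds here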
+ if duration:
+ duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+ if not any((duration.endswith(suffix) for suffix in duration_units)):
+ duration = "{0}s".format(duration)
+ return duration
+
+ def register(self, consul_api):
+ consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+ notes=self.notes,
+ check=self.check)
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.check_id == other.check_id and
+ self.service_id == other.service_id and
+ self.name == other.name and
+ self.script == other.script and
+ self.interval == other.interval)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {}
+ self._add(data, 'id', attr='check_id')
+ self._add(data, 'name')
+ self._add(data, 'script')
+ self._add(data, 'node')
+ self._add(data, 'notes')
+ self._add(data, 'host')
+ self._add(data, 'interval')
+ self._add(data, 'ttl')
+ self._add(data, 'tcp')
+ self._add(data, 'http')
+ self._add(data, 'timeout')
+ self._add(data, 'service_id')
+ return data
+
+ def _add(self, data, key, attr=None):
+ try:
+ if attr is None:
+ attr = key
+ data[key] = getattr(self, attr)
+ except Exception:
+ pass
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(default='localhost'),
+ port=dict(default=8500, type='int'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ check_id=dict(required=False),
+ check_name=dict(required=False),
+ check_node=dict(required=False),
+ check_host=dict(required=False),
+ notes=dict(required=False),
+ script=dict(required=False),
+ service_id=dict(required=False),
+ service_name=dict(required=False),
+ service_address=dict(required=False, type='str', default=None),
+ service_port=dict(required=False, type='int', default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ interval=dict(required=False, type='str'),
+ ttl=dict(required=False, type='str'),
+ tcp=dict(required=False, type='str'),
+ http=dict(required=False, type='str'),
+ timeout=dict(required=False, type='str'),
+ tags=dict(required=False, type='list'),
+ token=dict(required=False, no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ test_dependencies(module)
+
+ try:
+ register_with_consul(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py
new file mode 100644
index 00000000..06feeea1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py
@@ -0,0 +1,657 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_acl
+short_description: Manipulate Consul ACL keys and rules
+description:
+ - Allows the addition, modification and deletion of ACL keys and associated
+ rules in a consul cluster via the agent. For more details on using and
+ configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ mgmt_token:
+ description:
+ - a management token is required to manipulate the acl lists
+ required: true
+ state:
+ description:
+ - whether the ACL pair should be present or absent
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ token_type:
+ description:
+ - the type of token that should be created
+ choices: ['client', 'management']
+ default: client
+ name:
+ description:
+ - the name that should be associated with the acl key; this is opaque
+ to Consul
+ required: false
+ token:
+ description:
+ - the token key identifying an ACL rule set. If generated by consul,
+ this will be a UUID
+ required: false
+ rules:
+ type: list
+ description:
+ - rules that should be associated with a given token
+ required: false
+ host:
+ description:
+ - host of the consul agent; defaults to localhost
+ required: false
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ validate_certs:
+ type: bool
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+requirements:
+ - python-consul
+ - pyhcl
+ - requests
+'''
+
+EXAMPLES = """
+- name: Create an ACL with rules
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ rules:
+ - key: "foo"
+ policy: read
+ - key: "private/foo"
+ policy: deny
+
+- name: Create an ACL with a specific token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: my-token
+ rules:
+ - key: "foo"
+ policy: read
+
+- name: Update the rules associated to an ACL token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: some_client_token
+ rules:
+ - event: "bbq"
+ policy: write
+ - key: "foo"
+ policy: read
+ - key: "private"
+ policy: deny
+ - keyring: write
+ - node: "hgs4"
+ policy: write
+ - operator: read
+ - query: ""
+ policy: write
+ - service: "consul"
+ policy: write
+ - session: "standup"
+ policy: write
+
+- name: Remove a token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
+ state: absent
+"""
+
+RETURN = """
+token:
+ description: the token associated to the ACL (the ACL's ID)
+ returned: success
+ type: str
+ sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
+rules:
+ description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
+ Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
+ returned: I(state) == "present"
+ type: str
+ sample: {
+ "key": {
+ "foo": {
+ "policy": "write"
+ },
+ "bar": {
+ "policy": "deny"
+ }
+ }
+ }
+operation:
+ description: the operation performed on the ACL
+ returned: changed
+ type: str
+ sample: update
+"""
+
+
+try:
+ import consul
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+try:
+ import hcl
+ pyhcl_installed = True
+except ImportError:
+ pyhcl_installed = False
+
+try:
+ from requests.exceptions import ConnectionError
+ has_requests = True
+except ImportError:
+ has_requests = False
+
+from collections import defaultdict
+from ansible.module_utils.basic import to_text, AnsibleModule
+
+
+RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"]
+
+MANAGEMENT_PARAMETER_NAME = "mgmt_token"
+HOST_PARAMETER_NAME = "host"
+SCHEME_PARAMETER_NAME = "scheme"
+VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
+NAME_PARAMETER_NAME = "name"
+PORT_PARAMETER_NAME = "port"
+RULES_PARAMETER_NAME = "rules"
+STATE_PARAMETER_NAME = "state"
+TOKEN_PARAMETER_NAME = "token"
+TOKEN_TYPE_PARAMETER_NAME = "token_type"
+
+PRESENT_STATE_VALUE = "present"
+ABSENT_STATE_VALUE = "absent"
+
+CLIENT_TOKEN_TYPE_VALUE = "client"
+MANAGEMENT_TOKEN_TYPE_VALUE = "management"
+
+REMOVE_OPERATION = "remove"
+UPDATE_OPERATION = "update"
+CREATE_OPERATION = "create"
+
+_POLICY_JSON_PROPERTY = "policy"
+_RULES_JSON_PROPERTY = "Rules"
+_TOKEN_JSON_PROPERTY = "ID"
+_TOKEN_TYPE_JSON_PROPERTY = "Type"
+_NAME_JSON_PROPERTY = "Name"
+_POLICY_YML_PROPERTY = "policy"
+_POLICY_HCL_PROPERTY = "policy"
+
+_ARGUMENT_SPEC = {
+ MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
+ HOST_PARAMETER_NAME: dict(default='localhost'),
+ SCHEME_PARAMETER_NAME: dict(required=False, default='http'),
+ VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True),
+ NAME_PARAMETER_NAME: dict(required=False),
+ PORT_PARAMETER_NAME: dict(default=8500, type='int'),
+ RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'),
+ STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
+ TOKEN_PARAMETER_NAME: dict(required=False),
+ TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
+ default=CLIENT_TOKEN_TYPE_VALUE)
+}
+
+
+def set_acl(consul_client, configuration):
+ """
+ Sets an ACL based on the given configuration.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of setting the ACL
+ """
+ acls_as_json = decode_acls_as_json(consul_client.acl.list())
+ existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
+ existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
+ if None in existing_acls_mapped_by_token:
+ raise AssertionError("expecting ACL list to be associated to a token: %s" %
+ existing_acls_mapped_by_token[None])
+
+ if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
+ # No token but name given so can get token from name
+ configuration.token = existing_acls_mapped_by_name[configuration.name].token
+
+ if configuration.token and configuration.token in existing_acls_mapped_by_token:
+ return update_acl(consul_client, configuration)
+ else:
+ if configuration.token in existing_acls_mapped_by_token:
+ raise AssertionError()
+ if configuration.name in existing_acls_mapped_by_name:
+ raise AssertionError()
+ return create_acl(consul_client, configuration)
+
+
+def update_acl(consul_client, configuration):
+ """
+ Updates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the update
+ """
+ existing_acl = load_acl_with_token(consul_client, configuration.token)
+ changed = existing_acl.rules != configuration.rules
+
+ if changed:
+ name = configuration.name if configuration.name is not None else existing_acl.name
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
+ updated_token = consul_client.acl.update(
+ configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
+ if updated_token != configuration.token:
+ raise AssertionError()
+
+ return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
+
+
+def create_acl(consul_client, configuration):
+ """
+ Creates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the creation
+ """
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
+ token = consul_client.acl.create(
+ name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
+ rules = configuration.rules
+ return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
+
+
+def remove_acl(consul, configuration):
+ """
+ Removes an ACL.
+ :param consul: the consul client
+ :param configuration: the run configuration
+ :return: the output of the removal
+ """
+ token = configuration.token
+ changed = consul.acl.info(token) is not None
+ if changed:
+ consul.acl.destroy(token)
+ return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
+
+
+def load_acl_with_token(consul, token):
+ """
+ Loads the ACL with the given token (token == rule ID).
+ :param consul: the consul client
+ :param token: the ACL "token"/ID (not name)
+ :return: the ACL associated to the given token
+ :exception ConsulACLTokenNotFoundException: raised if the given token does not exist
+ """
+ acl_as_json = consul.acl.info(token)
+ if acl_as_json is None:
+ raise ConsulACLNotFoundException(token)
+ return decode_acl_as_json(acl_as_json)
+
+
+def encode_rules_as_hcl_string(rules):
+ """
+ Converts the given rules into the equivalent HCL (string) representation.
+ :param rules: the rules
+ :return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
+ note for justification)
+ """
+ if len(rules) == 0:
+ # Note: an empty string is not valid HCL according to `hcl.load`; however, the ACL `Rules` property will be an
+ # empty string if there are no rules...
+ return None
+ rules_as_hcl = ""
+ for rule in rules:
+ rules_as_hcl += encode_rule_as_hcl_string(rule)
+ return rules_as_hcl
+
+
+def encode_rule_as_hcl_string(rule):
+ """
+ Converts the given rule into the equivalent HCL (string) representation.
+ :param rule: the rule
+ :return: the equivalent HCL (string) representation of the rule
+ """
+ if rule.pattern is not None:
+ return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
+ else:
+ return '%s = "%s"\n' % (rule.scope, rule.policy)
+
+
+def decode_rules_as_hcl_string(rules_as_hcl):
+ """
+ Converts the given HCL (string) representation of rules into a list of rule domain models.
+ :param rules_as_hcl: the HCL (string) representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules_as_hcl = to_text(rules_as_hcl)
+ rules_as_json = hcl.loads(rules_as_hcl)
+ return decode_rules_as_json(rules_as_json)
+
+
+def decode_rules_as_json(rules_as_json):
+ """
+ Converts the given JSON representation of rules into a list of rule domain models.
+ :param rules_as_json: the JSON representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ for scope in rules_as_json:
+ if not isinstance(rules_as_json[scope], dict):
+ rules.add(Rule(scope, rules_as_json[scope]))
+ else:
+ for pattern, policy in rules_as_json[scope].items():
+ rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
+ return rules
+
+
+def encode_rules_as_json(rules):
+ """
+ Converts the given rules into the equivalent JSON representation according to the documentation:
+ https://www.consul.io/docs/guides/acl.html#rule-specification.
+ :param rules: the rules
+ :return: JSON representation of the given rules
+ """
+ rules_as_json = defaultdict(dict)
+ for rule in rules:
+ if rule.pattern is not None:
+ if rule.pattern in rules_as_json[rule.scope]:
+ raise AssertionError()
+ rules_as_json[rule.scope][rule.pattern] = {
+ _POLICY_JSON_PROPERTY: rule.policy
+ }
+ else:
+ if rule.scope in rules_as_json:
+ raise AssertionError()
+ rules_as_json[rule.scope] = rule.policy
+ return rules_as_json
+
+
+def decode_rules_as_yml(rules_as_yml):
+ """
+ Converts the given YAML representation of rules into a list of rule domain models.
+ :param rules_as_yml: the YAML representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ if rules_as_yml:
+ for rule_as_yml in rules_as_yml:
+ rule_added = False
+ for scope in RULE_SCOPES:
+ if scope in rule_as_yml:
+ if rule_as_yml[scope] is None:
+ raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
+ policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
+ else rule_as_yml[scope]
+ pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
+ rules.add(Rule(scope, policy, pattern))
+ rule_added = True
+ break
+ if not rule_added:
+ raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
+ return rules
+
+
+def decode_acl_as_json(acl_as_json):
+ """
+ Converts the given JSON representation of an ACL into the equivalent domain model.
+ :param acl_as_json: the JSON representation of an ACL
+ :return: the equivalent domain model to the given ACL
+ """
+ rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
+ rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
+ else RuleCollection()
+ return ACL(
+ rules=rules,
+ token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
+ token=acl_as_json[_TOKEN_JSON_PROPERTY],
+ name=acl_as_json[_NAME_JSON_PROPERTY]
+ )
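+
+# Illustrative sketch (not part of the module): the legacy Consul ACL JSON
+# consumed here is assumed to look like
+#   {"ID": "<token>", "Name": "foo", "Type": "client", "Rules": "<HCL string>"}
+# with the *_JSON_PROPERTY constants naming those keys.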
+
+
+def decode_acls_as_json(acls_as_json):
+ """
+ Converts the given JSON representation of ACLs into a list of ACL domain models.
+ :param acls_as_json: the JSON representation of a collection of ACLs
+ :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
+ """
+ return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
+
+
+class ConsulACLNotFoundException(Exception):
+ """
+    Exception raised if an ACL is not found.
+ """
+
+
+class Configuration:
+ """
+ Configuration for this module.
+ """
+
+ def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
+ rules=None, state=None, token=None, token_type=None):
+ self.management_token = management_token # type: str
+ self.host = host # type: str
+ self.scheme = scheme # type: str
+ self.validate_certs = validate_certs # type: bool
+ self.name = name # type: str
+ self.port = port # type: int
+ self.rules = rules # type: RuleCollection
+ self.state = state # type: str
+ self.token = token # type: str
+ self.token_type = token_type # type: str
+
+
+class Output:
+ """
+ Output of an action of this module.
+ """
+
+ def __init__(self, changed=None, token=None, rules=None, operation=None):
+ self.changed = changed # type: bool
+ self.token = token # type: str
+ self.rules = rules # type: RuleCollection
+ self.operation = operation # type: str
+
+
+class ACL:
+ """
+ Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
+ """
+
+ def __init__(self, rules, token_type, token, name):
+ self.rules = rules
+ self.token_type = token_type
+ self.token = token
+ self.name = name
+
+ def __eq__(self, other):
+ return other \
+ and isinstance(other, self.__class__) \
+ and self.rules == other.rules \
+ and self.token_type == other.token_type \
+ and self.token == other.token \
+ and self.name == other.name
+
+ def __hash__(self):
+ return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
+
+
+class Rule:
+ """
+ ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
+ """
+
+ def __init__(self, scope, policy, pattern=None):
+ self.scope = scope
+ self.policy = policy
+ self.pattern = pattern
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.scope == other.scope \
+ and self.policy == other.policy \
+ and self.pattern == other.pattern
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
+
+ def __str__(self):
+ return encode_rule_as_hcl_string(self)
+
+
+class RuleCollection:
+ """
+ Collection of ACL rules, which are part of a Consul ACL.
+ """
+
+ def __init__(self):
+ self._rules = {}
+ for scope in RULE_SCOPES:
+ self._rules[scope] = {}
+
+ def __iter__(self):
+ all_rules = []
+ for scope, pattern_keyed_rules in self._rules.items():
+ for pattern, rule in pattern_keyed_rules.items():
+ all_rules.append(rule)
+ return iter(all_rules)
+
+ def __len__(self):
+ count = 0
+ for scope in RULE_SCOPES:
+ count += len(self._rules[scope])
+ return count
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and set(self) == set(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return encode_rules_as_hcl_string(self)
+
+ def add(self, rule):
+ """
+ Adds the given rule to this collection.
+ :param rule: model of a rule
+ :raises ValueError: raised if there already exists a rule for a given scope and pattern
+ """
+ if rule.pattern in self._rules[rule.scope]:
+            pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
+            raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
+ self._rules[rule.scope][rule.pattern] = rule
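+
+
+# Illustrative usage sketch (not part of the module):
+#   rules = RuleCollection()
+#   rules.add(Rule("key", "write", "foo/"))
+#   rules.add(Rule("key", "write", "foo/"))  # raises ValueError: duplicate rule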
+
+
+def get_consul_client(configuration):
+ """
+ Gets a Consul client for the given configuration.
+
+ Does not check if the Consul client can connect.
+ :param configuration: the run configuration
+ :return: Consul client
+ """
+ token = configuration.management_token
+ if token is None:
+ token = configuration.token
+ if token is None:
+ raise AssertionError("Expecting the management token to always be set")
+ return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
+ verify=configuration.validate_certs, token=token)
+
+
+def check_dependencies():
+ """
+ Checks that the required dependencies have been imported.
+ :exception ImportError: if it is detected that any of the required dependencies have not been imported
+ """
+ if not python_consul_installed:
+ raise ImportError("python-consul required for this module. "
+ "See: https://python-consul.readthedocs.io/en/latest/#installation")
+
+ if not pyhcl_installed:
+ raise ImportError("pyhcl required for this module. "
+ "See: https://pypi.org/project/pyhcl/")
+
+ if not has_requests:
+ raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
+
+
+def main():
+ """
+ Main method.
+ """
+ module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
+
+ try:
+ check_dependencies()
+ except ImportError as e:
+ module.fail_json(msg=str(e))
+
+ configuration = Configuration(
+ management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
+ host=module.params.get(HOST_PARAMETER_NAME),
+ scheme=module.params.get(SCHEME_PARAMETER_NAME),
+ validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
+ name=module.params.get(NAME_PARAMETER_NAME),
+ port=module.params.get(PORT_PARAMETER_NAME),
+ rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
+ state=module.params.get(STATE_PARAMETER_NAME),
+ token=module.params.get(TOKEN_PARAMETER_NAME),
+ token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
+ )
+ consul_client = get_consul_client(configuration)
+
+ try:
+ if configuration.state == PRESENT_STATE_VALUE:
+ output = set_acl(consul_client, configuration)
+ else:
+ output = remove_acl(consul_client, configuration)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ configuration.host, configuration.port, str(e)))
+ raise
+
+ return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
+ if output.rules is not None:
+ return_values["rules"] = encode_rules_as_json(output.rules)
+ module.exit_json(**return_values)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py
new file mode 100644
index 00000000..ee5c3970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# (c) 2018 Genome Research Ltd.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_kv
+short_description: Manipulate entries in the key/value store of a consul cluster
+description:
+ - Allows the retrieval, addition, modification and deletion of key/value entries in a
+ consul cluster via the agent. The entire contents of the record, including
+ the indices, flags and session are returned as C(value).
+  - If the C(key) represents a prefix, note that when a value is removed, the existing
+    value, if any, is returned as part of the results.
+ - See http://www.consul.io/docs/agent/http.html#kv for more details.
+requirements:
+ - python-consul
+ - requests
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ state:
+ description:
+ - The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key
+ contents will be set to the value supplied and `changed` will be set to `true` only if the value was
+ different to the current contents. If the state is 'present' and `value` is not set, the existing value
+ associated to the key will be returned. The state 'absent' will remove the key/value pair,
+ again 'changed' will be set to true only if the key actually existed
+ prior to the removal. An attempt can be made to obtain or free the
+ lock associated with a key/value pair with the states 'acquire' or
+        'release' respectively. A valid session must be supplied to make the
+        attempt; C(changed) will be C(true) if the attempt is successful, C(false)
+        otherwise.
+ choices: [ absent, acquire, present, release ]
+ default: present
+ key:
+ description:
+ - The key at which the value should be stored.
+ type: str
+ required: yes
+ value:
+ description:
+ - The value should be associated with the given key, required if C(state)
+ is C(present).
+ type: str
+ recurse:
+ description:
+ - If the key represents a prefix, each entry with the prefix can be
+ retrieved by setting this to C(yes).
+ type: bool
+ retrieve:
+ description:
+ - If the I(state) is C(present) and I(value) is set, perform a
+ read after setting the value and return this value.
+ default: True
+ type: bool
+ session:
+ description:
+ - The session that should be used to acquire or release a lock
+ associated with a key/value pair.
+ type: str
+ token:
+ description:
+ - The token key identifying an ACL rule set that controls access to
+ the key value pair
+ type: str
+ cas:
+ description:
+ - Used when acquiring a lock with a session. If the C(cas) is C(0), then
+ Consul will only put the key if it does not already exist. If the
+ C(cas) value is non-zero, then the key is only set if the index matches
+ the ModifyIndex of that key.
+ type: str
+ flags:
+ description:
+ - Opaque positive integer value that can be passed when setting a value.
+ type: str
+ host:
+ description:
+ - Host of the consul agent.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the tls certificate of the consul agent.
+ type: bool
+ default: 'yes'
+'''
+
+
+EXAMPLES = '''
+# If the key does not exist, the value associated with the "data" property in `retrieved_key` will be `None`
+# If the key's value is an empty string, `retrieved_key["data"]["Value"]` will be `None`
+- name: Retrieve a value from the key/value store
+ community.general.consul_kv:
+ key: somekey
+ register: retrieved_key
+
+- name: Add or update the value associated with a key in the key/value store
+ community.general.consul_kv:
+ key: somekey
+ value: somevalue
+
+- name: Remove a key from the store
+ community.general.consul_kv:
+ key: somekey
+ state: absent
+
+- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
+ community.general.consul_kv:
+ key: ansible/groups/dc1/somenode
+ value: top_secret
+
+- name: Register a key/value pair with an associated session
+ community.general.consul_kv:
+ key: stg/node/server_birthday
+ value: 20160509
+ session: "{{ sessionid }}"
+ state: acquire
+'''
+
+from ansible.module_utils._text import to_text
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
+# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequent API call,
+# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
+NOT_SET = None
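+# For example, per the note above, consul_client.kv.put(key, None) ends up
+# storing an empty string for the key, which is why this module treats a value
+# of NOT_SET/None as "no value supplied" rather than passing it through.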
+
+
+def _has_value_changed(consul_client, key, target_value):
+ """
+ Uses the given Consul client to determine if the value associated to the given key is different to the given target
+ value.
+ :param consul_client: Consul connected client
+ :param key: key in Consul
+ :param target_value: value to be associated to the key
+ :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
+ value has changed (i.e. the stored value is not the target value)
+ """
+ index, existing = consul_client.kv.get(key)
+ if not existing:
+ return index, True
+ try:
+ changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
+ return index, changed
+ except UnicodeError:
+ # Existing value was not decodable but all values we set are valid utf-8
+ return index, True
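+
+# Illustrative usage sketch (not part of the module):
+#   index, changed = _has_value_changed(client, 'somekey', 'somevalue')
+# 'index' mirrors the X-Consul-Index header and 'changed' is True when the key
+# is missing or its stored value differs from 'somevalue'.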
+
+
+def execute(module):
+ state = module.params.get('state')
+
+ if state == 'acquire' or state == 'release':
+ lock(module, state)
+ elif state == 'present':
+ if module.params.get('value') is NOT_SET:
+ get_value(module)
+ else:
+ set_value(module)
+ elif state == 'absent':
+ remove_value(module)
+ else:
+ module.exit_json(msg="Unsupported state: %s" % (state, ))
+
+
+def lock(module, state):
+
+ consul_api = get_consul_api(module)
+
+ session = module.params.get('session')
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if not session:
+        module.fail_json(
+ msg='%s of lock for %s requested but no session supplied' %
+ (state, key))
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ if state == 'acquire':
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ acquire=session,
+ flags=module.params.get('flags'))
+ else:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ release=session,
+ flags=module.params.get('flags'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key)
+
+
+def get_value(module):
+ consul_api = get_consul_api(module)
+ key = module.params.get('key')
+
+ index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
+
+ module.exit_json(changed=False, index=index, data=existing_value)
+
+
+def set_value(module):
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if value is NOT_SET:
+ raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ flags=module.params.get('flags'))
+
+ stored = None
+ if module.params.get('retrieve'):
+ index, stored = consul_api.kv.get(key)
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=stored)
+
+
+def remove_value(module):
+    '''Remove the value associated with the given key. If the recurse parameter
+    is set, any key prefixed with the given key will be removed.'''
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+
+ index, existing = consul_api.kv.get(
+ key, recurse=module.params.get('recurse'))
+
+ changed = existing is not None
+ if changed and not module.check_mode:
+ consul_api.kv.delete(key, module.params.get('recurse'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=existing)
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cas=dict(type='str'),
+ flags=dict(type='str'),
+ key=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ port=dict(type='int', default=8500),
+ recurse=dict(type='bool'),
+ retrieve=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
+ token=dict(type='str', no_log=True),
+ value=dict(type='str', default=NOT_SET),
+ session=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py
new file mode 100644
index 00000000..f28d3a5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: consul_session
+short_description: Manipulate consul sessions
+description:
+ - Allows the addition, modification and deletion of sessions in a consul
+ cluster. These sessions can then be used in conjunction with key value pairs
+    to implement distributed locks. In-depth documentation for working with
+    sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+ - python-consul
+ - requests
+author:
+- Steve Gargan (@sgargan)
+options:
+ id:
+ description:
+ - ID of the session, required when I(state) is either C(info) or
+ C(remove).
+ type: str
+ state:
+ description:
+      - Whether the session should be present, i.e. created if it doesn't
+        exist, or absent, i.e. removed if present. If created, the I(id) for the
+        session is returned in the output. If C(absent), I(id) is
+        required to remove the session. Info for a single session, all the
+        sessions for a node or all available sessions can be retrieved by
+        specifying C(info), C(node) or C(list) for the I(state); for C(node)
+        or C(info), the node I(name) or session I(id) is required as a parameter.
+ choices: [ absent, info, list, node, present ]
+ type: str
+ default: present
+ name:
+ description:
+ - The name that should be associated with the session. Required when
+ I(state=node) is used.
+ type: str
+ delay:
+ description:
+ - The optional lock delay that can be attached to the session when it
+        is created. Locks for invalidated sessions are blocked from being
+ acquired until this delay has expired. Durations are in seconds.
+ type: int
+ default: 15
+ node:
+ description:
+      - The name of the node with which the session will be associated.
+        By default this is the name of the agent.
+ type: str
+ datacenter:
+ description:
+ - The name of the datacenter in which the session exists or should be
+ created.
+ type: str
+ checks:
+ description:
+ - Checks that will be used to verify the session health. If
+ all the checks fail, the session will be invalidated and any locks
+        associated with the session will be released and can be acquired once
+ the associated lock delay has expired.
+ type: list
+ host:
+ description:
+ - The host of the consul agent defaults to localhost.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ type: bool
+ default: True
+ behavior:
+ description:
+ - The optional behavior that can be attached to the session when it
+ is created. This controls the behavior when a session is invalidated.
+ choices: [ delete, release ]
+ type: str
+ default: release
+'''
+
+EXAMPLES = '''
+- name: Register basic session with consul
+ community.general.consul_session:
+ name: session1
+
+- name: Register a session with an existing check
+ community.general.consul_session:
+ name: session_with_check
+ checks:
+ - existing_check_name
+
+- name: Register a session with lock_delay
+ community.general.consul_session:
+ name: session_with_delay
+    delay: 20
+
+- name: Retrieve info about session by id
+ community.general.consul_session:
+ id: session_id
+ state: info
+
+- name: Retrieve active sessions
+ community.general.consul_session:
+ state: list
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state in ['info', 'list', 'node']:
+ lookup_sessions(module)
+ elif state == 'present':
+ update_session(module)
+ else:
+ remove_session(module)
+
+
+def lookup_sessions(module):
+
+ datacenter = module.params.get('datacenter')
+
+ state = module.params.get('state')
+ consul_client = get_consul_api(module)
+ try:
+ if state == 'list':
+ sessions_list = consul_client.session.list(dc=datacenter)
+ # Ditch the index, this can be grabbed from the results
+ if sessions_list and len(sessions_list) >= 2:
+ sessions_list = sessions_list[1]
+ module.exit_json(changed=True,
+ sessions=sessions_list)
+ elif state == 'node':
+ node = module.params.get('node')
+ sessions = consul_client.session.node(node, dc=datacenter)
+ module.exit_json(changed=True,
+ node=node,
+ sessions=sessions)
+ elif state == 'info':
+ session_id = module.params.get('id')
+
+ session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ module.exit_json(changed=True,
+ session_id=session_id,
+ sessions=session_by_id)
+
+ except Exception as e:
+ module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def update_session(module):
+
+ name = module.params.get('name')
+ delay = module.params.get('delay')
+ checks = module.params.get('checks')
+ datacenter = module.params.get('datacenter')
+ node = module.params.get('node')
+ behavior = module.params.get('behavior')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ session = consul_client.session.create(
+ name=name,
+ behavior=behavior,
+ node=node,
+ lock_delay=delay,
+ dc=datacenter,
+ checks=checks
+ )
+ module.exit_json(changed=True,
+ session_id=session,
+ name=name,
+ behavior=behavior,
+ delay=delay,
+ checks=checks,
+ node=node)
+ except Exception as e:
+ module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+ session_id = module.params.get('id')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ consul_client.session.destroy(session_id)
+
+ module.exit_json(changed=True,
+ session_id=session_id)
+ except Exception as e:
+ module.fail_json(msg="Could not remove session with id '%s' %s" % (
+ session_id, e))
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ argument_spec = dict(
+ checks=dict(type='list'),
+        delay=dict(type='int', default=15),
+ behavior=dict(type='str', default='release', choices=['release', 'delete']),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8500),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ node=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+ datacenter=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'node', ['name']),
+ ('state', 'info', ['id']),
+            ('state', 'absent', ['id']),
+ ],
+ supports_check_mode=False
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py
new file mode 100644
index 00000000..78838429
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+#
+# (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: etcd3
+short_description: "Set or delete key value pairs from an etcd3 cluster"
+requirements:
+ - etcd3
+description:
+  - Sets or deletes values in an etcd3 cluster using its v3 API.
+  - Needs the python etcd3 lib to work.
+options:
+ key:
+ description:
+ - the key where the information is stored in the cluster
+ required: true
+ value:
+ description:
+ - the information stored
+ required: true
+ host:
+ description:
+ - the IP address of the cluster
+ default: 'localhost'
+ port:
+ description:
+ - the port number used to connect to the cluster
+ default: 2379
+ state:
+ description:
+ - the state of the value for the key.
+ - can be present or absent
+ required: true
+ choices: [ present, absent ]
+ user:
+ description:
+ - The etcd user to authenticate with.
+ password:
+ description:
+ - The password to use for authentication.
+ - Required if I(user) is defined.
+ ca_cert:
+ description:
+ - The Certificate Authority to use to verify the etcd host.
+ - Required if I(client_cert) and I(client_key) are defined.
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if I(client_key) is defined.
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if I(client_cert) is defined.
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+author:
+ - Jean-Philippe Evrard (@evrardjp)
+ - Victor Fauth (@vfauth)
+'''
+
+EXAMPLES = """
+- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379"
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ host: "localhost"
+ port: 2379
+ state: "present"
+
+- name: Authenticate using user/password combination with a timeout of 10 seconds
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ user: "someone"
+ password: "password123"
+ timeout: 10
+
+- name: Authenticate using TLS certificates
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ ca_cert: "/etc/ssl/certs/CA_CERT.pem"
+ client_cert: "/etc/ssl/certs/cert.crt"
+ client_key: "/etc/ssl/private/key.pem"
+"""
+
+RETURN = '''
+key:
+ description: The key that was queried
+ returned: always
+ type: str
+old_value:
+ description: The previous value in the cluster
+ returned: always
+ type: str
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+try:
+ import etcd3
+ HAS_ETCD = True
+except ImportError:
+ ETCD_IMP_ERR = traceback.format_exc()
+ HAS_ETCD = False
+
+
+def run_module():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ key=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=2379),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ user=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ ca_cert=dict(type='path'),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ timeout=dict(type='int'),
+ )
+
+    # seed the result dict in the object;
+    # we primarily care about 'changed', which indicates whether this module
+    # effectively modified the target, and any data the module should pass
+    # back for consumption, for example, in a subsequent task
+ result = dict(
+ changed=False,
+ )
+
+    # the AnsibleModule object is our abstraction for working with Ansible;
+    # it holds the args/params passed to the execution and knows whether the
+    # module supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_together=[['client_cert', 'client_key'], ['user', 'password']],
+ )
+
+ # It is possible to set `ca_cert` to verify the server identity without
+ # setting `client_cert` or `client_key` to authenticate the client
+ # so required_together is enough
+ # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence
+ # of either `client_cert` or `client_key` is enough
+ if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
+ module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")
+
+ result['key'] = module.params.get('key')
+ module.params['cert_cert'] = module.params.pop('client_cert')
+ module.params['cert_key'] = module.params.pop('client_key')
+
+ if not HAS_ETCD:
+ module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)
+
+ allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
+ 'timeout', 'user', 'password']
+ # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
+ # the minimum supported version
+ # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
+ client_params = dict()
+ for key, value in module.params.items():
+ if key in allowed_keys:
+ client_params[key] = value
+ try:
+ etcd = etcd3.client(**client_params)
+ except Exception as exp:
+ module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+ try:
+ cluster_value = etcd.get(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+
+ # Make the cluster_value[0] a string for string comparisons
+ result['old_value'] = to_native(cluster_value[0])
+
+ if module.params['state'] == 'absent':
+ if cluster_value[0] is not None:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.delete(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ elif module.params['state'] == 'present':
+ if result['old_value'] != module.params['value']:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.put(module.params['key'], module.params['value'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="State not recognized")
+
+    # in the event of a successful module execution, pass the key/value
+    # results back via exit_json
+    module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py
new file mode 100644
index 00000000..6c285797
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Launch a Nomad Job
+description:
+ - Launch a Nomad job.
+ - Stop a Nomad job.
+    - Force start a Nomad job.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+        - Name of the job to delete, stop or start when no job source (I(content)) is supplied.
+        - Either this or I(content) must be specified.
+ type: str
+ state:
+ description:
+ - Deploy or remove job.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ force_start:
+ description:
+      - Force the job to start.
+ type: bool
+ default: false
+ content:
+ description:
+ - Content of Nomad job.
+ - Either this or I(name) must be specified.
+ type: str
+ content_format:
+ description:
+ - Type of content of Nomad job.
+ choices: ["hcl", "json"]
+ default: hcl
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Create job
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+ timeout: 120
+
+- name: Stop job
+ community.general.nomad_job:
+ host: localhost
+ state: absent
+ name: api
+
+- name: Force job to start
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ name: api
+ timeout: 120
+ force_start: true
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ content_format=dict(choices=['hcl', 'json'], default='hcl'),
+ content=dict(type='str', default=None),
+ force_start=dict(type='bool', default=False),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ["name", "content"]
+ ],
+ required_one_of=[
+ ['name', 'content']
+ ]
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
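+
+    # Flow sketch (descriptive comment only): for state=present the job content
+    # is first run through Nomad's plan endpoint; the job is only registered
+    # when the plan reports a Diff Type other than "None", and check mode
+    # returns the plan instead of registering.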
+
+ if module.params.get('state') == "present":
+
+ if module.params.get('name') and not module.params.get('force_start'):
+            module.fail_json(msg='To start a job by name, force_start is needed')
+
+ changed = False
+ if module.params.get('content'):
+
+ if module.params.get('content_format') == 'json':
+
+ job_json = module.params.get('content')
+ try:
+ job_json = json.loads(job_json)
+ except ValueError as e:
+ module.fail_json(msg=to_native(e))
+ job = dict()
+ job['job'] = job_json
+ try:
+ job_id = job_json.get('ID')
+ if job_id is None:
+ module.fail_json(msg="Cannot retrieve job with ID None")
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if not plan['Diff'].get('Type') == "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('content_format') == 'hcl':
+
+ try:
+ job_hcl = module.params.get('content')
+ job_json = nomad_client.jobs.parse(job_hcl)
+ job = dict()
+ job['job'] = job_json
+ except nomad.api.exceptions.BadRequestNomadException as err:
+ msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
+ module.fail_json(msg=to_native(msg))
+ try:
+ job_id = job_json.get('ID')
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if not plan['Diff'].get('Type') == "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('force_start'):
+
+ try:
+ job = dict()
+ if module.params.get('name'):
+ job_name = module.params.get('name')
+ else:
+ job_name = job_json['Name']
+ job_json = nomad_client.job.get_job(job_name)
+ if job_json['Status'] == 'running':
+ result = job_json
+ else:
+ job_json['Status'] = 'running'
+ job_json['Stop'] = False
+ job['job'] = job_json
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = nomad_client.validate.validate_job(job)
+ if not result.status_code == 200:
+ module.fail_json(msg=to_native(result.text))
+ result = json.loads(result.text)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('state') == "absent":
+
+ try:
+ if not module.params.get('name') is None:
+ job_name = module.params.get('name')
+ else:
+ if module.params.get('content_format') == 'hcl':
+ job_json = nomad_client.jobs.parse(module.params.get('content'))
+ job_name = job_json['Name']
+ if module.params.get('content_format') == 'json':
+                    job_json = json.loads(module.params.get('content'))
+ job_name = job_json['Name']
+ job = nomad_client.job.get_job(job_name)
+ if job['Status'] == 'dead':
+ changed = False
+ result = job
+ else:
+ if not module.check_mode:
+ result = nomad_client.job.deregister_job(job_name)
+ else:
+ result = job
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py
new file mode 100644
index 00000000..9e935328
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job_info
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Get Nomad Jobs info
+description:
+ - Get info for one Nomad job.
+ - List Nomad jobs.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+      - Name of the job to get info for.
+ - If not specified, lists all jobs.
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Get info for job awx
+    community.general.nomad_job_info:
+ host: localhost
+ name: awx
+ register: result
+
+- name: List Nomad jobs
+    community.general.nomad_job_info:
+ host: localhost
+ register: result
+
+'''
+
+RETURN = '''
+result:
+    description: List of dictionaries containing job info.
+ returned: success
+ type: list
+ sample: [
+ {
+ "Affinities": null,
+ "AllAtOnce": false,
+ "Constraints": null,
+ "ConsulToken": "",
+ "CreateIndex": 13,
+ "Datacenters": [
+ "dc1"
+ ],
+ "Dispatched": false,
+ "ID": "example",
+ "JobModifyIndex": 13,
+ "Meta": null,
+ "ModifyIndex": 13,
+ "Multiregion": null,
+ "Name": "example",
+ "Namespace": "default",
+ "NomadTokenID": "",
+ "ParameterizedJob": null,
+ "ParentID": "",
+ "Payload": null,
+ "Periodic": null,
+ "Priority": 50,
+ "Region": "global",
+ "Spreads": null,
+ "Stable": false,
+ "Status": "pending",
+ "StatusDescription": "",
+ "Stop": false,
+ "SubmitTime": 1602244370615307000,
+ "TaskGroups": [
+ {
+ "Affinities": null,
+ "Constraints": null,
+ "Count": 1,
+ "EphemeralDisk": {
+ "Migrate": false,
+ "SizeMB": 300,
+ "Sticky": false
+ },
+ "Meta": null,
+ "Migrate": {
+ "HealthCheck": "checks",
+ "HealthyDeadline": 300000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000
+ },
+ "Name": "cache",
+ "Networks": null,
+ "ReschedulePolicy": {
+ "Attempts": 0,
+ "Delay": 30000000000,
+ "DelayFunction": "exponential",
+ "Interval": 0,
+ "MaxDelay": 3600000000000,
+ "Unlimited": true
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Scaling": null,
+ "Services": null,
+ "ShutdownDelay": null,
+ "Spreads": null,
+ "StopAfterClientDisconnect": null,
+ "Tasks": [
+ {
+ "Affinities": null,
+ "Artifacts": null,
+ "CSIPluginConfig": null,
+ "Config": {
+ "image": "redis:3.2",
+ "port_map": [
+ {
+ "db": 6379.0
+ }
+ ]
+ },
+ "Constraints": null,
+ "DispatchPayload": null,
+ "Driver": "docker",
+ "Env": null,
+ "KillSignal": "",
+ "KillTimeout": 5000000000,
+ "Kind": "",
+ "Leader": false,
+ "Lifecycle": null,
+ "LogConfig": {
+ "MaxFileSizeMB": 10,
+ "MaxFiles": 10
+ },
+ "Meta": null,
+ "Name": "redis",
+ "Resources": {
+ "CPU": 500,
+ "Devices": null,
+ "DiskMB": 0,
+ "IOPS": 0,
+ "MemoryMB": 256,
+ "Networks": [
+ {
+ "CIDR": "",
+ "DNS": null,
+ "Device": "",
+ "DynamicPorts": [
+ {
+ "HostNetwork": "default",
+ "Label": "db",
+ "To": 0,
+ "Value": 0
+ }
+ ],
+ "IP": "",
+ "MBits": 10,
+ "Mode": "",
+ "ReservedPorts": null
+ }
+ ]
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Services": [
+ {
+ "AddressMode": "auto",
+ "CanaryMeta": null,
+ "CanaryTags": null,
+ "Checks": [
+ {
+ "AddressMode": "",
+ "Args": null,
+ "CheckRestart": null,
+ "Command": "",
+ "Expose": false,
+ "FailuresBeforeCritical": 0,
+ "GRPCService": "",
+ "GRPCUseTLS": false,
+ "Header": null,
+ "InitialStatus": "",
+ "Interval": 10000000000,
+ "Method": "",
+ "Name": "alive",
+ "Path": "",
+ "PortLabel": "",
+ "Protocol": "",
+ "SuccessBeforePassing": 0,
+ "TLSSkipVerify": false,
+ "TaskName": "",
+ "Timeout": 2000000000,
+ "Type": "tcp"
+ }
+ ],
+ "Connect": null,
+ "EnableTagOverride": false,
+ "Meta": null,
+ "Name": "redis-cache",
+ "PortLabel": "db",
+ "Tags": [
+ "global",
+ "cache"
+ ],
+ "TaskName": ""
+ }
+ ],
+ "ShutdownDelay": 0,
+ "Templates": null,
+ "User": "",
+ "Vault": null,
+ "VolumeMounts": null
+ }
+ ],
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "checks",
+ "HealthyDeadline": 180000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000,
+ "ProgressDeadline": 600000000000,
+ "Stagger": 30000000000
+ },
+ "Volumes": null
+ }
+ ],
+ "Type": "service",
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "",
+ "HealthyDeadline": 0,
+ "MaxParallel": 1,
+ "MinHealthyTime": 0,
+ "ProgressDeadline": 0,
+ "Stagger": 30000000000
+ },
+ "VaultNamespace": "",
+ "VaultToken": "",
+ "Version": 0
+ }
+ ]
+
+'''
+
+
+import os
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ changed = False
+ nomad_jobs = list()
+ try:
+ job_list = nomad_client.jobs.get_jobs()
+ for job in job_list:
+ nomad_jobs.append(nomad_client.job.get_job(job.get('ID')))
+ result = nomad_jobs
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('name'):
+        filtered = list()
+        try:
+            for job in result:
+                if job.get('ID') == module.params.get('name'):
+                    filtered.append(job)
+            result = filtered
+            if not filtered:
+ module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py
new file mode 100644
index 00000000..4ec6010f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Mathieu Bultel <mbultel@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacemaker_cluster
+short_description: Manage pacemaker clusters
+author:
+- Mathieu Bultel (@matbu)
+description:
+ - This module can manage a pacemaker cluster and nodes from Ansible using
+ the pacemaker cli.
+options:
+ state:
+ description:
+ - Indicate desired state of the cluster
+ choices: [ cleanup, offline, online, restart ]
+ type: str
+ node:
+ description:
+      - Specify which node of the cluster you want to manage. C(None) means the
+        cluster status itself; C(all) checks the status of all nodes.
+ type: str
+ timeout:
+ description:
+      - Timeout after which the module should consider that the action has failed.
+ default: 300
+ type: int
+ force:
+ description:
+ - Force the change of the cluster state
+ type: bool
+ default: 'yes'
+'''
+EXAMPLES = '''
+---
+- name: Set cluster Online
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Get cluster state
+ community.general.pacemaker_cluster:
+ state: online
+'''
+
+RETURN = '''
+changed:
+ description: True if the cluster state has changed
+ type: bool
+ returned: always
+out:
+    description: The output of the current state of the cluster. It returns a
+                 list of the node states.
+ type: str
+    sample: '[[" overcloud-controller-0", " Online"]]'
+ returned: always
+rc:
+ description: exit code of the module
+    type: int
+ returned: always
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+
+
+def get_cluster_status(module):
+ cmd = "pcs cluster status"
+ rc, out, err = module.run_command(cmd)
+    if _PCS_CLUSTER_DOWN in out:
+ return 'offline'
+ else:
+ return 'online'
+
+
+def get_node_status(module, node='all'):
+    if node == 'all':
+        cmd = "pcs cluster pcsd-status"
+    else:
+        cmd = "pcs cluster pcsd-status %s" % node
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ status = []
+ for o in out.splitlines():
+ status.append(o.split(':'))
+ return status
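+
+
+# Note (output format assumed): `pcs cluster pcsd-status` prints lines such as
+# "node1: Online"; get_node_status() splits each line on ':', so callers
+# compare node[1].strip().lower() against the desired state.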
+
+
+def clean_cluster(module, timeout):
+ cmd = "pcs resource cleanup"
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+
+def set_cluster(module, state, timeout, force):
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def set_node(module, state, timeout, force, node='all'):
+ # map states
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+
+ nodes_state = get_node_status(module, node)
+ for node in nodes_state:
+ if node[1].strip().lower() != state:
+ cmd = "%s %s" % (cmd, node[0].strip())
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ nodes_state = get_node_status(module)
+ for node in nodes_state:
+ if node[1].strip().lower() == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
+ node=dict(type='str'),
+ timeout=dict(type='int', default=300),
+ force=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ changed = False
+ state = module.params['state']
+ node = module.params['node']
+ force = module.params['force']
+ timeout = module.params['timeout']
+
+ if state in ['online', 'offline']:
+ # Get cluster status
+ if node is None:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Fail to bring the cluster %s" % state)
+ else:
+ cluster_state = get_node_status(module, node)
+ # Check cluster state
+ for node_state in cluster_state:
+ if node_state[1].strip().lower() == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ # Set cluster status if needed
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_node_status(module, node)
+ module.exit_json(changed=True, out=cluster_state)
+
+ if state in ['restart']:
+ set_cluster(module, 'offline', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'offline':
+ set_cluster(module, 'online', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'online':
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
+
+ if state in ['cleanup']:
+ clean_cluster(module, timeout)
+ cluster_state = get_cluster_status(module)
+ module.exit_json(changed=True,
+ out=cluster_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py
new file mode 100644
index 00000000..156a6376
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# Copyright 2015 WP Engine, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: znode
+short_description: Create, delete, retrieve, and update znodes using ZooKeeper
+description:
+ - Create, delete, retrieve, and update znodes using ZooKeeper.
+options:
+ hosts:
+ description:
+ - A list of ZooKeeper servers (format '[server]:[port]').
+ required: true
+ name:
+ description:
+ - The path of the znode.
+ required: true
+ value:
+ description:
+ - The value assigned to the znode.
+ op:
+ description:
+ - An operation to perform. Mutually exclusive with state.
+ choices: [ get, wait, list ]
+ state:
+ description:
+ - The state to enforce. Mutually exclusive with op.
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - The amount of time to wait for a node to appear.
+ default: 300
+ recursive:
+ description:
+ - Recursively delete node and all its children.
+ type: bool
+ default: 'no'
+requirements:
+ - kazoo >= 2.1
+ - python >= 2.6
+author: "Trey Perry (@treyperry)"
+'''
+
+EXAMPLES = """
+- name: Creating or updating a znode with a given value
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+
+- name: Getting the value and stat structure for a znode
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: get
+
+- name: Listing a particular znode's children
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /zookeeper
+ op: list
+
+- name: Waiting 20 seconds for a znode to appear at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: wait
+ timeout: 20
+
+- name: Deleting a znode at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ state: absent
+
+- name: Creating or updating a znode with a given value on a remote Zookeeper
+ community.general.znode:
+ hosts: 'my-zookeeper-node:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+ delegate_to: 127.0.0.1
+"""
+
+import time
+import traceback
+
+KAZOO_IMP_ERR = None
+try:
+ from kazoo.client import KazooClient
+ from kazoo.handlers.threading import KazooTimeoutError
+ KAZOO_INSTALLED = True
+except ImportError:
+ KAZOO_IMP_ERR = traceback.format_exc()
+ KAZOO_INSTALLED = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ hosts=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
+ state=dict(choices=['present', 'absent']),
+ timeout=dict(required=False, default=300, type='int'),
+ recursive=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ if not KAZOO_INSTALLED:
+ module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR)
+
+ check = check_params(module.params)
+ if not check['success']:
+ module.fail_json(msg=check['msg'])
+
+ zoo = KazooCommandProxy(module)
+ try:
+ zoo.start()
+ except KazooTimeoutError:
+ module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
+
+ command_dict = {
+ 'op': {
+ 'get': zoo.get,
+ 'list': zoo.list,
+ 'wait': zoo.wait
+ },
+ 'state': {
+ 'present': zoo.present,
+ 'absent': zoo.absent
+ }
+ }
+
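+    # check_params() above guarantees exactly one of 'op'/'state' is set, so
+    # exactly one handler is looked up in the dispatch table and invoked.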
+ command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
+ method = module.params[command_type]
+ result, result_dict = command_dict[command_type][method]()
+ zoo.shutdown()
+
+ if result:
+ module.exit_json(**result_dict)
+ else:
+ module.fail_json(**result_dict)
+
+
+def check_params(params):
+ if not params['state'] and not params['op']:
+ return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
+
+ if params['state'] and params['op']:
+ return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
+
+ return {'success': True}
+
+
+class KazooCommandProxy():
+ def __init__(self, module):
+ self.module = module
+ self.zk = KazooClient(module.params['hosts'])
+
+ def absent(self):
+ return self._absent(self.module.params['name'])
+
+ def exists(self, znode):
+ return self.zk.exists(znode)
+
+ def list(self):
+ children = self.zk.get_children(self.module.params['name'])
+ return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
+ 'znode': self.module.params['name']}
+
+ def present(self):
+ return self._present(self.module.params['name'], self.module.params['value'])
+
+ def get(self):
+ return self._get(self.module.params['name'])
+
+ def shutdown(self):
+ self.zk.stop()
+ self.zk.close()
+
+ def start(self):
+ self.zk.start()
+
+ def wait(self):
+ return self._wait(self.module.params['name'], self.module.params['timeout'])
+
+ def _absent(self, znode):
+ if self.exists(znode):
+ self.zk.delete(znode, recursive=self.module.params['recursive'])
+ return True, {'changed': True, 'msg': 'The znode was deleted.'}
+ else:
+ return True, {'changed': False, 'msg': 'The znode does not exist.'}
+
+ def _get(self, path):
+ if self.exists(path):
+ value, zstat = self.zk.get(path)
+ stat_dict = {}
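+            # copy the public int/str fields of kazoo's ZnodeStat (e.g. version,
+            # ctime, mtime) into a plain dict so they can be returned as JSON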
+ for i in dir(zstat):
+ if not i.startswith('_'):
+ attr = getattr(zstat, i)
+ if isinstance(attr, (int, str)):
+ stat_dict[i] = attr
+ result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
+ 'stat': stat_dict}
+ else:
+ result = False, {'msg': 'The requested node does not exist.'}
+
+ return result
+
+ def _present(self, path, value):
+ if self.exists(path):
+            (current_value, zstat) = self.zk.get(path)
+            # kazoo returns the stored value as bytes, so compare bytes with bytes
+            if to_bytes(value) != current_value:
+                self.zk.set(path, to_bytes(value))
+ return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
+ 'value': value}
+ else:
+ return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
+ else:
+ self.zk.create(path, to_bytes(value), makepath=True)
+ return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
+
+ def _wait(self, path, timeout, interval=5):
+ lim = time.time() + timeout
+
+ while time.time() < lim:
+ if self.exists(path):
+ return True, {'msg': 'The node appeared before the configured timeout.',
+ 'znode': path, 'timeout': timeout}
+ else:
+ time.sleep(interval)
+
+ return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
+ 'znode': path}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py
new file mode 100644
index 00000000..2e5f080d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_sync
+short_description: Sync Cobbler
+description:
+- Sync Cobbler to commit changes.
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Commit Cobbler changes
+ community.general.cobbler_sync:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ run_once: yes
+ delegate_to: localhost
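+
+# An illustrative variant (not among the original examples): reaching a
+# Cobbler server that only listens on plain HTTP, via the module's own
+# use_ssl option.
+- name: Commit Cobbler changes over plain HTTP
+  community.general.cobbler_sync:
+    host: cobbler01
+    use_ssl: no
+    username: cobbler
+    password: MySuperSecureP4sswOrd
+  run_once: yes
+  delegate_to: localhost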
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+    if not port:
+        # keep 'port' an int, matching the declared option type
+        module.params['port'] = 443 if use_ssl else 80
+
+ result = dict(
+ changed=True,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+        try:  # Python 2.7.9 and newer
+            ssl_context = ssl._create_unverified_context()
+        except AttributeError:  # Python 2.7.8 and older never verified HTTPS certificates by default
+            pass
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
+
+ if not module.check_mode:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py
new file mode 100644
index 00000000..ecabcc8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_system
+short_description: Manage system objects in Cobbler
+description:
+- Add, modify or remove systems in Cobbler
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The system name to manage.
+ type: str
+ properties:
+ description:
+ - A dictionary with system properties.
+ type: dict
+ interfaces:
+ description:
+    - A dictionary of interfaces, keyed by interface name, each value being a dictionary of interface options.
+ type: dict
+ sync:
+ description:
+ - Sync on changes.
+ - Concurrently syncing Cobbler is bound to fail.
+ type: bool
+ default: no
+ state:
+ description:
+ - Whether the system should be present, absent or a query is made.
+ choices: [ absent, present, query ]
+ default: present
+ type: str
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Ensure the system exists in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ properties:
+ profile: CentOS6-x86_64
+ name_servers: [ 2.3.4.5, 3.4.5.6 ]
+ name_servers_search: foo.com, bar.com
+ interfaces:
+ eth0:
+ macaddress: 00:01:02:03:04:05
+ ipaddress: 1.2.3.4
+ delegate_to: localhost
+
+- name: Enable network boot in Cobbler
+ community.general.cobbler_system:
+ host: bdsol-aci-cobbler-01
+ username: cobbler
+ password: ins3965!
+ name: bdsol-aci51-apic1.cisco.com
+ properties:
+ netboot_enabled: yes
+ state: present
+ delegate_to: localhost
+
+- name: Query all systems in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ state: query
+ register: cobbler_systems
+ delegate_to: localhost
+
+- name: Query a specific system in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: '{{ inventory_hostname }}'
+ state: query
+ register: cobbler_properties
+ delegate_to: localhost
+
+- name: Ensure the system does not exist in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+systems:
+ description: List of systems
+  returned: when C(state=query) and C(name) is not provided
+ type: list
+system:
+ description: (Resulting) information about the system we are working with
+ returned: when C(name) is provided
+ type: dict
+'''
+
+import copy
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+IFPROPS_MAPPING = dict(
+ bondingopts='bonding_opts',
+ bridgeopts='bridge_opts',
+ connected_mode='connected_mode',
+ cnames='cnames',
+ dhcptag='dhcp_tag',
+ dnsname='dns_name',
+ ifgateway='if_gateway',
+ interfacetype='interface_type',
+ interfacemaster='interface_master',
+ ipaddress='ip_address',
+ ipv6address='ipv6_address',
+ ipv6defaultgateway='ipv6_default_gateway',
+ ipv6mtu='ipv6_mtu',
+ ipv6prefix='ipv6_prefix',
+    ipv6secondaries='ipv6_secondaries',
+ ipv6staticroutes='ipv6_static_routes',
+ macaddress='mac_address',
+ management='management',
+ mtu='mtu',
+ netmask='netmask',
+ static='static',
+ staticroutes='static_routes',
+ virtbridge='virt_bridge',
+)
+
+
+def getsystem(conn, name, token):
+ system = dict()
+ if name:
+ # system = conn.get_system(name, token)
+ systems = conn.find_system(dict(name=name), token)
+ if systems:
+ system = systems[0]
+ return system
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ name=dict(type='str'),
+ interfaces=dict(type='dict'),
+ properties=dict(type='dict'),
+ sync=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ name = module.params['name']
+ state = module.params['state']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+    if not port:
+        # keep 'port' an int, matching the declared option type
+        module.params['port'] = 443 if use_ssl else 80
+
+ result = dict(
+ changed=False,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+        try:  # Python 2.7.9 and newer
+            ssl_context = ssl._create_unverified_context()
+        except AttributeError:  # Python 2.7.8 and older never verified HTTPS certificates by default
+            pass
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))
+
+ system = getsystem(conn, name, token)
+ # result['system'] = system
+
+ if state == 'query':
+ if name:
+ result['system'] = system
+ else:
+ # Turn it into a dictionary of dictionaries
+ # all_systems = conn.get_systems()
+ # result['systems'] = { system['name']: system for system in all_systems }
+
+ # Return a list of dictionaries
+ result['systems'] = conn.get_systems()
+
+ elif state == 'present':
+
+ if system:
+ # Update existing entry
+ system_id = conn.get_system_handle(name, token)
+
+            if module.params['properties']:
+                for key, value in iteritems(module.params['properties']):
+                    if key not in system:
+                        module.warn("Property '{0}' is not a valid system property.".format(key))
+                        continue
+                    if system[key] != value:
+                        try:
+                            conn.modify_system(system_id, key, value, token)
+                            result['changed'] = True
+                        except Exception as e:
+                            module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ else:
+ # Create a new entry
+ system_id = conn.new_system(token)
+ conn.modify_system(system_id, 'name', name, token)
+ result['changed'] = True
+
+ if module.params['properties']:
+ for key, value in iteritems(module.params['properties']):
+ try:
+ conn.modify_system(system_id, key, value, token)
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ # Add interface properties
+ interface_properties = dict()
+ if module.params['interfaces']:
+ for device, values in iteritems(module.params['interfaces']):
+ for key, value in iteritems(values):
+ if key == 'name':
+ continue
+                    if key not in IFPROPS_MAPPING:
+                        module.warn("Property '{0}' is not a valid interface property.".format(key))
+                        continue
+                    if not system or device not in system['interfaces'] or system['interfaces'][device][IFPROPS_MAPPING[key]] != value:
+                        result['changed'] = True
+                        # Cobbler expects interface properties keyed as '<property>-<interface>'
+                        interface_properties['{0}-{1}'.format(key, device)] = value
+
+            if interface_properties:
+                conn.modify_system(system_id, "modify_interface", interface_properties, token)
+
+ # Only save when the entry was changed
+ if not module.check_mode and result['changed']:
+ conn.save_system(system_id, token)
+
+ elif state == 'absent':
+
+ if system:
+ if not module.check_mode:
+ conn.remove_system(name, token)
+ result['changed'] = True
+
+ if not module.check_mode and module.params['sync'] and result['changed']:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))
+
+ if state in ('absent', 'present'):
+ result['system'] = getsystem(conn, name, token)
+
+ if module._diff:
+ result['diff'] = dict(before=system, after=result['system'])
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py
new file mode 100644
index 00000000..3bc09c2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: composer
+author:
+ - "Dimitrios Tydeas Mengidis (@dmtrs)"
+ - "René Moser (@resmo)"
+short_description: Dependency Manager for PHP
+description:
+ - >
+ Composer is a tool for dependency management in PHP. It allows you to
+ declare the dependent libraries your project needs and it will install
+ them in your project for you.
+options:
+ command:
+ type: str
+ description:
+ - Composer command like "install", "update" and so on.
+ default: install
+ arguments:
+ type: str
+ description:
+ - Composer arguments like required package, version and so on.
+ executable:
+ type: path
+ description:
+ - Path to PHP Executable on the remote host, if PHP is not in PATH.
+ aliases: [ php_path ]
+ working_dir:
+ type: path
+ description:
+ - Directory of your project (see --working-dir). This is required when
+ the command is not run globally.
+ - Will be ignored if C(global_command=true).
+ aliases: [ working-dir ]
+ global_command:
+ description:
+ - Runs the specified command globally.
+ type: bool
+ default: false
+ aliases: [ global-command ]
+ prefer_source:
+ description:
+ - Forces installation from package sources when possible (see --prefer-source).
+ default: false
+ type: bool
+ aliases: [ prefer-source ]
+ prefer_dist:
+ description:
+ - Forces installation from package dist even for dev versions (see --prefer-dist).
+ default: false
+ type: bool
+ aliases: [ prefer-dist ]
+ no_dev:
+ description:
+ - Disables installation of require-dev packages (see --no-dev).
+ default: true
+ type: bool
+ aliases: [ no-dev ]
+ no_scripts:
+ description:
+ - Skips the execution of all scripts defined in composer.json (see --no-scripts).
+ default: false
+ type: bool
+ aliases: [ no-scripts ]
+ no_plugins:
+ description:
+    - Disables all plugins (see --no-plugins).
+ default: false
+ type: bool
+ aliases: [ no-plugins ]
+ optimize_autoloader:
+ description:
+ - Optimize autoloader during autoloader dump (see --optimize-autoloader).
+ - Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: true
+ type: bool
+ aliases: [ optimize-autoloader ]
+ classmap_authoritative:
+ description:
+ - Autoload classes from classmap only.
+    - Implicitly enables optimize_autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: false
+ type: bool
+ aliases: [ classmap-authoritative ]
+ apcu_autoloader:
+ description:
+    - Uses APCu to cache found/not-found classes.
+ default: false
+ type: bool
+ aliases: [ apcu-autoloader ]
+ ignore_platform_reqs:
+ description:
+ - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
+ default: false
+ type: bool
+ aliases: [ ignore-platform-reqs ]
+requirements:
+ - php
+ - composer installed in bin path (recommended /usr/local/bin)
+notes:
+ - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
+ - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
+'''
+
+EXAMPLES = '''
+- name: Download and install all libs and dependencies outlined in the /path/to/project/composer.lock
+ community.general.composer:
+ command: install
+ working_dir: /path/to/project
+
+- name: Install a new package
+ community.general.composer:
+ command: require
+ arguments: my/package
+ working_dir: /path/to/project
+
+- name: Clone and install a project with all dependencies
+ community.general.composer:
+ command: create-project
+ arguments: package/package /path/to/project ~1.0
+ working_dir: /path/to/project
+ prefer_dist: yes
+
+- name: Install a package globally
+ community.general.composer:
+ command: require
+ global_command: yes
+ arguments: my/package
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_out(string):
+ return re.sub(r"\s+", " ", string).strip()
+
+
+def has_changed(string):
+ for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]:
+ if no_change in string:
+ return False
+
+ return True
+
+
+def get_available_options(module, command='install'):
+ # get all available options from a composer command using composer help to json
+ rc, out, err = composer_command(module, "help %s --format=json" % command)
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output)
+
+ command_help_json = module.from_json(out)
+ return command_help_json['definition']['options']
+
+
+def composer_command(module, command, arguments="", options=None, global_command=False):
+ if options is None:
+ options = []
+
+ if module.params['executable'] is None:
+ php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
+ else:
+ php_path = module.params['executable']
+
+ composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
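+    # Illustrative shape of the assembled command line:
+    #   <php> <composer> [global] <command> <options...> <arguments>
+    # e.g. /usr/bin/php /usr/local/bin/composer require --no-interaction my/package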
+ cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
+ return module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(default="install", type="str"),
+ arguments=dict(default="", type="str"),
+ executable=dict(type="path", aliases=["php_path"]),
+ working_dir=dict(type="path", aliases=["working-dir"]),
+ global_command=dict(default=False, type="bool", aliases=["global-command"]),
+ prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
+ prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
+ no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
+ no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
+ no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
+ apcu_autoloader=dict(default=False, type="bool", aliases=["apcu-autoloader"]),
+ optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
+ classmap_authoritative=dict(default=False, type="bool", aliases=["classmap-authoritative"]),
+ ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
+ ),
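+        # working_dir is mandatory whenever the command is not run globally
+        # (i.e. when global_command=false)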
+ required_if=[('global_command', False, ['working_dir'])],
+ supports_check_mode=True
+ )
+
+ # Get composer command with fallback to default
+ command = module.params['command']
+ if re.search(r"\s", command):
+ module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+ arguments = module.params['arguments']
+ global_command = module.params['global_command']
+ available_options = get_available_options(module=module, command=command)
+
+ options = []
+
+ # Default options
+ default_options = [
+ 'no-ansi',
+ 'no-interaction',
+ 'no-progress',
+ ]
+
+ for option in default_options:
+ if option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if not global_command:
+ options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
+ option_params = {
+ 'prefer_source': 'prefer-source',
+ 'prefer_dist': 'prefer-dist',
+ 'no_dev': 'no-dev',
+ 'no_scripts': 'no-scripts',
+ 'no_plugins': 'no-plugins',
+        'apcu_autoloader': 'apcu-autoloader',
+ 'optimize_autoloader': 'optimize-autoloader',
+ 'classmap_authoritative': 'classmap-authoritative',
+ 'ignore_platform_reqs': 'ignore-platform-reqs',
+ }
+
+ for param, option in option_params.items():
+ if module.params.get(param) and option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if module.check_mode:
+ if 'dry-run' in available_options:
+ options.append('--dry-run')
+ else:
+ module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
+
+ rc, out, err = composer_command(module, command, arguments, options, global_command)
+
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output, stdout=err)
+ else:
+ # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
+ output = parse_out(out + err)
+ module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py
new file mode 100644
index 00000000..dd8a5f50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py
@@ -0,0 +1,603 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul
+short_description: "Add, modify & delete services within a consul cluster."
+description:
+ - Registers services and checks for an agent with a consul cluster.
+ A service is some process running on the agent node that should be advertised by
+ consul's discovery mechanism. It may optionally supply a check definition,
+ a periodic service test to notify the consul cluster of service's health.
+ - "Checks may also be registered per node e.g. disk usage, or cpu usage and
+ notify the health of the entire node to the cluster.
+ Service level checks do not require a check name or id as these are derived
+ by Consul from the Service name and id respectively by appending 'service:'
+ Node level checks require a I(check_name) and optionally a I(check_id)."
+ - Currently, there is no complete way to retrieve the script, interval or ttl
+ metadata for a registered check. Without this metadata it is not possible to
+ tell if the data supplied with ansible represents a change to a check. As a
+    result this module does not attempt to determine changes and will always report
+    that a change occurred. An API method is planned to supply this metadata so at that
+ stage change management will be added.
+ - "See U(http://consul.io) for more details."
+requirements:
+ - python-consul
+ - requests
+author: "Steve Gargan (@sgargan)"
+options:
+ state:
+ description:
+ - register or deregister the consul service, defaults to present
+ default: present
+ choices: ['present', 'absent']
+ service_name:
+ type: str
+ description:
+      - Unique name for the service on a node; must be unique per node.
+        Required if registering a service. May be omitted if registering
+        a node level check.
+ service_id:
+ type: str
+ description:
+ - the ID for the service, must be unique per node. If I(state=absent),
+ defaults to the service name if supplied.
+ host:
+ type: str
+ description:
+ - host of the consul agent defaults to localhost
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ default: 8500
+ scheme:
+ type: str
+ description:
+ - the protocol scheme on which the consul agent is running
+ default: http
+ validate_certs:
+ description:
+ - whether to verify the TLS certificate of the consul agent
+ type: bool
+ default: 'yes'
+ notes:
+ type: str
+ description:
+ - Notes to attach to check when registering it.
+ service_port:
+ type: int
+ description:
+ - the port on which the service is listening. Can optionally be supplied for
+ registration of a service, i.e. if I(service_name) or I(service_id) is set
+ service_address:
+ type: str
+ description:
+ - the address to advertise that the service will be listening on.
+ This value will be passed as the I(address) parameter to Consul's
+ U(/v1/agent/service/register) API method, so refer to the Consul API
+ documentation for further details.
+ tags:
+ type: list
+ description:
+ - tags that will be attached to the service registration.
+ script:
+ type: str
+ description:
+ - the script/command that will be run periodically to check the health
+ of the service. Scripts require I(interval) and vice versa.
+ interval:
+ type: str
+ description:
+      - the interval at which the service check will be run. This is a number
+        with a C(s) or C(m) suffix to signify the units of seconds or minutes,
+        e.g. C(15s) or C(1m). If no suffix is supplied, C(s) is used by default,
+        e.g. C(10) will be C(10s). Required if the I(script) parameter is specified.
+ check_id:
+ type: str
+ description:
+ - an ID for the service check. If I(state=absent), defaults to
+ I(check_name). Ignored if part of a service definition.
+ check_name:
+ type: str
+ description:
+ - a name for the service check. Required if standalone, ignored if
+ part of service definition.
+ ttl:
+ type: str
+ description:
+      - checks can be registered with a ttl instead of a I(script) and I(interval).
+        This means that the service will check in with the agent before the
+        ttl expires; if it doesn't, the check will be considered failed.
+        Required if registering a check when I(script) and I(interval) are missing.
+        Similar to the interval, this is a number with a C(s) or C(m) suffix to
+        signify the units of seconds or minutes, e.g. C(15s) or C(1m). If no suffix
+        is supplied, C(s) is used by default, e.g. C(30) will be C(30s).
+ tcp:
+ type: str
+ description:
+ - Checks can be registered with a TCP port. This means that consul
+ will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
+ The format is C(host:port), for example C(localhost:80).
+ I(interval) must also be provided with this option.
+ version_added: '1.3.0'
+ http:
+ type: str
+ description:
+ - checks can be registered with an HTTP endpoint. This means that consul
+ will check that the http endpoint returns a successful HTTP status.
+ I(interval) must also be provided with this option.
+ timeout:
+ type: str
+ description:
+ - A custom HTTP check timeout. The consul default is 10 seconds.
+ Similar to the interval this is a number with a C(s) or C(m) suffix to
+ signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+ token:
+ type: str
+ description:
+ - the token key identifying an ACL rule set. May be required to register services.
+'''
+
+EXAMPLES = '''
+- name: Register nginx service with the local consul agent
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+
+- name: Register nginx service with curl check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ script: curl http://localhost
+ interval: 60s
+
+- name: Register nginx with a tcp check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ tcp: localhost:80
+
+- name: Register nginx with an http check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ http: http://localhost:80/status
+
+- name: Register external service nginx available at 10.1.5.23
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ service_address: 10.1.5.23
+
+- name: Register nginx with some service tags
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ tags:
+ - prod
+ - webservers
+
+- name: Remove nginx service
+ community.general.consul:
+ service_name: nginx
+ state: absent
+
+- name: Register celery worker service
+ community.general.consul:
+ service_name: celery-worker
+ tags:
+ - prod
+ - worker
+
+- name: Create a node level check to test disk usage
+ community.general.consul:
+ check_name: Disk usage
+ check_id: disk_usage
+ script: /opt/disk_usage.py
+ interval: 5m
+
+- name: Register an http check against a service that's already registered
+ community.general.consul:
+ check_name: nginx-check2
+ check_id: nginx-check2
+ service_id: nginx
+ interval: 60s
+ http: http://localhost:80/morestatus
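+
+# An illustrative addition (not among the original examples): a standalone
+# ttl check, where the monitored application itself must check in with the
+# agent before the ttl expires or the check is considered failed.
+- name: Register a standalone ttl check
+  community.general.consul:
+    check_name: app-heartbeat
+    check_id: app-heartbeat
+    ttl: 30s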
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+
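+    # python-consul's stock Agent.Service.deregister() does not pass an ACL
+    # token, so this subclass re-implements it with optional token support.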
+ class PatchedConsulAgentService(consul.Consul.Agent.Service):
+ def deregister(self, service_id, token=None):
+ params = {}
+ if token:
+ params['token'] = token
+ return self.agent.http.put(consul.base.CB.bool(),
+ '/v1/agent/service/deregister/%s' % service_id,
+ params=params)
+
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def register_with_consul(module):
+ state = module.params.get('state')
+
+ if state == 'present':
+ add(module)
+ else:
+ remove(module)
+
+
+def add(module):
+ ''' adds a service or a check depending on supplied configuration'''
+ check = parse_check(module)
+ service = parse_service(module)
+
+ if not service and not check:
+ module.fail_json(msg='a name and port are required to register a service')
+
+ if service:
+ if check:
+ service.add_check(check)
+ add_service(module, service)
+ elif check:
+ add_check(module, check)
+
+
+def remove(module):
+ ''' removes a service or a check '''
+ service_id = module.params.get('service_id') or module.params.get('service_name')
+ check_id = module.params.get('check_id') or module.params.get('check_name')
+ if not (service_id or check_id):
+ module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name')
+ if service_id:
+ remove_service(module, service_id)
+ else:
+ remove_check(module, check_id)
+
+
+def add_check(module, check):
+    ''' registers a check with the given agent. currently there is no way to
+        retrieve the full metadata of an existing check through the consul api.
+ Without this we can't compare to the supplied check and so we must assume
+ a change. '''
+ if not check.name and not check.service_id:
+ module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
+
+ consul_api = get_consul_api(module)
+ check.register(consul_api)
+
+ module.exit_json(changed=True,
+ check_id=check.check_id,
+ check_name=check.name,
+ script=check.script,
+ interval=check.interval,
+ ttl=check.ttl,
+ tcp=check.tcp,
+ http=check.http,
+ timeout=check.timeout,
+ service_id=check.service_id)
+
+
+def remove_check(module, check_id):
+ ''' removes a check using its id '''
+ consul_api = get_consul_api(module)
+
+ if check_id in consul_api.agent.checks():
+ consul_api.agent.check.deregister(check_id)
+ module.exit_json(changed=True, id=check_id)
+
+ module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+ ''' registers a service with the current agent '''
+ result = service
+ changed = False
+
+ consul_api = get_consul_api(module)
+ existing = get_service_by_id_or_name(consul_api, service.id)
+
+ # there is no way to retrieve the details of checks so if a check is present
+ # in the service it must be re-registered
+ if service.has_checks() or not existing or not existing == service:
+
+ service.register(consul_api)
+ # check that it registered correctly
+ registered = get_service_by_id_or_name(consul_api, service.id)
+ if registered:
+ result = registered
+ changed = True
+
+ module.exit_json(changed=changed,
+ service_id=result.id,
+ service_name=result.name,
+ service_port=result.port,
+ checks=[check.to_dict() for check in service.checks],
+ tags=result.tags)
+
+
+def remove_service(module, service_id):
+ ''' deregister a service from the given agent using its service id '''
+ consul_api = get_consul_api(module)
+ service = get_service_by_id_or_name(consul_api, service_id)
+ if service:
+ consul_api.agent.service.deregister(service_id, token=module.params.get('token'))
+ module.exit_json(changed=True, id=service_id)
+
+ module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module, token=None):
+ consulClient = consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+ consulClient.agent.service = PatchedConsulAgentService(consulClient)
+ return consulClient
+
+
+def get_service_by_id_or_name(consul_api, service_id_or_name):
+ ''' iterate the registered services and find one with the given id '''
+ for name, service in consul_api.agent.services().items():
+ if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name:
+ return ConsulService(loaded=service)
+
+
+def parse_check(module):
+ if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1:
+ module.fail_json(
+ msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense')
+
+ if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'):
+
+ return ConsulCheck(
+ module.params.get('check_id'),
+ module.params.get('check_name'),
+ module.params.get('check_node'),
+ module.params.get('check_host'),
+ module.params.get('script'),
+ module.params.get('interval'),
+ module.params.get('ttl'),
+ module.params.get('notes'),
+ module.params.get('tcp'),
+ module.params.get('http'),
+ module.params.get('timeout'),
+ module.params.get('service_id'),
+ )
+
+
+def parse_service(module):
+ if module.params.get('service_name'):
+ return ConsulService(
+ module.params.get('service_id'),
+ module.params.get('service_name'),
+ module.params.get('service_address'),
+ module.params.get('service_port'),
+ module.params.get('tags'),
+ )
+    elif module.params.get('service_id') or module.params.get('service_port'):
+        # a service id/port without a name is a configuration error; a missing
+        # service block altogether is fine (node level check registration)
+        module.fail_json(msg="service_name is required to configure a service.")
+
+
+class ConsulService():
+
+ def __init__(self, service_id=None, name=None, address=None, port=-1,
+ tags=None, loaded=None):
+ self.id = self.name = name
+ if service_id:
+ self.id = service_id
+ self.address = address
+ self.port = port
+ self.tags = tags
+ self.checks = []
+ if loaded:
+ self.id = loaded['ID']
+ self.name = loaded['Service']
+ self.port = loaded['Port']
+ self.tags = loaded['Tags']
+
+ def register(self, consul_api):
+ optional = {}
+
+ if self.port:
+ optional['port'] = self.port
+
+ if len(self.checks) > 0:
+ optional['check'] = self.checks[0].check
+
+ consul_api.agent.service.register(
+ self.name,
+ service_id=self.id,
+ address=self.address,
+ tags=self.tags,
+ **optional)
+
+ def add_check(self, check):
+ self.checks.append(check)
+
+ def has_checks(self):
+ return len(self.checks) > 0
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.id == other.id and
+ self.name == other.name and
+ self.port == other.port and
+ self.tags == other.tags)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {'id': self.id, "name": self.name}
+ if self.port:
+ data['port'] = self.port
+ if self.tags and len(self.tags) > 0:
+ data['tags'] = self.tags
+ if len(self.checks) > 0:
+ data['check'] = self.checks[0].to_dict()
+ return data
+
+
+class ConsulCheck(object):
+
+ def __init__(self, check_id, name, node=None, host='localhost',
+ script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
+ self.check_id = self.name = name
+ if check_id:
+ self.check_id = check_id
+ self.service_id = service_id
+ self.notes = notes
+ self.node = node
+ self.host = host
+
+ self.interval = self.validate_duration('interval', interval)
+ self.ttl = self.validate_duration('ttl', ttl)
+ self.script = script
+ self.tcp = tcp
+ self.http = http
+ self.timeout = self.validate_duration('timeout', timeout)
+
+ self.check = None
+
+ if script:
+ self.check = consul.Check.script(script, self.interval)
+
+ if ttl:
+ self.check = consul.Check.ttl(self.ttl)
+
+ if http:
+ if interval is None:
+ raise Exception('http check must specify interval')
+
+ self.check = consul.Check.http(http, self.interval, self.timeout)
+
+ if tcp:
+ if interval is None:
+ raise Exception('tcp check must specify interval')
+
+ regex = r"(?P<host>.*)(?::)(?P<port>(?:[0-9]+))$"
+ match = re.match(regex, tcp)
+
+ if match is None:
+ raise Exception('tcp check must be in host:port format')
+
+ self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
+
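+    # Durations without a recognised unit suffix are treated as seconds,
+    # e.g. validate_duration('ttl', '30') returns '30s'.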
+ def validate_duration(self, name, duration):
+ if duration:
+ duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+ if not any((duration.endswith(suffix) for suffix in duration_units)):
+ duration = "{0}s".format(duration)
+ return duration
+
+ def register(self, consul_api):
+ consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+ notes=self.notes,
+ check=self.check)
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.check_id == other.check_id and
+ self.service_id == other.service_id and
+ self.name == other.name and
+ self.script == other.script and
+ self.interval == other.interval)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {}
+ self._add(data, 'id', attr='check_id')
+ self._add(data, 'name', attr='check_name')
+ self._add(data, 'script')
+ self._add(data, 'node')
+ self._add(data, 'notes')
+ self._add(data, 'host')
+ self._add(data, 'interval')
+ self._add(data, 'ttl')
+ self._add(data, 'tcp')
+ self._add(data, 'http')
+ self._add(data, 'timeout')
+ self._add(data, 'service_id')
+ return data
+
+ def _add(self, data, key, attr=None):
+ try:
+ if attr is None:
+ attr = key
+ data[key] = getattr(self, attr)
+ except Exception:
+ pass
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(default='localhost'),
+ port=dict(default=8500, type='int'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ check_id=dict(required=False),
+ check_name=dict(required=False),
+ check_node=dict(required=False),
+ check_host=dict(required=False),
+ notes=dict(required=False),
+ script=dict(required=False),
+ service_id=dict(required=False),
+ service_name=dict(required=False),
+ service_address=dict(required=False, type='str', default=None),
+ service_port=dict(required=False, type='int', default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ interval=dict(required=False, type='str'),
+ ttl=dict(required=False, type='str'),
+ tcp=dict(required=False, type='str'),
+ http=dict(required=False, type='str'),
+ timeout=dict(required=False, type='str'),
+ tags=dict(required=False, type='list'),
+ token=dict(required=False, no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ test_dependencies(module)
+
+ try:
+ register_with_consul(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py
new file mode 100644
index 00000000..06feeea1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py
@@ -0,0 +1,657 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_acl
+short_description: Manipulate Consul ACL keys and rules
+description:
+ - Allows the addition, modification and deletion of ACL keys and associated
+ rules in a consul cluster via the agent. For more details on using and
+ configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ mgmt_token:
+ description:
+ - a management token is required to manipulate the acl lists
+ required: true
+ state:
+ description:
+ - whether the ACL pair should be present or absent
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ token_type:
+ description:
+ - the type of token that should be created
+ choices: ['client', 'management']
+ default: client
+ name:
+ description:
+ - the name that should be associated with the acl key, this is opaque
+ to Consul
+ required: false
+ token:
+ description:
+ - the token key identifying an ACL rule set. If generated by consul
+ this will be a UUID
+ required: false
+ rules:
+ type: list
+ description:
+ - rules that should be associated with a given token
+ required: false
+ host:
+ description:
+ - host of the consul agent defaults to localhost
+ required: false
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ validate_certs:
+ type: bool
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+requirements:
+ - python-consul
+ - pyhcl
+ - requests
+'''
+
+EXAMPLES = """
+- name: Create an ACL with rules
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ rules:
+ - key: "foo"
+ policy: read
+ - key: "private/foo"
+ policy: deny
+
+- name: Create an ACL with a specific token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: my-token
+ rules:
+ - key: "foo"
+ policy: read
+
+- name: Update the rules associated to an ACL token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: some_client_token
+ rules:
+ - event: "bbq"
+ policy: write
+ - key: "foo"
+ policy: read
+ - key: "private"
+ policy: deny
+ - keyring: write
+ - node: "hgs4"
+ policy: write
+ - operator: read
+ - query: ""
+ policy: write
+ - service: "consul"
+ policy: write
+ - session: "standup"
+ policy: write
+
+- name: Remove a token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
+ state: absent
+"""
+
+RETURN = """
+token:
+ description: the token associated to the ACL (the ACL's ID)
+ returned: success
+ type: str
+ sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
+rules:
+ description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
+ Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
+  returned: I(state) == "present"
+ type: str
+ sample: {
+ "key": {
+ "foo": {
+ "policy": "write"
+ },
+ "bar": {
+ "policy": "deny"
+ }
+ }
+ }
+operation:
+ description: the operation performed on the ACL
+ returned: changed
+ type: str
+ sample: update
+"""
+
+
+try:
+ import consul
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+try:
+ import hcl
+ pyhcl_installed = True
+except ImportError:
+ pyhcl_installed = False
+
+try:
+ from requests.exceptions import ConnectionError
+ has_requests = True
+except ImportError:
+ has_requests = False
+
+from collections import defaultdict
+from ansible.module_utils.basic import to_text, AnsibleModule
+
+
+RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"]
+
+MANAGEMENT_PARAMETER_NAME = "mgmt_token"
+HOST_PARAMETER_NAME = "host"
+SCHEME_PARAMETER_NAME = "scheme"
+VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
+NAME_PARAMETER_NAME = "name"
+PORT_PARAMETER_NAME = "port"
+RULES_PARAMETER_NAME = "rules"
+STATE_PARAMETER_NAME = "state"
+TOKEN_PARAMETER_NAME = "token"
+TOKEN_TYPE_PARAMETER_NAME = "token_type"
+
+PRESENT_STATE_VALUE = "present"
+ABSENT_STATE_VALUE = "absent"
+
+CLIENT_TOKEN_TYPE_VALUE = "client"
+MANAGEMENT_TOKEN_TYPE_VALUE = "management"
+
+REMOVE_OPERATION = "remove"
+UPDATE_OPERATION = "update"
+CREATE_OPERATION = "create"
+
+_POLICY_JSON_PROPERTY = "policy"
+_RULES_JSON_PROPERTY = "Rules"
+_TOKEN_JSON_PROPERTY = "ID"
+_TOKEN_TYPE_JSON_PROPERTY = "Type"
+_NAME_JSON_PROPERTY = "Name"
+_POLICY_YML_PROPERTY = "policy"
+_POLICY_HCL_PROPERTY = "policy"
+
+_ARGUMENT_SPEC = {
+ MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
+ HOST_PARAMETER_NAME: dict(default='localhost'),
+ SCHEME_PARAMETER_NAME: dict(required=False, default='http'),
+ VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True),
+ NAME_PARAMETER_NAME: dict(required=False),
+ PORT_PARAMETER_NAME: dict(default=8500, type='int'),
+ RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'),
+ STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
+ TOKEN_PARAMETER_NAME: dict(required=False),
+ TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
+ default=CLIENT_TOKEN_TYPE_VALUE)
+}
+
+
+def set_acl(consul_client, configuration):
+ """
+ Sets an ACL based on the given configuration.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of setting the ACL
+ """
+ acls_as_json = decode_acls_as_json(consul_client.acl.list())
+ existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
+ existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
+ if None in existing_acls_mapped_by_token:
+ raise AssertionError("expecting ACL list to be associated to a token: %s" %
+ existing_acls_mapped_by_token[None])
+
+ if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
+ # No token but name given so can get token from name
+ configuration.token = existing_acls_mapped_by_name[configuration.name].token
+
+ if configuration.token and configuration.token in existing_acls_mapped_by_token:
+ return update_acl(consul_client, configuration)
+ else:
+ if configuration.token in existing_acls_mapped_by_token:
+ raise AssertionError()
+ if configuration.name in existing_acls_mapped_by_name:
+ raise AssertionError()
+ return create_acl(consul_client, configuration)
+
+
+def update_acl(consul_client, configuration):
+ """
+ Updates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the update
+ """
+ existing_acl = load_acl_with_token(consul_client, configuration.token)
+ changed = existing_acl.rules != configuration.rules
+
+ if changed:
+ name = configuration.name if configuration.name is not None else existing_acl.name
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
+ updated_token = consul_client.acl.update(
+ configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
+ if updated_token != configuration.token:
+ raise AssertionError()
+
+ return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
+
+
+def create_acl(consul_client, configuration):
+ """
+ Creates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the creation
+ """
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
+ token = consul_client.acl.create(
+ name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
+ rules = configuration.rules
+ return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
+
+
+def remove_acl(consul, configuration):
+ """
+ Removes an ACL.
+ :param consul: the consul client
+ :param configuration: the run configuration
+ :return: the output of the removal
+ """
+ token = configuration.token
+ changed = consul.acl.info(token) is not None
+ if changed:
+ consul.acl.destroy(token)
+ return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
+
+
+def load_acl_with_token(consul, token):
+ """
+ Loads the ACL with the given token (token == rule ID).
+ :param consul: the consul client
+ :param token: the ACL "token"/ID (not name)
+ :return: the ACL associated to the given token
+ :exception ConsulACLTokenNotFoundException: raised if the given token does not exist
+ """
+ acl_as_json = consul.acl.info(token)
+ if acl_as_json is None:
+ raise ConsulACLNotFoundException(token)
+ return decode_acl_as_json(acl_as_json)
+
+
+def encode_rules_as_hcl_string(rules):
+ """
+ Converts the given rules into the equivalent HCL (string) representation.
+ :param rules: the rules
+    :return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
+    note for justification)
+ """
+ if len(rules) == 0:
+        # Note: an empty string is not valid HCL according to `hcl.loads`; however, the ACL `Rules` property will be an
+        # empty string if there are no rules...
+ return None
+ rules_as_hcl = ""
+ for rule in rules:
+ rules_as_hcl += encode_rule_as_hcl_string(rule)
+ return rules_as_hcl
+
+
+def encode_rule_as_hcl_string(rule):
+ """
+ Converts the given rule into the equivalent HCL (string) representation.
+ :param rule: the rule
+ :return: the equivalent HCL (string) representation of the rule
+ """
+ if rule.pattern is not None:
+ return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
+ else:
+ return '%s = "%s"\n' % (rule.scope, rule.policy)
+
+
+def decode_rules_as_hcl_string(rules_as_hcl):
+ """
+ Converts the given HCL (string) representation of rules into a list of rule domain models.
+ :param rules_as_hcl: the HCL (string) representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules_as_hcl = to_text(rules_as_hcl)
+ rules_as_json = hcl.loads(rules_as_hcl)
+ return decode_rules_as_json(rules_as_json)
+
+
+def decode_rules_as_json(rules_as_json):
+ """
+ Converts the given JSON representation of rules into a list of rule domain models.
+ :param rules_as_json: the JSON representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ for scope in rules_as_json:
+ if not isinstance(rules_as_json[scope], dict):
+ rules.add(Rule(scope, rules_as_json[scope]))
+ else:
+ for pattern, policy in rules_as_json[scope].items():
+ rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
+ return rules
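+
+# Illustrative sketch, not part of the module: assuming _POLICY_JSON_PROPERTY
+# is "policy", both the flat and the nested JSON forms decode:
+#
+#   >>> rules = decode_rules_as_json({"operator": "read",
+#   ...                               "key": {"foo/": {"policy": "write"}}})
+#   # -> RuleCollection holding Rule("operator", "read")
+#   #    and Rule("key", "write", pattern="foo/")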
+
+
+def encode_rules_as_json(rules):
+ """
+ Converts the given rules into the equivalent JSON representation according to the documentation:
+ https://www.consul.io/docs/guides/acl.html#rule-specification.
+ :param rules: the rules
+ :return: JSON representation of the given rules
+ """
+ rules_as_json = defaultdict(dict)
+ for rule in rules:
+ if rule.pattern is not None:
+ if rule.pattern in rules_as_json[rule.scope]:
+ raise AssertionError()
+ rules_as_json[rule.scope][rule.pattern] = {
+ _POLICY_JSON_PROPERTY: rule.policy
+ }
+ else:
+ if rule.scope in rules_as_json:
+ raise AssertionError()
+ rules_as_json[rule.scope] = rule.policy
+ return rules_as_json
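+
+# Illustrative sketch, not part of the module: the inverse of
+# decode_rules_as_json, again assuming _POLICY_JSON_PROPERTY is "policy".
+# For the two rules from the previous sketch the encoder returns (as a
+# defaultdict):
+#
+#   {"operator": "read", "key": {"foo/": {"policy": "write"}}}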
+
+
+def decode_rules_as_yml(rules_as_yml):
+ """
+ Converts the given YAML representation of rules into a list of rule domain models.
+ :param rules_as_yml: the YAML representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ if rules_as_yml:
+ for rule_as_yml in rules_as_yml:
+ rule_added = False
+ for scope in RULE_SCOPES:
+ if scope in rule_as_yml:
+ if rule_as_yml[scope] is None:
+                        raise ValueError("Rule for '%s' does not have a value associated with the scope" % scope)
+ policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
+ else rule_as_yml[scope]
+ pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
+ rules.add(Rule(scope, policy, pattern))
+ rule_added = True
+ break
+ if not rule_added:
+ raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
+ return rules
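+
+# Illustrative sketch, not part of the module: the YAML rule form accepted by
+# this decoder names exactly one scope per entry; a separate "policy" key
+# turns the scope's value into a pattern:
+#
+#   - key: 'foo/'
+#     policy: read    # -> Rule(scope="key", policy="read", pattern="foo/")
+#   - operator: read  # -> Rule(scope="operator", policy="read")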
+
+
+def decode_acl_as_json(acl_as_json):
+ """
+ Converts the given JSON representation of an ACL into the equivalent domain model.
+ :param acl_as_json: the JSON representation of an ACL
+ :return: the equivalent domain model to the given ACL
+ """
+ rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
+    rules = decode_rules_as_hcl_string(rules_as_hcl) if rules_as_hcl.strip() != "" \
+ else RuleCollection()
+ return ACL(
+ rules=rules,
+ token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
+ token=acl_as_json[_TOKEN_JSON_PROPERTY],
+ name=acl_as_json[_NAME_JSON_PROPERTY]
+ )
+
+
+def decode_acls_as_json(acls_as_json):
+ """
+ Converts the given JSON representation of ACLs into a list of ACL domain models.
+ :param acls_as_json: the JSON representation of a collection of ACLs
+ :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
+ """
+ return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
+
+
+class ConsulACLNotFoundException(Exception):
+ """
+    Exception raised if an ACL with the given token is not found.
+ """
+
+
+class Configuration:
+ """
+ Configuration for this module.
+ """
+
+ def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
+ rules=None, state=None, token=None, token_type=None):
+ self.management_token = management_token # type: str
+ self.host = host # type: str
+ self.scheme = scheme # type: str
+ self.validate_certs = validate_certs # type: bool
+ self.name = name # type: str
+ self.port = port # type: int
+ self.rules = rules # type: RuleCollection
+ self.state = state # type: str
+ self.token = token # type: str
+ self.token_type = token_type # type: str
+
+
+class Output:
+ """
+ Output of an action of this module.
+ """
+
+ def __init__(self, changed=None, token=None, rules=None, operation=None):
+ self.changed = changed # type: bool
+ self.token = token # type: str
+ self.rules = rules # type: RuleCollection
+ self.operation = operation # type: str
+
+
+class ACL:
+ """
+ Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
+ """
+
+ def __init__(self, rules, token_type, token, name):
+ self.rules = rules
+ self.token_type = token_type
+ self.token = token
+ self.name = name
+
+ def __eq__(self, other):
+ return other \
+ and isinstance(other, self.__class__) \
+ and self.rules == other.rules \
+ and self.token_type == other.token_type \
+ and self.token == other.token \
+ and self.name == other.name
+
+ def __hash__(self):
+ return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
+
+
+class Rule:
+ """
+ ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
+ """
+
+ def __init__(self, scope, policy, pattern=None):
+ self.scope = scope
+ self.policy = policy
+ self.pattern = pattern
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.scope == other.scope \
+ and self.policy == other.policy \
+ and self.pattern == other.pattern
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
+
+ def __str__(self):
+ return encode_rule_as_hcl_string(self)
+
+
+class RuleCollection:
+ """
+ Collection of ACL rules, which are part of a Consul ACL.
+ """
+
+ def __init__(self):
+ self._rules = {}
+ for scope in RULE_SCOPES:
+ self._rules[scope] = {}
+
+ def __iter__(self):
+ all_rules = []
+ for scope, pattern_keyed_rules in self._rules.items():
+ for pattern, rule in pattern_keyed_rules.items():
+ all_rules.append(rule)
+ return iter(all_rules)
+
+ def __len__(self):
+ count = 0
+ for scope in RULE_SCOPES:
+ count += len(self._rules[scope])
+ return count
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and set(self) == set(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return encode_rules_as_hcl_string(self)
+
+ def add(self, rule):
+ """
+ Adds the given rule to this collection.
+ :param rule: model of a rule
+ :raises ValueError: raised if there already exists a rule for a given scope and pattern
+ """
+ if rule.pattern in self._rules[rule.scope]:
+            pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
+            raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
+ self._rules[rule.scope][rule.pattern] = rule
+
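+# Illustrative sketch, not part of the module: duplicates are keyed on
+# (scope, pattern), so the second add() below raises:
+#
+#   >>> rules = RuleCollection()
+#   >>> rules.add(Rule("key", "read", "foo/"))
+#   >>> rules.add(Rule("key", "write", "foo/"))
+#   ValueError: Duplicate rule for scope 'key' and pattern 'foo/'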
+
+def get_consul_client(configuration):
+ """
+ Gets a Consul client for the given configuration.
+
+ Does not check if the Consul client can connect.
+ :param configuration: the run configuration
+ :return: Consul client
+ """
+ token = configuration.management_token
+ if token is None:
+ token = configuration.token
+ if token is None:
+ raise AssertionError("Expecting the management token to always be set")
+ return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
+ verify=configuration.validate_certs, token=token)
+
+
+def check_dependencies():
+ """
+ Checks that the required dependencies have been imported.
+ :exception ImportError: if it is detected that any of the required dependencies have not been imported
+ """
+ if not python_consul_installed:
+ raise ImportError("python-consul required for this module. "
+ "See: https://python-consul.readthedocs.io/en/latest/#installation")
+
+ if not pyhcl_installed:
+ raise ImportError("pyhcl required for this module. "
+ "See: https://pypi.org/project/pyhcl/")
+
+ if not has_requests:
+ raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
+
+
+def main():
+ """
+ Main method.
+ """
+ module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
+
+ try:
+ check_dependencies()
+ except ImportError as e:
+ module.fail_json(msg=str(e))
+
+ configuration = Configuration(
+ management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
+ host=module.params.get(HOST_PARAMETER_NAME),
+ scheme=module.params.get(SCHEME_PARAMETER_NAME),
+ validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
+ name=module.params.get(NAME_PARAMETER_NAME),
+ port=module.params.get(PORT_PARAMETER_NAME),
+ rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
+ state=module.params.get(STATE_PARAMETER_NAME),
+ token=module.params.get(TOKEN_PARAMETER_NAME),
+ token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
+ )
+ consul_client = get_consul_client(configuration)
+
+ try:
+ if configuration.state == PRESENT_STATE_VALUE:
+ output = set_acl(consul_client, configuration)
+ else:
+ output = remove_acl(consul_client, configuration)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ configuration.host, configuration.port, str(e)))
+ raise
+
+ return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
+ if output.rules is not None:
+ return_values["rules"] = encode_rules_as_json(output.rules)
+ module.exit_json(**return_values)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py
new file mode 100644
index 00000000..ee5c3970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# (c) 2018 Genome Research Ltd.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_kv
+short_description: Manipulate entries in the key/value store of a consul cluster
+description:
+ - Allows the retrieval, addition, modification and deletion of key/value entries in a
+ consul cluster via the agent. The entire contents of the record, including
+ the indices, flags and session are returned as C(value).
+ - If the C(key) represents a prefix then note that when a value is removed, the existing
+ value if any is returned as part of the results.
+ - See http://www.consul.io/docs/agent/http.html#kv for more details.
+requirements:
+ - python-consul
+ - requests
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ state:
+ description:
+          - The action to take with the supplied key and value. If the state is C(present) and I(value) is set, the
+            key contents will be set to the value supplied and C(changed) will be set to C(true) only if the value
+            was different to the current contents. If the state is C(present) and I(value) is not set, the existing
+            value associated to the key will be returned. The state C(absent) will remove the key/value pair, and
+            C(changed) will be set to C(true) only if the key actually existed prior to the removal. An attempt can
+            be made to obtain or free the lock associated with a key/value pair with the states C(acquire) or
+            C(release) respectively. A valid session must be supplied to make the attempt; C(changed) will be
+            C(true) if the attempt is successful, C(false) otherwise.
+ choices: [ absent, acquire, present, release ]
+ default: present
+ key:
+ description:
+ - The key at which the value should be stored.
+ type: str
+ required: yes
+ value:
+ description:
+          - The value to be associated with the given key; required if C(state)
+            is C(present).
+ type: str
+ recurse:
+ description:
+ - If the key represents a prefix, each entry with the prefix can be
+ retrieved by setting this to C(yes).
+ type: bool
+ retrieve:
+ description:
+ - If the I(state) is C(present) and I(value) is set, perform a
+ read after setting the value and return this value.
+ default: True
+ type: bool
+ session:
+ description:
+ - The session that should be used to acquire or release a lock
+ associated with a key/value pair.
+ type: str
+ token:
+ description:
+          - The token key identifying an ACL rule set that controls access to
+            the key/value pair.
+ type: str
+ cas:
+ description:
+ - Used when acquiring a lock with a session. If the C(cas) is C(0), then
+ Consul will only put the key if it does not already exist. If the
+ C(cas) value is non-zero, then the key is only set if the index matches
+ the ModifyIndex of that key.
+ type: str
+ flags:
+ description:
+ - Opaque positive integer value that can be passed when setting a value.
+ type: str
+ host:
+ description:
+ - Host of the consul agent.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the tls certificate of the consul agent.
+ type: bool
+ default: 'yes'
+'''
+
+
+EXAMPLES = '''
+# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
+# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
+- name: Retrieve a value from the key/value store
+ community.general.consul_kv:
+ key: somekey
+ register: retrieved_key
+
+- name: Add or update the value associated with a key in the key/value store
+ community.general.consul_kv:
+ key: somekey
+ value: somevalue
+
+- name: Remove a key from the store
+ community.general.consul_kv:
+ key: somekey
+ state: absent
+
+- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
+ community.general.consul_kv:
+ key: ansible/groups/dc1/somenode
+ value: top_secret
+
+- name: Register a key/value pair with an associated session
+ community.general.consul_kv:
+ key: stg/node/server_birthday
+ value: 20160509
+ session: "{{ sessionid }}"
+ state: acquire
+'''
+
+from ansible.module_utils._text import to_text
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
+# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call,
+# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
+NOT_SET = None
+
+
+def _has_value_changed(consul_client, key, target_value):
+ """
+ Uses the given Consul client to determine if the value associated to the given key is different to the given target
+ value.
+ :param consul_client: Consul connected client
+ :param key: key in Consul
+ :param target_value: value to be associated to the key
+ :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
+ value has changed (i.e. the stored value is not the target value)
+ """
+ index, existing = consul_client.kv.get(key)
+ if not existing:
+ return index, True
+ try:
+ changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
+ return index, changed
+ except UnicodeError:
+ # Existing value was not decodable but all values we set are valid utf-8
+ return index, True
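+
+# Illustrative sketch, not part of the module: python-consul's kv.get returns
+# an (index, entry) tuple, where entry is None for a missing key and otherwise
+# a dict carrying the raw bytes under 'Value', e.g. roughly:
+#
+#   >>> index, entry = consul_client.kv.get('somekey')
+#   >>> entry
+#   {'Key': 'somekey', 'Value': b'somevalue', 'Flags': 0, ...}
+#
+# hence the to_text() decode above before comparing against the target value.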
+
+
+def execute(module):
+ state = module.params.get('state')
+
+ if state == 'acquire' or state == 'release':
+ lock(module, state)
+ elif state == 'present':
+ if module.params.get('value') is NOT_SET:
+ get_value(module)
+ else:
+ set_value(module)
+ elif state == 'absent':
+ remove_value(module)
+ else:
+        module.fail_json(msg="Unsupported state: %s" % state)
+
+
+def lock(module, state):
+
+ consul_api = get_consul_api(module)
+
+ session = module.params.get('session')
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if not session:
+        module.fail_json(
+ msg='%s of lock for %s requested but no session supplied' %
+ (state, key))
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ if state == 'acquire':
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ acquire=session,
+ flags=module.params.get('flags'))
+ else:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ release=session,
+ flags=module.params.get('flags'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key)
+
+
+def get_value(module):
+ consul_api = get_consul_api(module)
+ key = module.params.get('key')
+
+ index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
+
+ module.exit_json(changed=False, index=index, data=existing_value)
+
+
+def set_value(module):
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if value is NOT_SET:
+ raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ flags=module.params.get('flags'))
+
+ stored = None
+ if module.params.get('retrieve'):
+ index, stored = consul_api.kv.get(key)
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=stored)
+
+
+def remove_value(module):
+ ''' remove the value associated with the given key. if the recurse parameter
+ is set then any key prefixed with the given key will be removed. '''
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+
+ index, existing = consul_api.kv.get(
+ key, recurse=module.params.get('recurse'))
+
+ changed = existing is not None
+ if changed and not module.check_mode:
+ consul_api.kv.delete(key, module.params.get('recurse'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=existing)
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cas=dict(type='str'),
+ flags=dict(type='str'),
+ key=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ port=dict(type='int', default=8500),
+ recurse=dict(type='bool'),
+ retrieve=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
+ token=dict(type='str', no_log=True),
+ value=dict(type='str', default=NOT_SET),
+ session=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py
new file mode 100644
index 00000000..f28d3a5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: consul_session
+short_description: Manipulate consul sessions
+description:
+ - Allows the addition, modification and deletion of sessions in a consul
+ cluster. These sessions can then be used in conjunction with key value pairs
+ to implement distributed locks. In depth documentation for working with
+ sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+ - python-consul
+ - requests
+author:
+- Steve Gargan (@sgargan)
+options:
+ id:
+ description:
+ - ID of the session, required when I(state) is either C(info) or
+ C(remove).
+ type: str
+ state:
+ description:
+          - Whether the session should be present, i.e. created if it doesn't
+            exist, or absent, i.e. removed if present. If created, the I(id) for the
+            session is returned in the output. If C(absent), I(id) is
+            required to remove the session. Info for a single session, all the
+            sessions for a node or all available sessions can be retrieved by
+            specifying C(info), C(node) or C(list) for the I(state); for C(node)
+            or C(info), the node I(name) or session I(id) is required as parameter.
+ choices: [ absent, info, list, node, present ]
+ type: str
+ default: present
+ name:
+ description:
+ - The name that should be associated with the session. Required when
+ I(state=node) is used.
+ type: str
+ delay:
+ description:
+          - The optional lock delay that can be attached to the session when it
+            is created. Locks for invalidated sessions are blocked from being
+            acquired until this delay has expired. Durations are in seconds.
+ type: int
+ default: 15
+ node:
+ description:
+          - The name of the node with which the session will be associated.
+            By default this is the name of the agent.
+ type: str
+ datacenter:
+ description:
+ - The name of the datacenter in which the session exists or should be
+ created.
+ type: str
+ checks:
+ description:
+          - Checks that will be used to verify the session health. If
+            all the checks fail, the session will be invalidated and any locks
+            associated with the session will be released and can be acquired once
+            the associated lock delay has expired.
+ type: list
+ host:
+ description:
+          - The host of the consul agent.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ type: bool
+ default: True
+ behavior:
+ description:
+ - The optional behavior that can be attached to the session when it
+ is created. This controls the behavior when a session is invalidated.
+ choices: [ delete, release ]
+ type: str
+ default: release
+'''
+
+EXAMPLES = '''
+- name: Register basic session with consul
+ community.general.consul_session:
+ name: session1
+
+- name: Register a session with an existing check
+ community.general.consul_session:
+ name: session_with_check
+ checks:
+ - existing_check_name
+
+- name: Register a session with lock_delay
+ community.general.consul_session:
+ name: session_with_delay
+    delay: 20
+
+- name: Retrieve info about session by id
+ community.general.consul_session:
+ id: session_id
+ state: info
+
+- name: Retrieve active sessions
+ community.general.consul_session:
+ state: list
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state in ['info', 'list', 'node']:
+ lookup_sessions(module)
+ elif state == 'present':
+ update_session(module)
+ else:
+ remove_session(module)
+
+
+def lookup_sessions(module):
+
+ datacenter = module.params.get('datacenter')
+
+ state = module.params.get('state')
+ consul_client = get_consul_api(module)
+ try:
+ if state == 'list':
+ sessions_list = consul_client.session.list(dc=datacenter)
+ # Ditch the index, this can be grabbed from the results
+ if sessions_list and len(sessions_list) >= 2:
+ sessions_list = sessions_list[1]
+ module.exit_json(changed=True,
+ sessions=sessions_list)
+ elif state == 'node':
+ node = module.params.get('node')
+ sessions = consul_client.session.node(node, dc=datacenter)
+ module.exit_json(changed=True,
+ node=node,
+ sessions=sessions)
+ elif state == 'info':
+ session_id = module.params.get('id')
+
+ session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ module.exit_json(changed=True,
+ session_id=session_id,
+ sessions=session_by_id)
+
+ except Exception as e:
+ module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def update_session(module):
+
+ name = module.params.get('name')
+ delay = module.params.get('delay')
+ checks = module.params.get('checks')
+ datacenter = module.params.get('datacenter')
+ node = module.params.get('node')
+ behavior = module.params.get('behavior')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ session = consul_client.session.create(
+ name=name,
+ behavior=behavior,
+ node=node,
+ lock_delay=delay,
+ dc=datacenter,
+ checks=checks
+ )
+ module.exit_json(changed=True,
+ session_id=session,
+ name=name,
+ behavior=behavior,
+ delay=delay,
+ checks=checks,
+ node=node)
+ except Exception as e:
+ module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+ session_id = module.params.get('id')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ consul_client.session.destroy(session_id)
+
+ module.exit_json(changed=True,
+ session_id=session_id)
+ except Exception as e:
+ module.fail_json(msg="Could not remove session with id '%s' %s" % (
+ session_id, e))
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ argument_spec = dict(
+ checks=dict(type='list'),
+        delay=dict(type='int', default=15),
+ behavior=dict(type='str', default='release', choices=['release', 'delete']),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8500),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ node=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+ datacenter=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'node', ['name']),
+ ('state', 'info', ['id']),
+            ('state', 'absent', ['id']),
+ ],
+ supports_check_mode=False
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py
new file mode 100644
index 00000000..3b43b443
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Franck Cuny <franck@lumberjaph.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cpanm
+short_description: Manages Perl library dependencies.
+description:
+ - Manage Perl library dependencies.
+options:
+ name:
+ type: str
+ description:
+ - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
+ aliases: ["pkg"]
+ from_path:
+ type: path
+ description:
+      - The local directory from which to install.
+ notest:
+ description:
+ - Do not run unit tests
+ type: bool
+ default: no
+ locallib:
+ description:
+ - Specify the install base to install modules
+ type: path
+ mirror:
+ description:
+ - Specifies the base URL for the CPAN mirror to use
+ type: str
+ mirror_only:
+ description:
+ - Use the mirror's index file instead of the CPAN Meta DB
+ type: bool
+ default: no
+ installdeps:
+ description:
+ - Only install dependencies
+ type: bool
+ default: no
+ version:
+ description:
+ - minimum version of perl module to consider acceptable
+ type: str
+ system_lib:
+ description:
+ - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
+ - This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation.
+ type: bool
+ default: no
+ aliases: ['use_sudo']
+ executable:
+ description:
+ - Override the path to the cpanm executable
+ type: path
+notes:
+  - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
+author: "Franck Cuny (@fcuny)"
+'''
+
+EXAMPLES = '''
+- name: Install Dancer perl package
+ community.general.cpanm:
+ name: Dancer
+
+- name: Install version 0.99_05 of the Plack perl package
+ community.general.cpanm:
+ name: MIYAGAWA/Plack-0.99_05.tar.gz
+
+- name: Install Dancer into the specified locallib
+ community.general.cpanm:
+ name: Dancer
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install perl dependencies from local directory
+ community.general.cpanm:
+ from_path: /srv/webapps/my_app/src/
+
+- name: Install Dancer perl package without running the unit tests in indicated locallib
+ community.general.cpanm:
+ name: Dancer
+ notest: True
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install Dancer perl package from a specific mirror
+ community.general.cpanm:
+ name: Dancer
+ mirror: 'http://cpan.cpantesters.org/'
+
+- name: Install Dancer perl package into the system root path
+ community.general.cpanm:
+ name: Dancer
+ system_lib: yes
+
+- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0
+ community.general.cpanm:
+ name: Dancer
+ version: '1.0'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _is_package_installed(module, name, locallib, cpanm, version):
+ cmd = ""
+ if locallib:
+ os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
+ cmd = "%s perl -e ' use %s" % (cmd, name)
+ if version:
+ cmd = "%s %s;'" % (cmd, version)
+ else:
+ cmd = "%s;'" % cmd
+ res, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return res == 0
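+
+# Illustrative sketch, not part of the module: for name="Dancer" and
+# version="1.0" the probe command assembled above is roughly
+#
+#   perl -e ' use Dancer 1.0;'
+#
+# i.e. it relies on `use MODULE VERSION` making perl exit non-zero when the
+# module is missing or older than the requested version.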
+
+
+def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
+ # this code should use "%s" like everything else and just return early but not fixing all of it now.
+ # don't copy stuff like this
+ if from_path:
+ cmd = cpanm + " " + from_path
+ else:
+ cmd = cpanm + " " + name
+
+ if notest is True:
+ cmd = cmd + " -n"
+
+ if locallib is not None:
+ cmd = cmd + " -l " + locallib
+
+ if mirror is not None:
+ cmd = cmd + " --mirror " + mirror
+
+ if mirror_only is True:
+ cmd = cmd + " --mirror-only"
+
+ if installdeps is True:
+ cmd = cmd + " --installdeps"
+
+ if use_sudo is True:
+ cmd = cmd + " --sudo"
+
+ return cmd
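+
+# Illustrative sketch, not part of the module: with name="Dancer",
+# notest=True and locallib="/srv/webapps/my_app/extlib", and cpanm resolved
+# to /usr/bin/cpanm (a hypothetical path), the assembled command is
+#
+#   /usr/bin/cpanm Dancer -n -l /srv/webapps/my_app/extlib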
+
+
+def _get_cpanm_path(module):
+ if module.params['executable']:
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('cpanm', True)
+ return result
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, required=False, aliases=['pkg']),
+ from_path=dict(default=None, required=False, type='path'),
+ notest=dict(default=False, type='bool'),
+ locallib=dict(default=None, required=False, type='path'),
+ mirror=dict(default=None, required=False),
+ mirror_only=dict(default=False, type='bool'),
+ installdeps=dict(default=False, type='bool'),
+ system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
+ version=dict(default=None, required=False),
+ executable=dict(required=False, type='path'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ required_one_of=[['name', 'from_path']],
+ )
+
+ cpanm = _get_cpanm_path(module)
+ name = module.params['name']
+ from_path = module.params['from_path']
+ notest = module.boolean(module.params.get('notest', False))
+ locallib = module.params['locallib']
+ mirror = module.params['mirror']
+ mirror_only = module.params['mirror_only']
+ installdeps = module.params['installdeps']
+ use_sudo = module.params['system_lib']
+ version = module.params['version']
+
+ changed = False
+
+ installed = _is_package_installed(module, name, locallib, cpanm, version)
+
+ if not installed:
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
+
+ rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
+
+ if rc_cpanm != 0:
+ module.fail_json(msg=err_cpanm, cmd=cmd)
+
+ if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1):
+ changed = True
+
+ module.exit_json(changed=changed, binary=cpanm, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py
new file mode 100644
index 00000000..a76f6a78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
+# Each variable entered will be preceded with a comment describing the
+# variable so that it can be found later. This is required to be
+# present in order for this plugin to find/modify the variable.
+
+# This module is based on the crontab module.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cronvar
+short_description: Manage variables in crontabs
+description:
+ - Use this module to manage crontab variables.
+ - This module allows you to create, update, or delete cron variable definitions.
+options:
+ name:
+ description:
+ - Name of the crontab variable.
+ type: str
+ required: yes
+ value:
+ description:
+ - The value to set this variable to.
+ - Required if C(state=present).
+ type: str
+ insertafter:
+ description:
+ - If specified, the variable will be inserted after the variable specified.
+ - Used with C(state=present).
+ type: str
+ insertbefore:
+ description:
+ - Used with C(state=present). If specified, the variable will be inserted
+ just before the variable specified.
+ type: str
+ state:
+ description:
+ - Whether to ensure that the variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - This parameter defaults to C(root) when unset.
+ type: str
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ - Without a leading C(/), this is assumed to be in I(/etc/cron.d).
+ - With a leading C(/), this is taken as absolute.
+ type: str
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup) variable by this module.
+ type: bool
+ default: no
+requirements:
+ - cron
+author:
+- Doug Luce (@dougluce)
+'''
+
+EXAMPLES = r'''
+- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
+ community.general.cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+
+- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
+ community.general.cronvar:
+ name: LEGACY
+ state: absent
+
+- name: Add a variable to a file under /etc/cron.d
+ community.general.cronvar:
+ name: LOGFILE
+ value: /var/log/yum-autoupdate.log
+ user: root
+ cron_file: ansible_yum-autoupdate
+'''
+
+import os
+import platform
+import pwd
+import re
+import shlex
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronVarError(Exception):
+ pass
+
+
+class CronVar(object):
+ """
+ CronVar object to write variables to crontabs.
+
+ user - the user of the crontab (defaults to root)
+ cron_file - a cron file under /etc/cron.d
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.lines = None
+ self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+ self.cron_file = ""
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.cron_file, 'r')
+ self.lines = f.read().splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronVarError("Unable to read crontab")
+
+ lines = out.splitlines()
+ count = 0
+ for l in lines:
+ if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l
+ ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ count += 1
+
+ def log_message(self, message):
+ self.module.debug('ansible: "%s"' % message)
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'w')
+ elif self.cron_file:
+ fileh = open(self.cron_file, 'w')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(self.render())
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ def remove_variable_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+
+ def parse_for_var(self, line):
+ lexer = shlex.shlex(line)
+ lexer.wordchars = self.wordchars
+ varname = lexer.get_token()
+ is_env_var = lexer.get_token() == '='
+ value = ''.join(lexer)
+ if is_env_var:
+ return (varname, value)
+ raise CronVarError("Not a variable.")
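+
+    # Illustrative sketch, not part of the module: shlex tokenizes each line,
+    # so a variable definition parses as
+    #
+    #   >>> cronvar.parse_for_var('MAILTO=admin@example.org')
+    #   ('MAILTO', 'admin@example.org')
+    #
+    # while comments and job lines (second token is not '=') raise
+    # CronVarError.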
+
+ def find_variable(self, name):
+ for l in self.lines:
+ try:
+ (varname, value) = self.parse_for_var(l)
+ if varname == name:
+ return value
+ except CronVarError:
+ pass
+ return None
+
+ def get_var_names(self):
+ var_names = []
+ for l in self.lines:
+ try:
+ (var_name, _) = self.parse_for_var(l)
+ var_names.append(var_name)
+ except CronVarError:
+ pass
+ return var_names
+
+ def add_variable(self, name, value, insertbefore, insertafter):
+ if insertbefore is None and insertafter is None:
+ # Add the variable to the top of the file.
+ self.lines.insert(0, "%s=%s" % (name, value))
+ else:
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname == insertbefore:
+ newlines.append("%s=%s" % (name, value))
+ newlines.append(l)
+ elif varname == insertafter:
+ newlines.append(l)
+ newlines.append("%s=%s" % (name, value))
+ else:
+ raise CronVarError # Append.
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def remove_variable(self, name):
+ self.update_variable(name, None, remove=True)
+
+ def update_variable(self, name, value, remove=False):
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname != name:
+ raise CronVarError # Append.
+ if not remove:
+ newlines.append("%s=%s" % (name, value))
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render a proper crontab
+ """
+ result = '\n'.join(self.lines)
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
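+
+    # Illustrative sketch, not part of the module: on a typical Linux host
+    # the two helpers above produce shell commands such as
+    #
+    #   crontab -u doug -l                 # read doug's crontab
+    #   crontab -u doug /tmp/crontabXYZ    # install the rendered file
+    #
+    # with the SunOS/AIX/HP-UX branches covering platforms whose crontab
+    # handles user selection differently.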
+
+
+# ==================================================
+
+def main():
+ # The following example playbooks:
+ #
+ # - community.general.cronvar: name="SHELL" value="/bin/bash"
+ #
+ # - name: Set the email
+ # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
+ #
+ # - name: Get rid of the old new host variable
+ # community.general.cronvar: name="NEW_HOST" state=absent
+ #
+ # Would produce:
+ # SHELL = /bin/bash
+ # EMAILTO = doug@ansibmod.con.com
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ user=dict(type='str'),
+ cron_file=dict(type='str'),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ backup=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ supports_check_mode=False,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ user = module.params['user']
+ cron_file = module.params['cron_file']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ state = module.params['state']
+ backup = module.params['backup']
+ ensure_present = state == 'present'
+
+ changed = False
+ res_args = dict()
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ cronvar = CronVar(module, user, cron_file)
+
+ module.debug('cronvar instantiated - name: "%s"' % name)
+
+ # --- user input validation ---
+
+ if name is None and ensure_present:
+ module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+ if value is None and ensure_present:
+ module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+ if name is None and not ensure_present:
+ module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+ # if requested make a backup before making a change
+ if backup:
+ (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+ cronvar.write(backup_file)
+
+ if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+ old_value = cronvar.find_variable(name)
+
+ if ensure_present:
+ if old_value is None:
+ cronvar.add_variable(name, value, insertbefore, insertafter)
+ changed = True
+ elif old_value != value:
+ cronvar.update_variable(name, value)
+ changed = True
+ else:
+ if old_value is not None:
+ cronvar.remove_variable(name)
+ changed = True
+
+ res_args = {
+ "vars": cronvar.get_var_names(),
+ "changed": changed
+ }
+
+ if changed:
+ cronvar.write()
+
+ # retain the backup only if crontab or cron file have changed
+ if backup:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py
new file mode 100644
index 00000000..9841a786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Steve <yo@groks.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+ - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+options:
+ name:
+ description:
+ - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+ optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+ will be stripped from I(name).
+ type: str
+ required: yes
+ state:
+ description:
+ - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+ if already present.
+ - Use I(absent) to remove a line with matching I(name).
+ - Use I(opts_present) to add options to those already present; options with
+ different values will be updated.
+ - Use I(opts_absent) to remove options from the existing set.
+ type: str
+ required: yes
+ choices: [ absent, opts_absent, opts_present, present ]
+ backing_device:
+ description:
+ - Path to the underlying block device or file, or the UUID of a block-device
+ prefixed with I(UUID=).
+ type: str
+ password:
+ description:
+ - Encryption password, the path to a file containing the password, or
+ C(-) or unset if the password should be entered at boot.
+ type: path
+ opts:
+ description:
+      - A comma-delimited list of options. See C(crypttab(5)) for details.
+ type: str
+ path:
+ description:
+ - Path to file to use instead of C(/etc/crypttab).
+ - This might be useful in a chroot environment.
+ type: path
+ default: /etc/crypttab
+author:
+- Steve (@groks)
+'''
+
+EXAMPLES = r'''
+- name: Set the options explicitly for a device which must already exist
+ community.general.crypttab:
+ name: luks-home
+ state: present
+ opts: discard,cipher=aes-cbc-essiv:sha256
+
+- name: Add the 'discard' option to any existing options for all devices
+ community.general.crypttab:
+ name: '{{ item.device }}'
+ state: opts_present
+ opts: discard
+ loop: '{{ ansible_mounts }}'
+  when: "'/dev/mapper/luks-' in item.device"
+'''
+
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
+ backing_device=dict(type='str'),
+ password=dict(type='path'),
+ opts=dict(type='str'),
+ path=dict(type='path', default='/etc/crypttab')
+ ),
+ supports_check_mode=True,
+ )
+
+ backing_device = module.params['backing_device']
+ password = module.params['password']
+ opts = module.params['opts']
+ state = module.params['state']
+ path = module.params['path']
+ name = module.params['name']
+ if name.startswith('/dev/mapper/'):
+ name = name[len('/dev/mapper/'):]
+
+ if state != 'absent' and backing_device is None and password is None and opts is None:
+ module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
+ **module.params)
+
+ if 'opts' in state and (backing_device is not None or password is not None):
+ module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
+ **module.params)
+
+ for arg_name, arg in (('name', name),
+ ('backing_device', backing_device),
+ ('password', password),
+ ('opts', opts)):
+ if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+ module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
+ **module.params)
+
+ try:
+ crypttab = Crypttab(path)
+ existing_line = crypttab.match(name)
+ except Exception as e:
+ module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
+ exception=traceback.format_exc(), **module.params)
+
+ if 'present' in state and existing_line is None and backing_device is None:
+ module.fail_json(msg="'backing_device' required to add a new entry",
+ **module.params)
+
+ changed, reason = False, '?'
+
+ if state == 'absent':
+ if existing_line is not None:
+ changed, reason = existing_line.remove()
+
+ elif state == 'present':
+ if existing_line is not None:
+ changed, reason = existing_line.set(backing_device, password, opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_present':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.add(opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_absent':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.remove(opts)
+
+ if changed and not module.check_mode:
+ try:
+ f = open(path, 'wb')
+ f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
+ finally:
+ f.close()
+
+ module.exit_json(changed=changed, msg=reason, **module.params)
+
+
+class Crypttab(object):
+    def __init__(self, path):
+        self._lines = []
+        self.path = path
+ if not os.path.exists(path):
+ if not os.path.exists(os.path.dirname(path)):
+ os.makedirs(os.path.dirname(path))
+ open(path, 'a').close()
+
+ try:
+ f = open(path, 'r')
+ for line in f.readlines():
+ self._lines.append(Line(line))
+ finally:
+ f.close()
+
+ def add(self, line):
+ self._lines.append(line)
+ return True, 'added line'
+
+ def lines(self):
+ for line in self._lines:
+ if line.valid():
+ yield line
+
+ def match(self, name):
+ for line in self.lines():
+ if line.name == name:
+ return line
+ return None
+
+ def __str__(self):
+ lines = []
+ for line in self._lines:
+ lines.append(str(line))
+ crypttab = '\n'.join(lines)
+ if len(crypttab) == 0:
+ crypttab += '\n'
+ if crypttab[-1] != '\n':
+ crypttab += '\n'
+ return crypttab
+
+
+class Line(object):
+ def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
+ self.line = line
+ self.name = name
+ self.backing_device = backing_device
+ self.password = password
+ self.opts = Options(opts)
+
+ if line is not None:
+ self.line = self.line.rstrip('\n')
+ if self._line_valid(line):
+ self.name, backing_device, password, opts = self._split_line(line)
+
+ self.set(backing_device, password, opts)
+
+ def set(self, backing_device, password, opts):
+ changed = False
+
+ if backing_device is not None and self.backing_device != backing_device:
+ self.backing_device = backing_device
+ changed = True
+
+ if password is not None and self.password != password:
+ self.password = password
+ changed = True
+
+ if opts is not None:
+ opts = Options(opts)
+ if opts != self.opts:
+ self.opts = opts
+ changed = True
+
+ return changed, 'updated line'
+
+ def _line_valid(self, line):
+ if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
+ return False
+ return True
+
+ def _split_line(self, line):
+ fields = line.split()
+ try:
+ field2 = fields[2]
+ except IndexError:
+ field2 = None
+ try:
+ field3 = fields[3]
+ except IndexError:
+ field3 = None
+
+ return (fields[0],
+ fields[1],
+ field2,
+ field3)
+
+ def remove(self):
+ self.line, self.name, self.backing_device = '', None, None
+ return True, 'removed line'
+
+ def valid(self):
+ if self.name is not None and self.backing_device is not None:
+ return True
+ return False
+
+ def __str__(self):
+ if self.valid():
+ fields = [self.name, self.backing_device]
+ if self.password is not None or self.opts:
+ if self.password is not None:
+ fields.append(self.password)
+ else:
+ fields.append('none')
+ if self.opts:
+ fields.append(str(self.opts))
+ return ' '.join(fields)
+ return self.line
+
+
+class Options(dict):
+ """opts_string looks like: 'discard,foo=bar,baz=greeble' """
+
+ def __init__(self, opts_string):
+ super(Options, self).__init__()
+ self.itemlist = []
+ if opts_string is not None:
+ for opt in opts_string.split(','):
+ kv = opt.split('=')
+ if len(kv) > 1:
+ k, v = (kv[0], kv[1])
+ else:
+ k, v = (kv[0], None)
+ self[k] = v
+
+ def add(self, opts_string):
+ changed = False
+ for k, v in Options(opts_string).items():
+ if k in self:
+ if self[k] != v:
+ changed = True
+ else:
+ changed = True
+ self[k] = v
+ return changed, 'updated options'
+
+ def remove(self, opts_string):
+ changed = False
+ for k in Options(opts_string):
+ if k in self:
+ del self[k]
+ changed = True
+ return changed, 'removed options'
+
+ def keys(self):
+ return self.itemlist
+
+ def values(self):
+ return [self[key] for key in self]
+
+ def items(self):
+ return [(key, self[key]) for key in self]
+
+ def __iter__(self):
+ return iter(self.itemlist)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ self.itemlist.append(key)
+ super(Options, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ self.itemlist.remove(key)
+ super(Options, self).__delitem__(key)
+
+ def __ne__(self, obj):
+ return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items()))
+
+ def __str__(self):
+ ret = []
+ for k, v in self.items():
+ if v is None:
+ ret.append(k)
+ else:
+ ret.append('%s=%s' % (k, v))
+ return ','.join(ret)
+
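+# Illustrative sketch, not part of the module: Options is an order-preserving
+# dict keyed on option name, e.g.:
+#
+#   >>> opts = Options('discard,cipher=aes-cbc-essiv:sha256')
+#   >>> opts.add('size=256')
+#   (True, 'updated options')
+#   >>> str(opts)
+#   'discard,cipher=aes-cbc-essiv:sha256,size=256'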
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py
new file mode 100644
index 00000000..27bfc1a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+"""short_description: Check or wait for migrations between nodes"""
+
+# Copyright: (c) 2018, Albert Autin
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: aerospike_migrations
+short_description: Check or wait for migrations between nodes
+description:
+ - This can be used to check for migrations in a cluster.
+ This makes it easy to do a rolling upgrade/update on Aerospike nodes.
+ - If waiting for migrations is not desired, simply poll until
+ port 3000 is available or C(asinfo -v status) returns ok
+author: "Albert Autin (@Alb0t)"
+options:
+ host:
+ description:
+ - Which host do we use as seed for info connection
+ required: False
+ type: str
+ default: localhost
+ port:
+ description:
+ - Which port to connect to Aerospike on (service port)
+ required: False
+ type: int
+ default: 3000
+ connect_timeout:
+ description:
+ - How long to try to connect before giving up (milliseconds)
+ required: False
+ type: int
+ default: 1000
+ consecutive_good_checks:
+ description:
+ - How many times should the cluster report "no migrations"
+ consecutively before returning OK to Ansible?
+ required: False
+ type: int
+ default: 3
+ sleep_between_checks:
+ description:
+ - How long to sleep between each check (seconds).
+ required: False
+ type: int
+ default: 60
+ tries_limit:
+ description:
+ - How many times do we poll before giving up and failing?
+ default: 300
+ required: False
+ type: int
+ local_only:
+ description:
+ - Do you wish to only check for migrations on the local node
+ before returning, or do you want all nodes in the cluster
+ to finish before returning?
+ required: True
+ type: bool
+ min_cluster_size:
+ description:
+ - The check will keep failing until the cluster size is met
+ or tries are exhausted
+ required: False
+ type: int
+ default: 1
+ fail_on_cluster_change:
+ description:
+ - Fail if the cluster key changes.
+ If something else is changing the cluster, we may want to fail.
+ required: False
+ type: bool
+ default: True
+ migrate_tx_key:
+ description:
+ - The metric key used to determine if we have tx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_tx_partitions_remaining
+ migrate_rx_key:
+ description:
+ - The metric key used to determine if we have rx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_rx_partitions_remaining
+ target_cluster_size:
+ description:
+ - When all Aerospike builds in the cluster are greater than
+ version 4.3, then the C(cluster-stable) info command will be used.
+ Inside this command, you can optionally specify what the target
+ cluster size is - but it is not necessary. You can still rely on
+ min_cluster_size if you don't want to use this option.
+ - If this option is specified on a cluster that has at least one
+ host below version 4.3, it will be ignored until the minimum
+ version reaches 4.3.
+ required: False
+ type: int
+'''
+EXAMPLES = '''
+# check for migrations on local node
+- name: Wait for migrations on local node before proceeding
+ community.general.aerospike_migrations:
+ host: "localhost"
+ connect_timeout: 2000
+ consecutive_good_checks: 5
+ sleep_between_checks: 15
+ tries_limit: 600
+ local_only: False
+
+# example playbook:
+---
+- name: Upgrade aerospike
+ hosts: all
+ become: true
+ serial: 1
+ tasks:
+ - name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - python
+ - python-pip
+ - python-setuptools
+ state: latest
+ - name: Setup aerospike
+ ansible.builtin.pip:
+ name: aerospike
+# Check for migrations every (sleep_between_checks) seconds.
+# If at least (consecutive_good_checks) checks come back OK in a row, return OK.
+# Will exit on any exception, which can be caused by bad nodes,
+# nodes not returning data, or other reasons.
+# The maximum runtime before giving up in this case is roughly:
+# tries_limit * sleep_between_checks * delay * retries
+ - name: Wait for aerospike migrations
+ community.general.aerospike_migrations:
+ local_only: True
+ sleep_between_checks: 1
+ tries_limit: 5
+ consecutive_good_checks: 3
+ fail_on_cluster_change: true
+ min_cluster_size: 3
+ target_cluster_size: 4
+ register: migrations_check
+ until: migrations_check is succeeded
+ changed_when: false
+ delay: 60
+ retries: 120
+ - name: Another thing
+ ansible.builtin.shell: |
+ echo foo
+ - name: Reboot
+ ansible.builtin.reboot:
+'''
+
+RETURN = '''
+# Returns only a success/failure result. Changed is always false.
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+LIB_FOUND_ERR = None
+try:
+ import aerospike
+ from time import sleep
+ import re
+except ImportError as ie:
+ LIB_FOUND = False
+ LIB_FOUND_ERR = traceback.format_exc()
+else:
+ LIB_FOUND = True
+
+
+def run_module():
+ """run ansible module"""
+ module_args = dict(
+ host=dict(type='str', required=False, default='localhost'),
+ port=dict(type='int', required=False, default=3000),
+ connect_timeout=dict(type='int', required=False, default=1000),
+ consecutive_good_checks=dict(type='int', required=False, default=3),
+ sleep_between_checks=dict(type='int', required=False, default=60),
+ tries_limit=dict(type='int', required=False, default=300),
+ local_only=dict(type='bool', required=True),
+ min_cluster_size=dict(type='int', required=False, default=1),
+ target_cluster_size=dict(type='int', required=False, default=None),
+ fail_on_cluster_change=dict(type='bool', required=False, default=True),
+ migrate_tx_key=dict(type='str', required=False,
+ default="migrate_tx_partitions_remaining"),
+ migrate_rx_key=dict(type='str', required=False,
+ default="migrate_rx_partitions_remaining")
+ )
+
+ result = dict(
+ changed=False,
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+ if not LIB_FOUND:
+ module.fail_json(msg=missing_required_lib('aerospike'),
+ exception=LIB_FOUND_ERR)
+
+ try:
+ if module.check_mode:
+ has_migrations, skip_reason = False, None
+ else:
+ migrations = Migrations(module)
+ has_migrations, skip_reason = migrations.has_migs(
+ module.params['local_only']
+ )
+
+ if has_migrations:
+ module.fail_json(msg="Failed.", skip_reason=skip_reason)
+ except Exception as e:
+ module.fail_json(msg="Error: {0}".format(e))
+
+ module.exit_json(**result)
+
+
+class Migrations:
+ """ Check or wait for migrations between nodes """
+
+ def __init__(self, module):
+ self.module = module
+ self._client = self._create_client().connect()
+ self._nodes = {}
+ self._update_nodes_list()
+ self._cluster_statistics = {}
+ self._update_cluster_statistics()
+ self._namespaces = set()
+ self._update_cluster_namespace_list()
+ self._build_list = set()
+ self._update_build_list()
+ self._start_cluster_key = \
+ self._cluster_statistics[self._nodes[0]]['cluster_key']
+
+ def _create_client(self):
+ """ TODO: add support for auth, tls, and other special features
+ I won't use those features, so I'll wait until somebody complains
+ or does it for me (Cross fingers)
+ create the client object"""
+ config = {
+ 'hosts': [
+ (self.module.params['host'], self.module.params['port'])
+ ],
+ 'policies': {
+ 'timeout': self.module.params['connect_timeout']
+ }
+ }
+ return aerospike.client(config)
+
+ def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
+ """delimiter is for separate stats that come back, NOT for kv
+ separation which is ="""
+ if node is None: # If no node passed, use the first one (local)
+ node = self._nodes[0]
+ data = self._client.info_node(cmd, node)
+ data = data.split("\t")
+ if len(data) != 1 and len(data) != 2:
+ self.module.fail_json(
+ msg="Unexpected number of values returned in info command: " +
+ str(len(data))
+ )
+ # data will be in format 'command\toutput'
+ data = data[-1]
+ data = data.rstrip("\n\r")
+ data_arr = data.split(delimiter)
+
+ # some commands don't return in kv format
+ # so we don't want a dict from those.
+ if '=' in data:
+ retval = dict(
+ metric.split("=", 1) for metric in data_arr
+ )
+ else:
+ # if only 1 element found, and not kv, return just the value.
+ if len(data_arr) == 1:
+ retval = data_arr[0]
+ else:
+ retval = data_arr
+ return retval
+
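To make the parsing above concrete, a minimal sketch of the same transformation on an assumed raw 'statistics' response (the command echoed back, a tab, then ';'-delimited key=value pairs):

    raw = "statistics\tcluster_size=4;cluster_key=abc123;migrate_allowed=true\n"
    data = raw.split("\t")[-1].rstrip("\n\r")
    parsed = dict(metric.split("=", 1) for metric in data.split(";"))
    print(parsed)  # {'cluster_size': '4', 'cluster_key': 'abc123', 'migrate_allowed': 'true'}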
+ def _update_build_list(self):
+ """creates self._build_list which is a unique list
+ of build versions."""
+ self._build_list = set()
+ for node in self._nodes:
+ build = self._info_cmd_helper('build', node)
+ self._build_list.add(build)
+
+ # just checks to see if the version is 4.3 or greater
+ def _can_use_cluster_stable(self):
+ # if version <4.3 we can't use cluster-stable info cmd
+ # regex hack to check for versions beginning with 0-3 or
+ # beginning with 4.0,4.1,4.2
+ if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
+ return False
+ return True
+
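A quick sketch of that gate on a few assumed build strings:

    import re

    for build in ('3.15.1.4', '4.2.0.10', '4.3.0.2', '5.0.0.1'):
        too_old = bool(re.search(r'^([0-3]\.|4\.[0-2])', build))
        print(build, 'cluster-stable usable:', not too_old)
    # 3.15.1.4 and 4.2.0.10 print False; 4.3.0.2 and 5.0.0.1 print True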
+ def _update_cluster_namespace_list(self):
+ """ make a unique list of namespaces
+ TODO: does this work on a rolling namespace add/deletion?
+ thankfully if it doesn't, we don't need this on builds >=4.3"""
+ self._namespaces = set()
+ for node in self._nodes:
+ namespaces = self._info_cmd_helper('namespaces', node)
+ for namespace in namespaces:
+ self._namespaces.add(namespace)
+
+ def _update_cluster_statistics(self):
+ """create a dict of nodes with their related stats """
+ self._cluster_statistics = {}
+ for node in self._nodes:
+ self._cluster_statistics[node] = \
+ self._info_cmd_helper('statistics', node)
+
+ def _update_nodes_list(self):
+ """get a fresh list of all the nodes"""
+ self._nodes = self._client.get_nodes()
+ if not self._nodes:
+ self.module.fail_json("Failed to retrieve at least 1 node.")
+
+ def _namespace_has_migs(self, namespace, node=None):
+ """returns a True or False.
+ Does the namespace have migrations for the node passed?
+ If no node passed, uses the local node or the first one in the list"""
+ namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
+ try:
+ namespace_tx = \
+ int(namespace_stats[self.module.params['migrate_tx_key']])
+ namespace_rx = \
+ int(namespace_stats[self.module.params['migrate_rx_key']])
+ except KeyError:
+ self.module.fail_json(
+ msg="Did not find partition remaining key:" +
+ self.module.params['migrate_tx_key'] +
+ " or key:" +
+ self.module.params['migrate_rx_key'] +
+ " in 'namespace/" +
+ namespace +
+ "' output."
+ )
+ except TypeError:
+ self.module.fail_json(
+ msg="namespace stat returned was not numerical"
+ )
+ return namespace_tx != 0 or namespace_rx != 0
+
+ def _node_has_migs(self, node=None):
+ """just calls namespace_has_migs and
+ if any namespace has migs returns true"""
+ migs = 0
+ self._update_cluster_namespace_list()
+ for namespace in self._namespaces:
+ if self._namespace_has_migs(namespace, node):
+ migs += 1
+ return migs != 0
+
+ def _cluster_key_consistent(self):
+ """create a dictionary to store what each node
+ returns the cluster key as. we should end up with only 1 dict key,
+ with the key being the cluster key."""
+ cluster_keys = {}
+ for node in self._nodes:
+ cluster_key = self._cluster_statistics[node][
+ 'cluster_key']
+ if cluster_key not in cluster_keys:
+ cluster_keys[cluster_key] = 1
+ else:
+ cluster_keys[cluster_key] += 1
+ if len(cluster_keys.keys()) == 1 and \
+ self._start_cluster_key in cluster_keys:
+ return True
+ return False
+
+ def _cluster_migrates_allowed(self):
+ """ensure all nodes have 'migrate_allowed' in their stats output"""
+ for node in self._nodes:
+ node_stats = self._info_cmd_helper('statistics', node)
+ allowed = node_stats['migrate_allowed']
+ if allowed == "false":
+ return False
+ return True
+
+ def _cluster_has_migs(self):
+ """calls node_has_migs for each node"""
+ migs = 0
+ for node in self._nodes:
+ if self._node_has_migs(node):
+ migs += 1
+ if migs == 0:
+ return False
+ return True
+
+ def _has_migs(self, local):
+ if local:
+ return self._local_node_has_migs()
+ return self._cluster_has_migs()
+
+ def _local_node_has_migs(self):
+ return self._node_has_migs(None)
+
+ def _is_min_cluster_size(self):
+ """checks that all nodes in the cluster are returning the
+ minimum cluster size specified in their statistics output"""
+ sizes = set()
+ for node in self._cluster_statistics:
+ sizes.add(int(self._cluster_statistics[node]['cluster_size']))
+
+ if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no
+ return False
+ if (min(sizes)) >= self.module.params['min_cluster_size']:
+ return True
+ return False
+
+ def _cluster_stable(self):
+ """Added 4.3:
+ cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
+ Returns the current 'cluster_key' when the following are satisfied:
+
+ If 'size' is specified then the target node's 'cluster-size'
+ must match size.
+ If 'ignore-migrations' is either unspecified or 'false' then
+ the target node's migrations counts must be zero for the provided
+ 'namespace' or all namespaces if 'namespace' is not provided."""
+ cluster_key = set()
+ cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
+ cmd = "cluster-stable:"
+ target_cluster_size = self.module.params['target_cluster_size']
+ if target_cluster_size is not None:
+ cmd = cmd + "size=" + str(target_cluster_size) + ";"
+ for node in self._nodes:
+ try:
+ cluster_key.add(self._info_cmd_helper(cmd, node))
+ except aerospike.exception.ServerError as e: # an unstable cluster is reported via an exception
+ if 'unstable-cluster' in e.msg:
+ return False
+ raise e
+ if len(cluster_key) == 1:
+ return True
+ return False
+
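In other words, every node must answer with the same cluster key for the cluster to count as stable. A minimal sketch with assumed per-node responses:

    cluster_key = {'abc123'}          # seeded from the local 'statistics' call
    for node_response in ('abc123', 'abc123', 'abc123'):   # assumed answers
        cluster_key.add(node_response)
    print(len(cluster_key) == 1)      # True -> stable; any disagreement grows the set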
+ def _cluster_good_state(self):
+ """checks a few things to make sure we're OK to say the cluster
+ has no migs. It could be in an unhealthy condition that does not allow
+ migs, or a split brain"""
+ if self._cluster_key_consistent() is not True:
+ return False, "Cluster key inconsistent."
+ if self._is_min_cluster_size() is not True:
+ return False, "Cluster min size not reached."
+ if self._cluster_migrates_allowed() is not True:
+ return False, "migrate_allowed is false somewhere."
+ return True, "OK."
+
+ def has_migs(self, local=True):
+ """returns a boolean, False if no migrations otherwise True"""
+ consecutive_good = 0
+ try_num = 0
+ skip_reason = list()
+ while \
+ try_num < int(self.module.params['tries_limit']) and \
+ consecutive_good < \
+ int(self.module.params['consecutive_good_checks']):
+
+ self._update_nodes_list()
+ self._update_cluster_statistics()
+
+ # These checks do not fail the module outright because
+ # we probably want to skip & sleep instead of failing entirely
+ stable, reason = self._cluster_good_state()
+ if stable is not True:
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + reason
+ )
+ else:
+ if self._can_use_cluster_stable():
+ if self._cluster_stable():
+ consecutive_good += 1
+ else:
+ consecutive_good = 0
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " cluster_stable"
+ )
+ elif self._has_migs(local):
+ # print("_has_migs")
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " migrations"
+ )
+ consecutive_good = 0
+ else:
+ consecutive_good += 1
+ if consecutive_good == self.module.params[
+ 'consecutive_good_checks']:
+ break
+ try_num += 1
+ sleep(self.module.params['sleep_between_checks'])
+ if consecutive_good == self.module.params['consecutive_good_checks']:
+ return False, None
+ return True, skip_reason
+
+
+def main():
+ """main method for ansible module"""
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py
new file mode 100644
index 00000000..7b798c36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_database
+short_description: Manage InfluxDB databases
+description:
+ - Manage InfluxDB databases.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ state:
+ description:
+ - Determines if the database should be created or destroyed.
+ choices: [ absent, present ]
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_database command from Ansible Playbooks
+- name: Create database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+
+- name: Destroy database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ state: absent
+
+- name: Create database using custom credentials
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ username: "{{influxdb_username}}"
+ password: "{{influxdb_password}}"
+ database_name: "{{influxdb_database_name}}"
+ ssl: yes
+ validate_certs: yes
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+def find_database(module, client, database_name):
+ database = None
+
+ try:
+ databases = client.get_list_database()
+ for db in databases:
+ if db['name'] == database_name:
+ database = db
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+ return database
+
+
+def create_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.create_database(database_name)
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=True)
+
+
+def drop_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.drop_database(database_name)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent'])
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ database_name = influxdb.database_name
+ database = find_database(module, client, database_name)
+
+ if state == 'present':
+ if database:
+ module.exit_json(changed=False)
+ else:
+ create_database(module, client, database_name)
+
+ if state == 'absent':
+ if database:
+ drop_database(module, client, database_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py
new file mode 100644
index 00000000..d9cf5007
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_query
+short_description: Query data points from InfluxDB
+description:
+ - Query data points from InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ query:
+ description:
+ - Query to be executed.
+ required: true
+ type: str
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Query connections
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections"
+ register: connection
+
+- name: Query connections with tags filters
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections where region='zue01' and host='server01'"
+ register: connection
+
+- name: Print results from the query
+ ansible.builtin.debug:
+ var: connection.query_results
+'''
+
+RETURN = r'''
+query_results:
+ description: Result from the query
+ returned: success
+ type: list
+ sample:
+ - mean: 1245.5333333333333
+ time: "1970-01-01T00:00:00Z"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBRead(InfluxDb):
+
+ def read_by_query(self, query):
+ client = self.connect_to_influxdb()
+ try:
+ rs = client.query(query)
+ if rs:
+ return list(rs.get_points())
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
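For context, a minimal sketch of the influxdb-python calls this class wraps, with assumed connection values:

    from influxdb import InfluxDBClient

    client = InfluxDBClient(host='localhost', port=8086, database='mydb')
    rs = client.query('select mean(value) from connections')
    print(list(rs.get_points()))  # e.g. [{'time': '1970-01-01T00:00:00Z', 'mean': 1245.53}]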
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ query=dict(type='str', required=True),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influx = AnsibleInfluxDBRead(module)
+ query = module.params.get('query')
+ results = influx.read_by_query(query)
+ module.exit_json(changed=True, query_results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py
new file mode 100644
index 00000000..0774915f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_retention_policy
+short_description: Manage InfluxDB retention policies
+description:
+ - Manage InfluxDB retention policies.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - Name of the retention policy.
+ required: true
+ type: str
+ duration:
+ description:
+ - Determines how long InfluxDB should keep the data.
+ required: true
+ type: str
+ replication:
+ description:
+ - Determines how many independent copies of each point are stored in the cluster.
+ required: true
+ type: int
+ default:
+ description:
+ - Sets the retention policy as default retention policy.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_retention_policy command from Ansible Playbooks
+- name: Create 1 hour retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1h
+ replication: 1
+ ssl: yes
+ validate_certs: yes
+
+- name: Create 1 day retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+
+- name: Create 1 week retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1w
+ replication: 1
+
+- name: Create infinite retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: INF
+ replication: 1
+ ssl: no
+ validate_certs: no
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+import re
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+from ansible.module_utils._text import to_native
+
+
+def find_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ hostname = module.params['hostname']
+ retention_policy = None
+
+ try:
+ retention_policies = client.get_list_retention_policies(database=database_name)
+ for policy in retention_policies:
+ if policy['name'] == policy_name:
+ retention_policy = policy
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
+ return retention_policy
+
+
+def create_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+
+ if not module.check_mode:
+ try:
+ client.create_retention_policy(policy_name, duration, replication, database_name, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ module.exit_json(changed=True)
+
+
+def alter_retention_policy(module, client, retention_policy):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+ duration_regexp = re.compile(r'(\d+)([hdw])|(^INF$)')
+ changed = False
+
+ duration_lookup = duration_regexp.search(duration)
+
+ if duration_lookup.group(2) == 'h':
+ influxdb_duration_format = '%s0m0s' % duration
+ elif duration_lookup.group(2) == 'd':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
+ elif duration_lookup.group(2) == 'w':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
+ elif duration == 'INF':
+ influxdb_duration_format = '0'
+
+ if (retention_policy['duration'] != influxdb_duration_format or
+ retention_policy['replicaN'] != int(replication) or
+ retention_policy['default'] != default):
+ if not module.check_mode:
+ try:
+ client.alter_retention_policy(policy_name, database_name, duration, replication, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ changed = True
+ module.exit_json(changed=changed)
+
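The normalisation above converts the module's h/d/w shorthand into the duration format InfluxDB reports before comparing. A standalone sketch on assumed inputs:

    import re

    duration_regexp = re.compile(r'(\d+)([hdw])|(^INF$)')
    for duration in ('1h', '1d', '1w', 'INF'):
        m = duration_regexp.search(duration)
        if m.group(2) == 'h':
            fmt = '%s0m0s' % duration                     # '1h0m0s'
        elif m.group(2) == 'd':
            fmt = '%sh0m0s' % (int(m.group(1)) * 24)      # '24h0m0s'
        elif m.group(2) == 'w':
            fmt = '%sh0m0s' % (int(m.group(1)) * 24 * 7)  # '168h0m0s'
        else:
            fmt = '0'                                     # INF -> infinite retention
        print(duration, '->', fmt)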
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ duration=dict(required=True, type='str'),
+ replication=dict(required=True, type='int'),
+ default=dict(default=False, type='bool')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+
+ retention_policy = find_retention_policy(module, client)
+
+ if retention_policy:
+ alter_retention_policy(module, client, retention_policy)
+ else:
+ create_retention_policy(module, client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
new file mode 100644
index 00000000..e17e3753
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
+# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_user
+short_description: Manage InfluxDB users
+description:
+ - Manage InfluxDB users.
+author: "Vitaliy Zhhuta (@zhhuta)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ user_name:
+ description:
+ - Name of the user.
+ required: True
+ type: str
+ user_password:
+ description:
+ - Password to be set for the user.
+ required: false
+ type: str
+ admin:
+ description:
+ - Whether the user should be in the admin role or not.
+ - Since version 2.8, the role will also be updated.
+ default: no
+ type: bool
+ state:
+ description:
+ - State of the user.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ grants:
+ description:
+ - Privileges to grant to this user.
+ - Takes a list of dicts containing the "database" and "privilege" keys.
+ - If this argument is not provided, the current grants will be left alone.
+ - If an empty list is provided, all grants for the user will be removed.
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Create a user on localhost using default login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+
+- name: Create a user on localhost using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create an admin user on a remote host using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ admin: yes
+ hostname: "{{ influxdb_hostname }}"
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create a user on localhost with privileges
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ grants:
+ - database: 'collectd'
+ privilege: 'WRITE'
+ - database: 'graphite'
+ privilege: 'READ'
+
+- name: Destroy a user using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ state: absent
+'''
+
+RETURN = r'''
+#only defaults
+'''
+
+from ansible.module_utils.urls import ConnectionError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils.influxdb as influx
+
+
+def find_user(module, client, user_name):
+ user_result = None
+
+ try:
+ users = client.get_list_users()
+ for user in users:
+ if user['user'] == user_name:
+ user_result = user
+ break
+ except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
+ module.fail_json(msg=to_native(e))
+ return user_result
+
+
+def check_user_password(module, client, user_name, user_password):
+ try:
+ client.switch_user(user_name, user_password)
+ client.get_list_users()
+ except influx.exceptions.InfluxDBClientError as e:
+ if e.code == 401:
+ return False
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+ finally:
+ # restore previous user
+ client.switch_user(module.params['username'], module.params['password'])
+ return True
+
+
+def set_user_password(module, client, user_name, user_password):
+ if not module.check_mode:
+ try:
+ client.set_user_password(user_name, user_password)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def create_user(module, client, user_name, user_password, admin):
+ if not module.check_mode:
+ try:
+ client.create_user(user_name, user_password, admin)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def drop_user(module, client, user_name):
+ if not module.check_mode:
+ try:
+ client.drop_user(user_name)
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def set_user_grants(module, client, user_name, grants):
+ changed = False
+
+ try:
+ current_grants = client.get_list_privileges(user_name)
+ # Fix privileges wording and drop empty grants. Build a new list
+ # instead of deleting entries while iterating, which would skip items.
+ fixed_grants = []
+ for v in current_grants:
+ if v['privilege'] == 'ALL PRIVILEGES':
+ v['privilege'] = 'ALL'
+ if v['privilege'] != 'NO PRIVILEGES':
+ fixed_grants.append(v)
+ current_grants = fixed_grants
+
+ # check if the current grants are included in the desired ones
+ for current_grant in current_grants:
+ if current_grant not in grants:
+ if not module.check_mode:
+ client.revoke_privilege(current_grant['privilege'],
+ current_grant['database'],
+ user_name)
+ changed = True
+
+ # check if the desired grants are included in the current ones
+ for grant in grants:
+ if grant not in current_grants:
+ if not module.check_mode:
+ client.grant_privilege(grant['privilege'],
+ grant['database'],
+ user_name)
+ changed = True
+
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ return changed
+
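The two loops above amount to a symmetric difference over lists of grant dicts. A minimal sketch with assumed data:

    current_grants = [{'database': 'collectd', 'privilege': 'WRITE'},
                      {'database': 'old_db', 'privilege': 'READ'}]
    desired_grants = [{'database': 'collectd', 'privilege': 'WRITE'},
                      {'database': 'graphite', 'privilege': 'READ'}]

    to_revoke = [g for g in current_grants if g not in desired_grants]
    to_grant = [g for g in desired_grants if g not in current_grants]
    print(to_revoke)  # [{'database': 'old_db', 'privilege': 'READ'}]
    print(to_grant)   # [{'database': 'graphite', 'privilege': 'READ'}]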
+
+def main():
+ argument_spec = influx.InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ user_name=dict(required=True, type='str'),
+ user_password=dict(required=False, type='str', no_log=True),
+ admin=dict(default=False, type='bool'),
+ grants=dict(type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ user_name = module.params['user_name']
+ user_password = module.params['user_password']
+ admin = module.params['admin']
+ grants = module.params['grants']
+ influxdb = influx.InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ user = find_user(module, client, user_name)
+
+ changed = False
+
+ if state == 'present':
+ if user:
+ if user_password is not None and not check_user_password(module, client, user_name, user_password):
+ set_user_password(module, client, user_name, user_password)
+ changed = True
+
+ try:
+ if admin and not user['admin']:
+ if not module.check_mode:
+ client.grant_admin_privileges(user_name)
+ changed = True
+ elif not admin and user['admin']:
+ if not module.check_mode:
+ client.revoke_admin_privileges(user_name)
+ changed = True
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=to_native(e))
+
+ else:
+ user_password = user_password or ''
+ create_user(module, client, user_name, user_password, admin)
+ changed = True
+
+ if grants is not None:
+ if set_user_grants(module, client, user_name, grants):
+ changed = True
+
+ module.exit_json(changed=changed)
+
+ if state == 'absent':
+ if user:
+ drop_user(module, client, user_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py
new file mode 100644
index 00000000..0dc063a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_write
+short_description: Write data points into InfluxDB
+description:
+ - Write data points into InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ data_points:
+ description:
+ - Data points as dict to write into the database.
+ required: true
+ type: list
+ elements: dict
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Write points into database
+ community.general.influxdb_write:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ data_points:
+ - measurement: connections
+ tags:
+ host: server01
+ region: us-west
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 2000
+ - measurement: connections
+ tags:
+ host: server02
+ region: us-east
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 3000
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBWrite(InfluxDb):
+
+ def write_data_point(self, data_points):
+ client = self.connect_to_influxdb()
+
+ try:
+ client.write_points(data_points)
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
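As with the query module, a minimal sketch of the underlying influxdb-python call, mirroring the data shape from EXAMPLES (connection values assumed):

    from influxdb import InfluxDBClient

    client = InfluxDBClient(host='localhost', port=8086, database='mydb')
    points = [{'measurement': 'connections',
               'tags': {'host': 'server01', 'region': 'us-west'},
               'time': '2021-01-01T00:00:00Z',
               'fields': {'value': 2000}}]
    client.write_points(points)  # returns True on success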
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ data_points=dict(required=True, type='list', elements='dict'),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ influx = AnsibleInfluxDBWrite(module)
+ data_points = module.params.get('data_points')
+ influx.write_data_point(data_points)
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py
new file mode 100644
index 00000000..27a67406
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
+# (c) 2017, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticsearch_plugin
+short_description: Manage Elasticsearch plugins
+description:
+ - Manages Elasticsearch plugins.
+author:
+ - Mathew Davies (@ThePixelDeveloper)
+ - Sam Doran (@samdoran)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ src:
+ description:
+ - Optionally set the source location to retrieve the plugin from. This can be a file://
+ URL to install from a local file, or a remote URL. If this is not set, the plugin
+ location is just based on the name.
+ - The name parameter must match the descriptor in the plugin ZIP specified.
+ - Is only used if the state would change, which is solely checked based on the name
+ parameter. If, for example, the plugin is already installed, changing this has no
+ effect.
+ - For ES 1.x use url.
+ required: False
+ url:
+ description:
+ - Set exact URL to download the plugin from (Only works for ES 1.x).
+ - For ES 2.x and higher, use src.
+ required: False
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+ - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
+ default: 1m
+ force:
+ description:
+ - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
+ default: False
+ type: bool
+ plugin_bin:
+ description:
+ - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
+ - The default changed in Ansible 2.4 to None.
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Elasticsearch
+ default: /usr/share/elasticsearch/plugins/
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation
+ version:
+ description:
+ - Version of the plugin to be installed.
+ If plugin exists with previous version, it will NOT be updated
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: present
+
+- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ version: 2.0.0
+
+- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: absent
+
+- name: Install a specific plugin in Elasticsearch >= 5.0
+ community.general.elasticsearch_plugin:
+ name: analysis-icu
+ state: present
+
+- name: Install the ingest-geoip plugin with a forced installation
+ community.general.elasticsearch_plugin:
+ name: ingest-geoip
+ state: present
+ force: yes
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+PLUGIN_BIN_PATHS = (
+ '/usr/share/elasticsearch/bin/elasticsearch-plugin',
+ '/usr/share/elasticsearch/bin/plugin',
+)
+
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
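A short sketch of that normalisation, assuming the function above and sample plugin names:

    for name in ('analysis-icu', 'mobz/elasticsearch-head', 'es-foo'):
        print(name, '->', parse_plugin_repo(name))
    # analysis-icu -> analysis-icu
    # mobz/elasticsearch-head -> head
    # es-foo -> foo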
+
+def is_plugin_present(plugin_name, plugin_dir):
+ return os.path.isdir(os.path.join(plugin_dir, plugin_name))
+
+
+def parse_error(string):
+ reason = "ERROR: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
+ is_old_command = (os.path.basename(plugin_bin) == 'plugin')
+
+ # Timeout and version are only valid for plugin, not elasticsearch-plugin
+ if is_old_command:
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ if version:
+ plugin_name = plugin_name + '/' + version
+ cmd_args[2] = plugin_name
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ # Legacy ES 1.x
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if force:
+ cmd_args.append("--batch")
+ if src:
+ cmd_args.append(src)
+ else:
+ cmd_args.append(plugin_name)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def get_plugin_bin(module, plugin_bin=None):
+ # Use the plugin_bin that was supplied first before trying other options
+ valid_plugin_bin = None
+ if plugin_bin and os.path.isfile(plugin_bin):
+ valid_plugin_bin = plugin_bin
+
+ else:
+ # Add the plugin_bin passed into the module to the top of the list of paths to test,
+ # testing for that binary name first before falling back to the default paths.
+ bin_paths = list(PLUGIN_BIN_PATHS)
+ if plugin_bin and plugin_bin not in bin_paths:
+ bin_paths.insert(0, plugin_bin)
+
+ # Get separate lists of dirs and binary names from the full paths to the
+ # plugin binaries.
+ plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
+ plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
+
+ # Check for the binary names in the default system paths as well as the path
+ # specified in the module arguments.
+ for bin_file in plugin_bins:
+ valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
+ if valid_plugin_bin:
+ break
+
+ if not valid_plugin_bin:
+ module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
+
+ return valid_plugin_bin
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ src=dict(default=None),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ force=dict(type='bool', default=False),
+ plugin_bin=dict(type="path"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ mutually_exclusive=[("src", "url")],
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ src = module.params["src"]
+ timeout = module.params["timeout"]
+ force = module.params["force"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ # Search provided path and system paths for valid binary
+ plugin_bin = get_plugin_bin(module, plugin_bin)
+
+ repo = parse_plugin_repo(name)
+ present = is_plugin_present(repo, plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py
new file mode 100644
index 00000000..e84d8a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Thierno IB. BARRY @barryib
+# Sponsored by Polyconseil http://polyconseil.fr.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+ - This module can be used to manage Kibana plugins.
+author: Thierno IB. BARRY (@barryib)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from.
+ - For local file, prefix its absolute path with file://
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h etc."
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the Kibana binary.
+ default: /opt/kibana/bin/kibana
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Kibana.
+ default: /opt/kibana/installedPlugins/
+ version:
+ description:
+ - Version of the plugin to be installed.
+ - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes.
+ force:
+ description:
+ - Delete and re-install the plugin. Can be useful for plugins update.
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+
+- name: Install specific version of a plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+ version: '2.3.3'
+
+- name: Uninstall Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: absent
+ name: elasticsearch/marvel
+'''
+
+RETURN = '''
+cmd:
+ description: the launched command during plugin management (install / remove)
+ returned: success
+ type: str
+name:
+ description: the plugin name to install or remove
+ returned: success
+ type: str
+url:
+ description: the url from where the plugin is installed from
+ returned: success
+ type: str
+timeout:
+ description: the timeout for plugin download
+ returned: success
+ type: str
+stdout:
+ description: the command stdout
+ returned: success
+ type: str
+stderr:
+ description: the command stderr
+ returned: success
+ type: str
+state:
+ description: the state for the managed plugin
+ returned: success
+ type: str
+'''
+
+import os
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="--install",
+ absent="--remove"
+)
+
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "install"]
+ if url:
+ cmd_args.append(url)
+ else:
+ cmd_args.append(plugin_name)
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "remove", plugin_name]
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def get_kibana_version(module, plugin_bin):
+ cmd_args = [plugin_bin, '--version']
+ cmd = " ".join(cmd_args)
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to get Kibana version : %s" % err)
+
+ return out.strip()
+
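The install and remove paths branch on this version string via LooseVersion, as sketched below with assumed versions:

    from distutils.version import LooseVersion

    for v in ('4.5.1', '4.6', '5.6.16'):
        print(v, 'uses kibana-plugin:', LooseVersion(v) > LooseVersion('4.6'))
    # 4.5.1 False, 4.6 False, 5.6.16 True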
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
+ plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
+ version=dict(default=None),
+ force=dict(default="no", type="bool")
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
+
+ changed, cmd, out, err = False, '', '', ''
+
+ kibana_version = get_kibana_version(module, plugin_bin)
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present" and not force) or (state == "absent" and not present and not force):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if version:
+ name = name + '/' + version
+
+ if state == "present":
+ if force:
+ remove_plugin(module, plugin_bin, name, kibana_version)
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py
new file mode 100644
index 00000000..313a7f70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Westcott <john.westcott.iv@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: odbc
+author: "John Westcott IV (@john-westcott-iv)"
+version_added: "1.0.0"
+short_description: Execute SQL via ODBC
+description:
+ - Read/Write info via ODBC drivers.
+options:
+ dsn:
+ description:
+ - The connection string passed into ODBC.
+ required: yes
+ type: str
+ query:
+ description:
+ - The SQL query to perform.
+ required: yes
+ type: str
+ params:
+ description:
+ - Parameters to pass to the SQL query.
+ type: list
+ elements: str
+ commit:
+ description:
+ - Perform a commit after the execution of the SQL query.
+ - Some databases allow a commit after a select whereas others raise an exception.
+ - Default is C(true) to support legacy module behavior.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+requirements:
+ - "python >= 2.6"
+ - "pyodbc"
+
+notes:
+ - "Like the command module, this module always returns changed = yes whether or not the query would change the database."
+ - "To alter this behavior you can use C(changed_when): [yes or no]."
+ - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)."
+'''
+
+EXAMPLES = '''
+- name: Set some values in the test db
+ community.general.odbc:
+ dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;"
+ query: "Select * from table_a where column1 = ?"
+ params:
+ - "value1"
+ commit: false
+ changed_when: no
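+
+# An additional, hedged illustration (the table name is hypothetical):
+# DDL statements typically need a commit and return no rows.
+- name: Create a table in the test db
+  community.general.odbc:
+    dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;"
+    query: "CREATE TABLE table_b (column1 VARCHAR(10))"
+    commit: true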
+'''
+
+RETURN = '''
+results:
+ description: List of lists of strings containing selected rows, likely empty for DDL statements.
+ returned: success
+ type: list
+ elements: list
+description:
+ description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes."
+ returned: success
+ type: list
+ elements: dict
+row_count:
+ description: "The number of rows selected or modified according to the cursor defaults to -1. See notes."
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+HAS_PYODBC = None
+try:
+ import pyodbc
+ HAS_PYODBC = True
+except ImportError:
+ HAS_PYODBC = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dsn=dict(type='str', required=True, no_log=True),
+ query=dict(type='str', required=True),
+ params=dict(type='list', elements='str'),
+ commit=dict(type='bool', default=True),
+ ),
+ )
+
+ dsn = module.params.get('dsn')
+ query = module.params.get('query')
+ params = module.params.get('params')
+ commit = module.params.get('commit')
+
+ if not HAS_PYODBC:
+ module.fail_json(msg=missing_required_lib('pyodbc'))
+
+ # Try to make a connection with the DSN
+ connection = None
+ try:
+ connection = pyodbc.connect(dsn)
+ except Exception as e:
+ module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e)))
+
+ result = dict(
+ changed=True,
+ description=[],
+ row_count=-1,
+ results=[],
+ )
+
+ try:
+ cursor = connection.cursor()
+
+ if params:
+ cursor.execute(query, params)
+ else:
+ cursor.execute(query)
+ if commit:
+ cursor.commit()
+ try:
+            # Get the rows out into a 2D array
+ for row in cursor.fetchall():
+ new_row = []
+ for column in row:
+ new_row.append("{0}".format(column))
+ result['results'].append(new_row)
+
+ # Return additional information from the cursor
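+            # pyodbc follows PEP 249 here: cursor.description is a sequence
+            # of 7-item tuples (name, type_code, display_size, internal_size,
+            # precision, scale, null_ok).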
+ for row_description in cursor.description:
+ description = {}
+ description['name'] = row_description[0]
+ description['type'] = row_description[1].__name__
+ description['display_size'] = row_description[2]
+ description['internal_size'] = row_description[3]
+ description['precision'] = row_description[4]
+ description['scale'] = row_description[5]
+ description['nullable'] = row_description[6]
+ result['description'].append(description)
+
+ result['row_count'] = cursor.rowcount
+        except pyodbc.ProgrammingError:
+            # Non-SELECT statements have no result set; pyodbc raises
+            # ProgrammingError from fetchall(), which is expected here.
+            pass
+ except Exception as e:
+ module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e)))
+
+ cursor.close()
+ except Exception as e:
+ module.fail_json(msg="Failed to execute query: {0}".format(to_native(e)))
+ finally:
+ connection.close()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py
new file mode 100644
index 00000000..5ffbd7db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis
+short_description: Various redis commands, slave and flush
+description:
+ - Unified utility to interact with redis instances.
+options:
+ command:
+ description:
+ - The selected redis command
+ - C(config) (new in 1.6), ensures a configuration setting on an instance.
+ - C(flush) flushes all the instance or a specified db.
+ - C(slave) sets a redis instance in slave or master mode.
+ choices: [ config, flush, slave ]
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with (usually not used)
+ type: str
+ login_host:
+ description:
+ - The host running the database
+ default: localhost
+ type: str
+ login_port:
+ description:
+ - The port to connect to
+ default: 6379
+ type: int
+ master_host:
+ description:
+ - The host of the master instance [slave command]
+ type: str
+ master_port:
+ description:
+ - The port of the master instance [slave command]
+ type: int
+ slave_mode:
+ description:
+      - The mode of the redis instance [slave command]
+ default: slave
+ choices: [ master, slave ]
+ type: str
+ db:
+ description:
+ - The database to flush (used in db mode) [flush command]
+ type: int
+ flush_mode:
+ description:
+ - Type of flush (all the dbs in a redis instance or a specific one)
+ [flush command]
+ default: all
+ choices: [ all, db ]
+ type: str
+ name:
+ description:
+ - A redis config key.
+ type: str
+ value:
+ description:
+      - A redis config value. When memory size is needed, it is possible
+        to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
+        Units are case insensitive, i.e. 1m = 1mb = 1M = 1MB.
+ type: str
+
+notes:
+ - Requires the redis-py Python package on the remote host. You can
+     install it with pip (C(pip install redis)) or with a package manager.
+     U(https://github.com/andymccurdy/redis-py)
+   - If the redis master instance that we are making a slave of is password
+     protected, this needs to be set in the redis.conf in the masterauth variable.
+
+seealso:
+ - module: community.general.redis_info
+requirements: [ redis ]
+author: "Xabier Larrakoetxea (@slok)"
+'''
+
+EXAMPLES = '''
+- name: Set local redis instance to be slave of melee.island on port 6377
+ community.general.redis:
+ command: slave
+ master_host: melee.island
+ master_port: 6377
+
+- name: Deactivate slave mode
+ community.general.redis:
+ command: slave
+ slave_mode: master
+
+- name: Flush all the redis db
+ community.general.redis:
+ command: flush
+ flush_mode: all
+
+- name: Flush only one db in a redis instance
+ community.general.redis:
+ command: flush
+ db: 1
+ flush_mode: db
+
+- name: Configure local redis to have 10000 max clients
+ community.general.redis:
+ command: config
+ name: maxclients
+ value: 10000
+
+- name: Configure local redis maxmemory to 4GB
+ community.general.redis:
+ command: config
+ name: maxmemory
+ value: 4GB
+
+- name: Configure local redis to have lua time limit of 100 ms
+ community.general.redis:
+ command: config
+ name: lua-time-limit
+ value: 100
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ import redis
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ redis_found = False
+else:
+ redis_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils._text import to_native
+import re
+
+
+# Redis module specific support methods.
+def set_slave_mode(client, master_host, master_port):
+ try:
+ return client.slaveof(master_host, master_port)
+ except Exception:
+ return False
+
+
+def set_master_mode(client):
+ try:
+ return client.slaveof()
+ except Exception:
+ return False
+
+
+def flush(client, db=None):
+ try:
+ if not isinstance(db, int):
+ return client.flushall()
+ else:
+ # The passed client has been connected to the database already
+ return client.flushdb()
+ except Exception:
+ return False
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(type='str', choices=['config', 'flush', 'slave']),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ master_host=dict(type='str'),
+ master_port=dict(type='int'),
+ slave_mode=dict(type='str', default='slave', choices=['master', 'slave']),
+ db=dict(type='int'),
+ flush_mode=dict(type='str', default='all', choices=['all', 'db']),
+ name=dict(type='str'),
+ value=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ if not redis_found:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ command = module.params['command']
+
+ # Slave Command section -----------
+ if command == "slave":
+ master_host = module.params['master_host']
+ master_port = module.params['master_port']
+ mode = module.params['slave_mode']
+
+ # Check if we have all the data
+ if mode == "slave": # Only need data if we want to be slave
+ if not master_host:
+ module.fail_json(msg='In slave mode master host must be provided')
+
+ if not master_port:
+ module.fail_json(msg='In slave mode master port must be provided')
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Check if we are already in the mode that we want
+ info = r.info()
+ if mode == "master" and info["role"] == "master":
+ module.exit_json(changed=False, mode=mode)
+
+ elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
+ status = dict(
+ status=mode,
+ master_host=master_host,
+ master_port=master_port,
+ )
+ module.exit_json(changed=False, mode=status)
+ else:
+ # Do the stuff
+            # (Check check_mode before commands so the commands aren't evaluated
+ # if not necessary)
+ if mode == "slave":
+                if module.check_mode or set_slave_mode(r, master_host, master_port):
+ info = r.info()
+ status = {
+ 'status': mode,
+ 'master_host': master_host,
+ 'master_port': master_port,
+ }
+ module.exit_json(changed=True, mode=status)
+ else:
+ module.fail_json(msg='Unable to set slave mode')
+
+ else:
+ if module.check_mode or set_master_mode(r):
+ module.exit_json(changed=True, mode=mode)
+ else:
+ module.fail_json(msg='Unable to set master mode')
+
+ # flush Command section -----------
+ elif command == "flush":
+ db = module.params['db']
+ mode = module.params['flush_mode']
+
+ # Check if we have all the data
+ if mode == "db":
+ if db is None:
+ module.fail_json(msg="In db mode the db number must be provided")
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Do the stuff
+        # (Check check_mode before commands so the commands aren't evaluated
+ # if not necessary)
+ if mode == "all":
+ if module.check_mode or flush(r):
+ module.exit_json(changed=True, flushed=True)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush all databases")
+
+ else:
+ if module.check_mode or flush(r, db):
+ module.exit_json(changed=True, flushed=True, db=db)
+ else: # Flush never fails :)
+                module.fail_json(msg="Unable to flush database %d" % db)
+ elif command == 'config':
+ name = module.params['name']
+
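+        # human_to_bytes converts strings such as '1KB' or '2M' into a byte
+        # count using a 1024 base (as documented for the value option above),
+        # so the result can be compared with the byte count Redis reports.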
+ try: # try to parse the value as if it were the memory size
+ if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()):
+ value = str(human_to_bytes(module.params['value'].upper()))
+ else:
+ value = module.params['value']
+ except ValueError:
+ value = module.params['value']
+
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ old_value = r.config_get(name)[name]
+ except Exception as e:
+ module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
+ changed = old_value != value
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ try:
+ r.config_set(name, value)
+ except Exception as e:
+ module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ module.fail_json(msg='A valid command must be provided')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py
new file mode 100644
index 00000000..b615addb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: redis_info
+short_description: Gather information about Redis servers
+version_added: '0.2.0'
+description:
+- Gathers information and statistics about Redis servers.
+options:
+ login_host:
+ description:
+ - The host running the database.
+ type: str
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to.
+ type: int
+ default: 6379
+ login_password:
+ description:
+ - The password used to authenticate with, when authentication is enabled for the Redis server.
+ type: str
+notes:
+- Requires the redis-py Python package on the remote host. You can
+ install it with pip (C(pip install redis)) or with a package manager.
+ U(https://github.com/andymccurdy/redis-py)
+seealso:
+- module: community.general.redis
+requirements: [ redis ]
+author: "Pavlo Bashynskyi (@levonet)"
+'''
+
+EXAMPLES = r'''
+- name: Get server information
+ community.general.redis_info:
+ register: result
+
+- name: Print server information
+ ansible.builtin.debug:
+ var: result.info
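+
+# A small additional illustration: individual keys of the returned info
+# dict can be referenced directly (the key below appears in the RETURN sample).
+- name: Print only the Redis version
+  ansible.builtin.debug:
+    var: result.info.redis_version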
+'''
+
+RETURN = r'''
+info:
+ description: The default set of server information sections U(https://redis.io/commands/info).
+ returned: success
+ type: dict
+ sample: {
+ "active_defrag_hits": 0,
+ "active_defrag_key_hits": 0,
+ "active_defrag_key_misses": 0,
+ "active_defrag_misses": 0,
+ "active_defrag_running": 0,
+ "allocator_active": 932409344,
+ "allocator_allocated": 932062792,
+ "allocator_frag_bytes": 346552,
+ "allocator_frag_ratio": 1.0,
+ "allocator_resident": 947253248,
+ "allocator_rss_bytes": 14843904,
+ "allocator_rss_ratio": 1.02,
+ "aof_current_rewrite_time_sec": -1,
+ "aof_enabled": 0,
+ "aof_last_bgrewrite_status": "ok",
+ "aof_last_cow_size": 0,
+ "aof_last_rewrite_time_sec": -1,
+ "aof_last_write_status": "ok",
+ "aof_rewrite_in_progress": 0,
+ "aof_rewrite_scheduled": 0,
+ "arch_bits": 64,
+ "atomicvar_api": "atomic-builtin",
+ "blocked_clients": 0,
+ "client_recent_max_input_buffer": 4,
+ "client_recent_max_output_buffer": 0,
+ "cluster_enabled": 0,
+ "config_file": "",
+ "configured_hz": 10,
+ "connected_clients": 4,
+ "connected_slaves": 0,
+ "db0": {
+ "avg_ttl": 1945628530,
+ "expires": 16,
+ "keys": 3341411
+ },
+ "evicted_keys": 0,
+ "executable": "/data/redis-server",
+ "expired_keys": 9,
+ "expired_stale_perc": 1.72,
+ "expired_time_cap_reached_count": 0,
+ "gcc_version": "9.2.0",
+ "hz": 10,
+ "instantaneous_input_kbps": 0.0,
+ "instantaneous_ops_per_sec": 0,
+ "instantaneous_output_kbps": 0.0,
+ "keyspace_hits": 0,
+ "keyspace_misses": 0,
+ "latest_fork_usec": 0,
+ "lazyfree_pending_objects": 0,
+ "loading": 0,
+ "lru_clock": 11603632,
+ "master_repl_offset": 118831417,
+ "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e",
+ "master_replid2": "0000000000000000000000000000000000000000",
+ "maxmemory": 0,
+ "maxmemory_human": "0B",
+ "maxmemory_policy": "noeviction",
+ "mem_allocator": "jemalloc-5.1.0",
+ "mem_aof_buffer": 0,
+ "mem_clients_normal": 49694,
+ "mem_clients_slaves": 0,
+ "mem_fragmentation_bytes": 12355480,
+ "mem_fragmentation_ratio": 1.01,
+ "mem_not_counted_for_evict": 0,
+ "mem_replication_backlog": 1048576,
+ "migrate_cached_sockets": 0,
+ "multiplexing_api": "epoll",
+ "number_of_cached_scripts": 0,
+ "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64",
+ "process_id": 1,
+ "pubsub_channels": 0,
+ "pubsub_patterns": 0,
+ "rdb_bgsave_in_progress": 0,
+ "rdb_changes_since_last_save": 671,
+ "rdb_current_bgsave_time_sec": -1,
+ "rdb_last_bgsave_status": "ok",
+ "rdb_last_bgsave_time_sec": -1,
+ "rdb_last_cow_size": 0,
+ "rdb_last_save_time": 1588702236,
+ "redis_build_id": "a31260535f820267",
+ "redis_git_dirty": 0,
+ "redis_git_sha1": 0,
+ "redis_mode": "standalone",
+ "redis_version": "999.999.999",
+ "rejected_connections": 0,
+ "repl_backlog_active": 1,
+ "repl_backlog_first_byte_offset": 118707937,
+ "repl_backlog_histlen": 123481,
+ "repl_backlog_size": 1048576,
+ "role": "master",
+ "rss_overhead_bytes": -3051520,
+ "rss_overhead_ratio": 1.0,
+ "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4",
+ "second_repl_offset": 118830003,
+ "slave_expires_tracked_keys": 0,
+ "sync_full": 0,
+ "sync_partial_err": 0,
+ "sync_partial_ok": 0,
+ "tcp_port": 6379,
+ "total_commands_processed": 885,
+ "total_connections_received": 10,
+ "total_net_input_bytes": 802709255,
+ "total_net_output_bytes": 31754,
+ "total_system_memory": 135029538816,
+ "total_system_memory_human": "125.76G",
+ "uptime_in_days": 53,
+ "uptime_in_seconds": 4631778,
+ "used_cpu_sys": 4.668282,
+ "used_cpu_sys_children": 0.002191,
+ "used_cpu_user": 4.21088,
+ "used_cpu_user_children": 0.0,
+ "used_memory": 931908760,
+ "used_memory_dataset": 910774306,
+ "used_memory_dataset_perc": "97.82%",
+ "used_memory_human": "888.74M",
+ "used_memory_lua": 37888,
+ "used_memory_lua_human": "37.00K",
+ "used_memory_overhead": 21134454,
+ "used_memory_peak": 932015216,
+ "used_memory_peak_human": "888.84M",
+ "used_memory_peak_perc": "99.99%",
+ "used_memory_rss": 944201728,
+ "used_memory_rss_human": "900.46M",
+ "used_memory_scripts": 0,
+ "used_memory_scripts_human": "0B",
+ "used_memory_startup": 791264
+ }
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ from redis import StrictRedis
+ HAS_REDIS_PACKAGE = True
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ HAS_REDIS_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def redis_client(**client_params):
+ return StrictRedis(**client_params)
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ login_password=dict(type='str', no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_REDIS_PACKAGE:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_password = module.params['login_password']
+
+ # Connect and check
+ client = redis_client(host=login_host, port=login_port, password=login_password)
+ try:
+ client.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ info = client.info()
+ module.exit_json(changed=False, info=info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py
new file mode 100644
index 00000000..848a5e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: riak
+short_description: This module handles some common Riak operations
+description:
+  - This module can be used to join nodes to a cluster and check
+    the status of the cluster.
+author:
+ - "James Martin (@jsmartin)"
+ - "Drew Kerrigan (@drewkerrigan)"
+options:
+ command:
+ description:
+ - The command you would like to perform against the cluster.
+ choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
+ config_dir:
+ description:
+ - The path to the riak configuration directory
+ default: /etc/riak
+ http_conn:
+ description:
+      - The IP address and port that Riak is listening on for HTTP queries
+ default: 127.0.0.1:8098
+ target_node:
+ description:
+ - The target node for certain operations (join, ping)
+ default: riak@127.0.0.1
+ wait_for_handoffs:
+ description:
+ - Number of seconds to wait for handoffs to complete.
+ wait_for_ring:
+ description:
+ - Number of seconds to wait for all nodes to agree on the ring.
+ wait_for_service:
+ description:
+ - Waits for a riak service to come online before continuing.
+ choices: ['kv']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: "Join's a Riak node to another node"
+ community.general.riak:
+ command: join
+ target_node: riak@10.1.1.1
+
+- name: Wait for handoffs to finish. Use with async and poll.
+ community.general.riak:
+ wait_for_handoffs: yes
+
+- name: Wait for riak_kv service to startup
+ community.general.riak:
+ wait_for_service: kv
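+
+# A hedged illustration of the staged-clustering workflow, built from the
+# documented command choices: review the plan first, then commit it.
+- name: Review the staged cluster changes
+  community.general.riak:
+    command: plan
+
+- name: Commit the staged cluster changes
+  community.general.riak:
+    command: commit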
+'''
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def ring_check(module, riak_admin_bin):
+ cmd = '%s ringready' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+ return True
+ else:
+ return False
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=False, default=None, choices=[
+ 'ping', 'kv_test', 'join', 'plan', 'commit']),
+ config_dir=dict(default='/etc/riak', type='path'),
+ http_conn=dict(required=False, default='127.0.0.1:8098'),
+ target_node=dict(default='riak@127.0.0.1', required=False),
+            wait_for_handoffs=dict(default=0, type='int'),
+            wait_for_ring=dict(default=0, type='int'),
+ wait_for_service=dict(
+ required=False, default=None, choices=['kv']),
+ validate_certs=dict(default=True, type='bool'))
+ )
+
+ command = module.params.get('command')
+ http_conn = module.params.get('http_conn')
+ target_node = module.params.get('target_node')
+ wait_for_handoffs = module.params.get('wait_for_handoffs')
+ wait_for_ring = module.params.get('wait_for_ring')
+ wait_for_service = module.params.get('wait_for_service')
+
+ # make sure riak commands are on the path
+ riak_bin = module.get_bin_path('riak')
+ riak_admin_bin = module.get_bin_path('riak-admin')
+
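+    # Poll the Riak HTTP /stats endpoint for up to 120 seconds; the node
+    # information used below is read from this payload.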
+ timeout = time.time() + 120
+ while True:
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout, could not fetch Riak stats.')
+ (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+ if info['status'] == 200:
+ stats_raw = response.read()
+ break
+ time.sleep(5)
+
+    # here we attempt to load those stats
+ try:
+ stats = json.loads(stats_raw)
+ except Exception:
+ module.fail_json(msg='Could not parse Riak stats.')
+
+ node_name = stats['nodename']
+ nodes = stats['ring_members']
+ ring_size = stats['ring_creation_size']
+ rc, out, err = module.run_command([riak_bin, 'version'])
+ version = out.strip()
+
+ result = dict(node_name=node_name,
+ nodes=nodes,
+ ring_size=ring_size,
+ version=version)
+
+ if command == 'ping':
+ cmd = '%s ping %s' % (riak_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['ping'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'kv_test':
+ cmd = '%s test' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['kv_test'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'join':
+ if nodes.count(node_name) == 1 and len(nodes) > 1:
+ result['join'] = 'Node is already in cluster or staged to be in cluster.'
+ else:
+ cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['join'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'plan':
+ cmd = '%s cluster plan' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['plan'] = out
+ if 'Staged Changes' in out:
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'commit':
+ cmd = '%s cluster commit' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['commit'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+    # This could take a while; running the task in async mode is recommended.
+ if wait_for_handoffs:
+ timeout = time.time() + wait_for_handoffs
+ while True:
+ cmd = '%s transfers' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if 'No transfers active' in out:
+ result['handoffs'] = 'No transfers active.'
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for handoffs.')
+
+ if wait_for_service:
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
+ rc, out, err = module.run_command(cmd)
+ result['service'] = out
+
+ if wait_for_ring:
+ timeout = time.time() + wait_for_ring
+ while True:
+ if ring_check(module, riak_admin_bin):
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
+
+ result['ring_ready'] = ring_check(module, riak_admin_bin)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py
new file mode 100644
index 00000000..e6c5f183
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
+# Outline and parts are reused from Mark Theunissen's mysql_db module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mssql_db
+short_description: Add or remove MSSQL databases from a remote host.
+description:
+ - Add or remove MSSQL databases from a remote host.
+options:
+ name:
+ description:
+      - Name of the database to add or remove
+ required: true
+ aliases: [ db ]
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with
+ type: str
+ login_host:
+ description:
+ - Host running the database
+ type: str
+ required: true
+ login_port:
+ description:
+      - Port of the MSSQL server. Requires I(login_host) to be defined as something other than localhost if I(login_port) is used
+ default: '1433'
+ type: str
+ state:
+ description:
+ - The database state
+ default: present
+ choices: [ "present", "absent", "import" ]
+ type: str
+ target:
+ description:
+      - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
+        files (C(.sql)) are supported.
+ type: str
+ autocommit:
+ description:
+      - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
+        within a transaction.
+ type: bool
+ default: 'no'
+notes:
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this
+    is as easy as C(pip install pymssql) (see M(ansible.builtin.pip)).
+requirements:
+ - python >= 2.7
+ - pymssql
+author: Vedit Firat Arig (@vedit)
+'''
+
+EXAMPLES = '''
+- name: Create a new database with name 'jackdata'
+ community.general.mssql_db:
+ name: jackdata
+ state: present
+
+# Copy database dump file to remote host and restore it to database 'my_db'
+- name: Copy database dump file to remote host
+ ansible.builtin.copy:
+ src: dump.sql
+ dest: /tmp
+
+- name: Restore the dump file to database 'my_db'
+ community.general.mssql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql
+'''
+
+RETURN = '''
+#
+'''
+
+import os
+import traceback
+
+PYMSSQL_IMP_ERR = None
+try:
+ import pymssql
+except ImportError:
+ PYMSSQL_IMP_ERR = traceback.format_exc()
+ mssql_found = False
+else:
+ mssql_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def db_exists(conn, cursor, db):
+ cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
+ conn.commit()
+ return bool(cursor.rowcount)
+
+
+def db_create(conn, cursor, db):
+ cursor.execute("CREATE DATABASE [%s]" % db)
+ return db_exists(conn, cursor, db)
+
+
+def db_delete(conn, cursor, db):
+ try:
+ cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
+ except Exception:
+ pass
+ cursor.execute("DROP DATABASE [%s]" % db)
+ return not db_exists(conn, cursor, db)
+
+
+def db_import(conn, cursor, module, db, target):
+ if os.path.isfile(target):
+ with open(target, 'r') as backup:
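+            # T-SQL scripts separate batches with GO; each batch is executed
+            # separately, prefixed with USE to select the target database.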
+ sqlQuery = "USE [%s]\n" % db
+ for line in backup:
+ if line is None:
+ break
+ elif line.startswith('GO'):
+ cursor.execute(sqlQuery)
+ sqlQuery = "USE [%s]\n" % db
+ else:
+ sqlQuery += line
+ cursor.execute(sqlQuery)
+ conn.commit()
+ return 0, "import successful", ""
+ else:
+ return 1, "cannot find target file", "cannot find target file"
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['db']),
+ login_user=dict(default=''),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(required=True),
+ login_port=dict(default='1433'),
+ target=dict(default=None),
+ autocommit=dict(type='bool', default=False),
+ state=dict(
+ default='present', choices=['present', 'absent', 'import'])
+ )
+ )
+
+ if not mssql_found:
+ module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR)
+
+ db = module.params['name']
+ state = module.params['state']
+ autocommit = module.params['autocommit']
+ target = module.params["target"]
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+
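+    # pymssql/FreeTDS accepts a 'host:port' server string, so the port is
+    # folded into the host only when it differs from the default (an
+    # assumption following the FreeTDS convention).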
+ login_querystring = login_host
+ if login_port != "1433":
+ login_querystring = "%s:%s" % (login_host, login_port)
+
+ if login_user != "" and login_password == "":
+        module.fail_json(msg="when supplying the login_user argument, login_password must also be provided")
+
+ try:
+ conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
+ cursor = conn.cursor()
+ except Exception as e:
+ if "Unknown database" in str(e):
+ errno, errstr = e.args
+ module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
+ else:
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+ "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+
+ conn.autocommit(True)
+ changed = False
+
+ if db_exists(conn, cursor, db):
+ if state == "absent":
+ try:
+ changed = db_delete(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error deleting database: " + str(e))
+ elif state == "import":
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+ else:
+ if state == "present":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+ elif state == "import":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+
+ module.exit_json(changed=changed, db=db)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py
new file mode 100644
index 00000000..bf66f3d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_copy
+short_description: Copy data between a file/program and a PostgreSQL table
+description:
+- Copy data between a file/program and a PostgreSQL table.
+
+options:
+ copy_to:
+ description:
+ - Copy the contents of a table to a file.
+ - Can also copy the results of a SELECT query.
+ - Mutually exclusive with I(copy_from) and I(dst).
+ type: path
+ aliases: [ to ]
+ copy_from:
+ description:
+ - Copy data from a file to a table (appending the data to whatever is in the table already).
+ - Mutually exclusive with I(copy_to) and I(src).
+ type: path
+ aliases: [ from ]
+ src:
+ description:
+    - Copy data from the table or SELECT query I(src) to the file or program I(copy_to).
+ - Used with I(copy_to) only.
+ type: str
+ aliases: [ source ]
+ dst:
+ description:
+ - Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
+ - Used with I(copy_from) only.
+ type: str
+ aliases: [ destination ]
+ columns:
+ description:
+ - List of column names for the src/dst table to COPY FROM/TO.
+ type: list
+ elements: str
+ aliases: [ column ]
+ program:
+ description:
+ - Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
+ - See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: bool
+ default: no
+ options:
+ description:
+ - Options of COPY command.
+ - See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: dict
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases: [ login_db ]
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports PostgreSQL version 9.4+.
+- COPY command is only allowed to database superusers.
+- If I(check_mode=yes), the module just checks the src/dst table availability
+  and returns the COPY query that has not actually been executed.
+- If I(check_mode=yes) and the source has been passed as SQL, the module
+  will execute it and roll the transaction back, but pay attention:
+  it can affect database performance (e.g., if the SQL collects a lot of data).
+
+seealso:
+- name: COPY command reference
+ description: Complete reference of the COPY command documentation.
+ link: https://www.postgresql.org/docs/current/sql-copy.html
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: acme
+
+- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.csv
+ dst: acme
+ columns: id,name
+ options:
+ format: csv
+
+- name: >
+ Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
+ The NULL values are specified as N
+ community.general.postgresql_copy:
+    copy_from: /tmp/data.txt
+ dst: bar
+ options:
+ delimiter: '|'
+ null: 'N'
+
+- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
+ community.general.postgresql_copy:
+ src: acme
+ copy_to: /tmp/data.txt
+
+- name: Copy data from SELECT query to /tmp/data.csv in CSV format
+ community.general.postgresql_copy:
+ src: 'SELECT * FROM acme'
+ copy_to: /tmp/data.csv
+ options:
+ format: csv
+
+- name: Copy CSV data from my_table to gzip
+ community.general.postgresql_copy:
+ src: my_table
+ copy_to: 'gzip > /tmp/data.csv.gz'
+ program: yes
+ options:
+ format: csv
+
+- name: >
+ Copy data from columns id, name of table bar to /tmp/data.txt.
+ Output format is text, vertical-bar-separated, NULL as N
+ community.general.postgresql_copy:
+ src: bar
+ columns:
+ - id
+ - name
+    copy_to: /tmp/data.txt
+ options:
+ delimiter: '|'
+ null: 'N'
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
+src:
+ description: Data source.
+ returned: always
+ type: str
+ sample: "mytable"
+dst:
+ description: Data destination.
+ returned: always
+ type: str
+ sample: "/tmp/data.csv"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+class PgCopyData(object):
+
+ """Implements behavior of COPY FROM, COPY TO PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ changed (bool) -- something was changed after execution or not
+ executed_queries (list) -- executed queries
+ dst (str) -- data destination table (when copy_from)
+ src (str) -- data source table (when copy_to)
+ opt_need_quotes (tuple) -- values of these options must be passed
+ to SQL in quotes
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.changed = False
+ self.dst = ''
+ self.src = ''
+ self.opt_need_quotes = (
+ 'DELIMITER',
+ 'NULL',
+ 'QUOTE',
+ 'ESCAPE',
+ 'ENCODING',
+ )
+
+ def copy_from(self):
+ """Implements COPY FROM command behavior."""
+ self.src = self.module.params['copy_from']
+ self.dst = self.module.params['dst']
+
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('FROM')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.src)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
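+        # The assembled query looks like, for example:
+        # COPY "acme" (id,name) FROM '/tmp/data.csv' (format csv)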
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.dst)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def copy_to(self):
+ """Implements COPY TO command behavior."""
+ self.src = self.module.params['src']
+ self.dst = self.module.params['copy_to']
+
+ if 'SELECT ' in self.src.upper():
+ # If src is SQL SELECT statement:
+ query_fragments = ['COPY (%s)' % self.src]
+ else:
+ # If src is a table:
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('TO')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.dst)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.src)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def __transform_options(self):
+ """Transform options dict into a suitable string."""
+ for (key, val) in iteritems(self.module.params['options']):
+ if key.upper() in self.opt_need_quotes:
+ self.module.params['options'][key] = "'%s'" % val
+
+ opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
+ return '(%s)' % ', '.join(opt)
+
+ def __check_table(self, table):
+ """Check table or SQL in transaction mode for check_mode.
+
+ Return True if it is OK.
+
+ Arguments:
+ table (str) - Table name that needs to be checked.
+ It can be SQL SELECT statement that was passed
+ instead of the table name.
+ """
+ if 'SELECT ' in table.upper():
+ # In this case table is actually SQL SELECT statement.
+ # If SQL fails, it's handled by exec_sql():
+ exec_sql(self, table, add_to_executed=False)
+ # If exec_sql was passed, it means all is OK:
+ return True
+
+ exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
+ add_to_executed=False)
+ # If SQL was executed successfully:
+ return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ copy_to=dict(type='path', aliases=['to']),
+ copy_from=dict(type='path', aliases=['from']),
+ src=dict(type='str', aliases=['source']),
+ dst=dict(type='str', aliases=['destination']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ options=dict(type='dict'),
+ program=dict(type='bool', default=False),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['copy_from', 'copy_to'],
+ ['copy_from', 'src'],
+ ['copy_to', 'dst'],
+ ]
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ opt_list = None
+ if module.params['options']:
+ opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]
+
+ check_input(module,
+ module.params['copy_to'],
+ module.params['copy_from'],
+ module.params['src'],
+ module.params['dst'],
+ opt_list,
+ module.params['columns'],
+ module.params['session_role'])
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+ if module.params.get('copy_from') and not module.params.get('dst'):
+ module.fail_json(msg='dst param is necessary with copy_from')
+
+ elif module.params.get('copy_to') and not module.params.get('src'):
+ module.fail_json(msg='src param is necessary with copy_to')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ data = PgCopyData(module, cursor)
+
+    # Note: parameters like dst, src, etc. are taken from the module object
+    # into the PgCopyData object, therefore there is no need to pass them
+    # to the methods below.
+ # Note: check mode is implemented inside the methods below
+ # by checking passed module.check_mode arg.
+ if module.params.get('copy_to'):
+ data.copy_to()
+
+ elif module.params.get('copy_from'):
+ data.copy_from()
+
+ # Finish:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Return some values:
+ module.exit_json(
+ changed=data.changed,
+ queries=data.executed_queries,
+ src=data.src,
+ dst=data.dst,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py
new file mode 100644
index 00000000..8fde39ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host.
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+options:
+ name:
+ description:
+ - Name of the database to add or remove
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect (if needed)
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database
+ type: str
+ template:
+ description:
+ - Template used to create the database
+ type: str
+ encoding:
+ description:
+ - Encoding of the database
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+      Note that some PostgreSQL versions of pg_dump, which is an embedded PostgreSQL utility used by the module,
+      return rc 0 even when errors occur (e.g. when the connection is forbidden by pg_hba.conf),
+      so the module returns changed=True but the dump has not actually been done. Please be sure that your version of
+      pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+ - Supported formats for dump and restore include C(.sql) and C(.tar)
+ - "Restore program is selected by target file format: C(.tar) and C(.pgc) are handled by pg_restore, other with pgsql."
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ target_opts:
+ description:
+ - Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ maintenance_db:
+ description:
+      - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+    - Cannot be used with dump-file-format-related arguments like C(--format=d).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(owner), I(conn_limit), I(encoding),
+ I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+notes:
+- States C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ community.general.postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale settings
+ community.general.postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ community.general.postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+- name: Dump only table1 and table2 from the acme database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/table1_table2.sql
+ target_opts: "-t table1 -t table2"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ community.general.postgresql_db:
+ name: foo
+ tablespace: bar
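+
+# A restore counterpart to the dump examples above; the restore program is
+# chosen from the target file extension as described in the state option.
+- name: Restore database "acme" from a dump file
+  community.general.postgresql_db:
+    name: acme
+    state: restore
+    target: /tmp/acme.sql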
+'''
+
+RETURN = r'''
+executed_commands:
+  description: List of commands the module tried to run.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '0.2.0'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible_collections.community.general.plugins.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ SQLParseError,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def set_owner(cursor, db, owner):
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (db, owner)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def set_conn_limit(cursor, db, conn_limit):
+ query = 'ALTER DATABASE "%s" CONNECTION LIMIT %s' % (db, conn_limit)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def get_encoding_id(cursor, encoding):
+ query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
+ cursor.execute(query, {'encoding': encoding})
+ return cursor.fetchone()['encoding_id']
+
+
+def get_db_info(cursor, db):
+ query = """
+ SELECT rolname AS owner,
+ pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
+ datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
+ spcname AS tablespace
+ FROM pg_database
+ JOIN pg_roles ON pg_roles.oid = pg_database.datdba
+ JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
+ WHERE datname = %(db)s
+ """
+ cursor.execute(query, {'db': db})
+ return cursor.fetchone()
+
+
+def db_exists(cursor, db):
+ query = "SELECT * FROM pg_database WHERE datname=%(db)s"
+ cursor.execute(query, {'db': db})
+ return cursor.rowcount == 1
+
+
+def db_delete(cursor, db):
+ if db_exists(cursor, db):
+ query = 'DROP DATABASE "%s"' % db
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+
+def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
+ if not db_exists(cursor, db):
+ query_fragments = ['CREATE DATABASE "%s"' % db]
+ if owner:
+ query_fragments.append('OWNER "%s"' % owner)
+ if template:
+ query_fragments.append('TEMPLATE "%s"' % template)
+ if encoding:
+ query_fragments.append('ENCODING %(enc)s')
+ if lc_collate:
+ query_fragments.append('LC_COLLATE %(collate)s')
+ if lc_ctype:
+ query_fragments.append('LC_CTYPE %(ctype)s')
+ if tablespace:
+ query_fragments.append('TABLESPACE "%s"' % tablespace)
+ if conn_limit:
+ query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query = ' '.join(query_fragments)
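+        # The assembled statement looks like, for example:
+        # CREATE DATABASE "acme" OWNER "bar" TEMPLATE "template0" ENCODING 'UTF-8'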
+ executed_commands.append(cursor.mogrify(query, params))
+ cursor.execute(query, params)
+ return True
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ raise NotSupportedError(
+ 'Changing database encoding is not supported. '
+ 'Current encoding: %s' % db_info['encoding']
+ )
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ raise NotSupportedError(
+ 'Changing LC_COLLATE is not supported. '
+ 'Current LC_COLLATE: %s' % db_info['lc_collate']
+ )
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ raise NotSupportedError(
+                'Changing LC_CTYPE is not supported. '
+ 'Current LC_CTYPE: %s' % db_info['lc_ctype']
+ )
+ else:
+ changed = False
+
+ if owner and owner != db_info['owner']:
+ changed = set_owner(cursor, db, owner)
+
+ if conn_limit and conn_limit != str(db_info['conn_limit']):
+ changed = set_conn_limit(cursor, db, conn_limit)
+
+ if tablespace and tablespace != db_info['tablespace']:
+ changed = set_tablespace(cursor, db, tablespace)
+
+ return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ if not db_exists(cursor, db):
+ return False
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ return False
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ return False
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ return False
+ elif owner and owner != db_info['owner']:
+ return False
+ elif conn_limit and conn_limit != str(db_info['conn_limit']):
+ return False
+ elif tablespace and tablespace != db_info['tablespace']:
+ return False
+ else:
+ return True
+
+
+def db_dump(module, target, target_opts="",
+ db=None,
+ dump_extra_args=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user, db_prefix=False)
+ cmd = module.get_bin_path('pg_dump', True)
+ comp_prog_path = None
+
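+    # Choose the pg_dump archive format from the target file extension,
+    # then pick a compression program for .gz/.bz2/.xz targets: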
+ if os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=t')
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=c')
+ if os.path.splitext(target)[-1] == '.gz':
+ if module.get_bin_path('pigz'):
+ comp_prog_path = module.get_bin_path('pigz', True)
+ else:
+ comp_prog_path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', True)
+
+ cmd += "".join(flags)
+
+ if dump_extra_args:
+ cmd += " {0} ".format(dump_extra_args)
+
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+        # Use a fifo to be notified of an error in pg_dump.
+        # A shell pipe offers no portable way to retrieve
+        # the exit code of the first command in the pipeline.
+ fifo = os.path.join(module.tmpdir, 'pg_fifo')
+ os.mkfifo(fifo)
+ cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
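+        # The assembled shell line looks like (illustrative paths):
+        #   gzip </tmp/.../pg_fifo > dump.sql.gz & pg_dump ... >/tmp/.../pg_fifo
+        # i.e. the compressor reads from the fifo while pg_dump writes into it.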
+ else:
+ cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+ db=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user)
+ comp_prog_path = None
+ cmd = module.get_bin_path('psql', True)
+
+ if os.path.splitext(target)[-1] == '.sql':
+ flags.append(' --file={0}'.format(target))
+
+ elif os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=Tar')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=Custom')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('zcat', True)
+
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzcat', True)
+
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xzcat', True)
+
+ cmd += "".join(flags)
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+        env = os.environ.copy()
+        if password:
+            env["PGPASSWORD"] = password
+ p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1, 'cmd: ****'
+ else:
+ return p2.returncode, '', stderr2, 'cmd: ****'
+ else:
+ cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+ """
+ returns a list of connection argument strings each prefixed
+ with a space and quoted where necessary to later be combined
+ in a single shell string with `"".join(rv)`
+
+ db_prefix determines if "--dbname" is prefixed to the db argument,
+ since the argument was introduced in 9.3.
+ """
+ flags = []
+ if db:
+ if db_prefix:
+ flags.append(' --dbname={0}'.format(shlex_quote(db)))
+ else:
+ flags.append(' {0}'.format(shlex_quote(db)))
+ if host:
+ flags.append(' --host={0}'.format(host))
+ if port:
+ flags.append(' --port={0}'.format(port))
+ if user:
+ flags.append(' --username={0}'.format(user))
+ return flags
+
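+# For example (illustrative values), login_flags('acme', 'db1.example.com', 5432, 'bob')
+# returns [' --dbname=acme', ' --host=db1.example.com', ' --port=5432', ' --username=bob'],
+# which "".join() combines into a single shell string.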
+
+def do_with_password(module, cmd, password):
+ env = {}
+ if password:
+ env = {"PGPASSWORD": password}
+ executed_commands.append(cmd)
+    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+    return rc, stdout, stderr, cmd
+
+
+def set_tablespace(cursor, db, tablespace):
+ query = 'ALTER DATABASE "%s" SET TABLESPACE "%s"' % (db, tablespace)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = pgutils.postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', required=True, aliases=['name']),
+ owner=dict(type='str', default=''),
+ template=dict(type='str', default=''),
+ encoding=dict(type='str', default=''),
+ lc_collate=dict(type='str', default=''),
+ lc_ctype=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
+ target=dict(type='path', default=''),
+ target_opts=dict(type='str', default=''),
+ maintenance_db=dict(type='str', default="postgres"),
+ session_role=dict(type='str'),
+ conn_limit=dict(type='str', default=''),
+ tablespace=dict(type='path', default=''),
+ dump_extra_args=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ db = module.params["db"]
+ owner = module.params["owner"]
+ template = module.params["template"]
+ encoding = module.params["encoding"]
+ lc_collate = module.params["lc_collate"]
+ lc_ctype = module.params["lc_ctype"]
+ target = module.params["target"]
+ target_opts = module.params["target_opts"]
+ state = module.params["state"]
+ changed = False
+ maintenance_db = module.params['maintenance_db']
+ session_role = module.params["session_role"]
+ conn_limit = module.params['conn_limit']
+ tablespace = module.params['tablespace']
+ dump_extra_args = module.params['dump_extra_args']
+ trust_input = module.params['trust_input']
+
+    if not trust_input:
+        # Check input for potentially dangerous elements:
+ check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)
+
+ raw_connection = state in ("dump", "restore")
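+    # dump/restore shell out to pg_dump/pg_restore/psql below,
+    # so no psycopg2 connection is needed for them: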
+
+ if not raw_connection:
+ pgutils.ensure_required_libs(module)
+
+    # To use default values, keyword arguments must be absent, so
+    # check which values are empty and don't include them in the **kw
+    # dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+ kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+
+ if is_localhost and module.params["login_unix_socket"] != "":
+ kw["host"] = module.params["login_unix_socket"]
+
+ if target == "":
+ target = "{0}/{1}.sql".format(os.getcwd(), db)
+ target = os.path.expanduser(target)
+
+ if not raw_connection:
+ try:
+ db_connection = psycopg2.connect(database=maintenance_db, **kw)
+
+ # Enable autocommit so we can create databases
+ if psycopg2.__version__ >= '2.4.2':
+ db_connection.autocommit = True
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ if session_role:
+ try:
+ cursor.execute('SET ROLE "%s"' % session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = db_exists(cursor, db)
+ elif state == "present":
+ changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+ if state == "absent":
+ try:
+ changed = db_delete(cursor, db)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state in ("dump", "restore"):
+            method = db_dump if state == "dump" else db_restore
+ try:
+ if state == 'dump':
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
+ else:
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
+
+ if rc != 0:
+ module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
+ else:
+ module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
+ executed_commands=executed_commands)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py
new file mode 100644
index 00000000..3fa82dac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ext
+short_description: Add or remove PostgreSQL extensions from a database
+description:
+- Add or remove PostgreSQL extensions from a database.
+options:
+ name:
+ description:
+ - Name of the extension to add or remove.
+ required: true
+ type: str
+ aliases:
+ - ext
+ db:
+ description:
+ - Name of the database to add or remove the extension to/from.
+ required: true
+ type: str
+ aliases:
+ - login_db
+ schema:
+ description:
+ - Name of the schema to add the extension to.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database extension state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ cascade:
+ description:
+ - Automatically install/remove any extensions that this extension depends on
+ that are not already installed/removed (supported since PostgreSQL 9.6).
+ type: bool
+ default: no
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version:
+ description:
+ - Extension version to add or update to. Has effect with I(state=present) only.
+ - If not specified, the latest extension version will be created.
+  - It cannot downgrade an extension version.
+    When a version downgrade is needed, remove the extension and create a new one with the appropriate version.
+ - Set I(version=latest) to update the extension to the latest available version.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(ext), I(schema),
+ I(version), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL extensions
+ description: General information about PostgreSQL extensions.
+ link: https://www.postgresql.org/docs/current/external-extensions.html
+- name: CREATE EXTENSION reference
+ description: Complete reference of the CREATE EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createextension.html
+- name: ALTER EXTENSION reference
+ description: Complete reference of the ALTER EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterextension.html
+- name: DROP EXTENSION reference
+ description: Complete reference of the DROP EXTENSION command documentation.
+  link: https://www.postgresql.org/docs/current/sql-dropextension.html
+notes:
+- The default authentication assumes that you are either logging in as
+ or sudo'ing to the C(postgres) account on the host.
+- This module uses I(psycopg2), a Python PostgreSQL database adapter.
+- You must ensure that C(psycopg2) is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case),
+ then PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
+ and C(python-psycopg2) packages on the remote host before using this module.
+- Incomparable versions, for example PostGIS C(unpackaged), cannot be installed.
+requirements: [ psycopg2 ]
+author:
+- Daniel Schep (@dschep)
+- Thomas O'Donnell (@andytom)
+- Sandro Santilli (@strk)
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Adds postgis extension to the database acme in the schema foo
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ schema: foo
+
+- name: Removes postgis extension from the database acme
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ state: absent
+
+- name: Adds earthdistance extension to the database template1 cascade
+ community.general.postgresql_ext:
+ name: earthdistance
+ db: template1
+ cascade: true
+
+# In the example below, if earthdistance extension is installed,
+# it will be removed too because it depends on cube:
+- name: Removes cube extension from the database acme cascade
+ community.general.postgresql_ext:
+ name: cube
+ db: acme
+ cascade: yes
+ state: absent
+
+- name: Create extension foo of version 1.2 or update it if it's already created
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: 1.2
+
+- name: Assuming extension foo is created, update it to the latest version
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: latest
+'''
+
+RETURN = r'''
+query:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["DROP EXTENSION \"acme\""]
+
+'''
+
+import traceback
+
+from distutils.version import LooseVersion
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def ext_exists(cursor, ext):
+ query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
+ cursor.execute(query, {'ext': ext})
+ return cursor.rowcount == 1
+
+
+def ext_delete(cursor, ext, cascade):
+ if ext_exists(cursor, ext):
+ query = "DROP EXTENSION \"%s\"" % ext
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def ext_update_version(cursor, ext, version):
+ """Update extension version.
+
+ Return True if success.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ version (str) -- extension version
+ """
+ query = "ALTER EXTENSION \"%s\" UPDATE" % ext
+ params = {}
+
+ if version != 'latest':
+ query += " TO %(ver)s"
+ params['ver'] = version
+
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+
+ return True
+
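+# Illustrative calls of ext_update_version (hypothetical extension name):
+#   ext_update_version(cursor, 'foo', '1.2')    executes: ALTER EXTENSION "foo" UPDATE TO '1.2'
+#   ext_update_version(cursor, 'foo', 'latest') executes: ALTER EXTENSION "foo" UPDATE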
+
+def ext_create(cursor, ext, schema, cascade, version):
+ query = "CREATE EXTENSION \"%s\"" % ext
+ params = {}
+
+ if schema:
+ query += " WITH SCHEMA \"%s\"" % schema
+ if version:
+ query += " VERSION %(ver)s"
+ params['ver'] = version
+ if cascade:
+ query += " CASCADE"
+
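+    # e.g. (illustrative values): CREATE EXTENSION "postgis" WITH SCHEMA "foo" VERSION '1.2' CASCADE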
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+ return True
+
+
+def ext_get_versions(cursor, ext):
+ """
+ Get the current created extension version and available versions.
+
+ Return tuple (current_version, [list of available versions]).
+
+    Note: the list of available versions contains only versions
+    higher than the currently created version.
+ If the extension is not created, this list will contain all
+ available versions.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ """
+
+ # 1. Get the current extension version:
+ query = ("SELECT extversion FROM pg_catalog.pg_extension "
+ "WHERE extname = %(ext)s")
+
+ current_version = '0'
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchone()
+ if res:
+ current_version = res[0]
+
+ # 2. Get available versions:
+ query = ("SELECT version FROM pg_available_extension_versions "
+ "WHERE name = %(ext)s")
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchall()
+
+ available_versions = parse_ext_versions(current_version, res)
+
+ if current_version == '0':
+ current_version = False
+
+ return (current_version, available_versions)
+
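+# For example (illustrative), an installed extension might yield ('1.0', ['1.1', '1.2']);
+# a not yet created one yields (False, [<all available versions>]).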
+
+def parse_ext_versions(current_version, ext_ver_list):
+ """Parse ext versions.
+
+ Args:
+ current_version (str) -- version to compare elements of ext_ver_list with
+ ext_ver_list (list) -- list containing dicts with versions
+
+ Return a sorted list with versions that are higher than current_version.
+
+ Note: Incomparable versions (e.g., postgis version "unpackaged") are skipped.
+ """
+ available_versions = []
+
+ for line in ext_ver_list:
+ if line['version'] == 'unpackaged':
+ continue
+
+ try:
+ if LooseVersion(line['version']) > LooseVersion(current_version):
+ available_versions.append(line['version'])
+ except Exception:
+ # When a version cannot be compared, skip it
+ # (there's a note in the documentation)
+ continue
+
+ return sorted(available_versions, key=LooseVersion)
+
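+# Illustrative call:
+#   parse_ext_versions('1.0', [{'version': 'unpackaged'}, {'version': '0.9'}, {'version': '1.1'}])
+# returns ['1.1'] - 'unpackaged' is skipped and '0.9' is not higher than '1.0'.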
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ ext=dict(type="str", required=True, aliases=["name"]),
+ schema=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ cascade=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ version=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ ext = module.params["ext"]
+ schema = module.params["schema"]
+ state = module.params["state"]
+ cascade = module.params["cascade"]
+ version = module.params["version"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ changed = False
+
+ if not trust_input:
+ check_input(module, ext, schema, version, session_role)
+
+ if version and state == 'absent':
+ module.warn("Parameter version is ignored when state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ # Get extension info and available versions:
+ curr_version, available_versions = ext_get_versions(cursor, ext)
+
+ if state == "present":
+ if version == 'latest':
+ if available_versions:
+ version = available_versions[-1]
+ else:
+ version = ''
+
+ if version:
+ # If the specific version is passed and it is not available for update:
+ if version not in available_versions:
+ if not curr_version:
+ module.fail_json(msg="Passed version '%s' is not available" % version)
+
+ elif LooseVersion(curr_version) == LooseVersion(version):
+ changed = False
+
+ else:
+ module.fail_json(msg="Passed version '%s' is lower than "
+ "the current created version '%s' or "
+ "the passed version is not available" % (version, curr_version))
+
+                # If the specific version is passed and it is higher than the current version:
+ if curr_version:
+ if LooseVersion(curr_version) < LooseVersion(version):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_update_version(cursor, ext, version)
+
+                    # If the passed version is already the current one:
+ if curr_version == version:
+ changed = False
+
+                # If the extension doesn't exist but is available for installation:
+ elif not curr_version and available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+ # If version is not passed:
+ else:
+ if not curr_version:
+                    # If the ext doesn't exist but is available:
+ if available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+                    # If the ext doesn't exist and is not available:
+ else:
+ module.fail_json(msg="Extension %s is not installed" % ext)
+
+ elif state == "absent":
+ if curr_version:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_delete(cursor, ext, cascade)
+ else:
+ changed = False
+
+ except Exception as e:
+ db_connection.close()
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py
new file mode 100644
index 00000000..6ffee31d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_idx
+short_description: Create or drop indexes from a PostgreSQL database
+description:
+- Create or drop indexes from a PostgreSQL database.
+
+options:
+ idxname:
+ description:
+ - Name of the index to create or drop.
+ type: str
+ required: true
+ aliases:
+ - name
+ db:
+ description:
+ - Name of database to connect to and where the index will be created/dropped.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ schema:
+ description:
+ - Name of a database schema where the index will be created.
+ type: str
+ state:
+ description:
+ - Index state.
+ - C(present) implies the index will be created if it does not exist.
+ - C(absent) implies the index will be dropped if it exists.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ table:
+ description:
+    - Table to create the index on.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ columns:
+ description:
+    - List of index columns that need to be covered by the index.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ aliases:
+ - column
+ cond:
+ description:
+ - Index conditions.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ idxtype:
+ description:
+ - Index type (like btree, gist, gin, etc.).
+ - Mutually exclusive with I(state=absent).
+ type: str
+ aliases:
+ - type
+ concurrent:
+ description:
+ - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
+    - Note that if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
+      For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
+    - If the building process was interrupted for any reason when I(concurrent=yes), the index becomes invalid.
+      In this case it should be dropped and created again.
+ - Mutually exclusive with I(cascade=yes).
+ type: bool
+ default: yes
+ unique:
+ description:
+ - Enable unique index.
+ - Only btree currently supports unique indexes.
+ type: bool
+ default: no
+ version_added: '0.2.0'
+ tablespace:
+ description:
+ - Set a tablespace for the index.
+ - Mutually exclusive with I(state=absent).
+ required: false
+ type: str
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the index,
+ and in turn all objects that depend on those objects.
+    - It is used only with I(state=absent).
+    - Mutually exclusive with I(concurrent=yes).
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(idxname), I(session_role),
+ I(schema), I(table), I(columns), I(tablespace), I(storage_params),
+ I(cond) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_tablespace
+- name: PostgreSQL indexes reference
+ description: General information about PostgreSQL indexes.
+ link: https://www.postgresql.org/docs/current/indexes.html
+- name: CREATE INDEX reference
+ description: Complete reference of the CREATE INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createindex.html
+- name: ALTER INDEX reference
+ description: Complete reference of the ALTER INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterindex.html
+- name: DROP INDEX reference
+ description: Complete reference of the DROP INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropindex.html
+
+notes:
+- The index building process can affect database performance.
+- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
+
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: id,name
+ name: test_idx
+
+- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns:
+ - id
+ - name
+ idxname: test_idx
+ tablespace: ssd
+ storage_params:
+ - fillfactor=90
+
+- name: Create gist index test_gist_idx concurrently on column geo_data of table map
+ community.general.postgresql_idx:
+ db: somedb
+ table: map
+ idxtype: gist
+ columns: geo_data
+ idxname: test_gist_idx
+
+# Note: for the example below, the pg_trgm extension must be installed for gin_trgm_ops
+- name: Create gin index gin0_idx not concurrently on column comment of table test
+ community.general.postgresql_idx:
+ idxname: gin0_idx
+ table: test
+ columns: comment gin_trgm_ops
+ concurrent: no
+ idxtype: gin
+
+- name: Drop btree test_idx concurrently
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+
+- name: Drop test_idx cascade
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+ cascade: yes
+ concurrent: no
+
+- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
+ community.general.postgresql_idx:
+ db: mydb
+ table: test
+ columns: id,comment
+ idxname: test_idx
+ cond: id > 1
+
+- name: Create unique btree index if not exists test_unique_idx on column name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: name
+ name: test_unique_idx
+ unique: yes
+ concurrent: no
+'''
+
+RETURN = r'''
+name:
+ description: Index name.
+ returned: always
+ type: str
+ sample: 'foo_idx'
+state:
+ description: Index state.
+ returned: always
+ type: str
+ sample: 'present'
+schema:
+ description: Schema where index exists.
+ returned: always
+ type: str
+ sample: 'public'
+tablespace:
+ description: Tablespace where index exists.
+ returned: always
+ type: str
+ sample: 'ssd'
+query:
+  description: Query that the module tried to execute.
+ returned: always
+ type: str
+ sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
+storage_params:
+ description: Index storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=90" ]
+valid:
+ description: Index validity.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Index(object):
+
+ """Class for working with PostgreSQL indexes.
+
+ TODO:
+ 1. Add possibility to change ownership
+ 2. Add possibility to change tablespace
+ 3. Add list called executed_queries (executed_query should be left too)
+ 4. Use self.module instead of passing arguments to the methods whenever possible
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+ exists (bool) -- flag the index exists in the DB or not
+        info (dict) -- dict that contains information about the index
+ executed_query (str) -- executed query
+ """
+
+ def __init__(self, module, cursor, schema, name):
+ self.name = name
+ if schema:
+ self.schema = schema
+ else:
+ self.schema = 'public'
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'name': self.name,
+ 'state': 'absent',
+ 'schema': '',
+ 'tblname': '',
+ 'tblspace': '',
+ 'valid': True,
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_query = ''
+
+ def get_info(self):
+ """Refresh index info.
+
+ Return self.info dict.
+ """
+ self.__exists_in_db()
+ return self.info
+
+ def __exists_in_db(self):
+ """Check index existence, collect info, add it to self.info dict.
+
+ Return True if the index exists, otherwise, return False.
+ """
+ query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
+ "pi.indisvalid, c.reloptions "
+ "FROM pg_catalog.pg_indexes AS i "
+ "JOIN pg_catalog.pg_class AS c "
+ "ON i.indexname = c.relname "
+ "JOIN pg_catalog.pg_index AS pi "
+ "ON c.oid = pi.indexrelid "
+ "WHERE i.indexname = %(name)s")
+
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ name=self.name,
+ state='present',
+ schema=res[0][0],
+ tblname=res[0][1],
+ tblspace=res[0][2] if res[0][2] else '',
+ valid=res[0][3],
+ storage_params=res[0][4] if res[0][4] else [],
+ )
+ return True
+
+ else:
+ self.exists = False
+ return False
+
+ def create(self, tblname, idxtype, columns, cond, tblspace,
+ storage_params, concurrent=True, unique=False):
+ """Create PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Args:
+ tblname (str) -- name of a table for the index
+ idxtype (str) -- type of the index like BTREE, BRIN, etc
+            columns (str) -- string of comma-separated columns that need to be covered by the index
+            cond (str) -- condition for a partial index (WHERE clause)
+            tblspace (str) -- tablespace for storing the index
+            storage_params (str) -- string of comma-separated storage parameters
+
+        Kwargs:
+            concurrent (bool) -- build index in concurrent mode, default True
+            unique (bool) -- create a unique index, default False
+        """
+ if self.exists:
+ return False
+
+ if idxtype is None:
+ idxtype = "BTREE"
+
+ query = 'CREATE'
+
+ if unique:
+ query += ' UNIQUE'
+
+ query += ' INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"' % self.name
+
+ query += ' ON "%s"."%s" ' % (self.schema, tblname)
+
+ query += 'USING %s (%s)' % (idxtype, columns)
+
+ if storage_params:
+ query += ' WITH (%s)' % storage_params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if cond:
+ query += ' WHERE %s' % cond
+
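+        # A fully assembled query looks like, for example:
+        #   CREATE INDEX CONCURRENTLY "test_idx" ON "public"."products" USING BTREE (id,name) WHERE id > 1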
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+ def drop(self, cascade=False, concurrent=True):
+ """Drop PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Kwargs:
+ cascade (bool) -- automatically drop objects that depend on the index,
+ default False
+            concurrent (bool) -- drop the index in concurrent mode, default True
+ """
+ if not self.exists:
+ return False
+
+ query = 'DROP INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"."%s"' % (self.schema, self.name)
+
+ if cascade:
+ query += ' CASCADE'
+
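+        # e.g. (illustrative): DROP INDEX CONCURRENTLY "public"."test_idx"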
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ idxname=dict(type='str', required=True, aliases=['name']),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ concurrent=dict(type='bool', default=True),
+ unique=dict(type='bool', default=False),
+ table=dict(type='str'),
+ idxtype=dict(type='str', aliases=['type']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ cond=dict(type='str'),
+ session_role=dict(type='str'),
+ tablespace=dict(type='str'),
+ storage_params=dict(type='list', elements='str'),
+ cascade=dict(type='bool', default=False),
+ schema=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ idxname = module.params["idxname"]
+ state = module.params["state"]
+ concurrent = module.params["concurrent"]
+ unique = module.params["unique"]
+ table = module.params["table"]
+ idxtype = module.params["idxtype"]
+ columns = module.params["columns"]
+ cond = module.params["cond"]
+ tablespace = module.params["tablespace"]
+ storage_params = module.params["storage_params"]
+ cascade = module.params["cascade"]
+ schema = module.params["schema"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, idxname, session_role, schema, table, columns,
+ tablespace, storage_params, cond)
+
+ if concurrent and cascade:
+ module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
+
+    if unique and idxtype and idxtype.lower() != 'btree':
+ module.fail_json(msg="Only btree currently supports unique indexes")
+
+ if state == 'present':
+ if not table:
+ module.fail_json(msg="Table must be specified")
+ if not columns:
+ module.fail_json(msg="At least one column must be specified")
+ else:
+ if table or columns or cond or idxtype or tablespace:
+ module.fail_json(msg="Index %s is going to be removed, so it does not "
+ "make sense to pass a table name, columns, conditions, "
+ "index type, or tablespace" % idxname)
+
+ if cascade and state != 'absent':
+ module.fail_json(msg="cascade parameter used only with state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Set defaults:
+ changed = False
+
+ # Do job:
+ index = Index(module, cursor, schema, idxname)
+ kw = index.get_info()
+ kw['query'] = ''
+
+ #
+ # check_mode start
+ if module.check_mode:
+ if state == 'present' and index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'present' and not index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+
+ elif state == 'absent' and not index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'absent' and index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+ # check_mode end
+ #
+
+ if state == "present":
+ if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
+ module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
+
+ columns = ','.join(columns)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)
+
+ if changed:
+ kw = index.get_info()
+ kw['state'] = 'present'
+ kw['query'] = index.executed_query
+
+ else:
+ changed = index.drop(cascade, concurrent)
+
+ if changed:
+ kw['state'] = 'absent'
+ kw['query'] = index.executed_query
+
+ if not kw['valid']:
+ db_connection.rollback()
+ module.warn("Index %s is invalid! ROLLBACK" % idxname)
+
+ if not concurrent:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py
new file mode 100644
index 00000000..aeec8651
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py
@@ -0,0 +1,1030 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_info
+short_description: Gather information about PostgreSQL servers
+description:
+- Gathers information about PostgreSQL servers.
+options:
+ filter:
+ description:
+    - Limit the collected information by a comma-separated string or a YAML list.
+    - Allowable values are C(version),
+      C(databases), C(in_recovery), C(settings), C(tablespaces), C(roles),
+      C(replications), C(repl_slots).
+    - By default, collects all subsets.
+    - You can use shell-style (fnmatch) wildcards to pass groups of values (see Examples).
+    - You can use '!' before a value (for example, C(!settings)) to exclude it from the information.
+    - If you pass both including and excluding values to the filter, for example, I(filter=!settings,ver),
+      the excluding values will be ignored.
+ type: list
+ elements: str
+ db:
+ description:
+    - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_ping
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Display info from postgres hosts.
+# ansible postgres -m postgresql_info
+
+# Display only databases and roles info from all hosts using shell-style wildcards:
+# ansible all -m postgresql_info -a 'filter=dat*,rol*'
+
+# Display only replications and repl_slots info from standby hosts using shell-style wildcards:
+# ansible standby -m postgresql_info -a 'filter=repl*'
+
+# Display all info from databases hosts except settings:
+# ansible databases -m postgresql_info -a 'filter=!settings'
+
+- name: Collect PostgreSQL version and extensions
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: ver*,ext*
+
+- name: Collect all info except settings and roles
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: "!settings,!roles"
+
+# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become
+# and pass "postgres" as a database to connect to
+- name: Collect tablespaces and repl_slots info
+ become: yes
+ become_user: pgsql
+ community.general.postgresql_info:
+ db: postgres
+ filter:
+ - tablesp*
+ - repl_sl*
+
+- name: Collect all info except databases
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter:
+ - "!databases"
+'''
+
+RETURN = r'''
+version:
+ description: Database server version U(https://www.postgresql.org/support/versioning/).
+ returned: always
+ type: dict
+ sample: { "version": { "major": 10, "minor": 6 } }
+ contains:
+ major:
+ description: Major server version.
+ returned: always
+ type: int
+ sample: 11
+ minor:
+ description: Minor server version.
+ returned: always
+ type: int
+ sample: 1
+in_recovery:
+  description: Indicates whether the server is in recovery mode.
+ returned: always
+ type: bool
+ sample: false
+databases:
+ description: Information about databases.
+ returned: always
+ type: dict
+ sample:
+ - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8",
+ "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } }
+ contains:
+ database_name:
+ description: Database name.
+ returned: always
+ type: dict
+ sample: template1
+ contains:
+ access_priv:
+ description: Database access privileges.
+ returned: always
+ type: str
+ sample: "=c/postgres_npostgres=CTc/postgres"
+ collate:
+ description:
+ - Database collation U(https://www.postgresql.org/docs/current/collation.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ ctype:
+ description:
+ - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ encoding:
+ description:
+ - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: UTF8
+ owner:
+ description:
+ - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html).
+ returned: always
+ type: str
+ sample: postgres
+ size:
+ description: Database size in bytes.
+ returned: always
+ type: str
+ sample: 8189415
+ extensions:
+ description:
+ - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html).
+ returned: always
+ type: dict
+ sample:
+ - { "plpgsql": { "description": "PL/pgSQL procedural language",
+ "extversion": { "major": 1, "minor": 0 } } }
+ contains:
+ extdescription:
+ description: Extension description.
+ returned: if existent
+ type: str
+ sample: PL/pgSQL procedural language
+ extversion:
+        description: Extension version.
+ returned: always
+ type: dict
+ contains:
+ major:
+ description: Extension major version.
+ returned: always
+ type: int
+ sample: 1
+ minor:
+ description: Extension minor version.
+ returned: always
+ type: int
+ sample: 0
+ nspname:
+ description: Namespace where the extension is.
+ returned: always
+ type: str
+ sample: pg_catalog
+ languages:
+ description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html).
+ returned: always
+ type: dict
+ sample: { "sql": { "lanacl": "", "lanowner": "postgres" } }
+ contains:
+ lanacl:
+ description:
+ - Language access privileges
+ U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ lanowner:
+ description:
+ - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: postgres
+ namespaces:
+ description:
+ - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html).
+ returned: always
+ type: dict
+ sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } }
+ contains:
+ nspacl:
+ description:
+ - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ nspowner:
+ description:
+ - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: postgres
+ publications:
+ description:
+ - Information about logical replication publications (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-publication.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } }
+ version_added: '0.2.0'
+ subscriptions:
+ description:
+ - Information about replication subscriptions (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample:
+ - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } }
+ version_added: '0.2.0'
+repl_slots:
+ description:
+ - Replication slots (available in 9.4 and later)
+ U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html).
+ returned: if existent
+ type: dict
+ sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } }
+ contains:
+ active:
+ description:
+ - True means that a receiver has connected to it, and it is currently reserving archives.
+ returned: always
+ type: bool
+ sample: true
+ database:
+ description: Database name this slot is associated with, or null.
+ returned: always
+ type: str
+ sample: acme
+ plugin:
+ description:
+ - Base name of the shared object containing the output plugin
+ this logical slot is using, or null for physical slots.
+ returned: always
+ type: str
+ sample: pgoutput
+ slot_type:
+ description: The slot type - physical or logical.
+ returned: always
+ type: str
+ sample: logical
+replications:
+ description:
+ - Information about the current replications by process PIDs
+ U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE).
+ returned: if pg_stat_replication view existent
+ type: dict
+ sample:
+ - { "76580": { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03",
+ "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } }
+ contains:
+ usename:
+ description:
+ - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view).
+ returned: always
+ type: str
+ sample: replication_user
+ app_name:
+ description: Name of the application that is connected to this WAL sender.
+ returned: if existent
+ type: str
+ sample: acme_srv
+ client_addr:
+ description:
+ - IP address of the client connected to this WAL sender.
+ - If this field is null, it indicates that the client is connected
+ via a Unix socket on the server machine.
+ returned: always
+ type: str
+ sample: 10.0.0.101
+ client_hostname:
+ description:
+ - Host name of the connected client, as reported by a reverse DNS lookup of client_addr.
+ - This field will only be non-null for IP connections, and only when log_hostname is enabled.
+ returned: always
+ type: str
+ sample: dbsrv1
+ backend_start:
+ description: Time when this process was started, i.e., when the client connected to this WAL sender.
+ returned: always
+ type: str
+ sample: "2019-02-03 00:14:33.908593+03"
+ state:
+ description: Current WAL sender state.
+ returned: always
+ type: str
+ sample: streaming
+tablespaces:
+ description:
+ - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ],
+ "spcowner": "postgres" } }
+ contains:
+ spcacl:
+ description: Tablespace access privileges.
+ returned: always
+ type: str
+ sample: "{postgres=C/postgres,andreyk=C/postgres}"
+ spcoptions:
+ description: Tablespace-level options.
+ returned: always
+ type: list
+ sample: [ "seq_page_cost=1" ]
+ spcowner:
+ description: Owner of the tablespace.
+ returned: always
+ type: str
+ sample: test_user
+roles:
+ description:
+ - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false,
+ "valid_until": "9999-12-31T23:59:59.999999+00:00" } }
+ contains:
+ canlogin:
+ description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html).
+ returned: always
+ type: bool
+ sample: true
+ member_of:
+ description:
+ - Role membership U(https://www.postgresql.org/docs/current/role-membership.html).
+ returned: always
+ type: list
+ sample: [ "read_only_users" ]
+ superuser:
+ description: User is a superuser or not.
+ returned: always
+ type: bool
+ sample: false
+ valid_until:
+ description:
+ - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html).
+ returned: always
+ type: str
+ sample: "9999-12-31T23:59:59.999999+00:00"
+pending_restart_settings:
+ description:
+ - List of settings that are pending restart to be set.
+ returned: always
+ type: list
+ sample: [ "shared_buffers" ]
+settings:
+ description:
+ - Information about run-time server parameters
+ U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: dict
+ sample:
+ - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647",
+ "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf",
+ "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } }
+ contains:
+ setting:
+ description: Current value of the parameter.
+ returned: always
+ type: str
+ sample: 49152
+ unit:
+ description: Implicit unit of the parameter.
+ returned: always
+ type: str
+ sample: kB
+ boot_val:
+ description:
+ - Parameter value assumed at server startup if the parameter is not otherwise set.
+ returned: always
+ type: str
+ sample: 4096
+ min_val:
+ description:
+ - Minimum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 64
+ max_val:
+ description:
+ - Maximum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 2147483647
+ sourcefile:
+ description:
+ - Configuration file the current value was set in.
+ - Null for values set from sources other than configuration files,
+      or when examined by a user who is neither a superuser nor a member of pg_read_all_settings.
+ - Helpful when using include directives in configuration files.
+ returned: always
+ type: str
+ sample: /var/lib/pgsql/10/data/postgresql.auto.conf
+ context:
+ description:
+ - Context required to set the parameter's value.
+ - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: str
+ sample: user
+ vartype:
+ description:
+ - Parameter type (bool, enum, integer, real, or string).
+ returned: always
+ type: str
+ sample: integer
+ val_in_bytes:
+ description:
+ - Current value of the parameter in bytes.
+ returned: if supported
+ type: int
+ sample: 2147483647
+ pretty_val:
+ description:
+ - Value presented in the pretty form.
+ returned: always
+ type: str
+ sample: 2MB
+ pending_restart:
+ description:
+      - True if the value has been changed in the configuration file but requires a restart to take effect; false otherwise.
+      - Returned only if C(settings) is passed.
+ returned: always
+ type: bool
+ sample: false
+'''
+
+from fnmatch import fnmatch
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgDbConn(object):
+ """Auxiliary class for working with PostgreSQL connection objects.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class that
+ contains connection parameters.
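+
+    A minimal usage sketch (illustrative, assuming a configured AnsibleModule
+    instance named ``module``):
+
+        conn = PgDbConn(module)
+        cursor = conn.connect()          # DictCursor over the default database
+        cursor = conn.reconnect('mydb')  # close and reconnect to another database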
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.db_conn = None
+ self.cursor = None
+
+ def connect(self):
+ """Connect to a PostgreSQL database and return a cursor object.
+
+ Note: connection parameters are passed by self.module object.
+ """
+ conn_params = get_conn_params(self.module, self.module.params, warn_db_default=False)
+ self.db_conn = connect_to_db(self.module, conn_params)
+ return self.db_conn.cursor(cursor_factory=DictCursor)
+
+ def reconnect(self, dbname):
+ """Reconnect to another database and return a PostgreSQL cursor object.
+
+ Arguments:
+ dbname (string): Database name to connect to.
+ """
+ self.db_conn.close()
+
+ self.module.params['database'] = dbname
+ return self.connect()
+
+
+class PgClusterInfo(object):
+ """Class for collection information about a PostgreSQL instance.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class.
+ db_conn_obj (psycopg2.connect): PostgreSQL connection object.
+ """
+
+ def __init__(self, module, db_conn_obj):
+ self.module = module
+ self.db_obj = db_conn_obj
+ self.cursor = db_conn_obj.connect()
+ self.pg_info = {
+ "version": {},
+ "in_recovery": None,
+ "tablespaces": {},
+ "databases": {},
+ "replications": {},
+ "repl_slots": {},
+ "settings": {},
+ "roles": {},
+ "pending_restart_settings": [],
+ }
+
+ def collect(self, val_list=False):
+ """Collect information based on 'filter' option."""
+ subset_map = {
+ "version": self.get_pg_version,
+ "in_recovery": self.get_recovery_state,
+ "tablespaces": self.get_tablespaces,
+ "databases": self.get_db_info,
+ "replications": self.get_repl_info,
+ "repl_slots": self.get_rslot_info,
+ "settings": self.get_settings,
+ "roles": self.get_role_info,
+ }
+
+ incl_list = []
+ excl_list = []
+        # Note: incl_list and excl_list don't make sense together,
+        # so if incl_list is not empty, we collect only values from it:
+ if val_list:
+ for i in val_list:
+ if i[0] != '!':
+ incl_list.append(i)
+ else:
+ excl_list.append(i.lstrip('!'))
+
+ if incl_list:
+ for s in subset_map:
+ for i in incl_list:
+ if fnmatch(s, i):
+ subset_map[s]()
+ break
+        elif excl_list:
+            # Collect info for every subset that matches no exclude pattern:
+            for s in subset_map:
+                if not any(fnmatch(s, e) for e in excl_list):
+                    subset_map[s]()
+
+ # Default behaviour, if include or exclude is not passed:
+ else:
+ # Just collect info for each item:
+ for s in subset_map:
+ subset_map[s]()
+
+ return self.pg_info
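+
+    # Filter semantics sketch (hypothetical values): collect(['ver*', 'rol*'])
+    # gathers only the "version" and "roles" subsets; collect(['!settings'])
+    # gathers every subset except "settings". If include and exclude patterns
+    # are mixed, the include patterns win and the exclude ones are ignored.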
+
+ def get_pub_info(self):
+ """Get publication statistics."""
+ query = ("SELECT p.*, r.rolname AS ownername "
+ "FROM pg_catalog.pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ publications = {}
+
+ for elem in result:
+ if not publications.get(elem['pubname']):
+ publications[elem['pubname']] = {}
+
+ for key, val in iteritems(elem):
+ if key != 'pubname':
+ publications[elem['pubname']][key] = val
+
+ return publications
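+
+    # Shape of the returned dict (illustrative values only):
+    #   {'acme_pub': {'ownername': 'alice', 'puballtables': True,
+    #                 'pubinsert': True, 'pubupdate': True, ...}}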
+
+ def get_subscr_info(self):
+ """Get subscription statistics."""
+ query = ("SELECT s.*, r.rolname AS ownername, d.datname AS dbname "
+ "FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ subscr_info = {}
+
+ for elem in result:
+ if not subscr_info.get(elem['dbname']):
+ subscr_info[elem['dbname']] = {}
+
+ if not subscr_info[elem['dbname']].get(elem['subname']):
+ subscr_info[elem['dbname']][elem['subname']] = {}
+
+ for key, val in iteritems(elem):
+ if key not in ('subname', 'dbname'):
+ subscr_info[elem['dbname']][elem['subname']][key] = val
+
+ return subscr_info
+
+ def get_tablespaces(self):
+ """Get information about tablespaces."""
+ # Check spcoption exists:
+ opt = self.__exec_sql("SELECT column_name "
+ "FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'")
+
+ if not opt:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+ else:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+
+ res = self.__exec_sql(query)
+ ts_dict = {}
+ for i in res:
+ ts_name = i[0]
+ ts_info = dict(
+ spcowner=i[1],
+ spcacl=i[2] if i[2] else '',
+ )
+ if opt:
+ ts_info['spcoptions'] = i[3] if i[3] else []
+
+ ts_dict[ts_name] = ts_info
+
+ self.pg_info["tablespaces"] = ts_dict
+
+ def get_ext_info(self):
+ """Get information about existing extensions."""
+ # Check that pg_extension exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_extension')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT e.extname, e.extversion, n.nspname, c.description "
+ "FROM pg_catalog.pg_extension AS e "
+ "LEFT JOIN pg_catalog.pg_namespace AS n "
+ "ON n.oid = e.extnamespace "
+ "LEFT JOIN pg_catalog.pg_description AS c "
+ "ON c.objoid = e.oid "
+ "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass")
+ res = self.__exec_sql(query)
+ ext_dict = {}
+ for i in res:
+ ext_ver = i[1].split('.')
+
+ ext_dict[i[0]] = dict(
+ extversion=dict(
+ major=int(ext_ver[0]),
+ minor=int(ext_ver[1]),
+ ),
+ nspname=i[2],
+ description=i[3],
+ )
+
+ return ext_dict
+
+ def get_role_info(self):
+ """Get information about roles (in PgSQL groups and users are roles)."""
+ query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, "
+ "r.rolvaliduntil, "
+ "ARRAY(SELECT b.rolname "
+ "FROM pg_catalog.pg_auth_members AS m "
+ "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) AS memberof "
+ "FROM pg_catalog.pg_roles AS r "
+ "WHERE r.rolname !~ '^pg_'")
+
+ res = self.__exec_sql(query)
+ rol_dict = {}
+ for i in res:
+ rol_dict[i[0]] = dict(
+ superuser=i[1],
+ canlogin=i[2],
+ valid_until=i[3] if i[3] else '',
+ member_of=i[4] if i[4] else [],
+ )
+
+ self.pg_info["roles"] = rol_dict
+
+ def get_rslot_info(self):
+ """Get information about replication slots if exist."""
+ # Check that pg_replication_slots exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_replication_slots')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT slot_name, plugin, slot_type, database, "
+ "active FROM pg_replication_slots")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ rslot_dict = {}
+ for i in res:
+ rslot_dict[i[0]] = dict(
+ plugin=i[1],
+ slot_type=i[2],
+ database=i[3],
+ active=i[4],
+ )
+
+ self.pg_info["repl_slots"] = rslot_dict
+
+ def get_settings(self):
+ """Get server settings."""
+ # Check pending restart column exists:
+ pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_settings' "
+ "AND column_name = 'pending_restart'")
+ if not pend_rest_col_exists:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile "
+ "FROM pg_settings")
+ else:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile, pending_restart "
+ "FROM pg_settings")
+
+ res = self.__exec_sql(query)
+
+ set_dict = {}
+ for i in res:
+ val_in_bytes = None
+ setting = i[1]
+ if i[2]:
+ unit = i[2]
+ else:
+ unit = ''
+
+ if unit == 'kB':
+ val_in_bytes = int(setting) * 1024
+
+ elif unit == '8kB':
+ val_in_bytes = int(setting) * 1024 * 8
+
+ elif unit == 'MB':
+ val_in_bytes = int(setting) * 1024 * 1024
+
+ if val_in_bytes is not None and val_in_bytes < 0:
+ val_in_bytes = 0
+
+ setting_name = i[0]
+ pretty_val = self.__get_pretty_val(setting_name)
+
+ pending_restart = None
+ if pend_rest_col_exists:
+ pending_restart = i[9]
+
+ set_dict[setting_name] = dict(
+ setting=setting,
+ unit=unit,
+ context=i[3],
+ vartype=i[4],
+ boot_val=i[5] if i[5] else '',
+ min_val=i[6] if i[6] else '',
+ max_val=i[7] if i[7] else '',
+ sourcefile=i[8] if i[8] else '',
+ pretty_val=pretty_val,
+ )
+ if val_in_bytes is not None:
+ set_dict[setting_name]['val_in_bytes'] = val_in_bytes
+
+ if pending_restart is not None:
+ set_dict[setting_name]['pending_restart'] = pending_restart
+ if pending_restart:
+ self.pg_info["pending_restart_settings"].append(setting_name)
+
+ self.pg_info["settings"] = set_dict
+
+ def get_repl_info(self):
+ """Get information about replication if the server is a master."""
+ # Check that pg_replication_slots exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_stat_replication')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, "
+ "r.client_hostname, r.backend_start::text, r.state "
+ "FROM pg_stat_replication AS r "
+ "JOIN pg_authid AS a ON r.usesysid = a.oid")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ repl_dict = {}
+ for i in res:
+ repl_dict[i[0]] = dict(
+ usename=i[1],
+ app_name=i[2] if i[2] else '',
+ client_addr=i[3],
+ client_hostname=i[4] if i[4] else '',
+ backend_start=i[5],
+ state=i[6],
+ )
+
+ self.pg_info["replications"] = repl_dict
+
+ def get_lang_info(self):
+ """Get information about current supported languages."""
+ query = ("SELECT l.lanname, a.rolname, l.lanacl "
+ "FROM pg_language AS l "
+ "JOIN pg_authid AS a ON l.lanowner = a.oid")
+ res = self.__exec_sql(query)
+ lang_dict = {}
+ for i in res:
+ lang_dict[i[0]] = dict(
+ lanowner=i[1],
+ lanacl=i[2] if i[2] else '',
+ )
+
+ return lang_dict
+
+ def get_namespaces(self):
+ """Get information about namespaces."""
+ query = ("SELECT n.nspname, a.rolname, n.nspacl "
+ "FROM pg_catalog.pg_namespace AS n "
+ "JOIN pg_authid AS a ON a.oid = n.nspowner")
+ res = self.__exec_sql(query)
+
+ nsp_dict = {}
+ for i in res:
+ nsp_dict[i[0]] = dict(
+ nspowner=i[1],
+ nspacl=i[2] if i[2] else '',
+ )
+
+ return nsp_dict
+
+ def get_pg_version(self):
+ """Get major and minor PostgreSQL server version."""
+ query = "SELECT version()"
+ raw = self.__exec_sql(query)[0][0]
+ raw = raw.split()[1].split('.')
+ self.pg_info["version"] = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
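+
+    # For example (illustrative output), version() may return
+    # 'PostgreSQL 10.15 on x86_64-pc-linux-gnu, ...'; split()[1] is '10.15',
+    # which yields major=10 and minor=15.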
+
+ def get_recovery_state(self):
+ """Get if the service is in recovery mode."""
+ self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0]
+
+ def get_db_info(self):
+ """Get information about the current database."""
+        # The following query returns:
+        # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size, Tablespace
+ query = ("SELECT d.datname, "
+ "pg_catalog.pg_get_userbyid(d.datdba), "
+ "pg_catalog.pg_encoding_to_char(d.encoding), "
+ "d.datcollate, "
+ "d.datctype, "
+ "pg_catalog.array_to_string(d.datacl, E'\n'), "
+ "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') "
+ "THEN pg_catalog.pg_database_size(d.datname)::text "
+ "ELSE 'No Access' END, "
+ "t.spcname "
+ "FROM pg_catalog.pg_database AS d "
+ "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
+ "WHERE d.datname != 'template0'")
+
+ res = self.__exec_sql(query)
+
+ db_dict = {}
+ for i in res:
+ db_dict[i[0]] = dict(
+ owner=i[1],
+ encoding=i[2],
+ collate=i[3],
+ ctype=i[4],
+ access_priv=i[5] if i[5] else '',
+ size=i[6],
+ )
+
+ if self.cursor.connection.server_version >= 100000:
+ subscr_info = self.get_subscr_info()
+
+ for datname in db_dict:
+ self.cursor = self.db_obj.reconnect(datname)
+ db_dict[datname]['namespaces'] = self.get_namespaces()
+ db_dict[datname]['extensions'] = self.get_ext_info()
+ db_dict[datname]['languages'] = self.get_lang_info()
+ if self.cursor.connection.server_version >= 100000:
+ db_dict[datname]['publications'] = self.get_pub_info()
+ db_dict[datname]['subscriptions'] = subscr_info.get(datname, {})
+
+ self.pg_info["databases"] = db_dict
+
+ def __get_pretty_val(self, setting):
+ """Get setting's value represented by SHOW command."""
+ return self.__exec_sql("SHOW %s" % setting)[0][0]
+
+ def __exec_sql(self, query):
+ """Execute SQL and return the result."""
+ try:
+ self.cursor.execute(query)
+ res = self.cursor.fetchall()
+ if res:
+ return res
+ except Exception as e:
+ self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+            self.cursor.close()
+ return False
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params['filter']
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ db_conn_obj = PgDbConn(module)
+
+ # Do job:
+ pg_info = PgClusterInfo(module, db_conn_obj)
+
+ module.exit_json(**pg_info.collect(filter_))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py
new file mode 100644
index 00000000..8b28cd9c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt <http://www.jensd.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database
+description:
+- Adds, removes or changes procedural languages with a PostgreSQL database.
+- This module allows you to add a language, remove a language or change the trust
+ relationship with a PostgreSQL database.
+- The module can be used on the machine where executed or on a remote host.
+- When removing a language from a database, it is possible that dependencies prevent
+  the language from being removed. In that case, you can specify I(cascade=yes) to
+ automatically drop objects that depend on the language (such as functions in the
+ language).
+- In case the language can't be deleted because it is required by the
+ database system, you can specify I(fail_on_drop=no) to ignore the error.
+- Be careful when marking a language as trusted since this could be a potential
+  security risk. Untrusted languages allow only users with the PostgreSQL superuser
+  privilege to use them to create new functions.
+options:
+ lang:
+ description:
+ - Name of the procedural language to add, remove or change.
+ required: true
+ type: str
+ aliases:
+ - name
+ trust:
+ description:
+ - Make this language trusted for the selected db.
+ type: bool
+ default: 'no'
+ db:
+ description:
+ - Name of database to connect to and where the language will be added, removed or changed.
+ type: str
+ aliases:
+ - login_db
+ required: true
+ force_trust:
+ description:
+ - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+ - Use with care!
+ type: bool
+ default: 'no'
+ fail_on_drop:
+ description:
+ - If C(yes), fail when removing a language. Otherwise just log and continue.
+ - In some cases, it is not possible to remove a language (used by the db-system).
+ - When dependencies block the removal, consider using I(cascade).
+ type: bool
+ default: 'yes'
+ cascade:
+ description:
+    - When dropping a language, also delete objects that depend on this language.
+ - Only used when I(state=absent).
+ type: bool
+ default: 'no'
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified I(session_role) must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though the
+ I(session_role) were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The state of the language for the selected database.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ owner:
+ description:
+ - Set an owner for the language.
+ - Ignored when I(state=absent).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(lang), I(session_role),
+ I(owner) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL languages
+ description: General information about PostgreSQL languages.
+ link: https://www.postgresql.org/docs/current/xplang.html
+- name: CREATE LANGUAGE reference
+ description: Complete reference of the CREATE LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createlanguage.html
+- name: ALTER LANGUAGE reference
+ description: Complete reference of the ALTER LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
+- name: DROP LANGUAGE reference
+ description: Complete reference of the DROP LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droplanguage.html
+author:
+- Jens Depuydt (@jensdepuydt)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Add language pltclu to database testdb if it doesn't exist
+ community.general.postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
+# Marks the language as trusted if it exists but isn't trusted yet.
+# force_trust makes sure that the language will be marked as trusted
+- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: present
+ trust: yes
+ force_trust: yes
+
+- name: Remove language pltclu from database testdb
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+
+- name: Remove language pltclu from database testdb and remove all dependencies
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ cascade: yes
+
+- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ fail_on_drop: no
+
+- name: In testdb change owner of mylang to alice
+ community.general.postgresql_lang:
+ db: testdb
+ lang: mylang
+ owner: alice
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE LANGUAGE "acme"']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+executed_queries = []
+
+
+def lang_exists(cursor, lang):
+ """Checks if language exists for db"""
+ query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.rowcount > 0
+
+
+def lang_istrusted(cursor, lang):
+ """Checks if language is trusted for db"""
+ query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def lang_altertrust(cursor, lang, trust):
+ """Changes if language is trusted for db"""
+ query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
+ cursor.execute(query, {'trust': trust, 'lang': lang})
+ executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
+ return True
+
+
+def lang_add(cursor, lang, trust):
+ """Adds language for db"""
+ if trust:
+ query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
+ else:
+ query = 'CREATE LANGUAGE "%s"' % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def lang_drop(cursor, lang, cascade):
+ """Drops language for db"""
+ cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
+ try:
+ if cascade:
+ query = "DROP LANGUAGE \"%s\" CASCADE" % lang
+ else:
+ query = "DROP LANGUAGE \"%s\"" % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return False
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return True
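+
+# Rationale for the SAVEPOINT handling above: a failed DROP LANGUAGE (for
+# example, dependent functions exist and cascade is off) only invalidates the
+# savepoint rather than the whole transaction, so the caller can inspect the
+# False return value and decide whether to fail via fail_on_drop.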
+
+
+def get_lang_owner(cursor, lang):
+ """Get language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ """
+ query = ("SELECT r.rolname FROM pg_language l "
+ "JOIN pg_roles r ON l.lanowner = r.oid "
+ "WHERE l.lanname = %(lang)s")
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def set_lang_owner(cursor, lang, owner):
+ """Set language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ owner (str): name of new owner.
+ """
+ query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ lang=dict(type="str", required=True, aliases=["name"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust=dict(type="bool", default="no"),
+ force_trust=dict(type="bool", default="no"),
+ cascade=dict(type="bool", default="no"),
+ fail_on_drop=dict(type="bool", default="yes"),
+ session_role=dict(type="str"),
+ owner=dict(type="str"),
+ trust_input=dict(type="bool", default="yes")
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ db = module.params["db"]
+ lang = module.params["lang"]
+ state = module.params["state"]
+ trust = module.params["trust"]
+ force_trust = module.params["force_trust"]
+ cascade = module.params["cascade"]
+ fail_on_drop = module.params["fail_on_drop"]
+ owner = module.params["owner"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, lang, session_role, owner)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor()
+
+ changed = False
+ kw = {'db': db, 'lang': lang, 'trust': trust}
+
+ if state == "present":
+ if lang_exists(cursor, lang):
+ lang_trusted = lang_istrusted(cursor, lang)
+            if lang_trusted != trust:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_altertrust(cursor, lang, trust)
+ else:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_add(cursor, lang, trust)
+ if force_trust:
+ changed = lang_altertrust(cursor, lang, trust)
+
+ else:
+ if lang_exists(cursor, lang):
+ if module.check_mode:
+ changed = True
+ kw['lang_dropped'] = True
+ else:
+ changed = lang_drop(cursor, lang, cascade)
+ if fail_on_drop and not changed:
+ msg = ("unable to drop language, use cascade "
+ "to delete dependencies or fail_on_drop=no to ignore")
+ module.fail_json(msg=msg)
+ kw['lang_dropped'] = changed
+
+ if owner and state == 'present':
+ if lang_exists(cursor, lang):
+ if owner != get_lang_owner(cursor, lang):
+ changed = set_lang_owner(cursor, lang, owner)
+
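+    # The connection was opened with autocommit=False, so in check mode the
+    # transaction below is rolled back: 'changed' still reports what would
+    # have happened without modifying the database.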
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py
new file mode 100644
index 00000000..3292a6db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_membership
+short_description: Add or remove PostgreSQL roles from groups
+description:
+- Adds or removes PostgreSQL roles from groups (other roles).
+- Users are roles with login privilege.
+- Groups are PostgreSQL roles usually without LOGIN privilege.
+- "Common use case:"
+- 1) add a new group (groups) by M(community.general.postgresql_user) module with I(role_attr_flags=NOLOGIN)
+- 2) grant them desired privileges by M(community.general.postgresql_privs) module
+- 3) add desired PostgreSQL users to the new group (groups) by this module
+options:
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to or revoked from I(target_roles).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - group
+ - source_role
+ - source_roles
+ target_roles:
+ description:
+ - The list of target roles (groups will be granted to them).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - target_role
+ - users
+ - user
+ fail_on_role:
+ description:
+ - If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
+ default: yes
+ type: bool
+ state:
+ description:
+ - Membership state.
+    - I(state=present) implies the I(groups) must be granted to I(target_roles).
+ - I(state=absent) implies the I(groups) must be revoked from I(target_roles).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(groups),
+ I(target_roles), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_owner
+- name: PostgreSQL role membership reference
+ description: Complete reference of the PostgreSQL role membership documentation.
+ link: https://www.postgresql.org/docs/current/role-membership.html
+- name: PostgreSQL role attributes reference
+ description: Complete reference of the PostgreSQL role attributes documentation.
+ link: https://www.postgresql.org/docs/current/role-attributes.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Grant role read_only to alice and bob
+ community.general.postgresql_membership:
+ group: read_only
+ target_roles:
+ - alice
+ - bob
+ state: present
+
+# you can also use target_roles: alice,bob,etc to pass the role list
+
+- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
+ community.general.postgresql_membership:
+ groups:
+ - read_only
+ - exec_func
+ target_role: bob
+ fail_on_role: no
+ state: absent
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
+granted:
+ description: Dict of granted groups and roles.
+ returned: if I(state=present)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+revoked:
+ description: Dict of revoked groups and roles.
+ returned: if I(state=absent)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+state:
+  description: Membership state that the module attempted to set.
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
+ target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
+ fail_on_role=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ groups = module.params['groups']
+ target_roles = module.params['target_roles']
+ fail_on_role = module.params['fail_on_role']
+ state = module.params['state']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, groups, target_roles, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+
+ pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
+
+ if state == 'present':
+ pg_membership.grant()
+
+ elif state == 'absent':
+ pg_membership.revoke()
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ return_dict = dict(
+ changed=pg_membership.changed,
+ state=state,
+ groups=pg_membership.groups,
+ target_roles=pg_membership.target_roles,
+ queries=pg_membership.executed_queries,
+ )
+
+ if state == 'present':
+ return_dict['granted'] = pg_membership.granted
+ elif state == 'absent':
+ return_dict['revoked'] = pg_membership.revoked
+
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py
new file mode 100644
index 00000000..06a09c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py
@@ -0,0 +1,453 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_owner
+short_description: Change the owner of a PostgreSQL database object
+description:
+- Change the owner of a PostgreSQL database object.
+- Also allows reassigning the ownership of database objects owned by a database role to another role.
+
+options:
+ new_owner:
+ description:
+    - Role (user/group) to set as the I(obj_name) owner.
+ type: str
+ required: yes
+ obj_name:
+ description:
+    - Name of a database object whose ownership will be changed.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ obj_type:
+ description:
+ - Type of a database object.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
+ aliases:
+ - type
+ reassign_owned_by:
+ description:
+ - The list of role names. The ownership of all the objects within the current database,
+      and of all shared objects (databases, tablespaces), owned by these roles will be reassigned to I(new_owner).
+    - Pay attention - it reassigns all objects owned by these roles in the I(db)!
+    - If the roles exist, the task always returns C(changed=True).
+ - Cannot reassign ownership of objects that are required by the database system.
+    - Mutually exclusive with I(obj_name) and I(obj_type).
+ type: list
+ elements: str
+ fail_on_role:
+ description:
+ - If C(yes), fail when I(reassign_owned_by) role does not exist.
+ Otherwise just warn and continue.
+ - Mutually exclusive with I(obj_name) and I(obj_type).
+ default: yes
+ type: bool
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(new_owner), I(obj_name),
+ I(reassign_owned_by), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- name: PostgreSQL REASSIGN OWNED command reference
+ description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
+ link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
+# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
+
+- name: The same as above by playbook
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: myfunc
+ obj_type: function
+
+- name: Set owner as bob for table acme in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: bob
+ obj_name: acme
+ obj_type: table
+
+- name: Set owner as alice for view test_view in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: test_view
+ obj_type: view
+
+- name: Set owner as bob for tablespace ssd in database foo
+ community.general.postgresql_owner:
+ db: foo
+ new_owner: bob
+ obj_name: ssd
+ obj_type: tablespace
+
+- name: Reassign all objects in database bar owned by bob to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by: bob
+
+- name: Reassign all objects in database bar owned by bob and bill to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by:
+ - bob
+ - bill
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgOwnership(object):
+
+ """Class for changing ownership of PostgreSQL objects.
+
+ Arguments:
+ module (AnsibleModule): Object of Ansible module class.
+ cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
+ role (str): Role name to set as a new owner of objects.
+
+ Important:
+ If you want to add handling of a new type of database objects:
+ 1. Add a specific method for this like self.__set_db_owner(), etc.
+ 2. Add a condition with a check of ownership for new type objects to self.__is_owner()
+ 3. Add a condition with invocation of the specific method to self.set_owner()
+ 4. Add the information to the module documentation
+ That's all.
+ """
+
+ def __init__(self, module, cursor, role):
+ self.module = module
+ self.cursor = cursor
+ self.check_role_exists(role)
+ self.role = role
+ self.changed = False
+ self.executed_queries = []
+ self.obj_name = ''
+ self.obj_type = ''
+
+ def check_role_exists(self, role, fail_on_role=True):
+ """Check the role exists or not.
+
+ Arguments:
+ role (str): Role name.
+ fail_on_role (bool): If True, fail when the role does not exist.
+ Otherwise just warn and continue.
+ """
+ if not self.__role_exists(role):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % role)
+ else:
+ self.module.warn("Role '%s' does not exist, pass" % role)
+
+ return False
+
+ else:
+ return True
+
+ def reassign(self, old_owners, fail_on_role):
+ """Implements REASSIGN OWNED BY command.
+
+ If success, set self.changed as True.
+
+ Arguments:
+ old_owners (list): The ownership of all the objects within
+ the current database, and of all shared objects (databases, tablespaces),
+ owned by these roles will be reassigned to self.role.
+ fail_on_role (bool): If True, fail when a role from old_owners does not exist.
+ Otherwise just warn and continue.
+ """
+ roles = []
+ for r in old_owners:
+ if self.check_role_exists(r, fail_on_role):
+ roles.append('"%s"' % r)
+
+ # Roles do not exist, nothing to do, exit:
+ if not roles:
+ return False
+
+ old_owners = ','.join(roles)
+
+ query = ['REASSIGN OWNED BY']
+ query.append(old_owners)
+ query.append('TO "%s"' % self.role)
+ query = ' '.join(query)
+
+ self.changed = exec_sql(self, query, return_bool=True)
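+
+    # For example (illustrative names), with self.role == 'alice',
+    # reassign(['bob', 'bill'], fail_on_role=True) executes:
+    #   REASSIGN OWNED BY "bob","bill" TO "alice"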
+
+ def set_owner(self, obj_type, obj_name):
+ """Change owner of a database object.
+
+ Arguments:
+ obj_type (str): Type of object (like database, table, view, etc.).
+ obj_name (str): Object name.
+ """
+ self.obj_name = obj_name
+ self.obj_type = obj_type
+
+        # If the new owner already owns the object,
+        # there is nothing to do:
+ if self.__is_owner():
+ return False
+
+ if obj_type == 'database':
+ self.__set_db_owner()
+
+ elif obj_type == 'function':
+ self.__set_func_owner()
+
+ elif obj_type == 'sequence':
+ self.__set_seq_owner()
+
+ elif obj_type == 'schema':
+ self.__set_schema_owner()
+
+ elif obj_type == 'table':
+ self.__set_table_owner()
+
+ elif obj_type == 'tablespace':
+ self.__set_tablespace_owner()
+
+ elif obj_type == 'view':
+ self.__set_view_owner()
+
+ elif obj_type == 'matview':
+ self.__set_mat_view_owner()
+
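+    # For example (illustrative), set_owner('table', 'acme') first checks
+    # current ownership via __is_owner() and, only when self.role does not
+    # already own the table, runs:
+    #   ALTER TABLE "acme" OWNER TO "<new_owner>"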
+ def __is_owner(self):
+ """Return True if self.role is the current object owner."""
+ if self.obj_type == 'table':
+ query = ("SELECT 1 FROM pg_tables "
+ "WHERE tablename = %(obj_name)s "
+ "AND tableowner = %(role)s")
+
+ elif self.obj_type == 'database':
+ query = ("SELECT 1 FROM pg_database AS d "
+ "JOIN pg_roles AS r ON d.datdba = r.oid "
+ "WHERE d.datname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'function':
+ query = ("SELECT 1 FROM pg_proc AS f "
+ "JOIN pg_roles AS r ON f.proowner = r.oid "
+ "WHERE f.proname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'sequence':
+ query = ("SELECT 1 FROM pg_class AS c "
+ "JOIN pg_roles AS r ON c.relowner = r.oid "
+ "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'schema':
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %(obj_name)s "
+ "AND schema_owner = %(role)s")
+
+ elif self.obj_type == 'tablespace':
+ query = ("SELECT 1 FROM pg_tablespace AS t "
+ "JOIN pg_roles AS r ON t.spcowner = r.oid "
+ "WHERE t.spcname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'view':
+ query = ("SELECT 1 FROM pg_views "
+ "WHERE viewname = %(obj_name)s "
+ "AND viewowner = %(role)s")
+
+ elif self.obj_type == 'matview':
+ query = ("SELECT 1 FROM pg_matviews "
+ "WHERE matviewname = %(obj_name)s "
+ "AND matviewowner = %(role)s")
+
+ query_params = {'obj_name': self.obj_name, 'role': self.role}
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+ def __set_db_owner(self):
+ """Set the database owner."""
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_func_owner(self):
+ """Set the function owner."""
+ query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_seq_owner(self):
+ """Set the sequence owner."""
+ query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_schema_owner(self):
+ """Set the schema owner."""
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_table_owner(self):
+ """Set the table owner."""
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_tablespace_owner(self):
+ """Set the tablespace owner."""
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_view_owner(self):
+ """Set the view owner."""
+ query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_mat_view_owner(self):
+ """Set the materialized view owner."""
+ query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __role_exists(self, role):
+ """Return True if role exists, otherwise return False."""
+ query_params = {'role': role}
+ query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ new_owner=dict(type='str', required=True),
+ obj_name=dict(type='str'),
+ obj_type=dict(type='str', aliases=['type'], choices=[
+ 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
+ reassign_owned_by=dict(type='list', elements='str'),
+ fail_on_role=dict(type='bool', default=True),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['obj_name', 'reassign_owned_by'],
+ ['obj_type', 'reassign_owned_by'],
+ ['obj_name', 'fail_on_role'],
+ ['obj_type', 'fail_on_role'],
+ ],
+ supports_check_mode=True,
+ )
+
+ new_owner = module.params['new_owner']
+ obj_name = module.params['obj_name']
+ obj_type = module.params['obj_type']
+ reassign_owned_by = module.params['reassign_owned_by']
+ fail_on_role = module.params['fail_on_role']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ pg_ownership = PgOwnership(module, cursor, new_owner)
+
+ # if we want to change ownership:
+ if obj_name:
+ pg_ownership.set_owner(obj_type, obj_name)
+
+ # if we want to reassign objects owned by roles:
+ elif reassign_owned_by:
+ pg_ownership.reassign(reassign_owned_by, fail_on_role)
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(
+ changed=pg_ownership.changed,
+ queries=pg_ownership.executed_queries,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py
new file mode 100644
index 00000000..1f484bcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py
@@ -0,0 +1,745 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+'''
+This module is used to manage postgres pg_hba files with Ansible.
+'''
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_pg_hba
+short_description: Add, remove or modify a rule in a pg_hba file
+description:
+  - The fundamental function of the module is to create or delete lines in pg_hba files.
+ - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
+    If they are not unique and one of them is the rule to change, only one of them will remain for C(state=present), or none for C(state=absent).
+extends_documentation_fragment: files
+options:
+ address:
+ description:
+ - The source address/net where the connections could come from.
+ - Will not be used for entries of I(type)=C(local).
+ - You can also use keywords C(all), C(samehost), and C(samenet).
+ default: samehost
+ type: str
+ aliases: [ source, src ]
+ backup:
+ description:
+ - If set, create a backup of the C(pg_hba) file before it is modified.
+      The location of the backup is returned in the C(backup_file) return value by this module.
+ default: false
+ type: bool
+ backup_file:
+ description:
+    - Write backup to a specific backup file rather than a temp file.
+ type: str
+ create:
+ description:
+    - Create a C(pg_hba) file if none exists.
+ - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+ default: false
+ type: bool
+ contype:
+ description:
+ - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+ type: str
+ choices: [ local, host, hostnossl, hostssl ]
+ databases:
+ description:
+ - Databases this line applies to.
+ default: all
+ type: str
+ dest:
+ description:
+ - Path to C(pg_hba) file to modify.
+ type: path
+ required: true
+ method:
+ description:
+ - Authentication method to be used.
+ type: str
+    choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
+ default: md5
+ netmask:
+ description:
+ - The netmask of the source address.
+ type: str
+ options:
+ description:
+ - Additional options for the authentication I(method).
+ type: str
+ order:
+ description:
+ - The entries will be written out in a specific order.
+ With this option you can control by which field they are ordered first, second and last.
+ s=source, d=databases, u=users.
+ This option is deprecated since 2.9 and will be removed in community.general 3.0.0.
+      The sort order is now hardcoded to C(sdu).
+ type: str
+ default: sdu
+ choices: [ sdu, sud, dsu, dus, usd, uds ]
+ state:
+ description:
+ - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ users:
+ description:
+ - Users this line applies to.
+ type: str
+ default: all
+
+notes:
+ - The default authentication assumes that on the host, you are either logging in as or
+ sudo'ing to an account with appropriate permissions to read and modify the file.
+ - This module also returns the pg_hba info. You can use this module to only retrieve it by only specifying I(dest).
+    The info can be found in the returned data under key C(pg_hba), as a list containing a dict per rule.
+ - This module will sort resulting C(pg_hba) files if a rule change is required.
+    This could give unexpected results with manually created hba files, if they were improperly sorted.
+    For example, if a rule for a network is created first and a rule for an IP inside that network is created next,
+    the IP-specific rule never matches and is effectively obsolete in the C(pg_hba) file.
+    After the C(pg_hba) file is rewritten by the M(community.general.postgresql_pg_hba) module, the IP-specific rule will be sorted above the range rule.
+    It will then start matching, which can give unexpected results.
+ - With the 'order' parameter you can control which field is used to sort first, next and last.
+ - The module supports a check mode and a diff mode.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+ description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+ link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
+
+requirements:
+ - ipaddress
+
+author: Sebastiaan Mannem (@sebasmannem)
+'''
+
+EXAMPLES = '''
+- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: joe,simon
+ source: ::1
+ databases: sales,logistics
+ method: peer
+ create: true
+
+- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: replication
+ source: 192.168.0.100/24
+ databases: replication
+ method: cert
+
+- name: Revoke access from local user mary on database mydb.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: local
+ users: mary
+ databases: mydb
+ state: absent
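+
+# An additional illustrative example (not from the original documentation),
+# using an explicit netmask instead of CIDR notation:
+- name: Grant user joe access to database mydb from 192.168.0.0 with netmask 255.255.255.0.
+  community.general.postgresql_pg_hba:
+    dest: /var/lib/postgres/data/pg_hba.conf
+    contype: host
+    users: joe
+    databases: mydb
+    source: 192.168.0.0
+    netmask: 255.255.255.0
+    method: md5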
+'''
+
+RETURN = r'''
+msgs:
+  description: List of textual messages describing what was done.
+ returned: always
+ type: list
+ sample:
+ "msgs": [
+ "Removing",
+ "Changed",
+ "Writing"
+ ]
+backup_file:
+ description: File that the original pg_hba file was backed up to
+ returned: changed
+ type: str
+ sample: /tmp/pg_hba_jxobj_p
+pg_hba:
+ description: List of the pg_hba rules as they are configured in the specified hba file
+ returned: always
+ type: list
+ sample:
+ "pg_hba": [
+ {
+ "db": "all",
+ "method": "md5",
+ "src": "samehost",
+ "type": "host",
+ "usr": "all"
+ }
+ ]
+'''
+
+import os
+import re
+import traceback
+
+IPADDRESS_IMP_ERR = None
+try:
+ import ipaddress
+except ImportError:
+ IPADDRESS_IMP_ERR = traceback.format_exc()
+
+import tempfile
+import shutil
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+# from ansible.module_utils.postgres import postgres_common_argument_spec
+
+PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
+ "ldap", "radius", "cert", "pam", "scram-sha-256"]
+PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
+PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
+PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
+
+WHITESPACES_RE = re.compile(r'\s+')
+
+
+class PgHbaError(Exception):
+ '''
+ This exception is raised when parsing the pg_hba file ends in an error.
+ '''
+
+
+class PgHbaRuleError(PgHbaError):
+ '''
+    This exception is raised when a rule in the pg_hba file cannot be parsed.
+ '''
+
+
+class PgHbaRuleChanged(PgHbaRuleError):
+ '''
+ This exception is raised when a new parsed rule is a changed version of an existing rule.
+ '''
+
+
+class PgHbaValueError(PgHbaError):
+ '''
+    This exception is raised when a value in the pg_hba file is invalid.
+ '''
+
+
+class PgHbaRuleValueError(PgHbaRuleError):
+ '''
+    This exception is raised when a rule in the pg_hba file contains an invalid value.
+ '''
+
+
+class PgHba(object):
+ """
+ PgHba object to read/write entries to/from.
+    pg_hba_file - path to the pg_hba file, usually pg_hba.conf in the PostgreSQL data directory
+ """
+ def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
+ if order not in PG_HBA_ORDERS:
+ msg = "invalid order setting {0} (should be one of '{1}')."
+ raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
+ self.pg_hba_file = pg_hba_file
+ self.rules = None
+ self.comment = None
+ self.order = order
+ self.backup = backup
+ self.last_backup = None
+ self.create = create
+ self.unchanged()
+        # self.databases will be updated by add_rule and gives some idea of the number of databases
+ # (at least that are handled by this pg_hba)
+ self.databases = set(['postgres', 'template0', 'template1'])
+
+        # self.users will be updated by add_rule and gives some idea of the number of users
+        # (at least those that are handled by this pg_hba); since these might also be groups with
+        # multiple users, this count might be totally off, but at least it is some info...
+ self.users = set(['postgres'])
+
+ self.read()
+
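+    # A minimal usage sketch (illustrative path and values):
+    #
+    #   pg_hba = PgHba('/tmp/pg_hba.conf', backup=True, create=True)
+    #   pg_hba.add_rule(PgHbaRule(contype='host', databases='app', users='joe',
+    #                             source='10.0.0.0/8', method='md5'))
+    #   if pg_hba.changed():
+    #       pg_hba.write()
+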
+ def unchanged(self):
+ '''
+        This method resets self.diff to an empty default
+ '''
+ self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
+ 'after': {'file': self.pg_hba_file, 'pg_hba': []}}
+
+ def read(self):
+ '''
+ Read in the pg_hba from the system
+ '''
+ self.rules = {}
+ self.comment = []
+        # read the pg_hba file
+ try:
+ with open(self.pg_hba_file, 'r') as file:
+ for line in file:
+ line = line.strip()
+                    # split off and keep any trailing comment
+ if '#' in line:
+ line, comment = line.split('#', 1)
+ self.comment.append('#' + comment)
+ try:
+ self.add_rule(PgHbaRule(line=line))
+ except PgHbaRuleError:
+ pass
+ self.unchanged()
+ except IOError:
+ pass
+
+ def write(self, backup_file=''):
+ '''
+ This method writes the PgHba rules (back) to a file.
+ '''
+ if not self.changed():
+ return False
+
+ contents = self.render()
+ if self.pg_hba_file:
+ if not (os.path.isfile(self.pg_hba_file) or self.create):
+ raise PgHbaError("pg_hba file '{0}' doesn't exist. "
+ "Use create option to autocreate.".format(self.pg_hba_file))
+ if self.backup and os.path.isfile(self.pg_hba_file):
+ if backup_file:
+ self.last_backup = backup_file
+ else:
+ __backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
+ shutil.copy(self.pg_hba_file, self.last_backup)
+ fileh = open(self.pg_hba_file, 'w')
+ else:
+ filed, __path = tempfile.mkstemp(prefix='pg_hba')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(contents)
+ self.unchanged()
+ fileh.close()
+ return True
+
+ def add_rule(self, rule):
+ '''
+ This method can be used to add a rule to the list of rules in this PgHba object
+ '''
+ key = rule.key()
+ try:
+ try:
+ oldrule = self.rules[key]
+ except KeyError:
+ raise PgHbaRuleChanged
+ ekeys = set(list(oldrule.keys()) + list(rule.keys()))
+ ekeys.remove('line')
+ for k in ekeys:
+ if oldrule.get(k) != rule.get(k):
+ raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
+ except PgHbaRuleChanged:
+ self.rules[key] = rule
+ self.diff['after']['pg_hba'].append(rule.line())
+ if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
+ databases = set(rule['db'].split(','))
+ self.databases.update(databases)
+ if rule['usr'] != 'all':
+ user = rule['usr']
+ if user[0] == '+':
+ user = user[1:]
+ self.users.add(user)
+
+ def remove_rule(self, rule):
+ '''
+ This method can be used to find and remove a rule. It doesn't look for the exact rule, only
+ the rule with the same key.
+ '''
+ keys = rule.key()
+ try:
+ del self.rules[keys]
+ self.diff['before']['pg_hba'].append(rule.line())
+ except KeyError:
+ pass
+
+ def get_rules(self, with_lines=False):
+ '''
+ This method returns all the rules of the PgHba object
+ '''
+ rules = sorted(self.rules.values())
+ for rule in rules:
+ ret = {}
+ for key, value in rule.items():
+ ret[key] = value
+ if not with_lines:
+ if 'line' in ret:
+ del ret['line']
+ else:
+ ret['line'] = rule.line()
+
+ yield ret
+
+ def render(self):
+ '''
+ This method renders the content of the PgHba rules and comments.
+ The returned value can be written directly to a new file.
+ '''
+ comment = '\n'.join(self.comment)
+ rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
+ result = comment + '\n' + rule_lines
+ # End it properly with a linefeed (if not already).
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def changed(self):
+ '''
+ This method can be called to detect if the PgHba file has been changed.
+ '''
+ return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
+
+
+class PgHbaRule(dict):
+ '''
+ This class represents one rule as defined in a line in a PgHbaFile.
+ '''
+
+ def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
+ method=None, options=None, line=None):
+ '''
+ This constructor builds a single rule, either by parsing a raw pg_hba line (via
+ the line argument) or from the separate column values (contype, databases,
+ users, ...).
+ '''
+
+ super(PgHbaRule, self).__init__()
+
+ if line:
+ # Read values from line if parsed
+ self.fromline(line)
+
+ # read rule cols from parsed items
+ rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ # Some sanity checks
+ for key in ['method', 'type']:
+ if key not in self:
+ raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
+
+ if self['method'] not in PG_HBA_METHODS:
+ msg = "invalid method {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
+
+ if self['type'] not in PG_HBA_TYPES:
+ msg = "invalid connection type {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
+
+ if self['type'] == 'local':
+ self.unset('src')
+ self.unset('mask')
+ elif 'src' not in self:
+ raise PgHbaRuleError('Missing src in rule {0}'.format(self))
+ elif '/' in self['src']:
+ self.unset('mask')
+ else:
+ self['src'] = str(self.source())
+ self.unset('mask')
+
+ def unset(self, key):
+ '''
+ This method is used to unset certain columns if they exist
+ '''
+ if key in self:
+ del self[key]
+
+ def line(self):
+ '''
+ This method can be used to return (or generate) the line
+ '''
+ try:
+ return self['line']
+ except KeyError:
+ self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
+ return self['line']
+
+ def fromline(self, line):
+ '''
+ split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
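+
+ For example, the line "host all all 192.168.0.0/24 md5" yields the fields
+ {'type': 'host', 'db': 'all', 'usr': 'all', 'src': '192.168.0.0/24',
+ 'method': 'md5'}; absent columns such as mask and options are not stored.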
+ '''
+ if WHITESPACES_RE.sub('', line) == '':
+ # empty line. skip this one...
+ return
+ cols = WHITESPACES_RE.split(line)
+ if len(cols) < 4:
+ msg = "Rule {0} has too few columns."
+ raise PgHbaValueError(msg.format(line))
+ if cols[0] not in PG_HBA_TYPES:
+ msg = "Rule {0} has unknown type: {1}."
+ raise PgHbaValueError(msg.format(line, cols[0]))
+ if cols[0] == 'local':
+ cols.insert(3, None) # No address
+ cols.insert(3, None) # No IP-mask
+ if len(cols) < 6:
+ cols.insert(4, None) # No IP-mask
+ elif cols[5] not in PG_HBA_METHODS:
+ cols.insert(4, None) # No IP-mask
+ if cols[5] not in PG_HBA_METHODS:
+ raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
+
+ if len(cols) < 7:
+ cols.insert(6, None) # No auth-options
+ else:
+ cols[6] = " ".join(cols[6:]) # combine all auth-options
+ rule = dict(zip(PG_HBA_HDR, cols[:7]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ def key(self):
+ '''
+ This method can be used to get the key from a rule.
+ '''
+ if self['type'] == 'local':
+ source = 'local'
+ else:
+ source = str(self.source())
+ return (source, self['db'], self['usr'])
+
+ def source(self):
+ '''
+ This method is used to get the source of a rule as an ipaddress object if possible.
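+
+ For example, src '192.168.0.0' with mask '255.255.255.0' is returned as the
+ network 192.168.0.0/24, because the mask has 24 leading one-bits.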
+ '''
+ if 'mask' in self.keys():
+ try:
+ ipaddress.ip_address(u'{0}'.format(self['src']))
+ except ValueError:
+ raise PgHbaValueError('Mask was specified, but source "{0}" '
+ 'is no valid ip'.format(self['src']))
+ # The ipaddress module cannot work with an ipv6 netmask, so let's convert it
+ # to a prefixlen. Furthermore, ipv4 with a bad netmask throws a "Rule {}
+ # doesn't seem to be an ip, but has a mask" error that doesn't describe
+ # what is actually going on.
+ try:
+ mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
+ except ValueError:
+ raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
+ binvalue = "{0:b}".format(int(mask_as_ip))
+ if '01' in binvalue:
+ raise PgHbaValueError('IP mask {0} seems invalid '
+ '(binary value has 1 after 0)'.format(self['mask']))
+ prefixlen = binvalue.count('1')
+ sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
+ try:
+ return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
+ except ValueError:
+ raise PgHbaValueError('{0} is no valid address range'.format(sourcenw))
+
+ try:
+ return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
+ except ValueError:
+ return self['src']
+
+ def __lt__(self, other):
+ """This function helps sorted to decide how to sort.
+
+ It just checks itself against the other and decides on some key values
+ if it should be sorted higher or lower in the list.
+ The way it works:
+ For networks, every 1 in 'netmask in binary' makes the subnet more specific.
+ Therefore I chose to use the prefix length as the weight.
+ So a single IP (/32) should have twice the weight of a /16 network.
+ To keep everything in the same weight scale,
+ - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
+ - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
+ Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
+ which corresponds to ipv6 (0-128).
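+ For example, an IPv4 /24 network gets weight 24 * 4 = 96, the same weight
+ as an IPv6 /96 network.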
+ """
+ myweight = self.source_weight()
+ hisweight = other.source_weight()
+ if myweight != hisweight:
+ return myweight > hisweight
+
+ myweight = self.db_weight()
+ hisweight = other.db_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+
+ myweight = self.user_weight()
+ hisweight = other.user_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+ try:
+ return self['src'] < other['src']
+ except TypeError:
+ return self.source_type_weight() < other.source_type_weight()
+ except Exception:
+ # When all else fails, just compare the exact line.
+ return self.line() < other.line()
+
+ def source_weight(self):
+ """Report the weight of this source net.
+
+ Basically this is the netmask, where IPv4 is normalized to IPv6
+ (IPv4/32 has the same weight as IPv6/128).
+ """
+ if self['type'] == 'local':
+ return 130
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return sourceobj.prefixlen * 4
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return sourceobj.prefixlen
+ if isinstance(sourceobj, str):
+ # You can also write all to match any IP address,
+ # samehost to match any of the server's own IP addresses,
+ # or samenet to match any address in any subnet that the server is connected to.
+ if sourceobj == 'all':
+ # (all is considered the full range of all ips, which has a weight of 0)
+ return 0
+ if sourceobj == 'samehost':
+ # (sort samehost second after local)
+ return 129
+ if sourceobj == 'samenet':
+ # Might write some fancy code to determine all prefixes
+ # from all interfaces and find a sane value for this one.
+ # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
+ return 96
+ if sourceobj[0] == '.':
+ # suffix matching (domain name), let's assume a very large scale
+ # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
+ return 64
+ # hostname, let's assume only one host matches, which is
+ # IPv4/32 or IPv6/128 (both have weight 128)
+ return 128
+ raise PgHbaValueError('Cannot deduce the source weight of this source {0}'.format(sourceobj))
+
+ def source_type_weight(self):
+ """Give a weight on the type of this source.
+
+ Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
+ This is a 'when all else fails' solution in __lt__.
+ """
+ if self['type'] == 'local':
+ return 3
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return 2
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return 1
+ if isinstance(sourceobj, str):
+ return 0
+ raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
+
+ def db_weight(self):
+ """Report the weight of the database.
+
+ Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
+ """
+ if self['db'] == 'all':
+ return 100000
+ if self['db'] == 'replication':
+ return 0
+ if self['db'] in ['samerole', 'samegroup']:
+ return 1
+ return 1 + self['db'].count(',')
+
+ def user_weight(self):
+ """Report weight when comparing users."""
+ if self['usr'] == 'all':
+ return 1000000
+ return 1
+
+
+def main():
+ '''
+ This function is the main function of this module
+ '''
+ # argument_spec = postgres_common_argument_spec()
+ argument_spec = dict()
+ argument_spec.update(
+ address=dict(type='str', default='samehost', aliases=['source', 'src']),
+ backup=dict(type='bool', default=False),
+ backup_file=dict(type='str'),
+ contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
+ create=dict(type='bool', default=False),
+ databases=dict(type='str', default='all'),
+ dest=dict(type='path', required=True),
+ method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
+ netmask=dict(type='str'),
+ options=dict(type='str'),
+ order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ users=dict(type='str', default='all')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ if IPADDRESS_IMP_ERR is not None:
+ module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
+
+ contype = module.params["contype"]
+ create = bool(module.params["create"] or module.check_mode)
+ if module.check_mode:
+ backup = False
+ else:
+ backup = module.params['backup']
+ backup_file = module.params['backup_file']
+ databases = module.params["databases"]
+ dest = module.params["dest"]
+
+ method = module.params["method"]
+ netmask = module.params["netmask"]
+ options = module.params["options"]
+ order = module.params["order"]
+ source = module.params["address"]
+ state = module.params["state"]
+ users = module.params["users"]
+
+ ret = {'msgs': []}
+ try:
+ pg_hba = PgHba(dest, order, backup=backup, create=create)
+ except PgHbaError as error:
+ module.fail_json(msg='Error reading file:\n{0}'.format(error))
+
+ if contype:
+ try:
+ for database in databases.split(','):
+ for user in users.split(','):
+ rule = PgHbaRule(contype, database, user, source, netmask, method, options)
+ if state == "present":
+ ret['msgs'].append('Adding')
+ pg_hba.add_rule(rule)
+ else:
+ ret['msgs'].append('Removing')
+ pg_hba.remove_rule(rule)
+ except PgHbaError as error:
+ module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
+ file_args = module.load_file_common_arguments(module.params)
+ ret['changed'] = changed = pg_hba.changed()
+ if changed:
+ ret['msgs'].append('Changed')
+ ret['diff'] = pg_hba.diff
+
+ if not module.check_mode:
+ ret['msgs'].append('Writing')
+ try:
+ if pg_hba.write(backup_file):
+ module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
+ expand=False)
+ except PgHbaError as error:
+ module.fail_json(msg='Error writing file:\n{0}'.format(error))
+ if pg_hba.last_backup:
+ ret['backup_file'] = pg_hba.last_backup
+
+ ret['pg_hba'] = list(pg_hba.get_rules())
+ module.exit_json(**ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py
new file mode 100644
index 00000000..240cea57
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ping
+short_description: Check remote PostgreSQL server availability
+description:
+- Simple module to check remote PostgreSQL server availability.
+options:
+ db:
+ description:
+ - Name of a database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_info
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# PostgreSQL ping dbsrv server from the shell:
+# ansible dbsrv -m postgresql_ping
+
+# For the example below you need to generate the certificates beforehand.
+# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
+- name: PostgreSQL ping dbsrv server using non-default credentials and SSL
+ community.general.postgresql_ping:
+ db: protected_db
+ login_host: dbsrv
+ login_user: secret
+ login_password: secret_pass
+ ca_cert: /root/root.crt
+ ssl_mode: verify-full
+'''
+
+RETURN = r'''
+is_available:
+ description: PostgreSQL server availability.
+ returned: always
+ type: bool
+ sample: true
+server_version:
+ description: PostgreSQL server version.
+ returned: always
+ type: dict
+ sample: { major: 10, minor: 1 }
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgPing(object):
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.is_available = False
+ self.version = {}
+
+ def do(self):
+ self.get_pg_version()
+ return (self.is_available, self.version)
+
+ def get_pg_version(self):
+ query = "SELECT version()"
+ raw = exec_sql(self, query, add_to_executed=False)[0][0]
+ if raw:
+ self.is_available = True
+ raw = raw.split()[1].split('.')
+ self.version = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
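+ # For example, a raw value like "PostgreSQL 10.1 on x86_64-pc-linux-gnu, ..."
+ # (a hypothetical version string) yields {'major': 10, 'minor': 1}.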
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ # Set some default values:
+ cursor = None
+ db_connection = None
+ result = dict(
+ changed=False,
+ is_available=False,
+ server_version=dict(),
+ )
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, fail_on_conn=False)
+
+ if db_connection is not None:
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Do job:
+ pg_ping = PgPing(module, cursor)
+ if cursor:
+ # If connection established:
+ result["is_available"], result["server_version"] = pg_ping.do()
+ db_connection.rollback()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py
new file mode 100644
index 00000000..e8d64f36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py
@@ -0,0 +1,1171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: yes
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8.
+ - The C(type) choice is available since Ansible version 2.10.
+ - C(procedure) is supported since collection version 1.3.0 and PostgreSQL 11.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+ group, language, table, tablespace, schema, sequence, type, procedure ]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence), C(function) or C(procedure),
+ the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of type I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) since Ansible 2.8)
+ - C(procedure) is supported since PostgreSQL 11 and C(community.general) collection 1.3.0.
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - If I(type) is I(function) or I(procedure), colons (":") in object names will be
+ replaced with commas (needed to specify signatures, see examples).
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(procedure), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+ - Pay attention, for embedded types when I(type=type)
+ I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: yes
+ aliases:
+ - role
+ fail_on_role:
+ description:
+ - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: yes
+ type: bool
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+ - A list of existing role (user/group) names to set as the
+ default permissions for database objects subsequently created by them.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ host:
+ description:
+ - Database host address. If unspecified, connect via Unix socket.
+ type: str
+ aliases:
+ - login_host
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ aliases:
+ - login_unix_socket
+ login:
+ description:
+ - The username to authenticate with.
+ type: str
+ default: postgres
+ aliases:
+ - login_user
+ password:
+ description:
+ - The password to authenticate with.
+ type: str
+ aliases:
+ - login_password
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases:
+ - ssl_rootcert
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(roles), I(target_roles), I(session_role),
+ I(schema) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ usage_on_types:
+ description:
+ - When adding default privileges, the module always implicitly adds C(USAGE ON TYPES).
+ - To avoid this behavior, set I(usage_on_types) to C(no).
+ - Added to preserve backwards compatibility.
+ - Used only when adding default privileges, ignored otherwise.
+ type: bool
+ default: yes
+ version_added: '1.2.0'
+
+notes:
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(no) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+ access via privileges granted to any role R is a member of including C(PUBLIC).
+- Note that when you use C(PUBLIC) role, the module always reports that the state has been changed.
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+extends_documentation_fragment:
+- community.general.postgres
+
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ community.general.postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
+
+- name: Same as above leveraging default values
+ community.general.postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ community.general.postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ community.general.postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set to ALL_DEFAULT or to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific privileges, see the examples further below
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: yes
+
+# Available since version 2.7
+# Objs must be set to ALL_DEFAULT or to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific privileges, set objs and privs explicitly as in the two steps below
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ community.general.postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ community.general.postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since community.general 0.2.0
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since collection version 1.3.0
+# Grant 'execute' permissions on all procedures in schema 'common' to role 'caller'
+# Needs PostgreSQL 11 or higher and community.general 1.3.0 or higher
+- name: GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges by default to the role reader on new TABLES objects
+# created by librarian.
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# REVOKE the default SELECT privileges on new TABLES objects created by librarian
+# from the role reader.
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since community.general 0.2.0
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ community.general.postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+ 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+ 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+ 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+ 'FUNCTIONS': ('ALL', 'EXECUTE'),
+ 'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+ pass
+
+
+def role_exists(module, cursor, rolname):
+ """Check user exists or not"""
+ query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+ try:
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+ except Exception as e:
+ module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+ return False
+
+
+# We don't have functools.partial in Python < 2.5
+def partial(f, *args, **kwargs):
+ """Partial function application"""
+
+ def g(*g_args, **g_kwargs):
+ new_kwargs = kwargs.copy()
+ new_kwargs.update(g_kwargs)
+ return f(*(args + g_args), **new_kwargs)
+
+ g.f = f
+ g.args = args
+ g.kwargs = kwargs
+ return g
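+
+
+# A usage sketch (hypothetical values): get = partial(conn.get_table_acls, 'public')
+# makes get(['books']) equivalent to conn.get_table_acls('public', ['books']);
+# Connection.manipulate_privs() below uses this to pre-bind the schema qualifier.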
+
+
+class Connection(object):
+ """Wrapper around a psycopg2 connection with some convenience methods"""
+
+ def __init__(self, params, module):
+ self.database = params.database
+ self.module = module
+ # To use default values, keyword arguments must be absent, so check
+ # which values are empty and don't include them in the **kw
+ # dictionary
+ params_map = {
+ "host": "host",
+ "login": "user",
+ "password": "password",
+ "port": "port",
+ "database": "database",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ kw = dict((params_map[k], getattr(params, k)) for k in params_map
+ if getattr(params, k) != '' and getattr(params, k) is not None)
+
+ # If a unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+ if is_localhost and params.unix_socket != "":
+ kw["host"] = params.unix_socket
+
+ sslrootcert = params.ca_cert
+ if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+ raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+ self.connection = psycopg2.connect(**kw)
+ self.cursor = self.connection.cursor()
+ self.pg_version = self.connection.server_version
+
+ def commit(self):
+ self.connection.commit()
+
+ def rollback(self):
+ self.connection.rollback()
+
+ @property
+ def encoding(self):
+ """Connection encoding in Python-compatible form"""
+ return psycopg2.extensions.encodings[self.connection.encoding]
+
+ # Methods for querying database objects
+
+ # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+ # phrases in GRANT or REVOKE statements, therefore alternative methods are
+ # provided here.
+
+ def schema_exists(self, schema):
+ query = """SELECT count(*)
+ FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return self.cursor.fetchone()[0] > 0
+
+ def get_all_tables_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_sequences_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S'"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_functions_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s")
+
+ if self.pg_version >= 110000:
+ query += " and p.prokind = 'f'"
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ def get_all_procedures_in_schema(self, schema):
+ if self.pg_version < 110000:
+ raise Error("PostgreSQL verion must be >= 11 for type=procedure. Exit")
+
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s and p.prokind = 'p'")
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ # Methods for getting access control lists and group membership info
+
+ # To determine whether anything has changed after granting/revoking
+ # privileges, we compare the access control lists of the specified database
+ # objects before and afterwards. Python's list/string comparison should
+ # suffice for change detection, we should not actually have to parse ACLs.
+ # The same should apply to group membership information.
+
+ def get_table_acls(self, schema, tables):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, tables))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_sequence_acls(self, schema, sequences):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, sequences))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_function_acls(self, schema, function_signatures):
+ funcnames = [f.split('(', 1)[0] for f in function_signatures]
+ query = """SELECT proacl
+ FROM pg_catalog.pg_proc p
+ JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s AND proname = ANY (%s)
+ ORDER BY proname, proargtypes"""
+ self.cursor.execute(query, (schema, funcnames))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_schema_acls(self, schemas):
+ query = """SELECT nspacl FROM pg_catalog.pg_namespace
+ WHERE nspname = ANY (%s) ORDER BY nspname"""
+ self.cursor.execute(query, (schemas,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_language_acls(self, languages):
+ query = """SELECT lanacl FROM pg_catalog.pg_language
+ WHERE lanname = ANY (%s) ORDER BY lanname"""
+ self.cursor.execute(query, (languages,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_tablespace_acls(self, tablespaces):
+ query = """SELECT spcacl FROM pg_catalog.pg_tablespace
+ WHERE spcname = ANY (%s) ORDER BY spcname"""
+ self.cursor.execute(query, (tablespaces,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_database_acls(self, databases):
+ query = """SELECT datacl FROM pg_catalog.pg_database
+ WHERE datname = ANY (%s) ORDER BY datname"""
+ self.cursor.execute(query, (databases,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_group_memberships(self, groups):
+ query = """SELECT roleid, grantor, member, admin_option
+ FROM pg_catalog.pg_auth_members am
+ JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
+ WHERE r.rolname = ANY(%s)
+ ORDER BY roleid, grantor, member"""
+ self.cursor.execute(query, (groups,))
+ return self.cursor.fetchall()
+
+ def get_default_privs(self, schema, *args):
+ query = """SELECT defaclacl
+ FROM pg_default_acl a
+ JOIN pg_namespace b ON a.defaclnamespace=b.oid
+ WHERE b.nspname = %s;"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_data_wrapper_acls(self, fdws):
+ query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (%s) ORDER BY fdwname"""
+ self.cursor.execute(query, (fdws,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_server_acls(self, fs):
+ query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (%s) ORDER BY srvname"""
+ self.cursor.execute(query, (fs,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_type_acls(self, schema, types):
+ query = """SELECT t.typacl FROM pg_catalog.pg_type t
+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
+ self.cursor.execute(query, (schema, types))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ # Manipulating privileges
+
+ def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
+ state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True):
+ """Manipulate database object privileges.
+
+ :param obj_type: Type of database object to grant/revoke
+ privileges for.
+ :param privs: Either a list of privileges to grant/revoke
+ or None if type is "group".
+ :param objs: List of database objects to grant/revoke
+ privileges for.
+ :param roles: Either a list of role names or "PUBLIC"
+ for the implicitly defined "PUBLIC" group
+ :param target_roles: List of role names to grant/revoke
+ default privileges as.
+ :param state: "present" to grant privileges, "absent" to revoke.
+ :param grant_option: Only for state "present": If True, set
+ grant/admin option. If False, revoke it.
+ If None, don't change grant option.
+ :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
+ "FUNCTION") must be qualified by schema.
+ Ignored for other types.
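+ :param fail_on_role: If True, fail when a role in roles does not
+ exist; otherwise only warn and skip it.
+ :param usage_on_types: Only used with "default_privs": if True,
+ USAGE ON TYPES is also granted.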
+ """
+ # get_status: function to get current status
+ if obj_type == 'table':
+ get_status = partial(self.get_table_acls, schema_qualifier)
+ elif obj_type == 'sequence':
+ get_status = partial(self.get_sequence_acls, schema_qualifier)
+ elif obj_type in ('function', 'procedure'):
+ get_status = partial(self.get_function_acls, schema_qualifier)
+ elif obj_type == 'schema':
+ get_status = self.get_schema_acls
+ elif obj_type == 'language':
+ get_status = self.get_language_acls
+ elif obj_type == 'tablespace':
+ get_status = self.get_tablespace_acls
+ elif obj_type == 'database':
+ get_status = self.get_database_acls
+ elif obj_type == 'group':
+ get_status = self.get_group_memberships
+ elif obj_type == 'default_privs':
+ get_status = partial(self.get_default_privs, schema_qualifier)
+ elif obj_type == 'foreign_data_wrapper':
+ get_status = self.get_foreign_data_wrapper_acls
+ elif obj_type == 'foreign_server':
+ get_status = self.get_foreign_server_acls
+ elif obj_type == 'type':
+ get_status = partial(self.get_type_acls, schema_qualifier)
+ else:
+ raise Error('Unsupported database object type "%s".' % obj_type)
+
+ # Return False (nothing has changed) if there are no objs to work on.
+ if not objs:
+ return False
+
+ # obj_ids: quoted db object identifiers (sometimes schema-qualified)
+ if obj_type in ('function', 'procedure'):
+ obj_ids = []
+ for obj in objs:
+ try:
+ f, args = obj.split('(', 1)
+ except Exception:
+ raise Error('Illegal function / procedure signature: "%s".' % obj)
+ obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
+ elif obj_type in ['table', 'sequence', 'type']:
+ obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
+ else:
+ obj_ids = ['"%s"' % o for o in objs]
+
+ # set_what: SQL-fragment specifying what to set for the target roles:
+ # Either group membership or privileges on objects of a certain type
+ if obj_type == 'group':
+ set_what = ','.join(obj_ids)
+ elif obj_type == 'default_privs':
+ # We don't want privs to be quoted here
+ set_what = ','.join(privs)
+ else:
+ # function types are already quoted above
+ if obj_type not in ('function', 'procedure'):
+ obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
+ # Note: obj_type has been checked against a set of string literals
+ # and privs was escaped when it was parsed
+ # Note: Underscores are replaced with spaces to support multi-word obj_type
+ set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
+ ','.join(obj_ids))
+
+ # for_whom: SQL-fragment specifying for whom to set the above
+ if roles == 'PUBLIC':
+ for_whom = 'PUBLIC'
+ else:
+ for_whom = []
+ for r in roles:
+ if not role_exists(self.module, self.cursor, r):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
+
+ else:
+ self.module.warn("Role '%s' does not exist, pass it" % r.strip())
+ else:
+ for_whom.append('"%s"' % r)
+
+ if not for_whom:
+ return False
+
+ for_whom = ','.join(for_whom)
+
+ # as_who:
+ as_who = None
+ if target_roles:
+ as_who = ','.join('"%s"' % r for r in target_roles)
+
+ if schema_qualifier:
+ schema_qualifier = '"%s"' % schema_qualifier
+
+ status_before = get_status(objs)
+
+ query = QueryBuilder(state) \
+ .for_objtype(obj_type) \
+ .with_grant_option(grant_option) \
+ .for_whom(for_whom) \
+ .as_who(as_who) \
+ .for_schema(schema_qualifier) \
+ .set_what(set_what) \
+ .for_objs(objs) \
+ .usage_on_types(usage_on_types) \
+ .build()
+
+ executed_queries.append(query)
+ self.cursor.execute(query)
+ if roles == 'PUBLIC':
+ return True
+
+ status_after = get_status(objs)
+
+ def nonesorted(e):
+ # On Python 3+, sorting can fail when comparing NoneType
+ # elements, so map None to an empty string.
+ if e is None:
+ return ''
+ return e
+
+ status_before.sort(key=nonesorted)
+ status_after.sort(key=nonesorted)
+ return status_before != status_after
+
+
+class QueryBuilder(object):
+ def __init__(self, state):
+ self._grant_option = None
+ self._for_whom = None
+ self._as_who = None
+ self._set_what = None
+ self._obj_type = None
+ self._state = state
+ self._schema = None
+ self._objs = None
+ self._usage_on_types = None
+ self.query = []
+
+ def for_objs(self, objs):
+ self._objs = objs
+ return self
+
+ def for_schema(self, schema):
+ self._schema = schema
+ return self
+
+ def with_grant_option(self, option):
+ self._grant_option = option
+ return self
+
+ def for_whom(self, who):
+ self._for_whom = who
+ return self
+
+ def usage_on_types(self, usage_on_types):
+ self._usage_on_types = usage_on_types
+ return self
+
+ def as_who(self, target_roles):
+ self._as_who = target_roles
+ return self
+
+ def set_what(self, what):
+ self._set_what = what
+ return self
+
+ def for_objtype(self, objtype):
+ self._obj_type = objtype
+ return self
+
+ def build(self):
+ if self._state == 'present':
+ self.build_present()
+ else:
+ self.build_absent()
+ return '\n'.join(self.query)
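+
+ # A usage sketch (hypothetical values):
+ # QueryBuilder('present').for_objtype('table').for_whom('"reader"')
+ # .set_what('SELECT ON table "public"."books"').with_grant_option(None)
+ # .build() returns 'GRANT SELECT ON table "public"."books" TO "reader";'.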
+
+ def add_default_revoke(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+
+ def add_grant_option(self):
+ if self._grant_option:
+ if self._obj_type == 'group':
+ self.query[-1] += ' WITH ADMIN OPTION;'
+ else:
+ self.query[-1] += ' WITH GRANT OPTION;'
+ elif self._grant_option is False:
+ self.query[-1] += ';'
+ if self._obj_type == 'group':
+ self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ elif not self._obj_type == 'default_privs':
+ self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ else:
+ self.query[-1] += ';'
+
+ def add_default_priv(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who,
+ self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ self.add_grant_option()
+
+ if self._usage_on_types:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who,
+ self._schema,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
+ self.add_grant_option()
+
+ def build_present(self):
+ if self._obj_type == 'default_privs':
+ self.add_default_revoke()
+ self.add_default_priv()
+ else:
+ self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
+ self.add_grant_option()
+
+ def build_absent(self):
+ if self._obj_type == 'default_privs':
+ self.query = []
+ for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ database=dict(required=True, aliases=['db', 'login_db']),
+ state=dict(default='present', choices=['present', 'absent']),
+ privs=dict(required=False, aliases=['priv']),
+ type=dict(default='table',
+ choices=['table',
+ 'sequence',
+ 'function',
+ 'procedure',
+ 'database',
+ 'schema',
+ 'language',
+ 'tablespace',
+ 'group',
+ 'default_privs',
+ 'foreign_data_wrapper',
+ 'foreign_server',
+ 'type', ]),
+ objs=dict(required=False, aliases=['obj']),
+ schema=dict(required=False),
+ roles=dict(required=True, aliases=['role']),
+ session_role=dict(required=False),
+ target_roles=dict(required=False),
+ grant_option=dict(required=False, type='bool',
+ aliases=['admin_option']),
+ host=dict(default='', aliases=['login_host']),
+ unix_socket=dict(default='', aliases=['login_unix_socket']),
+ login=dict(default='postgres', aliases=['login_user']),
+ password=dict(default='', aliases=['login_password'], no_log=True),
+ fail_on_role=dict(type='bool', default=True),
+ trust_input=dict(type='bool', default=True),
+ usage_on_types=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ fail_on_role = module.params['fail_on_role']
+ usage_on_types = module.params['usage_on_types']
+
+ # Create type object as namespace for module params
+ p = type('Params', (), module.params)
+ # param "schema": default, allowed depends on param "type"
+ if p.type in ['table', 'sequence', 'function', 'procedure', 'type', 'default_privs']:
+ p.schema = p.schema or 'public'
+ elif p.schema:
+ module.fail_json(msg='Argument "schema" is not allowed '
+ 'for type "%s".' % p.type)
+
+ # param "objs": default, required depends on param "type"
+ if p.type == 'database':
+ p.objs = p.objs or p.database
+ elif not p.objs:
+ module.fail_json(msg='Argument "objs" is required '
+ 'for type "%s".' % p.type)
+
+ # param "privs": allowed, required depends on param "type"
+ if p.type == 'group':
+ if p.privs:
+ module.fail_json(msg='Argument "privs" is not allowed '
+ 'for type "group".')
+ elif not p.privs:
+ module.fail_json(msg='Argument "privs" is required '
+ 'for type "%s".' % p.type)
+
+ # Check input
+ if not p.trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, p.roles, p.target_roles, p.session_role, p.schema)
+
+ # Connect to Database
+ if not psycopg2:
+ module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
+ try:
+ conn = Connection(p, module)
+ except psycopg2.Error as e:
+ module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+ except ValueError as e:
+ # We raise this when the psycopg library is too old
+ module.fail_json(msg=to_native(e))
+
+ if p.session_role:
+ try:
+ conn.cursor.execute('SET ROLE "%s"' % p.session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ # privs
+ if p.privs:
+ privs = frozenset(pr.upper() for pr in p.privs.split(','))
+ if not privs.issubset(VALID_PRIVS):
+ module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
+ else:
+ privs = None
+ # objs:
+ if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_tables_in_schema(p.schema)
+ elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_sequences_in_schema(p.schema)
+ elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_functions_in_schema(p.schema)
+ elif p.type == 'procedure' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_procedures_in_schema(p.schema)
+ elif p.type == 'default_privs':
+ if p.objs == 'ALL_DEFAULT':
+ objs = frozenset(VALID_DEFAULT_OBJS.keys())
+ else:
+ objs = frozenset(obj.upper() for obj in p.objs.split(','))
+ if not objs.issubset(VALID_DEFAULT_OBJS):
+ module.fail_json(
+ msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
+ # Again, do we have valid privs specified for object type:
+ valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
+ if not valid_objects_for_priv == objs:
+ module.fail_json(
+ msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
+ valid_objects_for_priv, objs))
+ else:
+ objs = p.objs.split(',')
+
+ # function signatures are encoded using ':' to separate args
+ if p.type in ('function', 'procedure'):
+ objs = [obj.replace(':', ',') for obj in objs]
+
+ # roles
+ if p.roles.upper() == 'PUBLIC':
+ roles = 'PUBLIC'
+ else:
+ roles = p.roles.split(',')
+
+ if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
+ # Report the missing role before exiting; exit_json() ends execution.
+ if fail_on_role:
+ module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
+ else:
+ module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
+ module.exit_json(changed=False)
+
+ # check if target_roles is set with type: default_privs
+ if p.target_roles and not p.type == 'default_privs':
+ module.warn('"target_roles" will be ignored '
+ 'Argument "type: default_privs" is required for usage of "target_roles".')
+
+ # target roles
+ if p.target_roles:
+ target_roles = p.target_roles.split(',')
+ else:
+ target_roles = None
+
+ changed = conn.manipulate_privs(
+ obj_type=p.type,
+ privs=privs,
+ objs=objs,
+ roles=roles,
+ target_roles=target_roles,
+ state=p.state,
+ grant_option=p.grant_option,
+ schema_qualifier=p.schema,
+ fail_on_role=fail_on_role,
+ usage_on_types=usage_on_types,
+ )
+
+ except Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except psycopg2.Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e))
+
+ if module.check_mode or not changed:
+ conn.rollback()
+ else:
+ conn.commit()
+ module.exit_json(changed=changed, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
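One detail of main() above worth calling out: because objs is itself comma-separated, function and procedure signatures arrive with colons standing in for the commas between argument types, and the module simply swaps them back. A tiny sketch of the round trip (the signatures are made up for illustration):

    objs = 'add(int:int),concat(text:text)'.split(',')
    objs = [obj.replace(':', ',') for obj in objs]  # same transform as in main()
    print(objs)  # ['add(int,int)', 'concat(text,text)']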
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py
new file mode 100644
index 00000000..1db80adc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py
@@ -0,0 +1,682 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_publication
+short_description: Add, update, or remove PostgreSQL publication
+description:
+- Add, update, or remove PostgreSQL publication.
+options:
+ name:
+ description:
+ - Name of the publication to add, update, or remove.
+ required: true
+ type: str
+ db:
+ description:
+ - Name of the database to connect to and where
+ the publication state will be changed.
+ aliases: [ login_db ]
+ type: str
+ tables:
+ description:
+ - List of tables to add to the publication.
+ - If no value is set, all tables are targeted.
+ - If the publication already exists for specific tables and I(tables) is not passed,
+ nothing will be changed. If you need to add all tables to the publication with the same name,
+ drop the existing publication and create a new one without passing I(tables).
+ type: list
+ elements: str
+ state:
+ description:
+ - The publication state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ parameters:
+ description:
+ - Dictionary with optional publication parameters.
+ - Available parameters depend on PostgreSQL version.
+ type: dict
+ owner:
+ description:
+ - Publication owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ type: str
+ cascade:
+ description:
+ - Drop publication dependencies. Has effect with I(state=absent) only.
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(tables), I(owner),
+ I(session_role), I(parameters) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- PostgreSQL version must be 10 or greater.
+seealso:
+- name: CREATE PUBLICATION reference
+ description: Complete reference of the CREATE PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createpublication.html
+- name: ALTER PUBLICATION reference
+ description: Complete reference of the ALTER PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterpublication.html
+- name: DROP PUBLICATION reference
+ description: Complete reference of the DROP PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droppublication.html
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new publication with name "acme" targeting all tables in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+
+- name: Create publication "acme" publishing only prices and vehicles tables.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+
+- name: >
+ Create publication "acme", set user alice as an owner, targeting all tables.
+ Allowable DML operations are INSERT and UPDATE only
+ community.general.postgresql_publication:
+ name: acme
+ owner: alice
+ parameters:
+ publish: 'insert,update'
+
+- name: >
+ Assuming publication "acme" exists and there are targeted
+ tables "prices" and "vehicles", add table "stores" to the publication.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+ - stores
+
+- name: Remove publication "acme" if exists in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+ state: absent
+'''
+
+RETURN = r'''
+exists:
+ description:
+ - Flag indicating whether the publication exists at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
+owner:
+ description: Owner of the publication at the end of runtime.
+ returned: if publication exists
+ type: str
+ sample: "alice"
+tables:
+ description:
+ - List of tables in the publication at the end of runtime.
+ - If all tables are published, returns an empty list.
+ returned: if publication exists
+ type: list
+ sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
+alltables:
+ description:
+ - Flag indicating whether all tables are published.
+ returned: if publication exists
+ type: bool
+ sample: false
+parameters:
+ description: Publication parameters at the end of runtime.
+ returned: if publication exists
+ type: dict
+ sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+
+################################
+# Module functions and classes #
+################################
+
+def transform_tables_representation(tbl_list):
+ """Add 'public.' to names of tables where a schema identifier is absent
+ and add quotes to each element.
+
+ Args:
+ tbl_list (list): List of table names.
+
+ Returns:
+ tbl_list (list): Changed list.
+ """
+ for i, table in enumerate(tbl_list):
+ if '.' not in table:
+ tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
+ else:
+ tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
+
+ return tbl_list
+
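A self-contained illustration of what the helper above produces; quote_table below is a stand-in for pg_quote_identifier(..., 'table'), which double-quotes each dot-separated part:

    def quote_table(ident):
        # Stand-in for pg_quote_identifier(ident, 'table') from module_utils.database.
        return '.'.join('"%s"' % part for part in ident.split('.'))

    def transform(tbl_list):
        return [quote_table(t.strip() if '.' in t else 'public.%s' % t.strip())
                for t in tbl_list]

    print(transform(['prices', ' acme.vehicles']))
    # -> ['"public"."prices"', '"acme"."vehicles"']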
+
+class PgPublication():
+ """Class to work with PostgreSQL publication.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the publication.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of the publication.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with publication attributes.
+ exists (bool): Flag indicating whether the publication exists.
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.executed_queries = []
+ self.attrs = {
+ 'alltables': False,
+ 'tables': [],
+ 'parameters': {},
+ 'owner': '',
+ }
+ self.exists = self.check_pub()
+
+ def get_info(self):
+ """Refresh the publication information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_pub()
+ return self.attrs
+
+ def check_pub(self):
+ """Check the publication and refresh ``self.attrs`` publication attribute.
+
+ Returns:
+ True if the publication with ``self.name`` exists, False otherwise.
+ """
+
+ pub_info = self.__get_general_pub_info()
+
+ if not pub_info:
+ # Publication does not exist:
+ return False
+
+ self.attrs['owner'] = pub_info.get('pubowner')
+
+ # Publication DML operations:
+ self.attrs['parameters']['publish'] = {}
+ self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
+ self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
+ self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
+ if pub_info.get('pubtruncate'):
+ self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')
+
+ # If alltables flag is False, get the list of targeted tables:
+ if not pub_info.get('puballtables'):
+ table_info = self.__get_tables_pub_info()
+ # Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
+ # for better representation:
+ for i, schema_and_table in enumerate(table_info):
+ table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')
+
+ self.attrs['tables'] = table_info
+ else:
+ self.attrs['alltables'] = True
+
+ # Publication exists:
+ return True
+
+ def create(self, tables, params, owner, check_mode=True):
+ """Create the publication.
+
+ Args:
+ tables (list): List with names of the tables that need to be added to the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been created, otherwise False.
+ """
+ changed = True
+
+ query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]
+
+ if tables:
+ query_fragments.append("FOR TABLE %s" % ', '.join(tables))
+ else:
+ query_fragments.append("FOR ALL TABLES")
+
+ if params:
+ params_list = []
+ # Make list ["param = 'value'", ...] from params dict:
+ for (key, val) in iteritems(params):
+ params_list.append("%s = '%s'" % (key, val))
+
+ # Add the list to query_fragments:
+ query_fragments.append("WITH (%s)" % ', '.join(params_list))
+
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ if owner:
+ # If check_mode, just add possible SQL to
+ # executed_queries and return:
+ self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
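For reference, a sketch of the statement create() assembles for a publication named acme with two tables and a publish parameter; the quoting shown is what pg_quote_identifier would produce:

    name = '"acme"'
    tables = ['"public"."prices"', '"public"."vehicles"']
    params = {'publish': 'insert,update'}

    fragments = ['CREATE PUBLICATION %s' % name]
    fragments.append('FOR TABLE %s' % ', '.join(tables) if tables else 'FOR ALL TABLES')
    fragments.append('WITH (%s)' % ', '.join("%s = '%s'" % kv for kv in params.items()))
    print(' '.join(fragments))
    # CREATE PUBLICATION "acme" FOR TABLE "public"."prices", "public"."vehicles"
    #     WITH (publish = 'insert,update')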
+ def update(self, tables, params, owner, check_mode=True):
+ """Update the publication.
+
+ Args:
+ tables (list): List with names of the tables that need to be present in the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been updated, otherwise False.
+ """
+ changed = False
+
+ # Add or drop tables from the published table set:
+ if tables and not self.attrs['alltables']:
+
+ # 1. Add tables that are passed but not published yet:
+ for tbl in tables:
+ if tbl not in self.attrs['tables']:
+ changed = self.__pub_add_table(tbl, check_mode=check_mode)
+
+ # 2. Drop tables that are published but absent
+ # from the passed tables:
+ for tbl in self.attrs['tables']:
+ if tbl not in tables:
+ changed = self.__pub_drop_table(tbl, check_mode=check_mode)
+
+ elif tables and self.attrs['alltables']:
+ changed = self.__pub_set_tables(tables, check_mode=check_mode)
+
+ # Update pub parameters:
+ if params:
+ for key, val in iteritems(params):
+ if self.attrs['parameters'].get(key):
+
+ # In PostgreSQL 10/11, 'publish' is the only optional parameter available.
+ if key == 'publish':
+ # 'publish' value can be only a string with comma-separated items
+ # of allowed DML operations like 'insert,update' or
+ # 'insert,update,delete', etc.
+ # Make dictionary to compare with current attrs later:
+ val_dict = self.attrs['parameters']['publish'].copy()
+ val_list = val.split(',')
+ for v in val_dict:
+ if v in val_list:
+ val_dict[v] = True
+ else:
+ val_dict[v] = False
+
+ # Compare val_dict and the dict with current 'publish' parameters,
+ # if they're different, set new values:
+ if val_dict != self.attrs['parameters']['publish']:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Default behavior for other cases:
+ elif self.attrs['parameters'][key] != val:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ else:
+ # If the parameter was not set before:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Update pub owner:
+ if owner:
+ if owner != self.attrs['owner']:
+ changed = self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
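The 'publish' handling above expands the requested comma-separated string against the current flags before deciding whether an ALTER is needed; this sketch reproduces that expansion:

    current = {'insert': True, 'update': False, 'delete': False}  # as read from pg_publication
    requested = 'insert,update'

    val_list = requested.split(',')
    val_dict = dict((op, op in val_list) for op in current)
    print(val_dict)             # {'insert': True, 'update': True, 'delete': False}
    print(val_dict != current)  # True -> the module would issue the SET query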
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the publication.
+
+ Kwargs:
+ cascade (bool): Flag indicates that publication needs to be deleted
+ with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been dropped, otherwise False.
+ """
+ if self.exists:
+ query_fragments = []
+ query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def __get_general_pub_info(self):
+ """Get and return general publication information.
+
+ Returns:
+ Dict with publication information if successful, False otherwise.
+ """
+ # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
+ pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_publication' "
+ "AND column_name = 'pubtruncate'"), add_to_executed=False)
+
+ if pgtrunc_sup:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+ else:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+
+ result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __get_tables_pub_info(self):
+ """Get and return tables that are published by the publication.
+
+ Returns:
+ List of dicts with published tables.
+ """
+ query = ("SELECT schemaname, tablename "
+ "FROM pg_publication_tables WHERE pubname = %(pname)s")
+ return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+
+ def __pub_add_table(self, table, check_mode=False):
+ """Add a table to the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_drop_table(self, table, check_mode=False):
+ """Drop a table from the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_tables(self, tables, check_mode=False):
+ """Set a table suit that need to be published by the publication.
+
+ Args:
+ tables (list): List of tables.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
+ query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ ', '.join(quoted_tables)))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_param(self, param, value, check_mode=False):
+ """Set an optional publication parameter.
+
+ Args:
+ param (str): Name of the parameter.
+ value (str): Parameter value.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
+ param, value))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_owner(self, role, check_mode=False):
+ """Set a publication owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ('ALTER PUBLICATION %s '
+ 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+ Note: if we just need to get information from the database,
+ we use the ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(required=True),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ tables=dict(type='list', elements='str'),
+ parameters=dict(type='dict'),
+ owner=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ name = module.params['name']
+ state = module.params['state']
+ tables = module.params['tables']
+ params = module.params['parameters']
+ owner = module.params['owner']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not params:
+ params_list = None
+ else:
+ params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]
+
+ check_input(module, name, tables, owner, session_role, params_list)
+
+ if state == 'absent':
+ if tables:
+ module.warn('parameter "tables" is ignored when "state=absent"')
+ if params:
+ module.warn('parameter "parameters" is ignored when "state=absent"')
+ if owner:
+ module.warn('parameter "owner" is ignored when "state=absent"')
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when "state=present"')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ # We check publication state without DML queries execution, so set autocommit:
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Nothing was changed by default:
+ changed = False
+
+ ###################################
+ # Create object and do rock'n'roll:
+ publication = PgPublication(module, cursor, name)
+
+ if tables:
+ tables = transform_tables_representation(tables)
+
+ # If module.check_mode=True, nothing will be changed:
+ if state == 'present':
+ if not publication.exists:
+ changed = publication.create(tables, params, owner, check_mode=module.check_mode)
+
+ else:
+ changed = publication.update(tables, params, owner, check_mode=module.check_mode)
+
+ elif state == 'absent':
+ changed = publication.drop(cascade=cascade, check_mode=module.check_mode)
+
+ # Get final publication info:
+ pub_fin_info = {}
+ if state == 'present' or (state == 'absent' and module.check_mode):
+ pub_fin_info = publication.get_info()
+ elif state == 'absent' and not module.check_mode:
+ publication.exists = False
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Update publication info and return ret values:
+ module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py
new file mode 100644
index 00000000..e231fbd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py
@@ -0,0 +1,452 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(community.general.postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+options:
+ query:
+ description:
+ - SQL query to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: str
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When the value is a list, it will be converted to a PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When the value is a list, it will be converted to a PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - Path to a SQL script on the target machine.
+ - If the script contains several queries, they must be semicolon-separated.
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: no
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ search_path:
+ description:
+ - List of schema names to look in.
+ type: list
+ elements: str
+ version_added: '1.0.0'
+seealso:
+- module: community.general.postgresql_db
+- name: PostgreSQL Schema reference
+ description: Complete reference of the PostgreSQL schema documentation.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ community.general.postgresql_query:
+ db: acme
+ query: SELECT version()
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ community.general.postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ community.general.postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ community.general.postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ community.general.postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ community.general.postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: yes
+
+- name: >
+ Insert data to the column of array type using positional_args.
+ Note that we use quotes here, the same as for passing JSON, etc.
+ community.general.postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ ansible.builtin.set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+
+# Select from test table looking into app1 schema first, then,
+# if the schema doesn't exist or the table hasn't been found there,
+# try to find it in the schema public
+- name: Select from test using search_path
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table
+ search_path:
+ - app1
+ - public
+'''
+
+RETURN = r'''
+query:
+ description:
+ - Executed query.
+ - When reading several queries from a file, it contains only the last one.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description:
+ - Attribute containing the message returned by the command.
+ - When reading several queries from a file, it contains the message of the last one.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+ - When running queries from a file, returns result of the last query.
+ returned: always
+ type: list
+ elements: dict
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+query_list:
+ description:
+ - List of executed queries.
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: str
+ sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
+query_all_results:
+ description:
+ - List containing results of all queries executed (one sublist for every query).
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: list
+ sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
+rowcount:
+ description:
+ - Number of produced or affected rows.
+ - When using a script with multiple queries,
+ it contains a total number of produced or affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # it is needed for checking 'no result to fetch' in main(),
+ # psycopg2 availability will be checked by connect_to_db() in
+ # ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# Module execution.
+#
+
+def list_to_pg_array(elem):
+ """Convert the passed list to PostgreSQL array
+ represented as a string.
+
+ Args:
+ elem (list): List that needs to be converted.
+
+ Returns:
+ elem (str): String representation of PostgreSQL array.
+ """
+ elem = str(elem).strip('[]')
+ elem = '{' + elem + '}'
+ return elem
+
+
+def convert_elements_to_pg_arrays(obj):
+ """Convert list elements of the passed object
+ to PostgreSQL arrays represented as strings.
+
+ Args:
+ obj (dict or list): Object whose elements need to be converted.
+
+ Returns:
+ obj (dict or list): Object with converted elements.
+ """
+ if isinstance(obj, dict):
+ for (key, elem) in iteritems(obj):
+ if isinstance(elem, list):
+ obj[key] = list_to_pg_array(elem)
+
+ elif isinstance(obj, list):
+ for i, elem in enumerate(obj):
+ if isinstance(elem, list):
+ obj[i] = list_to_pg_array(elem)
+
+ return obj
+
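A quick demonstration of the two converters above, run against the helpers as defined in this file; note that list_to_pg_array leans on Python's str(), so string elements come out single-quoted inside the braces:

    print(list_to_pg_array([1, 2, 3]))   # {1, 2, 3}
    print(list_to_pg_array(['a', 'b']))  # {'a', 'b'}

    named = {'ids': [1, 2], 'name': 'acme'}
    print(convert_elements_to_pg_arrays(named))  # {'ids': '{1, 2}', 'name': 'acme'}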
+
+def set_search_path(cursor, search_path):
+ """Set session's search_path.
+
+ Args:
+ cursor (Psycopg2 cursor): Database cursor object.
+ search_path (str): String containing comma-separated schema names.
+ """
+ cursor.execute('SET search_path TO %s' % search_path)
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ query=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ positional_args=dict(type='list', elements='raw'),
+ named_args=dict(type='dict'),
+ session_role=dict(type='str'),
+ path_to_script=dict(type='path'),
+ autocommit=dict(type='bool', default=False),
+ encoding=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ search_path=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(('positional_args', 'named_args'),),
+ supports_check_mode=True,
+ )
+
+ query = module.params["query"]
+ positional_args = module.params["positional_args"]
+ named_args = module.params["named_args"]
+ path_to_script = module.params["path_to_script"]
+ autocommit = module.params["autocommit"]
+ encoding = module.params["encoding"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ search_path = module.params["search_path"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, session_role)
+
+ if autocommit and module.check_mode:
+ module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
+
+ if path_to_script and query:
+ module.fail_json(msg="path_to_script is mutually exclusive with query")
+
+ if positional_args:
+ positional_args = convert_elements_to_pg_arrays(positional_args)
+
+ elif named_args:
+ named_args = convert_elements_to_pg_arrays(named_args)
+
+ query_list = []
+ if path_to_script:
+ try:
+ with open(path_to_script, 'rb') as f:
+ query = to_native(f.read())
+ if ';' in query:
+ query_list = [q for q in query.split(';') if q != '\n']
+ else:
+ query_list.append(query)
+ except Exception as e:
+ module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
+ else:
+ query_list.append(query)
+
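Note that the splitting above filters out only fragments that are exactly a single newline; any other empty tail survives and is sent to the server as an empty query. Both cases in a sketch:

    script = 'SELECT 1;\nSELECT 2;\n'
    print([q for q in script.split(';') if q != '\n'])
    # ['SELECT 1', '\nSELECT 2']      -- lone trailing newline dropped

    script = 'SELECT 1; SELECT 2;'
    print([q for q in script.split(';') if q != '\n'])
    # ['SELECT 1', ' SELECT 2', '']   -- empty trailing fragment kept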
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ if encoding is not None:
+ db_connection.set_client_encoding(encoding)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if search_path:
+ set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))
+
+ # Prepare args:
+ if module.params.get("positional_args"):
+ arguments = module.params["positional_args"]
+ elif module.params.get("named_args"):
+ arguments = module.params["named_args"]
+ else:
+ arguments = None
+
+ # Set defaults:
+ changed = False
+
+ query_all_results = []
+ rowcount = 0
+ statusmessage = ''
+
+ # Execute query:
+ for query in query_list:
+ try:
+ cursor.execute(query, arguments)
+ statusmessage = cursor.statusmessage
+ if cursor.rowcount > 0:
+ rowcount += cursor.rowcount
+
+ try:
+ query_result = [dict(row) for row in cursor.fetchall()]
+
+ except Psycopg2ProgrammingError as e:
+ if to_native(e) == 'no results to fetch':
+ query_result = []
+
+ except Exception as e:
+ module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+ query_all_results.append(query_result)
+
+ if 'SELECT' not in statusmessage:
+ if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
+ s = statusmessage.split()
+ if len(s) == 3:
+ if s[2] != '0':
+ changed = True
+
+ elif len(s) == 2:
+ if s[1] != '0':
+ changed = True
+
+ else:
+ changed = True
+
+ else:
+ changed = True
+
+ except Exception as e:
+ if not autocommit:
+ db_connection.rollback()
+
+ cursor.close()
+ db_connection.close()
+ module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, arguments, to_native(e), query_list))
+
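The changed detection above keys off psycopg2's statusmessage, which for DML looks like 'INSERT 0 1' (three tokens, the last being the row count) or 'UPDATE 5' / 'DELETE 0' (two tokens). A simplified standalone mirror of that decision, assuming those formats:

    def is_changed(statusmessage):
        # Any non-SELECT command counts as a change, except DML reporting 0 rows.
        if 'SELECT' in statusmessage:
            return False
        tokens = statusmessage.split()
        if tokens and tokens[0] in ('UPDATE', 'INSERT', 'DELETE'):
            return tokens[-1] != '0'
        return True

    for msg in ('INSERT 0 1', 'UPDATE 0', 'DELETE 3', 'CREATE TABLE'):
        print(msg, '->', is_changed(msg))
    # INSERT 0 1 -> True, UPDATE 0 -> False, DELETE 3 -> True, CREATE TABLE -> True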
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ if not autocommit:
+ db_connection.commit()
+
+ kw = dict(
+ changed=changed,
+ query=cursor.query,
+ query_list=query_list,
+ statusmessage=statusmessage,
+ query_result=query_result,
+ query_all_results=query_all_results,
+ rowcount=rowcount,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py
new file mode 100644
index 00000000..e7f28ecf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_schema
+short_description: Add or remove PostgreSQL schema
+description:
+- Add or remove PostgreSQL schema.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ type: str
+ aliases:
+ - schema
+ database:
+ description:
+ - Name of the database to connect to and add or remove the schema.
+ type: str
+ default: postgres
+ aliases:
+ - db
+ - login_db
+ owner:
+ description:
+ - Name of the role to set as owner of the schema.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The schema state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ cascade_drop:
+ description:
+ - Drop schema with CASCADE to remove child objects.
+ type: bool
+ default: false
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL schemas
+ description: General information about PostgreSQL schemas.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+- name: CREATE SCHEMA reference
+ description: Complete reference of the CREATE SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createschema.html
+- name: ALTER SCHEMA reference
+ description: Complete reference of the ALTER SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterschema.html
+- name: DROP SCHEMA reference
+ description: Complete reference of the DROP SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropschema.html
+author:
+- Flavien Chantelot (@Dorn-) <contact@flavien.io>
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new schema with name acme in test database
+ community.general.postgresql_schema:
+ db: test
+ name: acme
+
+- name: Create a new schema acme with a user bob who will own it
+ community.general.postgresql_schema:
+ name: acme
+ owner: bob
+
+- name: Drop schema "acme" with cascade
+ community.general.postgresql_schema:
+ name: acme
+ state: absent
+ cascade_drop: yes
+'''
+
+RETURN = r'''
+schema:
+ description: Name of the schema.
+ returned: success, changed
+ type: str
+ sample: "acme"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["CREATE SCHEMA \"acme\""]
+'''
+
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+ SQLParseError,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def set_owner(cursor, schema, owner):
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (
+ pg_quote_identifier(schema, 'schema'), owner)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+
+
+def get_schema_info(cursor, schema):
+ query = ("SELECT schema_owner AS owner "
+ "FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.fetchone()
+
+
+def schema_exists(cursor, schema):
+ query = ("SELECT schema_name FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.rowcount == 1
+
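Both lookups above rely on psycopg2's named-parameter binding (a %(schema)s placeholder plus a dict) rather than string interpolation, which is what keeps a hostile schema name from becoming SQL. Usage looks like this, assuming cursor is an open psycopg2 cursor:

    query = ("SELECT schema_name FROM information_schema.schemata "
             "WHERE schema_name = %(schema)s")
    cursor.execute(query, {'schema': "acme'; DROP SCHEMA public; --"})
    print(cursor.rowcount)  # 0: the value was bound as data, not spliced into the SQL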
+
+def schema_delete(cursor, schema, cascade):
+ if schema_exists(cursor, schema):
+ query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def schema_create(cursor, schema, owner):
+ if not schema_exists(cursor, schema):
+ query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
+ if owner:
+ query_fragments.append('AUTHORIZATION "%s"' % owner)
+ query = ' '.join(query_fragments)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return set_owner(cursor, schema, owner)
+ else:
+ return False
+
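Unlike those lookups, the DDL built by schema_delete and schema_create cannot use bind parameters (identifiers are not bindable), hence pg_quote_identifier for the schema name and manual quoting for the owner. A sketch of the assembled statement:

    schema, owner = 'acme', 'bob'
    fragments = ['CREATE SCHEMA "%s"' % schema]  # pg_quote_identifier stand-in
    if owner:
        fragments.append('AUTHORIZATION "%s"' % owner)
    print(' '.join(fragments))  # CREATE SCHEMA "acme" AUTHORIZATION "bob"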
+
+def schema_matches(cursor, schema, owner):
+ if not schema_exists(cursor, schema):
+ return False
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return False
+ else:
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ schema=dict(type="str", required=True, aliases=['name']),
+ owner=dict(type="str", default=""),
+ database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
+ cascade_drop=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ session_role=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ schema = module.params["schema"]
+ owner = module.params["owner"]
+ state = module.params["state"]
+ cascade_drop = module.params["cascade_drop"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, schema, owner, session_role)
+
+ changed = False
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = not schema_exists(cursor, schema)
+ elif state == "present":
+ changed = not schema_matches(cursor, schema, owner)
+ module.exit_json(changed=changed, schema=schema)
+
+ if state == "absent":
+ try:
+ changed = schema_delete(cursor, schema, cascade_drop)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = schema_create(cursor, schema, owner)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, schema=schema, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py
new file mode 100644
index 00000000..50cd628a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py
@@ -0,0 +1,627 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_sequence
+short_description: Create, drop, or alter a PostgreSQL sequence
+description:
+- Allows creating, dropping, or changing the definition of a sequence generator.
+options:
+ sequence:
+ description:
+ - The name of the sequence.
+ required: true
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - The sequence state.
+ - If I(state=absent), other options will be ignored except I(name) and
+ I(schema).
+ default: present
+ choices: [ absent, present ]
+ type: str
+ data_type:
+ description:
+ - Specifies the data type of the sequence. Valid types are bigint, integer,
+ and smallint. bigint is the default. The data type determines the default
+ minimum and maximum values of the sequence. For more info see the
+ documentation
+ U(https://www.postgresql.org/docs/current/sql-createsequence.html).
+ - Supported from PostgreSQL 10.
+ choices: [ bigint, integer, smallint ]
+ type: str
+ increment:
+ description:
+ - Increment specifies which value is added to the current sequence value
+ to create a new value.
+ - A positive value will make an ascending sequence, a negative one a
+ descending sequence. The default value is 1.
+ type: int
+ minvalue:
+ description:
+ - Minvalue determines the minimum value a sequence can generate. The
+ default for an ascending sequence is 1. The default for a descending
+ sequence is the minimum value of the data type.
+ type: int
+ aliases:
+ - min
+ maxvalue:
+ description:
+ - Maxvalue determines the maximum value for the sequence. The default for
+ an ascending sequence is the maximum
+ value of the data type. The default for a descending sequence is -1.
+ type: int
+ aliases:
+ - max
+ start:
+ description:
+ - Start allows the sequence to begin anywhere. The default starting value
+ is I(minvalue) for ascending sequences and I(maxvalue) for descending
+ ones.
+ type: int
+ cache:
+ description:
+ - Cache specifies how many sequence numbers are to be preallocated and
+ stored in memory for faster access. The minimum value is 1 (only one
+ value can be generated at a time, i.e., no cache), and this is also
+ the default.
+ type: int
+ cycle:
+ description:
+ - The cycle option allows the sequence to wrap around when the I(maxvalue)
+ or I(minvalue) has been reached by an ascending or descending sequence
+ respectively. If the limit is reached, the next number generated will be
+ the minvalue or maxvalue, respectively.
+ - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
+ has reached its maximum value will return an error. False (NO CYCLE) is
+ the default.
+ type: bool
+ default: no
+ cascade:
+ description:
+ - Automatically drop objects that depend on the sequence, and in turn all
+ objects that depend on those objects.
+ - Ignored if I(state=present).
+ - Only used with I(state=absent).
+ type: bool
+ default: no
+ rename_to:
+ description:
+ - The new name for the I(sequence).
+ - Works only for existing sequences.
+ type: str
+ owner:
+ description:
+ - Set the owner for the I(sequence).
+ type: str
+ schema:
+ description:
+ - The schema of the I(sequence). This is used to create and relocate
+ a I(sequence) in the given schema.
+ default: public
+ type: str
+ newschema:
+ description:
+ - The new schema for the I(sequence). Will be used for moving a
+ I(sequence) to another I(schema).
+ - Works only for existing sequences.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified I(session_role)
+ must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the I(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - database
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(sequence), I(schema), I(rename_to),
+ I(owner), I(newschema), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the I(db) parameter, the sequence will be created in the database
+ named postgres.
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_tablespace
+- name: CREATE SEQUENCE reference
+ description: Complete reference of the CREATE SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsequence.html
+- name: ALTER SEQUENCE reference
+ description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html
+- name: DROP SEQUENCE reference
+ description: Complete reference of the DROP SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsequence.html
+author:
+- Tobias Birkefeld (@tcraxs)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create an ascending bigint sequence called foobar in the default
+ database
+ community.general.postgresql_sequence:
+ name: foobar
+
+- name: Create an ascending integer sequence called foobar, starting at 101
+ community.general.postgresql_sequence:
+ name: foobar
+ data_type: integer
+ start: 101
+
+- name: Create a descending sequence called foobar, starting at 101 and
+ preallocating 10 sequence numbers in cache
+ community.general.postgresql_sequence:
+ name: foobar
+ increment: -1
+ cache: 10
+ start: 101
+
+- name: Create an ascending sequence called foobar, which cycles between 1 and 10
+ community.general.postgresql_sequence:
+ name: foobar
+ cycle: yes
+ min: 1
+ max: 10
+
+- name: Create an ascending bigint sequence called foobar in the default
+ database with owner foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Rename an existing sequence named foo to bar
+ community.general.postgresql_sequence:
+ name: foo
+ rename_to: bar
+
+- name: Change the schema of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ newschema: foobar
+
+- name: Change the owner of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Drop a sequence called foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ state: absent
+
+- name: Drop a sequence called foobar with cascade
+ community.general.postgresql_sequence:
+ name: foobar
+ cascade: yes
+ state: absent
+'''
+
+RETURN = r'''
+state:
+ description: Sequence state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+sequence:
+ description: Sequence name.
+ returned: always
+ type: str
+ sample: 'foobar'
+queries:
+ description: List of queries that were executed or attempted.
+ returned: always
+ type: list
+ sample: [ "CREATE SEQUENCE \"foo\"" ]
+schema:
+ description: Name of the schema of the sequence
+ returned: always
+ type: str
+ sample: 'foo'
+data_type:
+ description: Shows the current data type of the sequence.
+ returned: always
+ type: str
+ sample: 'bigint'
+increment:
+ description: The value of increment of the sequence. A positive value will
+ make an ascending sequence, a negative one a descending
+ sequence.
+ returned: always
+ type: int
+ sample: '-1'
+minvalue:
+ description: The value of minvalue of the sequence.
+ returned: always
+ type: int
+ sample: '1'
+maxvalue:
+ description: The value of maxvalue of the sequence.
+ returned: always
+ type: int
+ sample: '9223372036854775807'
+start:
+ description: The value of start of the sequence.
+ returned: always
+ type: int
+ sample: '12'
+cycle:
+ description: Shows whether the sequence cycles or not.
+ returned: always
+ type: str
+ sample: 'NO'
+owner:
+ description: Shows the current owner of the sequence
+ after the successful run of the task.
+ returned: always
+ type: str
+ sample: 'postgres'
+newname:
+ description: Shows the new sequence name after rename.
+ returned: on success
+ type: str
+ sample: 'barfoo'
+newschema:
+ description: Shows the new schema of the sequence after schema change.
+ returned: on success
+ type: str
+ sample: 'foobar'
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class Sequence(object):
+ """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ changed (bool) -- whether something was changed after execution
+ executed_queries (list) -- executed queries
+ name (str) -- name of the sequence
+ owner (str) -- name of the owner of the sequence
+ schema (str) -- name of the schema (default: public)
+ data_type (str) -- data type of the sequence
+ start_value (int) -- value of the sequence start
+ minvalue (int) -- minimum value of the sequence
+ maxvalue (int) -- maximum value of the sequence
+ increment (int) -- increment value of the sequence
+ cycle (bool) -- sequence can cycle or not
+ new_name (str) -- name of the renamed sequence
+ new_schema (str) -- name of the new schema
+ exists (bool) -- sequence exists or not
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.name = self.module.params['sequence']
+ self.owner = ''
+ self.schema = self.module.params['schema']
+ self.data_type = ''
+ self.start_value = ''
+ self.minvalue = ''
+ self.maxvalue = ''
+ self.increment = ''
+ self.cycle = ''
+ self.new_name = ''
+ self.new_schema = ''
+ self.exists = False
+ # Collect info
+ self.get_info()
+
+ def get_info(self):
+ """Getter to refresh and get sequence info"""
+ query = ("SELECT "
+ "s.sequence_schema AS schemaname, "
+ "s.sequence_name AS sequencename, "
+ "pg_get_userbyid(c.relowner) AS sequenceowner, "
+ "s.data_type::regtype AS data_type, "
+ "s.start_value AS start_value, "
+ "s.minimum_value AS min_value, "
+ "s.maximum_value AS max_value, "
+ "s.increment AS increment_by, "
+ "s.cycle_option AS cycle "
+ "FROM information_schema.sequences s "
+ "JOIN pg_class c ON c.relname = s.sequence_name "
+ "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE NOT pg_is_other_temp_schema(n.oid) "
+ "AND c.relkind = 'S'::\"char\" "
+ "AND sequence_name = %(name)s "
+ "AND sequence_schema = %(schema)s")
+
+ res = exec_sql(self, query,
+ query_params={'name': self.name, 'schema': self.schema},
+ add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ self.exists = True
+ self.schema = res[0]['schemaname']
+ self.name = res[0]['sequencename']
+ self.owner = res[0]['sequenceowner']
+ self.data_type = res[0]['data_type']
+ self.start_value = res[0]['start_value']
+ self.minvalue = res[0]['min_value']
+ self.maxvalue = res[0]['max_value']
+ self.increment = res[0]['increment_by']
+ self.cycle = res[0]['cycle']
+
+ def create(self):
+ """Implements CREATE SEQUENCE command behavior."""
+ query = ['CREATE SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('data_type'):
+ query.append('AS %s' % self.module.params['data_type'])
+
+ if self.module.params.get('increment'):
+ query.append('INCREMENT BY %s' % self.module.params['increment'])
+
+ if self.module.params.get('minvalue'):
+ query.append('MINVALUE %s' % self.module.params['minvalue'])
+
+ if self.module.params.get('maxvalue'):
+ query.append('MAXVALUE %s' % self.module.params['maxvalue'])
+
+ if self.module.params.get('start'):
+ query.append('START WITH %s' % self.module.params['start'])
+
+ if self.module.params.get('cache'):
+ query.append('CACHE %s' % self.module.params['cache'])
+
+ if self.module.params.get('cycle'):
+ query.append('CYCLE')
+
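+ # For illustration with assumed example parameters, the assembled
+ # statement could look like:
+ #   CREATE SEQUENCE "public"."foo" AS bigint INCREMENT BY 2
+ #   MINVALUE 1 MAXVALUE 1000 START WITH 1 CACHE 10 CYCLE
+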
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def drop(self):
+ """Implements DROP SEQUENCE command behavior."""
+ query = ['DROP SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('cascade'):
+ query.append('CASCADE')
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def rename(self):
+ """Implements ALTER SEQUENCE RENAME TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('RENAME TO "%s"' % self.module.params['rename_to'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_owner(self):
+ """Implements ALTER SEQUENCE OWNER TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('OWNER TO "%s"' % self.module.params['owner'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_schema(self):
+ """Implements ALTER SEQUENCE SET SCHEMA command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('SET SCHEMA "%s"' % self.module.params['newschema'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def __add_schema(self):
+ return '"%s"."%s"' % (self.schema, self.name)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ sequence=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
+ increment=dict(type='int'),
+ minvalue=dict(type='int', aliases=['min']),
+ maxvalue=dict(type='int', aliases=['max']),
+ start=dict(type='int'),
+ cache=dict(type='int'),
+ cycle=dict(type='bool', default=False),
+ schema=dict(type='str', default='public'),
+ cascade=dict(type='bool', default=False),
+ rename_to=dict(type='str'),
+ owner=dict(type='str'),
+ newschema=dict(type='str'),
+ db=dict(type='str', default='', aliases=['login_db', 'database']),
+ session_role=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['rename_to', 'data_type'],
+ ['rename_to', 'increment'],
+ ['rename_to', 'minvalue'],
+ ['rename_to', 'maxvalue'],
+ ['rename_to', 'start'],
+ ['rename_to', 'cache'],
+ ['rename_to', 'cycle'],
+ ['rename_to', 'cascade'],
+ ['rename_to', 'owner'],
+ ['rename_to', 'newschema'],
+ ['cascade', 'data_type'],
+ ['cascade', 'increment'],
+ ['cascade', 'minvalue'],
+ ['cascade', 'maxvalue'],
+ ['cascade', 'start'],
+ ['cascade', 'cache'],
+ ['cascade', 'cycle'],
+ ['cascade', 'owner'],
+ ['cascade', 'newschema'],
+ ]
+ )
+
+ if not module.params["trust_input"]:
+ check_input(
+ module,
+ module.params['sequence'],
+ module.params['schema'],
+ module.params['rename_to'],
+ module.params['owner'],
+ module.params['newschema'],
+ module.params['session_role'],
+ )
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+
+ # Disable autocommit in check_mode so that changes can be rolled back later:
+ autocommit = not module.check_mode
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ data = Sequence(module, cursor)
+
+ # Set defaults:
+ changed = False
+
+ # Create new sequence
+ if not data.exists and module.params['state'] == 'present':
+ if module.params.get('rename_to'):
+ module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
+ if module.params.get('newschema'):
+ module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
+
+ changed = data.create()
+
+ # Drop non-existing sequence
+ elif not data.exists and module.params['state'] == 'absent':
+ # Nothing to do
+ changed = False
+
+ # Drop existing sequence
+ elif data.exists and module.params['state'] == 'absent':
+ changed = data.drop()
+
+ # Rename sequence
+ if data.exists and module.params.get('rename_to'):
+ if data.name != module.params['rename_to']:
+ changed = data.rename()
+ if changed:
+ data.new_name = module.params['rename_to']
+
+ # Refresh information
+ if module.params['state'] == 'present':
+ data.get_info()
+
+ # Change owner, schema and settings
+ if module.params['state'] == 'present' and data.exists:
+ # change owner
+ if module.params.get('owner'):
+ if data.owner != module.params['owner']:
+ changed = data.set_owner()
+
+ # Set schema
+ if module.params.get('newschema'):
+ if data.schema != module.params['newschema']:
+ changed = data.set_schema()
+ if changed:
+ data.new_schema = module.params['newschema']
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ sequence=data.name,
+ queries=data.executed_queries,
+ schema=data.schema,
+ data_type=data.data_type,
+ increment=data.increment,
+ minvalue=data.minvalue,
+ maxvalue=data.maxvalue,
+ start=data.start_value,
+ cycle=data.cycle,
+ owner=data.owner,
+ )
+
+ if module.params['state'] == 'present':
+ if data.new_name:
+ kw['newname'] = data.new_name
+ if data.new_schema:
+ kw['newschema'] = data.new_schema
+
+ elif module.params['state'] == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py
new file mode 100644
index 00000000..737bded5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py
@@ -0,0 +1,447 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+ - Allows changing a PostgreSQL server configuration parameter.
+ - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
+ - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
+ - It can be more convenient and safer than the traditional method of manually editing the postgresql.conf file.
+ - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+ which is read in addition to postgresql.conf.
+ - The module allows resetting a parameter to boot_val (the cluster initial value) with I(reset=yes), or removing the parameter
+ string from postgresql.auto.conf and reloading with I(value=default) (for settings with postmaster context a restart is required).
+ - After a change you can see the previous and the new parameter value and other information
+ in the Ansible output using the returned values and the M(ansible.builtin.debug) module.
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter.
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+ - To remove a parameter string from postgresql.auto.conf and
+ reload the server configuration, you must pass I(value=default).
+ With I(value=default) the playbook always returns C(changed=true).
+ type: str
+ reset:
+ description:
+ - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supported version of PostgreSQL is 9.4 and later.
+- Note that changing a setting with 'postmaster' context can return C(changed=true)
+ when actually nothing has changed, because the same value may be presented in
+ several different forms, for example, 1024MB, 1GB, etc., while in the pg_settings
+ system view it can be stored as 131072 pages of 8kB.
+ The final check cannot compare the parameter value because the server has
+ not been restarted and the value in pg_settings has not been updated yet.
+- For some parameters a restart of the PostgreSQL server is required.
+ See the official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: community.general.postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ community.general.postgresql_set:
+ name: wal_keep_segments
+ reset: yes
+
+# Set the work_mem parameter to 32MB, show what has been changed, and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ community.general.postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- ansible.builtin.debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Note that a restart of the PostgreSQL server may be required for some parameters.
+# In this situation you see the same value in prev_val_pretty and value_pretty, but 'changed=True'
+# (if you passed a value different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ community.general.postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ community.general.postgresql_set:
+ name: wal_log_hints
+ value: default
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+ description: Whether a restart of the PostgreSQL server is required to apply the change.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+ description: Information about previous state of the parameter.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+ description: Information about current state of the parameter.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+ - Dictionary that contains the current parameter value (at the time of playbook finish).
+ - Note that for the change to take effect, some parameters require a restart of the PostgreSQL server.
+ - Returns the current value in the check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# To allow setting values like 1mb instead of 1MB, etc.:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
+ query = ("SELECT name, setting, unit, context, boot_val "
+ "FROM pg_settings WHERE name = %(name)s")
+ try:
+ cursor.execute(query, {'name': name})
+ info = cursor.fetchall()
+ cursor.execute("SHOW %s" % name)
+ val = cursor.fetchone()
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ raw_val = info[0][1]
+ unit = info[0][2]
+ context = info[0][3]
+ boot_val = info[0][4]
+
+ if val[0] == 'True':
+ val[0] = 'on'
+ elif val[0] == 'False':
+ val[0] = 'off'
+
+ if unit == 'kB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024
+
+ unit = 'b'
+
+ elif unit == 'MB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024 * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024 * 1024
+
+ unit = 'b'
+
+ return (val[0], raw_val, unit, boot_val, context)
+
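+# For illustration, with an assumed work_mem of 4MB the tuple returned by
+# param_get would look like: ('4MB', 4194304, 'b', 4194304, 'user')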
+
+def pretty_to_bytes(pretty_val):
+ # The function returns a value in bytes
+ # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
+ # Otherwise it returns the passed argument.
+
+ val_in_bytes = None
+
+ if 'kB' in pretty_val:
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024
+
+ elif 'MB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024
+
+ elif 'GB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024
+
+ elif 'TB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+ elif 'B' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part
+
+ else:
+ return pretty_val
+
+ return val_in_bytes
+
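+# A quick sketch of the conversions above (assumed example values):
+#   pretty_to_bytes('1kB') -> 1024
+#   pretty_to_bytes('4MB') -> 4194304
+#   pretty_to_bytes('on')  -> 'on'  (no size unit, returned unchanged)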
+
+def param_set(cursor, module, name, value, context):
+ try:
+ if str(value).lower() == 'default':
+ query = "ALTER SYSTEM SET %s = DEFAULT" % name
+ else:
+ query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+ cursor.execute(query)
+
+ if context != 'postmaster':
+ cursor.execute("SELECT pg_reload_conf()")
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ return True
+
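+# For illustration with assumed values, param_set issues SQL such as:
+#   ALTER SYSTEM SET work_mem = '64MB'
+#   SELECT pg_reload_conf()  -- skipped for parameters with 'postmaster' context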
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', aliases=['login_db']),
+ value=dict(type='str'),
+ reset=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ reset = module.params['reset']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, name, value, session_role)
+
+ # Allow passing values like 1mb instead of 1MB, etc.:
+ if value:
+ for unit in POSSIBLE_SIZE_UNITS:
+ if value[:-2].isdigit() and unit in value[-2:]:
+ value = value.upper()
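+ # e.g. an assumed '32mb' is normalized to '32MB' here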
+
+ if value is not None and reset:
+ module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
+
+ if value is None and not reset:
+ module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ kw = {}
+ # Check server version (needs 9.4 or later):
+ ver = db_connection.server_version
+ if ver < PG_REQ_VER:
+ module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
+ kw = dict(
+ changed=False,
+ restart_required=False,
+ value_pretty="",
+ prev_val_pretty="",
+ value={"value": "", "unit": ""},
+ )
+ kw['name'] = name
+ db_connection.close()
+ module.exit_json(**kw)
+
+ # Set default returned values:
+ restart_required = False
+ changed = False
+ kw['name'] = name
+ kw['restart_required'] = False
+
+ # Get info about param state:
+ res = param_get(cursor, module, name)
+ current_value = res[0]
+ raw_val = res[1]
+ unit = res[2]
+ boot_val = res[3]
+ context = res[4]
+
+ if value == 'True':
+ value = 'on'
+ elif value == 'False':
+ value = 'off'
+
+ kw['prev_val_pretty'] = current_value
+ kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
+ kw['context'] = context
+
+ # Do job
+ if context == "internal":
+ module.fail_json(msg="%s: cannot be changed (internal context). See "
+ "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
+
+ if context == "postmaster":
+ restart_required = True
+
+ # If check_mode, just compare and exit:
+ if module.check_mode:
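+ # Compare in bytes so that equivalent spellings (e.g. an assumed
+ # '1GB' vs '1024MB') do not produce a false 'changed':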
+ if pretty_to_bytes(value) == pretty_to_bytes(current_value):
+ kw['changed'] = False
+
+ else:
+ kw['value_pretty'] = value
+ kw['changed'] = True
+
+ # Always return the current raw value in check_mode:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+ # Set param (value can be an empty string):
+ if value is not None and value != current_value:
+ changed = param_set(cursor, module, name, value, context)
+
+ kw['value_pretty'] = value
+
+ # Reset param:
+ elif reset:
+ if raw_val == boot_val:
+ # nothing to change, exit:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ module.exit_json(**kw)
+
+ changed = param_set(cursor, module, name, boot_val, context)
+
+ cursor.close()
+ db_connection.close()
+
+ # Reconnect and recheck current value:
+ if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ res = param_get(cursor, module, name)
+ # f_ means 'final'
+ f_value = res[0]
+ f_raw_val = res[1]
+
+ if raw_val == f_raw_val:
+ changed = False
+
+ else:
+ changed = True
+
+ kw['value_pretty'] = f_value
+ kw['value'] = dict(
+ value=f_raw_val,
+ unit=unit,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ kw['changed'] = changed
+ kw['restart_required'] = restart_required
+
+ if restart_required and changed:
+ module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py
new file mode 100644
index 00000000..435a6c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: postgresql_slot
+short_description: Add or remove replication slots from a PostgreSQL database
+description:
+- Add or remove physical or logical replication slots from a PostgreSQL database.
+
+options:
+ name:
+ description:
+ - Name of the replication slot to add or remove.
+ type: str
+ required: yes
+ aliases:
+ - slot_name
+ slot_type:
+ description:
+ - Slot type.
+ type: str
+ default: physical
+ choices: [ logical, physical ]
+ state:
+ description:
+ - The slot state.
+ - I(state=present) implies the slot must be present in the system.
+ - I(state=absent) implies the slot must be removed from the system.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ immediately_reserve:
+ description:
+ - Optional parameter that, when C(yes), specifies that the LSN for this replication slot be reserved
+ immediately; otherwise, the default C(no) specifies that the LSN is reserved on the first connection
+ from a streaming replication client.
+ - Available from PostgreSQL version 9.6.
+ - Used only with I(slot_type=physical).
+ - Mutually exclusive with I(slot_type=logical).
+ type: bool
+ default: no
+ output_plugin:
+ description:
+ - All logical slots must indicate which output plugin decoder they're using.
+ - This parameter does not apply to physical slots.
+ - It will be ignored with I(slot_type=physical).
+ type: str
+ default: "test_decoding"
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- Physical replication slots were introduced to PostgreSQL with version 9.4,
+ while logical replication slots were added beginning with version 10.0.
+
+seealso:
+- name: PostgreSQL pg_replication_slots view reference
+ description: Complete reference of the PostgreSQL pg_replication_slots view.
+ link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
+- name: PostgreSQL streaming replication protocol reference
+ description: Complete reference of the PostgreSQL streaming replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-replication.html
+- name: PostgreSQL logical replication protocol reference
+ description: Complete reference of the PostgreSQL logical replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
+
+author:
+- John Scalia (@jscalia)
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create physical_one physical slot if it doesn't exist
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+
+- name: Remove physical_one slot if it exists
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+ state: absent
+
+- name: Create logical_slot_one logical slot in the database acme if it doesn't exist
+ community.general.postgresql_slot:
+ name: logical_slot_one
+ slot_type: logical
+ state: present
+ output_plugin: custom_decoder_one
+ db: "acme"
+
+- name: Remove logical_one slot if it exists from the cluster running on another host and a non-standard port
+ community.general.postgresql_slot:
+ name: logical_one
+ login_host: mydatabase.example.org
+ port: 5433
+ login_user: ourSuperuser
+ login_password: thePassword
+ state: absent
+'''
+
+RETURN = r'''
+name:
+ description: Name of the slot.
+ returned: always
+ type: str
+ sample: "physical_one"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgSlot(object):
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.kind = ''
+ self.changed = False
+ self.executed_queries = []
+ self.__slot_exists()
+
+ def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
+ if self.exists:
+ if self.kind == kind:
+ return False
+ else:
+ self.module.warn("slot with name '%s' already exists "
+ "but has another type '%s'" % (self.name, self.kind))
+ return False
+
+ if just_check:
+ return None
+
+ if kind == 'physical':
+ # Check server version (immediately_reserve requires 9.6+):
+ if self.cursor.connection.server_version < 96000:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s)"
+
+ else:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
+
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'i_reserve': immediately_reserve},
+ return_bool=True)
+
+ elif kind == 'logical':
+ query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True)
+
+ def drop(self):
+ if not self.exists:
+ return False
+
+ query = "SELECT pg_drop_replication_slot(%(name)s)"
+ self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)
+
+ def __slot_exists(self):
+ query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.kind = res[0][0]
+
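+# For illustration with assumed slot names, the methods above issue SQL such as:
+#   SELECT pg_create_physical_replication_slot('physical_one', false)
+#   SELECT pg_create_logical_replication_slot('logical_one', 'test_decoding')
+#   SELECT pg_drop_replication_slot('physical_one')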
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", aliases=["login_db"]),
+ name=dict(type="str", required=True, aliases=["slot_name"]),
+ slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
+ immediately_reserve=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ output_plugin=dict(type="str", default="test_decoding"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ slot_type = module.params["slot_type"]
+ immediately_reserve = module.params["immediately_reserve"]
+ state = module.params["state"]
+ output_plugin = module.params["output_plugin"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ if immediately_reserve and slot_type == 'logical':
+ module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
+
+ # When slot_type is logical and parameter db is not passed,
+ # the default database will be used to create the slot and
+ # the user should know about this.
+ # When the slot type is physical,
+ # it doesn't matter which database will be used
+ # because physical slots are global objects.
+ warn_db_default = slot_type == 'logical'
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##################################
+ # Create an object and do main job
+ pg_slot = PgSlot(module, cursor, name)
+
+ changed = False
+
+ if module.check_mode:
+ if state == "present":
+ if not pg_slot.exists:
+ changed = True
+
+ pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
+
+ elif state == "absent":
+ if pg_slot.exists:
+ changed = True
+ else:
+ if state == "absent":
+ pg_slot.drop()
+
+ elif state == "present":
+ pg_slot.create(slot_type, immediately_reserve, output_plugin)
+
+ changed = pg_slot.changed
+
+ db_connection.close()
+ module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py
new file mode 100644
index 00000000..0e2b3612
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_subscription
+short_description: Add, update, or remove PostgreSQL subscription
+description:
+- Add, update, or remove PostgreSQL subscription.
+version_added: '0.2.0'
+
+options:
+ name:
+ description:
+ - Name of the subscription to add, update, or remove.
+ type: str
+ required: yes
+ db:
+ description:
+ - Name of the database to connect to and where
+ the subscription state will be changed.
+ aliases: [ login_db ]
+ type: str
+ required: yes
+ state:
+ description:
+ - The subscription state.
+ - C(present) implies that if I(name) subscription doesn't exist, it will be created.
+ - C(absent) implies that if I(name) subscription exists, it will be removed.
+ - C(refresh) implies that if I(name) subscription exists, it will be refreshed.
+ Fetches missing table information from the publisher. Always returns C(changed=true).
+ This will start replication of tables that were added to the subscribed-to publications
+ since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
+ The existing data in the publications that are being subscribed to
+ should be copied once the replication starts.
+ - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
+ type: str
+ choices: [ absent, present, refresh ]
+ default: present
+ owner:
+ description:
+ - Subscription owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ - Ignored when I(state) is not C(present).
+ type: str
+ publications:
+ description:
+ - The publication names on the publisher to use for the subscription.
+ - Ignored when I(state) is not C(present).
+ type: list
+ elements: str
+ connparams:
+ description:
+ - Dictionary of connection parameters (param-value pairs) used to connect to the publisher.
+ - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ cascade:
+ description:
+ - Drop subscription dependencies. Has effect with I(state=absent) only.
+ - Ignored when I(state) is not C(absent).
+ type: bool
+ default: false
+ subsparams:
+ description:
+ - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
+ - For updating the subscription, the allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
+ - See available parameters to create a new subscription
+ on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(publications), I(owner),
+ I(session_role), I(connparams), I(subsparams) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- PostgreSQL version must be 10 or greater.
+
+seealso:
+- module: community.general.postgresql_publication
+- module: community.general.postgresql_info
+- name: CREATE SUBSCRIPTION reference
+ description: Complete reference of the CREATE SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsubscription.html
+- name: ALTER SUBSCRIPTION reference
+ description: Complete reference of the ALTER SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersubscription.html
+- name: DROP SUBSCRIPTION reference
+ description: Complete reference of the DROP SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
+
+author:
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: >
+ Create acme subscription in mydb database using acme_publication and
+ the following connection parameters to connect to the publisher.
+ Set the subscription owner as alice.
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ publications: acme_publication
+ owner: alice
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ dbname: mydb
+
+- name: Assuming that acme subscription exists, try to change conn parameters
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ connect_timeout: 100
+
+- name: Refresh acme publication
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: refresh
+
+- name: Drop acme subscription from mydb with dependencies (cascade=yes)
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: absent
+ cascade: yes
+
+- name: Assuming that acme subscription exists and enabled, disable the subscription
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ subsparams:
+ enabled: no
+'''
+
+RETURN = r'''
+name:
+ description:
+ - Name of the subscription.
+ returned: always
+ type: str
+ sample: acme
+exists:
+ description:
+ - Flag that indicates whether the subscription exists at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
+initial_state:
+ description: Subscription configuration at the beginning of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+final_state:
+ description: Subscription configuration at the end of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+'''
+
+from copy import deepcopy
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
+
+
+################################
+# Module functions and classes #
+################################
+
+def convert_conn_params(conn_dict):
+ """Converts the passed connection dictionary to string.
+
+ Args:
+ conn_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Connection string.
+ """
+ conn_list = []
+ for (param, val) in iteritems(conn_dict):
+ conn_list.append('%s=%s' % (param, val))
+
+ return ' '.join(conn_list)
+
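+# For example (assumed input), {'host': '127.0.0.1', 'port': 5432} is rendered
+# as the libpq-style string 'host=127.0.0.1 port=5432'.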
+
+def convert_subscr_params(params_dict):
+ """Converts the passed params dictionary to string.
+
+ Args:
+ params_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Parameters string.
+ """
+ params_list = []
+ for (param, val) in iteritems(params_dict):
+ if val is False:
+ val = 'false'
+ elif val is True:
+ val = 'true'
+
+ params_list.append('%s = %s' % (param, val))
+
+ return ', '.join(params_list)
+
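+# For example (assumed input), {'enabled': False, 'copy_data': True} becomes
+# 'enabled = false, copy_data = true', suitable for a WITH (...) clause.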
+
+class PgSubscription():
+ """Class to work with PostgreSQL subscription.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the subscription.
+ db (str): The database name the subscription will be associated with.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of subscription.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with subscription attributes.
+ exists (bool): Flag indicates the subscription exists or not.
+ """
+
+ def __init__(self, module, cursor, name, db):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.db = db
+ self.executed_queries = []
+ self.attrs = {
+ 'owner': None,
+ 'enabled': None,
+ 'synccommit': None,
+ 'conninfo': {},
+ 'slotname': None,
+ 'publications': [],
+ }
+ self.empty_attrs = deepcopy(self.attrs)
+ self.exists = self.check_subscr()
+
+ def get_info(self):
+ """Refresh the subscription information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_subscr()
+ return self.attrs
+
+ def check_subscr(self):
+ """Check the subscription and refresh ``self.attrs`` subscription attribute.
+
+ Returns:
+ True if the subscription with ``self.name`` exists, False otherwise.
+ """
+
+ subscr_info = self.__get_general_subscr_info()
+
+ if not subscr_info:
+ # The subscription does not exist:
+ self.attrs = deepcopy(self.empty_attrs)
+ return False
+
+ self.attrs['owner'] = subscr_info.get('rolname')
+ self.attrs['enabled'] = subscr_info.get('subenabled')
+ self.attrs['synccommit'] = subscr_info.get('subsynccommit')
+ self.attrs['slotname'] = subscr_info.get('subslotname')
+ self.attrs['publications'] = subscr_info.get('subpublications')
+ if subscr_info.get('subconninfo'):
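+ # e.g. an assumed 'host=127.0.0.1 port=5432' string is parsed
+ # into {'host': '127.0.0.1', 'port': 5432}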
+ for param in subscr_info['subconninfo'].split(' '):
+ tmp = param.split('=')
+ try:
+ self.attrs['conninfo'][tmp[0]] = int(tmp[1])
+ except ValueError:
+ self.attrs['conninfo'][tmp[0]] = tmp[1]
+
+ return True
+
+ def create(self, connparams, publications, subsparams, check_mode=True):
+ """Create the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (str): Parameters string in WITH () clause style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been created, otherwise False.
+ """
+ query_fragments = []
+ query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
+ "PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))
+
+ if subsparams:
+ query_fragments.append("WITH (%s)" % subsparams)
+
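+ # For illustration with assumed values, the assembled statement looks like:
+ #   CREATE SUBSCRIPTION acme CONNECTION 'host=127.0.0.1 port=5432'
+ #   PUBLICATION acme_publication WITH (copy_data = true)
+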
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ return changed
+
+ def update(self, connparams, publications, subsparams, check_mode=True):
+ """Update the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (dict): Dictionary of optional parameters.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if subscription has been updated, otherwise False.
+ """
+ changed = False
+
+ if connparams:
+ if connparams != self.attrs['conninfo']:
+ changed = self.__set_conn_params(convert_conn_params(connparams),
+ check_mode=check_mode)
+
+ if publications:
+ if sorted(self.attrs['publications']) != sorted(publications):
+ changed = self.__set_publications(publications, check_mode=check_mode)
+
+ if subsparams:
+ params_to_update = []
+
+ for (param, value) in iteritems(subsparams):
+ if param == 'enabled':
+ if self.attrs['enabled'] and value is False:
+ changed = self.enable(enabled=False, check_mode=check_mode)
+ elif not self.attrs['enabled'] and value is True:
+ changed = self.enable(enabled=True, check_mode=check_mode)
+
+ elif param == 'synchronous_commit':
+ if self.attrs['synccommit'] is True and value is False:
+ params_to_update.append("%s = false" % param)
+ elif self.attrs['synccommit'] is False and value is True:
+ params_to_update.append("%s = true" % param)
+
+ elif param == 'slot_name':
+ if self.attrs['slotname'] and self.attrs['slotname'] != value:
+ params_to_update.append("%s = %s" % (param, value))
+
+ else:
+ self.module.warn("Parameter '%s' is not in params supported "
+ "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))
+
+ if params_to_update:
+ changed = self.__set_params(params_to_update, check_mode=check_mode)
+
+ return changed
+
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the subscription.
+
+ Kwargs:
+ cascade (bool): Flag indicates that the subscription needs to be deleted
+ with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been removed, otherwise False.
+ """
+ if self.exists:
+ query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def set_owner(self, role, check_mode=True):
+ """Set a subscription owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a subscription owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def refresh(self, check_mode=True):
+ """Refresh publication.
+
+ Fetches missing table info from publisher.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_params(self, params_to_update, check_mode=True):
+ """Update optional subscription parameters.
+
+ Args:
+ params_to_update (list): Parameters with values to update.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_conn_params(self, connparams, check_mode=True):
+ """Update connection parameters.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_publications(self, publications, check_mode=True):
+ """Update publications.
+
+ Args:
+ publications (list): Publications on the master to use.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def enable(self, enabled=True, check_mode=True):
+ """Enable or disable the subscription.
+
+ Kwargs:
+ enabled (bool): Flag that indicates whether the subscription needs
+ to be enabled or disabled.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if enabled:
+ query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
+ else:
+ query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name
+
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __get_general_subscr_info(self):
+ """Get and return general subscription information.
+
+ Returns:
+ Dict with subscription information if successful, False otherwise.
+ """
+ query = ("SELECT d.datname, r.rolname, s.subenabled, "
+ "s.subconninfo, s.subslotname, s.subsynccommit, "
+ "s.subpublications FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid "
+ "WHERE s.subname = %(name)s AND d.datname = %(db)s")
+
+ result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+ Note: If we need just to get information from the database,
+ we use ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', required=True, aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
+ publications=dict(type='list', elements='str'),
+ connparams=dict(type='dict'),
+ cascade=dict(type='bool', default=False),
+ owner=dict(type='str'),
+ subsparams=dict(type='dict'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ db = module.params['db']
+ name = module.params['name']
+ state = module.params['state']
+ publications = module.params['publications']
+ cascade = module.params['cascade']
+ owner = module.params['owner']
+ subsparams = module.params['subsparams']
+ connparams = module.params['connparams']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not subsparams:
+ subsparams_str = None
+ else:
+ subsparams_str = convert_subscr_params(subsparams)
+
+ if not connparams:
+ connparams_str = None
+ else:
+ connparams_str = convert_conn_params(connparams)
+
+ check_input(module, name, publications, owner, session_role,
+ connparams_str, subsparams_str)
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when state is not absent')
+
+ if state != 'present':
+ if owner:
+ module.warn("parameter 'owner' is ignored when state is not 'present'")
+ if publications:
+ module.warn("parameter 'publications' is ignored when state is not 'present'")
+ if connparams:
+ module.warn("parameter 'connparams' is ignored when state is not 'present'")
+ if subsparams:
+ module.warn("parameter 'subsparams' is ignored when state is not 'present'")
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We check subscription state without DML queries execution, so set autocommit:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Set defaults:
+ changed = False
+ initial_state = {}
+ final_state = {}
+
+ ###################################
+ # Create object and do rock'n'roll:
+ subscription = PgSubscription(module, cursor, name, db)
+
+ if subscription.exists:
+ initial_state = deepcopy(subscription.attrs)
+ final_state = deepcopy(initial_state)
+
+ if state == 'present':
+ if not subscription.exists:
+ if subsparams:
+ subsparams = convert_subscr_params(subsparams)
+
+ if connparams:
+ connparams = convert_conn_params(connparams)
+
+ changed = subscription.create(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ else:
+ changed = subscription.update(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ if owner and subscription.attrs['owner'] != owner:
+ changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
+
+ elif state == 'absent':
+ changed = subscription.drop(cascade, check_mode=module.check_mode)
+
+ elif state == 'refresh':
+ if not subscription.exists:
+ module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
+
+ # Always returns True:
+ changed = subscription.refresh(check_mode=module.check_mode)
+
+ # Get final subscription info:
+ final_state = subscription.get_info()
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Return ret values and exit:
+ module.exit_json(changed=changed,
+ name=name,
+ exists=subscription.exists,
+ queries=subscription.executed_queries,
+ initial_state=initial_state,
+ final_state=final_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py
new file mode 100644
index 00000000..5260853d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows you to create, drop, rename, or truncate a table, or change some table attributes.
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+ - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ required: false
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: no
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+ - Keywords that are used with the I(like) parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
+ Requires I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+ - Columns that are needed, each defined as a string containing the column name, type, and optional constraints.
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: no
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of the database to connect to and where the table will be created.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the I(db) parameter, tables will be created in the database
+ named postgres.
+- PostgreSQL allows creating columnless tables, so the I(columns) parameter is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: community.general.postgresql_sequence
+- module: community.general.postgresql_idx
+- module: community.general.postgresql_info
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ community.general.postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ community.general.postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ community.general.postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ community.general.postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ community.general.postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ community.general.postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ community.general.postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ community.general.postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ community.general.postgresql_table:
+ name: foo
+ truncate: yes
+
+- name: Drop table foo from schema acme
+ community.general.postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ community.general.postgresql_table:
+ name: bar
+ state: absent
+ cascade: yes
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Table(object):
+ def __init__(self, name, module, cursor):
+ self.name = name
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'owner': '',
+ 'tblspace': '',
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_queries = []
+
+ def get_info(self):
+ """Getter to refresh and get table info"""
+ self.__exists_in_db()
+
+ def __exists_in_db(self):
+ """Check table exists and refresh info"""
+ if "." in self.name:
+ schema = self.name.split('.')[-2]
+ tblname = self.name.split('.')[-1]
+ else:
+ schema = 'public'
+ tblname = self.name
+
+ query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
+ "FROM pg_tables AS t "
+ "INNER JOIN pg_class AS c ON c.relname = t.tablename "
+ "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
+ "WHERE t.tablename = %(tblname)s "
+ "AND n.nspname = %(schema)s")
+ res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
+ add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ owner=res[0][0],
+ tblspace=res[0][1] if res[0][1] else '',
+ storage_params=res[0][2] if res[0][2] else [],
+ )
+
+ return True
+ else:
+ self.exists = False
+ return False
+
+ def create(self, columns='', params='', tblspace='',
+ unlogged=False, owner=''):
+ """
+ Create table.
+ If table exists, check passed args (params, tblspace, owner) and,
+ if they're different from current, change them.
+ Arguments:
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ columns - column string (comma separated).
+ """
+ name = pg_quote_identifier(self.name, 'table')
+
+ changed = False
+
+ if self.exists:
+            # self.info['tblspace'] is '' (not None) when the table is in the
+            # default tablespace, so compare against an empty value here:
+            if tblspace == 'pg_default' and not self.info['tblspace']:
+                pass  # Because they have the same meaning
+ elif tblspace and self.info['tblspace'] != tblspace:
+ self.set_tblspace(tblspace)
+ changed = True
+
+ if owner and self.info['owner'] != owner:
+ self.set_owner(owner)
+ changed = True
+
+ if params:
+ param_list = [p.strip(' ') for p in params.split(',')]
+
+ new_param = False
+ for p in param_list:
+ if p not in self.info['storage_params']:
+ new_param = True
+
+ if new_param:
+ self.set_stor_params(params)
+ changed = True
+
+ if changed:
+ return True
+ return False
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ if columns:
+ query += " (%s)" % columns
+ else:
+ query += " ()"
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def create_like(self, src_table, including='', tblspace='',
+ unlogged=False, params='', owner=''):
+ """
+ Create table like another table (with similar DDL).
+ Arguments:
+ src_table - source table.
+ including - corresponds to optional INCLUDING expression
+ in CREATE TABLE ... LIKE statement.
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ """
+ changed = False
+
+ name = pg_quote_identifier(self.name, 'table')
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
+
+ if including:
+ including = including.split(',')
+ for i in including:
+ query += " INCLUDING %s" % i
+
+ query += ')'
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def truncate(self):
+ query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(newname, 'table'))
+ return exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, username):
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self, cascade=False):
+ if not self.exists:
+ return False
+
+ query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
+ if cascade:
+ query += " CASCADE"
+ return exec_sql(self, query, return_bool=True)
+
+ def set_tblspace(self, tblspace):
+ query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
+ return exec_sql(self, query, return_bool=True)
+
+ def set_stor_params(self, params):
+ query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
+ return exec_sql(self, query, return_bool=True)
+
+
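+# A minimal usage sketch of the Table helper above (illustrative only;
+# it assumes an AnsibleModule instance and an open psycopg2 cursor, and
+# main() below is the real entry point):
+#
+#   table = Table('acme.foo', module, cursor)
+#   if not table.exists:
+#       table.create(columns='id bigint', tblspace='ssd')
+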
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ table=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', default='', aliases=['login_db']),
+ tablespace=dict(type='str'),
+ owner=dict(type='str'),
+ unlogged=dict(type='bool', default=False),
+ like=dict(type='str'),
+ including=dict(type='str'),
+ rename=dict(type='str'),
+ truncate=dict(type='bool', default=False),
+ columns=dict(type='list', elements='str'),
+ storage_params=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ table = module.params['table']
+ state = module.params['state']
+ tablespace = module.params['tablespace']
+ owner = module.params['owner']
+ unlogged = module.params['unlogged']
+ like = module.params['like']
+ including = module.params['including']
+ newname = module.params['rename']
+ storage_params = module.params['storage_params']
+ truncate = module.params['truncate']
+ columns = module.params['columns']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, table, tablespace, owner, like, including,
+ newname, storage_params, columns, session_role)
+
+ if state == 'present' and cascade:
+ module.warn("cascade=true is ignored when state=present")
+
+ # Check mutual exclusive parameters:
+ if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
+ module.fail_json(msg="%s: state=absent is mutually exclusive with: "
+ "truncate, rename, columns, tablespace, "
+ "including, like, storage_params, unlogged, owner" % table)
+
+ if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: truncate is mutually exclusive with: "
+ "rename, columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: rename is mutually exclusive with: "
+ "columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if like and columns:
+ module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
+ if including and not like:
+ module.fail_json(msg="%s: including param needs like param specified" % table)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ if columns:
+ columns = ','.join(columns)
+
+ ##############
+ # Do main job:
+ table_obj = Table(table, module, cursor)
+
+ # Set default returned values:
+ changed = False
+ kw = {}
+ kw['table'] = table
+ kw['state'] = ''
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+
+ if state == 'absent':
+ changed = table_obj.drop(cascade=cascade)
+
+ elif truncate:
+ changed = table_obj.truncate()
+
+ elif newname:
+ changed = table_obj.rename(newname)
+ q = table_obj.executed_queries
+ table_obj = Table(newname, module, cursor)
+ table_obj.executed_queries = q
+
+ elif state == 'present' and not like:
+ changed = table_obj.create(columns, storage_params,
+ tablespace, unlogged, owner)
+
+ elif state == 'present' and like:
+        changed = table_obj.create_like(like, including, tablespace,
+                                        unlogged, storage_params, owner)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ # Refresh table info for RETURN.
+ # Note, if table has been renamed, it gets info by newname:
+ table_obj.get_info()
+ db_connection.commit()
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+ else:
+ # We just change the table state here
+ # to keep other information about the dropped table:
+ kw['state'] = 'absent'
+
+ kw['queries'] = table_obj.executed_queries
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py
new file mode 100644
index 00000000..2062e6a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py
@@ -0,0 +1,541 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
+# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_tablespace
+short_description: Add or remove PostgreSQL tablespaces from remote hosts
+description:
+- Adds or removes PostgreSQL tablespaces from remote hosts.
+options:
+ tablespace:
+ description:
+ - Name of the tablespace to add or remove.
+ required: true
+ type: str
+ aliases:
+ - name
+ location:
+ description:
+ - Path to the tablespace directory in the file system.
+    - Ensure that the location exists and has the right privileges.
+ type: path
+ aliases:
+ - path
+ state:
+ description:
+ - Tablespace state.
+ - I(state=present) implies the tablespace must be created if it doesn't exist.
+ - I(state=absent) implies the tablespace must be removed if present.
+      I(state=absent) is mutually exclusive with I(location), I(owner), and I(set).
+ - See the Notes section for information about check mode restrictions.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ owner:
+ description:
+ - Name of the role to set as an owner of the tablespace.
+    - If this option is not specified, the tablespace owner is the role that creates the tablespace.
+ type: str
+ set:
+ description:
+ - Dict of tablespace options to set. Supported from PostgreSQL 9.0.
+ - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
+    - When C(reset) is passed as an option's value, the option will be removed if it was previously set.
+ type: dict
+ rename_to:
+ description:
+ - New name of the tablespace.
+    - The new name cannot begin with C(pg_), as such names are reserved for system tablespaces.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(tablespace), I(location), I(owner),
+ I(rename_to), I(session_role), I(settings_list) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
+  support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
+  cannot be run inside a transaction block.
+
+seealso:
+- name: PostgreSQL tablespaces
+ description: General information about PostgreSQL tablespaces.
+ link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
+- name: CREATE TABLESPACE reference
+ description: Complete reference of the CREATE TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtablespace.html
+- name: ALTER TABLESPACE reference
+ description: Complete reference of the ALTER TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertablespace.html
+- name: DROP TABLESPACE reference
+ description: Complete reference of the DROP TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptablespace.html
+
+author:
+- Flavien Chantelot (@Dorn-)
+- Antoine Levy-Lambert (@antoinell)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new tablespace called acme and set bob as its owner
+ community.general.postgresql_tablespace:
+ name: acme
+ owner: bob
+ location: /data/foo
+
+- name: Create a new tablespace called bar with tablespace options
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: 1
+ seq_page_cost: 1
+
+- name: Reset random_page_cost option
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: reset
+
+- name: Rename the tablespace from bar to pcie_ssd
+ community.general.postgresql_tablespace:
+ name: bar
+ rename_to: pcie_ssd
+
+- name: Drop tablespace called bloat
+ community.general.postgresql_tablespace:
+ name: bloat
+ state: absent
+'''
+
+RETURN = r'''
+queries:
+  description: List of queries that the module tried to execute.
+  returned: always
+  type: list
+ sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
+tablespace:
+ description: Tablespace name.
+ returned: always
+ type: str
+ sample: 'ssd'
+owner:
+ description: Tablespace owner.
+ returned: always
+ type: str
+ sample: 'Bob'
+options:
+ description: Tablespace options.
+ returned: always
+ type: dict
+ sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
+location:
+ description: Path to the tablespace in the file system.
+ returned: always
+ type: str
+ sample: '/incredible/fast/ssd'
+newname:
+  description: New tablespace name.
+  returned: if the tablespace has been renamed
+ type: str
+ sample: new_ssd
+state:
+ description: Tablespace state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+'''
+
+try:
+ from psycopg2 import __version__ as PSYCOPG2_VERSION
+ from psycopg2.extras import DictCursor
+ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
+ from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgTablespace(object):
+
+ """Class for working with PostgreSQL tablespaces.
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+ exists (bool) -- flag the tablespace exists in the DB or not
+ owner (str) -- tablespace owner
+ location (str) -- path to the tablespace directory in the file system
+ executed_queries (list) -- list of executed queries
+ new_name (str) -- new name for the tablespace
+        opt_not_supported (bool) -- flag indicating whether tablespace options are supported
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.owner = ''
+ self.settings = {}
+ self.location = ''
+ self.executed_queries = []
+ self.new_name = ''
+ self.opt_not_supported = False
+ # Collect info:
+ self.get_info()
+
+ def get_info(self):
+ """Get tablespace information."""
+ # Check that spcoptions exists:
+ opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'", add_to_executed=False)
+
+ # For 9.1 version and earlier:
+ location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spclocation'", add_to_executed=False)
+ if location:
+ location = 'spclocation'
+ else:
+ location = 'pg_tablespace_location(t.oid)'
+
+ if not opt:
+ self.opt_not_supported = True
+ query = ("SELECT r.rolname, (SELECT Null), %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+ else:
+ query = ("SELECT r.rolname, t.spcoptions, %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+
+ res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
+ query_params={'name': self.name}, add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ if res[0][0]:
+ self.exists = True
+ self.owner = res[0][0]
+
+ if res[0][1]:
+ # Options exist:
+ for i in res[0][1]:
+ i = i.split('=')
+ self.settings[i[0]] = i[1]
+
+ if res[0][2]:
+ # Location exists:
+ self.location = res[0][2]
+
+ def create(self, location):
+ """Create tablespace.
+
+        Return True on success, otherwise return False.
+
+ args:
+ location (str) -- tablespace directory path in the FS
+ """
+ query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self):
+ """Drop tablespace.
+
+        Return True on success, otherwise return False.
+ """
+ return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)
+
+ def set_owner(self, new_owner):
+ """Set tablespace owner.
+
+        Return True on success, otherwise return False.
+
+ args:
+            new_owner (str) -- name of a new owner for the tablespace
+ """
+ if new_owner == self.owner:
+ return False
+
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ """Rename tablespace.
+
+        Return True on success, otherwise return False.
+
+ args:
+            newname (str) -- new name for the tablespace
+ """
+ query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
+ self.new_name = newname
+ return exec_sql(self, query, return_bool=True)
+
+ def set_settings(self, new_settings):
+ """Set tablespace settings (options).
+
+        If some setting has been changed, set changed = True.
+        After the whole settings dict has been handled, return changed.
+
+        args:
+            new_settings (dict) -- dict of new settings
+ """
+ # settings must be a dict {'key': 'value'}
+ if self.opt_not_supported:
+ return False
+
+ changed = False
+
+ # Apply new settings:
+ for i in new_settings:
+ if new_settings[i] == 'reset':
+ if i in self.settings:
+ changed = self.__reset_setting(i)
+ self.settings[i] = None
+
+ elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
+ changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
+
+ return changed
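+
+    # For illustration (a sketch, not executed directly): with
+    # self.settings == {'random_page_cost': '1'}, calling
+    # set_settings({'random_page_cost': 'reset', 'seq_page_cost': 1})
+    # resets random_page_cost and sets seq_page_cost to '1'.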
+
+ def __reset_setting(self, setting):
+ """Reset tablespace setting.
+
+        Return True on success, otherwise return False.
+
+ args:
+            setting (str) -- name of the setting to reset
+ """
+ query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+ def __set_setting(self, setting):
+ """Set tablespace setting.
+
+        Return True on success, otherwise return False.
+
+ args:
+ setting (str) -- string in format "setting_name = 'setting_value'"
+ """
+ query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ tablespace=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ location=dict(type='path', aliases=['path']),
+ owner=dict(type='str'),
+ set=dict(type='dict'),
+ rename_to=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ tablespace = module.params["tablespace"]
+ state = module.params["state"]
+ location = module.params["location"]
+ owner = module.params["owner"]
+ rename_to = module.params["rename_to"]
+ settings = module.params["set"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if state == 'absent' and (location or owner or rename_to or settings):
+ module.fail_json(msg="state=absent is mutually exclusive location, "
+ "owner, rename_to, and set")
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not settings:
+ settings_list = None
+ else:
+ settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]
+
+ check_input(module, tablespace, location, owner,
+ rename_to, session_role, settings_list)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Change autocommit to False if check_mode:
+ if module.check_mode:
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=False)
+ else:
+ db_connection.set_isolation_level(READ_COMMITTED)
+
+ # Set defaults:
+ autocommit = False
+ changed = False
+
+ ##############
+ # Create PgTablespace object and do main job:
+ tblspace = PgTablespace(module, cursor, tablespace)
+
+ # If tablespace exists with different location, exit:
+ if tblspace.exists and location and location != tblspace.location:
+ module.fail_json(msg="Tablespace '%s' exists with "
+ "different location '%s'" % (tblspace.name, tblspace.location))
+
+ # Create new tablespace:
+ if not tblspace.exists and state == 'present':
+ if rename_to:
+ module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
+
+ if not location:
+ module.fail_json(msg="'location' parameter must be passed with "
+ "state=present if the tablespace doesn't exist")
+
+ # Because CREATE TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.create(location)
+
+ # Drop non-existing tablespace:
+ elif not tblspace.exists and state == 'absent':
+ # Nothing to do:
+ module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
+
+ # Drop existing tablespace:
+ elif tblspace.exists and state == 'absent':
+ # Because DROP TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.drop()
+
+ # Rename tablespace:
+ elif tblspace.exists and rename_to:
+ if tblspace.name != rename_to:
+ changed = tblspace.rename(rename_to)
+
+ if state == 'present':
+ # Refresh information:
+ tblspace.get_info()
+
+ # Change owner and settings:
+ if state == 'present' and tblspace.exists:
+ if owner:
+ changed = tblspace.set_owner(owner)
+
+ if settings:
+ changed = tblspace.set_settings(settings)
+
+ tblspace.get_info()
+
+ # Rollback if it's possible and check_mode:
+ if not autocommit:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ tablespace=tblspace.name,
+ owner=tblspace.owner,
+ queries=tblspace.executed_queries,
+ options=tblspace.settings,
+ location=tblspace.location,
+ )
+
+ if state == 'present':
+ kw['state'] = 'present'
+
+ if tblspace.new_name:
+ kw['newname'] = tblspace.new_name
+
+ elif state == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py
new file mode 100644
index 00000000..79c987a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py
@@ -0,0 +1,993 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance
+description:
+- Creates, alters, or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- You can also use it to grant or revoke user's privileges in a particular database.
+- You cannot remove a user while it still has any privileges granted to it in any database.
+- Set I(fail_on_user) to C(no) to make the module ignore failures when trying to remove a user.
+ In this case, the module reports if changes happened as usual and separately reports
+ whether the user has been removed or not.
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+    - Set the user's password. Before Ansible 1.4 this option was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password is automatically hashed when saved into the
+ database if I(encrypted) is set, otherwise it is saved in
+ plain text format.
+ - When passing an MD5-hashed password, you must generate it with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is
+ C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`").
+    - Note that if the provided password string is already in MD5-hashed
+      format, then it is used as-is, regardless of the I(encrypted) option.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and where user's permissions are granted.
+ type: str
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+    - If C(yes), fails when the user (role) cannot be removed. Otherwise just logs and continues.
+ default: yes
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ you can define the user's privileges for the database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+    - To create a simple role to use like a group, use the C(NOLOGIN) flag.
+ type: str
+ choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+ '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+ session_role:
+ description:
+ - Switch to session role after connecting.
+ - The specified session role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - You can specify an unhashed password, and PostgreSQL ensures
+ the stored password is hashed when I(encrypted=yes) is set.
+ If you specify a hashed password, the module uses it as-is,
+ regardless of the setting of I(encrypted).
+ - "Note: Postgresql 10 and newer does not support unhashed passwords."
+ - Previous to Ansible 2.6, this was C(no) by default.
+ default: yes
+ type: bool
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+ - If set to C('infinity'), user's password never expires.
+ - Note that this value must be a valid SQL date and time type.
+ type: str
+ no_password_changes:
+ description:
+ - If C(yes), does not inspect the database for password changes.
+ Useful when C(pg_authid) is not accessible (such as in AWS RDS).
+ Otherwise, makes password changes as necessary.
+ default: no
+ type: bool
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ ssl_mode:
+ description:
+ - Determines how an SSL session is negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, verifies that the server's certificate is signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ groups:
+ description:
+ - The list of groups (roles) that you want to grant to the user.
+ type: list
+ elements: str
+ comment:
+ description:
+ - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), checks whether values of options I(name), I(password), I(privs), I(expires),
+ I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections through the options are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- The module creates a user (role) with login privilege by default.
+  Use C(NOLOGIN) in I(role_attr_flags) to change this behaviour.
+- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles).
+ You may not specify password or role_attr_flags when the C(PUBLIC) user is specified.
+- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer.
+  On earlier versions the whole hashed string is used as the password.
+- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable
+ C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
+- Supports C(check_mode).
+seealso:
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- module: community.general.postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+- name: PostgreSQL SASL Authentication
+ description: Complete reference of the PostgreSQL SASL Authentication.
+ link: https://www.postgresql.org/docs/current/sasl-authentication.html
+author:
+- Ansible Core Team
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Connect to acme database, create django user, and grant access to database and products table
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5-hashed),
+# and grant privilege to create other databases and demote rails from super user status if user exists
+- name: Create rails user, set MD5-hashed password, grant privs
+ community.general.postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ role_attr_flags: CREATEDB,NOSUPERUSER
+
+- name: Connect to acme database and remove test user privileges from there
+ community.general.postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
+
+- name: Connect to test database, remove test user from cluster
+ community.general.postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+- name: Connect to acme database and set user's password with no expire date
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
+
+- name: Connect to test database and remove an existing user's password
+ community.general.postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+- name: Create user test and grant group user_ro and user_rw to it
+ community.general.postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
+
+# Create user with a cleartext password if it does not exist or update its password.
+# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10)
+- name: Create appclient user with SCRAM-hashed password
+ community.general.postgresql_user:
+ name: appclient
+ password: "secret123"
+ environment:
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5, sha256
+import hmac
+from base64 import b64decode
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ SQLParseError,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import iteritems
+import ansible_collections.community.general.plugins.module_utils.saslprep as saslprep
+
+try:
+    # pbkdf2_hmac is missing on Python 2.6; we can safely assume
+    # that a PostgreSQL 10 capable instance has at least Python 2.7 installed
+ from hashlib import pbkdf2_hmac
+ pbkdf2_found = True
+except ImportError:
+ pbkdf2_found = False
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$'
+
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
+# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+
+class InvalidFlagsError(Exception):
+ pass
+
+
+class InvalidPrivsError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def user_exists(cursor, user):
+ # The PUBLIC user is a special case that is always there
+ if user == 'PUBLIC':
+ return True
+ query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
+ cursor.execute(query, {'user': user})
+ return cursor.rowcount > 0
+
+
+def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
+ """Create a new database user (role)."""
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ query_password_data = dict(password=password, expires=expires)
+ query = ['CREATE USER "%(user)s"' %
+ {"user": user}]
+ if password is not None and password != '':
+ query.append("WITH %(crypt)s" % {"crypt": encrypted})
+ query.append("PASSWORD %(password)s")
+ if expires is not None:
+ query.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query.append(role_attr_flags)
+ query = ' '.join(query)
+ executed_queries.append(query)
+ cursor.execute(query, query_password_data)
+ return True
+
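+# For illustration (a sketch, not part of the module flow): calling
+# user_add(cursor, 'alice', 's3cret', 'CREATEDB', 'ENCRYPTED', None, None)
+# builds roughly:
+#   CREATE USER "alice" WITH ENCRYPTED PASSWORD %(password)s CREATEDB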
+
+def user_should_we_change_password(current_role_attrs, user, password, encrypted):
+ """Check if we should change the user's password.
+
+ Compare the proposed password with the existing one, comparing
+    hashes if encrypted. If we can't access it, assume yes.
+ """
+
+ if current_role_attrs is None:
+        # on some databases, e.g. AWS RDS instances, there is no access to
+ # the pg_authid relation to check the pre-existing password, so we
+ # just assume password is different
+ return True
+
+ # Do we actually need to do anything?
+ pwchanging = False
+ if password is not None:
+ # Empty password means that the role shouldn't have a password, which
+ # means we need to check if the current password is None.
+ if password == '':
+ if current_role_attrs['rolpassword'] is not None:
+ pwchanging = True
+
+ # SCRAM hashes are represented as a special object, containing hash data:
+ # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>`
+ # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html
+ elif current_role_attrs['rolpassword'] is not None \
+ and pbkdf2_found \
+ and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']):
+
+ r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword'])
+ try:
+ # extract SCRAM params from rolpassword
+ it = int(r.group(1))
+ salt = b64decode(r.group(2))
+ server_key = b64decode(r.group(4))
+ # we'll never need `storedKey` as it is only used for server auth in SCRAM
+ # storedKey = b64decode(r.group(3))
+
+ # from RFC5802 https://tools.ietf.org/html/rfc5802#section-3
+ # SaltedPassword := Hi(Normalize(password), salt, i)
+ # ServerKey := HMAC(SaltedPassword, "Server Key")
+ normalized_password = saslprep.saslprep(to_text(password))
+ salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it)
+
+ server_key_verifier = hmac.new(salted_password, digestmod=sha256)
+ server_key_verifier.update(b'Server Key')
+
+ if server_key_verifier.digest() != server_key:
+ pwchanging = True
+ except Exception:
+ # We assume the password is not scram encrypted
+ # or we cannot check it properly, e.g. due to missing dependencies
+ pwchanging = True
+
+ # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
+ # 3: The size of the 'md5' prefix
+        # When the provided password looks like an MD5-hash, the value of
+        # 'encrypted' is ignored.
+ elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
+ if password != current_role_attrs['rolpassword']:
+ pwchanging = True
+ elif encrypted == 'ENCRYPTED':
+ hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
+ if hashed_password != current_role_attrs['rolpassword']:
+ pwchanging = True
+
+ return pwchanging
+
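+# For illustration: for user 'joe' with password 'secret', the MD5 value
+# compared above is 'md5' + md5(b'secretjoe').hexdigest(), i.e.
+# 3 + 32 = 35 characters in total.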
+
+def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
+ """Change user password and/or attributes. Return True if changed, False otherwise."""
+ changed = False
+
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ if user == 'PUBLIC':
+ if password is not None:
+ module.fail_json(msg="cannot change the password for PUBLIC user")
+ elif role_attr_flags != '':
+ module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
+ else:
+ return False
+
+ # Handle passwords.
+ if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
+ # Select password and all flag-like columns in order to verify changes.
+ try:
+ select = "SELECT * FROM pg_authid where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError:
+ current_role_attrs = None
+ db_connection.rollback()
+
+ pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
+
+ if current_role_attrs is None:
+ try:
+                # AWS RDS instances do not allow users to access pg_authid,
+                # so try to get current_role_attrs from the pg_roles table
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes from pg_roles
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError as e:
+ db_connection.rollback()
+ module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
+
+ role_attr_flags_changing = False
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if expires is not None:
+ cursor.execute("SELECT %s::timestamptz;", (expires,))
+ expires_with_tz = cursor.fetchone()[0]
+ expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
+ else:
+ expires_changing = False
+
+ conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
+
+ if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' % {"user": user}]
+ if pwchanging:
+ if password != '':
+ alter.append("WITH %(crypt)s" % {"crypt": encrypted})
+ alter.append("PASSWORD %(password)s")
+ else:
+ alter.append("WITH PASSWORD NULL")
+ alter.append(role_attr_flags)
+ elif role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+ if expires is not None:
+ alter.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+
+ query_password_data = dict(password=password, expires=expires)
+ try:
+ cursor.execute(' '.join(alter), query_password_data)
+ changed = True
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+ except psycopg2.NotSupportedError as e:
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+
+ elif no_password_changes and role_attr_flags != '':
+ # Grab role information from pg_roles instead of pg_authid
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+
+ role_attr_flags_changing = False
+
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if not role_attr_flags_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' %
+ {"user": user}]
+ if role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+
+ try:
+ cursor.execute(' '.join(alter))
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+
+ # Grab new role attributes.
+ cursor.execute(select, {"user": user})
+ new_role_attrs = cursor.fetchone()
+
+ # Detect any differences between current_ and new_role_attrs.
+ changed = current_role_attrs != new_role_attrs
+
+ return changed
+
+
+def user_delete(cursor, user):
+ """Try to remove a user. Returns True if successful otherwise False"""
+ cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
+ try:
+ query = 'DROP USER "%s"' % user
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return False
+
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return True
+
+
+def has_table_privileges(cursor, user, table, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_table_privileges(cursor, user, table)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def get_table_privileges(cursor, user, table):
+ if '.' in table:
+ schema, table = table.split('.', 1)
+ else:
+ schema = 'public'
+ query = ("SELECT privilege_type FROM information_schema.role_table_grants "
+ "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
+ cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
+ return frozenset([x[0] for x in cursor.fetchall()])
+
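+# For illustration: get_table_privileges(cursor, 'django', 'acme.products')
+# looks up table_schema='acme' and table_name='products'; a bare name such
+# as 'products' defaults to the 'public' schema.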
+
+def grant_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'GRANT %s ON TABLE %s TO "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def get_database_privileges(cursor, user, db):
+ priv_map = {
+ 'C': 'CREATE',
+ 'T': 'TEMPORARY',
+ 'c': 'CONNECT',
+ }
+ query = 'SELECT datacl FROM pg_database WHERE datname = %s'
+ cursor.execute(query, (db,))
+ datacl = cursor.fetchone()[0]
+ if datacl is None:
+ return set()
+ r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
+ if r is None:
+ return set()
+ o = set()
+ for v in r.group(1):
+ o.add(priv_map[v])
+ return normalize_privileges(o, 'database')
+
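+# For illustration: with a datacl value like '{=Tc/postgres,django=CTc/postgres}',
+# the regular expression above captures 'CTc' for user 'django', which maps
+# to {'CREATE', 'TEMPORARY', 'CONNECT'} after normalization.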
+
+def has_database_privileges(cursor, user, db, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_database_privileges(cursor, user, db)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def grant_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'GRANT %s ON DATABASE %s TO "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ revoke_funcs = dict(table=revoke_table_privileges,
+ database=revoke_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested to be removed are
+ # currently granted to the user
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[0]:
+ revoke_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def grant_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ grant_funcs = dict(table=grant_table_privileges,
+ database=grant_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested for the user are
+ # currently missing
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[2]:
+ grant_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def parse_role_attrs(cursor, role_attr_flags):
+ """
+ Parse role attributes string for user creation.
+ Format:
+
+ attributes[,attributes,...]
+
+ Where:
+
+ attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
+ [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
+ "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
+ "[NO]BYPASSRLS" ]
+
+ Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
+ Note: "[NO]CREATEUSER" role attribute is deprecated.
+
+ """
+ flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
+
+ valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
+ valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
+
+ if not flags.issubset(valid_flags):
+ raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
+ ' '.join(flags.difference(valid_flags)))
+
+ return ' '.join(flags)
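+# For example, parse_role_attrs(cursor, 'LOGIN,createdb') returns the
+# upper-cased flags joined by spaces, e.g. 'CREATEDB LOGIN' (frozenset order
+# is not guaranteed), while an unsupported flag raises InvalidFlagsError.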
+
+
+def normalize_privileges(privs, type_):
+ new_privs = set(privs)
+ if 'ALL' in new_privs:
+ new_privs.update(VALID_PRIVS[type_])
+ new_privs.remove('ALL')
+ if 'TEMP' in new_privs:
+ new_privs.add('TEMPORARY')
+ new_privs.remove('TEMP')
+
+ return new_privs
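+# Illustration, assuming VALID_PRIVS['database'] is defined earlier in this
+# module as frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')):
+# normalize_privileges({'ALL'}, 'database') expands 'ALL', drops the 'TEMP'
+# alias, and returns {'CREATE', 'CONNECT', 'TEMPORARY'}.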
+
+
+def parse_privs(privs, db):
+ """
+ Parse privilege string to determine permissions for database db.
+ Format:
+
+ privileges[/privileges/...]
+
+ Where:
+
+ privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
+ TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
+ """
+ if privs is None:
+ return privs
+
+ o_privs = {
+ 'database': {},
+ 'table': {}
+ }
+ for token in privs.split('/'):
+ if ':' not in token:
+ type_ = 'database'
+ name = db
+ priv_set = frozenset(x.strip().upper()
+ for x in token.split(',') if x.strip())
+ else:
+ type_ = 'table'
+ name, privileges = token.split(':', 1)
+ priv_set = frozenset(x.strip().upper()
+ for x in privileges.split(',') if x.strip())
+
+ if not priv_set.issubset(VALID_PRIVS[type_]):
+ raise InvalidPrivsError('Invalid privs specified for %s: %s' %
+ (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
+
+ priv_set = normalize_privileges(priv_set, type_)
+ o_privs[type_][name] = priv_set
+
+ return o_privs
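+# For example, parse_privs('CONNECT,CREATE/report:SELECT,INSERT', 'acme')
+# returns (after validation and normalization):
+#   {'database': {'acme': {'CONNECT', 'CREATE'}},
+#    'table': {'report': {'SELECT', 'INSERT'}}}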
+
+
+def get_valid_flags_by_version(cursor):
+ """
+    Some role attributes were introduced in later PostgreSQL versions.
+    Compile the list of flags that are valid for the current server version.
+ """
+ current_version = cursor.connection.server_version
+
+ return [
+ flag
+ for flag, version_introduced in FLAGS_BY_VERSION.items()
+ if current_version >= version_introduced
+ ]
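+# Illustration: against a 9.6 server (server_version == 90600), a mapping such
+# as FLAGS_BY_VERSION = {'BYPASSRLS': 90500} (defined earlier in this module)
+# yields ['BYPASSRLS']; against a 9.4 server (90400) it yields [].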
+
+
+def get_comment(cursor, user):
+ """Get user's comment."""
+ query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(user)s")
+ cursor.execute(query, {'user': user})
+ return cursor.fetchone()[0]
+
+
+def add_comment(cursor, user, comment):
+ """Add comment on user."""
+ if comment != get_comment(cursor, user):
+ query = 'COMMENT ON ROLE "%s" IS ' % user
+ cursor.execute(query + '%(comment)s', {'comment': comment})
+ executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
+ return True
+ else:
+ return False
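+# Note: add_comment() logs via cursor.mogrify() so that executed_queries
+# records the statement with the comment literal exactly as psycopg2 sends it.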
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', default=None, no_log=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='str', default=None),
+ db=dict(type='str', default='', aliases=['login_db']),
+ fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']),
+ role_attr_flags=dict(type='str', default=''),
+ encrypted=dict(type='bool', default=True),
+ no_password_changes=dict(type='bool', default=False, no_log=False),
+ expires=dict(type='str', default=None),
+ conn_limit=dict(type='int', default=None),
+ session_role=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ user = module.params["user"]
+ password = module.params["password"]
+ state = module.params["state"]
+ fail_on_user = module.params["fail_on_user"]
+ if module.params['db'] == '' and module.params["priv"] is not None:
+ module.fail_json(msg="privileges require a database to be specified")
+ privs = parse_privs(module.params["priv"], module.params["db"])
+ no_password_changes = module.params["no_password_changes"]
+ if module.params["encrypted"]:
+ encrypted = "ENCRYPTED"
+ else:
+ encrypted = "UNENCRYPTED"
+ expires = module.params["expires"]
+ conn_limit = module.params["conn_limit"]
+ role_attr_flags = module.params["role_attr_flags"]
+ groups = module.params["groups"]
+ if groups:
+ groups = [e.strip() for e in groups]
+ comment = module.params["comment"]
+ session_role = module.params['session_role']
+
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, user, password, privs, expires,
+ role_attr_flags, groups, comment, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
+ except InvalidFlagsError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ kw = dict(user=user)
+ changed = False
+ user_removed = False
+
+ if state == "present":
+ if user_exists(cursor, user):
+ try:
+ changed = user_alter(db_connection, module, user, password,
+ role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ changed = user_add(cursor, user, password,
+ role_attr_flags, encrypted, expires, conn_limit)
+ except psycopg2.ProgrammingError as e:
+                module.fail_json(msg="Unable to add user with the given "
+                                     "requirements due to: %s" % to_native(e),
+ exception=traceback.format_exc())
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ try:
+ changed = grant_privileges(cursor, user, privs) or changed
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if groups:
+ target_roles = []
+ target_roles.append(user)
+ pg_membership = PgMembership(module, cursor, groups, target_roles)
+ changed = pg_membership.grant() or changed
+ executed_queries.extend(pg_membership.executed_queries)
+
+ if comment is not None:
+ try:
+ changed = add_comment(cursor, user, comment) or changed
+ except Exception as e:
+ module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ else:
+ if user_exists(cursor, user):
+ if module.check_mode:
+ changed = True
+ kw['user_removed'] = True
+ else:
+ try:
+ changed = revoke_privileges(cursor, user, privs)
+ user_removed = user_delete(cursor, user)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ changed = changed or user_removed
+ if fail_on_user and not user_removed:
+ msg = "Unable to remove user"
+ module.fail_json(msg=msg)
+ kw['user_removed'] = user_removed
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py
new file mode 100644
index 00000000..9d03408e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user_obj_stat_info
+short_description: Gather statistics about PostgreSQL user objects
+description:
+- Gathers statistics about PostgreSQL user objects.
+version_added: '0.2.0'
+options:
+ filter:
+ description:
+    - Limit the collected information with a comma-separated string or a YAML list.
+ - Allowable values are C(functions), C(indexes), C(tables).
+ - By default, collects all subsets.
+ - Unsupported values are ignored.
+ type: list
+ elements: str
+ schema:
+ description:
+    - Restrict the output to a certain schema.
+ type: str
+ db:
+ description:
+    - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+    - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- The returned C(size) and C(total_size) values are in bytes.
+- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
+ See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
+seealso:
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+- name: PostgreSQL statistics collector reference
+ description: Complete reference of the PostgreSQL statistics collector documentation.
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Collect information about all supported user objects of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+
+- name: Collect information about all supported user objects in the custom schema of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ schema: custom
+
+- name: Collect information about user tables and indexes in the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ filter: tables, indexes
+'''
+
+RETURN = r'''
+indexes:
+  description: User index statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
+tables:
+ description: User table statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
+functions:
+ description: User function statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgUserObjStatInfo():
+ """Class to collect information about PostgreSQL user objects.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ executed_queries (list): List of executed queries.
+ info (dict): Statistics dictionary.
+ obj_func_mapping (dict): Mapping of object types to corresponding functions.
+ schema (str): Name of a schema to restrict stat collecting.
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'functions': {},
+ 'indexes': {},
+ 'tables': {},
+ }
+ self.obj_func_mapping = {
+ 'functions': self.get_func_stat,
+ 'indexes': self.get_idx_stat,
+ 'tables': self.get_tbl_stat,
+ }
+ self.schema = None
+
+ def collect(self, filter_=None, schema=None):
+ """Collect statistics information of user objects.
+
+ Kwargs:
+ filter_ (list): List of subsets which need to be collected.
+ schema (str): Restrict stat collecting by certain schema.
+
+ Returns:
+ ``self.info``.
+ """
+ if schema:
+ self.set_schema(schema)
+
+ if filter_:
+ for obj_type in filter_:
+ obj_type = obj_type.strip()
+ obj_func = self.obj_func_mapping.get(obj_type)
+
+ if obj_func is not None:
+ obj_func()
+ else:
+ self.module.warn("Unknown filter option '%s'" % obj_type)
+
+ else:
+ for obj_func in self.obj_func_mapping.values():
+ obj_func()
+
+ return self.info
+
+ def get_func_stat(self):
+ """Get function statistics and fill out self.info dictionary."""
+        query = "SELECT * FROM pg_stat_user_functions"
+        query_params = None
+        if self.schema:
+            query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
+            query_params = (self.schema,)
+
+        # Pass parameters only when the query contains a placeholder, otherwise
+        # psycopg2 would reject the spare argument:
+        result = exec_sql(self, query, query_params=query_params,
+                          add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='functions',
+ schema_key='schemaname',
+ name_key='funcname')
+
+ def get_idx_stat(self):
+ """Get index statistics and fill out self.info dictionary."""
+        query = "SELECT * FROM pg_stat_user_indexes"
+        query_params = None
+        if self.schema:
+            query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
+            query_params = (self.schema,)
+
+        result = exec_sql(self, query, query_params=query_params,
+                          add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='indexes',
+ schema_key='schemaname',
+ name_key='indexrelname')
+
+ def get_tbl_stat(self):
+ """Get table statistics and fill out self.info dictionary."""
+        query = "SELECT * FROM pg_stat_user_tables"
+        query_params = None
+        if self.schema:
+            query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
+            query_params = (self.schema,)
+
+        result = exec_sql(self, query, query_params=query_params,
+                          add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='tables',
+ schema_key='schemaname',
+ name_key='relname')
+
+ def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
+ # Convert result to list of dicts to handle it easier:
+ result = [dict(row) for row in result]
+
+ for elem in result:
+            # Add the schema name as a key if not already present:
+ if not self.info[info_key].get(elem[schema_key]):
+ self.info[info_key][elem[schema_key]] = {}
+
+            # Add the object name as a subkey
+            # (names are unique within a schema, so no extra checks are needed):
+ self.info[info_key][elem[schema_key]][elem[name_key]] = {}
+
+            # Add the remaining attributes of this object:
+ for key, val in iteritems(elem):
+ if key not in (schema_key, name_key):
+ self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
+
+ if info_key in ('tables', 'indexes'):
+ schemaname = elem[schema_key]
+ if self.schema:
+ schemaname = self.schema
+
+ relname = '%s.%s' % (schemaname, elem[name_key])
+
+ result = exec_sql(self, "SELECT pg_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
+
+ if info_key == 'tables':
+ result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
+
+ def set_schema(self, schema):
+ """If schema exists, sets self.schema, otherwise fails."""
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %s")
+ result = exec_sql(self, query, query_params=(schema,),
+ add_to_executed=False)
+
+ if result and result[0][0]:
+ self.schema = schema
+ else:
+ self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
+
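+
+# Usage sketch (hypothetical module/cursor objects; both are created in
+# main() below):
+#
+#   stat_info = PgUserObjStatInfo(module, cursor)
+#   info = stat_info.collect(filter_=['tables'], schema='public')
+#   # info['tables']['public']['<relname>']['size'] -> relation size in bytes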
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ schema=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params["filter"]
+ schema = module.params["schema"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+    # We don't need to commit anything, so set autocommit to False:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ############################
+ # Create object and do work:
+ pg_obj_info = PgUserObjStatInfo(module, cursor)
+
+ info_dict = pg_obj_info.collect(filter_, schema)
+
+ # Clean up:
+ cursor.close()
+ db_connection.close()
+
+ # Return information:
+ module.exit_json(**info_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py
new file mode 100644
index 00000000..3d0788e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_configuration
+short_description: Updates Vertica configuration parameters.
+description:
+ - Updates Vertica configuration parameters.
+options:
+ name:
+ description:
+ - Name of the parameter to update.
+ required: true
+ aliases: [parameter]
+ type: str
+ value:
+ description:
+ - Value of the parameter to be set.
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
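+# A minimal sketch of the unixODBC driver registration described in the notes
+# above (the section name and paths assume a default Vertica installation):
+#
+#   [Vertica]
+#   Description = Vertica ODBC driver
+#   Driver = /opt/vertica/lib64/libverticaodbc.so
+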
+EXAMPLES = """
+- name: Updating failovertostandbyafter
+ community.general.vertica_configuration: name=failovertostandbyafter value='8 hours'
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_configuration_facts(cursor, parameter_name=''):
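+    # An empty parameter_name makes the "? = ''" predicate below match every
+    # row, so all parameters are returned; otherwise the name is matched
+    # case-insensitively via ilike.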
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter_name, parameter_name)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def check(configuration_facts, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ return False
+ return True
+
+
+def present(configuration_facts, cursor, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ changed = False
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+ changed = True
+ if changed:
+ configuration_facts.update(get_configuration_facts(cursor, parameter_name))
+ return changed
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ parameter=dict(required=True, aliases=['name']),
+ value=dict(default=None),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ parameter_name = module.params['parameter']
+ current_value = module.params['value']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
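+        # With the defaults above this expands to, e.g. (hypothetical values):
+        #   Driver=Vertica;Server=localhost;Port=5433;Database=;User=dbadmin;Password=***;ConnectionLoadBalance=true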
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
+ exception=traceback.format_exc())
+
+ try:
+ configuration_facts = get_configuration_facts(cursor)
+ if module.check_mode:
+ changed = not check(configuration_facts, parameter_name, current_value)
+ else:
+ try:
+ changed = present(configuration_facts, cursor, parameter_name, current_value)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py
new file mode 100644
index 00000000..bba411d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_role
+short_description: Adds or removes Vertica database roles and assigns roles to them.
+description:
+  - Adds or removes a Vertica database role and, optionally, assigns other roles to it.
+options:
+ name:
+ description:
+ - Name of the role to add or remove.
+ required: true
+ assigned_roles:
+ description:
+ - Comma separated list of roles to assign to the role.
+ aliases: ['assigned_role']
+ state:
+ description:
+      - Whether to create C(present) or drop C(absent) a role.
+ choices: ['present', 'absent']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica role
+ community.general.vertica_role: name=role_name db=db_name state=present
+
+- name: Creating a new vertica role with other role assigned
+ community.general.vertica_role: name=role_name assigned_role=other_role_name state=present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(role_facts, cursor, role,
+ existing, required):
+ for assigned_role in set(existing) - set(required):
+ cursor.execute("revoke {0} from {1}".format(assigned_role, role))
+ for assigned_role in set(required) - set(existing):
+ cursor.execute("grant {0} to {1}".format(assigned_role, role))
+
+
+def check(role_facts, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ return False
+ if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
+ return False
+ return True
+
+
+def present(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ cursor.execute("create role {0}".format(role))
+ update_roles(role_facts, cursor, role, [], assigned_roles)
+ role_facts.update(get_role_facts(cursor, role))
+ return True
+ else:
+ changed = False
+ if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], assigned_roles)
+ changed = True
+ if changed:
+ role_facts.update(get_role_facts(cursor, role))
+ return changed
+
+
+def absent(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key in role_facts:
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], [])
+ cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
+ del role_facts[role_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ role=dict(required=True, aliases=['name']),
+ assigned_roles=dict(default=None, aliases=['assigned_role']),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ role = module.params['role']
+    assigned_roles = []
+    if module.params['assigned_roles']:
+        assigned_roles = module.params['assigned_roles'].split(',')
+        # list() so the roles survive repeated iteration on Python 3:
+        assigned_roles = list(filter(None, assigned_roles))
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ role_facts = get_role_facts(cursor)
+ if module.check_mode:
+ changed = not check(role_facts, role, assigned_roles)
+ elif state == 'absent':
+ try:
+ changed = absent(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py
new file mode 100644
index 00000000..424de564
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_schema
+short_description: Adds or removes Vertica database schema and roles.
+description:
+ - Adds or removes Vertica database schema and, optionally, roles
+ with schema access privileges.
+ - A schema will not be removed until all the objects have been dropped.
+  - In such a situation the module will fail to remove the schema itself and
+    will remove only those roles created for the schema that have no
+    dependencies.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ usage_roles:
+ description:
+ - Comma separated list of roles to create and grant usage access to the schema.
+ aliases: ['usage_role']
+ create_roles:
+ description:
+ - Comma separated list of roles to create and grant usage and create access to the schema.
+ aliases: ['create_role']
+ owner:
+ description:
+ - Name of the user to set as owner of the schema.
+ state:
+ description:
+ - Whether to create C(present), or drop C(absent) a schema.
+ default: present
+ choices: ['present', 'absent']
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica schema
+ community.general.vertica_schema: name=schema_name db=db_name state=present
+
+- name: Creating a new schema with specific schema owner
+ community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present
+
+- name: Creating a new schema with roles
+ community.general.vertica_schema:
+ name=schema_name
+ create_roles=schema_name_all
+ usage_roles=schema_name_ro,schema_name_rw
+ db=db_name
+ state=present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public', 'TxtIndex')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee_id = r.role_id and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def update_roles(schema_facts, cursor, schema,
+ existing, required,
+ create_existing, create_required):
+ for role in set(existing + create_existing) - set(required + create_required):
+ cursor.execute("drop role {0} cascade".format(role))
+ for role in set(create_existing) - set(create_required):
+ cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+ for role in set(required + create_required) - set(existing + create_existing):
+ cursor.execute("create role {0}".format(role))
+ cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+ for role in set(create_required) - set(create_existing):
+ cursor.execute("grant create on schema {0} to {1}".format(schema, role))
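+# For example, with existing=['ro'], required=['ro', 'rw'], create_existing=[]
+# and create_required=['all'], this creates roles 'rw' and 'all', grants both
+# usage on the schema, then additionally grants create to 'all'; no roles are
+# dropped.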
+
+
+def check(schema_facts, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ return False
+ if owner and owner.lower() == schema_facts[schema_key]['owner'].lower():
+ return False
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
+ return False
+ if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+ return False
+ return True
+
+
+def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ query_fragments = ["create schema {0}".format(schema)]
+ if owner:
+ query_fragments.append("authorization {0}".format(owner))
+ cursor.execute(' '.join(query_fragments))
+ update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return True
+ else:
+ changed = False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ raise NotSupportedError((
+ "Changing schema owner is not supported. "
+ "Current owner: {0}."
+ ).format(schema_facts[schema_key]['owner']))
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
+ sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], usage_roles,
+ schema_facts[schema_key]['create_roles'], create_roles)
+ changed = True
+ if changed:
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return changed
+
+
+def absent(schema_facts, cursor, schema, usage_roles, create_roles):
+ schema_key = schema.lower()
+ if schema_key in schema_facts:
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+ try:
+ cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping schema failed due to dependencies.")
+ del schema_facts[schema_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ schema=dict(required=True, aliases=['name']),
+ usage_roles=dict(default=None, aliases=['usage_role']),
+ create_roles=dict(default=None, aliases=['create_role']),
+ owner=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ schema = module.params['schema']
+ usage_roles = []
+ if module.params['usage_roles']:
+ usage_roles = module.params['usage_roles'].split(',')
+        usage_roles = list(filter(None, usage_roles))
+ create_roles = []
+ if module.params['create_roles']:
+ create_roles = module.params['create_roles'].split(',')
+        create_roles = list(filter(None, create_roles))
+ owner = module.params['owner']
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
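+        # Build the ODBC DSN for the Vertica driver; ConnectionLoadBalance=true
+        # asks the cluster to balance the session across available nodes.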
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ if module.check_mode:
+ changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
+ elif state == 'absent':
+ try:
+ changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py
new file mode 100644
index 00000000..f550f190
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_user
+short_description: Adds or removes Vertica database users and assigns roles.
+description:
+  - Adds or removes a Vertica database user and, optionally, assigns roles.
+  - A user will not be removed until all of its dependencies have been dropped.
+  - If the module attempts to remove a user that still has dependencies, it
+    will fail and only revoke the roles granted to the user.
+options:
+ name:
+ description:
+ - Name of the user to add or remove.
+ required: true
+ profile:
+ description:
+ - Sets the user's profile.
+ resource_pool:
+ description:
+ - Sets the user's resource pool.
+ password:
+ description:
+ - The user's password encrypted by the MD5 algorithm.
+      - The password must be generated with the format C("md5" + md5[password + username]),
+        resulting in a total of 35 characters. An easy way to do this is by querying
+        the Vertica database with C(select 'md5'||md5('<user_password><user_name>')).
+        A commented sketch of computing the hash outside of Vertica is included in
+        the examples below.
+ expired:
+ description:
+ - Sets the user's password expiration.
+ type: bool
+ ldap:
+ description:
+ - Set to true if users are authenticated via LDAP.
+      - The user will be created with its password set to I($ldap$) and expired.
+ type: bool
+ roles:
+ description:
+ - Comma separated list of roles to assign to the user.
+ aliases: ['role']
+ state:
+ description:
+      - Whether to create C(present), drop C(absent), or lock C(locked) a user.
+ choices: ['present', 'absent', 'locked']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica user with password
+  community.general.vertica_user:
+    name: user_name
+    password: md5<encrypted_password>
+    db: db_name
+    state: present
+
+- name: Creating a new vertica user authenticated via ldap with roles assigned
+  community.general.vertica_user:
+    name: user_name
+    ldap: true
+    db: db_name
+    roles: schema_name_ro
+    state: present
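+
+# A sketch only: one way to precompute the 35-character password hash outside
+# of Vertica, assuming Python's hashlib is available ('md5' prefix plus the hex
+# digest of the password concatenated with the user name):
+#   python -c "import hashlib; print('md5' + hashlib.md5(b'<user_password><user_name>').hexdigest())"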
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(user_facts, cursor, user,
+ existing_all, existing_default, required):
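+    # Revoke roles that are no longer required, grant the missing ones, and
+    # reset the user's default roles to the required set.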
+ del_roles = list(set(existing_all) - set(required))
+ if del_roles:
+ cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
+ new_roles = list(set(required) - set(existing_all))
+ if new_roles:
+ cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
+ if required:
+ cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+
+
+def check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ return False
+ if profile and profile != user_facts[user_key]['profile']:
+ return False
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ return False
+ if locked != (user_facts[user_key]['locked'] == 'True'):
+ return False
+ if password and password != user_facts[user_key]['password']:
+ return False
+    if ((expired is not None and expired != (user_facts[user_key]['expired'] == 'True')) or
+            (ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'))):
+ return False
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ return False
+ return True
+
+
+def present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ query_fragments = ["create user {0}".format(user)]
+ if locked:
+ query_fragments.append("account lock")
+ if password or ldap:
+ if password:
+ query_fragments.append("identified by '{0}'".format(password))
+ else:
+ query_fragments.append("identified by '$ldap$'")
+ if expired or ldap:
+ query_fragments.append("password expire")
+ if profile:
+ query_fragments.append("profile {0}".format(profile))
+ if resource_pool:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ cursor.execute(' '.join(query_fragments))
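+        # A non-default resource pool also needs an explicit usage grant.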
+ if resource_pool and resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ update_roles(user_facts, cursor, user, [], [], roles)
+ user_facts.update(get_user_facts(cursor, user))
+ return True
+ else:
+ changed = False
+ query_fragments = ["alter user {0}".format(user)]
+ if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
+ if locked:
+ state = 'lock'
+ else:
+ state = 'unlock'
+ query_fragments.append("account {0}".format(state))
+ changed = True
+ if password and password != user_facts[user_key]['password']:
+ query_fragments.append("identified by '{0}'".format(password))
+ changed = True
+ if ldap:
+ if ldap != (user_facts[user_key]['expired'] == 'True'):
+ query_fragments.append("password expire")
+ changed = True
+ elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
+ if expired:
+ query_fragments.append("password expire")
+ changed = True
+ else:
+ raise NotSupportedError("Unexpiring user password is not supported.")
+ if profile and profile != user_facts[user_key]['profile']:
+ query_fragments.append("profile {0}".format(profile))
+ changed = True
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ if user_facts[user_key]['resource_pool'] != 'general':
+ cursor.execute("revoke usage on resource pool {0} from {1}".format(
+ user_facts[user_key]['resource_pool'], user))
+ if resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ changed = True
+ if changed:
+ cursor.execute(' '.join(query_fragments))
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
+ changed = True
+ if changed:
+ user_facts.update(get_user_facts(cursor, user))
+ return changed
+
+
+def absent(user_facts, cursor, user, roles):
+ user_key = user.lower()
+ if user_key in user_facts:
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
+ try:
+ cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping user failed due to dependencies.")
+ del user_facts[user_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True, aliases=['name']),
+ profile=dict(default=None),
+ resource_pool=dict(default=None),
+ password=dict(default=None, no_log=True),
+ expired=dict(type='bool', default=None),
+ ldap=dict(type='bool', default=None),
+ roles=dict(default=None, aliases=['role']),
+ state=dict(default='present', choices=['absent', 'present', 'locked']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ user = module.params['user']
+ profile = module.params['profile']
+ if profile:
+ profile = profile.lower()
+ resource_pool = module.params['resource_pool']
+ if resource_pool:
+ resource_pool = resource_pool.lower()
+ password = module.params['password']
+ expired = module.params['expired']
+ ldap = module.params['ldap']
+ roles = []
+ if module.params['roles']:
+ roles = module.params['roles'].split(',')
+        roles = list(filter(None, roles))
+ state = module.params['state']
+ if state == 'locked':
+ locked = True
+ else:
+ locked = False
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ user_facts = get_user_facts(cursor)
+ if module.check_mode:
+ changed = not check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ elif state == 'absent':
+ try:
+ changed = absent(user_facts, cursor, user, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state in ['present', 'locked']:
+ try:
+ changed = present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py
new file mode 100644
index 00000000..a6327dde
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_event
+short_description: Posts events to Datadog service
+description:
+- "Allows to post events to Datadog (www.datadoghq.com) service."
+- "Uses http://docs.datadoghq.com/api/#events API."
+author:
+- "Artūras `arturaz` Šlajus (@arturaz)"
+- "Naoya Nakazawa (@n0ts)"
+options:
+ api_key:
+ type: str
+ description: ["Your DataDog API key."]
+ required: true
+ app_key:
+ type: str
+ description: ["Your DataDog app key."]
+ required: true
+ title:
+ type: str
+ description: ["The event title."]
+ required: true
+ text:
+ type: str
+ description: ["The body of the event."]
+ required: true
+ date_happened:
+ type: int
+ description:
+ - POSIX timestamp of the event.
+ - Default value is now.
+ priority:
+ type: str
+ description: ["The priority of the event."]
+ default: normal
+ choices: [normal, low]
+ host:
+ type: str
+ description:
+ - Host name to associate with the event.
+ - If not specified, it defaults to the remote system's hostname.
+ tags:
+ type: list
+ description: ["Comma separated list of tags to apply to the event."]
+ alert_type:
+ type: str
+ description: ["Type of alert."]
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ type: str
+ description: ["An arbitrary string to use for aggregation."]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Post an event with low priority
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ priority: low
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+
+- name: Post an event with several tags
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+ tags: 'aa,bb,#host:{{ inventory_hostname }}'
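+
+# A sketch only (hypothetical values): alert_type and aggregation_key are
+# taken from this module's options.
+- name: Post a warning event with an aggregation key
+  community.general.datadog_event:
+    title: Deploy started
+    text: Rolling out release
+    alert_type: warning
+    aggregation_key: deploy
+    api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+    app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN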
+'''
+
+import platform
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
+ title=dict(required=True),
+ text=dict(required=True),
+ date_happened=dict(required=False, default=None, type='int'),
+ priority=dict(
+ required=False, default='normal', choices=['normal', 'low']
+ ),
+ host=dict(required=False, default=None),
+ tags=dict(required=False, default=None, type='list'),
+ alert_type=dict(
+ required=False, default='info',
+ choices=['error', 'warning', 'info', 'success']
+ ),
+ aggregation_key=dict(required=False, default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ _post_event(module)
+
+
+def _post_event(module):
+ try:
+ if module.params['host'] is None:
+ module.params['host'] = platform.node().split('.')[0]
+ msg = api.Event.create(title=module.params['title'],
+ text=module.params['text'],
+ host=module.params['host'],
+ tags=module.params['tags'],
+ priority=module.params['priority'],
+ alert_type=module.params['alert_type'],
+ aggregation_key=module.params['aggregation_key'],
+ source_type_name='ansible')
+ if msg['status'] != 'ok':
+ module.fail_json(msg=msg)
+
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py
new file mode 100644
index 00000000..f6020c2b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_monitor
+short_description: Manages Datadog monitors
+description:
+ - Manages monitors within Datadog.
+ - Options as described on https://docs.datadoghq.com/api/.
+author: Sebastian Kornehl (@skornehl)
+requirements: [datadog]
+options:
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API. Default value is C(https://api.datadoghq.com).
+ - This value can also be set with the C(DATADOG_HOST) environment variable.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the monitor.
+ required: true
+ choices: ['present', 'absent', 'mute', 'unmute']
+ type: str
+ tags:
+ description:
+ - A list of tags to associate with your monitor when creating or updating.
+ - This can help you categorize and filter monitors.
+ type: list
+ type:
+ description:
+ - The type of the monitor.
+ choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert']
+ type: str
+ query:
+ description:
+ - The monitor query to notify on.
+ - Syntax varies depending on what type of monitor you are creating.
+ type: str
+ name:
+ description:
+ - The name of the alert.
+ required: true
+ type: str
+ notification_message:
+ description:
+ - A message to include with notifications for this monitor.
+ - Email notifications can be sent to specific users by using the same '@username' notation as events.
+      - Monitor message template variables can be accessed by using double square brackets, i.e. '[[' and ']]'.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ type: str
+ aliases: [ 'message' ]
+ silenced:
+ type: dict
+ description:
+ - Dictionary of scopes to silence, with timestamps or None.
+ - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
+ default: ""
+ notify_no_data:
+ description:
+ - Whether this monitor will notify when data stops reporting.
+ type: bool
+ default: 'no'
+ no_data_timeframe:
+ description:
+ - The number of minutes before a monitor will notify when data stops reporting.
+ - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
+ - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
+ type: str
+ timeout_h:
+ description:
+ - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+ type: str
+ renotify_interval:
+ description:
+ - The number of minutes after the last notification before a monitor will re-notify on the current status.
+ - It will only re-notify if it is not resolved.
+ type: str
+ escalation_message:
+ description:
+ - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere.
+ - Not applicable if I(renotify_interval=None).
+ type: str
+ notify_audit:
+ description:
+ - Whether tagged users will be notified on changes to this monitor.
+ type: bool
+ default: 'no'
+ thresholds:
+ type: dict
+ description:
+ - A dictionary of thresholds by status.
+ - Only available for service checks and metric alerts.
+ - Because each of them can have multiple thresholds, we do not define them directly in the query.
+ - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
+ locked:
+ description:
+ - Whether changes to this monitor should be restricted to the creator or admins.
+ type: bool
+ default: 'no'
+ require_full_window:
+ description:
+ - Whether this monitor needs a full window of data before it gets evaluated.
+ - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
+ type: bool
+ new_host_delay:
+ description:
+ - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+ - This gives the host time to fully initialize.
+ type: str
+ evaluation_delay:
+ description:
+ - Time to delay evaluation (in seconds).
+ - Effective for sparse values.
+ type: str
+ id:
+ description:
+ - The ID of the alert.
+      - If set, will be used instead of the name to locate the alert (see the example below).
+ type: str
+ include_tags:
+ description:
+      - Whether notifications from this monitor automatically insert its triggering tags into the title.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+'''
+
+EXAMPLES = '''
+- name: Create a metric monitor
+ community.general.datadog_monitor:
+ type: "metric alert"
+ name: "Test monitor"
+ state: "present"
+ query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
+ notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Deletes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Mutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "mute"
+ silenced: '{"*":None}'
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Unmutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "unmute"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Use datadoghq.eu platform instead of datadoghq.com
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_host: https://api.datadoghq.eu
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+'''
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_host=dict(required=False),
+ app_key=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
+ type=dict(required=False, choices=['metric alert', 'service check', 'event alert', 'process alert', 'log alert']),
+ name=dict(required=True),
+ query=dict(required=False),
+ notification_message=dict(required=False, no_log=True, default=None, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ silenced=dict(required=False, default=None, type='dict'),
+ notify_no_data=dict(required=False, default=False, type='bool'),
+ no_data_timeframe=dict(required=False, default=None),
+ timeout_h=dict(required=False, default=None),
+ renotify_interval=dict(required=False, default=None),
+ escalation_message=dict(required=False, default=None),
+ notify_audit=dict(required=False, default=False, type='bool'),
+ thresholds=dict(required=False, type='dict', default=None),
+ tags=dict(required=False, type='list', default=None),
+ locked=dict(required=False, default=False, type='bool'),
+ require_full_window=dict(required=False, default=None, type='bool'),
+ new_host_delay=dict(required=False, default=None),
+ evaluation_delay=dict(required=False, default=None),
+ id=dict(required=False),
+ include_tags=dict(required=False, default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ if 'message' in module.params:
+ module.fail_json(msg="'message' is reserved keyword, please change this parameter to 'notification_message'")
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'api_host': module.params['api_host'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ # Check if api_key and app_key is correct or not
+ # if not, then fail here.
+ response = api.Monitor.get_all()
+ if isinstance(response, dict):
+ msg = response.get('errors', None)
+ if msg:
+ module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
+
+ if module.params['state'] == 'present':
+ install_monitor(module)
+ elif module.params['state'] == 'absent':
+ delete_monitor(module)
+ elif module.params['state'] == 'mute':
+ mute_monitor(module)
+ elif module.params['state'] == 'unmute':
+ unmute_monitor(module)
+
+
+def _fix_template_vars(message):
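+    # Playbooks use [[...]] because {{...}} would be templated by Jinja2;
+    # convert back to the {{...}} form the Datadog API expects.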
+ if message:
+ return message.replace('[[', '{{').replace(']]', '}}')
+ return message
+
+
+def _get_monitor(module):
+ if module.params['id'] is not None:
+ monitor = api.Monitor.get(module.params['id'])
+ if 'errors' in monitor:
+ module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+ return monitor
+ else:
+ monitors = api.Monitor.get_all()
+ for monitor in monitors:
+ if monitor['name'] == _fix_template_vars(module.params['name']):
+ return monitor
+ return {}
+
+
+def _post_monitor(module, options):
+ try:
+ kwargs = dict(type=module.params['type'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.create(**kwargs)
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def _equal_dicts(a, b, ignore_keys):
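+    # Compare two monitor dicts while ignoring volatile, server-managed keys.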
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_monitor(module, monitor, options):
+ try:
+ kwargs = dict(id=monitor['id'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.update(**kwargs)
+
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
+ module.exit_json(changed=False, msg=msg)
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def install_monitor(module):
+ options = {
+ "silenced": module.params['silenced'],
+ "notify_no_data": module.boolean(module.params['notify_no_data']),
+ "no_data_timeframe": module.params['no_data_timeframe'],
+ "timeout_h": module.params['timeout_h'],
+ "renotify_interval": module.params['renotify_interval'],
+ "escalation_message": module.params['escalation_message'],
+ "notify_audit": module.boolean(module.params['notify_audit']),
+ "locked": module.boolean(module.params['locked']),
+ "require_full_window": module.params['require_full_window'],
+ "new_host_delay": module.params['new_host_delay'],
+ "evaluation_delay": module.params['evaluation_delay'],
+ "include_tags": module.params['include_tags'],
+ }
+
+ if module.params['type'] == "service check":
+ options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+ if module.params['type'] in ["metric alert", "log alert"] and module.params['thresholds'] is not None:
+ options["thresholds"] = module.params['thresholds']
+
+ monitor = _get_monitor(module)
+ if not monitor:
+ _post_monitor(module, options)
+ else:
+ _update_monitor(module, monitor, options)
+
+
+def delete_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.delete(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def mute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif monitor['options']['silenced']:
+ module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
+ elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+ module.exit_json(changed=False)
+ try:
+ if module.params['silenced'] is None or module.params['silenced'] == "":
+ msg = api.Monitor.mute(id=monitor['id'])
+ else:
+ msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def unmute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif not monitor['options']['silenced']:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.unmute(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py
new file mode 100644
index 00000000..49c42432
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Branko Majic <branko@majic.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: dconf
+author:
+ - "Branko Majic (@azaghal)"
+short_description: Modify and read dconf database
+description:
+  - This module allows modifications and reading of the dconf database. The
+    module is implemented as a wrapper around the dconf tool. Please see the
+    dconf(1) man page for more details.
+ - Since C(dconf) requires a running D-Bus session to change values, the module
+ will try to detect an existing session and reuse it, or run the tool via
+ C(dbus-run-session).
+notes:
+ - This module depends on C(psutil) Python library (version 4.0.0 and upwards),
+ C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
+ distribution you are using, you may need to install additional packages to
+ have these available.
+ - Detection of existing, running D-Bus session, required to change settings
+ via C(dconf), is not 100% reliable due to implementation details of D-Bus
+    daemon itself. This might lead to running applications not picking up
+ changes on the fly if options are changed via Ansible and
+ C(dbus-run-session).
+ - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
+ utilises an unusual syntax for the values (GVariant). For example, if you
+ wanted to provide a string value, the correct syntax would be
+ C(value="'myvalue'") - with single quotes as part of the Ansible parameter
+ value.
+ - When using loops in combination with a value like
+ :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
+ type conversions. Applying a filter :code:`"{{ item.value | string }}"`
+    to the parameter variable can avoid potential conversion problems; a sketch
+    of this pattern is included in the examples below.
+ - The easiest way to figure out exact syntax/value you need to provide for a
+ key is by making the configuration change in application affected by the
+ key, and then having a look at value set via commands C(dconf dump
+ /path/to/dir/) or C(dconf read /path/to/key).
+options:
+ key:
+ type: str
+ required: true
+ description:
+ - A dconf key to modify or read from the dconf database.
+ value:
+ type: str
+ required: false
+ description:
+ - Value to set for the specified dconf key. Value should be specified in
+ GVariant format. Due to complexity of this format, it is best to have a
+ look at existing values in the dconf database. Required for
+ C(state=present).
+ state:
+ type: str
+ required: false
+ default: present
+ choices:
+ - read
+ - present
+ - absent
+ description:
+ - The action to take upon the key/value.
+'''
+
+RETURN = """
+value:
+ description: value associated with the requested key
+ returned: success, state was "read"
+ type: str
+ sample: "'Default'"
+"""
+
+EXAMPLES = """
+- name: Configure available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ value: "[('xkb', 'us'), ('xkb', 'se')]"
+ state: present
+
+- name: Read currently available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: absent
+
+- name: Configure available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ value: "['us', 'se']"
+ state: present
+
+- name: Read currently available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: absent
+
+- name: Disable desktop effects in Cinnamon
+ community.general.dconf:
+ key: "/org/cinnamon/desktop-effects"
+ value: "false"
+ state: present
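+
+# A sketch only: string values must carry their own single quotes (GVariant
+# syntax), as described in the notes above; the key shown is hypothetical.
+- name: Set a plain string value for a hypothetical key
+  community.general.dconf:
+    key: "/org/example/string-key"
+    value: "'myvalue'"
+    state: present
+
+# A sketch only: the "| string" filter from the notes above guards against
+# type conversion when looping; the dconf_settings variable is hypothetical.
+- name: Configure several keys in a loop
+  community.general.dconf:
+    key: "{{ item.key }}"
+    value: "{{ item.value | string }}"
+    state: present
+  loop: "{{ dconf_settings }}"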
+"""
+
+
+import os
+import traceback
+
+PSUTIL_IMP_ERR = None
+try:
+ import psutil
+ psutil_found = True
+except ImportError:
+ PSUTIL_IMP_ERR = traceback.format_exc()
+ psutil_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class DBusWrapper(object):
+ """
+ Helper class that can be used for running a command with a working D-Bus
+ session.
+
+ If possible, command will be run against an existing D-Bus session,
+ otherwise the session will be spawned via dbus-run-session.
+
+ Example usage:
+
+ dbus_wrapper = DBusWrapper(ansible_module)
+ dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
+ """
+
+ def __init__(self, module):
+ """
+ Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+ """
+
+ # Store passed-in arguments and set-up some defaults.
+ self.module = module
+
+ # Try to extract existing D-Bus session address.
+ self.dbus_session_bus_address = self._get_existing_dbus_session()
+
+ # If no existing D-Bus session was detected, check if dbus-run-session
+ # is available.
+ if self.dbus_session_bus_address is None:
+ self.module.get_bin_path('dbus-run-session', required=True)
+
+ def _get_existing_dbus_session(self):
+ """
+ Detects and returns an existing D-Bus session bus address.
+
+ :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
+ """
+
+ # We'll be checking the processes of current user only.
+ uid = os.getuid()
+
+ # Go through all the pids for this user, try to extract the D-Bus
+ # session bus address from environment, and ensure it is possible to
+ # connect to it.
+ self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
+
+ for pid in psutil.pids():
+ process = psutil.Process(pid)
+ process_real_uid, _, _ = process.uids()
+ try:
+ if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
+ dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
+ self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
+ command = ['dbus-send', '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
+ rc, _, _ = self.module.run_command(command)
+
+ if rc == 0:
+ self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
+
+ return dbus_session_bus_address_candidate
+
+ # This can happen with things like SSH sessions etc.
+ except psutil.AccessDenied:
+ pass
+
+ self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
+
+ return None
+
+ def run_command(self, command):
+ """
+ Runs the specified command within a functional D-Bus session. Command is
+ effectively passed-on to AnsibleModule.run_command() method, with
+ modification for using dbus-run-session if necessary.
+
+ :param command: Command to run, including parameters. Each element of the list should be a string.
+        :type command: list
+
+ :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
+ """
+
+ if self.dbus_session_bus_address is None:
+ self.module.debug("Using dbus-run-session wrapper for running commands.")
+ command = ['dbus-run-session'] + command
+ rc, out, err = self.module.run_command(command)
+
+ if self.dbus_session_bus_address is None and rc == 127:
+ self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
+ else:
+ extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
+ rc, out, err = self.module.run_command(command, environ_update=extra_environment)
+
+ return rc, out, err
+
+
+class DconfPreference(object):
+
+ def __init__(self, module, check_mode=False):
+ """
+ Initialises instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+
+ :param check_mode: Specify whether to only check if a change should be made or if to actually make a change.
+ :type check_mode: bool
+ """
+
+ self.module = module
+ self.check_mode = check_mode
+
+ def read(self, key):
+ """
+ Retrieves current value associated with the dconf key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
+ """
+
+ command = ["dconf", "read", key]
+
+ rc, out, err = self.module.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err)
+
+ if out == '':
+ value = None
+ else:
+ value = out.rstrip('\n')
+
+ return value
+
+ def write(self, key, value):
+ """
+ Writes the value for specified key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key for which the value should be set. Should be a full path.
+ :type key: str
+
+ :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
+ :type value: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # If no change is needed (or won't be done due to check_mode), notify
+ # caller straight away.
+ if value == self.read(key):
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set-up command to run. Since D-Bus is needed for the write operation,
+        # run the dconf command through the D-Bus wrapper.
+ command = ["dconf", "write", key, value]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+            self.module.fail_json(msg='dconf failed while writing the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+ def reset(self, key):
+ """
+        Resets the value for the specified key (removes it from the user configuration).
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key to reset. Should be a full path.
+ :type key: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # Read the current value first.
+ current_value = self.read(key)
+
+        # If the key is not set at all, no change is needed; in check mode, only
+        # report that a change would be made.
+ if current_value is None:
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set-up command to run. Since D-Bus is needed for the reset operation,
+        # run the dconf command through the D-Bus wrapper.
+ command = ["dconf", "reset", key]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent', 'read']),
+ key=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not psutil_found:
+ module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
+
+ # If present state was specified, value must be provided.
+ if module.params['state'] == 'present' and module.params['value'] is None:
+ module.fail_json(msg='State "present" requires "value" to be set.')
+
+ # Create wrapper instance.
+ dconf = DconfPreference(module, module.check_mode)
+
+ # Process based on different states.
+ if module.params['state'] == 'read':
+ value = dconf.read(module.params['key'])
+ module.exit_json(changed=False, value=value)
+ elif module.params['state'] == 'present':
+ changed = dconf.write(module.params['key'], module.params['value'])
+ module.exit_json(changed=changed)
+ elif module.params['state'] == 'absent':
+ changed = dconf.reset(module.params['key'])
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py
new file mode 100644
index 00000000..641cc1d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ type: path
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ type: str
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+ C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with C(state=absent))
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+
+ release:
+ type: str
+ description:
+      - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (for example, '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ type: str
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+ default: releases
+
+ shared_path:
+ type: path
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+ default: shared
+
+ current_path:
+ type: path
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+ default: current
+
+ unfinished_filename:
+ type: str
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+ default: DEPLOY_UNFINISHED
+
+ clean:
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+ type: bool
+ default: 'yes'
+
+ keep_releases:
+ type: int
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+ default: 5
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+# root:
+# releases:
+# - 20140415234508
+# - 20140415235146
+# - 20140416082818
+#
+# shared:
+# - sessions
+# - uploads
+#
+# current: releases/20140416082818
+
+
+# The 'releases' folder holds all the available releases. A release is a complete build of the application being
+# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+# git tags or commit hashes.
+#
+# During a deploy, a new folder should be created in the releases folder and any build steps required should be
+# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+# with a link to this build.
+#
+# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+#
+# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+# release is reduced to the time it takes to switch the link.
+#
+# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+# procedure to remove it during cleanup.
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+ community.general.deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ ansible.builtin.git:
+    repo: git://foosball.example.org/path/to/repo.git
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ community.general.deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# all paths can be absolute or relative (to the 'path' parameter)
+- community.general.deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: v1.1.1
+ state: present
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- community.general.deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: False
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- community.general.deploy_helper:
+ path: /path/to/root
+- ansible.builtin.debug:
+ var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
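+ # When no release is given for query/present, generate one from the current timestamp, e.g. '20210101120000' (hypothetical).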
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ if self.release:
+ new_release_path = os.path.join(releases_path, self.release)
+ else:
+ new_release_path = None
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception as e:
+ self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
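+ # Also apply the file common arguments (owner, group, mode, ...) to the directory.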
+ changed = self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ # Nothing to do when no release is involved (e.g. state=clean without a release given).
+ if not self.release:
+ return changed
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+ if os.path.exists(tmp_link_name):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
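+ # The reserved version (normally the release being finalized) is always kept; of the remaining
+ # releases, only the newest 'keep_releases' (by ctime) survive and the rest are deleted.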
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(aliases=['dest'], required=True, type='path'),
+ release=dict(required=False, type='str', default=None),
+ releases_path=dict(required=False, type='str', default='releases'),
+ shared_path=dict(required=False, type='path', default='shared'),
+ current_path=dict(required=False, type='path', default='current'),
+ keep_releases=dict(required=False, type='int', default=5),
+ clean=dict(required=False, type='bool', default=True),
+ unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = {'deploy_helper': []}
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
new file mode 100644
index 00000000..2187ceaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_network
+short_description: Create, update, and delete MCP 1.0 & 2.0 networks
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Create, update, and delete MCP 1.0 & 2.0 networks
+author: 'Aimon Bustardo (@aimonb)'
+options:
+ name:
+ description:
+ - The name of the network domain to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Additional description of the network domain.
+ required: false
+ type: str
+ service_plan:
+ description:
+ - The service plan, either "ESSENTIALS" or "ADVANCED".
+ - MCP 2.0 Only.
+ choices: [ESSENTIALS, ADVANCED]
+ default: ESSENTIALS
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create an MCP 1.0 network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA5
+ name: mynet
+
+- name: Create an MCP 2.0 network
+ community.general.dimensiondata_network:
+ region: na
+ mcp_user: my_user
+ mcp_password: my_password
+ location: NA9
+ name: mynet
+ service_plan: ADVANCED
+
+- name: Delete a network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA1
+ name: mynet
+ state: absent
+'''
+
+RETURN = '''
+network:
+ description: Dictionary describing the network.
+ returned: On success when I(state=present).
+ type: complex
+ contains:
+ id:
+ description: Network ID.
+ type: str
+ sample: "8c787000-a000-4050-a215-280893411a7d"
+ name:
+ description: Network name.
+ type: str
+ sample: "My network"
+ description:
+ description: Network description.
+ type: str
+ sample: "My network description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ status:
+ description: Network status. (MCP 2.0 only)
+ type: str
+ sample: NORMAL
+ private_net:
+ description: Private network subnet. (MCP 1.0 only)
+ type: str
+ sample: "10.2.3.0"
+ multicast:
+ description: Multicast enabled? (MCP 1.0 only)
+ type: bool
+ sample: false
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
+from ansible.module_utils._text import to_native
+
+if HAS_LIBCLOUD:
+ from libcloud.compute.base import NodeLocation
+ from libcloud.common.dimensiondata import DimensionDataAPIException
+
+
+class DimensionDataNetworkModule(DimensionDataModule):
+ """
+ The dimensiondata_network module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data network module.
+ """
+
+ super(DimensionDataNetworkModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.service_plan = self.module.params['service_plan']
+ self.state = self.module.params['state']
+
+ def state_present(self):
+ network = self._get_network()
+
+ if network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network already exists',
+ network=self._network_to_dict(network)
+ )
+
+ network = self._create_network()
+
+ self.module.exit_json(
+ changed=True,
+ msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
+ network=self._network_to_dict(network)
+ )
+
+ def state_absent(self):
+ network = self._get_network()
+
+ if not network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network "%s" does not exist' % self.name,
+ network=self._network_to_dict(network)
+ )
+
+ self._delete_network(network)
+
+ def _get_network(self):
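+ # MCP 1.0 exposes plain networks, while MCP 2.0 exposes network domains; both are matched on name.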
+ if self.mcp_version == '1.0':
+ networks = self.driver.list_networks(location=self.location)
+ else:
+ networks = self.driver.ex_list_network_domains(location=self.location)
+
+ matched_network = [network for network in networks if network.name == self.name]
+ if matched_network:
+ return matched_network[0]
+
+ return None
+
+ def _network_to_dict(self, network):
+ network_dict = dict(
+ id=network.id,
+ name=network.name,
+ description=network.description
+ )
+
+ if isinstance(network.location, NodeLocation):
+ network_dict['location'] = network.location.id
+ else:
+ network_dict['location'] = network.location
+
+ if self.mcp_version == '1.0':
+ network_dict['private_net'] = network.private_net
+ network_dict['multicast'] = network.multicast
+ network_dict['status'] = None
+ else:
+ network_dict['private_net'] = None
+ network_dict['multicast'] = None
+ network_dict['status'] = network.status
+
+ return network_dict
+
+ def _create_network(self):
+
+ # Make sure the service_plan argument is set ('service_plan' always exists in params since it has a default)
+ if self.mcp_version == '2.0' and not self.module.params['service_plan']:
+ self.module.fail_json(
+ msg='service_plan required when creating network and location is MCP 2.0'
+ )
+
+ # Create network
+ try:
+ if self.mcp_version == '1.0':
+ network = self.driver.ex_create_network(
+ self.location,
+ self.name,
+ description=self.description
+ )
+ else:
+ network = self.driver.ex_create_network_domain(
+ self.location,
+ self.name,
+ self.module.params['service_plan'],
+ description=self.description
+ )
+ except DimensionDataAPIException as e:
+
+ self.module.fail_json(
+ msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ if self.module.params['wait'] is True:
+ network = self._wait_for_network_state(network.id, 'NORMAL')
+
+ return network
+
+ def _delete_network(self, network):
+ try:
+ if self.mcp_version == '1.0':
+ deleted = self.driver.ex_delete_network(network)
+ else:
+ deleted = self.driver.ex_delete_network_domain(network)
+
+ if deleted:
+ self.module.exit_json(
+ changed=True,
+ msg="Deleted network with id %s" % network.id
+ )
+
+ self.module.fail_json(
+ msg="Unexpected failure deleting network with id %s" % network.id
+ )
+
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+ msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ def _wait_for_network_state(self, net_id, state_to_wait_for):
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_network_domain,
+ self.module.params['wait_poll_interval'],
+ self.module.params['wait_time'],
+ net_id
+ )
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+ msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
+ exception=traceback.format_exc()
+ )
+
+
+def main():
+ module = DimensionDataNetworkModule()
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
new file mode 100644
index 00000000..26c621f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_vlan
+short_description: Manage a VLAN in a Cloud Control network domain.
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Manage VLANs in Cloud Control network domains.
+author: 'Adam Friedman (@tintoy)'
+options:
+ name:
+ description:
+ - The name of the target VLAN.
+ type: str
+ required: true
+ description:
+ description:
+ - A description of the VLAN.
+ type: str
+ network_domain:
+ description:
+ - The Id or name of the target network domain.
+ required: true
+ type: str
+ private_ipv4_base_address:
+ description:
+ - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
+ type: str
+ private_ipv4_prefix_size:
+ description:
+ - The size of the IPv4 address space, e.g. 24.
+ - Required if C(private_ipv4_base_address) is specified.
+ type: int
+ state:
+ description:
+ - The desired state for the target VLAN.
+ - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
+ choices: [present, absent, readonly]
+ default: present
+ type: str
+ allow_expand:
+ description:
+ - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
+ - If C(False), the module will fail under these conditions.
+ - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Add or update VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ description: A test VLAN
+ private_ipv4_base_address: 192.168.23.0
+ private_ipv4_prefix_size: 24
+ state: present
+ wait: yes
+
+- name: Read / get VLAN details
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ state: readonly
+ wait: yes
+
+- name: Delete a VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan_1
+ state: absent
+ wait: yes
+'''
+
+RETURN = '''
+vlan:
+ description: Dictionary describing the VLAN.
+ returned: On success when I(state) is 'present'
+ type: complex
+ contains:
+ id:
+ description: VLAN ID.
+ type: str
+ sample: "aaaaa000-a000-4050-a215-2808934ccccc"
+ name:
+ description: VLAN name.
+ type: str
+ sample: "My VLAN"
+ description:
+ description: VLAN description.
+ type: str
+ sample: "My VLAN description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ private_ipv4_base_address:
+ description: The base address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.0
+ private_ipv4_prefix_size:
+ description: The prefix size for the VLAN's private IPV4 network.
+ type: int
+ sample: 24
+ private_ipv4_gateway_address:
+ description: The gateway address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.1
+ private_ipv6_base_address:
+ description: The base address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:0
+ private_ipv6_prefix_size:
+ description: The prefix size for the VLAN's IPV6 network.
+ type: int
+ sample: 64
+ private_ipv6_gateway_address:
+ description: The gateway address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:1
+ status:
+ description: VLAN status.
+ type: str
+ sample: NORMAL
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
+
+try:
+ from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
+
+ HAS_LIBCLOUD = True
+
+except ImportError:
+ DimensionDataVlan = None
+
+ HAS_LIBCLOUD = False
+
+
+class DimensionDataVlanModule(DimensionDataModule):
+ """
+ The dimensiondata_vlan module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data VLAN module.
+ """
+
+ super(DimensionDataVlanModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(required=True, type='str'),
+ description=dict(default='', type='str'),
+ network_domain=dict(required=True, type='str'),
+ private_ipv4_base_address=dict(default='', type='str'),
+ private_ipv4_prefix_size=dict(default=0, type='int'),
+ allow_expand=dict(required=False, default=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent', 'readonly'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.network_domain_selector = self.module.params['network_domain']
+ self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
+ self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
+ self.state = self.module.params['state']
+ self.allow_expand = self.module.params['allow_expand']
+
+ if self.wait and self.state != 'present':
+ self.module.fail_json(
+ msg='The wait parameter is only supported when state is "present".'
+ )
+
+ def state_present(self):
+ """
+ Ensure that the target VLAN is present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ vlan = self._create_vlan(network_domain)
+ self.module.exit_json(
+ msg='Created VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+ else:
+ diff = VlanDiff(vlan, self.module.params)
+ if not diff.has_changes():
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+
+ return
+
+ try:
+ diff.ensure_legal_change()
+ except InvalidVlanChangeError as invalid_vlan_change:
+ self.module.fail_json(
+ msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
+ self.name, self.network_domain_selector, invalid_vlan_change
+ )
+ )
+
+ if diff.needs_expand() and not self.allow_expand:
+ self.module.fail_json(
+ msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
+ self.private_ipv4_prefix_size
+ ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
+ vlan.private_ipv4_range_size
+ ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
+ )
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ if diff.needs_edit():
+ vlan.name = self.name
+ vlan.description = self.description
+
+ self.driver.ex_update_vlan(vlan)
+
+ if diff.needs_expand():
+ vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
+ self.driver.ex_expand_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Updated VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ def state_readonly(self):
+ """
+ Read the target VLAN's state.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if vlan:
+ self.module.exit_json(
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+ else:
+ self.module.fail_json(
+ msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ )
+ )
+
+ def state_absent(self):
+ """
+ Ensure that the target VLAN is not present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=False
+ )
+
+ return
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ self._delete_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ def _get_vlan(self, network_domain):
+ """
+ Retrieve the target VLAN details from CloudControl.
+
+ :param network_domain: The target network domain.
+ :return: The VLAN, or None if the target VLAN was not found.
+ :rtype: DimensionDataVlan
+ """
+
+ vlans = self.driver.ex_list_vlans(
+ location=self.location,
+ network_domain=network_domain
+ )
+ matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
+ if matching_vlans:
+ return matching_vlans[0]
+
+ return None
+
+ def _create_vlan(self, network_domain):
+ vlan = self.driver.ex_create_vlan(
+ network_domain,
+ self.name,
+ self.private_ipv4_base_address,
+ self.description,
+ self.private_ipv4_prefix_size
+ )
+
+ if self.wait:
+ vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
+
+ return vlan
+
+ def _delete_vlan(self, vlan):
+ try:
+ self.driver.ex_delete_vlan(vlan)
+
+ # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
+ if self.wait:
+ self._wait_for_vlan_state(vlan, 'NOT_FOUND')
+
+ except DimensionDataAPIException as api_exception:
+ self.module.fail_json(
+ msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
+ vlan.id, api_exception.msg
+ )
+ )
+
+ def _wait_for_vlan_state(self, vlan, state_to_wait_for):
+ network_domain = self._get_network_domain()
+
+ wait_poll_interval = self.module.params['wait_poll_interval']
+ wait_time = self.module.params['wait_time']
+
+ # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
+
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_vlan,
+ wait_poll_interval,
+ wait_time,
+ vlan
+ )
+
+ except DimensionDataAPIException as api_exception:
+ if api_exception.code != 'RESOURCE_NOT_FOUND':
+ raise
+
+ return DimensionDataVlan(
+ id=vlan.id,
+ status='NOT_FOUND',
+ name='',
+ description='',
+ private_ipv4_range_address='',
+ private_ipv4_range_size=0,
+ ipv4_gateway='',
+ ipv6_range_address='',
+ ipv6_range_size=0,
+ ipv6_gateway='',
+ location=self.location,
+ network_domain=network_domain
+ )
+
+ def _get_network_domain(self):
+ """
+ Retrieve the target network domain from the Cloud Control API.
+
+ :return: The network domain.
+ """
+
+ try:
+ return self.get_network_domain(
+ self.network_domain_selector, self.location
+ )
+ except UnknownNetworkError:
+ self.module.fail_json(
+ msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
+ self.network_domain_selector, self.location
+ )
+ )
+
+ return None
+
+
+class InvalidVlanChangeError(Exception):
+ """
+ Error raised when an illegal change to VLAN state is attempted.
+ """
+
+ pass
+
+
+class VlanDiff(object):
+ """
+ Represents differences between VLAN information (from CloudControl) and module parameters.
+ """
+
+ def __init__(self, vlan, module_params):
+ """
+
+ :param vlan: The VLAN information from CloudControl.
+ :type vlan: DimensionDataVlan
+ :param module_params: The module parameters.
+ :type module_params: dict
+ """
+
+ self.vlan = vlan
+ self.module_params = module_params
+
+ self.name_changed = module_params['name'] != vlan.name
+ self.description_changed = module_params['description'] != vlan.description
+ self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
+ self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
+
+ # Is configured prefix size greater than or less than the actual prefix size?
+ private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
+ self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
+ self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
+
+ def has_changes(self):
+ """
+ Does the VlanDiff represent any changes between the VLAN and module configuration?
+
+ :return: True, if there are changes; otherwise, False.
+ """
+
+ return self.needs_edit() or self.needs_expand()
+
+ def ensure_legal_change(self):
+ """
+ Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
+
+ - private_ipv4_base_address cannot be changed
+ - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
+
+ :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
+ """
+
+ # Cannot change base address for private IPv4 network.
+ if self.private_ipv4_base_address_changed:
+ raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
+
+ # Cannot shrink private IPv4 network (by increasing prefix size).
+ if self.private_ipv4_prefix_size_increased:
+ raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
+
+ def needs_edit(self):
+ """
+ Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
+
+ :return: True, if an Edit operation is required; otherwise, False.
+ """
+
+ return self.name_changed or self.description_changed
+
+ def needs_expand(self):
+ """
+ Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
+
+ The VLAN's network is expanded by reducing the size of its network prefix.
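+ For example, going from a 24-bit prefix (/24, 256 addresses) to a 23-bit prefix (/23, 512 addresses) doubles the address space.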
+
+ :return: True, if an Expand operation is required; otherwise, False.
+ """
+
+ return self.private_ipv4_prefix_size_decreased
+
+
+def vlan_to_dict(vlan):
+ return {
+ 'id': vlan.id,
+ 'name': vlan.name,
+ 'description': vlan.description,
+ 'location': vlan.location.id,
+ 'private_ipv4_base_address': vlan.private_ipv4_range_address,
+ 'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
+ 'private_ipv4_gateway_address': vlan.ipv4_gateway,
+ 'ipv6_base_address': vlan.ipv6_range_address,
+ 'ipv6_prefix_size': vlan.ipv6_range_size,
+ 'ipv6_gateway_address': vlan.ipv6_gateway,
+ 'status': vlan.status
+ }
+
+
+def main():
+ module = DimensionDataVlanModule()
+
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'readonly':
+ module.state_readonly()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py
new file mode 100644
index 00000000..10161c04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: django_manage
+short_description: Manages a Django application.
+description:
+ - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
+ C(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
+options:
+ command:
+ description:
+ - The name of the Django management command to run. Built-in commands are C(cleanup), C(collectstatic),
+ C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may
+ prompt for user input should be run with the C(--noinput) flag.
+ - The module will perform some basic parameter validation (when applicable) to the commands C(cleanup),
+ C(collectstatic), C(createcachetable), C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ type: str
+ required: true
+ project_path:
+ description:
+ - The path to the root of the Django application where B(manage.py) lives.
+ type: path
+ required: true
+ aliases: [app_path, chdir]
+ settings:
+ description:
+ - The Python path to the application's settings module, such as C(myapp.settings).
+ type: path
+ required: false
+ pythonpath:
+ description:
+ - A directory to add to the Python path. Typically used to include the settings module if it is located
+ external to the application directory.
+ type: path
+ required: false
+ aliases: [python_path]
+ virtualenv:
+ description:
+ - An optional path to a I(virtualenv) installation to use while running the manage application.
+ type: path
+ aliases: [virtual_env]
+ apps:
+ description:
+ - A list of space-delimited apps to target. Used by the C(test) command.
+ type: str
+ required: false
+ cache_table:
+ description:
+ - The name of the table used for database-backed caching. Used by the C(createcachetable) command.
+ type: str
+ required: false
+ clear:
+ description:
+ - Clear the existing files before trying to copy or link the original file.
+ - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
+ required: false
+ default: no
+ type: bool
+ database:
+ description:
+ - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb),
+ and C(migrate) commands.
+ type: str
+ required: false
+ failfast:
+ description:
+ - Fail the command immediately if a test fails. Used by the C(test) command.
+ required: false
+ default: false
+ type: bool
+ aliases: [fail_fast]
+ fixtures:
+ description:
+ - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command.
+ type: str
+ required: false
+ skip:
+ description:
+ - Will skip over out-of-order missing migrations. This parameter can only be used with the C(migrate) command.
+ required: false
+ type: bool
+ merge:
+ description:
+ - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter can only
+ be used with the C(migrate) command.
+ required: false
+ type: bool
+ link:
+ description:
+ - Will create links to the files instead of copying them. This parameter can only be used with the
+ C(collectstatic) command.
+ required: false
+ type: bool
+ liveserver:
+ description:
+ - This parameter was implemented a long time ago in a galaxy far, far away. It probably relates to the
+ django-liveserver package, which is no longer updated.
+ - Hence, it will be considered DEPRECATED and should be removed in a future release.
+ type: str
+ required: false
+ aliases: [live_server]
+ testrunner:
+ description:
+ - "From the Django docs: Controls the test runner class that is used to execute tests."
+ - This parameter is passed as-is to C(manage.py).
+ type: str
+ required: false
+ aliases: [test_runner]
+notes:
+ - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter
+ is specified.
+ - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already
+ exist at the given location.
+ - This module assumes English error messages for the C(createcachetable) command to detect table existence,
+ unfortunately.
+ - To be able to use the C(migrate) command with django versions < 1.7, you must have C(south) installed and added
+ as an app in your settings.
+ - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings.
+ - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang,
+ i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
+requirements: [ "virtualenv", "django" ]
+author: "Scott Anderson (@tastychutney)"
+'''
+
+EXAMPLES = """
+- name: Run cleanup on the application installed in django_dir
+ community.general.django_manage:
+ command: cleanup
+ project_path: "{{ django_dir }}"
+
+- name: Load the initial_data fixture into the application
+ community.general.django_manage:
+ command: loaddata
+ project_path: "{{ django_dir }}"
+ fixtures: "{{ initial_data }}"
+
+- name: Run syncdb on the application
+ community.general.django_manage:
+ command: syncdb
+ project_path: "{{ django_dir }}"
+ settings: "{{ settings_app_name }}"
+ pythonpath: "{{ settings_dir }}"
+ virtualenv: "{{ virtualenv_dir }}"
+
+- name: Run the SmokeTest test case from the main app. Useful for testing deploys
+ community.general.django_manage:
+ command: test
+ project_path: "{{ django_dir }}"
+ apps: main.SmokeTest
+
+- name: Create an initial superuser
+ community.general.django_manage:
+ command: "createsuperuser --noinput --username=admin --email=admin@example.com"
+ project_path: "{{ django_dir }}"
+"""
+
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _fail(module, cmd, out, err, **kwargs):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+ msg += "\n:stderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg, **kwargs)
+
+
+def _ensure_virtualenv(module):
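+ # Create the virtualenv if it does not exist yet, then "activate" it for this
+ # process by prefixing PATH with its bin/ directory and exporting VIRTUAL_ENV.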
+
+ venv_param = module.params['virtualenv']
+ if venv_param is None:
+ return
+
+ vbin = os.path.join(venv_param, 'bin')
+ activate = os.path.join(vbin, 'activate')
+
+ if not os.path.exists(activate):
+ virtualenv = module.get_bin_path('virtualenv', True)
+ vcmd = [virtualenv, venv_param]
+ rc, out_venv, err_venv = module.run_command(vcmd)
+ if rc != 0:
+ _fail(module, vcmd, out_venv, err_venv)
+
+ os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
+ os.environ["VIRTUAL_ENV"] = venv_param
+
+
+def createcachetable_check_changed(output):
+ return "already exists" not in output
+
+
+def flush_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def loaddata_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def syncdb_filter_output(line):
+ return ("Creating table " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line)
+
+
+def migrate_filter_output(line):
+ return ("Migrating forwards " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line) \
+ or ("Applying" in line)
+
+
+def collectstatic_filter_output(line):
+ return line and "0 static files" not in line
+
+
+def main():
+ command_allowed_param_map = dict(
+ cleanup=(),
+ createcachetable=('cache_table', 'database', ),
+ flush=('database', ),
+ loaddata=('database', 'fixtures', ),
+ syncdb=('database', ),
+ test=('failfast', 'testrunner', 'liveserver', 'apps', ),
+ validate=(),
+ migrate=('apps', 'skip', 'merge', 'database',),
+ collectstatic=('clear', 'link', ),
+ )
+
+ command_required_param_map = dict(
+ loaddata=('fixtures', ),
+ )
+
+ # forces --noinput on every command that needs it
+ noinput_commands = (
+ 'flush',
+ 'syncdb',
+ 'migrate',
+ 'test',
+ 'collectstatic',
+ )
+
+ # These params are allowed for certain commands only
+ specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
+
+ # These params are automatically added to the command if present
+ general_params = ('settings', 'pythonpath', 'database',)
+ specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
+ end_of_command_params = ('apps', 'cache_table', 'fixtures')
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True, type='str'),
+ project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']),
+ settings=dict(default=None, required=False, type='path'),
+ pythonpath=dict(default=None, required=False, type='path', aliases=['python_path']),
+ virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
+
+ apps=dict(default=None, required=False),
+ cache_table=dict(default=None, required=False, type='str'),
+ clear=dict(default=False, required=False, type='bool'),
+ database=dict(default=None, required=False, type='str'),
+ failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']),
+ fixtures=dict(default=None, required=False, type='str'),
+ liveserver=dict(default=None, required=False, type='str', aliases=['live_server'],
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ testrunner=dict(default=None, required=False, type='str', aliases=['test_runner']),
+ skip=dict(default=None, required=False, type='bool'),
+ merge=dict(default=None, required=False, type='bool'),
+ link=dict(default=None, required=False, type='bool'),
+ ),
+ )
+
+ command = module.params['command']
+ project_path = module.params['project_path']
+ virtualenv = module.params['virtualenv']
+
+ for param in specific_params:
+ value = module.params[param]
+ if param in specific_boolean_params:
+ value = module.boolean(value)
+ if value and param not in command_allowed_param_map.get(command, ()):
+ module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
+
+ for param in command_required_param_map.get(command, ()):
+ if not module.params[param]:
+ module.fail_json(msg='%s param is required for command=%s' % (param, command))
+
+ _ensure_virtualenv(module)
+
+ cmd = "./manage.py %s" % (command, )
+
+ if command in noinput_commands:
+ cmd = '%s --noinput' % cmd
+
+ for param in general_params:
+ if module.params[param]:
+ cmd = '%s --%s=%s' % (cmd, param, module.params[param])
+
+ for param in specific_boolean_params:
+ if module.boolean(module.params[param]):
+ cmd = '%s --%s' % (cmd, param)
+
+ # these params always get tacked on the end of the command
+ for param in end_of_command_params:
+ if module.params[param]:
+ cmd = '%s %s' % (cmd, module.params[param])
+
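+ # At this point cmd is the complete manage.py invocation. With hypothetical parameters
+ # command=migrate, database=default and merge=true it would read:
+ # "./manage.py migrate --noinput --database=default --merge"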
+ rc, out, err = module.run_command(cmd, cwd=project_path)
+ if rc != 0:
+ if command == 'createcachetable' and 'table' in err and 'already exists' in err:
+ out = 'already exists.'
+ else:
+ if "Unknown command:" in err:
+ _fail(module, cmd, err, "Unknown django command: %s" % command)
+ _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
+
+ changed = False
+
+ lines = out.split('\n')
+ filt = globals().get(command + "_filter_output", None)
+ if filt:
+ filtered_output = list(filter(filt, lines))
+ if len(filtered_output):
+ changed = True
+ check_changed = globals().get("{0}_check_changed".format(command), None)
+ if check_changed:
+ changed = check_changed(out)
+
+ module.exit_json(changed=changed, out=out, cmd=cmd, app_path=project_path, project_path=project_path,
+ virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py
new file mode 100644
index 00000000..1c814a9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsimple
+short_description: Interface with dnsimple.com (a DNS hosting service)
+description:
+ - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
+notes:
+ - DNSimple API v1 is deprecated. Please install dnsimple-python>=1.0.0 which uses v2 API.
+options:
+ account_email:
+ description:
+ - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
+ - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
+ type: str
+ account_api_token:
+ description:
+ - Account API token. See I(account_email) for more information.
+ type: str
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
+ - If omitted, a list of domains will be returned.
+ - If domain is present but the domain doesn't exist, it will be created.
+ type: str
+ record:
+ description:
+ - Record to add. If blank, a record for the domain will be created; supports the wildcard (*).
+ type: str
+ record_ids:
+ description:
+ - List of record IDs to act on. With C(state=present) the module fails unless they all exist; with C(state=absent) they are all removed.
+ type: list
+ type:
+ description:
+ - The type of DNS record to create.
+ choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
+ type: str
+ ttl:
+ description:
+ - The TTL to give the new record in seconds.
+ default: 3600
+ type: int
+ value:
+ description:
+ - Record value.
+ - Must be specified when trying to ensure a record exists.
+ type: str
+ priority:
+ description:
+ - Record priority.
+ type: int
+ state:
+ description:
+ - whether the record should exist or not.
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+ - Only use when C(state) is set to C(present) on a record.
+ type: 'bool'
+ default: no
+requirements:
+ - "dnsimple >= 1.0.0"
+author: "Alex Coomans (@drcapulet)"
+'''
+
+EXAMPLES = '''
+- name: Authenticate using email and API token and fetch all domains
+ community.general.dnsimple:
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ delegate_to: localhost
+
+- name: Fetch my.com domain records
+ community.general.dnsimple:
+ domain: my.com
+ state: present
+ delegate_to: localhost
+ register: records
+
+- name: Delete a domain
+ community.general.dnsimple:
+ domain: my.com
+ state: absent
+ delegate_to: localhost
+
+- name: Create a test.my.com A record to point to 127.0.0.1
+ community.general.dnsimple:
+ domain: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ delegate_to: localhost
+ register: record
+
+- name: Delete record using record_ids
+ community.general.dnsimple:
+ domain: my.com
+ record_ids: '{{ record["id"] }}'
+ state: absent
+ delegate_to: localhost
+
+- name: Create a my.com CNAME record to example.com
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: present
+ delegate_to: localhost
+
+- name: Change TTL value for a record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ delegate_to: localhost
+
+- name: Delete the record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r"""# """
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+DNSIMPLE_IMP_ERR = None
+try:
+ from dnsimple import DNSimple
+ from dnsimple.dnsimple import __version__ as dnsimple_version
+ from dnsimple.dnsimple import DNSimpleException
+ HAS_DNSIMPLE = True
+except ImportError:
+ DNSIMPLE_IMP_ERR = traceback.format_exc()
+ HAS_DNSIMPLE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_email=dict(type='str'),
+ account_api_token=dict(type='str', no_log=True),
+ domain=dict(type='str'),
+ record=dict(type='str'),
+ record_ids=dict(type='list'),
+ type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO',
+ 'POOL']),
+ ttl=dict(type='int', default=3600),
+ value=dict(type='str'),
+ priority=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ solo=dict(type='bool', default=False),
+ ),
+ required_together=[
+ ['record', 'value']
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_DNSIMPLE:
+ module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR)
+
+ if LooseVersion(dnsimple_version) < LooseVersion('1.0.0'):
+ module.fail_json(msg="Current version of dnsimple Python module [%s] uses 'v1' API which is deprecated."
+ " Please upgrade to version 1.0.0 and above to use dnsimple 'v2' API." % dnsimple_version)
+
+ account_email = module.params.get('account_email')
+ account_api_token = module.params.get('account_api_token')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
+
+ if account_email and account_api_token:
+ client = DNSimple(email=account_email, api_token=account_api_token)
+ elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
+ client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
+ else:
+ client = DNSimple()
+
+ try:
+ # Let's figure out what operation we want to do
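+ # The work is dispatched on the parameters:
+ # - no domain: return the list of all domains
+ # - domain only: create or delete the domain itself
+ # - domain + record: create/update/delete a single record
+ # - domain + record_ids: ensure the listed record ids all exist (present) or are all removed (absent)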
+
+ # No domain, return a list
+ if not domain:
+ domains = client.domains()
+ module.exit_json(changed=False, result=[d['domain'] for d in domains])
+
+ # Domain & No record
+ if domain and record is None and not record_ids:
+ domains = [d['domain'] for d in client.domains()]
+ if domain.isdigit():
+ dr = next((d for d in domains if d['id'] == int(domain)), None)
+ else:
+ dr = next((d for d in domains if d['name'] == domain), None)
+ if state == 'present':
+ if dr:
+ module.exit_json(changed=False, result=dr)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
+
+ # state is absent
+ else:
+ if dr:
+ if not module.check_mode:
+ client.delete(domain)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # need the not none check since record could be an empty string
+ if domain and record is not None:
+ records = [r['record'] for r in client.records(str(domain), params={'name': record})]
+
+ if not record_type:
+ module.fail_json(msg="Missing the record type")
+
+ if not value:
+ module.fail_json(msg="Missing the record value")
+
+ rr = next((r for r in records if r['name'] == record and r['type'] == record_type and r['content'] == value), None)
+
+ if state == 'present':
+ changed = False
+ if is_solo:
+ # delete any records that have the same name and record type
+ same_type = [r['id'] for r in records if r['name'] == record and r['type'] == record_type]
+ if rr:
+ same_type = [rid for rid in same_type if rid != rr['id']]
+ if same_type:
+ if not module.check_mode:
+ for rid in same_type:
+ client.delete_record(str(domain), rid)
+ changed = True
+ if rr:
+ # check if we need to update
+ if rr['ttl'] != ttl or rr['priority'] != priority:
+ data = {}
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
+ else:
+ module.exit_json(changed=changed, result=rr)
+ else:
+ # create it
+ data = {
+ 'name': record,
+ 'type': record_type,
+ 'content': value,
+ }
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
+
+ # state is absent
+ else:
+ if rr:
+ if not module.check_mode:
+ client.delete_record(str(domain), rr['id'])
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # Make sure these record_ids either all exist or none
+ if domain and record_ids:
+ current_records = [str(r['record']['id']) for r in client.records(str(domain))]
+ wanted_records = [str(r) for r in record_ids]
+ if state == 'present':
+ difference = list(set(wanted_records) - set(current_records))
+ if difference:
+ module.fail_json(msg="Missing the following records: %s" % difference)
+ else:
+ module.exit_json(changed=False)
+
+ # state is absent
+ else:
+ difference = list(set(wanted_records) & set(current_records))
+ if difference:
+ if not module.check_mode:
+ for rid in difference:
+ client.delete_record(str(domain), rid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ except DNSimpleException as e:
+ module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
+
+ module.fail_json(msg="Unknown what you wanted me to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
new file mode 100644
index 00000000..75135c82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsmadeeasy
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
+description:
+ - >
+ Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
+ monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
+options:
+ account_key:
+ description:
+ - Account API Key.
+ required: true
+ type: str
+
+ account_secret:
+ description:
+ - Account Secret Key.
+ required: true
+ type: str
+
+ domain:
+ description:
+      - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
+        resolution.
+ required: true
+ type: str
+
+ sandbox:
+ description:
+ - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
+ type: bool
+ default: 'no'
+
+ record_name:
+ description:
+      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
+        of the state argument.
+ type: str
+
+ record_type:
+ description:
+ - Record type.
+ choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+ type: str
+
+ record_value:
+ description:
+ - >
+ Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
+        SRV: <priority> <weight> <port> <target name>, TXT: <text value>
+ - >
+        If record_value is not specified, no changes will be made and the record will be returned in 'result'
+ (in other words, this module can be used to fetch a record's current id, type, and ttl)
+ type: str
+
+ record_ttl:
+ description:
+      - Record's "Time to live". Number of seconds the record remains cached in DNS servers.
+ default: 1800
+ type: int
+
+ state:
+ description:
+      - Whether the record should exist or not.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+ monitor:
+ description:
+ - If C(yes), add or change the monitor. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ systemDescription:
+ description:
+ - Description used by the monitor.
+ default: ''
+ type: str
+
+ maxEmails:
+ description:
+ - Number of emails sent to the contact list by the monitor.
+ default: 1
+ type: int
+
+ protocol:
+ description:
+ - Protocol used by the monitor.
+ default: 'HTTP'
+ choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
+ type: str
+
+ port:
+ description:
+ - Port used by the monitor.
+ default: 80
+ type: int
+
+ sensitivity:
+ description:
+      - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3.
+ default: 'Medium'
+ choices: ['Low', 'Medium', 'High']
+ type: str
+
+ contactList:
+ description:
+ - Name or id of the contact list that the monitor will notify.
+ - The default C('') means the Account Owner.
+ default: ''
+ type: str
+
+ httpFqdn:
+ description:
+ - The fully qualified domain name used by the monitor.
+ type: str
+
+ httpFile:
+ description:
+      - The file at the FQDN that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ httpQueryString:
+ description:
+ - The string in the httpFile that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ failover:
+ description:
+ - If C(yes), add or change the failover. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ autoFailover:
+ description:
+ - If true, fallback to the primary IP address is manual after a failover.
+ - If false, fallback to the primary IP address is automatic after a failover.
+ type: bool
+ default: 'no'
+
+ ip1:
+ description:
+ - Primary IP address for the failover.
+ - Required if adding or changing the monitor or failover.
+ type: str
+
+ ip2:
+ description:
+ - Secondary IP address for the failover.
+ - Required if adding or changing the failover.
+ type: str
+
+ ip3:
+ description:
+ - Tertiary IP address for the failover.
+ type: str
+
+ ip4:
+ description:
+ - Quaternary IP address for the failover.
+ type: str
+
+ ip5:
+ description:
+ - Quinary IP address for the failover.
+ type: str
+
+notes:
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
+ seconds of actual time by using NTP.
+  - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
+    These values can be registered and used in your playbooks.
+ - Only A records can have a monitor or failover.
+ - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
+ - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
+ - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
+
+requirements: [ hashlib, hmac ]
+author: "Brice Burgess (@briceburg)"
+'''
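
As the record_value description above implies, MX and SRV values pack several API fields into a single space-separated string. A hedged sketch of the split this module performs later in main() (the helper name is illustrative, not part of the module):

    # Illustrative splitter for the packed record_value convention:
    # MX: "<priority> <target>", SRV: "<priority> <weight> <port> <target>".
    def split_record_value(record_type, record_value):
        parts = record_value.split(" ")
        if record_type == "MX":
            return {"mxLevel": parts[0], "value": parts[1]}
        if record_type == "SRV":
            return {"priority": parts[0], "weight": parts[1],
                    "port": parts[2], "value": parts[3]}
        return {"value": record_value}

    print(split_record_value("MX", "10 mail.example.com"))
    # {'mxLevel': '10', 'value': 'mail.example.com'}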
+
+EXAMPLES = '''
+- name: Fetch my.com domain records
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ register: response
+
+- name: Create a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+
+- name: Update the previously created record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_value: 192.0.2.23
+
+- name: Fetch a specific record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ register: response
+
+- name: Delete a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ record_type: A
+ state: absent
+ record_name: test
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+
+- name: Add a failover with additional fallback IP addresses
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ ip3: 127.0.0.4
+ ip4: 127.0.0.5
+ ip5: 127.0.0.6
+
+- name: Add a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: my contact list
+
+- name: Add a monitor with http options
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: 1174 # contact list id
+ httpFqdn: http://my.com
+ httpFile: example
+ httpQueryString: some string
+
+- name: Add a monitor and a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ monitor: yes
+ protocol: HTTPS
+ port: 443
+ maxEmails: 1
+ systemDescription: monitoring my.com status
+ contactList: emergencycontacts
+
+- name: Remove a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: no
+
+- name: Remove a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: no
+'''
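
The monitor examples above pass protocol and sensitivity by name, but the DNS Made Easy API expects numeric codes; the module translates them through two lookup tables defined later in main(). A short illustration of that translation:

    # The same name-to-code tables main() uses for monitor options.
    PROTOCOLS = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
    SENSITIVITIES = dict(Low=8, Medium=5, High=3)  # checks before failover

    def monitor_codes(protocol, sensitivity):
        return {'protocolId': PROTOCOLS[protocol],
                'sensitivity': SENSITIVITIES[sensitivity]}

    print(monitor_codes('HTTPS', 'Medium'))  # {'protocolId': 6, 'sensitivity': 5}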
+
+# ============================================
+# DNSMadeEasy module specific support methods.
+#
+
+import json
+import hashlib
+import hmac
+import locale
+from time import strftime, gmtime
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six import string_types
+
+
+class DME2(object):
+
+ def __init__(self, apikey, secret, domain, sandbox, module):
+ self.module = module
+
+ self.api = apikey
+ self.secret = secret
+
+ if sandbox:
+ self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
+ self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
+ else:
+ self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
+
+ self.domain = str(domain)
+ self.domain_map = None # ["domain_name"] => ID
+ self.record_map = None # ["record_name"] => ID
+ self.records = None # ["record_ID"] => <record>
+ self.all_records = None
+ self.contactList_map = None # ["contactList_name"] => ID
+
+        # Look up the domain ID if the domain was passed as a name rather than an ID
+ if not self.domain.isdigit():
+ self.domain = self.getDomainByName(self.domain)['id']
+
+ self.record_url = 'dns/managed/' + str(self.domain) + '/records'
+ self.monitor_url = 'monitor'
+ self.contactList_url = 'contactList'
+
+ def _headers(self):
+ currTime = self._get_date()
+ hashstring = self._create_hash(currTime)
+ headers = {'x-dnsme-apiKey': self.api,
+ 'x-dnsme-hmac': hashstring,
+ 'x-dnsme-requestDate': currTime,
+ 'content-type': 'application/json'}
+ return headers
+
+ def _get_date(self):
+ locale.setlocale(locale.LC_TIME, 'C')
+ return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
+
+ def _create_hash(self, rightnow):
+ return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
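
The three helpers above implement the API authentication: every request carries the current GMT timestamp and an HMAC-SHA1 of that timestamp keyed with the account secret. A self-contained sketch of the same header computation (the key and secret values are made up):

    import hashlib
    import hmac
    from time import gmtime, strftime

    def dnsme_headers(api_key, secret):
        # RFC 1123 date in GMT, exactly as sent in x-dnsme-requestDate;
        # the module also forces the C locale so day/month names are English.
        now = strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
        signature = hmac.new(secret.encode(), now.encode(), hashlib.sha1).hexdigest()
        return {'x-dnsme-apiKey': api_key,
                'x-dnsme-hmac': signature,
                'x-dnsme-requestDate': now,
                'content-type': 'application/json'}

    print(dnsme_headers('example-key', 'example-secret'))

Since the signature covers only the timestamp, a skewed clock is the most common cause of authentication failures, which is why the notes above insist on NTP.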
+
+ def query(self, resource, method, data=None):
+ url = self.baseurl + resource
+ if data and not isinstance(data, string_types):
+ data = urlencode(data)
+
+ response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
+ if info['status'] not in (200, 201, 204):
+ self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
+
+ try:
+ return json.load(response)
+ except Exception:
+ return {}
+
+ def getDomain(self, domain_id):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.domains.get(domain_id, False)
+
+ def getDomainByName(self, domain_name):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.getDomain(self.domain_map.get(domain_name, 0))
+
+ def getDomains(self):
+ return self.query('dns/managed', 'GET')['data']
+
+ def getRecord(self, record_id):
+ if not self.record_map:
+ self._instMap('record')
+
+ return self.records.get(record_id, False)
+
+ # Try to find a single record matching this one.
+ # How we do this depends on the type of record. For instance, there
+ # can be several MX records for a single record_name while there can
+ # only be a single CNAME for a particular record_name. Note also that
+ # there can be several records with different types for a single name.
+ def getMatchingRecord(self, record_name, record_type, record_value):
+ # Get all the records if not already cached
+ if not self.all_records:
+ self.all_records = self.getRecords()
+
+ if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
+ for result in self.all_records:
+ if result['name'] == record_name and result['type'] == record_type:
+ return result
+ return False
+ elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
+ for result in self.all_records:
+ if record_type == "MX":
+ value = record_value.split(" ")[1]
+ # Note that TXT records are surrounded by quotes in the API response.
+ elif record_type == "TXT":
+ value = '"{0}"'.format(record_value)
+ elif record_type == "SRV":
+ value = record_value.split(" ")[3]
+ else:
+ value = record_value
+ if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
+ return result
+ return False
+ else:
+ raise Exception('record_type not yet supported')
+
+ def getRecords(self):
+ return self.query(self.record_url, 'GET')['data']
+
+    def _instMap(self, map_type):
+        # @TODO cache this call so it's executed only once per ansible execution
+        name_map = {}
+        results = {}
+
+        # iterate over e.g. self.getDomains() || self.getRecords()
+        for result in getattr(self, 'get' + map_type.title() + 's')():
+            name_map[result['name']] = result['id']
+            results[result['id']] = result
+
+        # e.g. self.domain_map || self.record_map
+        setattr(self, map_type + '_map', name_map)
+        setattr(self, map_type + 's', results)  # e.g. self.domains || self.records
+
+ def prepareRecord(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def createRecord(self, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url, 'POST', data)
+
+ def updateRecord(self, record_id, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
+
+ def deleteRecord(self, record_id):
+        # @TODO remove record from the cache when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'DELETE')
+
+ def getMonitor(self, record_id):
+ return self.query(self.monitor_url + '/' + str(record_id), 'GET')
+
+ def updateMonitor(self, record_id, data):
+ return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
+
+ def prepareMonitor(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def getContactList(self, contact_list_id):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.contactLists.get(contact_list_id, False)
+
+ def getContactlists(self):
+ return self.query(self.contactList_url, 'GET')['data']
+
+ def getContactListByName(self, name):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.getContactList(self.contactList_map.get(name, 0))
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_key=dict(required=True, no_log=True),
+ account_secret=dict(required=True, no_log=True),
+ domain=dict(required=True),
+ sandbox=dict(default=False, type='bool'),
+ state=dict(required=True, choices=['present', 'absent']),
+ record_name=dict(required=False),
+ record_type=dict(required=False, choices=[
+ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
+ record_value=dict(required=False),
+ record_ttl=dict(required=False, default=1800, type='int'),
+ monitor=dict(default=False, type='bool'),
+ systemDescription=dict(default=''),
+ maxEmails=dict(default=1, type='int'),
+ protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
+ port=dict(default=80, type='int'),
+ sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
+ contactList=dict(default=None),
+ httpFqdn=dict(required=False),
+ httpFile=dict(required=False),
+ httpQueryString=dict(required=False),
+ failover=dict(default=False, type='bool'),
+ autoFailover=dict(default=False, type='bool'),
+ ip1=dict(required=False),
+ ip2=dict(required=False),
+ ip3=dict(required=False),
+ ip4=dict(required=False),
+ ip5=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_together=[
+ ['record_value', 'record_ttl', 'record_type']
+ ],
+ required_if=[
+ ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
+ ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
+ ]
+ )
+
+ protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
+ sensitivities = dict(Low=8, Medium=5, High=3)
+
+ DME = DME2(module.params["account_key"], module.params[
+ "account_secret"], module.params["domain"], module.params["sandbox"], module)
+ state = module.params["state"]
+ record_name = module.params["record_name"]
+ record_type = module.params["record_type"]
+ record_value = module.params["record_value"]
+
+ # Follow Keyword Controlled Behavior
+ if record_name is None:
+ domain_records = DME.getRecords()
+ if not domain_records:
+ module.fail_json(
+ msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
+ module.exit_json(changed=False, result=domain_records)
+
+ # Fetch existing record + Build new one
+ current_record = DME.getMatchingRecord(record_name, record_type, record_value)
+ new_record = {'name': record_name}
+ for i in ["record_value", "record_type", "record_ttl"]:
+        if module.params[i] is not None:
+ new_record[i[len("record_"):]] = module.params[i]
+ # Special handling for mx record
+ if new_record["type"] == "MX":
+ new_record["mxLevel"] = new_record["value"].split(" ")[0]
+ new_record["value"] = new_record["value"].split(" ")[1]
+
+ # Special handling for SRV records
+ if new_record["type"] == "SRV":
+ new_record["priority"] = new_record["value"].split(" ")[0]
+ new_record["weight"] = new_record["value"].split(" ")[1]
+ new_record["port"] = new_record["value"].split(" ")[2]
+ new_record["value"] = new_record["value"].split(" ")[3]
+
+ # Fetch existing monitor if the A record indicates it should exist and build the new monitor
+ current_monitor = dict()
+ new_monitor = dict()
+ if current_record and current_record['type'] == 'A':
+ current_monitor = DME.getMonitor(current_record['id'])
+
+ # Build the new monitor
+ for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
+ 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
+ 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
+ if module.params[i] is not None:
+ if i == 'protocol':
+ # The API requires protocol to be a numeric in the range 1-6
+ new_monitor['protocolId'] = protocols[module.params[i]]
+ elif i == 'sensitivity':
+ # The API requires sensitivity to be a numeric of 8, 5, or 3
+ new_monitor[i] = sensitivities[module.params[i]]
+ elif i == 'contactList':
+ # The module accepts either the name or the id of the contact list
+ contact_list_id = module.params[i]
+ if not contact_list_id.isdigit() and contact_list_id != '':
+ contact_list = DME.getContactListByName(contact_list_id)
+ if not contact_list:
+ module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
+ contact_list_id = contact_list.get('id', '')
+ new_monitor['contactListId'] = contact_list_id
+ else:
+ # The module option names match the API field names
+ new_monitor[i] = module.params[i]
+
+ # Compare new record against existing one
+ record_changed = False
+ if current_record:
+ for i in new_record:
+ # Remove leading and trailing quote character from values because TXT records
+ # are surrounded by quotes.
+ if str(current_record[i]).strip('"') != str(new_record[i]):
+ record_changed = True
+ new_record['id'] = str(current_record['id'])
+
+ monitor_changed = False
+ if current_monitor:
+ for i in new_monitor:
+ if str(current_monitor.get(i)) != str(new_monitor[i]):
+ monitor_changed = True
+
+ # Follow Keyword Controlled Behavior
+ if state == 'present':
+ # return the record if no value is specified
+ if "value" not in new_record:
+ if not current_record:
+ module.fail_json(
+ msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ # create record and monitor as the record does not exist
+ if not current_record:
+ record = DME.createRecord(DME.prepareRecord(new_record))
+ if new_monitor.get('monitor') and record_type == "A":
+ monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
+ module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
+ else:
+ module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor))
+
+ # update the record
+ updated = False
+ if record_changed:
+ DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
+ updated = True
+ if monitor_changed:
+ DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
+ updated = True
+ if updated:
+ module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
+
+ # return the record (no changes)
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ elif state == 'absent':
+ changed = False
+ # delete the record (and the monitor/failover) if it exists
+ if current_record:
+ DME.deleteRecord(current_record['id'])
+ module.exit_json(changed=True)
+
+ # record does not exist, return w/o change.
+ module.exit_json(changed=changed)
+
+ else:
+ module.fail_json(
+ msg="'%s' is an unknown value for the state argument" % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+  - Uses Docker Compose to start, shut down, and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up), C(docker-compose stop) (with I(stopped)), or C(docker-compose restart)
+        (with I(restarted)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up), C(docker-compose stop) (with I(stopped)), or C(docker-compose restart)
+        (with I(restarted)) on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+      - When I(state) is C(present), scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+      - When I(state) is C(present), specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
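
The I(recreate) option above maps directly onto docker-compose's convergence flags: C(never) sets C(--no-recreate), C(always) sets C(--force-recreate), and C(smart) sets neither so compose can diff configurations itself, as cmd_up() below implements. A tiny sketch of the mapping:

    # Illustrative mapping from the recreate choice to compose flags,
    # mirroring the cmd_up() logic further down in this file.
    def recreate_options(recreate):
        return {u'--no-recreate': recreate == 'never',
                u'--force-recreate': recreate == 'always'}

    assert recreate_options('smart') == {u'--no-recreate': False, u'--force-recreate': False}
    assert recreate_options('always')[u'--force-recreate'] is True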
+
+EXAMPLES = '''
+# Examples use the Django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory.
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: "not output.changed "
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+          description: Metadata assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+          description: Contains a dictionary for each network of which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+              description: MAC address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+              description: Name of the image.
+ returned: always
+ type: str
+ id:
+              description: Image hash.
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+              description: Name of the image.
+ returned: always
+ type: str
+ id:
+              description: Image hash.
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+              description: The container's long ID.
+ returned: always
+ type: str
+ name:
+              description: The container's name.
+ returned: always
+ type: str
+ short_id:
+              description: The container's short ID.
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
+@contextmanager
+def stdout_redirector(path_name):
+    old_stdout = sys.stdout
+    fd = open(path_name, 'w')
+    sys.stdout = fd
+    try:
+        yield
+    finally:
+        sys.stdout = old_stdout
+        fd.close()
+
+
+@contextmanager
+def stderr_redirector(path_name):
+    old_stderr = sys.stderr
+    fd = open(path_name, 'w')
+    sys.stderr = fd
+    try:
+        yield
+    finally:
+        sys.stderr = old_stderr
+        fd.close()
+
+
+def make_redirection_tempfiles():
+    out_fd, out_redir_name = tempfile.mkstemp(prefix="ansible")
+    os.close(out_fd)  # only the path is needed; the redirectors reopen by name
+    err_fd, err_redir_name = tempfile.mkstemp(prefix="ansible")
+    os.close(err_fd)
+    return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
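
docker-compose writes progress to the process-wide stdout and stderr rather than returning it, so the helpers above capture both streams into temporary files and read them back for error reporting. A standalone sketch of the capture round trip (a re-implementation for illustration, not an import of this module):

    import os
    import sys
    import tempfile
    from contextlib import contextmanager

    @contextmanager
    def redirect_stdout_to(path_name):
        old_stdout = sys.stdout
        with open(path_name, 'w') as fd:
            sys.stdout = fd
            try:
                yield
            finally:
                sys.stdout = old_stdout

    fd, name = tempfile.mkstemp(prefix="ansible")
    os.close(fd)  # only the path is needed; the context manager reopens it
    with redirect_stdout_to(name):
        print("captured, not shown on the terminal")
    with open(name) as f:
        captured = f.read()
    os.remove(name)
    print("captured output: %r" % captured)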
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+    errors = [line.strip() for line in stderr if line.strip().startswith('ERROR:')]
+    errors.extend([line.strip() for line in stdout if line.strip().startswith('ERROR:')])
+
+    warnings = [line.strip() for line in stderr if line.strip().startswith('WARNING:')]
+    warnings.extend([line.strip() for line in stdout if line.strip().startswith('WARNING:')])
+
+    # assume either the exception body (if present) or the last error was the
+    # most fatal.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if new_image_id not in old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+            if containers:
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+ self.client.fail("Error scaling %s - expected int, got %s",
+ service_name, str(type(self.scale[service_name])))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+        project_name=dict(type='str'),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py
new file mode 100644
index 00000000..5e7e426c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_config
+
+short_description: Manage docker configs.
+
+
+description:
+ - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
+  - Adds to the metadata of new configs 'ansible_key', a SHA-224 hash of the data, which is then used
+    in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
+    unless the I(force) option is set.
+ - Updates to configs are performed by removing the config and creating it again.
+options:
+ data:
+ description:
+ - The value of the config. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+ - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
+ - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing config.
+ - If C(true), an existing config will be replaced, even if it has not been changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the config.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the config should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
+ - "Docker API >= 1.30"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - John Hu (@ushuz)
+'''
+
+EXAMPLES = '''
+
+- name: Create config foo (from a file on the control machine)
+ community.general.docker_config:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+      # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the config data
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the config
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing config
+ state: present
+
+- name: Update an existing label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the config
+ one: '1'
+ state: present
+
+- name: Force the (re-)creation of the config
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove config foo
+ community.general.docker_config:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+config_id:
+ description:
+ - The ID assigned by Docker to the config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class ConfigManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ConfigManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
+ self.data_key = hashlib.sha224(self.data).hexdigest()
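+            # The digest is stored as the 'ansible_key' label on the created
+            # config and recomputed on later runs to detect data changes.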
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_config(self):
+ ''' Find an existing config. '''
+ try:
+ configs = self.client.configs(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
+
+ for config in configs:
+ if config['Spec']['Name'] == self.name:
+ return config
+ return None
+
+ def create_config(self):
+ ''' Create a new config '''
+ config_id = None
+        # We can't see the data after creation, so we add a label we can use for an idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ config_id = self.client.create_config(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating config: %s" % to_native(exc))
+
+ if isinstance(config_id, dict):
+ config_id = config_id['ID']
+
+ return config_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the config '''
+ config = self.get_config()
+ if config:
+ self.results['config_id'] = config['ID']
+ data_changed = False
+ attrs = config.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the config
+ self.absent()
+ config_id = self.create_config()
+ self.results['changed'] = True
+ self.results['config_id'] = config_id
+ else:
+ self.results['changed'] = True
+ self.results['config_id'] = self.create_config()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the config '''
+ config = self.get_config()
+ if config:
+ try:
+ if not self.check_mode:
+ self.client.remove_config(config['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str'),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.6.0',
+ min_docker_api_version='1.30',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ ConfigManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py
new file mode 100644
index 00000000..30033ebf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py
@@ -0,0 +1,3563 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: manage docker containers
+
+description:
+ - Manage the life cycle of docker containers.
+  - Supports check mode. Run with C(--check) and C(--diff) to view the config difference and the list of actions to be taken.
+
+
+notes:
+ - For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and
+ a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
+ prevent this.
+ - If the module needs to recreate the container, it will only use the options provided to the module to create the
+ new container (except I(image)). Therefore, always specify *all* options relevant to the container.
+ - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
+ Please note that several options have default values; if the container to be restarted uses different values for
+ these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove),
+ I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior
+ can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from
+ community.general 3.0.0 on.
+
+options:
+ auto_remove:
+ description:
+ - Enable auto-removal of the container on daemon side when the container's process exits.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ type: int
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ type: list
+ elements: str
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ type: list
+ elements: str
+ cleanup:
+ description:
+ - Use with I(detach=false) to remove the container after successful execution.
+ type: bool
+ default: no
+ command:
+ description:
+ - Command to execute when the container starts. A command may be either a string or a list.
+ - Prior to version 2.4, strings were split on commas.
+ type: raw
+ comparisons:
+ description:
+      - Allows specifying how properties of existing containers are compared with
+ module options to decide whether the container should be recreated / updated
+ or not.
+ - Only options which correspond to the state of a container as handled by the
+ Docker daemon can be specified, as well as C(networks).
+      - Must be a dictionary mapping each option name to one of C(strict), C(ignore)
+ and C(allow_more_present).
+ - If C(strict) is specified, values are tested for equality, and changes always
+ result in updating or restarting. If C(ignore) is specified, changes are ignored.
+ - C(allow_more_present) is allowed only for lists, sets and dicts. If it is
+ specified for lists or sets, the container will only be updated or restarted if
+ the module option contains a value which is not present in the container's
+ options. If the option is specified for a dict, the container will only be updated
+ or restarted if the module option contains a key which isn't present in the
+        container's option, or if the value of any present key differs.
+ - The wildcard option C(*) can be used to set one of the default values C(strict)
+ or C(ignore) to *all* comparisons which are not explicitly set to other values.
+ - See the examples for details.
+ type: dict
+ container_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems with
+ containers which use different values for these options.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 3.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(container_default_behavior) to an explicit
+ value.
+ - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
+ I(paused), I(privileged), I(read_only) and I(tty) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: '0.2.0'
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period.
+      - See I(cpus) for an easier-to-use alternative.
+ type: int
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota.
+      - See I(cpus) for an easier-to-use alternative.
+ type: int
+ cpus:
+ description:
+ - Specify how much of the available CPU resources a container can use.
+ - A value of C(1.5) means that at most one and a half CPU (core) will be used.
+ type: float
+ version_added: '0.2.0'
+ cpuset_cpus:
+ description:
+      - CPUs in which to allow execution, for example C(1,3) or C(1-3).
+ type: str
+ cpuset_mems:
+ description:
+      - Memory nodes (MEMs) in which to allow execution, for example C(0-3) or C(0,1).
+ type: str
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ - If disabled, the task will reflect the status of the container run (failed if the command failed).
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes).
+ type: bool
+ devices:
+ description:
+ - List of host device bindings to add to the container.
+ - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
+ type: list
+ elements: str
+ device_read_bps:
+ description:
+ - "List of device path and read rate (bytes per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_write_bps:
+ description:
+ - "List of device and write rate (bytes per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_read_iops:
+ description:
+ - "List of device and read rate (IO per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_write_iops:
+ description:
+ - "List of device and write rate (IO per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_requests:
+ description:
+      - Allows requesting additional resources, such as GPUs.
+ type: list
+ elements: dict
+ suboptions:
+ capabilities:
+ description:
+ - List of lists of strings to request capabilities.
+ - The top-level list entries are combined by OR, and for every list entry,
+ the entries in the list it contains are combined by AND.
+ - The driver tries to satisfy one of the sub-lists.
+ - Available capabilities for the C(nvidia) driver can be found at
+ U(https://github.com/NVIDIA/nvidia-container-runtime).
+ type: list
+ elements: list
+ count:
+ description:
+          - Number of devices to request.
+ - Set to C(-1) to request all available devices.
+ type: int
+ device_ids:
+ description:
+ - List of device IDs.
+ type: list
+ elements: str
+ driver:
+ description:
+ - Which driver to use for this device.
+ type: str
+ options:
+ description:
+ - Driver-specific options.
+ type: dict
+ dns_opts:
+ description:
+ - List of DNS options.
+ type: list
+ elements: str
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ type: list
+ elements: str
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ type: list
+ elements: str
+ domainname:
+ description:
+ - Container domainname.
+ type: str
+ env:
+ description:
+ - Dictionary of key,value pairs.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
+ type: dict
+ env_file:
+ description:
+ - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+      - If a variable is also present in I(env), the I(env) value will override the one from the file.
+ type: path
+ entrypoint:
+ description:
+ - Command that overwrites the default C(ENTRYPOINT) of the image.
+ type: list
+ elements: str
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ type: dict
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+ need to be exposed again.
+ type: list
+ elements: str
+ aliases:
+ - exposed
+ - expose
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ type: bool
+ default: no
+ aliases:
+ - forcekill
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work."
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ retries:
+ description:
+          - Number of consecutive failures needed to report unhealthy.
+ - The default used by the Docker daemon is C(3).
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ - The default used by the Docker daemon is C(0s).
+ type: str
+ hostname:
+ description:
+ - The container's hostname.
+ type: str
+ ignore_image:
+ description:
+      - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+        container to the requested configuration. The evaluation includes the image version. If the image
+        version in the registry does not match the container, the container will be recreated. You can
+        stop this behavior by setting I(ignore_image) to C(true).
+ - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+ I(comparisons) option."
+ type: bool
+ default: no
+ image:
+ description:
+ - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+ will be pulled from the registry. If no tag is included, C(latest) will be used.
+ - Can also be an image ID. If this is the case, the image is assumed to be available locally.
+ The I(pull) option is ignored for this case.
+ type: str
+ init:
+ description:
+ - Run an init inside the container that forwards signals and reaps processes.
+ - This option requires Docker API >= 1.25.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container.
+ - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
+ the host's IPC namespace within the container.
+ type: str
+ keep_volumes:
+ description:
+ - Retain anonymous volumes associated with a removed container.
+ type: bool
+ default: yes
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ type: str
+ kernel_memory:
+ description:
+ - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ type: dict
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias).
+      - Setting this will force the container to be restarted.
+ type: list
+ elements: str
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses C(json-file) by default.
+ - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+ type: str
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen I(log_driver).
+ - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+ type: dict
+ aliases:
+ - log_opt
+ mac_address:
+ description:
+ - Container MAC address (e.g. 92:d0:c6:0a:29:33).
+ type: str
+ memory:
+ description:
+ - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C("0").
+ type: str
+ memory_reservation:
+ description:
+ - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swap:
+ description:
+ - "Total memory limit (memory + swap) in format C(<number>[<unit>]).
+ Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+ C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+      - If not set, the value will remain the same if the container exists and will be inherited
+        from the host machine if it is (re-)created.
+ type: int
+ mounts:
+ type: list
+ elements: dict
+ description:
+ - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+ suboptions:
+ target:
+ description:
+ - Path inside the container.
+ type: str
+ required: true
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ type: str
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows.
+ type: str
+ choices:
+ - bind
+ - npipe
+ - tmpfs
+ - volume
+ default: volume
+ read_only:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ consistency:
+ description:
+ - The consistency requirement for the mount.
+ type: str
+ choices:
+ - cached
+ - consistent
+ - default
+ - delegated
+ propagation:
+ description:
+ - Propagation mode. Only valid for the C(bind) type.
+ type: str
+ choices:
+ - private
+ - rprivate
+ - shared
+ - rshared
+ - slave
+ - rslave
+ no_copy:
+ description:
+ - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+ - The default value is C(false).
+ type: bool
+ labels:
+ description:
+ - User-defined name and labels for the volume. Only valid for the C(volume) type.
+ type: dict
+ volume_driver:
+ description:
+ - Specify the volume driver. Only valid for the C(volume) type.
+ - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: str
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen volume_driver. See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+ - "The size for the tmpfs mount in bytes in format <number>[<unit>]."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+      - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "*Note* that from community.general 3.0.0 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+      - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - To remove a container from one or more networks, use the I(purge_networks) option.
+ - Note that as opposed to C(docker run ...), M(community.general.docker_container) does not remove the default
+ network if I(networks) is specified. You need to explicitly use I(purge_networks) to enforce
+ the removal of the default network (and all other networks not explicitly mentioned in I(networks)).
+ Alternatively, use the I(networks_cli_compatible) option, which will be enabled by default from community.general 2.0.0 on.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: yes
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "When networks are provided to the module via the I(networks) option, the module
+ behaves differently than C(docker run --network): C(docker run --network other)
+ will create a container with network C(other) attached, but the default network
+ not attached. This module with I(networks: {name: other}) will create a container
+ with both C(default) and C(other) attached. If I(purge_networks) is set to C(yes),
+ the C(default) network will be removed afterwards."
+ - "If I(networks_cli_compatible) is set to C(yes), this module will behave as
+ C(docker run --network) and will *not* add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "*Note* that docker CLI also sets I(network_mode) to the name of the first network
+ added if C(--network) is specified. For more compatibility with docker CLI, you
+ explicitly have to set I(network_mode) to the name of the first network you're
+ adding. This behavior will change for community.general 3.0.0: then I(network_mode) will
+ automatically be set to the first network name in I(networks) if I(network_mode)
+ is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
+ is C(true)."
+ - Current value is C(no). A new default of C(yes) will be set in community.general 2.0.0.
+ type: bool
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ type: bool
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune
+ OOM killer preferences.
+ type: int
+ output_logs:
+ description:
+      - If set to C(true), output of the container command will be printed.
+ - Only effective when I(log_driver) is set to C(json-file) or C(journald).
+ type: bool
+ default: no
+ paused:
+ description:
+      - Use with the C(started) state to pause running processes inside the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container.
+ - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
+ Docker SDK for Python (docker) allow all values supported by the Docker daemon.
+ type: str
+ pids_limit:
+ description:
+ - Set PIDs limit for the container. It accepts an integer value.
+ - Set C(-1) for unlimited PIDs.
+ type: int
+ privileged:
+ description:
+ - Give extended privileges to the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Port ranges can be used for source and destination ports. If two ranges with
+ different lengths are specified, the shorter range will be used.
+ Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+ to the first port of the destination range, but to a free port in that range. This is the
+        same behavior as for the C(docker) command line utility.
+ - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
+ is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
+ to resolve hostnames."
+ - A value of C(all) will publish all exposed container ports to random host ports, ignoring
+ any other mappings.
+      - If the I(networks) parameter is provided, the module will inspect each network to see if there exists
+ a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+ Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+ value encountered in the list of I(networks) is the one that will be used.
+ type: list
+ elements: str
+ aliases:
+ - ports
+ pull:
+ description:
+      - If set to C(true), always pull the latest version of an image. Otherwise, the image will only be pulled
+        when missing.
+ - "*Note:* images are only pulled when specified by name. If the image is specified
+        as an image ID (hash), it cannot be pulled."
+ type: bool
+ default: no
+ purge_networks:
+ description:
+      - Remove the container from ALL networks not included in the I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ type: bool
+ default: no
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ recreate:
+ description:
+      - Use with the C(present) and C(started) states to force the re-creation of an existing container.
+ type: bool
+ default: no
+ removal_wait_timeout:
+ description:
+      - When removing an existing container, the docker daemon API call returns after the container
+        is scheduled for removal. Removal is usually very fast, but under high I/O load it can
+        take longer. By default, the module will wait until the container has been
+        removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ version_added: '0.2.0'
+ restart:
+ description:
+      - Use with the C(started) state to force a matching container to be stopped and restarted.
+ type: bool
+ default: no
+ restart_policy:
+ description:
+ - Container restart policy.
+      - Place quotes around the C(no) option, so it is not interpreted as a YAML boolean.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+      - Use with I(restart_policy) to control the maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+ - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
+ image version will be taken into account, you can also use the I(ignore_image) option.
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+      - When the container is stopped, this value will be used as a timeout for stopping the
+ container. In case the container has a custom C(StopTimeout) configuration,
+ the behavior depends on the version of the docker daemon. New versions of
+ the docker daemon will always use the container's configured C(StopTimeout)
+ value if it has been configured.
+ type: int
+ trust_image_content:
+ description:
+ - If C(yes), skip image verification.
+ - The option has never been used by the module. It will be removed in community.general 3.0.0.
+ type: bool
+ default: no
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+ - Dictionary of key,value pairs.
+ type: dict
+ user:
+ description:
+      - Sets the username or UID used, and optionally the groupname or GID, for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ community.general.docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ community.general.docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: yes
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ community.general.docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: yes
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
+- name: Container present
+ community.general.docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ community.general.docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ community.general.docker_container:
+ name: "container{{ item }}"
+ recreate: yes
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ community.general.docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ community.general.docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+ # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for
+ # older docker installs, use "syslog-tag" instead
+ tag: myservice
+
+- name: Create db container and connect to network
+ community.general.docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ community.general.docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.1.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.1.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ purge_networks: yes
+
+- name: Remove container from all networks
+ community.general.docker_container:
+ name: sleepy
+ purge_networks: yes
+
+- name: Start a container and use an env file
+ community.general.docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # don't restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+      # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Start container with block device read limit
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO per second
+ - path: /dev/sdb
+ rate: 300
+
+- name: Start container with GPUs
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_requests:
+ - # Add some specific devices to this container
+ device_ids:
+ - '0'
+ - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+    - # Add NVIDIA GPUs to this container
+ driver: nvidia
+ count: -1 # this means we want all
+ capabilities:
+        # A single OR group whose entries are ANDed: 'gpu' AND 'utility'
+ - - gpu
+ - utility
+ # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+ # for a list of capabilities supported by the nvidia driver
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_container). Note that the returned fact will be removed in
+ community.general 2.0.0.
+ - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
+ conflicts with the connection plugin.
+    - Empty if I(state) is C(absent).
+    - If I(detach) is C(false), will include C(Output) attribute containing any output from the container run.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import os
+import re
+import shlex
+import traceback
+from distutils.version import LooseVersion
+from time import sleep
+
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ compare_generic,
+ is_image_name_id,
+ sanitize_result,
+ clean_dict_booleans_for_docker_api,
+ omit_none_from_dict,
+ parse_healthcheck,
+ DOCKER_COMMON_ARGS,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version
+ if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
+ from docker.types import Ulimit, LogConfig
+ from docker import types as docker_types
+ else:
+ from docker.utils.types import Ulimit, LogConfig
+ from docker.errors import DockerException, APIError, NotFound
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+REQUIRES_CONVERSION_TO_BYTES = [
+ 'kernel_memory',
+ 'memory',
+ 'memory_reservation',
+ 'memory_swap',
+ 'shm_size'
+]
+
+
+def is_volume_permissions(mode):
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
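+# For instance, is_volume_permissions('ro,z') is True, while
+# is_volume_permissions('ro,bogus') is False.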
+
+
+def parse_port_range(range_or_port, client):
+ '''
+ Parses a string containing either a single port or a range of ports.
+
+    Returns a list of integers, one for each port in the range.
+ '''
+ if '-' in range_or_port:
+ try:
+ start, end = [int(port) for port in range_or_port.split('-')]
+ except Exception:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ if end < start:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ return list(range(start, end + 1))
+ else:
+ try:
+ return [int(range_or_port)]
+ except Exception:
+ client.fail('Invalid port: "{0}"'.format(range_or_port))
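+# For instance, parse_port_range('8000-8003', client) returns
+# [8000, 8001, 8002, 8003], while parse_port_range('80', client) returns [80].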
+
+
+def split_colon_ipv6(text, client):
+ '''
+ Split string by ':', while keeping IPv6 addresses in square brackets in one component.
+ '''
+ if '[' not in text:
+ return text.split(':')
+ start = 0
+ result = []
+ while start < len(text):
+ i = text.find('[', start)
+ if i < 0:
+ result.extend(text[start:].split(':'))
+ break
+ j = text.find(']', i)
+ if j < 0:
+ client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
+ result.extend(text[start:i].split(':'))
+ k = text.find(':', j)
+ if k < 0:
+ result[-1] += text[i:]
+ start = len(text)
+ else:
+ result[-1] += text[i:k]
+ if k == len(text):
+ result.append('')
+ break
+ start = k + 1
+ return result
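+# For instance, split_colon_ipv6('[::1]:8080:80', client) returns
+# ['[::1]', '8080', '80'], keeping the bracketed IPv6 address in one component.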
+
+
+class TaskParameters(DockerBaseClass):
+ '''
+ Access and parse module parameters
+ '''
+
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.auto_remove = None
+ self.blkio_weight = None
+ self.capabilities = None
+ self.cap_drop = None
+ self.cleanup = None
+ self.command = None
+ self.cpu_period = None
+ self.cpu_quota = None
+ self.cpus = None
+ self.cpuset_cpus = None
+ self.cpuset_mems = None
+ self.cpu_shares = None
+ self.detach = None
+ self.debug = None
+ self.devices = None
+ self.device_read_bps = None
+ self.device_write_bps = None
+ self.device_read_iops = None
+ self.device_write_iops = None
+ self.device_requests = None
+ self.dns_servers = None
+ self.dns_opts = None
+ self.dns_search_domains = None
+ self.domainname = None
+ self.env = None
+ self.env_file = None
+ self.entrypoint = None
+ self.etc_hosts = None
+ self.exposed_ports = None
+ self.force_kill = None
+ self.groups = None
+ self.healthcheck = None
+ self.hostname = None
+ self.ignore_image = None
+ self.image = None
+ self.init = None
+ self.interactive = None
+ self.ipc_mode = None
+ self.keep_volumes = None
+ self.kernel_memory = None
+ self.kill_signal = None
+ self.labels = None
+ self.links = None
+ self.log_driver = None
+ self.output_logs = None
+ self.log_options = None
+ self.mac_address = None
+ self.memory = None
+ self.memory_reservation = None
+ self.memory_swap = None
+ self.memory_swappiness = None
+ self.mounts = None
+ self.name = None
+ self.network_mode = None
+ self.userns_mode = None
+ self.networks = None
+ self.networks_cli_compatible = None
+ self.oom_killer = None
+ self.oom_score_adj = None
+ self.paused = None
+ self.pid_mode = None
+ self.pids_limit = None
+ self.privileged = None
+ self.purge_networks = None
+ self.pull = None
+ self.read_only = None
+ self.recreate = None
+ self.removal_wait_timeout = None
+ self.restart = None
+ self.restart_retries = None
+ self.restart_policy = None
+ self.runtime = None
+ self.shm_size = None
+ self.security_opts = None
+ self.state = None
+ self.stop_signal = None
+ self.stop_timeout = None
+ self.tmpfs = None
+ self.trust_image_content = None
+ self.tty = None
+ self.user = None
+ self.uts = None
+ self.volumes = None
+ self.volume_binds = dict()
+ self.volumes_from = None
+ self.volume_driver = None
+ self.working_dir = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+ self.comparisons = client.comparisons
+
+ # If state is 'absent', parameters do not have to be parsed or interpreted.
+ # Only the container's name is needed.
+ if self.state == 'absent':
+ return
+
+ if self.cpus is not None:
+ self.cpus = int(round(self.cpus * 1E9))
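+            # The Docker API expects NanoCPUs, so e.g. cpus=1.5 becomes
+            # 1500000000 (passed as nano_cpus in the host config below).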
+
+ if self.groups:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups]
+
+ for param_name in REQUIRES_CONVERSION_TO_BYTES:
+ if client.module.params.get(param_name):
+ try:
+ setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
+ except ValueError as exc:
+ self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
+
+ self.publish_all_ports = False
+ self.published_ports = self._parse_publish_ports()
+ if self.published_ports in ('all', 'ALL'):
+ self.publish_all_ports = True
+ self.published_ports = None
+
+ self.ports = self._parse_exposed_ports(self.published_ports)
+ self.log("expose ports:")
+ self.log(self.ports, pretty_print=True)
+
+ self.links = self._parse_links(self.links)
+
+ if self.volumes:
+ self.volumes = self._expand_host_paths()
+
+ self.tmpfs = self._parse_tmpfs()
+ self.env = self._get_environment()
+ self.ulimits = self._parse_ulimits()
+ self.sysctls = self._parse_sysctls()
+ self.log_config = self._parse_log_config()
+ try:
+ self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
+ except ValueError as e:
+ self.fail(to_native(e))
+
+ self.exp_links = None
+ self.volume_binds = self._get_volume_binds(self.volumes)
+ self.pid_mode = self._replace_container_names(self.pid_mode)
+ self.ipc_mode = self._replace_container_names(self.ipc_mode)
+ self.network_mode = self._replace_container_names(self.network_mode)
+
+ self.log("volumes:")
+ self.log(self.volumes, pretty_print=True)
+ self.log("volume binds:")
+ self.log(self.volume_binds, pretty_print=True)
+
+ if self.networks:
+ for network in self.networks:
+ network['id'] = self._get_network_id(network['name'])
+ if not network['id']:
+ self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
+ if network.get('links'):
+ network['links'] = self._parse_links(network['links'])
+
+ if self.mac_address:
+ # Ensure the MAC address uses colons instead of hyphens for later comparison
+ self.mac_address = self.mac_address.replace('-', ':')
+
+ if self.entrypoint:
+ # convert from list to str.
+ self.entrypoint = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])
+
+ if self.command:
+ # convert from list to str
+ if isinstance(self.command, list):
+ self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command])
+
+ self.mounts_opt, self.expected_mounts = self._process_mounts()
+
+ self._check_mount_target_collisions()
+
+ for param_name in ["device_read_bps", "device_write_bps"]:
+ if client.module.params.get(param_name):
+ self._process_rate_bps(option=param_name)
+
+ for param_name in ["device_read_iops", "device_write_iops"]:
+ if client.module.params.get(param_name):
+ self._process_rate_iops(option=param_name)
+
+ if self.device_requests:
+ for dr_index, dr in enumerate(self.device_requests):
+ # Make sure that capabilities are lists of lists of strings
+ if dr['capabilities']:
+ for or_index, or_list in enumerate(dr['capabilities']):
+ for and_index, and_term in enumerate(or_list):
+ if not isinstance(and_term, string_types):
+ self.fail(
+ "device_requests[{0}].capabilities[{1}][{2}] is not a string".format(
+ dr_index, or_index, and_index))
+ or_list[and_index] = to_native(and_term)
+ # Make sure that options is a dictionary mapping strings to strings
+ if dr['options']:
+ dr['options'] = clean_dict_booleans_for_docker_api(dr['options'])
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ @property
+ def update_parameters(self):
+ '''
+ Returns parameters used to update a container
+ '''
+
+ update_parameters = dict(
+ blkio_weight='blkio_weight',
+ cpu_period='cpu_period',
+ cpu_quota='cpu_quota',
+ cpu_shares='cpu_shares',
+ cpuset_cpus='cpuset_cpus',
+ cpuset_mems='cpuset_mems',
+ mem_limit='memory',
+ mem_reservation='memory_reservation',
+ memswap_limit='memory_swap',
+ kernel_memory='kernel_memory',
+ restart_policy='restart_policy',
+ )
+
+ result = dict()
+ for key, value in update_parameters.items():
+ if getattr(self, value, None) is not None:
+ if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']:
+ restart_policy = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+ result[key] = restart_policy
+ elif self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+ return result
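+    # For example, restart_policy='on-failure' with restart_retries=5 yields
+    # {'restart_policy': {'Name': 'on-failure', 'MaximumRetryCount': 5}},
+    # provided the daemon and SDK versions support updating the restart policy.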
+
+ @property
+ def create_parameters(self):
+ '''
+ Returns parameters used to create a container
+ '''
+ create_params = dict(
+ command='command',
+ domainname='domainname',
+ hostname='hostname',
+ user='user',
+ detach='detach',
+ stdin_open='interactive',
+ tty='tty',
+ ports='ports',
+ environment='env',
+ name='name',
+ entrypoint='entrypoint',
+ mac_address='mac_address',
+ labels='labels',
+ stop_signal='stop_signal',
+ working_dir='working_dir',
+ stop_timeout='stop_timeout',
+ healthcheck='healthcheck',
+ )
+
+ if self.client.docker_py_version < LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ create_params['cpu_shares'] = 'cpu_shares'
+ create_params['volume_driver'] = 'volume_driver'
+
+ result = dict(
+ host_config=self._host_config(),
+ volumes=self._get_mounts(),
+ )
+
+ for key, value in create_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+
+ if self.disable_healthcheck:
+ # Make sure image's health check is overridden
+ result['healthcheck'] = {'test': ['NONE']}
+
+ if self.networks_cli_compatible and self.networks:
+ network = self.networks[0]
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if network.get(para):
+ params[para] = network[para]
+ network_config = dict()
+ network_config[network['name']] = self.client.create_endpoint_config(**params)
+ result['networking_config'] = self.client.create_networking_config(network_config)
+ return result
+
+ def _expand_host_paths(self):
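+ # Expand '~' and relative host paths in volume specs to absolute paths.
+ # For illustration (paths hypothetical): '~/data:/data:ro' becomes
+ # '/home/user/data:/data:ro' for a user whose home is /home/user.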
+ new_vols = []
+ for vol in self.volumes:
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if re.match(r'[.~]', host):
+ host = os.path.abspath(os.path.expanduser(host))
+ new_vols.append("%s:%s:%s" % (host, container, mode))
+ continue
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
+ host = os.path.abspath(os.path.expanduser(parts[0]))
+ new_vols.append("%s:%s:rw" % (host, parts[1]))
+ continue
+ new_vols.append(vol)
+ return new_vols
+
+ def _get_mounts(self):
+ '''
+ Return the list of anonymous volumes to pass to create_container().
+ '''
+ result = []
+ if self.volumes:
+ for vol in self.volumes:
+ # Only pass anonymous volumes to create container
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ result.append(vol)
+ self.log("mounts:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _host_config(self):
+ '''
+ Returns parameters used to create a HostConfig object
+ '''
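+ # Keys are the kwargs of docker-py's create_host_config(); values are the
+ # module option names they are read from.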
+
+ host_config_params = dict(
+ port_bindings='published_ports',
+ publish_all_ports='publish_all_ports',
+ links='links',
+ privileged='privileged',
+ dns='dns_servers',
+ dns_opt='dns_opts',
+ dns_search='dns_search_domains',
+ binds='volume_binds',
+ volumes_from='volumes_from',
+ network_mode='network_mode',
+ userns_mode='userns_mode',
+ cap_add='capabilities',
+ cap_drop='cap_drop',
+ extra_hosts='etc_hosts',
+ read_only='read_only',
+ ipc_mode='ipc_mode',
+ security_opt='security_opts',
+ ulimits='ulimits',
+ sysctls='sysctls',
+ log_config='log_config',
+ mem_limit='memory',
+ memswap_limit='memory_swap',
+ mem_swappiness='memory_swappiness',
+ oom_score_adj='oom_score_adj',
+ oom_kill_disable='oom_killer',
+ shm_size='shm_size',
+ group_add='groups',
+ devices='devices',
+ pid_mode='pid_mode',
+ tmpfs='tmpfs',
+ init='init',
+ uts_mode='uts',
+ runtime='runtime',
+ auto_remove='auto_remove',
+ device_read_bps='device_read_bps',
+ device_write_bps='device_write_bps',
+ device_read_iops='device_read_iops',
+ device_write_iops='device_write_iops',
+ pids_limit='pids_limit',
+ mounts='mounts',
+ nano_cpus='cpus',
+ )
+
+ if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
+ # blkio_weight can always be updated, but can only be set on creation
+ # when Docker SDK for Python and Docker API are new enough
+ host_config_params['blkio_weight'] = 'blkio_weight'
+
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in Docker SDK for Python 3.0+
+ host_config_params['cpu_shares'] = 'cpu_shares'
+ host_config_params['volume_driver'] = 'volume_driver'
+
+ params = dict()
+ for key, value in host_config_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ params[key] = getattr(self, value)
+
+ if self.restart_policy:
+ params['restart_policy'] = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+
+ if 'mounts' in params:
+ params['mounts'] = self.mounts_opt
+
+ if self.device_requests is not None:
+ params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests]
+
+ return self.client.create_host_config(**params)
+
+ @property
+ def default_host_ip(self):
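+ # Falls back to 0.0.0.0 unless one of the requested networks is a bridge
+ # that sets the com.docker.network.bridge.host_binding_ipv4 option.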
+ ip = '0.0.0.0'
+ if not self.networks:
+ return ip
+ for net in self.networks:
+ if net.get('name'):
+ try:
+ network = self.client.inspect_network(net['name'])
+ if network.get('Driver') == 'bridge' and \
+ network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
+ ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
+ break
+ except NotFound as nfe:
+ self.client.fail(
+ "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
+ exception=traceback.format_exc()
+ )
+ return ip
+
+ def _parse_publish_ports(self):
+ '''
+ Parse ports from docker CLI syntax
+ '''
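+ # Accepted forms mirror `docker run -p` (values hypothetical):
+ # '8080' binds container port 8080 on the default host IP,
+ # '8080:80' maps host port 8080 to container port 80,
+ # '127.0.0.1:8080:80/tcp' additionally pins the bind address and protocol.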
+ if self.published_ports is None:
+ return None
+
+ if 'all' in self.published_ports:
+ return 'all'
+
+ default_ip = self.default_host_ip
+
+ binds = {}
+ for port in self.published_ports:
+ parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client)
+ container_port = parts[-1]
+ protocol = ''
+ if '/' in container_port:
+ container_port, protocol = parts[-1].split('/')
+ container_ports = parse_port_range(container_port, self.client)
+
+ p_len = len(parts)
+ if p_len == 1:
+ port_binds = len(container_ports) * [(default_ip,)]
+ elif p_len == 2:
+ if len(container_ports) == 1:
+ port_binds = [(default_ip, parts[0])]
+ else:
+ port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
+ elif p_len == 3:
+ # We only allow IPv4 and IPv6 addresses for the bind address
+ ipaddr = parts[0]
+ if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
+ self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
+ 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
+ ipaddr = ipaddr[1:-1]
+ if parts[1]:
+ if len(container_ports) == 1:
+ port_binds = [(ipaddr, parts[1])]
+ else:
+ port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
+ else:
+ port_binds = len(container_ports) * [(ipaddr,)]
+
+ for bind, container_port in zip(port_binds, container_ports):
+ idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
+ if idx in binds:
+ old_bind = binds[idx]
+ if isinstance(old_bind, list):
+ old_bind.append(bind)
+ else:
+ binds[idx] = [old_bind, bind]
+ else:
+ binds[idx] = bind
+ return binds
+
+ def _get_volume_binds(self, volumes):
+ '''
+ Extract host bindings, if any, from list of volume mapping strings.
+
+ :return: dictionary of bind mappings
+ '''
+ result = dict()
+ if volumes:
+ for vol in volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = (parts + ['rw'])
+ if host is not None:
+ result[host] = dict(
+ bind=container,
+ mode=mode
+ )
+ return result
+
+ def _parse_exposed_ports(self, published_ports):
+ '''
+ Parse exposed ports from docker CLI-style ports syntax.
+ '''
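+ # For illustration (ports hypothetical): '8080/udp' becomes ('8080', 'udp');
+ # a bare '3000' defaults to ('3000', 'tcp').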
+ exposed = []
+ if self.exposed_ports:
+ for port in self.exposed_ports:
+ port = to_text(port, errors='surrogate_or_strict').strip()
+ protocol = 'tcp'
+ match = re.search(r'(/.+$)', port)
+ if match:
+ protocol = match.group(1).replace('/', '')
+ port = re.sub(r'/.+$', '', port)
+ exposed.append((port, protocol))
+ if published_ports:
+ # Any published port should also be exposed
+ for publish_port in published_ports:
+ match = False
+ if isinstance(publish_port, string_types) and '/' in publish_port:
+ port, protocol = publish_port.split('/')
+ port = int(port)
+ else:
+ protocol = 'tcp'
+ port = int(publish_port)
+ for exposed_port in exposed:
+ if exposed_port[1] != protocol:
+ continue
+ if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
+ start_port, end_port = exposed_port[0].split('-')
+ if int(start_port) <= port <= int(end_port):
+ match = True
+ elif exposed_port[0] == port:
+ match = True
+ if not match:
+ exposed.append((port, protocol))
+ return exposed
+
+ @staticmethod
+ def _parse_links(links):
+ '''
+ Turn links into a dictionary
+ '''
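+ # For illustration (names hypothetical): ['db:database', 'cache'] becomes
+ # [('db', 'database'), ('cache', 'cache')].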
+ if links is None:
+ return None
+
+ result = []
+ for link in links:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ result.append((parsed_link[0], parsed_link[1]))
+ else:
+ result.append((parsed_link[0], parsed_link[0]))
+ return result
+
+ def _parse_ulimits(self):
+ '''
+ Turn ulimits into an array of Ulimit objects
+ '''
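+ # For illustration (values hypothetical): 'nofile:1024:2048' yields
+ # Ulimit(name='nofile', soft=1024, hard=2048); 'nofile:1024' sets soft
+ # and hard both to 1024.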
+ if self.ulimits is None:
+ return None
+
+ results = []
+ for limit in self.ulimits:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['name'] = pieces[0]
+ limits['soft'] = int(pieces[1])
+ limits['hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['hard'] = int(pieces[2])
+ try:
+ results.append(Ulimit(**limits))
+ except ValueError as exc:
+ self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
+ return results
+
+ def _parse_sysctls(self):
+ '''
+ Sysctls are already a plain dict of key/value pairs; pass them through unchanged.
+ '''
+ return self.sysctls
+
+ def _parse_log_config(self):
+ '''
+ Create a LogConfig object
+ '''
+ if self.log_driver is None:
+ return None
+
+ options = dict(
+ Type=self.log_driver,
+ Config=dict()
+ )
+
+ if self.log_options is not None:
+ options['Config'] = dict()
+ for k, v in self.log_options.items():
+ if not isinstance(v, string_types):
+ self.client.module.warn(
+ "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
+ "If this is not correct, or you want to avoid such warnings, please quote the value." % (
+ k, to_text(v, errors='surrogate_or_strict'))
+ )
+ v = to_text(v, errors='surrogate_or_strict')
+ self.log_options[k] = v
+ options['Config'][k] = v
+
+ try:
+ return LogConfig(**options)
+ except ValueError as exc:
+ self.fail('Error parsing logging options - %s' % (exc))
+
+ def _parse_tmpfs(self):
+ '''
+ Turn the tmpfs list into a dict mapping mount paths to their options
+ '''
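+ # For illustration (values hypothetical): '/run:rw,size=64m' becomes
+ # {'/run': 'rw,size=64m'}; a bare '/tmp' becomes {'/tmp': ''}.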
+ result = dict()
+ if self.tmpfs is None:
+ return result
+
+ for tmpfs_spec in self.tmpfs:
+ split_spec = tmpfs_spec.split(":", 1)
+ if len(split_spec) > 1:
+ result[split_spec[0]] = split_spec[1]
+ else:
+ result[split_spec[0]] = ""
+ return result
+
+ def _get_environment(self):
+ """
+ If environment file is combined with explicit environment variables, the explicit environment variables
+ take precedence.
+ """
+ final_env = {}
+ if self.env_file:
+ parsed_env_file = utils.parse_env_file(self.env_file)
+ for name, value in parsed_env_file.items():
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ if self.env:
+ for name, value in self.env.items():
+ if not isinstance(value, string_types):
+ self.fail("Non-string value found for env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ return final_env
+
+ def _get_network_id(self, network_name):
+ network_id = None
+ try:
+ for network in self.client.networks(names=[network_name]):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ except Exception as exc:
+ self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+ return network_id
+
+ def _process_mounts(self):
+ if self.mounts is None:
+ return None, None
+ mounts_list = []
+ mounts_expected = []
+ for mount in self.mounts:
+ target = mount['target']
+ datatype = mount['type']
+ mount_dict = dict(mount)
+ # Sanity checks, so we fail early with a clear message instead of letting docker-py raise
+ if mount_dict.get('source') is None and datatype != 'tmpfs':
+ self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
+ mount_option_types = dict(
+ volume_driver='volume',
+ volume_options='volume',
+ propagation='bind',
+ no_copy='volume',
+ labels='volume',
+ tmpfs_size='tmpfs',
+ tmpfs_mode='tmpfs',
+ )
+ for option, req_datatype in mount_option_types.items():
+ if mount_dict.get(option) is not None and datatype != req_datatype:
+ self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
+ # Handle volume_driver and volume_options
+ volume_driver = mount_dict.pop('volume_driver')
+ volume_options = mount_dict.pop('volume_options')
+ if volume_driver:
+ if volume_options:
+ volume_options = clean_dict_booleans_for_docker_api(volume_options)
+ mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
+ if mount_dict['labels']:
+ mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
+ if mount_dict.get('tmpfs_size') is not None:
+ try:
+ mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
+ except ValueError as exc:
+ self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
+ if mount_dict.get('tmpfs_mode') is not None:
+ try:
+ mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
+ except Exception as dummy:
+ self.client.fail('tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
+ # Fill expected mount dict
+ mount_expected = dict(mount)
+ mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
+ mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
+ # Add result to lists
+ mounts_list.append(docker_types.Mount(**mount_dict))
+ mounts_expected.append(omit_none_from_dict(mount_expected))
+ return mounts_list, mounts_expected
+
+ def _process_rate_bps(self, option):
+ """
+ Format device_read_bps and device_write_bps option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _process_rate_iops(self, option):
+ """
+ Format device_read_iops and device_write_iops option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _replace_container_names(self, mode):
+ """
+ Parse IPC and PID modes. If they contain a container name, replace
+ with the container's ID.
+ """
+ if mode is None or not mode.startswith('container:'):
+ return mode
+ container_name = mode[len('container:'):]
+ # Try to inspect container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = self.client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return mode
+ return 'container:{0}'.format(container['Id'])
+
+ def _check_mount_target_collisions(self):
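+ # Each container target path may appear at most once across the mounts
+ # and volumes options; e.g. (path hypothetical) using '/data' as a target
+ # in both options fails below.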
+ last = dict()
+
+ def f(t, name):
+ if t in last:
+ if name == last[t]:
+ self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
+ else:
+ self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+ last[t] = name
+
+ if self.expected_mounts:
+ for t in [m['target'] for m in self.expected_mounts]:
+ f(t, 'mounts')
+ if self.volumes:
+ for v in self.volumes:
+ vs = v.split(':')
+ f(vs[0 if len(vs) == 1 else 1], 'volumes')
+
+
+class Container(DockerBaseClass):
+
+ def __init__(self, container, parameters):
+ super(Container, self).__init__()
+ self.raw = container
+ self.Id = None
+ self.container = container
+ if container:
+ self.Id = container['Id']
+ self.Image = container['Image']
+ self.log(self.container, pretty_print=True)
+ self.parameters = parameters
+ self.parameters.expected_links = None
+ self.parameters.expected_ports = None
+ self.parameters.expected_exposed = None
+ self.parameters.expected_volumes = None
+ self.parameters.expected_ulimits = None
+ self.parameters.expected_sysctls = None
+ self.parameters.expected_etc_hosts = None
+ self.parameters.expected_env = None
+ self.parameters.expected_device_requests = None
+ self.parameters_map = dict()
+ self.parameters_map['expected_links'] = 'links'
+ self.parameters_map['expected_ports'] = 'expected_ports'
+ self.parameters_map['expected_exposed'] = 'exposed_ports'
+ self.parameters_map['expected_volumes'] = 'volumes'
+ self.parameters_map['expected_ulimits'] = 'ulimits'
+ self.parameters_map['expected_sysctls'] = 'sysctls'
+ self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
+ self.parameters_map['expected_env'] = 'env'
+ self.parameters_map['expected_entrypoint'] = 'entrypoint'
+ self.parameters_map['expected_binds'] = 'volumes'
+ self.parameters_map['expected_cmd'] = 'command'
+ self.parameters_map['expected_devices'] = 'devices'
+ self.parameters_map['expected_healthcheck'] = 'healthcheck'
+ self.parameters_map['expected_mounts'] = 'mounts'
+ self.parameters_map['expected_device_requests'] = 'device_requests'
+
+ def fail(self, msg):
+ self.parameters.client.fail(msg)
+
+ @property
+ def exists(self):
+ return bool(self.container)
+
+ @property
+ def removing(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Status') == 'removing'
+ return False
+
+ @property
+ def running(self):
+ if self.container and self.container.get('State'):
+ if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
+ return True
+ return False
+
+ @property
+ def paused(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Paused', False)
+ return False
+
+ def _compare(self, a, b, compare):
+ '''
+ Compare values a and b as described in compare.
+ '''
+ return compare_generic(a, b, compare['comparison'], compare['type'])
+
+ def _decode_mounts(self, mounts):
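+ # Convert the CamelCase mount entries from container inspection back into
+ # the snake_case layout of the module's mounts option so that both sides
+ # can be compared field by field.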
+ if not mounts:
+ return mounts
+ result = []
+ empty_dict = dict()
+ for mount in mounts:
+ res = dict()
+ res['type'] = mount.get('Type')
+ res['source'] = mount.get('Source')
+ res['target'] = mount.get('Target')
+ res['read_only'] = mount.get('ReadOnly', False)  # the Go API's omitempty drops false bools, so the key may be absent
+ res['consistency'] = mount.get('Consistency')
+ res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
+ res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
+ res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
+ res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
+ res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
+ res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
+ res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
+ result.append(res)
+ return result
+
+ def has_different_configuration(self, image):
+ '''
+ Diff parameters vs existing container config. Returns tuple: (bool, DifferenceTracker)
+ '''
+ self.log('Starting has_different_configuration')
+ self.parameters.expected_entrypoint = self._get_expected_entrypoint()
+ self.parameters.expected_links = self._get_expected_links()
+ self.parameters.expected_ports = self._get_expected_ports()
+ self.parameters.expected_exposed = self._get_expected_exposed(image)
+ self.parameters.expected_volumes = self._get_expected_volumes(image)
+ self.parameters.expected_binds = self._get_expected_binds(image)
+ self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
+ self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
+ self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
+ self.parameters.expected_env = self._get_expected_env(image)
+ self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
+ self.parameters.expected_healthcheck = self._get_expected_healthcheck()
+ self.parameters.expected_device_requests = self._get_expected_device_requests()
+
+ if not self.container.get('HostConfig'):
+ self.fail("has_different_configuration: Error parsing container properties. HostConfig missing.")
+ if not self.container.get('Config'):
+ self.fail("has_different_configuration: Error parsing container properties. Config missing.")
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_different_configuration: Error parsing container properties. NetworkSettings missing.")
+
+ host_config = self.container['HostConfig']
+ log_config = host_config.get('LogConfig', dict())
+ config = self.container['Config']
+ network = self.container['NetworkSettings']
+
+ # The previous version of the docker module ignored the detach state by
+ # assuming if the container was running, it must have been detached.
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ # Map parameters to container inspect results
+ config_mapping = dict(
+ expected_cmd=config.get('Cmd'),
+ domainname=config.get('Domainname'),
+ hostname=config.get('Hostname'),
+ user=config.get('User'),
+ detach=detach,
+ init=host_config.get('Init'),
+ interactive=config.get('OpenStdin'),
+ capabilities=host_config.get('CapAdd'),
+ cap_drop=host_config.get('CapDrop'),
+ expected_devices=host_config.get('Devices'),
+ dns_servers=host_config.get('Dns'),
+ dns_opts=host_config.get('DnsOptions'),
+ dns_search_domains=host_config.get('DnsSearch'),
+ expected_env=(config.get('Env') or []),
+ expected_entrypoint=config.get('Entrypoint'),
+ expected_etc_hosts=host_config['ExtraHosts'],
+ expected_exposed=expected_exposed,
+ groups=host_config.get('GroupAdd'),
+ ipc_mode=host_config.get("IpcMode"),
+ labels=config.get('Labels'),
+ expected_links=host_config.get('Links'),
+ mac_address=config.get('MacAddress', network.get('MacAddress')),
+ memory_swappiness=host_config.get('MemorySwappiness'),
+ network_mode=host_config.get('NetworkMode'),
+ userns_mode=host_config.get('UsernsMode'),
+ oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ pid_mode=host_config.get('PidMode'),
+ privileged=host_config.get('Privileged'),
+ expected_ports=host_config.get('PortBindings'),
+ read_only=host_config.get('ReadonlyRootfs'),
+ runtime=host_config.get('Runtime'),
+ shm_size=host_config.get('ShmSize'),
+ security_opts=host_config.get("SecurityOpt"),
+ stop_signal=config.get("StopSignal"),
+ tmpfs=host_config.get('Tmpfs'),
+ tty=config.get('Tty'),
+ expected_ulimits=host_config.get('Ulimits'),
+ expected_sysctls=host_config.get('Sysctls'),
+ uts=host_config.get('UTSMode'),
+ expected_volumes=config.get('Volumes'),
+ expected_binds=host_config.get('Binds'),
+ volume_driver=host_config.get('VolumeDriver'),
+ volumes_from=host_config.get('VolumesFrom'),
+ working_dir=config.get('WorkingDir'),
+ publish_all_ports=host_config.get('PublishAllPorts'),
+ expected_healthcheck=config.get('Healthcheck'),
+ disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
+ device_read_bps=host_config.get('BlkioDeviceReadBps'),
+ device_write_bps=host_config.get('BlkioDeviceWriteBps'),
+ device_read_iops=host_config.get('BlkioDeviceReadIOps'),
+ device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
+ expected_device_requests=host_config.get('DeviceRequests'),
+ pids_limit=host_config.get('PidsLimit'),
+ # According to https://github.com/moby/moby/, support for HostConfig.Mounts
+ # has been included at least since v17.03.0-ce, which has API version 1.26.
+ # The previous tag, v1.9.1, has API version 1.21 and does not have
+ # HostConfig.Mounts. Whether API 1.25 supports it is unclear.
+ expected_mounts=self._decode_mounts(host_config.get('Mounts')),
+ cpus=host_config.get('NanoCpus'),
+ )
+ # Options which don't make sense without their accompanying option
+ if self.parameters.log_driver:
+ config_mapping['log_driver'] = log_config.get('Type')
+ config_mapping['log_options'] = log_config.get('Config')
+
+ if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
+ # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately
+ # it has a default value, that's why we have to jump through the hoops here
+ config_mapping['auto_remove'] = host_config.get('AutoRemove')
+
+ if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
+ # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that
+ # stop_timeout has a hybrid role, in that it used to be something only used
+ # for stopping containers, and is now also used as a container property.
+ # That's why it needs special handling here.
+ config_mapping['stop_timeout'] = config.get('StopTimeout')
+
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # For docker API < 1.22, update_container() is not supported. Thus
+ # we need to handle all limits which are usually handled by
+ # update_container() as configuration changes which require a container
+ # restart.
+ restart_policy = host_config.get('RestartPolicy', dict())
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ config_mapping.update(dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ ))
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
+ if not minimal_version.get('supported', True):
+ continue
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare))
+ if getattr(self.parameters, key, None) is not None:
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ p = getattr(self.parameters, key)
+ c = value
+ if compare['type'] == 'set':
+ # Since the order does not matter, sort so that the diff output is better.
+ if p is not None:
+ p = sorted(p)
+ if c is not None:
+ c = sorted(c)
+ elif compare['type'] == 'set(dict)':
+ # Since the order does not matter, sort so that the diff output is better.
+ if key == 'expected_mounts':
+ # For selected values, use one entry as key
+ def sort_key_fn(x):
+ return x['target']
+ else:
+ # We sort the list of dictionaries by using the sorted items of a dict as its key.
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(key, parameter=p, active=c)
+
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def has_different_resource_limits(self):
+ '''
+ Diff parameters and container resource limits
+ '''
+ if not self.container.get('HostConfig'):
+ self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # update_container() call not supported
+ return False, []
+
+ host_config = self.container['HostConfig']
+
+ restart_policy = host_config.get('RestartPolicy') or dict()
+
+ config_mapping = dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ )
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ if getattr(self.parameters, key, None):
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ differences.add(key, parameter=getattr(self.parameters, key), active=value)
+ different = not differences.empty
+ return different, differences
+
+ def has_network_differences(self):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.parameters.networks:
+ return different, differences
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings']['Networks']
+ for network in self.parameters.networks:
+ network_info = connected_networks.get(network['name'])
+ if network_info is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ network_info_ipam = network_info.get('IPAMConfig') or {}
+ if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
+ diff = True
+ if network.get('aliases'):
+ if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
+ diff = True
+ if network.get('links'):
+ expected_links = []
+ for link, alias in network['links']:
+ expected_links.append("%s:%s" % (link, alias))
+ if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=network_info_ipam.get('IPv4Address'),
+ ipv6_address=network_info_ipam.get('IPv6Address'),
+ aliases=network_info.get('Aliases'),
+ links=network_info.get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.parameters.networks:
+ for expected_network in self.parameters.networks:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def _get_expected_devices(self):
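+ # Expand each devices entry to the dict Docker reports (paths hypothetical):
+ # '/dev/sda' uses the same path in the container with 'rwm' permissions;
+ # '/dev/sda:/dev/xvda:r' overrides the container path and permissions.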
+ if not self.parameters.devices:
+ return None
+ expected_devices = []
+ for device in self.parameters.devices:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
+
+ def _get_expected_entrypoint(self):
+ if not self.parameters.entrypoint:
+ return None
+ return shlex.split(self.parameters.entrypoint)
+
+ def _get_expected_ports(self):
+ if self.parameters.published_ports is None:
+ return None
+ expected_bound_ports = {}
+ for container_port, config in self.parameters.published_ports.items():
+ if isinstance(container_port, int):
+ container_port = "%s/tcp" % container_port
+ if len(config) == 1:
+ if isinstance(config[0], int):
+ expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
+ elif isinstance(config[0], tuple):
+ expected_bound_ports[container_port] = []
+ for host_ip, host_port in config:
+ expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
+ return expected_bound_ports
+
+ def _get_expected_links(self):
+ if self.parameters.links is None:
+ return None
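+ # For illustration (names hypothetical): with container name 'web', the
+ # link ('db', 'database') becomes '/db:/web/database', the form Docker
+ # reports in HostConfig.Links.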
+ self.log('parameter links:')
+ self.log(self.parameters.links, pretty_print=True)
+ exp_links = []
+ for link, alias in self.parameters.links:
+ exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
+ return exp_links
+
+ def _get_expected_binds(self, image):
+ self.log('_get_expected_binds')
+ image_vols = []
+ if image:
+ image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes'))
+ param_vols = []
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = parts + ['rw']
+ if host:
+ param_vols.append("%s:%s:%s" % (host, container, mode))
+ result = list(set(image_vols + param_vols))
+ self.log("expected_binds:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_device_requests(self):
+ if self.parameters.device_requests is None:
+ return None
+ device_requests = []
+ for dr in self.parameters.device_requests:
+ device_requests.append({
+ 'Driver': dr['driver'],
+ 'Count': dr['count'],
+ 'DeviceIDs': dr['device_ids'],
+ 'Capabilities': dr['capabilities'],
+ 'Options': dr['options'],
+ })
+ return device_requests
+
+ def _get_image_binds(self, volumes):
+ '''
+ Convert array of binds to array of strings with format host_path:container_path:mode
+
+ :param volumes: array of bind dicts
+ :return: array of strings
+ '''
+ results = []
+ if isinstance(volumes, dict):
+ results += self._get_bind_from_dict(volumes)
+ elif isinstance(volumes, list):
+ for vol in volumes:
+ results += self._get_bind_from_dict(vol)
+ return results
+
+ @staticmethod
+ def _get_bind_from_dict(volume_dict):
+ results = []
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
+
+ def _get_expected_volumes(self, image):
+ self.log('_get_expected_volumes')
+ expected_vols = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Volumes'):
+ expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes'))
+
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ # We only expect anonymous volumes to show up in the list
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ expected_vols[vol] = dict()
+
+ if not expected_vols:
+ expected_vols = None
+ self.log("expected_volumes:")
+ self.log(expected_vols, pretty_print=True)
+ return expected_vols
+
+ def _get_expected_env(self, image):
+ self.log('_get_expected_env')
+ expected_env = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Env'):
+ for env_var in image[self.parameters.client.image_inspect_source]['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if self.parameters.env:
+ expected_env.update(self.parameters.env)
+ param_env = []
+ for key, value in expected_env.items():
+ param_env.append("%s=%s" % (key, value))
+ return param_env
+
+ def _get_expected_exposed(self, image):
+ self.log('_get_expected_exposed')
+ image_ports = []
+ if image:
+ image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {}
+ image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()]
+ param_ports = []
+ if self.parameters.ports:
+ param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports]
+ result = list(set(image_ports + param_ports))
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_ulimits(self, config_ulimits):
+ self.log('_get_expected_ulimits')
+ if config_ulimits is None:
+ return None
+ results = []
+ for limit in config_ulimits:
+ results.append(dict(
+ Name=limit.name,
+ Soft=limit.soft,
+ Hard=limit.hard
+ ))
+ return results
+
+ def _get_expected_sysctls(self, config_sysctls):
+ self.log('_get_expected_sysctls')
+ if config_sysctls is None:
+ return None
+ result = dict()
+ for key, value in config_sysctls.items():
+ result[key] = to_text(value, errors='surrogate_or_strict')
+ return result
+
+ def _get_expected_cmd(self):
+ self.log('_get_expected_cmd')
+ if not self.parameters.command:
+ return None
+ return shlex.split(self.parameters.command)
+
+ def _convert_simple_dict_to_list(self, param_name, join_with=':'):
+ if getattr(self.parameters, param_name, None) is None:
+ return None
+ results = []
+ for key, value in getattr(self.parameters, param_name).items():
+ results.append("%s%s%s" % (key, join_with, value))
+ return results
+
+ def _normalize_port(self, port):
+ if '/' not in port:
+ return port + '/tcp'
+ return port
+
+ def _get_expected_healthcheck(self):
+ self.log('_get_expected_healthcheck')
+ expected_healthcheck = dict()
+
+ if self.parameters.healthcheck:
+ expected_healthcheck.update([(k.title().replace("_", ""), v)
+ for k, v in self.parameters.healthcheck.items()])
+
+ return expected_healthcheck
+
+
+class ContainerManager(DockerBaseClass):
+ '''
+ Perform container management tasks
+ '''
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
+ client.module.warn('log_options is ignored when log_driver is not specified')
+ if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
+ client.module.warn('healthcheck is ignored when test is not specified')
+ if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
+ client.module.warn('restart_retries is ignored when restart_policy is not specified')
+
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.diff_tracker = DifferenceTracker()
+ self.facts = {}
+
+ state = self.parameters.state
+ if state in ('stopped', 'started', 'present'):
+ self.present(state)
+ elif state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ if self.client.module._diff or self.parameters.debug:
+ self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['ansible_facts'] = {'docker_container': self.facts}
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
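+ # Poll the container until it reaches one of complete_states, leaves
+ # wait_states, or vanishes (allowed only if accept_removal is set);
+ # fail once max_wait seconds have elapsed.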
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.client.get_container_by_id(container_id)
+ if result is None:
+ if accept_removal:
+ return
+ msg = 'Encountered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+ msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
+
+ def present(self, state):
+ container = self._get_container(self.parameters.name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+ # If the image parameter was passed, we need to deal with the image
+ # version comparison. Otherwise we handle this depending on whether the
+ # container already runs or not; if it does and needs to be restarted,
+ # we use the existing container's image ID.
+ image = self._get_image()
+ self.log(image, pretty_print=True)
+ if not container.exists or container.removing:
+ # New container
+ if container.removing:
+ self.log('Found container in removal phase')
+ else:
+ self.log('No container found')
+ if not self.parameters.image:
+ self.fail('Cannot create container when image is not specified!')
+ self.diff_tracker.add('exists', parameter=True, active=False)
+ if container.removing and not self.check_mode:
+ # Wait for container to be removed before trying to create it
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+ else:
+ # Existing container
+ different, differences = container.has_different_configuration(image)
+ image_different = False
+ if self.parameters.comparisons['image']['comparison'] == 'strict':
+ image_different = self._image_is_different(image, container)
+ if image_different or different or self.parameters.recreate:
+ self.diff_tracker.merge(differences)
+ self.diff['differences'] = differences.get_legacy_docker_container_diffs()
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
+ image_to_use = self.parameters.image
+ if not image_to_use and container and container.Image:
+ image_to_use = container.Image
+ if not image_to_use:
+ self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
+ if container.running:
+ self.container_stop(container.Id)
+ self.container_remove(container.Id)
+ if not self.check_mode:
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(image_to_use, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+
+ if container and container.exists:
+ container = self.update_limits(container)
+ container = self.update_networks(container, container_created)
+
+ if state == 'started' and not container.running:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ container = self.container_start(container.Id)
+ elif state == 'started' and self.parameters.restart:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ self.diff_tracker.add('restarted', parameter=True, active=False)
+ container = self.container_restart(container.Id)
+ elif state == 'stopped' and container.running:
+ self.diff_tracker.add('running', parameter=False, active=was_running)
+ self.container_stop(container.Id)
+ container = self._get_container(container.Id)
+
+ if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused:
+ self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
+ if not self.check_mode:
+ try:
+ if self.parameters.paused:
+ self.client.pause(container=container.Id)
+ else:
+ self.client.unpause(container=container.Id)
+ except Exception as exc:
+ self.fail("Error %s container %s: %s" % (
+ "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc)
+ ))
+ container = self._get_container(container.Id)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(set_paused=self.parameters.paused))
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.parameters.name)
+ if container.exists:
+ if container.running:
+ self.diff_tracker.add('running', parameter=False, active=True)
+ self.container_stop(container.Id)
+ self.diff_tracker.add('exists', parameter=False, active=True)
+ self.container_remove(container.Id)
+
+ def fail(self, msg, **kwargs):
+ self.client.fail(msg, **kwargs)
+
+ def _output_logs(self, msg):
+ self.client.module.log(msg=msg)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ return Container(self.client.get_container(container), self.parameters)
+
+ def _get_image(self):
+ if not self.parameters.image:
+ self.log('No image specified')
+ return None
+ if is_image_name_id(self.parameters.image):
+ image = self.client.find_image_by_id(self.parameters.image)
+ else:
+ repository, tag = utils.parse_repository_tag(self.parameters.image)
+ if not tag:
+ tag = "latest"
+ image = self.client.find_image(repository, tag)
+ if not image or self.parameters.pull:
+ if not self.check_mode:
+ self.log("Pull the image.")
+ image, alreadyToLatest = self.client.pull_image(repository, tag)
+ if alreadyToLatest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ elif not image:
+ # If the image isn't there, claim we'll pull.
+ # (Implicitly: if the image is there, claim it already was latest.)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+
+ self.log("image")
+ self.log(image, pretty_print=True)
+ return image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.Image:
+ if image.get('Id') != container.Image:
+ self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
+ return True
+ return False
+
+ def update_limits(self, container):
+ limits_differ, different_limits = container.has_different_resource_limits()
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
+ self.diff_tracker.merge(different_limits)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.Id, self.parameters.update_parameters)
+ return self._get_container(container.Id)
+ return container
+
+ def update_networks(self, container, container_created):
+ updated_container = container
+ if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
+ has_network_differences, network_differences = container.has_network_differences()
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ for netdiff in network_differences:
+ self.diff_tracker.add(
+ 'network.{0}'.format(netdiff['parameter']['name']),
+ parameter=netdiff['parameter'],
+ active=netdiff['container']
+ )
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
+ has_extra_networks, extra_networks = container.has_extra_networks()
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ for extra_network in extra_networks:
+ self.diff_tracker.add(
+ 'network.{0}'.format(extra_network['name']),
+ active=extra_network
+ )
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ to_native(exc)))
+ # connect to the network
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if diff['parameter'].get(para):
+ params[para] = diff['parameter'][para]
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
+ if not self.check_mode:
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
+ return self._get_container(container.Id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ to_native(exc)))
+ return self._get_container(container.Id)
+
+ def container_create(self, image, create_parameters):
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ new_container = self.client.create_container(image, **create_parameters)
+ self.client.report_warnings(new_container)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % to_native(exc))
+ return self._get_container(new_container['Id'])
+ return new_container
+
+ def container_start(self, container_id):
+ self.log("start container %s" % (container_id))
+ self.results['actions'].append(dict(started=container_id))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.client.start(container=container_id)
+ except Exception as exc:
+ self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
+
+ if self.parameters.detach is False:
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ status = self.client.wait(container_id)['StatusCode']
+ else:
+ status = self.client.wait(container_id)
+ if self.parameters.auto_remove:
+ output = "Cannot retrieve result as auto_remove is enabled"
+ if self.parameters.output_logs:
+ self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
+ else:
+ config = self.client.inspect_container(container_id)
+ logging_driver = config['HostConfig']['LogConfig']['Type']
+
+ if logging_driver in ('json-file', 'journald'):
+ output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+ if self.parameters.output_logs:
+ self._output_logs(msg=output)
+ else:
+ output = "Result logged using `%s` driver" % logging_driver
+
+ if status != 0:
+ self.fail(output, status=status)
+ if self.parameters.cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ return insp
+ return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.parameters.keep_volumes)
+ self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+ except NotFound as dummy:
+ pass
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
+ pass
+ else:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode and callable(getattr(self.client, 'update_container')):
+ try:
+ result = self.client.update_container(container_id, **update_parameters)
+ self.client.report_warnings(result)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.kill_signal:
+ response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+ else:
+ response = self.client.kill(container_id)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, exc))
+ return response
+
+ def container_restart(self, container_id):
+ self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ if self.parameters.stop_timeout:
+ dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ dummy = self.client.restart(container_id)
+ except Exception as exc:
+ self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_stop(self, container_id):
+ if self.parameters.force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ if self.parameters.stop_timeout:
+ response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ response = self.client.stop(container_id)
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be stopped
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+
+def detect_ipvX_address_usage(client):
+ '''
+ Helper function to detect whether any specified network uses ipv4_address or ipv6_address
+ '''
+ for network in client.module.params.get("networks") or []:
+ if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None:
+ return True
+ return False
+
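+# A note on how this helper is wired up (a sketch inferred from the
+# option_minimal_versions entry below, not from module_utils shown here):
+# AnsibleDockerClient calls detect_usage(client) and, if it returns True while
+# the listed docker_py_version/docker_api_version requirement is not met,
+# fails with the configured usage_msg.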
+
+class AnsibleDockerClientContainer(AnsibleDockerClient):
+ # A list of module options which are not docker container properties
+ __NON_CONTAINER_PROPERTY_OPTIONS = tuple([
+ 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks',
+ 'recreate', 'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal',
+ 'output_logs', 'paused', 'removal_wait_timeout'
+ ] + list(DOCKER_COMMON_ARGS.keys()))
+
+ def _parse_comparisons(self):
+ comparisons = {}
+ comp_aliases = {}
+ # Put in defaults
+ explicit_types = dict(
+ command='list',
+ devices='set(dict)',
+ device_requests='set(dict)',
+ dns_search_domains='list',
+ dns_servers='list',
+ env='set',
+ entrypoint='list',
+ etc_hosts='set',
+ mounts='set(dict)',
+ networks='set(dict)',
+ ulimits='set(dict)',
+ device_read_bps='set(dict)',
+ device_write_bps='set(dict)',
+ device_read_iops='set(dict)',
+ device_write_iops='set(dict)',
+ )
+ all_options = set() # this is for improving user feedback when a wrong option was specified for comparison
+ default_values = dict(
+ stop_timeout='ignore',
+ )
+ for option, data in self.module.argument_spec.items():
+ all_options.add(option)
+ for alias in data.get('aliases', []):
+ all_options.add(alias)
+ # Ignore options which aren't used as container properties
+ if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
+ continue
+ # Determine option type
+ if option in explicit_types:
+ datatype = explicit_types[option]
+ elif data['type'] == 'list':
+ datatype = 'set'
+ elif data['type'] == 'dict':
+ datatype = 'dict'
+ else:
+ datatype = 'value'
+ # Determine comparison type
+ if option in default_values:
+ comparison = default_values[option]
+ elif datatype in ('list', 'value'):
+ comparison = 'strict'
+ else:
+ comparison = 'allow_more_present'
+ comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
+ # Keep track of aliases
+ comp_aliases[option] = option
+ for alias in data.get('aliases', []):
+ comp_aliases[alias] = option
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ comparisons['image']['comparison'] = 'ignore'
+ if self.module.params['purge_networks']:
+ comparisons['networks']['comparison'] = 'strict'
+ # Process options
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option, v in comparisons.items():
+ if option == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ v['comparison'] = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+ if key in all_options:
+ self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+ "since it does not correspond to the container's state!" % key)
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ comparisons[key_main]['comparison'] = value
+ elif value == 'allow_more_present':
+ if comparisons[key_main]['type'] == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ comparisons[key_main]['comparison'] = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
+ # Add implicit options
+ comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
+ comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
+ comparisons['disable_healthcheck'] = dict(type='value',
+ comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
+ name='disable_healthcheck')
+ # Check legacy values
+ if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
+ self.comparisons = comparisons
+
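+ # Illustrative sketch (option names come from the argument spec below; the
+ # concrete values are examples only): after _parse_comparisons() runs,
+ # self.comparisons maps each option to an entry such as
+ # self.comparisons['env'] == dict(type='set', comparison='allow_more_present', name='env')
+ # self.comparisons['image'] == dict(type='value', comparison='strict', name='image')
+ # A playbook can override these defaults via the comparisons module option,
+ # for example: comparisons: {'*': ignore, env: strict}
+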
+ def _get_additional_minimal_versions(self):
+ stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
+ stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
+ if stop_timeout_supported:
+ stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
+ "the container's stop_timeout configuration. "
+ "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
+ else:
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
+ "update the container's stop_timeout configuration." % (self.docker_api_version_str,))
+ self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
+
+ def __init__(self, **kwargs):
+ option_minimal_versions = dict(
+ # internal options
+ log_config=dict(),
+ publish_all_ports=dict(),
+ ports=dict(),
+ volume_binds=dict(),
+ name=dict(),
+ # normal options
+ device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'),
+ dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
+ ipc_mode=dict(docker_api_version='1.25'),
+ mac_address=dict(docker_api_version='1.25'),
+ oom_score_adj=dict(docker_api_version='1.22'),
+ shm_size=dict(docker_api_version='1.22'),
+ stop_signal=dict(docker_api_version='1.21'),
+ tmpfs=dict(docker_api_version='1.22'),
+ volume_driver=dict(docker_api_version='1.21'),
+ memory_reservation=dict(docker_api_version='1.21'),
+ kernel_memory=dict(docker_api_version='1.21'),
+ auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
+ init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
+ userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
+ pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
+ # specials
+ ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22',
+ detect_usage=detect_ipvX_address_usage,
+ usage_msg='ipv4_address or ipv6_address in networks'),
+ stop_timeout=dict(), # see _get_additional_minimal_versions()
+ )
+
+ super(AnsibleDockerClientContainer, self).__init__(
+ option_minimal_versions=option_minimal_versions,
+ option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
+ **kwargs
+ )
+
+ self.image_inspect_source = 'Config'
+ if self.docker_api_version < LooseVersion('1.21'):
+ self.image_inspect_source = 'ContainerConfig'
+
+ self._get_additional_minimal_versions()
+ self._parse_comparisons()
+
+ if self.module.params['container_default_behavior'] is None:
+ self.module.params['container_default_behavior'] = 'compatibility'
+ self.module.deprecate(
+ 'The container_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 3.0.0. To remove this warning, please specify an explicit value for it now',
+ version='3.0.0', collection_name='community.general' # was Ansible 2.14
+ )
+ if self.module.params['container_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ auto_remove=False,
+ detach=True,
+ init=False,
+ interactive=False,
+ memory="0",
+ paused=False,
+ privileged=False,
+ read_only=False,
+ tty=False,
+ )
+ for param, value in old_default_values.items():
+ if self.module.params[param] is None:
+ self.module.params[param] = value
+
+
+def main():
+ argument_spec = dict(
+ auto_remove=dict(type='bool'),
+ blkio_weight=dict(type='int'),
+ capabilities=dict(type='list', elements='str'),
+ cap_drop=dict(type='list', elements='str'),
+ cleanup=dict(type='bool', default=False),
+ command=dict(type='raw'),
+ comparisons=dict(type='dict'),
+ container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ cpu_period=dict(type='int'),
+ cpu_quota=dict(type='int'),
+ cpus=dict(type='float'),
+ cpuset_cpus=dict(type='str'),
+ cpuset_mems=dict(type='str'),
+ cpu_shares=dict(type='int'),
+ detach=dict(type='bool'),
+ devices=dict(type='list', elements='str'),
+ device_read_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_write_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_read_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_write_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_requests=dict(type='list', elements='dict', options=dict(
+ capabilities=dict(type='list', elements='list'),
+ count=dict(type='int'),
+ device_ids=dict(type='list', elements='str'),
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ dns_servers=dict(type='list', elements='str'),
+ dns_opts=dict(type='list', elements='str'),
+ dns_search_domains=dict(type='list', elements='str'),
+ domainname=dict(type='str'),
+ entrypoint=dict(type='list', elements='str'),
+ env=dict(type='dict'),
+ env_file=dict(type='path'),
+ etc_hosts=dict(type='dict'),
+ exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
+ force_kill=dict(type='bool', default=False, aliases=['forcekill']),
+ groups=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ ignore_image=dict(type='bool', default=False),
+ image=dict(type='str'),
+ init=dict(type='bool'),
+ interactive=dict(type='bool'),
+ ipc_mode=dict(type='str'),
+ keep_volumes=dict(type='bool', default=True),
+ kernel_memory=dict(type='str'),
+ kill_signal=dict(type='str'),
+ labels=dict(type='dict'),
+ links=dict(type='list', elements='str'),
+ log_driver=dict(type='str'),
+ log_options=dict(type='dict', aliases=['log_opt']),
+ mac_address=dict(type='str'),
+ memory=dict(type='str'),
+ memory_reservation=dict(type='str'),
+ memory_swap=dict(type='str'),
+ memory_swappiness=dict(type='int'),
+ mounts=dict(type='list', elements='dict', options=dict(
+ target=dict(type='str', required=True),
+ source=dict(type='str'),
+ type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
+ read_only=dict(type='bool'),
+ consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
+ propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
+ no_copy=dict(type='bool'),
+ labels=dict(type='dict'),
+ volume_driver=dict(type='str'),
+ volume_options=dict(type='dict'),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='str'),
+ )),
+ name=dict(type='str', required=True),
+ network_mode=dict(type='str'),
+ networks=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str', required=True),
+ ipv4_address=dict(type='str'),
+ ipv6_address=dict(type='str'),
+ aliases=dict(type='list', elements='str'),
+ links=dict(type='list', elements='str'),
+ )),
+ networks_cli_compatible=dict(type='bool'),
+ oom_killer=dict(type='bool'),
+ oom_score_adj=dict(type='int'),
+ output_logs=dict(type='bool', default=False),
+ paused=dict(type='bool'),
+ pid_mode=dict(type='str'),
+ pids_limit=dict(type='int'),
+ privileged=dict(type='bool'),
+ published_ports=dict(type='list', elements='str', aliases=['ports']),
+ pull=dict(type='bool', default=False),
+ purge_networks=dict(type='bool', default=False),
+ read_only=dict(type='bool'),
+ recreate=dict(type='bool', default=False),
+ removal_wait_timeout=dict(type='float'),
+ restart=dict(type='bool', default=False),
+ restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
+ restart_retries=dict(type='int'),
+ runtime=dict(type='str'),
+ security_opts=dict(type='list', elements='str'),
+ shm_size=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
+ stop_signal=dict(type='str'),
+ stop_timeout=dict(type='int'),
+ sysctls=dict(type='dict'),
+ tmpfs=dict(type='list', elements='str'),
+ trust_image_content=dict(type='bool', default=False, removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ ulimits=dict(type='list', elements='str'),
+ user=dict(type='str'),
+ userns_mode=dict(type='str'),
+ uts=dict(type='str'),
+ volume_driver=dict(type='str'),
+ volumes=dict(type='list', elements='str'),
+ volumes_from=dict(type='list', elements='str'),
+ working_dir=dict(type='str'),
+ )
+
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClientContainer(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module.params['networks_cli_compatible'] is None and client.module.params['networks']:
+ client.module.deprecate(
+ 'Please note that docker_container handles networks slightly differently than the docker CLI. '
+ 'If you specify networks, the default network will still be attached as the first network. '
+ '(You can specify purge_networks to remove all networks not explicitly listed.) '
+ 'This behavior will change in community.general 2.0.0. You can change the behavior now by setting '
+ 'the new `networks_cli_compatible` option to `yes`, and remove this warning by setting '
+ 'it to `no`',
+ version='2.0.0', collection_name='community.general', # was Ansible 2.12
+ )
+ if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None:
+ client.module.deprecate(
+ 'Please note that the default value for `network_mode` will change from not specified '
+ '(which is equal to `default`) to the name of the first network in `networks` if '
+ '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can '
+ 'change the behavior now by explicitly setting `network_mode` to the name of the first '
+ 'network in `networks`, and remove this warning by setting `network_mode` to `default`. '
+ 'Please make sure that the value you set to `network_mode` equals the inspection result '
+ 'for existing containers, otherwise the module will recreate them. You can find out the '
+ 'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
+ version='3.0.0', collection_name='community.general', # was Ansible 2.14
+ )
+
+ try:
+ cm = ContainerManager(client)
+ client.module.exit_json(**sanitize_result(cm.results))
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py
new file mode 100644
index 00000000..80025067
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about a docker container
+
+description:
+ - Retrieves facts about a docker container.
+ - Essentially returns the output of C(docker inspect <name>), similar to what M(community.general.docker_container)
+ returns for a non-absent container.
+
+
+options:
+ name:
+ description:
+ - The name of the container to inspect.
+ - When identifying an existing container, the name may be a container name or a long or short container ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Get info on container
+ community.general.docker_container_info:
+ name: mydata
+ register: result
+
+- name: Does container exist?
+ ansible.builtin.debug:
+ msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+ ansible.builtin.debug:
+ var: result.container
+ when: result.exists
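+
+# The name option also accepts a long or short container ID (see above). The
+# ID below is hypothetical; substitute one from your own host.
+- name: Get info on container by short ID
+ community.general.docker_container_info:
+ name: 8e47bf64
+ register: result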
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the container exists.
+ type: bool
+ returned: always
+ sample: true
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Will be C(none) if the container does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ container = client.get_container(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=bool(container),
+ container=container,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py
new file mode 100644
index 00000000..674f8ad0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about the docker host and lists of objects of the services
+
+description:
+ - Retrieves facts about a docker host.
+ - Essentially returns the output of C(docker system info).
+ - The module also allows listing object names for containers, images, networks and volumes,
+ and querying information on disk usage.
+ - The output differs depending on the API version of the docker daemon.
+ - If the docker daemon cannot be contacted or does not meet the API version requirements,
+ the module will fail.
+
+
+options:
+ containers:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to list.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to list images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to list.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to list networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to list volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ disk_usage:
+ description:
+ - Summary information on used disk space by all Docker layers.
+ - The output is a sum of images, volumes, containers and build cache.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+ - When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes),
+ the output will contain verbose information about the objects, matching the full output of the API method.
+ For details, see the documentation of your version of the Docker API at L(https://docs.docker.com/engine/api/).
+ - The verbose output in this module contains only a subset of the information returned by the
+ corresponding I(_info) module for each object type.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+ community.general.docker_host_info:
+ register: result
+
+- name: Get info on docker host and list images
+ community.general.docker_host_info:
+ images: yes
+ register: result
+
+- name: Get info on docker host and list images matching the filter
+ community.general.docker_host_info:
+ images: yes
+ images_filters:
+ label: "mylabel"
+ register: result
+
+- name: Get info on docker host and verbose list images
+ community.general.docker_host_info:
+ images: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on docker host and used disk space
+ community.general.docker_host_info:
+ disk_usage: yes
+ register: result
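+
+# An additional example; the filter value is taken from the containers_filters
+# option documentation above and is illustrative only.
+- name: Get info on docker host and list containers matching the filter
+ community.general.docker_host_info:
+ containers: yes
+ containers_filters:
+ until: 24h
+ register: result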
+
+- ansible.builtin.debug:
+ var: result.host_info
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+
+host_info:
+ description:
+ - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+ returned: always
+ type: dict
+volumes:
+ description:
+ - List of dict objects containing the basic information about each volume.
+ Keys match the C(docker volume ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(volumes) is C(yes)
+ type: list
+ elements: dict
+networks:
+ description:
+ - List of dict objects containing the basic information about each network.
+ Keys match the C(docker network ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(networks) is C(yes)
+ type: list
+ elements: dict
+containers:
+ description:
+ - List of dict objects containing the basic information about each container.
+ Keys match the C(docker container ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(containers) is C(yes)
+ type: list
+ elements: dict
+images:
+ description:
+ - List of dict objects containing the basic information about each image.
+ Keys match the C(docker image ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(images) is C(yes)
+ type: list
+ elements: dict
+disk_usage:
+ description:
+ - Information on summary disk usage by images, containers and volumes on the docker host
+ unless I(verbose_output=yes). See description for I(verbose_output).
+ returned: When I(disk_usage) is C(yes)
+ type: dict
+
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # Missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import clean_dict_booleans_for_docker_api
+
+
+class DockerHostManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerHostManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+ self.results['host_info'] = self.get_docker_host_info()
+
+ if self.client.module.params['disk_usage']:
+ self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+ def get_docker_host_info(self):
+ try:
+ return self.client.info()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_disk_usage_facts(self):
+ try:
+ if self.verbose_output:
+ return self.client.df()
+ else:
+ return dict(LayersSize=self.client.df()['LayersSize'])
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None):
+ items = None
+ items_list = []
+
+ header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+ header_volumes = ['Driver', 'Name']
+ header_images = ['Id', 'RepoTags', 'Created', 'Size']
+ header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+ filter_arg = dict()
+ if filters:
+ filter_arg['filters'] = filters
+ try:
+ if docker_object == 'containers':
+ items = self.client.containers(**filter_arg)
+ elif docker_object == 'networks':
+ items = self.client.networks(**filter_arg)
+ elif docker_object == 'images':
+ items = self.client.images(**filter_arg)
+ elif docker_object == 'volumes':
+ items = self.client.volumes(**filter_arg)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ if docker_object != 'volumes':
+ return items
+ else:
+ return items['Volumes']
+
+ if docker_object == 'volumes':
+ items = items['Volumes']
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'containers':
+ for key in header_containers:
+ item_record[key] = item.get(key)
+ elif docker_object == 'networks':
+ for key in header_networks:
+ item_record[key] = item.get(key)
+ elif docker_object == 'images':
+ for key in header_images:
+ item_record[key] = item.get(key)
+ elif docker_object == 'volumes':
+ for key in header_volumes:
+ item_record[key] = item.get(key)
+ items_list.append(item_record)
+
+ return items_list
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ disk_usage=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+
+ option_minimal_versions = dict(
+ networks_filters=dict(docker_py_version='2.0.2'),
+ disk_usage=dict(docker_py_version='2.2.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerHostManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py
new file mode 100644
index 00000000..1e2976be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py
@@ -0,0 +1,1021 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images
+
+
+description:
+ - Build, load or pull an image, making the image available for creating containers. Also supports tagging an
+ image into a repository and archiving an image to a .tar file.
+ - Since Ansible 2.8, it is recommended to explicitly specify the image's source (I(source) can be C(build),
+ C(load), C(pull) or C(local)). This will be required from community.general 2.0.0 on.
+
+options:
+ source:
+ description:
+ - "Determines where the module will try to retrieve the image from."
+ - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+ be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+ be specified when this value is used."
+ - "Use C(pull) to pull the image from a registry."
+ - "Use C(local) to make sure that the image is already available on the local
+ docker daemon, i.e. do not try to build, pull or load the image."
+ - "Before community.general 2.0.0, the value of this option will be auto-detected
+ to be backwards compatible, but a warning will be issued if it is not
+ explicitly specified. From community.general 2.0.0 on, auto-detection will be disabled
+ and this option will be made mandatory."
+ type: str
+ choices:
+ - build
+ - load
+ - pull
+ - local
+ build:
+ description:
+ - "Specifies options used for building images."
+ type: dict
+ suboptions:
+ cache_from:
+ description:
+ - List of image names to consider as cache source.
+ type: list
+ elements: str
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ type: str
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ type: int
+ path:
+ description:
+ - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ type: path
+ required: yes
+ pull:
+ description:
+ - When building an image, download any updates to the FROM image in the Dockerfile.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ type: bool
+ default: yes
+ network:
+ description:
+ - The network to use for C(RUN) build instructions.
+ type: str
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ type: bool
+ default: no
+ etc_hosts:
+ description:
+ - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+ type: dict
+ args:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_config_proxy:
+ description:
+ - If set to C(yes) and a proxy configuration is specified in the docker client configuration
+ (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+ be set in the container being built.
+ - Needs Docker SDK for Python >= 3.7.0.
+ type: bool
+ target:
+ description:
+ - When building an image, specifies an intermediate build stage by
+ name as the final stage for the resulting image.
+ type: str
+ archive_path:
+ description:
+ - Use with state C(present) to archive an image to a .tar file.
+ type: path
+ load_path:
+ description:
+ - Use with state C(present) to load an image from a .tar file.
+ - Set I(source) to C(load) if you want to load the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used (except
+ if I(path) is specified as well, in which case building will take precedence).
+ From community.general 2.0.0 on, you have to set I(source) to C(load).
+ type: path
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ - Please use I(build.dockerfile) instead. This option will be removed in community.general 2.0.0.
+ type: str
+ force:
+ description:
+ - Use with state C(absent) to un-tag and remove all images matching the specified name. Use with state
+ C(present) to build, load or pull an image when the image already exists. Also use with state C(present)
+ to force tagging an image.
+ - Please stop using this option, and use the more specialized force options
+ I(force_source), I(force_absent) and I(force_tag) instead.
+ - This option will be removed in community.general 2.0.0.
+ type: bool
+ force_source:
+ description:
+ - Use with state C(present) to build, load or pull an image (depending on the
+ value of the I(source) option) when the image already exists.
+ type: bool
+ default: false
+ force_absent:
+ description:
+ - Use with state C(absent) to un-tag and remove all images matching the specified name.
+ type: bool
+ default: false
+ force_tag:
+ description:
+ - Use with state C(present) to force tagging an image.
+ type: bool
+ default: false
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ - Please use I(build.http_timeout) instead. This option will be removed in community.general 2.0.0.
+ type: int
+ name:
+ description:
+ - "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
+ When pushing or pulling an image, the name can optionally include the tag by appending ':tag_name'."
+ - Note that image IDs (hashes) are not supported.
+ type: str
+ required: yes
+ path:
+ description:
+ - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ - Set I(source) to C(build) if you want to build the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used. From community.general 2.0.0
+ on, you have to set I(source) to C(build).
+ - Please use I(build.path) instead. This option will be removed in community.general 2.0.0.
+ type: path
+ aliases:
+ - build_path
+ pull:
+ description:
+ - When building an image, download any updates to the FROM image in the Dockerfile.
+ - Please use I(build.pull) instead. This option will be removed in community.general 2.0.0.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ push:
+ description:
+ - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
+ type: bool
+ default: no
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ - Please use I(build.rm) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: yes
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ - Please use I(build.nocache) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: no
+ repository:
+ description:
+ - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
+ format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
+ type: str
+ state:
+ description:
+ - Make assertions about the state of an image.
+ - When C(absent) an image will be removed. Use the force option to un-tag and remove all images
+ matching the provided name.
+ - When C(present) check if an image exists using the provided name and tag. If the image is not found or the
+ force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
+ - By default the image will be pulled from Docker Hub, or the registry specified in the image's name. Note that
+ this will change in community.general 2.0.0, so to make sure that you are pulling, set I(source) to C(pull). To build
+ the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source)
+ to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to
+ a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed.
+ - "*Note:* C(state=build) is DEPRECATED and will be removed in community.general 2.0.0. Specifying C(build) will behave the
+ same as C(present)."
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ - build
+ tag:
+ description:
+ - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
+ I(latest).
+ - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
+ type: str
+ default: latest
+ buildargs:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ - Please use I(build.args) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ - Please use I(build.container_limits) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_tls:
+ description:
+ - "DEPRECATED. Whether to use tls to connect to the docker daemon. Set to
+ C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that
+ the server's certificate is valid for the server."
+ - "*Note:* If you specify this option, it will set the value of the I(tls) or
+ I(validate_certs) parameters if not set to C(no)."
+ - Will be removed in community.general 2.0.0.
+ type: str
+ choices:
+ - 'no'
+ - 'encrypt'
+ - 'verify'
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Pavel Antonov (@softzilla)
+ - Chris Houseknecht (@chouseknecht)
+ - Sorin Sbarnea (@ssbarnea)
+
+'''
+
+EXAMPLES = '''
+
+- name: Pull an image
+ community.general.docker_image:
+ name: pacur/centos-7
+ source: pull
+
+- name: Tag and push to docker hub
+ community.general.docker_image:
+ name: pacur/centos-7:56
+ repository: dcoppenhagan/myimage:7.56
+ push: yes
+ source: local
+
+- name: Tag and push to local registry
+ community.general.docker_image:
+ # Image will be centos:7
+ name: centos
+ # Will be pushed to localhost:5000/centos:7
+ repository: localhost:5000/centos
+ tag: 7
+ push: yes
+ source: local
+
+- name: Add tag latest to image
+ community.general.docker_image:
+ name: myimage:7.1.2
+ repository: myimage:latest
+ # As 'latest' usually already is present, we need to enable overwriting of existing tags:
+ force_tag: yes
+ source: local
+
+- name: Remove image
+ community.general.docker_image:
+ state: absent
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+
+- name: Build an image and push it to a private repo
+ community.general.docker_image:
+ build:
+ path: ./sinatra
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ push: yes
+ source: build
+
+- name: Archive image
+ community.general.docker_image:
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ archive_path: my_sinatra.tar
+ source: local
+
+- name: Load image from archive and push to a private registry
+ community.general.docker_image:
+ name: localhost:5000/myimages/sinatra
+ tag: v1
+ push: yes
+ load_path: my_sinatra.tar
+ source: load
+
+- name: Build image with build args
+ community.general.docker_image:
+ name: myimage
+ build:
+ path: /path/to/build/dir
+ args:
+ log_volume: /var/log/myapp
+ listen_port: 8080
+ source: build
+
+- name: Build image using cache source
+ community.general.docker_image:
+ name: myimage:latest
+ build:
+ path: /path/to/build/dir
+ # Use as cache source for building myimage
+ cache_from:
+ - nginx:latest
+ - alpine:3.8
+ source: build
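+
+# An additional example using the build.dockerfile suboption documented above.
+# The image name and paths are hypothetical.
+- name: Build an image from an alternate Dockerfile
+ community.general.docker_image:
+ name: myimage:test
+ build:
+ # dockerfile is resolved relative to path
+ path: /path/to/build/dir
+ dockerfile: Dockerfile.test
+ source: build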
+'''
+
+RETURN = '''
+image:
+ description: Image inspection results for the affected image.
+ returned: success
+ type: dict
+ sample: {}
+stdout:
+ description: Docker build output when building an image.
+ returned: success
+ type: str
+ sample: ""
+ version_added: 1.3.0
+'''
+
+import errno
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ clean_dict_booleans_for_docker_api,
+ docker_version,
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ is_valid_tag,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+if docker_version is not None:
+ try:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.auth import resolve_repository_name
+ else:
+ from docker.auth.auth import resolve_repository_name
+ from docker.utils.utils import parse_repository_tag
+ from docker.errors import DockerException
+ except ImportError:
+ # missing Docker SDK for Python handled in module_utils.docker.common
+ pass
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.source = parameters['source']
+ build = parameters['build'] or dict()
+ self.archive_path = parameters.get('archive_path')
+ self.cache_from = build.get('cache_from')
+ self.container_limits = build.get('container_limits')
+ self.dockerfile = build.get('dockerfile')
+ self.force_source = parameters.get('force_source')
+ self.force_absent = parameters.get('force_absent')
+ self.force_tag = parameters.get('force_tag')
+ self.load_path = parameters.get('load_path')
+ self.name = parameters.get('name')
+ self.network = build.get('network')
+ self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+ self.nocache = build.get('nocache', False)
+ self.build_path = build.get('path')
+ self.pull = build.get('pull')
+ self.target = build.get('target')
+ self.repository = parameters.get('repository')
+ self.rm = build.get('rm', True)
+ self.state = parameters.get('state')
+ self.tag = parameters.get('tag')
+ self.http_timeout = build.get('http_timeout')
+ self.push = parameters.get('push')
+ self.buildargs = build.get('args')
+ self.use_config_proxy = build.get('use_config_proxy')
+
+ # If name contains a tag, it takes precedence over tag parameter.
+ if not is_image_name_id(self.name):
+ repo, repo_tag = parse_repository_tag(self.name)
+ if repo_tag:
+ self.name = repo
+ self.tag = repo_tag
+
+ if self.state == 'present':
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def present(self):
+ '''
+ Handles state = 'present', which includes building, loading or pulling an image,
+ depending on user provided parameters.
+
+ :returns None
+ '''
+ image = self.client.find_image(name=self.name, tag=self.tag)
+
+ if not image or self.force_source:
+ if self.source == 'build':
+ # Build the image
+ if not os.path.isdir(self.build_path):
+ self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.log("Building image %s" % image_name)
+ self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results.update(self.build_image())
+
+ elif self.source == 'load':
+ # Load the image from an archive
+ if not os.path.isfile(self.load_path):
+ self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
+ self.load_path))
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'] = self.load_image()
+ elif self.source == 'pull':
+ # pull the image
+ self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
+ elif self.source == 'local':
+ if image is None:
+ name = self.name
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ self.client.fail('Cannot find the image %s locally.' % name)
+ if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if self.archive_path:
+ self.archive_image(self.name, self.tag)
+
+ if self.push and not self.repository:
+ self.push_image(self.name, self.tag)
+ elif self.repository:
+ self.tag_image(self.name, self.tag, self.repository, push=self.push)
+
+ def absent(self):
+ '''
+ Handles state = 'absent', which removes an image.
+
+ :return None
+ '''
+ name = self.name
+ if is_image_name_id(name):
+ image = self.client.find_image_by_id(name)
+ else:
+ image = self.client.find_image(name, self.tag)
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ if image:
+ if not self.check_mode:
+ try:
+ self.client.remove_image(name, force=self.force_absent)
+ except Exception as exc:
+ self.fail("Error removing image %s - %s" % (name, str(exc)))
+
+ self.results['changed'] = True
+ self.results['actions'].append("Removed image %s" % (name))
+ self.results['image']['state'] = 'Deleted'
+
+ def archive_image(self, name, tag):
+ '''
+ Archive an image to a .tar file. Called when archive_path is passed.
+
+ :param name - name of the image. Type: str
+ :return None
+ '''
+
+ if not tag:
+ tag = "latest"
+
+ image = self.client.find_image(name=name, tag=tag)
+ if not image:
+ self.log("archive image: image %s:%s not found" % (name, tag))
+ return
+
+ image_name = "%s:%s" % (name, tag)
+ self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.log("Getting archive of image %s" % image_name)
+ try:
+ image = self.client.get_image(image_name)
+ except Exception as exc:
+ self.fail("Error getting image %s - %s" % (image_name, str(exc)))
+
+ try:
+ with open(self.archive_path, 'wb') as fd:
+ if self.client.docker_py_version >= LooseVersion('3.0.0'):
+ for chunk in image:
+ fd.write(chunk)
+ else:
+ for chunk in image.stream(2048, decode_content=False):
+ fd.write(chunk)
+ except Exception as exc:
+ self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
+
+ image = self.client.find_image(name=name, tag=tag)
+ if image:
+ self.results['image'] = image
+
+ def push_image(self, name, tag=None):
+ '''
+ If the name of the image contains a repository path, then push the image.
+
+ :param name Name of the image to push.
+ :param tag Use a specific tag.
+ :return: None
+ '''
+
+ repository = name
+ if not tag:
+ repository, tag = parse_repository_tag(name)
+ registry, repo_name = resolve_repository_name(repository)
+
+ self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+
+ if registry:
+ self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ status = None
+ try:
+ changed = False
+ for line in self.client.push(repository, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('errorDetail'):
+ raise Exception(line['errorDetail']['message'])
+ status = line.get('status')
+ if status == 'Pushing':
+ changed = True
+ self.results['changed'] = changed
+ except Exception as exc:
+ if re.search('unauthorized', str(exc)):
+ if re.search('authentication required', str(exc)):
+ self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
+ (registry, repo_name, tag, str(exc), registry))
+ else:
+ self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
+ (registry, repo_name, tag, str(exc)))
+ self.fail("Error pushing image %s: %s" % (repository, str(exc)))
+ self.results['image'] = self.client.find_image(name=repository, tag=tag)
+ if not self.results['image']:
+ self.results['image'] = dict()
+ self.results['image']['push_status'] = status
+
+ def tag_image(self, name, tag, repository, push=False):
+ '''
+ Tag an image into a repository.
+
+ :param name: name of the image. required.
+ :param tag: image tag.
+ :param repository: path to the repository. required.
+ :param push: bool. push the image once it's tagged.
+ :return: None
+ '''
+ repo, repo_tag = parse_repository_tag(repository)
+ if not repo_tag:
+ repo_tag = "latest"
+ if tag:
+ repo_tag = tag
+ image = self.client.find_image(name=repo, tag=repo_tag)
+ found = 'found' if image else 'not found'
+ self.log("image %s was %s" % (repo, found))
+
+ if not image or self.force_tag:
+ self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ self.results['changed'] = True
+ self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ if not self.check_mode:
+ try:
+ # Finding the image does not always work, especially when running against a local
+ # registry. In those cases, if we don't set force=True, the tag operation errors out.
+ image_name = name
+ if tag and not re.search(tag, name):
+ image_name = "%s:%s" % (name, tag)
+ tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
+ if not tag_status:
+ raise Exception("Tag operation failed.")
+ except Exception as exc:
+ self.fail("Error: failed to tag image - %s" % str(exc))
+ self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
+ if image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if push:
+ self.push_image(repo, repo_tag)
+
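+ # Illustrative sketch (not part of the original module): the call above is the
+ # API equivalent of `docker tag`. Assuming docker-py:
+ #   client.tag('app:1.0', 'registry.example.com/ns/app', tag='1.0', force=True)
+ #   # -> True on success; the new repo:tag points at the same image ID
+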
+ def build_image(self):
+ '''
+ Build an image
+
+ :return: image dict
+ '''
+ params = dict(
+ path=self.build_path,
+ tag=self.name,
+ rm=self.rm,
+ nocache=self.nocache,
+ timeout=self.http_timeout,
+ pull=self.pull,
+ forcerm=self.rm,
+ dockerfile=self.dockerfile,
+ decode=True,
+ )
+ if self.client.docker_py_version < LooseVersion('3.0.0'):
+ params['stream'] = True
+
+ if self.tag:
+ params['tag'] = "%s:%s" % (self.name, self.tag)
+ if self.container_limits:
+ params['container_limits'] = self.container_limits
+ if self.buildargs:
+ for key, value in self.buildargs.items():
+ self.buildargs[key] = to_native(value)
+ params['buildargs'] = self.buildargs
+ if self.cache_from:
+ params['cache_from'] = self.cache_from
+ if self.network:
+ params['network_mode'] = self.network
+ if self.extra_hosts:
+ params['extra_hosts'] = self.extra_hosts
+ if self.use_config_proxy:
+ params['use_config_proxy'] = self.use_config_proxy
+ # Due to a bug in docker-py, it will crash if
+ # use_config_proxy is True and buildargs is None
+ if 'buildargs' not in params:
+ params['buildargs'] = {}
+ if self.target:
+ params['target'] = self.target
+
+ build_output = []
+ for line in self.client.build(**params):
+ # line = json.loads(line)
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ build_line = line.get("stream") or line.get("status") or ''
+ build_output.append(build_line)
+
+ if line.get('error'):
+ if line.get('errorDetail'):
+ errorDetail = line.get('errorDetail')
+ self.fail(
+ "Error building %s - code: %s, message: %s, logs: %s" % (
+ self.name,
+ errorDetail.get('code'),
+ errorDetail.get('message'),
+ build_output))
+ else:
+ self.fail("Error building %s - message: %s, logs: %s" % (
+ self.name, line.get('error'), build_output))
+
+ return {"stdout": "\n".join(build_output),
+ "image": self.client.find_image(name=self.name, tag=self.tag)}
+
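+ # Illustrative note (not part of the original module): with decode=True,
+ # client.build() is assumed to yield the daemon's JSON progress stream as
+ # dicts, e.g.
+ #   {'stream': 'Step 1/3 : FROM busybox\n'}
+ #   {'status': 'Pulling from library/busybox', 'id': 'latest'}
+ #   {'error': '...', 'errorDetail': {'code': 1, 'message': '...'}}
+ # which is why the loop above collects 'stream'/'status' and fails on 'error'.
+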
+ def load_image(self):
+ '''
+ Load an image from a .tar archive
+
+ :return: image dict
+ '''
+ # Load image(s) from file
+ load_output = []
+ has_output = False
+ try:
+ self.log("Opening image %s" % self.load_path)
+ with open(self.load_path, 'rb') as image_tar:
+ self.log("Loading image from %s" % self.load_path)
+ output = self.client.load_image(image_tar)
+ if output is not None:
+ # Old versions of the Docker SDK for Python (before version 2.5.0) do not return anything.
+ # (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
+ # Note that before that commit, something other than None was returned, but that was also
+ # only introduced in a commit that first appeared in 2.5.0 (see
+ # https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
+ # So the above check works for every released version of the Docker SDK for Python.
+ has_output = True
+ for line in output:
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ load_line = line.get("stream") or line.get("status") or ''
+ load_output.append(load_line)
+ else:
+ if LooseVersion(docker_version) < LooseVersion('2.5.0'):
+ self.client.module.warn(
+ 'The installed version of the Docker SDK for Python does not return the loading results'
+ ' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
+ ' loaded, whether multiple images were loaded, or whether the load actually succeeded.'
+ ' If you are not stuck with Python 2.6, *please* upgrade to a version newer than 2.5.0'
+ ' (2.5.0 was released in August 2017).'
+ )
+ else:
+ self.client.module.warn(
+ 'The API version of your Docker daemon is < 1.23, which does not return the image'
+ ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+ ' expected image was loaded, whether multiple images were loaded, or whether the load'
+ ' actually succeeded. You should consider upgrading your Docker daemon.'
+ )
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ if has_output:
+ # We can only do this when we actually got some output from Docker daemon
+ loaded_images = set()
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.add(line[len('Loaded image:'):].strip())
+
+ if not loaded_images:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+ expected_image = '%s:%s' % (self.name, self.tag)
+ if expected_image not in loaded_images:
+ self.client.fail(
+ "The archive did not contain image '%s'. Instead, found %s." % (
+ expected_image, ', '.join(["'%s'" % image for image in sorted(loaded_images)])),
+ stdout='\n'.join(load_output))
+ loaded_images.remove(expected_image)
+
+ if loaded_images:
+ self.client.module.warn(
+ "The archive contained more images than specified: %s" % (
+ ', '.join(["'%s'" % image for image in sorted(loaded_images)]), ))
+
+ return self.client.find_image(self.name, self.tag)
+
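+ # Illustrative note (not part of the original module): the daemon is assumed
+ # to report loaded images as status lines, e.g.
+ #   {'stream': 'Loaded image: registry:2\n'}
+ # which is what the 'Loaded image:' prefix parsing above relies on. Archives
+ # saved by ID (without tags) would be reported as 'Loaded image ID: sha256:...'
+ # and thus not match the expected image.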
+
+def main():
+ argument_spec = dict(
+ source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
+ build=dict(type='dict', options=dict(
+ cache_from=dict(type='list', elements='str'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ )),
+ dockerfile=dict(type='str'),
+ http_timeout=dict(type='int'),
+ network=dict(type='str'),
+ nocache=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ pull=dict(type='bool'),
+ rm=dict(type='bool', default=True),
+ args=dict(type='dict'),
+ use_config_proxy=dict(type='bool'),
+ target=dict(type='str'),
+ etc_hosts=dict(type='dict'),
+ )),
+ archive_path=dict(type='path'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ dockerfile=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force_source=dict(type='bool', default=False),
+ force_absent=dict(type='bool', default=False),
+ force_tag=dict(type='bool', default=False),
+ http_timeout=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ load_path=dict(type='path'),
+ name=dict(type='str', required=True),
+ nocache=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ path=dict(type='path', aliases=['build_path'], removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ pull=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ push=dict(type='bool', default=False),
+ repository=dict(type='str'),
+ rm=dict(type='bool', default=True, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ state=dict(type='str', default='present', choices=['absent', 'present', 'build']),
+ tag=dict(type='str', default='latest'),
+ use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ buildargs=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ )
+
+ required_if = [
+ # ('state', 'present', ['source']), -- enable in community.general 2.0.0
+ # ('source', 'build', ['build']), -- enable in community.general 2.0.0
+ ('source', 'load', ['load_path']),
+ ]
+
+ def detect_build_cache_from(client):
+ return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None
+
+ def detect_build_network(client):
+ return client.module.params['build'] and client.module.params['build'].get('network') is not None
+
+ def detect_build_target(client):
+ return client.module.params['build'] and client.module.params['build'].get('target') is not None
+
+ def detect_use_config_proxy(client):
+ return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None
+
+ def detect_etc_hosts(client):
+ return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
+
+ option_minimal_versions = dict()
+ option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
+ option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
+ option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target)
+ option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
+ option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts)
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.20',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ if client.module.params['state'] == 'build':
+ client.module.deprecate('The "build" state has been deprecated for a long time. '
+ 'Please use "present", which has the same meaning as "build".',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+ client.module.params['state'] = 'present'
+ if client.module.params['use_tls']:
+ client.module.deprecate('The "use_tls" option has been deprecated for a long time. '
+ 'Please use the "tls" and "validate_certs" options instead.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+
+ if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+ client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+ build_options = dict(
+ container_limits='container_limits',
+ dockerfile='dockerfile',
+ http_timeout='http_timeout',
+ nocache='nocache',
+ path='path',
+ pull='pull',
+ rm='rm',
+ buildargs='args',
+ )
+ for option, build_option in build_options.items():
+ default_value = None
+ if option in ('rm', ):
+ default_value = True
+ elif option in ('nocache', ):
+ default_value = False
+ if client.module.params[option] != default_value:
+ if client.module.params['build'] is None:
+ client.module.params['build'] = dict()
+ if client.module.params['build'].get(build_option, default_value) != default_value:
+ client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
+ client.module.params['build'][build_option] = client.module.params[option]
+ client.module.deprecate('Please specify build.%s instead of %s. The %s option '
+ 'has been renamed' % (build_option, option, option),
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if client.module.params['source'] == 'build':
+ if (not client.module.params['build'] or not client.module.params['build'].get('path')):
+ client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+ if client.module.params['build'].get('pull') is None:
+ client.module.deprecate("The default for build.pull is currently 'yes', but will be changed to "
+ "'no' in community.general 2.0.0. Please set build.pull explicitly to the value you need",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ client.module.params['build']['pull'] = True # TODO: change to False in community.general 2.0.0
+
+ if client.module.params['state'] == 'present' and client.module.params['source'] is None:
+ # Autodetection. To be removed in community.general 2.0.0.
+ if (client.module.params['build'] or dict()).get('path'):
+ client.module.params['source'] = 'build'
+ elif client.module.params['load_path']:
+ client.module.params['source'] = 'load'
+ else:
+ client.module.params['source'] = 'pull'
+ client.module.deprecate('The value of the "source" option was determined to be "%s". '
+ 'Please set the "source" option explicitly. Autodetection will '
+ 'be removed in community.general 2.0.0.' % client.module.params['source'],
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ if client.module.params['force']:
+ client.module.params['force_source'] = True
+ client.module.params['force_absent'] = True
+ client.module.params['force_tag'] = True
+ client.module.deprecate('The "force" option will be removed in community.general 2.0.0. Please '
+ 'use the "force_source", "force_absent" or "force_tag" option '
+ 'instead, depending on what you want to force.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ image={}
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Image IDs can also be
+ used instead of image names.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+ Look up and inspect each image name found in the name parameter.
+
+ :return: list of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
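+ # Illustrative note (not part of the original module): client.images() is
+ # assumed to return only summaries, hence the per-image inspection above:
+ #   client.images()  # -> [{'Id': 'sha256:...', 'RepoTags': ['registry:2']}, ...]
+ #   client.inspect_image('sha256:...')  # -> full dict as in the RETURN sample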
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Image IDs can also be
+ used instead of image names.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+ Look up and inspect each image name found in the name parameter.
+
+ :return: list of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
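+ # Illustrative note (not part of the original module): lookups by name are
+ # assumed to default the tag to 'latest', e.g.
+ #   utils.parse_repository_tag('sinatra')  # -> ('sinatra', None)
+ #   # ... so get_facts() falls back to 'latest' and inspects sinatra:latest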
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py
new file mode 100644
index 00000000..6522e642
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+#
+# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+# Chris Houseknecht, <house@redhat.com>
+# James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry.
+description:
+ - Provides functionality similar to the "docker login" command.
+ - Authenticate with a Docker registry and add the credentials to your local Docker config file, or to the
+ credentials store associated with the registry. Adding the credentials to the config file or the credential
+ store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
+ and the Docker SDK for Python without needing to provide credentials again.
+ - Running in check mode will perform the authentication without updating the config file.
+options:
+ registry_url:
+ description:
+ - The registry URL.
+ type: str
+ default: "https://index.docker.io/v1/"
+ aliases:
+ - registry
+ - url
+ username:
+ description:
+ - The username for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - The plaintext password for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ email:
+ description:
+ - Does nothing, do not use.
+ - Will be removed in community.general 3.0.0.
+ type: str
+ reauthorize:
+ description:
+ - Refresh existing authentication found in the configuration file.
+ type: bool
+ default: no
+ aliases:
+ - reauth
+ config_path:
+ description:
+ - Custom path to the Docker CLI configuration file.
+ type: path
+ default: ~/.docker/config.json
+ aliases:
+ - dockercfg_path
+ state:
+ description:
+ - This controls the current state of the user. C(present) will log in a user, C(absent) will log them out.
+ - To log out you only need the registry server, which defaults to DockerHub.
+ - Before 2.1 you could ONLY log in.
+ - Docker does not support 'logout' with a custom config file.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "L(Python bindings for docker credentials store API) >= 0.2.1
+ (use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
+ - "Docker API >= 1.20"
+author:
+ - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+
+- name: Log into private registry and force re-authorization
+ community.general.docker_login:
+ registry_url: your.private.registry.io
+ username: yourself
+ password: secrets3
+ reauthorize: yes
+
+- name: Log into DockerHub using a custom config file
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+ config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+ community.general.docker_login:
+ state: absent
+'''
+
+RETURN = '''
+login_results:
+ description: Results from the login.
+ returned: when state='present'
+ type: dict
+ sample: {
+ "serveraddress": "localhost:5000",
+ "username": "testuser"
+ }
+'''
+
+import base64
+import json
+import os
+import re
+import traceback
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from docker.errors import DockerException
+ from docker import auth
+
+ # Earlier versions of docker/docker-py put decode_auth
+ # in docker.auth.auth instead of docker.auth
+ if hasattr(auth, 'decode_auth'):
+ from docker.auth import decode_auth
+ else:
+ from docker.auth.auth import decode_auth
+
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ HAS_DOCKER_PY,
+ DEFAULT_DOCKER_REGISTRY,
+ DockerBaseClass,
+ EMAIL_REGEX,
+ RequestException,
+)
+
+NEEDS_DOCKER_PYCREDS = False
+
+# Early versions of docker/docker-py rely on docker-pycreds for
+# the credential store api.
+if HAS_DOCKER_PY:
+ try:
+ from docker.credentials.errors import StoreError, CredentialsNotFound
+ from docker.credentials import Store
+ except ImportError:
+ try:
+ from dockerpycreds.errors import StoreError, CredentialsNotFound
+ from dockerpycreds.store import Store
+ except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ NEEDS_DOCKER_PYCREDS = True
+
+
+if NEEDS_DOCKER_PYCREDS:
+ # docker-pycreds is missing, so we need to create some placeholder classes
+ # to allow instantiation.
+
+ class StoreError(Exception):
+ pass
+
+ class CredentialsNotFound(Exception):
+ pass
+
+
+class DockerFileStore(object):
+ '''
+ A custom credential store class that implements only the functionality we need to
+ update the docker config file when no credential helper is provided.
+ '''
+
+ program = "<legacy config>"
+
+ def __init__(self, config_path):
+ self._config_path = config_path
+
+ # Make sure we have a minimal config if none is available.
+ self._config = dict(
+ auths=dict()
+ )
+
+ try:
+ # Attempt to read the existing config.
+ with open(self._config_path, "r") as f:
+ config = json.load(f)
+ except (ValueError, IOError):
+ # No config found, or the config is invalid, so we'll ignore it.
+ config = dict()
+
+ # Update our internal config with whatever was loaded.
+ self._config.update(config)
+
+ @property
+ def config_path(self):
+ '''
+ Return the config path configured in this DockerFileStore instance.
+ '''
+
+ return self._config_path
+
+ def get(self, server):
+ '''
+ Retrieve credentials for `server` if there are any in the config file.
+ Otherwise raise a `StoreError`
+ '''
+
+ server_creds = self._config['auths'].get(server)
+ if not server_creds:
+ raise CredentialsNotFound('No matching credentials')
+
+ (username, password) = decode_auth(server_creds['auth'])
+
+ return dict(
+ Username=username,
+ Secret=password
+ )
+
+ def _write(self):
+ '''
+ Write config back out to disk.
+ '''
+ # Make sure directory exists
+ dir = os.path.dirname(self._config_path)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ # Write config; make sure it has permissions 0o600
+ content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+ f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ try:
+ os.write(f, content)
+ finally:
+ os.close(f)
+
+ def store(self, server, username, password):
+ '''
+ Add credentials for `server` to the current configuration.
+ '''
+
+ b64auth = base64.b64encode(
+ to_bytes(username) + b':' + to_bytes(password)
+ )
+ auth = to_text(b64auth)
+
+ # build up the auth structure
+ if 'auths' not in self._config:
+ self._config['auths'] = dict()
+
+ self._config['auths'][server] = dict(
+ auth=auth
+ )
+
+ self._write()
+
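+ # Illustrative note (not part of the original module): the file written above
+ # is assumed to follow the legacy `docker login` layout, e.g.
+ #   {
+ #       "auths": {
+ #           "https://index.docker.io/v1/": {
+ #               "auth": "dGVzdHVzZXI6aHVudGVyMg=="  # base64('testuser:hunter2')
+ #           }
+ #       }
+ #   }
+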
+ def erase(self, server):
+ '''
+ Remove credentials for the given server from the configuration.
+ '''
+
+ if 'auths' in self._config and server in self._config['auths']:
+ self._config['auths'].pop(server)
+ self._write()
+
+
+class LoginManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(LoginManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.registry_url = parameters.get('registry_url')
+ self.username = parameters.get('username')
+ self.password = parameters.get('password')
+ self.email = parameters.get('email')
+ self.reauthorize = parameters.get('reauthorize')
+ self.config_path = parameters.get('config_path')
+ self.state = parameters.get('state')
+
+ def run(self):
+ '''
+ Do the actual work of this task here. This allows instantiation for partial
+ testing.
+ '''
+
+ if self.state == 'present':
+ self.login()
+ else:
+ self.logout()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def login(self):
+ '''
+ Log into the registry with provided username/password. On success update the config
+ file with the new authorization.
+
+ :return: None
+ '''
+
+ if self.email and not re.match(EMAIL_REGEX, self.email):
+ self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
+ "/%s/" % (EMAIL_REGEX))
+
+ self.results['actions'].append("Logged into %s" % (self.registry_url))
+ self.log("Log into %s with username %s" % (self.registry_url, self.username))
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=self.reauthorize,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+
+ # If user is already logged in, then response contains password for user
+ if 'password' in response:
+ # This returns the stored (correct) password if the user is already logged in, even when
+ # a wrong password is given. So if the returned password differs from the one we passed,
+ # and the user didn't request to reauthorize, re-authenticate anyway.
+ if not self.reauthorize and response['password'] != self.password:
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=True,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+ response.pop('password', None)
+ self.results['login_result'] = response
+
+ self.update_credentials()
+
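+ # Illustrative note (not part of the original module): on a fresh login,
+ # docker-py is assumed to return the daemon's response, roughly
+ #   {'IdentityToken': '', 'Status': 'Login Succeeded'}
+ # while a repeated login returns the cached auth entry (including a
+ # 'password' key), which is what the re-authorization check above inspects.
+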
+ def logout(self):
+ '''
+ Log out of the registry. On success update the config file.
+
+ :return: None
+ '''
+
+ # Get the configuration store.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
+ self.results['changed'] = False
+ return
+
+ if not self.check_mode:
+ store.erase(self.registry_url)
+ self.results['changed'] = True
+
+ def update_credentials(self):
+ '''
+ If the authorization is not stored attempt to store authorization values via
+ the appropriate credential helper or to the config file.
+
+ :return: None
+ '''
+
+ # Check to see if credentials already exist.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ current = dict(
+ Username='',
+ Secret=''
+ )
+
+ if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
+ if not self.check_mode:
+ store.store(self.registry_url, self.username, self.password)
+ self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
+ self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
+ store.program, self.registry_url))
+ self.results['changed'] = True
+
+ def get_credential_store_instance(self, registry, dockercfg_path):
+ '''
+ Return an instance of docker.credentials.Store used by the given registry.
+
+ :return: A Store or None
+ :rtype: Union[docker.credentials.Store, NoneType]
+ '''
+
+ # Older versions of docker-py don't have this feature.
+ try:
+ credstore_env = self.client.credstore_env
+ except AttributeError:
+ credstore_env = None
+
+ config = auth.load_config(config_path=dockercfg_path)
+
+ if hasattr(auth, 'get_credential_store'):
+ store_name = auth.get_credential_store(config, registry)
+ elif 'credsStore' in config:
+ store_name = config['credsStore']
+ else:
+ store_name = None
+
+ # Make sure that there is a credential helper before trying to instantiate a
+ # Store object.
+ if store_name:
+ self.log("Found credential store %s" % store_name)
+ return Store(store_name, environment=credstore_env)
+
+ return DockerFileStore(dockercfg_path)
+
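+ # Illustrative note (not part of the original module): with a config like
+ #   {"credsStore": "desktop"}
+ # the helper binary docker-credential-desktop would be used via
+ # Store('desktop'); without a helper, credentials fall back to the plain
+ # config file handled by DockerFileStore above.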
+
+def main():
+
+ argument_spec = dict(
+ registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ reauthorize=dict(type='bool', default=False, aliases=['reauth']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
+ )
+
+ required_if = [
+ ('state', 'present', ['username', 'password']),
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ login_result={}
+ )
+
+ manager = LoginManager(client, results)
+ manager.run()
+
+ if 'actions' in results:
+ del results['actions']
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py
new file mode 100644
index 00000000..f70cc67d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+ - Create/remove Docker networks and connect containers to them.
+ - Performs largely the same function as the "docker network" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the network to operate on.
+ type: str
+ required: yes
+ aliases:
+ - network_name
+
+ connected:
+ description:
+ - List of container names or container IDs to connect to a network.
+ - Please note that the module only makes sure that these containers are connected to the network,
+ but does not care about connection options. If you rely on specific IP addresses etc., use the
+ M(community.general.docker_container) module to ensure your containers are correctly connected to this network.
+ type: list
+ elements: str
+ aliases:
+ - containers
+
+ driver:
+ description:
+ - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
+ type: str
+ default: bridge
+
+ driver_options:
+ description:
+ - Dictionary of network settings. Consult docker docs for valid options and values.
+ type: dict
+
+ force:
+ description:
+ - With state C(absent) forces disconnecting all containers from the
+ network prior to deleting the network. With state C(present) will
+ disconnect all containers, delete the network and re-create the
+ network.
+ - This option is required if you have changed the IPAM or driver options
+ and want an existing network to be updated to use the new options.
+ type: bool
+ default: no
+
+ appends:
+ description:
+ - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+ - Use I(appends) to leave existing containers connected.
+ type: bool
+ default: no
+ aliases:
+ - incremental
+
+ enable_ipv6:
+ description:
+ - Enable IPv6 networking.
+ type: bool
+
+ ipam_driver:
+ description:
+ - Specify an IPAM driver.
+ type: str
+
+ ipam_driver_options:
+ description:
+ - Dictionary of IPAM driver options.
+ type: dict
+
+ ipam_options:
+ description:
+ - Dictionary of IPAM options.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
+ options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
+ the IPAM config, not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
+ parameter.
+ type: dict
+ suboptions:
+ subnet:
+ description:
+ - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ ipam_config:
+ description:
+ - List of IPAM config blocks. Consult
+ L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+ Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+ - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ state:
+ description:
+ - C(absent) deletes the network. If a network has connected containers, it
+ cannot be deleted. Use the I(force) option to disconnect all containers
+ and delete the network.
+ - C(present) creates the network, if it does not already exist with the
+ specified parameters, and connects the list of containers provided via
+ the connected parameter. Containers not on the list will be disconnected.
+ An empty list will leave no containers connected to the network. Use the
+ I(appends) option to leave existing containers connected. Use the I(force)
+ options to force re-creation of the network.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+ internal:
+ description:
+ - Restrict external access to the network.
+ type: bool
+
+ labels:
+ description:
+ - Dictionary of labels.
+ type: dict
+
+ scope:
+ description:
+ - Specify the network's scope.
+ type: str
+ choices:
+ - local
+ - global
+ - swarm
+
+ attachable:
+ description:
+ - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+ type: bool
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+notes:
+ - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the
+ network. It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+ connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+ network, use the M(community.general.docker_container) module to loop over your containers and make sure they are connected properly.
+ - The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
+ network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+ fail as well.
+
+author:
+ - "Ben Keith (@keitwb)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.10.0"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+ community.general.docker_network:
+ name: network_one
+
+- name: Remove all but selected list of containers
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ - container_b
+ - container_c
+
+- name: Remove a single container
+ community.general.docker_network:
+ name: network_one
+ connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ appends: yes
+
+- name: Create a network with driver options
+ community.general.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+ community.general.docker_network:
+ name: network_three
+ ipam_config:
+ - subnet: 172.3.27.0/24
+ gateway: 172.3.27.2
+ iprange: 172.3.27.0/26
+ aux_addresses:
+ host1: 172.3.27.3
+ host2: 172.3.27.4
+
+- name: Create a network with labels
+ community.general.docker_network:
+ name: network_four
+ labels:
+ key1: value1
+ key2: value2
+
+- name: Create a network with IPv6 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_one
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_two
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+ community.general.docker_network:
+ name: network_one
+ state: absent
+ force: yes
+'''
+
+RETURN = '''
+network:
+ description:
+ - Network inspection results for the affected network.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_network). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ docker_version,
+ DifferenceTracker,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.types import IPAMPool, IPAMConfig
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.name = None
+ self.connected = None
+ self.driver = None
+ self.driver_options = None
+ self.ipam_driver = None
+ self.ipam_driver_options = None
+ self.ipam_options = None
+ self.ipam_config = None
+ self.appends = None
+ self.force = None
+ self.internal = None
+ self.labels = None
+ self.debug = None
+ self.enable_ipv6 = None
+ self.scope = None
+ self.attachable = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def container_names_in_network(network):
+ return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
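+# Illustrative note (not part of the original module): network inspection is
+# assumed to report containers keyed by ID, e.g.
+#   {'Containers': {'e83a452b8fb8...': {'Name': 'container_a'}}}
+# from which container_names_in_network() collects just the names.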
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[01][0-9]|12[0-8])$')
+
+
+def validate_cidr(cidr):
+ """Validate CIDR. Return IP version of a CIDR string on success.
+
+ :param cidr: Valid CIDR
+ :type cidr: str
+ :return: ``ipv4`` or ``ipv6``
+ :rtype: str
+ :raises ValueError: If ``cidr`` is not a valid CIDR
+ """
+ if CIDR_IPV4.match(cidr):
+ return 'ipv4'
+ elif CIDR_IPV6.match(cidr):
+ return 'ipv6'
+ raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
+
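+# Illustrative usage (not part of the original module):
+#   validate_cidr('172.3.27.0/24')             # -> 'ipv4'
+#   validate_cidr('fdd1:ac8c:0557:7ce1::/64')  # -> 'ipv6'
+#   validate_cidr('172.3.27.0')                # raises ValueError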
+
+def normalize_ipam_config_key(key):
+ """Normalizes IPAM config keys returned by Docker API to match Ansible keys.
+
+ :param key: Docker API key
+ :type key: str
+ :return: Ansible module key
+ :rtype: str
+ """
+ special_cases = {
+ 'AuxiliaryAddresses': 'aux_addresses'
+ }
+ return special_cases.get(key, key.lower())
+
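+# Illustrative usage (not part of the original module):
+#   normalize_ipam_config_key('AuxiliaryAddresses')  # -> 'aux_addresses'
+#   normalize_ipam_config_key('Subnet')              # -> 'subnet'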
+
+def dicts_are_essentially_equal(a, b):
+ """Make sure that a is a subset of b, where None entries of a are ignored."""
+ for k, v in a.items():
+ if v is None:
+ continue
+ if b.get(k) != v:
+ return False
+ return True
+
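+# Illustrative usage (not part of the original module):
+#   dicts_are_essentially_equal(
+#       {'subnet': '172.3.27.0/24', 'gateway': None},
+#       {'subnet': '172.3.27.0/24', 'gateway': '172.3.27.2'})
+#   # -> True: None entries in the first dict are ignored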
+
+class DockerNetworkManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_network = self.get_existing_network()
+
+ if not self.parameters.connected and self.existing_network:
+ self.parameters.connected = container_names_in_network(self.existing_network)
+
+ if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
+ self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
+ self.parameters.ipam_config = [self.parameters.ipam_options]
+
+ if self.parameters.ipam_config:
+ try:
+ for ipam_config in self.parameters.ipam_config:
+ validate_cidr(ipam_config['subnet'])
+ except ValueError as e:
+ self.client.fail(str(e))
+
+ if self.parameters.driver_options:
+ self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_network(self):
+ return self.client.get_network(name=self.parameters.name)
+
+ def has_different_config(self, net):
+ '''
+ Evaluates an existing network and returns a tuple containing a boolean
+ indicating if the configuration is different and a list of differences.
+
+ :param net: the inspection output for an existing network
+ :return: (bool, list)
+ '''
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != net['Driver']:
+ differences.add('driver',
+ parameter=self.parameters.driver,
+ active=net['Driver'])
+ if self.parameters.driver_options:
+ if not net.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=net.get('Options'))
+ else:
+ for key, value in self.parameters.driver_options.items():
+ if not (key in net['Options']) or value != net['Options'][key]:
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=net['Options'].get(key))
+
+ if self.parameters.ipam_driver:
+ if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
+ differences.add('ipam_driver',
+ parameter=self.parameters.ipam_driver,
+ active=net.get('IPAM'))
+
+ if self.parameters.ipam_driver_options is not None:
+ ipam_driver_options = net['IPAM'].get('Options') or {}
+ if ipam_driver_options != self.parameters.ipam_driver_options:
+ differences.add('ipam_driver_options',
+ parameter=self.parameters.ipam_driver_options,
+ active=ipam_driver_options)
+
+        if self.parameters.ipam_config:
+ if not net.get('IPAM') or not net['IPAM']['Config']:
+ differences.add('ipam_config',
+ parameter=self.parameters.ipam_config,
+ active=net.get('IPAM', {}).get('Config'))
+ else:
+ # Put network's IPAM config into the same format as module's IPAM config
+ net_ipam_configs = []
+ for net_ipam_config in net['IPAM']['Config']:
+ config = dict()
+ for k, v in net_ipam_config.items():
+ config[normalize_ipam_config_key(k)] = v
+ net_ipam_configs.append(config)
+ # Compare lists of dicts as sets of dicts
+ for idx, ipam_config in enumerate(self.parameters.ipam_config):
+ net_config = dict()
+ for net_ipam_config in net_ipam_configs:
+ if dicts_are_essentially_equal(ipam_config, net_ipam_config):
+ net_config = net_ipam_config
+ break
+ for key, value in ipam_config.items():
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value != net_config.get(key):
+ differences.add('ipam_config[%s].%s' % (idx, key),
+ parameter=value,
+ active=net_config.get(key))
+
+ if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
+ differences.add('enable_ipv6',
+ parameter=self.parameters.enable_ipv6,
+ active=net.get('EnableIPv6', False))
+
+ if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
+ differences.add('internal',
+ parameter=self.parameters.internal,
+ active=net.get('Internal'))
+
+ if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
+ differences.add('scope',
+ parameter=self.parameters.scope,
+ active=net.get('Scope'))
+
+ if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
+ differences.add('attachable',
+ parameter=self.parameters.attachable,
+ active=net.get('Attachable'))
+ if self.parameters.labels:
+ if not net.get('Labels'):
+ differences.add('labels',
+ parameter=self.parameters.labels,
+ active=net.get('Labels'))
+ else:
+ for key, value in self.parameters.labels.items():
+ if not (key in net['Labels']) or value != net['Labels'][key]:
+ differences.add('labels.%s' % key,
+ parameter=value,
+ active=net['Labels'].get(key))
+
+ return not differences.empty, differences
+
+ def create_network(self):
+ if not self.existing_network:
+ params = dict(
+ driver=self.parameters.driver,
+ options=self.parameters.driver_options,
+ )
+
+ ipam_pools = []
+ if self.parameters.ipam_config:
+ for ipam_pool in self.parameters.ipam_config:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ ipam_pools.append(IPAMPool(**ipam_pool))
+ else:
+ ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
+
+ if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
+                # Only add the ipam parameter if an IPAM driver or other IPAM parameters
+                # were specified. Omitting this parameter can significantly speed up
+                # creation; on my machine creation with this option takes ~15 seconds,
+                # and without it just a few seconds.
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools,
+ options=self.parameters.ipam_driver_options)
+ else:
+ params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools)
+
+ if self.parameters.enable_ipv6 is not None:
+ params['enable_ipv6'] = self.parameters.enable_ipv6
+ if self.parameters.internal is not None:
+ params['internal'] = self.parameters.internal
+ if self.parameters.scope is not None:
+ params['scope'] = self.parameters.scope
+ if self.parameters.attachable is not None:
+ params['attachable'] = self.parameters.attachable
+ if self.parameters.labels:
+ params['labels'] = self.parameters.labels
+
+ if not self.check_mode:
+ resp = self.client.create_network(self.parameters.name, **params)
+ self.client.report_warnings(resp, ['Warning'])
+ self.existing_network = self.client.get_network(network_id=resp['Id'])
+ self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_network(self):
+ if self.existing_network:
+ self.disconnect_all_containers()
+ if not self.check_mode:
+ self.client.remove_network(self.parameters.name)
+ self.results['actions'].append("Removed network %s" % (self.parameters.name,))
+ self.results['changed'] = True
+
+ def is_container_connected(self, container_name):
+ if not self.existing_network:
+ return False
+ return container_name in container_names_in_network(self.existing_network)
+
+ def connect_containers(self):
+ for name in self.parameters.connected:
+ if not self.is_container_connected(name):
+ if not self.check_mode:
+ self.client.connect_container_to_network(name, self.parameters.name)
+ self.results['actions'].append("Connected container %s" % (name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(name),
+ parameter=True,
+ active=False)
+
+ def disconnect_missing(self):
+ if not self.existing_network:
+ return
+ containers = self.existing_network['Containers']
+ if not containers:
+ return
+ for c in containers.values():
+ name = c['Name']
+ if name not in self.parameters.connected:
+ self.disconnect_container(name)
+
+ def disconnect_all_containers(self):
+ containers = self.client.get_network(name=self.parameters.name)['Containers']
+ if not containers:
+ return
+ for cont in containers.values():
+ self.disconnect_container(cont['Name'])
+
+ def disconnect_container(self, container_name):
+ if not self.check_mode:
+ self.client.disconnect_container_from_network(container_name, self.parameters.name)
+ self.results['actions'].append("Disconnected container %s" % (container_name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(container_name),
+ parameter=False,
+ active=True)
+
+ def present(self):
+ different = False
+ differences = DifferenceTracker()
+ if self.existing_network:
+ different, differences = self.has_different_config(self.existing_network)
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
+ if self.parameters.force or different:
+ self.remove_network()
+ self.existing_network = None
+
+ self.create_network()
+ self.connect_containers()
+ if not self.parameters.appends:
+ self.disconnect_missing()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ network_facts = self.get_existing_network()
+ self.results['ansible_facts'] = {u'docker_network': network_facts}
+ self.results['network'] = network_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
+ self.remove_network()
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['network_name']),
+ connected=dict(type='list', default=[], elements='str', aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str'),
+ ipam_driver_options=dict(type='dict'),
+ ipam_options=dict(type='dict', default={}, options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ ipam_config=dict(type='list', elements='dict', options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ )),
+ enable_ipv6=dict(type='bool'),
+ internal=dict(type='bool'),
+ labels=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ scope=dict(type='str', choices=['local', 'global', 'swarm']),
+ attachable=dict(type='bool'),
+ )
+
+ mutually_exclusive = [
+ ('ipam_config', 'ipam_options')
+ ]
+
+ option_minimal_versions = dict(
+ scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
+ labels=dict(docker_api_version='1.23'),
+ ipam_driver_options=dict(docker_py_version='2.0.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.22',
+ # "The docker server >= 1.10.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerNetworkManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py
new file mode 100644
index 00000000..feeff6a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_network_info
+
+short_description: Retrieves facts about a docker network
+
+description:
+ - Retrieves facts about a docker network.
+ - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.general.docker_network)
+ returns for a non-absent network.
+
+
+options:
+ name:
+ description:
+ - The name of the network to inspect.
+      - When identifying an existing network, the name may be a name or a long or short network ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get infos on network
+ community.general.docker_network_info:
+ name: mydata
+ register: result
+
+- name: Does network exist?
+ ansible.builtin.debug:
+ msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about network
+ ansible.builtin.debug:
+ var: result.network
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the network exists.
+ type: bool
+ returned: always
+ sample: true
+network:
+ description:
+ - Facts representing the current state of the network. Matches the docker inspection output.
+    - Will be C(none) if the network does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "Attachable": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {},
+ "Created": "2018-12-07T01:47:51.250835114-06:00",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Config": [
+ {
+ "Gateway": "192.168.96.1",
+ "Subnet": "192.168.96.0/20"
+ }
+ ],
+ "Driver": "default",
+ "Options": null
+ },
+ "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
+ "Ingress": false,
+ "Internal": false,
+ "Labels": {},
+ "Name": "ansible-test-f2700bba",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ network = client.get_network(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(network),
+ network=network,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py
new file mode 100644
index 00000000..12980e5f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+  - Manages Docker nodes via a Swarm Manager.
+  - This module allows you to change a node's role and availability, and to modify, add or remove node labels.
+options:
+ hostname:
+ description:
+ - The hostname or ID of node as registered in Swarm.
+      - If more than one node is registered using the same hostname, the ID must be used;
+        otherwise the module will fail.
+ type: str
+ required: yes
+ labels:
+ description:
+      - User-defined key/value metadata that will be assigned as a node attribute.
+      - Label operations in this module apply to the docker swarm node specified by I(hostname).
+        Use the M(community.general.docker_swarm) module to add/modify/remove swarm cluster labels.
+      - The actual state of labels assigned to the node when the module completes depends on the
+        I(labels_state) and I(labels_to_remove) parameters. See the descriptions below.
+ type: dict
+ labels_state:
+ description:
+      - Defines the operation on the labels assigned to the node and the labels specified in the I(labels) option.
+ - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
+ If no labels are assigned then it will add listed labels. For labels that are already assigned
+ to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
+ If I(labels) is empty then no changes will be made.
+ - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
+ all labels assigned to the node will be removed.
+ type: str
+ default: 'merge'
+ choices:
+ - merge
+ - replace
+ labels_to_remove:
+ description:
+ - List of labels that will be removed from the node configuration. The list has to contain only label
+ names, not their values.
+      - If a label provided in the list is not assigned to the node, the entry is ignored.
+      - If a label appears in both I(labels_to_remove) and I(labels), the value provided in I(labels) remains
+        assigned to the node.
+      - If I(labels_state) is C(replace) and I(labels) is not provided or empty, then all labels assigned to the
+        node are removed and I(labels_to_remove) is ignored.
+ type: list
+ elements: str
+ availability:
+ description: Node availability to assign. If not provided then node availability remains unchanged.
+ choices:
+ - active
+ - pause
+ - drain
+ type: str
+ role:
+ description: Node role to assign. If not provided then node role remains unchanged.
+ choices:
+ - manager
+ - worker
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - Docker API >= 1.25
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+ - Thierry Bouvet (@tbouvet)
+
+'''
+
+EXAMPLES = '''
+- name: Set node role
+ community.general.docker_node:
+ hostname: mynode
+ role: manager
+
+- name: Set node availability
+ community.general.docker_node:
+ hostname: mynode
+ availability: drain
+
+- name: Replace node labels with new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+ labels_state: replace
+
+- name: Merge node labels and new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+
+- name: Remove all labels assigned to node
+ community.general.docker_node:
+ hostname: mynode
+ labels_state: replace
+
+- name: Remove selected labels from the node
+ community.general.docker_node:
+ hostname: mynode
+ labels_to_remove:
+ - key1
+ - key2
+'''
+
+RETURN = '''
+node:
+  description: Information about the node after the 'update' operation.
+ returned: success
+ type: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ RequestException,
+)
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+
+ # Spec
+ self.name = None
+ self.labels = None
+ self.labels_state = None
+ self.labels_to_remove = None
+
+ # Node
+ self.availability = None
+ self.role = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+class SwarmNodeManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmNodeManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.parameters = TaskParameters(client)
+
+ self.node_update()
+
+ def node_update(self):
+ if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)):
+ self.client.fail("This node is not part of a swarm.")
+ return
+
+ if self.client.check_if_swarm_node_is_down():
+ self.client.fail("Can not update the node. The node is down.")
+
+ try:
+ node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+ except APIError as exc:
+ self.client.fail("Failed to get node information for %s" % to_native(exc))
+
+ changed = False
+ node_spec = dict(
+ Availability=self.parameters.availability,
+ Role=self.parameters.role,
+ Labels=self.parameters.labels,
+ )
+
+ if self.parameters.role is None:
+ node_spec['Role'] = node_info['Spec']['Role']
+ else:
+            if node_info['Spec']['Role'] != self.parameters.role:
+ node_spec['Role'] = self.parameters.role
+ changed = True
+
+ if self.parameters.availability is None:
+ node_spec['Availability'] = node_info['Spec']['Availability']
+ else:
+            if node_info['Spec']['Availability'] != self.parameters.availability:
+                node_spec['Availability'] = self.parameters.availability
+ changed = True
+
+ if self.parameters.labels_state == 'replace':
+ if self.parameters.labels is None:
+ node_spec['Labels'] = {}
+ if node_info['Spec']['Labels']:
+ changed = True
+ else:
+ if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+ node_spec['Labels'] = self.parameters.labels
+ changed = True
+ elif self.parameters.labels_state == 'merge':
+ node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+ if self.parameters.labels is not None:
+ for key, value in self.parameters.labels.items():
+ if node_spec['Labels'].get(key) != value:
+ node_spec['Labels'][key] = value
+ changed = True
+
+ if self.parameters.labels_to_remove is not None:
+ for key in self.parameters.labels_to_remove:
+ if self.parameters.labels is not None:
+ if not self.parameters.labels.get(key):
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+ else:
+ self.client.module.warn(
+ "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+ "Keeping the assigned label value."
+ % to_native(key))
+ else:
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+
+ if changed is True:
+ if not self.check_mode:
+ try:
+ self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+ node_spec=node_spec)
+ except APIError as exc:
+ self.client.fail("Failed to update node : %s" % to_native(exc))
+ self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+ self.results['changed'] = changed
+ else:
+ self.results['node'] = node_info
+ self.results['changed'] = changed
+
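A self-contained sketch of the label semantics implemented in node_update() above (editor's addition, not the module's code): C(merge) keeps existing labels and overlays the provided ones, while C(replace) discards anything not listed:

    existing = {'env': 'prod', 'tier': 'db'}
    desired = {'tier': 'cache'}

    merged = dict(existing)   # labels_state: merge
    merged.update(desired)    # -> {'env': 'prod', 'tier': 'cache'}

    replaced = dict(desired)  # labels_state: replace
    # -> {'tier': 'cache'}; 'env' is dropped entirely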
+
+def main():
+ argument_spec = dict(
+ hostname=dict(type='str', required=True),
+ labels=dict(type='dict'),
+ labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+ labels_to_remove=dict(type='list', elements='str'),
+ availability=dict(type='str', choices=['active', 'pause', 'drain']),
+ role=dict(type='str', choices=['worker', 'manager']),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ SwarmNodeManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py
new file mode 100644
index 00000000..c01edadc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about a docker swarm node from the Swarm Manager
+
+description:
+ - Retrieves facts about a docker node.
+ - Essentially returns the output of C(docker node inspect <name>).
+  - Must be executed on a host running as a Swarm Manager; otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+      - The name of the node to inspect, or a list of node names to inspect.
+      - If empty, returns information about all nodes in the Swarm cluster.
+      - When identifying a node, use either the hostname of the node (as registered in Swarm) or the node ID.
+      - If I(self) is C(true), this parameter is ignored.
+ type: list
+ elements: str
+ self:
+ description:
+ - If C(true), queries the node (i.e. the docker daemon) the module communicates with.
+ - If C(true) then I(name) is ignored.
+ - If C(false) then query depends on I(name) presence and value.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on all nodes
+ community.general.docker_node_info:
+ register: result
+
+- name: Get info on node
+ community.general.docker_node_info:
+ name: mynode
+ register: result
+
+- name: Get info on list of nodes
+ community.general.docker_node_info:
+ name:
+ - mynode1
+ - mynode2
+ register: result
+
+- name: Get info on host if it is Swarm Manager
+ community.general.docker_node_info:
+ self: true
+ register: result
+'''
+
+RETURN = '''
+nodes:
+ description:
+ - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
+      - Can contain multiple entries if more than one node is provided in I(name), or if I(name) is not provided.
+      - If I(name) contains a list of nodes, the output provides information on all nodes registered
+        in the swarm, including nodes that left the swarm but have not been removed from the cluster on swarm
+        managers, and nodes that are unreachable.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_node_facts(client):
+
+ results = []
+
+ if client.module.params['self'] is True:
+ self_node_id = client.get_swarm_node_id()
+ node_info = client.get_node_inspect(node_id=self_node_id)
+ results.append(node_info)
+ return results
+
+ if client.module.params['name'] is None:
+ node_info = client.get_all_nodes_inspect()
+ return node_info
+
+ nodes = client.module.params['name']
+ if not isinstance(nodes, list):
+ nodes = [nodes]
+
+ for next_node_name in nodes:
+ next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True)
+ if next_node_info:
+ results.append(next_node_info)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ self=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ nodes = get_node_facts(client)
+
+ client.module.exit_json(
+ changed=False,
+ nodes=nodes,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py
new file mode 100644
index 00000000..025c6130
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Allows pruning of various docker objects
+
+description:
+  - Allows running C(docker container prune), C(docker image prune), C(docker network prune)
+    and C(docker volume prune) via the Docker API.
+
+
+options:
+ containers:
+ description:
+ - Whether to prune containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to delete.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to prune images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to delete.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to prune networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to prune volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ builder_cache:
+ description:
+ - Whether to prune the builder cache.
+ - Requires version 3.3.0 of the Docker SDK for Python or newer.
+ type: bool
+ default: no
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+ community.general.docker_prune:
+ containers: yes
+ containers_filters:
+ # only consider containers created more than 24 hours ago
+ until: 24h
+
+- name: Prune everything
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+
+- name: Prune everything (including non-dangling images)
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ images_filters:
+ dangling: false
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+'''
+
+RETURN = '''
+# containers
+containers:
+ description:
+ - List of IDs of deleted containers.
+ returned: I(containers) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+containers_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from container pruning in bytes.
+ returned: I(containers) is C(true)
+ type: int
+ sample: '0'
+
+# images
+images:
+ description:
+ - List of IDs of deleted images.
+ returned: I(images) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+images_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from image pruning in bytes.
+ returned: I(images) is C(true)
+ type: int
+ sample: '0'
+
+# networks
+networks:
+ description:
+ - List of IDs of deleted networks.
+ returned: I(networks) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+
+# volumes
+volumes:
+ description:
+ - List of IDs of deleted volumes.
+ returned: I(volumes) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+volumes_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from volumes pruning in bytes.
+ returned: I(volumes) is C(true)
+ type: int
+ sample: '0'
+
+# builder_cache
+builder_cache_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from builder cache pruning in bytes.
+ returned: I(builder_cache) is C(true)
+ type: int
+ sample: '0'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+try:
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version, clean_dict_booleans_for_docker_api
+except Exception as dummy:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
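A sketch of why the filters are cleaned before being sent to the API (editor's addition, assuming clean_dict_booleans_for_docker_api behaves as in module_utils.docker.common, i.e. it stringifies keys and values and maps booleans to 'true'/'false'):

    clean_dict_booleans_for_docker_api({'dangling': True})  # -> {'dangling': 'true'}
    clean_dict_booleans_for_docker_api({'until': '24h'})    # -> {'until': '24h'}
    clean_dict_booleans_for_docker_api(None)                # -> {}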
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ builder_cache=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ # supports_check_mode=True,
+ min_docker_api_version='1.25',
+ min_docker_version='2.1.0',
+ )
+
+ # Version checks
+ cache_min_version = '3.3.0'
+ if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
+ msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade."
+ client.fail(msg % (docker_version, cache_min_version))
+
+ try:
+ result = dict()
+
+ if client.module.params['containers']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
+ res = client.prune_containers(filters=filters)
+ result['containers'] = res.get('ContainersDeleted') or []
+ result['containers_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['images']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+ res = client.prune_images(filters=filters)
+ result['images'] = res.get('ImagesDeleted') or []
+ result['images_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['networks']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+ res = client.prune_networks(filters=filters)
+ result['networks'] = res.get('NetworksDeleted') or []
+
+ if client.module.params['volumes']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+ res = client.prune_volumes(filters=filters)
+ result['volumes'] = res.get('VolumesDeleted') or []
+ result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['builder_cache']:
+ res = client.prune_builds()
+ result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py
new file mode 100644
index 00000000..b6ce7f28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets
+
+
+description:
+ - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+  - Adds to the metadata of new secrets 'ansible_key', a hash of the secret data, which is then used
+    in future runs to test if a secret has changed. If 'ansible_key' is not present, then a secret will not be updated
+    unless the I(force) option is set.
+ - Updates to secrets are performed by removing the secret and creating it again.
+options:
+ data:
+ description:
+ - The value of the secret. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+ - "A map of key:value meta data, where both key and value are expected to be strings."
+ - If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing secret.
+ - If C(true), an existing secret will be replaced, even if it has not changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the secret.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the secret should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+ community.general.docker_secret:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the secret data
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the secret
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing secret
+ state: present
+
+- name: Update an existing label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the secret
+ one: '1'
+ state: present
+
+- name: Force the removal/creation of the secret
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove secret foo
+ community.general.docker_secret:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+secret_id:
+ description:
+ - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class SecretManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SecretManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_secret(self):
+ ''' Find an existing secret. '''
+ try:
+ secrets = self.client.secrets(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
+
+ for secret in secrets:
+ if secret['Spec']['Name'] == self.name:
+ return secret
+ return None
+
+ def create_secret(self):
+ ''' Create a new secret '''
+ secret_id = None
+        # We can't see the data after creation, so add a label we can use for the idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ secret_id = self.client.create_secret(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating secret: %s" % to_native(exc))
+
+ if isinstance(secret_id, dict):
+ secret_id = secret_id['ID']
+
+ return secret_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the secret '''
+ secret = self.get_secret()
+ if secret:
+ self.results['secret_id'] = secret['ID']
+ data_changed = False
+ attrs = secret.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'yes'")
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the secret
+ self.absent()
+ secret_id = self.create_secret()
+ self.results['changed'] = True
+ self.results['secret_id'] = secret_id
+ else:
+ self.results['changed'] = True
+ self.results['secret_id'] = self.create_secret()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the secret '''
+ secret = self.get_secret()
+ if secret:
+ try:
+ if not self.check_mode:
+ self.client.remove_secret(secret['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
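A sketch of the idempotency key used by SecretManager above (editor's addition). Docker never returns secret data, so the module stores a SHA-224 digest of the secret bytes in the 'ansible_key' label and compares digests on later runs:

    import hashlib

    data = b'Goodnight everyone!'
    data_key = hashlib.sha224(data).hexdigest()  # 56 hex characters
    # A later run recomputes the digest; a mismatch with the stored
    # 'ansible_key' label triggers remove-and-recreate of the secret.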
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str', no_log=True),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.1.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ secret_id=''
+ )
+
+ SecretManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+ - Uses Docker Compose to start, shutdown and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up), C(docker-compose stop) (with I(stopped)),
+        or C(docker-compose restart) (with I(restarted)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up), C(docker-compose stop) (with I(stopped)),
+        or C(docker-compose restart) (with I(restarted)) on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: "not output.changed "
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+ description: Meta data assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+ description: Contains a dictionary for each network to which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+ description: Mac Address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+            name:
+              description: Name of the image.
+              returned: always
+              type: str
+            id:
+              description: Image hash.
+              returned: always
+              type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+            name:
+              description: Name of the image.
+              returned: always
+              type: str
+            id:
+              description: Image hash.
+              returned: always
+              type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+            id:
+              description: The container's long ID.
+              returned: always
+              type: str
+            name:
+              description: The container's name.
+              returned: always
+              type: str
+            short_id:
+              description: The container's short ID.
+              returned: always
+              type: str
+'''
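+# A registered result can then be inspected from a playbook, for instance
+# (a sketch; `output` is whatever variable the task registered):
+#
+#   - ansible.builtin.debug:
+#       msg: "web running: {{ output.services.web.flask_web_1.state.running }}"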
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
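+# Maps this module's authentication parameters to the CLI-style option names
+# that docker-compose's project_from_options() understands.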
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
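+# docker-compose writes progress and diagnostics straight to stdout/stderr,
+# which would corrupt the JSON a module must emit on stdout. The helpers below
+# temporarily redirect both streams into temporary files so that output can be
+# captured and, on failure, mined for ERROR:/WARNING: lines.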
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+    finally:
+        sys.stdout = old_stdout
+        fd.close()
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+    finally:
+        sys.stderr = old_fh
+        fd.close()
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+            # strip terminal format/color escape sequences without eating the
+            # text between them
+            new_line = re.sub(r'\x1b\[[0-9;]*m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
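+# Compose's console output is scanned for ERROR:/WARNING: prefixes to build a
+# structured failure report. For example, a stderr line
+# "ERROR: no such service: web" (a hypothetical compose message) would land in
+# 'errors' and, absent an exception body, in 'msg'.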
+def attempt_extract_errors(exc_str, stdout, stderr):
+ errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
+ errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
+
+ warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
+ warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
+
+    # assume either the exception body (if present) or the last error was the
+    # most fatal
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if new_image_id not in old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+ if len(containers):
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+ self.client.fail("Error scaling %s - expected int, got %s",
+ service_name, str(type(self.scale[service_name])))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+ project_name=dict(type='str',),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py
new file mode 100644
index 00000000..d3089e20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: Manage docker stacks
+description:
+  - Manage docker stacks using the C(docker stack) command
+    on the target node (see examples).
+options:
+ name:
+ description:
+      - Stack name.
+ type: str
+ required: yes
+ state:
+ description:
+ - Service state.
+ type: str
+ default: "present"
+ choices:
+ - present
+ - absent
+ compose:
+ description:
+ - List of compose definitions. Any element may be a string
+ referring to the path of the compose file on the target host
+ or the YAML contents of a compose file nested as dictionary.
+ type: list
+ elements: raw
+ default: []
+ prune:
+ description:
+      - If true, adds the C(--prune) option to the C(docker stack deploy) command.
+        This will have docker remove the services not present in the
+        current stack definition.
+ type: bool
+ default: no
+ with_registry_auth:
+ description:
+      - If true, adds the C(--with-registry-auth) option to the C(docker stack deploy) command.
+        This will have docker send registry authentication details to Swarm agents.
+ type: bool
+ default: no
+ resolve_image:
+ description:
+      - If set, adds the C(--resolve-image) option to the C(docker stack deploy) command.
+        This will have docker query the registry to resolve image digests and
+        supported platforms. If not set, docker uses C(always) by default.
+ type: str
+ choices: ["always", "changed", "never"]
+ absent_retries:
+ description:
+      - If C(>0) and I(state) is C(absent), the module will retry up to
+        I(absent_retries) times to delete the stack until all the
+        resources have been effectively deleted.
+        If the last try still reports the stack as not completely
+        removed, the module will fail.
+ type: int
+ default: 0
+ absent_retries_interval:
+ description:
+ - Interval in seconds between consecutive I(absent_retries).
+ type: int
+ default: 1
+
+requirements:
+ - jsondiff
+ - pyyaml
+
+notes:
+ - Return values I(out) and I(err) have been deprecated and will be removed in community.general 3.0.0. Use I(stdout) and I(stderr) instead.
+'''
+
+RETURN = '''
+stack_spec_diff:
+ description: |
+    Dictionary containing the differences between the C(Spec) field
+    of the stack services before and after applying the new stack
+    definition.
+ sample: >
+ "stack_spec_diff":
+ {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+ returned: on change
+ type: dict
+'''
+
+EXAMPLES = '''
+ - name: Deploy stack from a compose file
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+
+ - name: Deploy stack from base compose file and override the web service
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+ - version: '3'
+ services:
+ web:
+ image: nginx:latest
+ environment:
+ ENVVAR: envvar
+
+ - name: Remove stack
+ community.general.docker_stack:
+ name: mystack
+ state: absent
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+ from jsondiff import diff as json_diff
+ HAS_JSONDIFF = True
+except ImportError:
+ HAS_JSONDIFF = False
+
+try:
+ from yaml import dump as yaml_dump
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
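+# `docker stack services NAME --format {{.Name}}` prints one service name per
+# line, for example (hypothetical stack "mystack"): mystack_web, mystack_db.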
+def docker_stack_services(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "stack",
+ "services",
+ stack_name,
+ "--format",
+ "{{.Name}}"])
+ if err == "Nothing found in stack: %s\n" % stack_name:
+ return []
+ return out.strip().split('\n')
+
+
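+# Returns only the 'Spec' mapping from `docker service inspect`; this is what
+# docker_stack_inspect() snapshots before and after a deploy to compute the diff.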
+def docker_service_inspect(module, service_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "service",
+ "inspect",
+ service_name])
+ if rc != 0:
+ return None
+ else:
+ ret = json.loads(out)[0]['Spec']
+ return ret
+
+
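+# Builds and runs a command line resembling, for example (hypothetical values):
+#   docker stack deploy --prune --with-registry-auth \
+#       --compose-file /opt/docker-compose.yml mystack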
+def docker_stack_deploy(module, stack_name, compose_files):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "deploy"]
+ if module.params["prune"]:
+ command += ["--prune"]
+ if module.params["with_registry_auth"]:
+ command += ["--with-registry-auth"]
+ if module.params["resolve_image"]:
+ command += ["--resolve-image",
+ module.params["resolve_image"]]
+ for compose_file in compose_files:
+ command += ["--compose-file",
+ compose_file]
+ command += [stack_name]
+ return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+ ret = {}
+ for service_name in docker_stack_services(module, stack_name):
+ ret[service_name] = docker_service_inspect(module, service_name)
+ return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "rm", stack_name]
+
+ rc, out, err = module.run_command(command)
+
+ while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+ sleep(interval)
+ retries = retries - 1
+ rc, out, err = module.run_command(command)
+ return rc, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'compose': dict(type='list', elements='raw', default=[]),
+ 'prune': dict(type='bool', default=False),
+ 'with_registry_auth': dict(type='bool', default=False),
+ 'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+ 'state': dict(type='str', default='present', choices=['present', 'absent']),
+ 'absent_retries': dict(type='int', default=0),
+ 'absent_retries_interval': dict(type='int', default=1)
+ },
+ supports_check_mode=False
+ )
+
+ if not HAS_JSONDIFF:
+ return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+ if not HAS_YAML:
+ return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+ state = module.params['state']
+ compose = module.params['compose']
+ name = module.params['name']
+ absent_retries = module.params['absent_retries']
+ absent_retries_interval = module.params['absent_retries_interval']
+
+ if state == 'present':
+ if not compose:
+ module.fail_json(msg=("compose parameter must be a list "
+ "containing at least one element"))
+
+ compose_files = []
+ for i, compose_def in enumerate(compose):
+ if isinstance(compose_def, dict):
+ compose_file_fd, compose_file = tempfile.mkstemp()
+ module.add_cleanup_file(compose_file)
+ with os.fdopen(compose_file_fd, 'w') as stack_file:
+ compose_files.append(compose_file)
+ stack_file.write(yaml_dump(compose_def))
+ elif isinstance(compose_def, string_types):
+ compose_files.append(compose_def)
+ else:
+ module.fail_json(msg="compose element '%s' must be a " +
+ "string or a dictionary" % compose_def)
+
+ before_stack_services = docker_stack_inspect(module, name)
+
+ rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+ after_stack_services = docker_stack_inspect(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="docker stack up deploy command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+
+ before_after_differences = json_diff(before_stack_services,
+ after_stack_services)
+ for k in before_after_differences.keys():
+ if isinstance(before_after_differences[k], dict):
+ before_after_differences[k].pop('UpdatedAt', None)
+ before_after_differences[k].pop('Version', None)
+ if not list(before_after_differences[k].keys()):
+ before_after_differences.pop(k)
+
+ if not before_after_differences:
+ module.exit_json(
+ changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err)
+ else:
+ module.exit_json(
+ changed=True,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ stack_spec_diff=json_diff(before_stack_services,
+ after_stack_services,
+ dump=True))
+
+ else:
+ if docker_stack_services(module, name):
+ rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+ if rc != 0:
+ module.fail_json(msg="'docker stack down' command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+ else:
+ module.exit_json(changed=True,
+ msg=out, rc=rc,
+ err=err, # Deprecated
+ stdout=out, stderr=err)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py
new file mode 100644
index 00000000..74a3648d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on a docker stack
+description:
+ - Retrieve information on docker stacks using the C(docker stack) command
+ on the target node (see examples).
+version_added: "1.0.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+ List of dictionaries containing the list of stacks or tasks associated
+ to a stack name.
+ sample: >
+ "results": [{"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}]
+ returned: always
+ type: list
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_info:
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
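+# `docker stack ls --format={{json .}}` prints one JSON object per stack per
+# line (see the sample in RETURN above); main() parses each line with json.loads().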
+def docker_stack_list(module):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ls", "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ },
+ supports_check_mode=False
+ )
+
+ rc, out, err = docker_stack_list(module)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py
new file mode 100644
index 00000000..966a4266
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_task_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on the tasks of a docker stack
+description:
+  - Retrieve information on the tasks of a docker stack using the C(docker stack) command
+ on the target node (see examples).
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: yes
+version_added: "1.1.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+ List of dictionaries containing the list of tasks associated
+ to a stack name.
+ sample: >
+ [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_task_info:
+ name: test_stack
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
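+# As in docker_stack_info, --format={{json .}} yields one JSON object per task
+# per line (see the sample in RETURN above).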
+def docker_stack_task(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True)
+ },
+ supports_check_mode=False
+ )
+
+ name = module.params['name']
+
+ rc, out, err = docker_stack_task(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py
new file mode 100644
index 00000000..52f37643
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py
@@ -0,0 +1,675 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: no
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+      - Set to C(inspect) to display swarm information.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ - inspect
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+ - Maximum number of tasks history stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+      - Number of log entries between snapshots.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+      - Number of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+      - Number of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+ - Automatic expiry for nodes certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+        Use the M(community.general.docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(no).
+ - M(community.general.docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: no
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ community.general.docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ community.general.docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ community.general.docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ community.general.docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ community.general.docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ community.general.docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Inspect swarm
+ community.general.docker_swarm:
+ state: inspect
+ register: swarm_info
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+          description: Token to create a new I(worker) node.
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+          description: Token to create a new I(manager) node.
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+  returned: when an action failed
+ type: list
+ elements: str
+ example: "['This cluster is already a swarm cluster']"
+
+'''
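+# The registered result can seed follow-up tasks, for instance (a sketch,
+# reusing the `swarm_info` variable registered in the examples above):
+#
+#   - community.general.docker_swarm:
+#       state: join
+#       join_token: "{{ swarm_info.swarm_facts.JoinTokens.Worker }}"
+#       remote_addrs: [ '192.168.1.1:2377' ]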
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ DifferenceTracker,
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils._text import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
+ params = dict()
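+        # Skip options the detected Docker SDK/API versions do not support so
+        # the generated SwarmSpec stays compatible with older daemons.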
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
+ def compare_to_active(self, other, client, differences):
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ "inspect": self.inspect_swarm
+ }
+
+ if self.state == 'inspect':
+ self.client.module.deprecate(
+ "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm not created or other error!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
+ except APIError as exc:
+ self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+ self.client.fail("Can not remove the node. The status node is ready and not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str'),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str', no_log=True),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ )
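+
+ # Illustrative note on the version gating above (a sketch, not upstream
+ # behavior guarantees): the per-option entries tie a single module option to
+ # a minimum SDK/API version, while ``remove_operation`` uses a
+ # ``detect_usage`` callback to gate a whole operation. For a client whose
+ # ``module.params['state']`` is 'remove':
+ #
+ #     _detect_remove_operation(client)   # -> True, so docker-py >= 2.4.0
+ #                                        #    is required for this run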
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.25',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py
new file mode 100644
index 00000000..f6d5fad1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about a Docker Swarm cluster.
+
+description:
+ - Retrieves facts about a Docker Swarm.
+ - Returns lists of swarm object names for the services - nodes, services, tasks.
+ - The output differs depending on the API version available on the docker host.
+ - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+ It returns boolean flags on both error and success, indicating whether
+ the docker daemon can be communicated with, whether it is in Swarm mode, and
+ whether the current node is a Swarm Manager node.
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+options:
+ nodes:
+ description:
+ - Whether to list swarm nodes.
+ type: bool
+ default: no
+ nodes_filters:
+ description:
+ - A dictionary of filter values used for selecting nodes to list.
+ - "For example, C(name: mynode)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ services:
+ description:
+ - Whether to list swarm services.
+ type: bool
+ default: no
+ services_filters:
+ description:
+ - A dictionary of filter values used for selecting services to list.
+ - "For example, C(name: myservice)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ tasks:
+ description:
+ - Whether to list swarm tasks (containers).
+ type: bool
+ default: no
+ tasks_filters:
+ description:
+ - A dictionary of filter values used for selecting tasks to list.
+ - "For example, C(node: mynode-1)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+ for more information on possible filters.
+ type: dict
+ unlock_key:
+ description:
+ - Whether to retrieve the swarm unlock key.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+ - When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), then the module output will
+ contain verbose information about those objects, matching the full output of the corresponding API method.
+ - For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
+ - The verbose output in this module contains only a subset of the information returned by the I(_info) module
+ for each type of object.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+ community.general.docker_swarm_info:
+ ignore_errors: yes
+ register: result
+
+- name: Inform about basic flags
+ ansible.builtin.debug:
+ msg: |
+ Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+ Docker in Swarm mode: {{ result.docker_swarm_active }}
+ This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ nodes_filters:
+ name: mynode
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_facts
+
+- name: Get the swarm unlock key
+ community.general.docker_swarm_info:
+ unlock_key: yes
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_unlock_key
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+docker_swarm_active:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ and the docker daemon is in Swarm mode.
+ returned: both on success and on error
+ type: bool
+docker_swarm_manager:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ the docker daemon is in Swarm mode, and the current node is
+ a manager node.
+ - The module will fail unless this is C(true).
+ returned: both on success and on error
+ type: bool
+swarm_facts:
+ description:
+ - Facts representing the basic state of the docker Swarm cluster.
+ - Contains tokens to connect to the Swarm.
+ returned: always
+ type: dict
+swarm_unlock_key:
+ description:
+ - Contains the key needed to unlock the swarm.
+ returned: When I(unlock_key) is C(true).
+ type: str
+nodes:
+ description:
+ - List of dict objects containing the basic information about each node.
+ Keys match the C(docker node ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(nodes) is C(yes)
+ type: list
+ elements: dict
+services:
+ description:
+ - List of dict objects containing the basic information about each service.
+ Keys match the C(docker service ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(services) is C(yes)
+ type: list
+ elements: dict
+tasks:
+ description:
+ - List of dict objects containing the basic information about each task.
+ Keys match the C(docker service ps) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(tasks) is C(yes)
+ type: list
+ elements: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker_common
+ pass
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+
+class DockerSwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerSwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['tasks', 'services', 'nodes']
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.results['swarm_facts'] = self.get_docker_swarm_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+ if self.client.module.params['unlock_key']:
+ self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
+
+ def get_docker_swarm_facts(self):
+ try:
+ return self.client.inspect_swarm()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None):
+ items = None
+ items_list = []
+
+ try:
+ if docker_object == 'nodes':
+ items = self.client.nodes(filters=filters)
+ elif docker_object == 'tasks':
+ items = self.client.tasks(filters=filters)
+ elif docker_object == 'services':
+ items = self.client.services(filters=filters)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'nodes':
+ item_record = self.get_essential_facts_nodes(item)
+ elif docker_object == 'tasks':
+ item_record = self.get_essential_facts_tasks(item)
+ elif docker_object == 'services':
+ item_record = self.get_essential_facts_services(item)
+ if item_record['Mode'] == 'Global':
+ item_record['Replicas'] = len(items)
+ items_list.append(item_record)
+
+ return items_list
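+
+ # Illustrative sketch of the reduction above (assumed data, not module
+ # output): with I(verbose_output) disabled, a raw node object collapses to
+ # the keys shown by ``docker node ls``, e.g.
+ #
+ #     {'ID': 'abc123', 'Hostname': 'node-1', 'Status': 'ready',
+ #      'Availability': 'active', 'ManagerStatus': 'Leader',
+ #      'EngineVersion': '19.03.1'}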
+
+ @staticmethod
+ def get_essential_facts_nodes(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item.get('ID')
+ object_essentials['Hostname'] = item['Description']['Hostname']
+ object_essentials['Status'] = item['Status']['State']
+ object_essentials['Availability'] = item['Spec']['Availability']
+ if 'ManagerStatus' in item:
+ object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
+ if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
+ object_essentials['ManagerStatus'] = "Leader"
+ else:
+ object_essentials['ManagerStatus'] = None
+ object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
+
+ return object_essentials
+
+ def get_essential_facts_tasks(self, item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ # Return the container ID so we do not trigger another connection to the host;
+ # the container ID is sufficient to get extended info in other tasks
+ object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
+ object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
+ object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
+ object_essentials['DesiredState'] = item['DesiredState']
+ object_essentials['CurrentState'] = item['Status']['State']
+ if 'Err' in item['Status']:
+ object_essentials['Error'] = item['Status']['Err']
+ else:
+ object_essentials['Error'] = None
+
+ return object_essentials
+
+ @staticmethod
+ def get_essential_facts_services(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ object_essentials['Name'] = item['Spec']['Name']
+ if 'Replicated' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Replicated"
+ object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
+ elif 'Global' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Global"
+ # The number of replicas has to be updated in the calling method or may be left as None
+ object_essentials['Replicas'] = None
+ object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
+ if 'Ports' in item['Spec']['EndpointSpec']:
+ object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
+ else:
+ object_essentials['Ports'] = []
+
+ return object_essentials
+
+ def get_docker_swarm_unlock_key(self):
+ unlock_key = self.client.get_unlock_key() or {}
+ return unlock_key.get('UnlockKey') or None
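+
+ # Illustrative sketch (assumes the SDK response shape): the docker API's
+ # GET /swarm/unlockkey returns a mapping like {'UnlockKey': 'SWMKEY-1-...'}.
+ # On swarms without autolock the key is an empty string, which the method
+ # above normalizes to None:
+ #
+ #     ({'UnlockKey': ''}.get('UnlockKey') or None)   # -> None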
+
+
+def main():
+ argument_spec = dict(
+ nodes=dict(type='bool', default=False),
+ nodes_filters=dict(type='dict'),
+ tasks=dict(type='bool', default=False),
+ tasks_filters=dict(type='dict'),
+ services=dict(type='bool', default=False),
+ services_filters=dict(type='dict'),
+ unlock_key=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+ option_minimal_versions = dict(
+ unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ docker_swarm_active=False,
+ docker_swarm_manager=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+ client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
+ client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerSwarmManager(client, results)
+ results.update(client.fail_results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py
new file mode 100644
index 00000000..7c6f23a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py
@@ -0,0 +1,3004 @@
+#!/usr/bin/python
+#
+# (c) 2017, Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+ - "Dario Zanzico (@dariko)"
+ - "Jason Witkowski (@jwitko)"
+ - "Hannes Ljungberg (@hannseman)"
+short_description: docker swarm service
+description:
+ - Manages docker services via a swarm manager node.
+options:
+ args:
+ description:
+ - List of arguments to be passed to the container.
+ - Corresponds to the C(ARG) parameter of C(docker service create).
+ type: list
+ elements: str
+ command:
+ description:
+ - Command to execute when the container starts.
+ - A command may be either a string or a list of strings.
+ - Corresponds to the C(COMMAND) parameter of C(docker service create).
+ type: raw
+ configs:
+ description:
+ - List of dictionaries describing the service configs.
+ - Corresponds to the C(--config) option of C(docker service create).
+ - Requires API version >= 1.30.
+ type: list
+ elements: dict
+ suboptions:
+ config_id:
+ description:
+ - Config's ID.
+ type: str
+ config_name:
+ description:
+ - Config's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+ type: str
+ uid:
+ description:
+ - UID of the config file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the config file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(placement.constraints) instead.
+ type: list
+ elements: str
+ container_labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--container-label) option of C(docker service create).
+ type: dict
+ dns:
+ description:
+ - List of custom DNS servers.
+ - Corresponds to the C(--dns) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of custom DNS search domains.
+ - Corresponds to the C(--dns-search) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_options:
+ description:
+ - List of custom DNS options.
+ - Corresponds to the C(--dns-option) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ endpoint_mode:
+ description:
+ - Service endpoint mode.
+ - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ choices:
+ - vip
+ - dnsrr
+ env:
+ description:
+ - List or dictionary of the service environment variables.
+ - If passed a list, each item needs to be in the format C(KEY=VALUE).
+ - If passed a dictionary, values which might be parsed as numbers,
+ booleans or other types by the YAML parser must be quoted (e.g. C("true"))
+ in order to avoid data loss.
+ - Corresponds to the C(--env) option of C(docker service create).
+ type: raw
+ env_files:
+ description:
+ - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+ - The order of the list is significant in determining the value assigned to a
+ variable that shows up more than once.
+ - If a variable is also present in I(env), the I(env) value will override it.
+ type: list
+ elements: path
+ force_update:
+ description:
+ - Force update even if no changes require it.
+ - Corresponds to the C(--force) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: bool
+ default: no
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ - Corresponds to the C(--group) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work.
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Requires API version >= 1.25.
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ type: str
+ retries:
+ description:
+ - Consecutive failures needed to report unhealthy. It accepts an integer value.
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ type: str
+ hostname:
+ description:
+ - Container hostname.
+ - Corresponds to the C(--hostname) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's /etc/hosts file.
+ - Corresponds to the C(--host) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: dict
+ image:
+ description:
+ - Service image path and tag.
+ - Corresponds to the C(IMAGE) parameter of C(docker service create).
+ type: str
+ init:
+ description:
+ - Use an init inside each service container to forward signals and reap processes.
+ - Corresponds to the C(--init) option of C(docker service create).
+ - Requires API version >= 1.37.
+ type: bool
+ version_added: '0.2.0'
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--label) option of C(docker service create).
+ type: dict
+ limits:
+ description:
+ - Configures service resource limits.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ type: str
+ type: dict
+ limit_cpu:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.cpus) instead.
+ type: float
+ limit_memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.memory) instead.
+ type: str
+ logging:
+ description:
+ - "Logging configuration for the service."
+ suboptions:
+ driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ type: str
+ options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ type: dict
+ type: dict
+ log_driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.driver) instead.
+ type: str
+ log_driver_options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.options) instead.
+ type: dict
+ mode:
+ description:
+ - Service replication mode.
+ - The service will be removed and recreated when this option is changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: yes
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: yes
+ networks:
+ description:
+ - List of the service networks names or dictionaries.
+ - When passed as dictionaries, valid sub-options are I(name), which is required, and
+ I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made, the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: yes
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: yes
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+ - Mount the container's root filesystem as read-only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+ - If set to C(-1), and service is not present, service replicas will be set to C(1).
+ - If set to C(-1), and service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ reserve_cpu:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.cpus) instead.
+ type: float
+ reserve_memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.memory) instead.
+ type: str
+ resolve_image:
+ description:
+ - Whether the current image digest should be resolved from the registry and updated if changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: no
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ restart_policy:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.condition) instead.
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ restart_policy_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.max_attempts) instead.
+ type: int
+ restart_policy_delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.delay) instead.
+ type: raw
+ restart_policy_window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.window) instead.
+ type: raw
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+ - The number of containers to roll back at a time. If set to 0, all containers roll back simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ update_delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(10).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.delay) instead.
+ type: raw
+ update_parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(1).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.parallelism) instead.
+ type: int
+ update_failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.failure_action) instead.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ update_monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.monitor) instead.
+ type: raw
+ update_max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.max_failure_ratio) instead.
+ type: float
+ update_order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.order) instead.
+ type: str
+ choices:
+ - stop-first
+ - start-first
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.24"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+ When using older versions, use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but are accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{
+ "args": [
+ "3600"
+ ],
+ "command": [
+ "sleep"
+ ],
+ "configs": null,
+ "constraints": [
+ "node.role == manager",
+ "engine.labels.operatingsystem == ubuntu 14.04"
+ ],
+ "container_labels": null,
+ "dns": null,
+ "dns_options": null,
+ "dns_search": null,
+ "endpoint_mode": null,
+ "env": [
+ "ENVVAR1=envvar1",
+ "ENVVAR2=envvar2"
+ ],
+ "force_update": null,
+ "groups": null,
+ "healthcheck": {
+ "interval": 90000000000,
+ "retries": 3,
+ "start_period": 30000000000,
+ "test": [
+ "CMD",
+ "curl",
+ "--fail",
+ "http://nginx.host.com"
+ ],
+ "timeout": 10000000000
+ },
+ "healthcheck_disabled": false,
+ "hostname": null,
+ "hosts": null,
+ "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
+ "labels": {
+ "com.example.department": "Finance",
+ "com.example.description": "Accounting webapp"
+ },
+ "limit_cpu": 0.5,
+ "limit_memory": 52428800,
+ "log_driver": "fluentd",
+ "log_driver_options": {
+ "fluentd-address": "127.0.0.1:24224",
+ "fluentd-async-connect": "true",
+ "tag": "myservice"
+ },
+ "mode": "replicated",
+ "mounts": [
+ {
+ "readonly": false,
+ "source": "/tmp/",
+ "target": "/remote_tmp/",
+ "type": "bind",
+ "labels": null,
+ "propagation": null,
+ "no_copy": null,
+ "driver_config": null,
+ "tmpfs_size": null,
+ "tmpfs_mode": null
+ }
+ ],
+ "networks": null,
+ "placement_preferences": [
+ {
+ "spread": "node.labels.mylabel"
+ }
+ ],
+ "publish": null,
+ "read_only": null,
+ "replicas": 1,
+ "reserve_cpu": 0.25,
+ "reserve_memory": 20971520,
+ "restart_policy": "on-failure",
+ "restart_policy_attempts": 3,
+ "restart_policy_delay": 5000000000,
+ "restart_policy_window": 120000000000,
+ "secrets": null,
+ "stop_grace_period": null,
+ "stop_signal": null,
+ "tty": null,
+ "update_delay": 10000000000,
+ "update_failure_action": null,
+ "update_max_failure_ratio": null,
+ "update_monitor": null,
+ "update_order": "stop-first",
+ "update_parallelism": 2,
+ "user": null,
+ "working_dir": null
+ }'
+changes:
+ returned: always
+ description:
+ - List of changed service attributes if a service has been altered, [] otherwise.
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+ - True if the service has been recreated (removed and created).
+ type: bool
+ sample: True
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
+- name: Set fluentd logging
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+
+- name: Set configs
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.general.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Configure service resources
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ reservations:
+ cpus: 0.25
+ memory: 20M
+ limits:
+ cpus: 0.50
+ memory: 50M
+
+- name: Remove service
+ community.general.docker_swarm_service:
+ name: myservice
+ state: absent
+'''
+
+import shlex
+import time
+import operator
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ convert_duration_to_nanosecond,
+ parse_healthcheck,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+from ansible.module_utils.basic import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+
+try:
+ from docker import types
+ from docker.utils import (
+ parse_repository_tag,
+ parse_env_file,
+ format_environment,
+ )
+ from docker.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_docker_environment(env, env_files):
+ """
+ Will return a list of "KEY=VALUE" items. Supplied env variable can
+ be either a list or a dictionary.
+
+ If environment files are combined with explicit environment variables,
+ the explicit environment variables take precedence.
+ """
+ env_dict = {}
+ if env_files:
+ for env_file in env_files:
+ parsed_env_file = parse_env_file(env_file)
+ for name, value in parsed_env_file.items():
+ env_dict[name] = str(value)
+ if env is not None and isinstance(env, string_types):
+ env = env.split(',')
+ if env is not None and isinstance(env, dict):
+ for name, value in env.items():
+ if not isinstance(value, string_types):
+ raise ValueError(
+ 'Non-string value found for env option. '
+ 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
+ )
+ env_dict[name] = str(value)
+ elif env is not None and isinstance(env, list):
+ for item in env:
+ try:
+ name, value = item.split('=', 1)
+ except ValueError:
+ raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
+ env_dict[name] = value
+ elif env is not None:
+ raise ValueError(
+ 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
+ )
+ env_list = format_environment(env_dict)
+ if not env_list:
+ if env is not None or env_files is not None:
+ return []
+ else:
+ return None
+ return sorted(env_list)
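+
+# Illustrative sketch of the precedence rules above (assumed file contents,
+# not module code): given a hypothetical env file ``common.env`` containing
+# ``FOO=from_file`` and ``BAR=1``, explicit ``env`` values win and the
+# result is sorted:
+#
+#     get_docker_environment({'FOO': 'bar'}, ['common.env'])
+#     # -> ['BAR=1', 'FOO=bar']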
+
+
+def get_docker_networks(networks, network_ids):
+ """
+ Validate a list of network names or a list of network dictionaries.
+ Network names will be resolved to ids by using the network_ids mapping.
+ """
+ if networks is None:
+ return None
+ parsed_networks = []
+ for network in networks:
+ if isinstance(network, string_types):
+ parsed_network = {'name': network}
+ elif isinstance(network, dict):
+ if 'name' not in network:
+ raise TypeError(
+ '"name" is required when networks are passed as dictionaries.'
+ )
+ name = network.pop('name')
+ parsed_network = {'name': name}
+ aliases = network.pop('aliases', None)
+ if aliases is not None:
+ if not isinstance(aliases, list):
+ raise TypeError('"aliases" network option is only allowed as a list')
+ if not all(
+ isinstance(alias, string_types) for alias in aliases
+ ):
+ raise TypeError('Only strings are allowed as network aliases.')
+ parsed_network['aliases'] = aliases
+ options = network.pop('options', None)
+ if options is not None:
+ if not isinstance(options, dict):
+ raise TypeError('Only dict is allowed as network options.')
+ parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+ # Check if any invalid keys left
+ if network:
+ invalid_keys = ', '.join(network.keys())
+ raise TypeError(
+ '%s are not valid keys for the networks option' % invalid_keys
+ )
+
+ else:
+ raise TypeError(
+ 'Only a list of strings or dictionaries are allowed to be passed as networks.'
+ )
+ network_name = parsed_network.pop('name')
+ try:
+ parsed_network['id'] = network_ids[network_name]
+ except KeyError as e:
+ raise ValueError('Could not find a network named: %s.' % e)
+ parsed_networks.append(parsed_network)
+ return parsed_networks or []
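+
+# Illustrative sketch (assumed inputs): network names are resolved through
+# the supplied ``network_ids`` mapping, so an unknown name surfaces as a
+# ValueError instead of a docker API error:
+#
+#     get_docker_networks(
+#         ['appnet', {'name': 'backnet', 'aliases': ['db']}],
+#         {'appnet': 'id1', 'backnet': 'id2'})
+#     # -> [{'id': 'id1'}, {'aliases': ['db'], 'id': 'id2'}]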
+
+
+def get_nanoseconds_from_raw_option(name, value):
+ if value is None:
+ return None
+ elif isinstance(value, int):
+ return value
+ elif isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ return convert_duration_to_nanosecond(value)
+ else:
+ raise ValueError(
+ 'Invalid type for %s %s (%s). Only string or int allowed.'
+ % (name, value, type(value))
+ )
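+
+# Illustrative sketch: raw duration options accept nanoseconds as an int,
+# an int-like string, or a duration string that is converted via
+# convert_duration_to_nanosecond():
+#
+#     get_nanoseconds_from_raw_option('update_delay', 5000000000)   # -> 5000000000
+#     get_nanoseconds_from_raw_option('update_delay', '5s')         # -> 5000000000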
+
+
+def get_value(key, values, default=None):
+ value = values.get(key)
+ return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+ """
+ Check if new_dict has differences compared to old_dict while
+ ignoring keys in old_dict which are None in new_dict.
+ """
+ if new_dict is None:
+ return False
+ if not new_dict and old_dict:
+ return True
+ if not old_dict and new_dict:
+ return True
+ defined_options = dict(
+ (option, value) for option, value in new_dict.items()
+ if value is not None
+ )
+ for option, value in defined_options.items():
+ old_value = old_dict.get(option)
+ if not value and not old_value:
+ continue
+ if value != old_value:
+ return True
+ return False
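+
+# Illustrative sketch: None values in the new dict mean "not specified" and
+# never count as a difference:
+#
+#     has_dict_changed({'uid': None, 'gid': '0'}, {'uid': '1', 'gid': '0'})
+#     # -> False (uid is ignored, gid matches)
+#     has_dict_changed({'gid': '1'}, {'gid': '0'})
+#     # -> True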
+
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+ """
+ Check whether two lists have differences. Sort lists by default.
+ """
+
+ def sort_list(unsorted_list):
+ """
+ Sort a given list.
+ The list may contain dictionaries, so use the sort key to handle them.
+ """
+
+ if unsorted_list and isinstance(unsorted_list[0], dict):
+ if not sort_key:
+ raise Exception(
+ 'A sort key was not specified when sorting list'
+ )
+ else:
+ return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+ # Either the list is empty or does not contain dictionaries
+ try:
+ return sorted(unsorted_list)
+ except TypeError:
+ return unsorted_list
+
+ if new_list is None:
+ return False
+ old_list = old_list or []
+ if len(new_list) != len(old_list):
+ return True
+
+ if sort_lists:
+ zip_data = zip(sort_list(new_list), sort_list(old_list))
+ else:
+ zip_data = zip(new_list, old_list)
+ for new_item, old_item in zip_data:
+ is_same_type = type(new_item) == type(old_item)
+ if not is_same_type:
+ if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+ # Even though the types are different between these items,
+ # they are both strings. Try matching on the same string type.
+ try:
+ new_item_type = type(new_item)
+ old_item_casted = new_item_type(old_item)
+ if new_item != old_item_casted:
+ return True
+ else:
+ continue
+ except UnicodeEncodeError:
+ # Fallback to assuming the strings are different
+ return True
+ else:
+ return True
+ if isinstance(new_item, dict):
+ if has_dict_changed(new_item, old_item):
+ return True
+ elif new_item != old_item:
+ return True
+
+ return False
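+
+# Illustrative sketch: mismatched string types (for example ``str`` vs
+# ``unicode`` on Python 2) are reconciled by casting before comparing, and
+# dictionaries are compared with has_dict_changed():
+#
+#     has_list_changed([u'foo'], ['foo'])                      # -> False
+#     has_list_changed([{'a': 1}], [{'a': 2}], sort_key='a')   # -> True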
+
+
+def have_networks_changed(new_networks, old_networks):
+ """Special case list checking for networks to sort aliases"""
+
+ if new_networks is None:
+ return False
+ old_networks = old_networks or []
+ if len(new_networks) != len(old_networks):
+ return True
+
+ zip_data = zip(
+ sorted(new_networks, key=lambda k: k['id']),
+ sorted(old_networks, key=lambda k: k['id'])
+ )
+
+ for new_item, old_item in zip_data:
+ new_item = dict(new_item)
+ old_item = dict(old_item)
+ # Sort the aliases
+ if 'aliases' in new_item:
+ new_item['aliases'] = sorted(new_item['aliases'] or [])
+ if 'aliases' in old_item:
+ old_item['aliases'] = sorted(old_item['aliases'] or [])
+
+ if has_dict_changed(new_item, old_item):
+ return True
+
+ return False
+
+
+class DockerService(DockerBaseClass):
+ def __init__(self, docker_api_version, docker_py_version):
+ super(DockerService, self).__init__()
+ self.image = ""
+ self.command = None
+ self.args = None
+ self.endpoint_mode = None
+ self.dns = None
+ self.healthcheck = None
+ self.healthcheck_disabled = None
+ self.hostname = None
+ self.hosts = None
+ self.tty = None
+ self.dns_search = None
+ self.dns_options = None
+ self.env = None
+ self.force_update = None
+ self.groups = None
+ self.log_driver = None
+ self.log_driver_options = None
+ self.labels = None
+ self.container_labels = None
+ self.limit_cpu = None
+ self.limit_memory = None
+ self.reserve_cpu = None
+ self.reserve_memory = None
+ self.mode = "replicated"
+ self.user = None
+ self.mounts = None
+ self.configs = None
+ self.secrets = None
+ self.constraints = None
+ self.networks = None
+ self.stop_grace_period = None
+ self.stop_signal = None
+ self.publish = None
+ self.placement_preferences = None
+ self.replicas = -1
+ self.service_id = False
+ self.service_version = False
+ self.read_only = None
+ self.restart_policy = None
+ self.restart_policy_attempts = None
+ self.restart_policy_delay = None
+ self.restart_policy_window = None
+ self.rollback_config = None
+ self.update_delay = None
+ self.update_parallelism = None
+ self.update_failure_action = None
+ self.update_monitor = None
+ self.update_max_failure_ratio = None
+ self.update_order = None
+ self.working_dir = None
+ self.init = None
+
+ self.docker_api_version = docker_api_version
+ self.docker_py_version = docker_py_version
+
+ def get_facts(self):
+ return {
+ 'image': self.image,
+ 'mounts': self.mounts,
+ 'configs': self.configs,
+ 'networks': self.networks,
+ 'command': self.command,
+ 'args': self.args,
+ 'tty': self.tty,
+ 'dns': self.dns,
+ 'dns_search': self.dns_search,
+ 'dns_options': self.dns_options,
+ 'healthcheck': self.healthcheck,
+ 'healthcheck_disabled': self.healthcheck_disabled,
+ 'hostname': self.hostname,
+ 'hosts': self.hosts,
+ 'env': self.env,
+ 'force_update': self.force_update,
+ 'groups': self.groups,
+ 'log_driver': self.log_driver,
+ 'log_driver_options': self.log_driver_options,
+ 'publish': self.publish,
+ 'constraints': self.constraints,
+ 'placement_preferences': self.placement_preferences,
+ 'labels': self.labels,
+ 'container_labels': self.container_labels,
+ 'mode': self.mode,
+ 'replicas': self.replicas,
+ 'endpoint_mode': self.endpoint_mode,
+ 'restart_policy': self.restart_policy,
+ 'secrets': self.secrets,
+ 'stop_grace_period': self.stop_grace_period,
+ 'stop_signal': self.stop_signal,
+ 'limit_cpu': self.limit_cpu,
+ 'limit_memory': self.limit_memory,
+ 'read_only': self.read_only,
+ 'reserve_cpu': self.reserve_cpu,
+ 'reserve_memory': self.reserve_memory,
+ 'restart_policy_delay': self.restart_policy_delay,
+ 'restart_policy_attempts': self.restart_policy_attempts,
+ 'restart_policy_window': self.restart_policy_window,
+ 'rollback_config': self.rollback_config,
+ 'update_delay': self.update_delay,
+ 'update_parallelism': self.update_parallelism,
+ 'update_failure_action': self.update_failure_action,
+ 'update_monitor': self.update_monitor,
+ 'update_max_failure_ratio': self.update_max_failure_ratio,
+ 'update_order': self.update_order,
+ 'user': self.user,
+ 'working_dir': self.working_dir,
+ 'init': self.init,
+ }
+
+ @property
+ def can_update_networks(self):
+ # Before Docker API 1.29 adding/removing networks was not supported
+ return (
+ self.docker_api_version >= LooseVersion('1.29') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @property
+ def can_use_task_template_networks(self):
+ # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
+ return (
+ self.docker_api_version >= LooseVersion('1.25') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
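+ # The get_*_from_ansible_params helpers below all follow the same pattern:
+ # values from the new grouped option (e.g. restart_config) win, and the
+ # deprecated flat options (e.g. restart_policy_delay) serve as defaults via
+ # get_value. Durations are normalized to nanoseconds, as the Docker API
+ # expects.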
+ @staticmethod
+ def get_restart_config_from_ansible_params(params):
+ restart_config = params['restart_config'] or {}
+ condition = get_value(
+ 'condition',
+ restart_config,
+ default=params['restart_policy']
+ )
+ delay = get_value(
+ 'delay',
+ restart_config,
+ default=params['restart_policy_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'restart_policy_delay',
+ delay
+ )
+ max_attempts = get_value(
+ 'max_attempts',
+ restart_config,
+ default=params['restart_policy_attempts']
+ )
+ window = get_value(
+ 'window',
+ restart_config,
+ default=params['restart_policy_window']
+ )
+ window = get_nanoseconds_from_raw_option(
+ 'restart_policy_window',
+ window
+ )
+ return {
+ 'restart_policy': condition,
+ 'restart_policy_delay': delay,
+ 'restart_policy_attempts': max_attempts,
+ 'restart_policy_window': window
+ }
+
+ @staticmethod
+ def get_update_config_from_ansible_params(params):
+ update_config = params['update_config'] or {}
+ parallelism = get_value(
+ 'parallelism',
+ update_config,
+ default=params['update_parallelism']
+ )
+ delay = get_value(
+ 'delay',
+ update_config,
+ default=params['update_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'update_delay',
+ delay
+ )
+ failure_action = get_value(
+ 'failure_action',
+ update_config,
+ default=params['update_failure_action']
+ )
+ monitor = get_value(
+ 'monitor',
+ update_config,
+ default=params['update_monitor']
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'update_monitor',
+ monitor
+ )
+ max_failure_ratio = get_value(
+ 'max_failure_ratio',
+ update_config,
+ default=params['update_max_failure_ratio']
+ )
+ order = get_value(
+ 'order',
+ update_config,
+ default=params['update_order']
+ )
+ return {
+ 'update_parallelism': parallelism,
+ 'update_delay': delay,
+ 'update_failure_action': failure_action,
+ 'update_monitor': monitor,
+ 'update_max_failure_ratio': max_failure_ratio,
+ 'update_order': order
+ }
+
+ @staticmethod
+ def get_rollback_config_from_ansible_params(params):
+ if params['rollback_config'] is None:
+ return None
+ rollback_config = params['rollback_config'] or {}
+ delay = get_nanoseconds_from_raw_option(
+ 'rollback_config.delay',
+ rollback_config.get('delay')
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'rollback_config.monitor',
+ rollback_config.get('monitor')
+ )
+ return {
+ 'parallelism': rollback_config.get('parallelism'),
+ 'delay': delay,
+ 'failure_action': rollback_config.get('failure_action'),
+ 'monitor': monitor,
+ 'max_failure_ratio': rollback_config.get('max_failure_ratio'),
+ 'order': rollback_config.get('order'),
+ }
+
+ @staticmethod
+ def get_logging_from_ansible_params(params):
+ logging_config = params['logging'] or {}
+ driver = get_value(
+ 'driver',
+ logging_config,
+ default=params['log_driver']
+ )
+ options = get_value(
+ 'options',
+ logging_config,
+ default=params['log_driver_options']
+ )
+ return {
+ 'log_driver': driver,
+ 'log_driver_options': options,
+ }
+
+ @staticmethod
+ def get_limits_from_ansible_params(params):
+ limits = params['limits'] or {}
+ cpus = get_value(
+ 'cpus',
+ limits,
+ default=params['limit_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ limits,
+ default=params['limit_memory']
+ )
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
+ return {
+ 'limit_cpu': cpus,
+ 'limit_memory': memory,
+ }
+
+ @staticmethod
+ def get_reservations_from_ansible_params(params):
+ reservations = params['reservations'] or {}
+ cpus = get_value(
+ 'cpus',
+ reservations,
+ default=params['reserve_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ reservations,
+ default=params['reserve_memory']
+ )
+
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
+ return {
+ 'reserve_cpu': cpus,
+ 'reserve_memory': memory,
+ }
+
+ @staticmethod
+ def get_placement_from_ansible_params(params):
+ placement = params['placement'] or {}
+ constraints = get_value(
+ 'constraints',
+ placement,
+ default=params['constraints']
+ )
+
+ preferences = placement.get('preferences')
+ return {
+ 'constraints': constraints,
+ 'placement_preferences': preferences,
+ }
+
+ @classmethod
+ def from_ansible_params(
+ cls,
+ ap,
+ old_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ docker_api_version,
+ docker_py_version,
+ ):
+ s = DockerService(docker_api_version, docker_py_version)
+ s.image = image_digest
+ s.args = ap['args']
+ s.endpoint_mode = ap['endpoint_mode']
+ s.dns = ap['dns']
+ s.dns_search = ap['dns_search']
+ s.dns_options = ap['dns_options']
+ s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
+ s.hostname = ap['hostname']
+ s.hosts = ap['hosts']
+ s.tty = ap['tty']
+ s.labels = ap['labels']
+ s.container_labels = ap['container_labels']
+ s.mode = ap['mode']
+ s.stop_signal = ap['stop_signal']
+ s.user = ap['user']
+ s.working_dir = ap['working_dir']
+ s.read_only = ap['read_only']
+ s.init = ap['init']
+
+ s.networks = get_docker_networks(ap['networks'], network_ids)
+
+ s.command = ap['command']
+ if isinstance(s.command, string_types):
+ s.command = shlex.split(s.command)
+ elif isinstance(s.command, list):
+ invalid_items = [
+ (index, item)
+ for index, item in enumerate(s.command)
+ if not isinstance(item, string_types)
+ ]
+ if invalid_items:
+ errors = ', '.join(
+ [
+ '%s (%s) at index %s' % (item, type(item), index)
+ for index, item in invalid_items
+ ]
+ )
+ raise Exception(
+ 'All items in a command list need to be strings. '
+ 'Check quoting. Invalid items: %s.'
+ % errors
+ )
+ s.command = ap['command']
+ elif s.command is not None:
+ raise ValueError(
+ 'Invalid type for command %s (%s). '
+ 'Only string or list allowed. Check quoting.'
+ % (s.command, type(s.command))
+ )
+
+ s.env = get_docker_environment(ap['env'], ap['env_files'])
+ s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
+
+ update_config = cls.get_update_config_from_ansible_params(ap)
+ for key, value in update_config.items():
+ setattr(s, key, value)
+
+ restart_config = cls.get_restart_config_from_ansible_params(ap)
+ for key, value in restart_config.items():
+ setattr(s, key, value)
+
+ logging_config = cls.get_logging_from_ansible_params(ap)
+ for key, value in logging_config.items():
+ setattr(s, key, value)
+
+ limits = cls.get_limits_from_ansible_params(ap)
+ for key, value in limits.items():
+ setattr(s, key, value)
+
+ reservations = cls.get_reservations_from_ansible_params(ap)
+ for key, value in reservations.items():
+ setattr(s, key, value)
+
+ placement = cls.get_placement_from_ansible_params(ap)
+ for key, value in placement.items():
+ setattr(s, key, value)
+
+ if ap['stop_grace_period'] is not None:
+ s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
+
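+ # force_update must change between runs to actually force an update, so a
+ # unique integer is derived from the current timestamp and later passed to
+ # the API as the TaskTemplate's force_update value.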
+ if ap['force_update']:
+ s.force_update = int(str(time.time()).replace('.', ''))
+
+ if ap['groups'] is not None:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ s.groups = [str(g) for g in ap['groups']]
+
+ if ap['replicas'] == -1:
+ if old_service:
+ s.replicas = old_service.replicas
+ else:
+ s.replicas = 1
+ else:
+ s.replicas = ap['replicas']
+
+ if ap['publish'] is not None:
+ s.publish = []
+ for param_p in ap['publish']:
+ service_p = {}
+ service_p['protocol'] = param_p['protocol']
+ service_p['mode'] = param_p['mode']
+ service_p['published_port'] = param_p['published_port']
+ service_p['target_port'] = param_p['target_port']
+ s.publish.append(service_p)
+
+ if ap['mounts'] is not None:
+ s.mounts = []
+ for param_m in ap['mounts']:
+ service_m = {}
+ service_m['readonly'] = param_m['readonly']
+ service_m['type'] = param_m['type']
+ if param_m['source'] is None and param_m['type'] != 'tmpfs':
+ raise ValueError('Source must be specified for mounts which are not of type tmpfs')
+ service_m['source'] = param_m['source'] or ''
+ service_m['target'] = param_m['target']
+ service_m['labels'] = param_m['labels']
+ service_m['no_copy'] = param_m['no_copy']
+ service_m['propagation'] = param_m['propagation']
+ service_m['driver_config'] = param_m['driver_config']
+ service_m['tmpfs_mode'] = param_m['tmpfs_mode']
+ tmpfs_size = param_m['tmpfs_size']
+ if tmpfs_size is not None:
+ try:
+ tmpfs_size = human_to_bytes(tmpfs_size)
+ except ValueError as exc:
+ raise ValueError(
+ 'Failed to convert tmpfs_size to bytes: %s' % exc
+ )
+
+ service_m['tmpfs_size'] = tmpfs_size
+ s.mounts.append(service_m)
+
+ if ap['configs'] is not None:
+ s.configs = []
+ for param_m in ap['configs']:
+ service_c = {}
+ config_name = param_m['config_name']
+ service_c['config_id'] = param_m['config_id'] or config_ids[config_name]
+ service_c['config_name'] = config_name
+ service_c['filename'] = param_m['filename'] or config_name
+ service_c['uid'] = param_m['uid']
+ service_c['gid'] = param_m['gid']
+ service_c['mode'] = param_m['mode']
+ s.configs.append(service_c)
+
+ if ap['secrets'] is not None:
+ s.secrets = []
+ for param_m in ap['secrets']:
+ service_s = {}
+ secret_name = param_m['secret_name']
+ service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name]
+ service_s['secret_name'] = secret_name
+ service_s['filename'] = param_m['filename'] or secret_name
+ service_s['uid'] = param_m['uid']
+ service_s['gid'] = param_m['gid']
+ service_s['mode'] = param_m['mode']
+ s.secrets.append(service_s)
+
+ return s
+
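+ # Compares this (desired) service against the currently deployed one
+ # ("os" = old service). Returns a tuple of (changed, differences,
+ # needs_rebuild, force_update); most options left at None mean "keep the
+ # current value" and are skipped in the comparison.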
+ def compare(self, os):
+ differences = DifferenceTracker()
+ needs_rebuild = False
+ force_update = False
+ if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
+ differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
+ if has_list_changed(self.env, os.env):
+ differences.add('env', parameter=self.env, active=os.env)
+ if self.log_driver is not None and self.log_driver != os.log_driver:
+ differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
+ if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
+ differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
+ if self.mode != os.mode:
+ needs_rebuild = True
+ differences.add('mode', parameter=self.mode, active=os.mode)
+ if has_list_changed(self.mounts, os.mounts, sort_key='target'):
+ differences.add('mounts', parameter=self.mounts, active=os.mounts)
+ if has_list_changed(self.configs, os.configs, sort_key='config_name'):
+ differences.add('configs', parameter=self.configs, active=os.configs)
+ if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'):
+ differences.add('secrets', parameter=self.secrets, active=os.secrets)
+ if have_networks_changed(self.networks, os.networks):
+ differences.add('networks', parameter=self.networks, active=os.networks)
+ needs_rebuild = not self.can_update_networks
+ if self.replicas != os.replicas:
+ differences.add('replicas', parameter=self.replicas, active=os.replicas)
+ if has_list_changed(self.command, os.command, sort_lists=False):
+ differences.add('command', parameter=self.command, active=os.command)
+ if has_list_changed(self.args, os.args, sort_lists=False):
+ differences.add('args', parameter=self.args, active=os.args)
+ if has_list_changed(self.constraints, os.constraints):
+ differences.add('constraints', parameter=self.constraints, active=os.constraints)
+ if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False):
+ differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
+ if has_list_changed(self.groups, os.groups):
+ differences.add('groups', parameter=self.groups, active=os.groups)
+ if self.labels is not None and self.labels != (os.labels or {}):
+ differences.add('labels', parameter=self.labels, active=os.labels)
+ if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
+ differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
+ if self.limit_memory is not None and self.limit_memory != os.limit_memory:
+ differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
+ if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
+ differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
+ if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
+ differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
+ if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
+ differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
+ if self.stop_signal is not None and self.stop_signal != os.stop_signal:
+ differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
+ if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
+ differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
+ if self.has_publish_changed(os.publish):
+ differences.add('publish', parameter=self.publish, active=os.publish)
+ if self.read_only is not None and self.read_only != os.read_only:
+ differences.add('read_only', parameter=self.read_only, active=os.read_only)
+ if self.restart_policy is not None and self.restart_policy != os.restart_policy:
+ differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
+ if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
+ differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
+ if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
+ differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
+ if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
+ differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
+ if has_dict_changed(self.rollback_config, os.rollback_config):
+ differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
+ if self.update_delay is not None and self.update_delay != os.update_delay:
+ differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
+ if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
+ differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
+ if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
+ differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
+ if self.update_monitor is not None and self.update_monitor != os.update_monitor:
+ differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
+ if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
+ differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
+ if self.update_order is not None and self.update_order != os.update_order:
+ differences.add('update_order', parameter=self.update_order, active=os.update_order)
+ has_image_changed, change = self.has_image_changed(os.image)
+ if has_image_changed:
+ differences.add('image', parameter=self.image, active=change)
+ if self.user and self.user != os.user:
+ differences.add('user', parameter=self.user, active=os.user)
+ if has_list_changed(self.dns, os.dns, sort_lists=False):
+ differences.add('dns', parameter=self.dns, active=os.dns)
+ if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
+ differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
+ if has_list_changed(self.dns_options, os.dns_options):
+ differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
+ if self.has_healthcheck_changed(os):
+ differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
+ if self.hostname is not None and self.hostname != os.hostname:
+ differences.add('hostname', parameter=self.hostname, active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
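+ # The argument is the currently deployed service. A healthcheck test of
+ # ['NONE'] is how Docker marks a healthcheck as explicitly disabled, so it
+ # compares equal to a disabled healthcheck here.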
+ def has_healthcheck_changed(self, old_service):
+ if self.healthcheck_disabled is False and self.healthcheck is None:
+ return False
+ if self.healthcheck_disabled:
+ if old_service.healthcheck is None:
+ return False
+ if old_service.healthcheck.get('test') == ['NONE']:
+ return False
+ return self.healthcheck != old_service.healthcheck
+
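+ # Published ports are compared order-insensitively. When 'mode' is not set
+ # on a desired port it is excluded from the comparison, since the deployed
+ # service reports the default mode that Docker filled in.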
+ def has_publish_changed(self, old_publish):
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+ publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol')
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
+
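+ # If the desired image is not pinned to a digest, the digest is stripped
+ # from the deployed image before comparing, so that plain 'repo:tag' specs
+ # do not report a change against the digest resolved at deploy time.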
+ def has_image_changed(self, old_image):
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
+
+ def build_container_spec(self):
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if filename:
+ secret_args['filename'] = filename
+ uid = secret_config.get('uid')
+ if uid:
+ secret_args['uid'] = uid
+ gid = secret_config.get('gid')
+ if gid:
+ secret_args['gid'] = gid
+ mode = secret_config.get('mode')
+ if mode:
+ secret_args['mode'] = mode
+
+ secrets.append(types.SecretReference(**secret_args))
+
+ dns_config_args = {}
+ if self.dns is not None:
+ dns_config_args['nameservers'] = self.dns
+ if self.dns_search is not None:
+ dns_config_args['search'] = self.dns_search
+ if self.dns_options is not None:
+ dns_config_args['options'] = self.dns_options
+ dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
+
+ container_spec_args = {}
+ if self.command is not None:
+ container_spec_args['command'] = self.command
+ if self.args is not None:
+ container_spec_args['args'] = self.args
+ if self.env is not None:
+ container_spec_args['env'] = self.env
+ if self.user is not None:
+ container_spec_args['user'] = self.user
+ if self.container_labels is not None:
+ container_spec_args['labels'] = self.container_labels
+ if self.healthcheck is not None:
+ container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
+ elif self.healthcheck_disabled:
+ container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE'])
+ if self.hostname is not None:
+ container_spec_args['hostname'] = self.hostname
+ if self.hosts is not None:
+ container_spec_args['hosts'] = self.hosts
+ if self.read_only is not None:
+ container_spec_args['read_only'] = self.read_only
+ if self.stop_grace_period is not None:
+ container_spec_args['stop_grace_period'] = self.stop_grace_period
+ if self.stop_signal is not None:
+ container_spec_args['stop_signal'] = self.stop_signal
+ if self.tty is not None:
+ container_spec_args['tty'] = self.tty
+ if self.groups is not None:
+ container_spec_args['groups'] = self.groups
+ if self.working_dir is not None:
+ container_spec_args['workdir'] = self.working_dir
+ if secrets is not None:
+ container_spec_args['secrets'] = secrets
+ if mounts is not None:
+ container_spec_args['mounts'] = mounts
+ if dns_config is not None:
+ container_spec_args['dns_config'] = dns_config
+ if configs is not None:
+ container_spec_args['configs'] = configs
+ if self.init is not None:
+ container_spec_args['init'] = self.init
+
+ return types.ContainerSpec(self.image, **container_spec_args)
+
+ def build_placement(self):
+ placement_args = {}
+ if self.constraints is not None:
+ placement_args['constraints'] = self.constraints
+ if self.placement_preferences is not None:
+ placement_args['preferences'] = [
+ {key.title(): {'SpreadDescriptor': value}}
+ for preference in self.placement_preferences
+ for key, value in preference.items()
+ ]
+ return types.Placement(**placement_args) if placement_args else None
+
+ def build_update_config(self):
+ update_config_args = {}
+ if self.update_parallelism is not None:
+ update_config_args['parallelism'] = self.update_parallelism
+ if self.update_delay is not None:
+ update_config_args['delay'] = self.update_delay
+ if self.update_failure_action is not None:
+ update_config_args['failure_action'] = self.update_failure_action
+ if self.update_monitor is not None:
+ update_config_args['monitor'] = self.update_monitor
+ if self.update_max_failure_ratio is not None:
+ update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
+ if self.update_order is not None:
+ update_config_args['order'] = self.update_order
+ return types.UpdateConfig(**update_config_args) if update_config_args else None
+
+ def build_log_driver(self):
+ log_driver_args = {}
+ if self.log_driver is not None:
+ log_driver_args['name'] = self.log_driver
+ if self.log_driver_options is not None:
+ log_driver_args['options'] = self.log_driver_options
+ return types.DriverConfig(**log_driver_args) if log_driver_args else None
+
+ def build_restart_policy(self):
+ restart_policy_args = {}
+ if self.restart_policy is not None:
+ restart_policy_args['condition'] = self.restart_policy
+ if self.restart_policy_delay is not None:
+ restart_policy_args['delay'] = self.restart_policy_delay
+ if self.restart_policy_attempts is not None:
+ restart_policy_args['max_attempts'] = self.restart_policy_attempts
+ if self.restart_policy_window is not None:
+ restart_policy_args['window'] = self.restart_policy_window
+ return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
+
+ def build_rollback_config(self):
+ if self.rollback_config is None:
+ return None
+ rollback_config_options = [
+ 'parallelism',
+ 'delay',
+ 'failure_action',
+ 'monitor',
+ 'max_failure_ratio',
+ 'order',
+ ]
+ rollback_config_args = {}
+ for option in rollback_config_options:
+ value = self.rollback_config.get(option)
+ if value is not None:
+ rollback_config_args[option] = value
+ return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
+
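+ # The Docker API expresses CPU limits/reservations in NanoCPUs
+ # (1 CPU == 1e9 NanoCPUs), hence the multiplication below.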
+ def build_resources(self):
+ resources_args = {}
+ if self.limit_cpu is not None:
+ resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
+ if self.limit_memory is not None:
+ resources_args['mem_limit'] = self.limit_memory
+ if self.reserve_cpu is not None:
+ resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
+ if self.reserve_memory is not None:
+ resources_args['mem_reservation'] = self.reserve_memory
+ return types.Resources(**resources_args) if resources_args else None
+
+ def build_task_template(self, container_spec, placement=None):
+ log_driver = self.build_log_driver()
+ restart_policy = self.build_restart_policy()
+ resources = self.build_resources()
+
+ task_template_args = {}
+ if placement is not None:
+ task_template_args['placement'] = placement
+ if log_driver is not None:
+ task_template_args['log_driver'] = log_driver
+ if restart_policy is not None:
+ task_template_args['restart_policy'] = restart_policy
+ if resources is not None:
+ task_template_args['resources'] = resources
+ if self.force_update:
+ task_template_args['force_update'] = self.force_update
+ if self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ task_template_args['networks'] = networks
+ return types.TaskTemplate(container_spec=container_spec, **task_template_args)
+
+ def build_service_mode(self):
+ if self.mode == 'global':
+ self.replicas = None
+ return types.ServiceMode(self.mode, replicas=self.replicas)
+
+ def build_networks(self):
+ networks = None
+ if self.networks is not None:
+ networks = []
+ for network in self.networks:
+ docker_network = {'Target': network['id']}
+ if 'aliases' in network:
+ docker_network['Aliases'] = network['aliases']
+ if 'options' in network:
+ docker_network['DriverOpts'] = network['options']
+ networks.append(docker_network)
+ return networks
+
+ def build_endpoint_spec(self):
+ endpoint_spec_args = {}
+ if self.publish is not None:
+ ports = []
+ for port in self.publish:
+ port_spec = {
+ 'Protocol': port['protocol'],
+ 'PublishedPort': port['published_port'],
+ 'TargetPort': port['target_port']
+ }
+ if port.get('mode'):
+ port_spec['PublishMode'] = port['mode']
+ ports.append(port_spec)
+ endpoint_spec_args['ports'] = ports
+ if self.endpoint_mode is not None:
+ endpoint_spec_args['mode'] = self.endpoint_mode
+ return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
+
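+ # Assembles the keyword arguments for create_service()/update_service().
+ # Networks are attached to the TaskTemplate when the SDK/API support it
+ # (see can_use_task_template_networks) and to the service spec otherwise.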
+ def build_docker_service(self):
+ container_spec = self.build_container_spec()
+ placement = self.build_placement()
+ task_template = self.build_task_template(container_spec, placement)
+
+ update_config = self.build_update_config()
+ rollback_config = self.build_rollback_config()
+ service_mode = self.build_service_mode()
+ endpoint_spec = self.build_endpoint_spec()
+
+ service = {'task_template': task_template, 'mode': service_mode}
+ if update_config:
+ service['update_config'] = update_config
+ if rollback_config:
+ service['rollback_config'] = rollback_config
+ if endpoint_spec:
+ service['endpoint_spec'] = endpoint_spec
+ if self.labels:
+ service['labels'] = self.labels
+ if not self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ service['networks'] = networks
+ return service
+
+
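+# DockerServiceManager wraps the client-side workflow: it inspects the
+# deployed service and normalizes it into a DockerService, resolves missing
+# secret/config/network ids, and creates, updates, rebuilds or removes the
+# service as needed.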
+class DockerServiceManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.retries = 2
+ self.diff_tracker = None
+
+ def get_service(self, name):
+ try:
+ raw_data = self.client.inspect_service(name)
+ except NotFound:
+ return None
+ ds = DockerService(self.client.docker_api_version, self.client.docker_py_version)
+
+ task_template_data = raw_data['Spec']['TaskTemplate']
+ ds.image = task_template_data['ContainerSpec']['Image']
+ ds.user = task_template_data['ContainerSpec'].get('User')
+ ds.env = task_template_data['ContainerSpec'].get('Env')
+ ds.command = task_template_data['ContainerSpec'].get('Command')
+ ds.args = task_template_data['ContainerSpec'].get('Args')
+ ds.groups = task_template_data['ContainerSpec'].get('Groups')
+ ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
+ ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
+ ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
+ ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
+
+ healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
+ if healthcheck_data:
+ options = {
+ 'Test': 'test',
+ 'Interval': 'interval',
+ 'Timeout': 'timeout',
+ 'StartPeriod': 'start_period',
+ 'Retries': 'retries'
+ }
+ healthcheck = dict(
+ (options[key], value) for key, value in healthcheck_data.items()
+ if value is not None and key in options
+ )
+ ds.healthcheck = healthcheck
+
+ update_config_data = raw_data['Spec'].get('UpdateConfig')
+ if update_config_data:
+ ds.update_delay = update_config_data.get('Delay')
+ ds.update_parallelism = update_config_data.get('Parallelism')
+ ds.update_failure_action = update_config_data.get('FailureAction')
+ ds.update_monitor = update_config_data.get('Monitor')
+ ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
+ ds.update_order = update_config_data.get('Order')
+
+ rollback_config_data = raw_data['Spec'].get('RollbackConfig')
+ if rollback_config_data:
+ ds.rollback_config = {
+ 'parallelism': rollback_config_data.get('Parallelism'),
+ 'delay': rollback_config_data.get('Delay'),
+ 'failure_action': rollback_config_data.get('FailureAction'),
+ 'monitor': rollback_config_data.get('Monitor'),
+ 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
+ 'order': rollback_config_data.get('Order'),
+ }
+
+ dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
+ if dns_config:
+ ds.dns = dns_config.get('Nameservers')
+ ds.dns_search = dns_config.get('Search')
+ ds.dns_options = dns_config.get('Options')
+
+ ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
+
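+ # Extra hosts may be returned either as 'IP hostname' entries (/etc/hosts
+ # style) or as 'hostname:IP'; both forms are normalized to (ip, hostname)
+ # pairs here.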
+ hosts = task_template_data['ContainerSpec'].get('Hosts')
+ if hosts:
+ hosts = [
+ list(reversed(host.split(":", 1)))
+ if ":" in host
+ else host.split(" ", 1)
+ for host in hosts
+ ]
+ ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
+ ds.tty = task_template_data['ContainerSpec'].get('TTY')
+
+ placement = task_template_data.get('Placement')
+ if placement:
+ ds.constraints = placement.get('Constraints')
+ placement_preferences = []
+ for preference in placement.get('Preferences', []):
+ placement_preferences.append(
+ dict(
+ (key.lower(), value['SpreadDescriptor'])
+ for key, value in preference.items()
+ )
+ )
+ ds.placement_preferences = placement_preferences or None
+
+ restart_policy_data = task_template_data.get('RestartPolicy')
+ if restart_policy_data:
+ ds.restart_policy = restart_policy_data.get('Condition')
+ ds.restart_policy_delay = restart_policy_data.get('Delay')
+ ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
+ ds.restart_policy_window = restart_policy_data.get('Window')
+
+ raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
+ if raw_data_endpoint_spec:
+ ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
+ raw_data_ports = raw_data_endpoint_spec.get('Ports')
+ if raw_data_ports:
+ ds.publish = []
+ for port in raw_data_ports:
+ ds.publish.append({
+ 'protocol': port['Protocol'],
+ 'mode': port.get('PublishMode', None),
+ 'published_port': int(port['PublishedPort']),
+ 'target_port': int(port['TargetPort'])
+ })
+
+ raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
+ if raw_data_limits:
+ raw_cpu_limits = raw_data_limits.get('NanoCPUs')
+ if raw_cpu_limits:
+ ds.limit_cpu = float(raw_cpu_limits) / 1000000000
+
+ raw_memory_limits = raw_data_limits.get('MemoryBytes')
+ if raw_memory_limits:
+ ds.limit_memory = int(raw_memory_limits)
+
+ raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
+ if raw_data_reservations:
+ raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
+ if raw_cpu_reservations:
+ ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
+
+ raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
+ if raw_memory_reservations:
+ ds.reserve_memory = int(raw_memory_reservations)
+
+ ds.labels = raw_data['Spec'].get('Labels')
+ ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
+ ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
+ ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
+
+ mode = raw_data['Spec']['Mode']
+ if 'Replicated' in mode.keys():
+ ds.mode = to_text('replicated', encoding='utf-8')
+ ds.replicas = mode['Replicated']['Replicas']
+ elif 'Global' in mode.keys():
+ ds.mode = 'global'
+ else:
+ raise Exception('Unknown service mode: %s' % mode)
+
+ raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
+ if raw_data_mounts:
+ ds.mounts = []
+ for mount_data in raw_data_mounts:
+ bind_options = mount_data.get('BindOptions', {})
+ volume_options = mount_data.get('VolumeOptions', {})
+ tmpfs_options = mount_data.get('TmpfsOptions', {})
+ driver_config = volume_options.get('DriverConfig', {})
+ driver_config = dict(
+ (key.lower(), value) for key, value in driver_config.items()
+ ) or None
+ ds.mounts.append({
+ 'source': mount_data.get('Source', ''),
+ 'type': mount_data['Type'],
+ 'target': mount_data['Target'],
+ 'readonly': mount_data.get('ReadOnly'),
+ 'propagation': bind_options.get('Propagation'),
+ 'no_copy': volume_options.get('NoCopy'),
+ 'labels': volume_options.get('Labels'),
+ 'driver_config': driver_config,
+ 'tmpfs_mode': tmpfs_options.get('Mode'),
+ 'tmpfs_size': tmpfs_options.get('SizeBytes'),
+ })
+
+ raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
+ if raw_data_configs:
+ ds.configs = []
+ for config_data in raw_data_configs:
+ ds.configs.append({
+ 'config_id': config_data['ConfigID'],
+ 'config_name': config_data['ConfigName'],
+ 'filename': config_data['File'].get('Name'),
+ 'uid': config_data['File'].get('UID'),
+ 'gid': config_data['File'].get('GID'),
+ 'mode': config_data['File'].get('Mode')
+ })
+
+ raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
+ if raw_data_secrets:
+ ds.secrets = []
+ for secret_data in raw_data_secrets:
+ ds.secrets.append({
+ 'secret_id': secret_data['SecretID'],
+ 'secret_name': secret_data['SecretName'],
+ 'filename': secret_data['File'].get('Name'),
+ 'uid': secret_data['File'].get('UID'),
+ 'gid': secret_data['File'].get('GID'),
+ 'mode': secret_data['File'].get('Mode')
+ })
+
+ raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
+ if raw_networks_data:
+ ds.networks = []
+ for network_data in raw_networks_data:
+ network = {'id': network_data['Target']}
+ if 'Aliases' in network_data:
+ network['aliases'] = network_data['Aliases']
+ if 'DriverOpts' in network_data:
+ network['options'] = network_data['DriverOpts']
+ ds.networks.append(network)
+ ds.service_version = raw_data['Version']['Index']
+ ds.service_id = raw_data['ID']
+
+ ds.init = task_template_data['ContainerSpec'].get('Init', False)
+ return ds
+
+ def update_service(self, name, old_service, new_service):
+ service_data = new_service.build_docker_service()
+ result = self.client.update_service(
+ old_service.service_id,
+ old_service.service_version,
+ name=name,
+ **service_data
+ )
+ # Prior to Docker SDK for Python 4.0.0 the result contained no warnings,
+ # so on older versions there is simply nothing to report.
+ # (see https://github.com/docker/docker-py/pull/2272)
+ self.client.report_warnings(result, ['Warning'])
+
+ def create_service(self, name, service):
+ service_data = service.build_docker_service()
+ result = self.client.create_service(name=name, **service_data)
+ self.client.report_warnings(result, ['Warning'])
+
+ def remove_service(self, name):
+ self.client.remove_service(name)
+
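+ # With resolve_image enabled, the image is pinned to its digest
+ # ('repo:tag@sha256:...') via the distribution endpoint, so the service
+ # runs exactly the image the tag currently points at.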
+ def get_image_digest(self, name, resolve=False):
+ if not name or not resolve:
+ return name
+ repo, tag = parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ name = repo + ':' + tag
+ distribution_data = self.client.inspect_distribution(name)
+ digest = distribution_data['Descriptor']['digest']
+ return '%s@%s' % (name, digest)
+
+ def get_networks_names_ids(self):
+ return dict(
+ (network['Name'], network['Id']) for network in self.client.networks()
+ )
+
+ def get_missing_secret_ids(self):
+ """
+ Resolve missing secret ids by looking them up by name
+ """
+ secret_names = [
+ secret['secret_name']
+ for secret in self.client.module.params.get('secrets') or []
+ if secret['secret_id'] is None
+ ]
+ if not secret_names:
+ return {}
+ secrets = self.client.secrets(filters={'name': secret_names})
+ secrets = dict(
+ (secret['Spec']['Name'], secret['ID'])
+ for secret in secrets
+ if secret['Spec']['Name'] in secret_names
+ )
+ for secret_name in secret_names:
+ if secret_name not in secrets:
+ self.client.fail(
+ 'Could not find a secret named "%s"' % secret_name
+ )
+ return secrets
+
+ def get_missing_config_ids(self):
+ """
+ Resolve missing config ids by looking them up by name
+ """
+ config_names = [
+ config['config_name']
+ for config in self.client.module.params.get('configs') or []
+ if config['config_id'] is None
+ ]
+ if not config_names:
+ return {}
+ configs = self.client.configs(filters={'name': config_names})
+ configs = dict(
+ (config['Spec']['Name'], config['ID'])
+ for config in configs
+ if config['Spec']['Name'] in config_names
+ )
+ for config_name in config_names:
+ if config_name not in configs:
+ self.client.fail(
+ 'Could not find a config named "%s"' % config_name
+ )
+ return configs
+
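+ # Main entry point: resolves the image digest and missing ids, builds the
+ # desired DockerService, compares it against the deployed one, and then
+ # creates, updates, rebuilds or removes the service accordingly.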
+ def run(self):
+ self.diff_tracker = DifferenceTracker()
+ module = self.client.module
+
+ image = module.params['image']
+ try:
+ image_digest = self.get_image_digest(
+ name=image,
+ resolve=module.params['resolve_image']
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'Error looking for an image named %s: %s'
+ % (image, e)
+ )
+
+ try:
+ current_service = self.get_service(module.params['name'])
+ except Exception as e:
+ self.client.fail(
+ 'Error looking for service named %s: %s'
+ % (module.params['name'], e)
+ )
+ try:
+ secret_ids = self.get_missing_secret_ids()
+ config_ids = self.get_missing_config_ids()
+ network_ids = self.get_networks_names_ids()
+ new_service = DockerService.from_ansible_params(
+ module.params,
+ current_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ self.client.docker_api_version,
+ self.client.docker_py_version
+ )
+ except Exception as e:
+ return self.client.fail(
+ 'Error parsing module parameters: %s' % e
+ )
+
+ changed = False
+ msg = 'noop'
+ rebuilt = False
+ differences = DifferenceTracker()
+ facts = {}
+
+ if current_service:
+ if module.params['state'] == 'absent':
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ msg = 'Service removed'
+ changed = True
+ else:
+ changed, differences, need_rebuild, force_update = new_service.compare(
+ current_service
+ )
+ if changed:
+ self.diff_tracker.merge(differences)
+ if need_rebuild:
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ self.create_service(
+ module.params['name'],
+ new_service
+ )
+ msg = 'Service rebuilt'
+ rebuilt = True
+ else:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service updated'
+ rebuilt = False
+ else:
+ if force_update:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service forcefully updated'
+ rebuilt = False
+ changed = True
+ else:
+ msg = 'Service unchanged'
+ facts = new_service.get_facts()
+ else:
+ if module.params['state'] == 'absent':
+ msg = 'Service absent'
+ else:
+ if not module.check_mode:
+ self.create_service(module.params['name'], new_service)
+ msg = 'Service created'
+ changed = True
+ facts = new_service.get_facts()
+
+ return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
+
+ def run_safe(self):
+ while True:
+ try:
+ return self.run()
+ except APIError as e:
+ # Sometimes Version.Index will have changed between an inspect and
+ # update. If this is encountered we'll retry the update.
+ if self.retries > 0 and 'update out of sequence' in str(e.explanation):
+ self.retries -= 1
+ time.sleep(1)
+ else:
+ raise
+
+
+def _detect_publish_mode_usage(client):
+ for publish_def in client.module.params['publish'] or []:
+ if publish_def.get('mode'):
+ return True
+ return False
+
+
+def _detect_healthcheck_start_period(client):
+ if client.module.params['healthcheck']:
+ return client.module.params['healthcheck']['start_period'] is not None
+ return False
+
+
+def _detect_mount_tmpfs_usage(client):
+ for mount in client.module.params['mounts'] or []:
+ if mount.get('type') == 'tmpfs':
+ return True
+ if mount.get('tmpfs_size') is not None:
+ return True
+ if mount.get('tmpfs_mode') is not None:
+ return True
+ return False
+
+
+def _detect_update_config_failure_action_rollback(client):
+ rollback_config_failure_action = (
+ (client.module.params['update_config'] or {}).get('failure_action')
+ )
+ update_failure_action = client.module.params['update_failure_action']
+ failure_action = rollback_config_failure_action or update_failure_action
+ return failure_action == 'rollback'
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ image=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mounts=dict(type='list', elements='dict', options=dict(
+ source=dict(type='str'),
+ target=dict(type='str', required=True),
+ type=dict(
+ type='str',
+ default='bind',
+ choices=['bind', 'volume', 'tmpfs', 'npipe'],
+ ),
+ readonly=dict(type='bool'),
+ labels=dict(type='dict'),
+ propagation=dict(
+ type='str',
+ choices=[
+ 'shared',
+ 'slave',
+ 'private',
+ 'rshared',
+ 'rslave',
+ 'rprivate'
+ ]
+ ),
+ no_copy=dict(type='bool'),
+ driver_config=dict(type='dict', options=dict(
+ name=dict(type='str'),
+ options=dict(type='dict')
+ )),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='int')
+ )),
+ configs=dict(type='list', elements='dict', options=dict(
+ config_id=dict(type='str'),
+ config_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ secrets=dict(type='list', elements='dict', options=dict(
+ secret_id=dict(type='str'),
+ secret_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ networks=dict(type='list', elements='raw'),
+ command=dict(type='raw'),
+ args=dict(type='list', elements='str'),
+ env=dict(type='raw'),
+ env_files=dict(type='list', elements='path'),
+ force_update=dict(type='bool', default=False),
+ groups=dict(type='list', elements='str'),
+ logging=dict(type='dict', options=dict(
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ log_driver=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ log_driver_options=dict(type='dict', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ publish=dict(type='list', elements='dict', options=dict(
+ published_port=dict(type='int', required=True),
+ target_port=dict(type='int', required=True),
+ protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
+ mode=dict(type='str', choices=['ingress', 'host']),
+ )),
+ placement=dict(type='dict', options=dict(
+ constraints=dict(type='list', elements='str'),
+ preferences=dict(type='list', elements='dict'),
+ )),
+ constraints=dict(type='list', elements='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ dns=dict(type='list', elements='str'),
+ dns_search=dict(type='list', elements='str'),
+ dns_options=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ hosts=dict(type='dict'),
+ labels=dict(type='dict'),
+ container_labels=dict(type='dict'),
+ mode=dict(
+ type='str',
+ default='replicated',
+ choices=['replicated', 'global']
+ ),
+ replicas=dict(type='int', default=-1),
+ endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
+ stop_grace_period=dict(type='str'),
+ stop_signal=dict(type='str'),
+ limits=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ limit_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ limit_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ read_only=dict(type='bool'),
+ reservations=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ reserve_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ reserve_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ resolve_image=dict(type='bool', default=False),
+ restart_config=dict(type='dict', options=dict(
+ condition=dict(type='str', choices=['none', 'on-failure', 'any']),
+ delay=dict(type='str'),
+ max_attempts=dict(type='int'),
+ window=dict(type='str'),
+ )),
+ restart_policy=dict(
+ type='str',
+ choices=['none', 'on-failure', 'any'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ restart_policy_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_attempts=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_window=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ rollback_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_parallelism=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ update_monitor=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_max_failure_ratio=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_order=dict(
+ type='str',
+ choices=['stop-first', 'start-first'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ user=dict(type='str'),
+ working_dir=dict(type='str'),
+ init=dict(type='bool'),
+ )
+
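+ # Maps options to the minimum Docker SDK for Python / Docker API versions
+ # that support them. Entries with a detect_usage callback only fail when
+ # the unsupported option (or option value) is actually used.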
+ option_minimal_versions = dict(
+ constraints=dict(docker_py_version='2.4.0'),
+ dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ update_max_failure_ratio=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_monitor=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_order=dict(docker_py_version='2.7.0', docker_api_version='1.29'),
+ stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
+ rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
+ init=dict(docker_py_version='4.0.0', docker_api_version='1.37'),
+ # specials
+ publish_mode=dict(
+ docker_py_version='3.0.0',
+ docker_api_version='1.25',
+ detect_usage=_detect_publish_mode_usage,
+ usage_msg='set publish.mode'
+ ),
+ healthcheck_start_period=dict(
+ docker_py_version='2.6.0',
+ docker_api_version='1.29',
+ detect_usage=_detect_healthcheck_start_period,
+ usage_msg='set healthcheck.start_period'
+ ),
+ update_config_max_failure_ratio=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'max_failure_ratio'
+ ) is not None,
+ usage_msg='set update_config.max_failure_ratio'
+ ),
+ update_config_failure_action=dict(
+ docker_py_version='3.5.0',
+ docker_api_version='1.28',
+ detect_usage=_detect_update_config_failure_action_rollback,
+ usage_msg='set update_config.failure_action.rollback'
+ ),
+ update_config_monitor=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'monitor'
+ ) is not None,
+ usage_msg='set update_config.monitor'
+ ),
+ update_config_order=dict(
+ docker_py_version='2.7.0',
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set update_config.order'
+ ),
+ placement_config_preferences=dict(
+ docker_py_version='2.4.0',
+ docker_api_version='1.27',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'preferences'
+ ) is not None,
+ usage_msg='set placement.preferences'
+ ),
+ placement_config_constraints=dict(
+ docker_py_version='2.4.0',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'constraints'
+ ) is not None,
+ usage_msg='set placement.constraints'
+ ),
+ mounts_tmpfs=dict(
+ docker_py_version='2.6.0',
+ detect_usage=_detect_mount_tmpfs_usage,
+ usage_msg='set mounts.tmpfs'
+ ),
+ rollback_config_order=dict(
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set rollback_config.order'
+ ),
+ )
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='2.0.2',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ dsm = DockerServiceManager(client)
+ msg, changed, rebuilt, changes, facts = dsm.run_safe()
+
+ results = dict(
+ msg=msg,
+ changed=changed,
+ rebuilt=rebuilt,
+ changes=changes,
+ swarm_service=facts,
+ )
+ if client.module._diff:
+ before, after = dsm.diff_tracker.get_before_after()
+ results['diff'] = dict(before=before, after=after)
+
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py
new file mode 100644
index 00000000..130be7b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+#
+# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+ - Retrieves information about a docker service.
+ - Essentially returns the output of C(docker service inspect <name>).
+ - Must be executed on a host running as a Swarm Manager; otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the service to inspect.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Hannes Ljungberg (@hannseman)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+ community.general.docker_swarm_service_info:
+ name: myservice
+ register: result
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the service exists.
+ type: bool
+ returned: always
+ sample: true
+service:
+ description:
+ - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+ - Will be C(none) if the service does not exist.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+def get_service_info(client):
+ service = client.module.params['name']
+ return client.get_service_inspect(
+ service_id=service,
+ skip_missing=True
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.0.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ service = get_service_info(client)
+
+ client.module.exit_json(
+ changed=False,
+ service=service,
+ exists=bool(service)
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py
new file mode 100644
index 00000000..dca92df5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+ - Create/remove Docker volumes.
+ - Performs largely the same function as the "docker volume" CLI subcommand.
+options:
+ volume_name:
+ description:
+ - Name of the volume to operate on.
+ type: str
+ required: yes
+ aliases:
+ - name
+
+ driver:
+ description:
+ - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
+ type: str
+ default: local
+
+ driver_options:
+ description:
+ - "Dictionary of volume settings. Consult docker docs for valid options and values:
+ U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
+ type: dict
+
+ labels:
+ description:
+      - Dictionary of label key/values to set for the volume.
+ type: dict
+
+ force:
+ description:
+      - With state C(present) causes the volume to be deleted and recreated if the volume already
+        exists and the driver, driver options or labels differ. This will cause any data in the existing
+        volume to be lost.
+      - Deprecated. Will be removed in community.general 2.0.0. Set I(recreate) to C(options-changed)
+        instead to get the same behavior as setting I(force) to C(yes).
+ type: bool
+
+ recreate:
+ description:
+ - Controls when a volume will be recreated when I(state) is C(present). Please
+ note that recreating an existing volume will cause **any data in the existing volume
+ to be lost!** The volume will be deleted and a new volume with the same name will be
+ created.
+      - The value C(always) forces the volume to always be recreated.
+      - The value C(never) makes sure the volume will not be recreated.
+      - The value C(options-changed) makes sure the volume will be recreated if the volume
+        already exists and the driver, driver options or labels differ.
+ type: str
+ default: never
+ choices:
+ - always
+ - never
+ - options-changed
+
+ state:
+ description:
+ - C(absent) deletes the volume.
+ - C(present) creates the volume, if it does not already exist.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Alex Grönholm (@agronholm)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.9.0"
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ community.general.docker_volume:
+ name: volume_one
+
+- name: Remove a volume
+ community.general.docker_volume:
+ name: volume_one
+ state: absent
+
+- name: Create a volume with options
+ community.general.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
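+
+# A hedged sketch combining documented options: labels are attached at
+# creation time, and recreate=options-changed rebuilds the volume (losing its
+# data!) if driver, driver options or labels differ from the existing volume.
+# The volume name and label values are placeholders.
+- name: Create a labelled volume, recreating it when its options change
+  community.general.docker_volume:
+    name: volume_three
+    labels:
+      environment: test
+    recreate: options-changed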
+'''
+
+RETURN = '''
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_volume). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ AnsibleDockerClient,
+ DifferenceTracker,
+ RequestException,
+)
+from ansible.module_utils.six import iteritems, text_type
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.volume_name = None
+ self.driver = None
+ self.driver_options = None
+ self.labels = None
+ self.force = None
+ self.recreate = None
+ self.debug = None
+
+ for key, value in iteritems(client.module.params):
+ setattr(self, key, value)
+
+ if self.force is not None:
+ if self.recreate != 'never':
+ client.fail('Cannot use the deprecated "force" '
+ 'option when "recreate" is set. Please stop '
+ 'using the force option.')
+ client.module.warn('The "force" option of docker_volume has been deprecated '
+ 'in Ansible 2.8. Please use the "recreate" '
+ 'option, which provides the same functionality as "force".')
+ self.recreate = 'options-changed' if self.force else 'never'
+
+
+class DockerVolumeManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_volume = self.get_existing_volume()
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_volume(self):
+ try:
+ volumes = self.client.volumes()
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ if volumes[u'Volumes'] is None:
+ return None
+
+ for volume in volumes[u'Volumes']:
+ if volume['Name'] == self.parameters.volume_name:
+ return volume
+
+ return None
+
+ def has_different_config(self):
+ """
+        Return the differences between the current parameters and the existing volume.
+
+        :return: DifferenceTracker of the options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
+ differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
+ if self.parameters.driver_options:
+ if not self.existing_volume.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=self.existing_volume.get('Options'))
+ else:
+ for key, value in iteritems(self.parameters.driver_options):
+ if (not self.existing_volume['Options'].get(key) or
+ value != self.existing_volume['Options'][key]):
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=self.existing_volume['Options'].get(key))
+ if self.parameters.labels:
+            existing_labels = self.existing_volume.get('Labels') or {}
+ for label in self.parameters.labels:
+ if existing_labels.get(label) != self.parameters.labels.get(label):
+ differences.add('labels.%s' % label,
+ parameter=self.parameters.labels.get(label),
+ active=existing_labels.get(label))
+
+ return differences
+
+ def create_volume(self):
+ if not self.existing_volume:
+ if not self.check_mode:
+ try:
+ params = dict(
+ driver=self.parameters.driver,
+ driver_opts=self.parameters.driver_options,
+ )
+
+ if self.parameters.labels is not None:
+ params['labels'] = self.parameters.labels
+
+ resp = self.client.create_volume(self.parameters.volume_name, **params)
+ self.existing_volume = self.client.inspect_volume(resp['Name'])
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_volume(self):
+ if self.existing_volume:
+ if not self.check_mode:
+ try:
+ self.client.remove_volume(self.parameters.volume_name)
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
+ self.results['changed'] = True
+
+ def present(self):
+ differences = DifferenceTracker()
+ if self.existing_volume:
+ differences = self.has_different_config()
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
+ if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
+ self.remove_volume()
+ self.existing_volume = None
+
+ self.create_volume()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ volume_facts = self.get_existing_volume()
+ self.results['ansible_facts'] = {u'docker_volume': volume_facts}
+ self.results['volume'] = volume_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
+ self.remove_volume()
+
+
+def main():
+ argument_spec = dict(
+ volume_name=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='local'),
+ driver_options=dict(type='dict', default={}),
+ labels=dict(type='dict'),
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
+ debug=dict(type='bool', default=False)
+ )
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ # "The docker server >= 1.9.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerVolumeManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py
new file mode 100644
index 00000000..c00c2425
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+ - Performs largely the same function as the "docker volume inspect" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the volume to inspect.
+ type: str
+ required: yes
+ aliases:
+ - volume_name
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Felix Fontein (@felixfontein)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on a volume
+ community.general.docker_volume_info:
+ name: mydata
+ register: result
+
+- name: Does volume exist?
+ ansible.builtin.debug:
+ msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+ ansible.builtin.debug:
+ var: result.volume
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the volume exists.
+ type: bool
+ returned: always
+ sample: true
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+    - Will be C(none) if the volume does not exist.
+ returned: success
+ type: dict
+ sample: '{
+ "CreatedAt": "2018-12-09T17:43:44+01:00",
+ "Driver": "local",
+ "Labels": null,
+ "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
+ "Name": "ansible-test-bd3f6172",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def get_existing_volume(client, volume_name):
+ try:
+ return client.inspect_volume(volume_name)
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ client.fail("Error inspecting volume: %s" % exc)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['volume_name']),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ volume = get_existing_volume(client, client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(volume),
+ volume=volume,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py
new file mode 100644
index 00000000..b7b57fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2020, Yann Amar <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dpkg_divert
+short_description: Override a Debian package's version of a file
+version_added: '0.2.0'
+author:
+ - quidame (@quidame)
+description:
+  - A diversion tells C(dpkg) that only a given package (or the local
+    administrator) is allowed to install a file at a given location. Other
+    packages shipping their own version of this file will be forced to
+    I(divert) it, i.e. to install it at another location. This makes it
+    possible to keep changes in a file provided by a Debian package, by
+    preventing it from being overwritten on package upgrade.
+  - This module manages diversions of Debian package files using the
+    C(dpkg-divert) commandline tool. It can either create or remove a
+    diversion for a given file, and also update an existing diversion
+    to modify its I(holder) and/or its I(divert) location.
+options:
+ path:
+ description:
+ - The original and absolute path of the file to be diverted or
+ undiverted. This path is unique, i.e. it is not possible to get
+ two diversions for the same I(path).
+ required: true
+ type: path
+ state:
+ description:
+ - When I(state=absent), remove the diversion of the specified
+ I(path); when I(state=present), create the diversion if it does
+ not exist, or update its package I(holder) or I(divert) location,
+ if it already exists.
+ type: str
+ default: present
+ choices: [absent, present]
+ holder:
+ description:
+ - The name of the package whose copy of file is not diverted, also
+ known as the diversion holder or the package the diversion belongs
+ to.
+      - The actual package does not have to be installed, or even to exist,
+        for its name to be valid. If not specified, the diversion is held
+        by 'LOCAL', which is reserved by dpkg for local diversions.
+ - This parameter is ignored when I(state=absent).
+ type: str
+ divert:
+ description:
+ - The location where the versions of file will be diverted.
+ - Default is to add suffix C(.distrib) to the file path.
+ - This parameter is ignored when I(state=absent).
+ type: path
+ rename:
+ description:
+ - Actually move the file aside (when I(state=present)) or back (when
+ I(state=absent)), but only when changing the state of the diversion.
+        This parameter has no effect when attempting to add a diversion that
+        already exists or when removing a nonexistent one.
+ - Unless I(force=true), renaming fails if the destination file already
+ exists (this lock being a dpkg-divert feature, and bypassing it being
+ a module feature).
+ type: bool
+ default: no
+ force:
+ description:
+ - When I(rename=true) and I(force=true), renaming is performed even if
+ the target of the renaming exists, i.e. the existing contents of the
+ file at this location will be lost.
+ - This parameter is ignored when I(rename=false).
+ type: bool
+ default: no
+notes:
+ - This module supports I(check_mode) and I(diff).
+requirements:
+ - dpkg-divert >= 1.15.0 (Debian family)
+'''
+
+EXAMPLES = r'''
+- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+
+- name: Divert /usr/bin/busybox by package 'branding'
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ holder: branding
+
+- name: Divert and rename busybox to busybox.dpkg-divert
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ divert: /usr/bin/busybox.dpkg-divert
+ rename: yes
+
+- name: Remove the busybox diversion and move the diverted file back
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ state: absent
+ rename: yes
+ force: yes
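+
+# Illustrative sketch: an existing diversion can also be updated in place
+# (here its holder), and the resulting state registered for inspection.
+# The 'branding' holder name is a placeholder.
+- name: Transfer the busybox diversion to the 'branding' package
+  community.general.dpkg_divert:
+    path: /usr/bin/busybox
+    holder: branding
+  register: divert_result
+
+- name: Show the diversion state reported by the module
+  ansible.builtin.debug:
+    var: divert_result.diversion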
+'''
+
+RETURN = r'''
+commands:
+  description: The dpkg-divert commands run internally by the module.
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc",
+ "/usr/bin/dpkg-divert --package ansible --no-rename --add /etc/foobarrc"
+ ]
+messages:
+ description: The dpkg-divert relevant messages (stdout or stderr).
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'",
+ "Adding 'diversion of /etc/foobarrc to /etc/foobarrc.distrib by ansible'"
+ ]
+diversion:
+ description: The status of the diversion after task execution.
+ type: dict
+ returned: always
+ contains:
+ divert:
+ description: The location of the diverted file.
+ type: str
+ holder:
+ description: The package holding the diversion.
+ type: str
+ path:
+ description: The path of the file to divert/undivert.
+ type: str
+ state:
+ description: The state of the diversion.
+ type: str
+ sample: |-
+ {
+ "divert": "/etc/foobarrc.distrib",
+ "holder": "LOCAL",
+ "path": "/etc/foobarrc"
+ "state": "present"
+ }
+'''
+
+
+import re
+import os
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def diversion_state(module, command, path):
+ diversion = dict(path=path, state='absent', divert=None, holder=None)
+ rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
+ if out:
+ diversion['state'] = 'present'
+ diversion['holder'] = out.rstrip()
+ rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
+ diversion['divert'] = out.rstrip()
+ return diversion
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True, type='path'),
+ state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
+ holder=dict(required=False, type='str'),
+ divert=dict(required=False, type='path'),
+ rename=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ state = module.params['state']
+ holder = module.params['holder']
+ divert = module.params['divert']
+ rename = module.params['rename']
+ force = module.params['force']
+
+ diversion_wanted = dict(path=path, state=state)
+ changed = False
+
+ DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
+ MAINCOMMAND = [DPKG_DIVERT]
+
+ # Option --listpackage is needed and comes with 1.15.0
+ rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
+ [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
+ if LooseVersion(current_version) < LooseVersion("1.15.0"):
+ module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
+ no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ path_exists = os.path.exists(b_path)
+ # Used for things not doable with a single dpkg-divert command (as forced
+ # renaming of files, and diversion's 'holder' or 'divert' updates).
+ target_exists = False
+ truename_exists = False
+
+ diversion_before = diversion_state(module, DPKG_DIVERT, path)
+ if diversion_before['state'] == 'present':
+ b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
+ truename_exists = os.path.exists(b_divert)
+
+ # Append options as requested in the task parameters, but ignore some of
+ # them when removing the diversion.
+ if rename:
+ MAINCOMMAND.append('--rename')
+ elif no_rename_is_supported:
+ MAINCOMMAND.append('--no-rename')
+
+ if state == 'present':
+ if holder and holder != 'LOCAL':
+ MAINCOMMAND.extend(['--package', holder])
+ diversion_wanted['holder'] = holder
+ else:
+ MAINCOMMAND.append('--local')
+ diversion_wanted['holder'] = 'LOCAL'
+
+ if divert:
+ MAINCOMMAND.extend(['--divert', divert])
+ target = divert
+ else:
+ target = '%s.distrib' % path
+
+ MAINCOMMAND.extend(['--add', path])
+ diversion_wanted['divert'] = target
+ b_target = to_bytes(target, errors='surrogate_or_strict')
+ target_exists = os.path.exists(b_target)
+
+ else:
+ MAINCOMMAND.extend(['--remove', path])
+ diversion_wanted['divert'] = None
+ diversion_wanted['holder'] = None
+
+ # Start to populate the returned objects.
+ diversion = diversion_before.copy()
+ maincommand = ' '.join(MAINCOMMAND)
+ commands = [maincommand]
+
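+    # In check mode, or when the wanted state already matches the current
+    # one, run dpkg-divert with --test so that nothing is changed on disk.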
+ if module.check_mode or diversion_wanted == diversion_before:
+ MAINCOMMAND.insert(1, '--test')
+ diversion_after = diversion_wanted
+
+ # Just try and see
+ rc, stdout, stderr = module.run_command(MAINCOMMAND)
+
+ if rc == 0:
+ messages = [stdout.rstrip()]
+
+ # else... cases of failure with dpkg-divert are:
+ # - The diversion does not belong to the same package (or LOCAL)
+ # - The divert filename is not the same (e.g. path.distrib != path.divert)
+ # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
+ # diverted file exist)
+
+ elif state != diversion_before['state']:
+ # There should be no case with 'divert' and 'holder' when creating the
+ # diversion from none, and they're ignored when removing the diversion.
+ # So this is all about renaming...
+ if rename and path_exists and (
+ (state == 'absent' and truename_exists) or
+ (state == 'present' and target_exists)):
+ if not force:
+ msg = "Set 'force' param to True to force renaming of files."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ else:
+ msg = "Unexpected error while changing state of the diversion."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+ to_remove = path
+ if state == 'present':
+ to_remove = target
+
+ if not module.check_mode:
+ try:
+ b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
+ os.unlink(b_remove)
+ except OSError as e:
+ msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+
+ messages = [stdout.rstrip()]
+
+ # The situation is that we want to modify the settings (holder or divert)
+ # of an existing diversion. dpkg-divert does not handle this, and we have
+ # to remove the existing diversion first, and then set a new one.
+ else:
+ RMDIVERSION = [DPKG_DIVERT, '--remove', path]
+ if no_rename_is_supported:
+ RMDIVERSION.insert(1, '--no-rename')
+ rmdiversion = ' '.join(RMDIVERSION)
+
+ if module.check_mode:
+ RMDIVERSION.insert(1, '--test')
+
+ if rename:
+ MAINCOMMAND.remove('--rename')
+ if no_rename_is_supported:
+ MAINCOMMAND.insert(1, '--no-rename')
+ maincommand = ' '.join(MAINCOMMAND)
+
+ commands = [rmdiversion, maincommand]
+ rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)
+
+ if module.check_mode:
+ messages = [rmdout.rstrip(), 'Running in check mode']
+ else:
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+ messages = [rmdout.rstrip(), stdout.rstrip()]
+
+ # Avoid if possible to orphan files (i.e. to dereference them in diversion
+ # database but let them in place), but do not make renaming issues fatal.
+ # BTW, this module is not about state of files involved in the diversion.
+ old = diversion_before['divert']
+ new = diversion_wanted['divert']
+ if new != old:
+ b_old = to_bytes(old, errors='surrogate_or_strict')
+ b_new = to_bytes(new, errors='surrogate_or_strict')
+ if os.path.exists(b_old) and not os.path.exists(b_new):
+ try:
+ os.rename(b_old, b_new)
+            except OSError:
+ pass
+
+ if not module.check_mode:
+ diversion_after = diversion_state(module, DPKG_DIVERT, path)
+
+ diversion = diversion_after.copy()
+ diff = dict()
+ if module._diff:
+ diff['before'] = diversion_before
+ diff['after'] = diversion_after
+
+ if diversion_after != diversion_before:
+ changed = True
+
+ if diversion_after == diversion_wanted:
+ module.exit_json(changed=changed, diversion=diversion,
+ commands=commands, messages=messages, diff=diff)
+ else:
+ msg = "Unexpected error: see stdout and stderr for details."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py
new file mode 100644
index 00000000..5e1d7930
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: easy_install
+short_description: Installs Python libraries
+description:
+ - Installs Python libraries, optionally in a I(virtualenv)
+options:
+ name:
+ type: str
+ description:
+ - A Python library name
+ required: true
+ virtualenv:
+ type: str
+ description:
+      - An optional I(virtualenv) directory path to install into. If the
+        I(virtualenv) does not exist, it is created automatically.
+ virtualenv_site_packages:
+ description:
+ - Whether the virtual environment will inherit packages from the
+ global site-packages directory. Note that if this setting is
+ changed on an already existing virtual environment it will not
+ have any effect, the environment must be deleted and newly
+ created.
+ type: bool
+ default: 'no'
+ virtualenv_command:
+ type: str
+ description:
+ - The command to create the virtual environment with. For example
+ C(pyvenv), C(virtualenv), C(virtualenv2).
+ default: virtualenv
+ executable:
+ type: str
+ description:
+ - The explicit executable or a pathname to the executable to be used to
+ run easy_install for a specific version of Python installed in the
+ system. For example C(easy_install-3.3), if there are both Python 2.7
+ and 3.3 installations in the system and you want to run easy_install
+ for the Python 3.3 installation.
+ default: easy_install
+ state:
+ type: str
+ description:
+ - The desired state of the library. C(latest) ensures that the latest version is installed.
+ choices: [present, latest]
+ default: present
+notes:
+ - Please note that the C(easy_install) module can only install Python
+ libraries. Thus this module is not able to remove libraries. It is
+ generally recommended to use the M(ansible.builtin.pip) module which you can first install
+ using M(community.general.easy_install).
+ - Also note that I(virtualenv) must be installed on the remote host if the
+ C(virtualenv) parameter is specified.
+requirements: [ "virtualenv" ]
+author: "Matt Wright (@mattupstate)"
+'''
+
+EXAMPLES = '''
+- name: Install or update pip
+ community.general.easy_install:
+ name: pip
+ state: latest
+
+- name: Install Bottle into the specified virtualenv
+ community.general.easy_install:
+ name: bottle
+ virtualenv: /webapps/myapp/venv
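+
+# A minimal sketch of the documented 'executable' option, selecting the
+# easy_install binary of a specific Python installation; the binary name
+# easy_install-3.3 comes from the option documentation above.
+- name: Install Bottle for a specific Python version
+  community.general.easy_install:
+    name: bottle
+    executable: easy_install-3.3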
+'''
+
+import os
+import os.path
+import tempfile
+from ansible.module_utils.basic import AnsibleModule
+
+
+def install_package(module, name, easy_install, executable_arguments):
+ cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
+ rc, out, err = module.run_command(cmd)
+ return rc, out, err
+
+
+def _is_package_installed(module, name, easy_install, executable_arguments):
+ # Copy and add to the arguments
+ executable_arguments = executable_arguments[:]
+ executable_arguments.append('--dry-run')
+ rc, out, err = install_package(module, name, easy_install, executable_arguments)
+ if rc:
+ module.fail_json(msg=err)
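+    # Heuristic: with --dry-run, easy_install prints 'Downloading ...' when
+    # the package would have to be fetched, i.e. when it is not yet installed.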
+ return 'Downloading' not in out
+
+
+def _get_easy_install(module, env=None, executable=None):
+ candidate_easy_inst_basenames = ['easy_install']
+ easy_install = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ easy_install = executable
+ else:
+ candidate_easy_inst_basenames.insert(0, executable)
+ if easy_install is None:
+ if env is None:
+ opt_dirs = []
+ else:
+ # Try easy_install with the virtualenv directory first.
+ opt_dirs = ['%s/bin' % env]
+ for basename in candidate_easy_inst_basenames:
+ easy_install = module.get_bin_path(basename, False, opt_dirs)
+ if easy_install is not None:
+ break
+ # easy_install should have been found by now. The final call to
+ # get_bin_path will trigger fail_json.
+ if easy_install is None:
+ basename = candidate_easy_inst_basenames[0]
+ easy_install = module.get_bin_path(basename, True, opt_dirs)
+ return easy_install
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'latest'],
+ type='str'),
+ virtualenv=dict(default=None, required=False),
+ virtualenv_site_packages=dict(default=False, type='bool'),
+ virtualenv_command=dict(default='virtualenv', required=False),
+ executable=dict(default='easy_install', required=False),
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ env = module.params['virtualenv']
+ executable = module.params['executable']
+ site_packages = module.params['virtualenv_site_packages']
+ virtualenv_command = module.params['virtualenv_command']
+ executable_arguments = []
+ if module.params['state'] == 'latest':
+ executable_arguments.append('--upgrade')
+
+ rc = 0
+ err = ''
+ out = ''
+
+ if env:
+ virtualenv = module.get_bin_path(virtualenv_command, True)
+
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ command = '%s %s' % (virtualenv, env)
+ if site_packages:
+ command += ' --system-site-packages'
+ cwd = tempfile.gettempdir()
+ rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
+
+ rc += rc_venv
+ out += out_venv
+ err += err_venv
+
+ easy_install = _get_easy_install(module, env, executable)
+
+ cmd = None
+ changed = False
+ installed = _is_package_installed(module, name, easy_install, executable_arguments)
+
+ if not installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)
+
+ rc += rc_easy_inst
+ out += out_easy_inst
+ err += err_easy_inst
+
+ changed = True
+
+ if rc != 0:
+ module.fail_json(msg=err, cmd=cmd)
+
+ module.exit_json(changed=changed, binary=easy_install,
+ name=name, virtualenv=env)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py
new file mode 100644
index 00000000..be63c920
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ejabberd_user
+author: "Peter Sprygada (@privateip)"
+short_description: Manages users for ejabberd servers
+requirements:
+ - ejabberd with mod_admin_extra
+description:
+ - This module provides user management for ejabberd servers
+options:
+ username:
+ type: str
+ description:
+ - the name of the user to manage
+ required: true
+ host:
+ type: str
+ description:
+ - the ejabberd host associated with this username
+ required: true
+ password:
+ type: str
+ description:
+ - the password to assign to the username
+ required: false
+ logging:
+ description:
+ - enables or disables the local syslog facility for this module
+ required: false
+ default: false
+ type: bool
+ state:
+ type: str
+ description:
+ - describe the desired state of the user to be managed
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+notes:
+  - The password parameter is required only when I(state) is C(present).
+ - Passwords must be stored in clear text for this release
+ - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+# Example playbook entries using the ejabberd_user module to manage users state.
+
+- name: Create a user if it does not exist
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ password: password
+
+- name: Delete a user if it exists
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ state: absent
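+
+# Illustrative: when the user already exists, the module updates the password
+# via 'change_password'; 'logging: true' mirrors the commands to syslog.
+# The credential values are placeholders.
+- name: Update a user's password and log commands to syslog
+  community.general.ejabberd_user:
+    username: test
+    host: server
+    password: newpassword
+    logging: true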
+'''
+
+import syslog
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class EjabberdUserException(Exception):
+ """ Base exception for EjabberdUser class object """
+ pass
+
+
+class EjabberdUser(object):
+ """ This object represents a user resource for an ejabberd server. The
+ object manages user creation and deletion using ejabberdctl. The following
+ commands are currently supported:
+ * ejabberdctl register
+        * ejabberdctl unregister
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.logging = module.params.get('logging')
+ self.state = module.params.get('state')
+ self.host = module.params.get('host')
+ self.user = module.params.get('username')
+ self.pwd = module.params.get('password')
+
+ @property
+ def changed(self):
+ """ This method will check the current user and see if the password has
+ changed. It will return True if the user does not match the supplied
+ credentials and False if it does not
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('check_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return rc
+
+ @property
+ def exists(self):
+ """ This method will check to see if the supplied username exists for
+ host specified. If the user exists True is returned, otherwise False
+ is returned
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('check_account', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return not bool(int(rc))
+
+ def log(self, entry):
+ """ This method will log information to the local syslog facility """
+ if self.logging:
+ syslog.openlog('ansible-%s' % self.module._name)
+ syslog.syslog(syslog.LOG_NOTICE, entry)
+
+ def run_command(self, cmd, options):
+ """ This method will run the any command specified and return the
+ returns using the Ansible common module
+ """
+ if not all(options):
+ raise EjabberdUserException
+
+ cmd = 'ejabberdctl %s ' % cmd
+ cmd += " ".join(options)
+ self.log('command: %s' % cmd)
+ return self.module.run_command(cmd.split())
+
+ def update(self):
+ """ The update method will update the credentials for the user provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('change_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def create(self):
+ """ The create method will create a new user on the host with the
+ password provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('register', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def delete(self):
+ """ The delete method will delete the user from the host
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('unregister', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ username=dict(required=True, type='str'),
+ password=dict(default=None, type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ logging=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ obj = EjabberdUser(module)
+
+ rc = None
+ result = dict(changed=False)
+
+ if obj.state == 'absent':
+ if obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.delete()
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ elif obj.state == 'present':
+ if not obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.create()
+ elif obj.changed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.update()
+ if rc is not None and rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
new file mode 100644
index 00000000..27a67406
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
+# (c) 2017, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticsearch_plugin
+short_description: Manage Elasticsearch plugins
+description:
+ - Manages Elasticsearch plugins.
+author:
+ - Mathew Davies (@ThePixelDeveloper)
+ - Sam Doran (@samdoran)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ src:
+ description:
+ - Optionally set the source location to retrieve the plugin from. This can be a file://
+ URL to install from a local file, or a remote URL. If this is not set, the plugin
+ location is just based on the name.
+ - The name parameter must match the descriptor in the plugin ZIP specified.
+ - Is only used if the state would change, which is solely checked based on the name
+ parameter. If, for example, the plugin is already installed, changing this has no
+ effect.
+ - For ES 1.x use url.
+ required: False
+ url:
+ description:
+ - Set exact URL to download the plugin from (Only works for ES 1.x).
+ - For ES 2.x and higher, use src.
+ required: False
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+      - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch >= 5.0.
+ default: 1m
+ force:
+ description:
+ - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
+ default: False
+ type: bool
+ plugin_bin:
+ description:
+ - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
+ - The default changed in Ansible 2.4 to None.
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Elasticsearch
+ default: /usr/share/elasticsearch/plugins/
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation
+ version:
+ description:
+      - Version of the plugin to be installed.
+        If the plugin exists with a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: present
+
+- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ version: 2.0.0
+
+- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: absent
+
+- name: Install a specific plugin in Elasticsearch >= 5.0
+ community.general.elasticsearch_plugin:
+ name: analysis-icu
+ state: present
+
+- name: Install the ingest-geoip plugin with a forced installation
+ community.general.elasticsearch_plugin:
+ name: ingest-geoip
+ state: present
+ force: yes
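+
+# A hedged sketch of the documented 'src' option for ES >= 2.x: the plugin is
+# installed from a local archive whose descriptor must match 'name'. The
+# archive path is a placeholder; 'src' and 'url' are mutually exclusive.
+- name: Install a plugin from a local archive
+  community.general.elasticsearch_plugin:
+    name: analysis-icu
+    src: file:///tmp/analysis-icu.zip
+    state: present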
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+PLUGIN_BIN_PATHS = tuple([
+ '/usr/share/elasticsearch/bin/elasticsearch-plugin',
+ '/usr/share/elasticsearch/bin/plugin'
+])
+
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+
+def is_plugin_present(plugin_name, plugin_dir):
+ return os.path.isdir(os.path.join(plugin_dir, plugin_name))
+
+
+def parse_error(string):
+ reason = "ERROR: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
+ is_old_command = (os.path.basename(plugin_bin) == 'plugin')
+
+ # Timeout and version are only valid for plugin, not elasticsearch-plugin
+ if is_old_command:
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+        if version:
+            # With the old 'plugin' command, a specific version is requested
+            # by suffixing the plugin name (name/version); the name is
+            # appended to the command arguments below.
+            plugin_name = plugin_name + '/' + version
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ # Legacy ES 1.x
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if force:
+ cmd_args.append("--batch")
+ if src:
+ cmd_args.append(src)
+ else:
+ cmd_args.append(plugin_name)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def get_plugin_bin(module, plugin_bin=None):
+ # Use the plugin_bin that was supplied first before trying other options
+ valid_plugin_bin = None
+ if plugin_bin and os.path.isfile(plugin_bin):
+ valid_plugin_bin = plugin_bin
+
+ else:
+ # Add the plugin_bin passed into the module to the top of the list of paths to test,
+ # testing for that binary name first before falling back to the default paths.
+ bin_paths = list(PLUGIN_BIN_PATHS)
+ if plugin_bin and plugin_bin not in bin_paths:
+ bin_paths.insert(0, plugin_bin)
+
+ # Get separate lists of dirs and binary names from the full paths to the
+ # plugin binaries.
+ plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
+ plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
+
+ # Check for the binary names in the default system paths as well as the path
+ # specified in the module arguments.
+ for bin_file in plugin_bins:
+ valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
+ if valid_plugin_bin:
+ break
+
+ if not valid_plugin_bin:
+ module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
+
+ return valid_plugin_bin
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ src=dict(default=None),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ force=dict(type='bool', default=False),
+ plugin_bin=dict(type="path"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ mutually_exclusive=[("src", "url")],
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ src = module.params["src"]
+ timeout = module.params["timeout"]
+ force = module.params["force"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ # Search provided path and system paths for valid binary
+ plugin_bin = get_plugin_bin(module, plugin_bin)
+
+ repo = parse_plugin_repo(name)
+ present = is_plugin_present(repo, plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
new file mode 100644
index 00000000..dfac03ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: emc_vnx_sg_member
+
+short_description: Manage storage group member on EMC VNX
+
+
+description:
+ - "This module manages the members of an existing storage group."
+
+extends_documentation_fragment:
+- community.general.emc.emc_vnx
+
+
+options:
+ name:
+ description:
+ - Name of the Storage group to manage.
+ required: true
+ lunid:
+ description:
+      - LUN id to be added to or removed from the storage group.
+ required: true
+ state:
+ description:
+ - Indicates the desired lunid state.
+ - C(present) ensures specified lunid is present in the Storage Group.
+ - C(absent) ensures specified lunid is absent from Storage Group.
+ default: present
+ choices: [ "present", "absent"]
+
+
+author:
+ - Luca 'remix_tj' Lorenzetto (@remixtj)
+'''
+
+EXAMPLES = '''
+- name: Add lun to storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: present
+
+- name: Remove lun from storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: absent
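+
+# Illustrative follow-up: the module returns 'hluid', the LUN id that hosts
+# attached to the storage group will see, which can be registered and reused.
+- name: Add lun to storage group and record the host-visible LUN id
+  community.general.emc_vnx_sg_member:
+    name: sg01
+    sp_address: sp1a.fqdn
+    sp_user: sysadmin
+    sp_password: sysadmin
+    lunid: 100
+    state: present
+  register: sg_result
+
+- name: Show the host-visible LUN id
+  ansible.builtin.debug:
+    var: sg_result.hluid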
+'''
+
+RETURN = '''
+hluid:
+ description: LUNID that hosts attached to the storage group will see.
+ type: int
+ returned: success
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
+
+LIB_IMP_ERR = None
+try:
+ from storops import VNXSystem
+ from storops.exception import VNXCredentialError, VNXStorageGroupError, \
+ VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
+ HAS_LIB = True
+except Exception:
+ LIB_IMP_ERR = traceback.format_exc()
+ HAS_LIB = False
+
+
+def run_module():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ lunid=dict(type='int', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module_args.update(emc_vnx_argument_spec)
+
+ result = dict(
+ changed=False,
+ hluid=None
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ if not HAS_LIB:
+ module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
+ exception=LIB_IMP_ERR)
+
+ sp_user = module.params['sp_user']
+ sp_address = module.params['sp_address']
+ sp_password = module.params['sp_password']
+ alu = module.params['lunid']
+
+    # If the user is running this module in check mode, we do not want to
+    # make any changes to the environment; just report the current state
+    # with no modifications.
+    if module.check_mode:
+        module.exit_json(**result)
+
+ try:
+ vnx = VNXSystem(sp_address, sp_user, sp_password)
+ sg = vnx.get_sg(module.params['name'])
+ if sg.existed:
+ if module.params['state'] == 'present':
+ if not sg.has_alu(alu):
+ try:
+ result['hluid'] = sg.attach_alu(alu)
+ result['changed'] = True
+ except VNXAluAlreadyAttachedError:
+ result['hluid'] = sg.get_hlu(alu)
+ except (VNXAttachAluError, VNXStorageGroupError) as e:
+ module.fail_json(msg='Error attaching {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ result['hluid'] = sg.get_hlu(alu)
+ if module.params['state'] == 'absent' and sg.has_alu(alu):
+ try:
+ sg.detach_alu(alu)
+ result['changed'] = True
+ except VNXDetachAluNotFoundError:
+ # being not attached when using absent is OK
+ pass
+ except VNXStorageGroupError as e:
+ module.fail_json(msg='Error detaching alu {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ module.fail_json(msg='No such storage group named '
+ '{0}'.format(module.params['name']),
+ **result)
+ except VNXCredentialError as e:
+ module.fail_json(msg='{0}'.format(to_native(e)), **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py
new file mode 100644
index 00000000..78838429
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+#
+# (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: etcd3
+short_description: "Set or delete key value pairs from an etcd3 cluster"
+requirements:
+ - etcd3
+description:
+  - Sets or deletes values in an etcd3 cluster using its v3 API.
+  - Needs the Python etcd3 library to work.
+options:
+ key:
+ description:
+      - The key where the information is stored in the cluster.
+    required: true
+  value:
+    description:
+      - The information to store.
+    required: true
+  host:
+    description:
+      - The IP address of the cluster.
+    default: 'localhost'
+  port:
+    description:
+      - The port number used to connect to the cluster.
+    default: 2379
+  state:
+    description:
+      - The state of the value for the key.
+      - Can be C(present) or C(absent).
+ required: true
+ choices: [ present, absent ]
+ user:
+ description:
+ - The etcd user to authenticate with.
+ password:
+ description:
+ - The password to use for authentication.
+ - Required if I(user) is defined.
+ ca_cert:
+ description:
+ - The Certificate Authority to use to verify the etcd host.
+ - Required if I(client_cert) and I(client_key) are defined.
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if I(client_key) is defined.
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if I(client_cert) is defined.
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+author:
+ - Jean-Philippe Evrard (@evrardjp)
+ - Victor Fauth (@vfauth)
+'''
+
+EXAMPLES = """
+- name: Store a value "baz3" under the key "foo" for a cluster located at "http://localhost:2379"
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ host: "localhost"
+ port: 2379
+ state: "present"
+
+- name: Authenticate using user/password combination with a timeout of 10 seconds
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ user: "someone"
+ password: "password123"
+ timeout: 10
+
+- name: Authenticate using TLS certificates
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ ca_cert: "/etc/ssl/certs/CA_CERT.pem"
+ client_cert: "/etc/ssl/certs/cert.crt"
+ client_key: "/etc/ssl/private/key.pem"
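+
+# A hedged sketch (not part of the original examples): deleting a key uses
+# state=absent; "value" is still required by the argument spec, but its
+# content is ignored on deletion.
+- name: Delete the key "foo"
+  community.general.etcd3:
+    key: "foo"
+    value: "baz3"
+    state: "absent"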
+"""
+
+RETURN = '''
+key:
+  description: The key that was queried.
+ returned: always
+ type: str
+old_value:
+  description: The previous value in the cluster.
+ returned: always
+ type: str
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+try:
+ import etcd3
+ HAS_ETCD = True
+except ImportError:
+ ETCD_IMP_ERR = traceback.format_exc()
+ HAS_ETCD = False
+
+
+def run_module():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ key=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=2379),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ user=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ ca_cert=dict(type='path'),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ timeout=dict(type='int'),
+ )
+
+    # seed the result dict in the object
+    # we primarily care about changed and state
+    # changed is True if this module effectively modified the target;
+    # state will include any data that you want your module to pass back
+    # for consumption, for example, in a subsequent task
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_together=[['client_cert', 'client_key'], ['user', 'password']],
+ )
+
+    # It is possible to set `ca_cert` to verify the server identity without
+    # setting `client_cert` or `client_key` to authenticate the client,
+    # so `required_together` alone cannot express this dependency.
+    # Thanks to `required_together=[['client_cert', 'client_key']]`, checking
+    # the presence of `client_cert` alone is enough here.
+ if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
+ module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")
+
+ result['key'] = module.params.get('key')
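+    # assumption based on python-etcd3's client() signature: it expects
+    # `cert_cert` and `cert_key` keyword arguments, so the Ansible-facing
+    # option names are renamed before building the client parameters below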
+ module.params['cert_cert'] = module.params.pop('client_cert')
+ module.params['cert_key'] = module.params.pop('client_key')
+
+ if not HAS_ETCD:
+ module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)
+
+ allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
+ 'timeout', 'user', 'password']
+ # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
+ # the minimum supported version
+ # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
+ client_params = dict()
+ for key, value in module.params.items():
+ if key in allowed_keys:
+ client_params[key] = value
+ try:
+ etcd = etcd3.client(**client_params)
+ except Exception as exp:
+ module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+ try:
+ cluster_value = etcd.get(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+
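+    # note: python-etcd3's get() returns a (value, metadata) tuple, and the
+    # value is None when the key does not exist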
+ # Make the cluster_value[0] a string for string comparisons
+ result['old_value'] = to_native(cluster_value[0])
+
+ if module.params['state'] == 'absent':
+ if cluster_value[0] is not None:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.delete(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ elif module.params['state'] == 'present':
+ if result['old_value'] != module.params['value']:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.put(module.params['key'], module.params['value'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="State not recognized")
+
+    # in the event of a successful module execution, simply call
+    # AnsibleModule.exit_json(), passing the key/value results
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py
new file mode 100644
index 00000000..abd2ebc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: facter
+short_description: Runs the discovery program I(facter) on the remote system
+description:
+ - Runs the C(facter) discovery program
+ (U(https://github.com/puppetlabs/facter)) on the remote system, returning
+ JSON data that can be useful for inventory purposes.
+options:
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
+requirements:
+ - facter
+ - ruby-json
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Example command-line invocation
+# ansible www.example.net -m facter
+
+- name: Execute facter with no arguments
+ community.general.facter:
+
+- name: Execute facter with arguments
+ community.general.facter:
+ arguments:
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arguments=dict(required=False, type='list', elements='str')
+ )
+ )
+
+    facter_path = module.get_bin_path(
+        'facter',
+        required=True,
+        opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--json"]
+ if module.params['arguments']:
+ cmd += module.params['arguments']
+
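+    # facter --json prints all facts as a single JSON object; with
+    # check_rc=True, run_command() fails the module if facter exits non-zero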
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py
new file mode 100644
index 00000000..2872b5ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com>
+# Sponsored by Oomph, Inc. http://www.oomphinc.com
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: archive
+short_description: Creates a compressed archive of one or more files or trees
+extends_documentation_fragment: files
+description:
+ - Creates or extends an archive.
+ - The source and archive are on the remote host, and the archive I(is not) copied to the local host.
+ - Source files can be deleted after archival by specifying I(remove=True).
+options:
+ path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
+ type: list
+ elements: path
+ required: true
+ format:
+ description:
+ - The type of compression to use.
+ - Support for xz was added in Ansible 2.5.
+ type: str
+ choices: [ bz2, gz, tar, xz, zip ]
+ default: gz
+ dest:
+ description:
+      - The file name of the destination archive. The parent directory must exist on the remote host.
+ - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+ type: path
+ exclude_path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion.
+ type: list
+ elements: path
+ force_archive:
+ description:
+ - Allow you to force the module to treat this as an archive even if only a single file is specified.
+      - By default the behaviour is maintained, i.e. when a single file is specified it is compressed only (not archived).
+ type: bool
+ default: false
+ remove:
+ description:
+ - Remove any added source files and trees after adding to archive.
+ type: bool
+ default: no
+notes:
+ - Requires tarfile, zipfile, gzip and bzip2 packages on target host.
+ - Requires lzma or backports.lzma if using xz format.
+ - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
+seealso:
+- module: ansible.builtin.unarchive
+author:
+- Ben Doherty (@bendoh)
+'''
+
+EXAMPLES = r'''
+- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
+ community.general.archive:
+ path: /path/to/foo
+ dest: /path/to/foo.tgz
+
+- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
+ community.general.archive:
+ path: /path/to/foo
+ remove: yes
+
+- name: Create a zip archive of /path/to/foo
+ community.general.archive:
+ path: /path/to/foo
+ format: zip
+
+- name: Create a bz2 archive of multiple files, rooted at /path
+ community.general.archive:
+ path:
+ - /path/to/foo
+ - /path/wong/foo
+ dest: /path/file.tar.bz2
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/bar
+ - /path/to/foo/baz
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/ba*
+ format: bz2
+
+- name: Use gzip to compress a single file (i.e. don't archive it first with tar)
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.gz
+ format: gz
+
+- name: Create a tar.gz archive of a single file.
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.tar.gz
+ format: gz
+ force_archive: true
+'''
+
+RETURN = r'''
+state:
+ description:
+ The current state of the archived file.
+ If 'absent', then no source files were found and the archive does not exist.
+    If 'compress', then the source file is in the compressed state.
+ If 'archive', then the source file or paths are currently archived.
+ If 'incomplete', then an archive was created, but not all source paths were found.
+ type: str
+ returned: always
+missing:
+ description: Any files that were missing from the source.
+ type: list
+ returned: success
+archived:
+ description: Any files that were compressed or added to the archive.
+ type: list
+ returned: success
+arcroot:
+ description: The archive root.
+ type: str
+ returned: always
+expanded_paths:
+ description: The list of matching paths from paths argument.
+ type: list
+ returned: always
+expanded_exclude_paths:
+ description: The list of matching exclude paths from the exclude_path argument.
+ type: list
+ returned: always
+'''
+
+import bz2
+import filecmp
+import glob
+import gzip
+import io
+import os
+import re
+import shutil
+import tarfile
+import zipfile
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import PY3
+
+
+LZMA_IMP_ERR = None
+if PY3:
+ try:
+ import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+else:
+ try:
+ from backports import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='list', elements='path', required=True),
+ format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+ dest=dict(type='path'),
+ exclude_path=dict(type='list', elements='path'),
+ force_archive=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ check_mode = module.check_mode
+ paths = params['path']
+ dest = params['dest']
+ b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict')
+ exclude_paths = params['exclude_path']
+ remove = params['remove']
+
+ b_expanded_paths = []
+ b_expanded_exclude_paths = []
+ fmt = params['format']
+ b_fmt = to_bytes(fmt, errors='surrogate_or_strict')
+ force_archive = params['force_archive']
+ globby = False
+ changed = False
+ state = 'absent'
+
+ # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
+ archive = False
+ b_successes = []
+
+ # Fail early
+ if not HAS_LZMA and fmt == 'xz':
+ module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"),
+ exception=LZMA_IMP_ERR)
+
+ for path in paths:
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_path or b'?' in b_path):
+ b_expanded_paths.extend(glob.glob(b_path))
+ globby = True
+
+ # If there are no glob characters the path is added to the expanded paths
+ # whether the path exists or not
+ else:
+ b_expanded_paths.append(b_path)
+
+ # Only attempt to expand the exclude paths if it exists
+ if exclude_paths:
+ for exclude_path in exclude_paths:
+ b_exclude_path = to_bytes(exclude_path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_exclude_path or b'?' in b_exclude_path):
+ b_expanded_exclude_paths.extend(glob.glob(b_exclude_path))
+
+ # If there are no glob character the exclude path is added to the expanded
+ # exclude paths whether the path exists or not.
+ else:
+ b_expanded_exclude_paths.append(b_exclude_path)
+
+ if not b_expanded_paths:
+ return module.fail_json(
+ path=', '.join(paths),
+ expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'),
+ msg='Error, no source paths were found'
+ )
+
+ # Only try to determine if we are working with an archive or not if we haven't set archive to true
+ if not force_archive:
+ # If we actually matched multiple files or TRIED to, then
+ # treat this as a multi-file archive
+ archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1
+ else:
+ archive = True
+
+ # Default created file name (for single-file archives) to
+ # <file>.<format>
+ if not b_dest and not archive:
+ b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt)
+
+ # Force archives to specify 'dest'
+ if archive and not b_dest:
+ module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')
+
+ b_sep = to_bytes(os.sep, errors='surrogate_or_strict')
+
+ b_archive_paths = []
+ b_missing = []
+ b_arcroot = b''
+
+ for b_path in b_expanded_paths:
+ # Use the longest common directory name among all the files
+ # as the archive root path
+ if b_arcroot == b'':
+ b_arcroot = os.path.dirname(b_path) + b_sep
+ else:
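+            # walk the current arcroot until it diverges from this path, then
+            # cut it back to the last directory they share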
+ for i in range(len(b_arcroot)):
+ if b_path[i] != b_arcroot[i]:
+ break
+
+ if i < len(b_arcroot):
+ b_arcroot = os.path.dirname(b_arcroot[0:i + 1])
+
+ b_arcroot += b_sep
+
+ # Don't allow archives to be created anywhere within paths to be removed
+ if remove and os.path.isdir(b_path):
+ b_path_dir = b_path
+ if not b_path.endswith(b'/'):
+ b_path_dir += b'/'
+
+ if b_dest.startswith(b_path_dir):
+ module.fail_json(
+ path=', '.join(paths),
+ msg='Error, created archive can not be contained in source paths when remove=True'
+ )
+
+ if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths:
+ b_archive_paths.append(b_path)
+ else:
+ b_missing.append(b_path)
+
+ # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
+ if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest):
+ # Just check the filename to know if it's an archive or simple compressed file
+ if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE):
+ state = 'archive'
+ else:
+ state = 'compress'
+
+ # Multiple files, or globbiness
+ elif archive:
+ if not b_archive_paths:
+ # No source files were found, but the archive is there.
+ if os.path.lexists(b_dest):
+ state = 'archive'
+ elif b_missing:
+ # SOME source files were found, but not all of them
+ state = 'incomplete'
+
+ archive = None
+ size = 0
+ errors = []
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ if state != 'archive':
+ if check_mode:
+ changed = True
+
+ else:
+ try:
+ # Slightly more difficult (and less efficient!) compression using zipfile module
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+
+ # Easier compression using tarfile module
+ elif fmt == 'gz' or fmt == 'bz2':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt)
+
+ # python3 tarfile module allows xz format but for python2 we have to create the tarfile
+ # in memory and then compress it with lzma.
+ elif fmt == 'xz':
+ arcfileIO = io.BytesIO()
+ arcfile = tarfile.open(fileobj=arcfileIO, mode='w')
+
+ # Or plain tar archiving
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+
+ b_match_root = re.compile(br'^%s' % re.escape(b_arcroot))
+ for b_path in b_archive_paths:
+ if os.path.isdir(b_path):
+ # Recurse into directories
+ for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True):
+ if not b_dirpath.endswith(b_sep):
+ b_dirpath += b_sep
+
+ for b_dirname in b_dirnames:
+ b_fullpath = b_dirpath + b_dirname
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ except Exception as e:
+ errors.append('%s: %s' % (n_fullpath, to_native(e)))
+
+ for b_filename in b_filenames:
+ b_fullpath = b_dirpath + b_filename
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ b_successes.append(b_fullpath)
+ except Exception as e:
+ errors.append('Adding %s: %s' % (to_native(b_path), to_native(e)))
+ else:
+ path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii')
+ arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict')
+ if fmt == 'zip':
+ arcfile.write(path, arcname)
+ else:
+ arcfile.add(path, arcname, recursive=False)
+
+ b_successes.append(b_path)
+
+ except Exception as e:
+ expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt)
+ module.fail_json(
+ msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)),
+ exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ state = 'archive'
+
+ if fmt == 'xz':
+ with lzma.open(b_dest, 'wb') as f:
+ f.write(arcfileIO.getvalue())
+ arcfileIO.close()
+
+ if errors:
+ module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))
+
+ if state in ['archive', 'incomplete'] and remove:
+ for b_path in b_successes:
+ try:
+ if os.path.isdir(b_path):
+ shutil.rmtree(b_path)
+ elif not check_mode:
+ os.remove(b_path)
+ except OSError as e:
+ errors.append(to_native(b_path))
+
+ if errors:
+ module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors)
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if not check_mode and os.path.getsize(b_dest) != size:
+ changed = True
+
+ if b_successes and state != 'incomplete':
+ state = 'archive'
+
+ # Simple, single-file compression
+ else:
+ b_path = b_expanded_paths[0]
+
+ # No source or compressed file
+ if not (os.path.exists(b_path) or os.path.lexists(b_dest)):
+ state = 'absent'
+
+ # if it already exists and the source file isn't there, consider this done
+ elif not os.path.lexists(b_path) and os.path.lexists(b_dest):
+ state = 'compress'
+
+ else:
+ if module.check_mode:
+ if not os.path.exists(b_dest):
+ changed = True
+ else:
+ size = 0
+ f_in = f_out = arcfile = None
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ try:
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+ arcfile.write(
+ to_native(b_path, errors='surrogate_or_strict', encoding='ascii'),
+ to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict')
+ )
+ arcfile.close()
+ state = 'archive' # because all zip files are archives
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+ arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii'))
+ arcfile.close()
+ else:
+ f_in = open(b_path, 'rb')
+
+ n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii')
+ if fmt == 'gz':
+ f_out = gzip.open(n_dest, 'wb')
+ elif fmt == 'bz2':
+ f_out = bz2.BZ2File(n_dest, 'wb')
+ elif fmt == 'xz':
+ f_out = lzma.LZMAFile(n_dest, 'wb')
+ else:
+ raise OSError("Invalid format")
+
+ shutil.copyfileobj(f_in, f_out)
+
+ b_successes.append(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ dest=dest,
+ msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ if f_in:
+ f_in.close()
+ if f_out:
+ f_out.close()
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if os.path.getsize(b_dest) != size:
+ changed = True
+
+ state = 'compress'
+
+ if remove and not check_mode:
+ try:
+ os.remove(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ msg='Unable to remove source file: %s' % to_native(e), exception=format_exc()
+ )
+
+ try:
+ file_args = module.load_file_common_arguments(params, path=b_dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ params['path'] = b_dest
+ file_args = module.load_file_common_arguments(params)
+
+ if not check_mode:
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ module.exit_json(
+ archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes],
+ dest=dest,
+ changed=changed,
+ state=state,
+ arcroot=to_native(b_arcroot, errors='surrogate_or_strict'),
+ missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing],
+ expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths],
+ expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths],
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py
new file mode 100644
index 00000000..0beaca9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py
@@ -0,0 +1,334 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ini_file
+short_description: Tweak settings in INI files
+extends_documentation_fragment: files
+description:
+ - Manage (add, remove, change) individual settings in an INI-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
+ - Adds missing sections if they don't exist.
+    - Before Ansible 2.0, comments were discarded when the source file was read, and therefore did not show up in the destination file.
+ - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
+ no other modifications need to be applied.
+options:
+ path:
+ description:
+ - Path to the INI-style file; this file is created if required.
+ - Before Ansible 2.3 this option was only usable as I(dest).
+ type: path
+ required: true
+ aliases: [ dest ]
+ section:
+ description:
+ - Section name in INI file. This is added if C(state=present) automatically when
+ a single value is being set.
+ - If left empty or set to C(null), the I(option) will be placed before the first I(section).
+ - Using C(null) is also required if the config format does not support sections.
+ type: str
+ required: true
+ option:
+ description:
+ - If set (required for changing a I(value)), this is the name of the option.
+ - May be omitted if adding/removing a whole I(section).
+ type: str
+ value:
+ description:
+ - The string value to be associated with an I(option).
+ - May be omitted when removing an I(option).
+ type: str
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ state:
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ no_extra_spaces:
+ description:
+ - Do not insert spaces before and after '=' symbol.
+ type: bool
+ default: no
+ create:
+ description:
+ - If set to C(no), the module will fail if the file does not already exist.
+ - By default it will create the file if it is missing.
+ type: bool
+ default: yes
+ allow_no_value:
+ description:
+ - Allow option without value and without '=' symbol.
+ type: bool
+ default: no
+notes:
+ - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+author:
+ - Jan-Piet Mens (@jpmens)
+ - Ales Nosek (@noseka1)
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' was used instead of 'path'
+- name: Ensure "fav=lemonade is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ section: drinks
+ option: fav
+ value: lemonade
+ mode: '0600'
+ backup: yes
+
+- name: Ensure "temperature=cold is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/anotherconf
+ section: drinks
+ option: temperature
+ value: cold
+ backup: yes
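+
+# A hedged sketch (not part of the original examples): removing a value uses
+# state=absent; omitting "option" would remove the entire section instead.
+- name: Ensure "fav" is absent from section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/conf
+    section: drinks
+    option: fav
+    state: absent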
+'''
+
+import os
+import re
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def match_opt(option, line):
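+    # matches the option both when active and when commented out with '#' or
+    # ';', so an inactive line can be replaced in place when state=present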
+ option = re.escape(option)
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def match_active_opt(option, line):
+ option = re.escape(option)
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def do_ini(module, filename, section=None, option=None, value=None,
+ state='present', backup=False, no_extra_spaces=False, create=True,
+ allow_no_value=False):
+
+ diff = dict(
+ before='',
+ after='',
+ before_header='%s (content)' % filename,
+ after_header='%s (content)' % filename,
+ )
+
+ if not os.path.exists(filename):
+ if not create:
+            module.fail_json(rc=257, msg='Destination %s does not exist!' % filename)
+ destpath = os.path.dirname(filename)
+ if not os.path.exists(destpath) and not module.check_mode:
+ os.makedirs(destpath)
+ ini_lines = []
+ else:
+ ini_file = open(filename, 'r')
+ try:
+ ini_lines = ini_file.readlines()
+ finally:
+ ini_file.close()
+
+ if module._diff:
+ diff['before'] = ''.join(ini_lines)
+
+ changed = False
+
+ # ini file could be empty
+ if not ini_lines:
+ ini_lines.append('\n')
+
+ # last line of file may not contain a trailing newline
+ if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
+ ini_lines[-1] += '\n'
+ changed = True
+
+    # append fake section lines to simplify the logic
+    # At top:
+    # a fake random section that will not match any other section in the file
+    # (using a commit hash as the fake section name)
+    fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5"
+
+    # Insert it at the beginning
+    ini_lines.insert(0, '[%s]' % fake_section_name)
+
+    # At bottom:
+    ini_lines.append('[')
+
+ # If no section is defined, fake section is used
+ if not section:
+ section = fake_section_name
+
+ within_section = not section
+ section_start = 0
+ msg = 'OK'
+ if no_extra_spaces:
+ assignment_format = '%s=%s\n'
+ else:
+ assignment_format = '%s = %s\n'
+
+ for index, line in enumerate(ini_lines):
+ if line.startswith('[%s]' % section):
+ within_section = True
+ section_start = index
+ elif line.startswith('['):
+ if within_section:
+ if state == 'present':
+ # insert missing option line at the end of the section
+ for i in range(index, 0, -1):
+ # search backwards for previous non-blank or non-comment line
+ if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
+ if not value and allow_no_value:
+ ini_lines.insert(i, '%s\n' % option)
+ else:
+ ini_lines.insert(i, assignment_format % (option, value))
+ msg = 'option added'
+ changed = True
+ break
+ elif state == 'absent' and not option:
+ # remove the entire section
+ del ini_lines[section_start:index]
+ msg = 'section removed'
+ changed = True
+ break
+ else:
+ if within_section and option:
+ if state == 'present':
+ # change the existing option line
+ if match_opt(option, line):
+ if not value and allow_no_value:
+ newline = '%s\n' % option
+ else:
+ newline = assignment_format % (option, value)
+ option_changed = ini_lines[index] != newline
+ changed = changed or option_changed
+ if option_changed:
+ msg = 'option changed'
+ ini_lines[index] = newline
+ if option_changed:
+ # remove all possible option occurrences from the rest of the section
+ index = index + 1
+ while index < len(ini_lines):
+ line = ini_lines[index]
+ if line.startswith('['):
+ break
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ else:
+ index = index + 1
+ break
+ elif state == 'absent':
+ # delete the existing line
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ changed = True
+ msg = 'option changed'
+ break
+
+ # remove the fake section line
+ del ini_lines[0]
+ del ini_lines[-1:]
+
+ if not within_section and option and state == 'present':
+ ini_lines.append('[%s]\n' % section)
+ if not value and allow_no_value:
+ ini_lines.append('%s\n' % option)
+ else:
+ ini_lines.append(assignment_format % (option, value))
+ changed = True
+ msg = 'section and option added'
+
+ if module._diff:
+ diff['after'] = ''.join(ini_lines)
+
+ backup_file = None
+ if changed and not module.check_mode:
+ if backup:
+ backup_file = module.backup_local(filename)
+
+ try:
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'w')
+ f.writelines(ini_lines)
+ f.close()
+ except IOError:
+            module.fail_json(msg="Unable to create temporary file", traceback=traceback.format_exc())
+
+ try:
+ module.atomic_move(tmpfile, filename)
+ except IOError:
+            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
+                             traceback=traceback.format_exc())
+
+ return (changed, backup_file, diff, msg)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest']),
+ section=dict(type='str', required=True),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ no_extra_spaces=dict(type='bool', default=False),
+ allow_no_value=dict(type='bool', default=False),
+ create=dict(type='bool', default=True)
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ section = module.params['section']
+ option = module.params['option']
+ value = module.params['value']
+ state = module.params['state']
+ backup = module.params['backup']
+ no_extra_spaces = module.params['no_extra_spaces']
+ allow_no_value = module.params['allow_no_value']
+ create = module.params['create']
+
+ (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
+
+ if not module.check_mode and os.path.exists(path):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ results = dict(
+ changed=changed,
+ diff=diff,
+ msg=msg,
+ path=path,
+ )
+ if backup_file is not None:
+ results['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py
new file mode 100644
index 00000000..bf6359b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2020, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: iso_create
+short_description: Generate ISO file with specified files or folders
+description:
+  - This module is used to generate an ISO file from the specified files or folders.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+requirements:
+- "pycdlib"
+- "python >= 2.7"
+version_added: '0.2.0'
+
+options:
+ src_files:
+ description:
+ - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
+ - Will fail if specified file or folder in C(src_files) does not exist on local machine.
+ - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and
+ underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path
+ names are limited to 255 characters.'
+ type: list
+ required: yes
+ elements: path
+ dest_iso:
+ description:
+ - The absolute path with file name of the new generated ISO file on local machine.
+      - Will create intermediate folders when they do not exist.
+ type: path
+ required: yes
+ interchange_level:
+ description:
+ - The ISO9660 interchange level to use, it dictates the rules on the names of files.
+ - Levels and valid values C(1), C(2), C(3), C(4) are supported.
+ - The default value is level C(1), which is the most conservative, level C(3) is recommended.
+ - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension.
+ type: int
+ default: 1
+ choices: [1, 2, 3, 4]
+ vol_ident:
+ description:
+ - The volume identification string to use on the new generated ISO image.
+ type: str
+ rock_ridge:
+ description:
+ - Whether to make this ISO have the Rock Ridge extensions or not.
+      - Valid values are C(1.09), C(1.10) or C(1.12), meaning that the specified Rock Ridge version is added to the ISO.
+      - If unsure, set C(1.09) to ensure maximum compatibility.
+      - If not specified, no Rock Ridge extension is added to the ISO.
+ type: str
+ choices: ['1.09', '1.10', '1.12']
+ joliet:
+ description:
+ - Support levels and valid values are C(1), C(2), or C(3).
+ - Level C(3) is by far the most common.
+ - If not specified, then no Joliet support is added.
+ type: int
+ choices: [1, 2, 3]
+ udf:
+ description:
+ - Whether to add UDF support to this ISO.
+ - If set to C(True), then version 2.60 of the UDF spec is used.
+ - If not specified or set to C(False), then no UDF support is added.
+ type: bool
+ default: False
+'''
+
+EXAMPLES = r'''
+- name: Create an ISO file
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ interchange_level: 3
+
+- name: Create an ISO file with Rock Ridge extension
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ rock_ridge: 1.09
+
+- name: Create an ISO file with Joliet support
+ community.general.iso_create:
+ src_files:
+ - ./windows_config/Autounattend.xml
+ dest_iso: ./test.iso
+ interchange_level: 3
+ joliet: 3
+ vol_ident: WIN_AUTOINSTALL
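+
+# A hedged sketch (not part of the original examples): udf=yes makes the
+# module pass UDF 2.60 support through to pycdlib.
+- name: Create an ISO file with UDF support
+  community.general.iso_create:
+    src_files:
+      - /root/testfolder
+    dest_iso: /tmp/test_udf.iso
+    udf: yes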
+'''
+
+RETURN = r'''
+source_file:
+ description: Configured source files or directories list.
+ returned: on success
+ type: list
+ elements: path
+ sample: ["/path/to/file.txt", "/path/to/folder"]
+created_iso:
+ description: Created iso file path.
+ returned: on success
+ type: str
+ sample: "/path/to/test.iso"
+interchange_level:
+ description: Configured interchange level.
+ returned: on success
+ type: int
+ sample: 3
+vol_ident:
+ description: Configured volume identification string.
+ returned: on success
+ type: str
+ sample: "OEMDRV"
+joliet:
+ description: Configured Joliet support level.
+ returned: on success
+ type: int
+ sample: 3
+rock_ridge:
+ description: Configured Rock Ridge version.
+ returned: on success
+ type: str
+ sample: "1.09"
+udf:
+ description: Configured UDF support.
+ returned: on success
+ type: bool
+ sample: False
+'''
+
+import os
+import traceback
+
+PYCDLIB_IMP_ERR = None
+try:
+ import pycdlib
+ HAS_PYCDLIB = True
+except ImportError:
+ PYCDLIB_IMP_ERR = traceback.format_exc()
+ HAS_PYCDLIB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot,
+ # followed by a maximum 3 character extension, followed by a semicolon and a version
+ file_name = os.path.basename(file_path)
+ if '.' not in file_name:
+ file_in_iso_path = file_path.upper() + '.;1'
+ else:
+ file_in_iso_path = file_path.upper() + ';1'
+ if rock_ridge:
+ rr_name = file_name
+ if use_joliet:
+ joliet_path = file_path
+ if use_udf:
+ udf_path = file_path
+ try:
+ iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err)))
+
+
+def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ iso_dir_path = dir_path.upper()
+ if rock_ridge:
+ rr_name = os.path.basename(dir_path)
+ if use_joliet:
+ joliet_path = iso_dir_path
+ if use_udf:
+ udf_path = iso_dir_path
+ try:
+ iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+        module.fail_json(msg="Failed to add directory %s to ISO file due to %s" % (dir_path, to_native(err)))
+
+
+def main():
+ argument_spec = dict(
+ src_files=dict(type='list', required=True, elements='path'),
+ dest_iso=dict(type='path', required=True),
+ interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ vol_ident=dict(type='str'),
+ rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']),
+ joliet=dict(type='int', choices=[1, 2, 3]),
+ udf=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if not HAS_PYCDLIB:
+ module.fail_json(missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR)
+
+ src_file_list = module.params.get('src_files')
+    if not src_file_list:
+ module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.')
+ for src_file in src_file_list:
+ if not os.path.exists(src_file):
+ module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file)
+
+ dest_iso = module.params.get('dest_iso')
+    if not dest_iso:
+ module.fail_json(msg='Please specify the absolute path of the new created ISO file using dest_iso parameter.')
+
+ dest_iso_dir = os.path.dirname(dest_iso)
+ if dest_iso_dir and not os.path.exists(dest_iso_dir):
+ # will create intermediate dir for new ISO file
+ try:
+ os.makedirs(dest_iso_dir)
+ except OSError as err:
+ module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err)))
+
+ volume_id = module.params.get('vol_ident')
+ if volume_id is None:
+ volume_id = ''
+ inter_level = module.params.get('interchange_level')
+ rock_ridge = module.params.get('rock_ridge')
+ use_joliet = module.params.get('joliet')
+ use_udf = None
+ if module.params['udf']:
+ use_udf = '2.60'
+
+ result = dict(
+ changed=False,
+ source_file=src_file_list,
+ created_iso=dest_iso,
+ interchange_level=inter_level,
+ vol_ident=volume_id,
+ rock_ridge=rock_ridge,
+ joliet=use_joliet,
+ udf=use_udf
+ )
+ if not module.check_mode:
+ iso_file = pycdlib.PyCdlib()
+ iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf)
+
+ for src_file in src_file_list:
+ # if specify a dir then go through the dir to add files and dirs
+ if os.path.isdir(src_file):
+ dir_list = []
+ file_list = []
+ src_file = src_file.rstrip('/')
+ dir_name = os.path.basename(src_file)
+ add_directory(module, iso_file=iso_file, dir_path='/' + dir_name, rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+
+ # get dir list and file list
+ for path, dirs, files in os.walk(src_file):
+ for filename in files:
+ file_list.append(os.path.join(path, filename))
+ for dir in dirs:
+ dir_list.append(os.path.join(path, dir))
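+                # strip src_file's parent directory from each path so entries
+                # are rooted at '/<top-level folder name>' inside the ISO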
+ for new_dir in dir_list:
+ add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1],
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+ for new_file in file_list:
+ add_file(module, iso_file=iso_file, src_file=new_file,
+ file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+ # if specify a file then add this file directly to the '/' path in ISO
+ else:
+ add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file),
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+
+ iso_file.write(dest_iso)
+ iso_file.close()
+
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py
new file mode 100644
index 00000000..0c73ac96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# Copyright: (c) 2016, Matt Robinson <git@nerdoftheherd.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Jeroen Hoekx (@jhoekx)
+- Matt Robinson (@ribbons)
+- Dag Wieers (@dagwieers)
+module: iso_extract
+short_description: Extract files from an ISO image
+description:
+- This module has two possible ways of operation.
+- If 7zip is installed on the system, this module extracts files from an ISO
+ into a temporary directory and copies files to a given destination,
+ if needed.
+- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
+ mounts the ISO image to a temporary location, and copies files to a given
+ destination, if needed.
+requirements:
+- Either 7z (from I(7zip) or I(p7zip) package)
+- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
+options:
+ image:
+ description:
+ - The ISO image to extract files from.
+ type: path
+ required: yes
+ aliases: [ path, src ]
+ dest:
+ description:
+ - The destination directory to extract files to.
+ type: path
+ required: yes
+ files:
+ description:
+ - A list of files to extract from the image.
+ - Extracting directories does not work.
+ type: list
+ elements: str
+ required: yes
+ force:
+ description:
+    - If C(yes), the remote file will be replaced when its contents differ from the source.
+ - If C(no), the file will only be extracted and copied if the destination does not already exist.
+ - Alias C(thirsty) has been deprecated and will be removed in community.general 3.0.0.
+ type: bool
+ default: yes
+ aliases: [ thirsty ]
+ executable:
+ description:
+ - The path to the C(7z) executable to use for extracting files from the ISO.
+ type: path
+ default: '7z'
+notes:
+- Only the file checksum (content) is taken into account when extracting files
+  from the ISO image. If C(force=no), only the presence of the file is checked.
+- In Ansible 2.3 this module was using C(mount) and C(umount) commands only,
+ requiring root access. This is no longer needed with the introduction of 7zip
+ for extraction.
+'''
+
+EXAMPLES = r'''
+- name: Extract kernel and ramdisk from a LiveCD
+ community.general.iso_extract:
+ image: /tmp/rear-test.iso
+ dest: /tmp/virt-rear/
+ files:
+ - isolinux/kernel
+ - isolinux/initrd.cgz
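+
+# A hedged sketch (not part of the original examples): pointing the module at
+# a 7z binary in a non-default location via the "executable" option.
+- name: Extract a file using a specific 7z binary
+  community.general.iso_extract:
+    image: /tmp/rear-test.iso
+    dest: /tmp/virt-rear/
+    files:
+      - isolinux/kernel
+    executable: /usr/local/bin/7z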
+'''
+
+RETURN = r'''
+#
+'''
+
+import os.path
+import shutil
+import tempfile
+
+try: # python 3.3+
+ from shlex import quote
+except ImportError: # older python
+ from pipes import quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(type='path', required=True, aliases=['path', 'src']),
+ dest=dict(type='path', required=True),
+ files=dict(type='list', elements='str', required=True),
+ force=dict(type='bool', default=True, aliases=['thirsty']),
+ executable=dict(type='path'), # No default on purpose
+ ),
+ supports_check_mode=True,
+ )
+ image = module.params['image']
+ dest = module.params['dest']
+ files = module.params['files']
+ force = module.params['force']
+ executable = module.params['executable']
+
+ if module.params.get('thirsty'):
+ module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = dict(
+ changed=False,
+ dest=dest,
+ image=image,
+ )
+
+ # We want to know if the user provided it or not, so we set default here
+ if executable is None:
+ executable = '7z'
+
+ binary = module.get_bin_path(executable, None)
+
+ # When executable was provided and binary not found, warn user !
+ if module.params['executable'] is not None and not binary:
+ module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)
+
+ if not os.path.exists(dest):
+ module.fail_json(msg="Directory '%s' does not exist" % dest)
+
+    if not os.path.exists(image):
+ module.fail_json(msg="ISO image '%s' does not exist" % image)
+
+ result['files'] = []
+ extract_files = list(files)
+
+ if not force:
+ # Check if we have to process any files based on existence
+ for f in files:
+ dest_file = os.path.join(dest, os.path.basename(f))
+ if os.path.exists(dest_file):
+ result['files'].append(dict(
+ checksum=None,
+ dest=dest_file,
+ src=f,
+ ))
+ extract_files.remove(f)
+
+ if not extract_files:
+ module.exit_json(**result)
+
+ tmp_dir = tempfile.mkdtemp()
+
+ # Use 7zip when we have a binary, otherwise try to mount
+ if binary:
+ cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files]))
+ else:
+ cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ result.update(dict(
+ cmd=cmd,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ ))
+ shutil.rmtree(tmp_dir)
+
+ if binary:
+ module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
+ else:
+ module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)
+
+ try:
+ for f in extract_files:
+ tmp_src = os.path.join(tmp_dir, f)
+ if not os.path.exists(tmp_src):
+ module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)
+
+ src_checksum = module.sha1(tmp_src)
+
+ dest_file = os.path.join(dest, os.path.basename(f))
+
+ if os.path.exists(dest_file):
+ dest_checksum = module.sha1(dest_file)
+ else:
+ dest_checksum = None
+
+ result['files'].append(dict(
+ checksum=src_checksum,
+ dest=dest_file,
+ src=f,
+ ))
+
+ if src_checksum != dest_checksum:
+ if not module.check_mode:
+ shutil.copy(tmp_src, dest_file)
+
+ result['changed'] = True
+ finally:
+ if not binary:
+ module.run_command('umount "%s"' % tmp_dir)
+
+ shutil.rmtree(tmp_dir)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py
new file mode 100644
index 00000000..7100d378
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: read_csv
+short_description: Read a CSV file
+description:
+- Read a CSV file and return a list or a dictionary, containing one dictionary per row.
+author:
+- Dag Wieers (@dagwieers)
+options:
+ path:
+ description:
+ - The CSV filename to read data from.
+ type: path
+ required: yes
+ aliases: [ filename ]
+ key:
+ description:
+ - The column name used as a key for the resulting dictionary.
+ - If C(key) is unset, the module returns a list of dictionaries,
+ where each dictionary is a row in the CSV file.
+ type: str
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include C(excel), C(excel-tab) or C(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ unique:
+ description:
+ - Whether the C(key) used is expected to be unique.
+ type: bool
+ default: yes
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+      - Whether to ignore any whitespace immediately following the delimiter.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+notes:
+- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja.
+'''
+
+EXAMPLES = r'''
+# Example CSV file with header
+#
+# name,uid,gid
+# dag,500,500
+# jeroen,501,500
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ community.general.read_csv:
+ path: users.csv
+ key: name
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}'
+
+# Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}'
+
+# Example CSV file without header and semi-colon delimiter
+#
+# dag;500;500
+# jeroen;501;500
+
+# Read a CSV file without headers
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ fieldnames: name,uid,gid
+ delimiter: ';'
+ register: users
+ delegate_to: localhost
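+
+# A hedged sketch (not part of the original examples): with unique=no,
+# duplicate key values overwrite earlier rows instead of failing the module.
+- name: Read users keyed by GID, allowing duplicate key values
+  community.general.read_csv:
+    path: users.csv
+    key: gid
+    unique: no
+  register: users
+  delegate_to: localhost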
+'''
+
+RETURN = r'''
+dict:
+ description: The CSV content as a dictionary.
+ returned: success
+ type: dict
+ sample:
+ dag:
+ name: dag
+ uid: 500
+ gid: 500
+ jeroen:
+ name: jeroen
+ uid: 501
+ gid: 500
+list:
+ description: The CSV content as a list.
+ returned: success
+ type: list
+ sample:
+ - name: dag
+ uid: 500
+ gid: 500
+ - name: jeroen
+ uid: 501
+ gid: 500
+'''
+
+import csv
+from io import BytesIO, StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import PY3
+
+
+# Add Unix dialect from Python 3
+class unix_dialect(csv.Dialect):
+ """Describe the usual properties of Unix-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = csv.QUOTE_ALL
+
+
+csv.register_dialect("unix", unix_dialect)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['filename']),
+ dialect=dict(type='str', default='excel'),
+ key=dict(type='str'),
+ fieldnames=dict(type='list', elements='str'),
+ unique=dict(type='bool', default=True),
+ delimiter=dict(type='str'),
+ skipinitialspace=dict(type='bool'),
+ strict=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ dialect = module.params['dialect']
+ key = module.params['key']
+ fieldnames = module.params['fieldnames']
+ unique = module.params['unique']
+
+ if dialect not in csv.list_dialects():
+ module.fail_json(msg="Dialect '%s' is not supported by your version of python." % dialect)
+
+ dialect_options = dict(
+ delimiter=module.params['delimiter'],
+ skipinitialspace=module.params['skipinitialspace'],
+ strict=module.params['strict'],
+ )
+
+ # Create a dictionary from only set options
+ dialect_params = dict((k, v) for k, v in dialect_options.items() if v is not None)
+ if dialect_params:
+ try:
+ csv.register_dialect('custom', dialect, **dialect_params)
+ except TypeError as e:
+ module.fail_json(msg="Unable to create custom dialect: %s" % to_text(e))
+ dialect = 'custom'
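+ # e.g. delimiter=';' registers a 'custom' dialect layered on the chosen
+ # base dialect (default 'excel'), overriding only the options set above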
+
+ try:
+ with open(path, 'rb') as f:
+ data = f.read()
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unable to open file: %s" % to_text(e))
+
+ if PY3:
+ # Manually decode on Python3 so that we can use the surrogateescape error handler
+ data = to_text(data, errors='surrogate_or_strict')
+ fake_fh = StringIO(data)
+ else:
+ fake_fh = BytesIO(data)
+
+ reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
+
+ if key and key not in reader.fieldnames:
+ module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
+
+ data_dict = dict()
+ data_list = list()
+
+ if key is None:
+ try:
+ for row in reader:
+ data_list.append(row)
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+ else:
+ try:
+ for row in reader:
+ if unique and row[key] in data_dict:
+ module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
+ data_dict[row[key]] = row
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+
+ module.exit_json(dict=data_dict, list=data_list)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py
new file mode 100644
index 00000000..8b1449be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xattr
+short_description: Manage user-defined extended attributes
+description:
+ - Manages filesystem user-defined extended attributes.
+ - Requires that extended attributes are enabled on the target filesystem
+ and that the setfattr/getfattr utilities are present.
+options:
+ path:
+ description:
+ - The full path of the file/object on which to operate.
+ - Before 2.3 this option was only usable as I(name).
+ type: path
+ required: true
+ aliases: [ name ]
+ namespace:
+ description:
+ - Namespace of the name/key.
+ type: str
+ default: user
+ key:
+ description:
+ - The name of a specific extended attribute key to set/retrieve.
+ type: str
+ value:
+ description:
+ - The value to set the named name/key to. Specifying a value implies C(state=present).
+ type: str
+ state:
+ description:
+ - Defines the operation to perform.
+ C(read) retrieves the current value for a C(key) (default).
+ C(present) sets C(name) to C(value), and is the default if I(value) is set.
+ C(all) dumps all data.
+ C(keys) retrieves all keys.
+ C(absent) deletes the key.
+ type: str
+ choices: [ absent, all, keys, present, read ]
+ default: read
+ follow:
+ description:
+ - If C(yes), dereferences symlinks and sets/gets attributes on symlink target,
+ otherwise acts on symlink itself.
+ type: bool
+ default: yes
+notes:
+ - As of Ansible 2.3, the I(name) option has been renamed to I(path), but I(name) still works as an alias.
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Obtain the extended attributes of /etc/foo.conf
+ community.general.xattr:
+ path: /etc/foo.conf
+
+- name: Set the key 'user.foo' to value 'bar'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ value: bar
+
+- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ value: "0x817b94343f164f199e5b573b4ea1f914"
+
+- name: Remove the key 'user.foo'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ state: absent
+
+- name: Remove the key 'trusted.glusterfs.volume-id'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ state: absent
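+
+# Illustrative sketch (not from the module docs): 'state: keys' is one of the
+# documented choices above; the path and register name are assumptions.
+- name: List all extended attribute keys of /etc/foo.conf
+ community.general.xattr:
+ path: /etc/foo.conf
+ state: keys
+ register: xattr_keys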
+'''
+
+import os
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_xattr_keys(module, path, follow):
+ cmd = [module.get_bin_path('getfattr', True)]
+ # --absolute-names prevents a warning about the leading '/' being stripped
+ cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def get_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('getfattr', True)]
+ # --absolute-names prevents a warning about the leading '/' being stripped
+ cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ if key is None:
+ cmd.append('-d')
+ else:
+ cmd.append('-n %s' % key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def set_xattr(module, path, key, value, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-n %s' % key)
+ cmd.append('-v %s' % value)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def rm_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-x %s' % key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def _run_xattr(module, cmd, check_rc=True):
+
+ try:
+ (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
+ except Exception as e:
+ module.fail_json(msg="%s!" % to_native(e))
+
+ # result = {'raw': out}
+ result = {}
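+ # getfattr prints '# file: ...' header lines, then either 'key="value"'
+ # pairs (dump mode) or bare key names (key listing mode); bare keys are
+ # stored with an empty string value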
+ for line in out.splitlines():
+ if line.startswith('#') or line == '':
+ pass
+ elif '=' in line:
+ (key, val) = line.split('=', 1) # split only on the first '=': values may contain '='
+ result[key] = val.strip('"')
+ else:
+ result[line] = ''
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['name']),
+ namespace=dict(type='str', default='user'),
+ key=dict(type='str'),
+ value=dict(type='str'),
+ state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
+ follow=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ path = module.params.get('path')
+ namespace = module.params.get('namespace')
+ key = module.params.get('key')
+ value = module.params.get('value')
+ state = module.params.get('state')
+ follow = module.params.get('follow')
+
+ if not os.path.exists(path):
+ module.fail_json(msg="path not found or not accessible!")
+
+ changed = False
+ msg = ""
+ res = {}
+
+ if key is None and state in ['absent', 'present']:
+ module.fail_json(msg="%s needs a key parameter" % state)
+
+ # Prepend the key with the namespace if defined
+ if (
+ key is not None and
+ namespace is not None and
+ len(namespace) > 0 and
+ not (namespace == 'user' and key.startswith('user.'))):
+ key = '%s.%s' % (namespace, key)
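+ # e.g. namespace='trusted', key='glusterfs.volume-id' -> 'trusted.glusterfs.volume-id'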
+
+ if (state == 'present' or value is not None):
+ current = get_xattr(module, path, key, follow)
+ if current is None or key not in current or value != current[key]:
+ if not module.check_mode:
+ res = set_xattr(module, path, key, value, follow)
+ changed = True
+ res = current
+ msg = "%s set to %s" % (key, value)
+ elif state == 'absent':
+ current = get_xattr(module, path, key, follow)
+ if current is not None and key in current:
+ if not module.check_mode:
+ res = rm_xattr(module, path, key, follow)
+ changed = True
+ res = current
+ msg = "%s removed" % (key)
+ elif state == 'keys':
+ res = get_xattr_keys(module, path, follow)
+ msg = "returning all keys"
+ elif state == 'all':
+ res = get_xattr(module, path, None, follow)
+ msg = "dumping all"
+ else:
+ res = get_xattr(module, path, key, follow)
+ msg = "returning %s" % key
+
+ module.exit_json(changed=changed, msg=msg, xattr=res)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py
new file mode 100644
index 00000000..1733e657
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py
@@ -0,0 +1,958 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Red Hat, Inc.
+# Copyright: (c) 2014, Tim Bielawa <tbielawa@redhat.com>
+# Copyright: (c) 2014, Magnus Hedemark <mhedemar@redhat.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xml
+short_description: Manage bits and pieces of XML files or strings
+description:
+- A CRUD-like interface to managing bits of XML files.
+options:
+ path:
+ description:
+ - Path to the file to operate on.
+ - This file must exist ahead of time.
+ - This parameter is required, unless C(xmlstring) is given.
+ type: path
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless C(path) is given.
+ type: str
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter C(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or to unset a value, the Python C(None) keyword (YAML equivalent: C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: list
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given C(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in C(add_children).
+ - This parameter requires C(xpath) to be set.
+ type: list
+ count:
+ description:
+ - Search for a given C(xpath) and provide the count of any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ print_match:
+ description:
+ - Search for a given C(xpath) and print out any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: no
+ content:
+ description:
+ - Search for a given C(xpath) and get content.
+ - This parameter requires C(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for C(add_children) and C(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: no
+ insertbefore:
+ description:
+ - Add additional child-element(s) before the first selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ insertafter:
+ description:
+ - Add additional child-element(s) after the last selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples.
+- A namespace prefix should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+# <name>Tasty Beverage Co.</name>
+# <beers>
+# <beer>Rochefort 10</beer>
+# <beer>St. Bernardus Abbot 12</beer>
+# <beer>Schlitz</beer>
+# </beers>
+# <rating subjective="true">10</rating>
+# <website>
+# <mobilefriendly/>
+# <address>http://tastybeverageco.com</address>
+# </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating/@subjective
+ state: absent
+
+- name: Set the rating to '11'
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating
+ value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: hits
+
+- ansible.builtin.debug:
+ var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: '/business/beers/beer[text()="Rochefort 10"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ attribute: validatedon
+ value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ content: attribute
+ register: xmlresp
+
+- name: Show an attribute value
+ ansible.builtin.debug:
+ var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/*
+ state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website
+ set_children: []
+
+# In case of namespaces, like in the XML below, they have to be stated explicitly.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+# <bar>
+# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+# </bar>
+# </foo>
+
+# NOTE: The prefix 'x' is used for the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+ community.general.xml:
+ path: foo.xml
+ xpath: /x:foo/x:bar/y:baz
+ namespaces:
+ x: http://x.test
+ y: http://y.test
+ z: http://z.test
+ attribute: z:my_namespaced_attribute
+ value: 'false'
+'''
+
+RETURN = r'''
+actions:
+ description: A dictionary with the original xpath, namespaces and state.
+ type: dict
+ returned: success
+ sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+ description: The name of the backup file that was created.
+ type: str
+ returned: when backup=yes
+ sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+ description: The count of xpath matches.
+ type: int
+ returned: when parameter 'count' is set
+ sample: 2
+matches:
+ description: The xpath matches found.
+ type: list
+ returned: when parameter 'print_match' is set
+msg:
+ description: A message related to the performed action(s).
+ type: str
+ returned: always
+xmlstring:
+ description: An XML string of the resulting output.
+ type: str
+ returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+from io import BytesIO
+
+LXML_IMP_ERR = None
+try:
+ from lxml import etree, objectify
+ HAS_LXML = True
+except ImportError:
+ LXML_IMP_ERR = traceback.format_exc()
+ HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
+# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
+_XPSTR = "('(?:.*)'|\"(?:.*)\")"
+
+_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
+_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
+_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
+
+
+def has_changed(doc):
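+ # orig_doc is a module-level snapshot of the parsed document, taken in main()
+ # before any modification; round-tripping through objectify gives both trees
+ # a normalized form for comparison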
+ orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
+ obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
+ return (orig_obj != obj)
+
+
+def do_print_match(module, tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ match_xpaths = []
+ for m in match:
+ match_xpaths.append(tree.getpath(m))
+ match_str = json.dumps(match_xpaths)
+ msg = "selector '%s' match: %s" % (xpath, match_str)
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg)
+
+
+def count_nodes(module, tree, xpath, namespaces):
+ """ Return the count of nodes matching the xpath """
+ hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
+ msg = "found %d nodes" % hits
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
+
+
+def is_node(tree, xpath, namespaces):
+ """ Test if a given xpath matches anything and if that match is a node.
+
+ For now we just assume you're only searching for one specific thing."""
+ if xpath_matches(tree, xpath, namespaces):
+ # OK, it found something
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._Element):
+ return True
+
+ return False
+
+
+def is_attribute(tree, xpath, namespaces):
+ """ Test if a given xpath matches and that match is an attribute
+
+ An xpath attribute search will only match one item"""
+ if xpath_matches(tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._ElementStringResult):
+ return True
+ elif isinstance(match[0], etree._ElementUnicodeResult):
+ return True
+ return False
+
+
+def xpath_matches(tree, xpath, namespaces):
+ """ Test if a node exists """
+ if tree.xpath(xpath, namespaces=namespaces):
+ return True
+ return False
+
+
+def delete_xpath_target(module, tree, xpath, namespaces):
+ """ Delete an attribute or element from a tree """
+ changed = False
+ try:
+ for result in tree.xpath(xpath, namespaces=namespaces):
+ changed = True
+ # Get the xpath for this result
+ if is_attribute(tree, xpath, namespaces):
+ # Delete an attribute
+ parent = result.getparent()
+ # Pop this attribute match out of the parent
+ # node's 'attrib' dict by using this match's
+ # 'attrname' attribute for the key
+ parent.attrib.pop(result.attrname)
+ elif is_node(tree, xpath, namespaces):
+ # Delete an element
+ result.getparent().remove(result)
+ else:
+ raise Exception("Impossible error")
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+ else:
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def replace_children_of(children, match):
+ for element in list(match):
+ match.remove(element)
+ match.extend(children)
+
+
+def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
+ matches = tree.xpath(xpath, namespaces=namespaces)
+
+ # Create a list of our new children
+ children = children_to_nodes(module, children, in_type)
+ children_as_string = [etree.tostring(c) for c in children]
+
+ changed = False
+
+ # xpaths always return matches as a list, so....
+ for match in matches:
+ # Check if elements differ
+ if len(list(match)) == len(children):
+ for idx, element in enumerate(list(match)):
+ if etree.tostring(element) != children_as_string[idx]:
+ replace_children_of(children, match)
+ changed = True
+ break
+ else:
+ replace_children_of(children, match)
+ changed = True
+
+ return changed
+
+
+def set_target_children(module, tree, xpath, namespaces, children, in_type):
+ changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
+ # Write it out
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
+ if is_node(tree, xpath, namespaces):
+ new_kids = children_to_nodes(module, children, in_type)
+ if insertbefore or insertafter:
+ insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
+ else:
+ for node in tree.xpath(xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ finish(module, tree, xpath, namespaces, changed=True)
+ else:
+ finish(module, tree, xpath, namespaces)
+
+
+def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
+ """
+ Insert the given children before or after the given xpath. With insertbefore, they are inserted before the
+ first xpath hit; with insertafter, they are inserted after the last xpath hit.
+ """
+ insert_target = tree.xpath(xpath, namespaces=namespaces)
+ loc_index = 0 if insertbefore else -1
+ index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
+ parent = insert_target[0].getparent()
+ if insertafter:
+ index_in_parent += 1
+ for child in children:
+ parent.insert(index_in_parent, child)
+ index_in_parent += 1
+
+
+def _extract_xpstr(g):
+ return g[1:-1]
+
+
+def split_xpath_last(xpath):
+ """split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
+ xpath = xpath.strip()
+ m = _RE_SPLITSIMPLELAST.match(xpath)
+ if m:
+ # requesting an element to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
+ if m:
+ # requesting an element to exist with an inner text
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
+ if m:
+ # requesting an attribute to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
+ if m:
+ # requesting an attribute to exist with a value
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSUBLAST.match(xpath)
+ if m:
+ content = [x.strip() for x in m.group(3).split(" and ")]
+ return (m.group(1), [('/' + m.group(2), content)])
+
+ m = _RE_SPLITONLYEQVALUE.match(xpath)
+ if m:
+ # requesting a change of inner text
+ return (m.group(1), [("", _extract_xpstr(m.group(2)))])
+ return (xpath, [])
+
+
+def nsnameToClark(name, namespaces):
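+ # Convert 'prefix:name' to Clark notation, e.g. with namespaces
+ # {'x': 'http://x.test'}, 'x:bar' becomes '{http://x.test}bar'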
+ if ":" in name:
+ (nsname, rawname) = name.split(":")
+ # return "{{%s}}%s" % (namespaces[nsname], rawname)
+ return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+
+ # no namespace name here
+ return name
+
+
+def check_or_make_target(module, tree, xpath, namespaces):
+ (inner_xpath, changes) = split_xpath_last(xpath)
+ if (inner_xpath == xpath) or (changes is None):
+ module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+ return False
+
+ changed = False
+
+ if not is_node(tree, inner_xpath, namespaces):
+ changed = check_or_make_target(module, tree, inner_xpath, namespaces)
+
+ # we test again after calling check_or_make_target
+ if is_node(tree, inner_xpath, namespaces) and changes:
+ for (eoa, eoa_value) in changes:
+ if eoa and eoa[0] != '@' and eoa[0] != '/':
+ # implicitly creating an element
+ new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
+ if eoa_value:
+ for nk in new_kids:
+ nk.text = eoa_value
+
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ changed = True
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa and eoa[0] == '/':
+ element = eoa[1:]
+ new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ for nk in new_kids:
+ for subexpr in eoa_value:
+ # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
+ # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
+ check_or_make_target(module, nk, "./" + subexpr, namespaces)
+ changed = True
+
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa == "":
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ if (node.text != eoa_value):
+ node.text = eoa_value
+ changed = True
+
+ elif eoa and eoa[0] == '@':
+ attribute = nsnameToClark(eoa[1:], namespaces)
+
+ for element in tree.xpath(inner_xpath, namespaces=namespaces):
+ changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
+
+ if changing:
+ changed = changed or changing
+ if eoa_value is None:
+ value = ""
+ else:
+ value = eoa_value
+ element.attrib[attribute] = value
+
+ # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
+ # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
+
+ else:
+ module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+
+ return changed
+
+
+def ensure_xpath_exists(module, tree, xpath, namespaces):
+ changed = False
+
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def set_target_inner(module, tree, xpath, namespaces, attribute, value):
+ changed = False
+
+ try:
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+ except Exception as e:
+ missing_namespace = ""
+ # NOTE: This checks only the namespaces defined in root element!
+ # TODO: Implement a more robust check to check for child namespaces' existence
+ if tree.getroot().nsmap and ":" not in xpath:
+ missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
+ module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
+ (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ if not attribute:
+ changed = changed or (element.text != value)
+ if element.text != value:
+ element.text = value
+ else:
+ changed = changed or (element.get(attribute) != value)
+ if ":" in attribute:
+ attr_ns, attr_name = attribute.split(":")
+ # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
+ attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+ if element.get(attribute) != value:
+ element.set(attribute, value)
+
+ return changed
+
+
+def set_target(module, tree, xpath, namespaces, attribute, value):
+ changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def get_element_text(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ elements.append({element.tag: element.text})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def get_element_attr(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ child = {}
+ for key in element.keys():
+ value = element.get(key)
+ child.update({key: value})
+ elements.append({element.tag: child})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def child_to_element(module, child, in_type):
+ if in_type == 'xml':
+ infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
+
+ try:
+ parser = etree.XMLParser()
+ node = etree.parse(infile, parser)
+ return node.getroot()
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing child element: %s" % e)
+ elif in_type == 'yaml':
+ if isinstance(child, string_types):
+ return etree.Element(child)
+ elif isinstance(child, MutableMapping):
+ if len(child) > 1:
+ module.fail_json(msg="Can only create children from hashes with one key")
+
+ (key, value) = next(iteritems(child))
+ if isinstance(value, MutableMapping):
+ children = value.pop('_', None)
+
+ node = etree.Element(key, value)
+
+ if children is not None:
+ if not isinstance(children, list):
+ module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+
+ subnodes = children_to_nodes(module, children)
+ node.extend(subnodes)
+ else:
+ node = etree.Element(key)
+ node.text = value
+ return node
+ else:
+ module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+ else:
+ module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+
+
+def children_to_nodes(module=None, children=None, type='yaml'):
+ """turn a str/hash/list of str&hash into a list of elements"""
+ children = [] if children is None else children
+
+ return [child_to_element(module, child, type) for child in children]
+
+
+def make_pretty(module, tree):
+ xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ result = dict(
+ changed=False,
+ )
+
+ if module.params['path']:
+ xml_file = module.params['path']
+ with open(xml_file, 'rb') as xml_content:
+ if xml_string != xml_content.read():
+ result['changed'] = True
+ if not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ elif module.params['xmlstring']:
+ result['xmlstring'] = xml_string
+ # NOTE: Modifying a string is not considered a change !
+ if xml_string != module.params['xmlstring']:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
+
+ result = dict(
+ actions=dict(
+ xpath=xpath,
+ namespaces=namespaces,
+ state=module.params['state']
+ ),
+ changed=has_changed(tree),
+ )
+
+ if module.params['count'] or hitcount:
+ result['count'] = hitcount
+
+ if module.params['print_match'] or matches:
+ result['matches'] = matches
+
+ if msg:
+ result['msg'] = msg
+
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(
+ before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ )
+
+ if module.params['path'] and not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ if module.params['xmlstring']:
+ result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', aliases=['dest', 'file']),
+ xmlstring=dict(type='str'),
+ xpath=dict(type='str'),
+ namespaces=dict(type='dict', default={}),
+ state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
+ value=dict(type='raw'),
+ attribute=dict(type='raw'),
+ add_children=dict(type='list'),
+ set_children=dict(type='list'),
+ count=dict(type='bool', default=False),
+ print_match=dict(type='bool', default=False),
+ pretty_print=dict(type='bool', default=False),
+ content=dict(type='str', choices=['attribute', 'text']),
+ input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
+ backup=dict(type='bool', default=False),
+ strip_cdata_tags=dict(type='bool', default=False),
+ insertbefore=dict(type='bool', default=False),
+ insertafter=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_by=dict(
+ add_children=['xpath'],
+ # TODO: Reinstate this in community.general 2.0.0 when we have deprecated the incorrect use below
+ # attribute=['value'],
+ content=['xpath'],
+ set_children=['xpath'],
+ value=['xpath'],
+ ),
+ required_if=[
+ ['count', True, ['xpath']],
+ ['print_match', True, ['xpath']],
+ ['insertbefore', True, ['xpath']],
+ ['insertafter', True, ['xpath']],
+ ],
+ required_one_of=[
+ ['path', 'xmlstring'],
+ ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
+ ],
+ mutually_exclusive=[
+ ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
+ ['path', 'xmlstring'],
+ ['insertbefore', 'insertafter'],
+ ],
+ )
+
+ xml_file = module.params['path']
+ xml_string = module.params['xmlstring']
+ xpath = module.params['xpath']
+ namespaces = module.params['namespaces']
+ state = module.params['state']
+ value = json_dict_bytes_to_unicode(module.params['value'])
+ attribute = module.params['attribute']
+ set_children = json_dict_bytes_to_unicode(module.params['set_children'])
+ add_children = json_dict_bytes_to_unicode(module.params['add_children'])
+ pretty_print = module.params['pretty_print']
+ content = module.params['content']
+ input_type = module.params['input_type']
+ print_match = module.params['print_match']
+ count = module.params['count']
+ backup = module.params['backup']
+ strip_cdata_tags = module.params['strip_cdata_tags']
+ insertbefore = module.params['insertbefore']
+ insertafter = module.params['insertafter']
+
+ # Check if we have lxml 2.3.0 or newer installed
+ if not HAS_LXML:
+ module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
+ module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
+ module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
+
+ # Report wrongly used attribute parameter when using content=attribute
+ # TODO: Remove this in community.general 2.0.0 (and reinstate strict parameter test above) and remove the integration test example
+ if content == 'attribute' and attribute is not None:
+ module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." % attribute,
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ # Check if the file exists
+ if xml_string:
+ infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+ elif os.path.isfile(xml_file):
+ infile = open(xml_file, 'rb')
+ else:
+ module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+ # Parse and evaluate xpath expression
+ if xpath is not None:
+ try:
+ etree.XPath(xpath)
+ except etree.XPathSyntaxError as e:
+ module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+ except etree.XPathEvalError as e:
+ module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+ # Try to parse in the target XML file
+ try:
+ parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+ doc = etree.parse(infile, parser)
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+
+ # Ensure we have the original copy to compare
+ global orig_doc
+ orig_doc = copy.deepcopy(doc)
+
+ if print_match:
+ do_print_match(module, doc, xpath, namespaces)
+
+ if count:
+ count_nodes(module, doc, xpath, namespaces)
+
+ if content == 'attribute':
+ get_element_attr(module, doc, xpath, namespaces)
+ elif content == 'text':
+ get_element_text(module, doc, xpath, namespaces)
+
+ # File exists:
+ if state == 'absent':
+ # - absent: delete xpath target
+ delete_xpath_target(module, doc, xpath, namespaces)
+
+ # - present: carry on
+
+ # children && value both set?: should have already aborted by now
+ # add_children && set_children both set?: should have already aborted by now
+
+ # set_children set?
+ if set_children:
+ set_target_children(module, doc, xpath, namespaces, set_children, input_type)
+
+ # add_children set?
+ if add_children:
+ add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
+
+ # No?: Carry on
+
+ # Is the xpath target an attribute selector?
+ if value is not None:
+ set_target(module, doc, xpath, namespaces, attribute, value)
+
+ # If an xpath was provided, we need to do something with the data
+ if xpath is not None:
+ ensure_xpath_exists(module, doc, xpath, namespaces)
+
+ # Otherwise only reformat the xml data?
+ if pretty_print:
+ make_pretty(module, doc)
+
+ module.fail_json(msg="Don't know what to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py
new file mode 100644
index 00000000..e78eec4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: filesystem
+short_description: Makes a filesystem
+description:
+ - This module creates a filesystem.
+options:
+ state:
+ description:
+ - If C(state=present), the filesystem is created if it doesn't already
+ exist; this is the default behaviour if I(state) is omitted.
+ - If C(state=absent), filesystem signatures on I(dev) are wiped if it
+ contains a filesystem (as known by C(blkid)).
+ - When C(state=absent), all other options but I(dev) are ignored, and the
+ module doesn't fail if the device I(dev) doesn't actually exist.
+ - C(state=absent) is not supported and will fail on FreeBSD systems.
+ type: str
+ choices: [ present, absent ]
+ default: present
+ version_added: 1.3.0
+ fstype:
+ choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ]
+ description:
+ - Filesystem type to be created. This option is required with
+ C(state=present) (or if I(state) is omitted).
+ - reiserfs support was added in 2.2.
+ - lvm support was added in 2.5.
+ - since 2.5, I(dev) can be an image file.
+ - vfat support was added in 2.5.
+ - ocfs2 support was added in 2.6.
+ - f2fs support was added in 2.7.
+ - swap support was added in 2.8.
+ type: str
+ aliases: [type]
+ dev:
+ description:
+ - Target path to device or image file.
+ type: path
+ required: yes
+ aliases: [device]
+ force:
+ description:
+ - If C(yes), allows creating a new filesystem on a device that already has a filesystem.
+ type: bool
+ default: 'no'
+ resizefs:
+ description:
+ - If C(yes) and the block device and filesystem sizes differ, grow the filesystem into the space.
+ - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems.
+ - XFS will only grow if mounted. Currently, the module is based on commands
+ from C(util-linux) package to perform operations, so resizing of XFS is
+ not supported on FreeBSD systems.
+ - vFAT resizing will likely fail if C(fatresize) is older than 1.04.
+ type: bool
+ default: 'no'
+ opts:
+ description:
+ - List of options to be passed to mkfs command.
+ type: str
+requirements:
+ - Uses tools related to the I(fstype) (C(mkfs)) and the C(blkid) command. When I(resizefs) is enabled, the C(blockdev) command is required too.
+notes:
+ - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) isn't able to detect an existing filesystem,
+ that filesystem is overwritten even if I(force) is C(no).
+ - This module supports I(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create an ext2 filesystem on /dev/sdb1
+ community.general.filesystem:
+ fstype: ext2
+ dev: /dev/sdb1
+
+- name: Create an ext4 filesystem on /dev/sdb1 and check disk blocks
+ community.general.filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ opts: -cc
+
+- name: Blank filesystem signature on /dev/sdb1
+ community.general.filesystem:
+ dev: /dev/sdb1
+ state: absent
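+
+# Illustrative sketch (not from the module docs): resizefs is documented above;
+# the device path is an assumption.
+- name: Grow the ext4 filesystem on /dev/sdb1 to fill the enlarged device
+ community.general.filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ resizefs: yes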
+'''
+
+from distutils.version import LooseVersion
+import os
+import platform
+import re
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Device(object):
+ def __init__(self, module, path):
+ self.module = module
+ self.path = path
+
+ def size(self):
+ """ Return size in bytes of device. Returns int """
+ statinfo = os.stat(self.path)
+ if stat.S_ISBLK(statinfo.st_mode):
+ blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
+ _, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
+ return int(devsize_in_bytes)
+ elif os.path.isfile(self.path):
+ return os.path.getsize(self.path)
+ else:
+ self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
+
+ def get_mountpoint(self):
+ """Return (first) mountpoint of device. Returns None when not mounted."""
+ cmd_findmnt = self.module.get_bin_path("findmnt", required=True)
+
+ # find mountpoint
+ rc, mountpoint, _ = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
+ "TARGET", "--source", self.path], check_rc=False)
+ if rc != 0:
+ mountpoint = None
+ else:
+ mountpoint = mountpoint.split('\n')[0]
+
+ return mountpoint
+
+ def __str__(self):
+ return self.path
+
+
+class Filesystem(object):
+
+ GROW = None
+ MKFS = None
+ MKFS_FORCE_FLAGS = ''
+
+ LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
+
+ def __init__(self, module):
+ self.module = module
+
+ @property
+ def fstype(self):
+ return type(self).__name__
+
+ def get_fs_size(self, dev):
+ """ Return size in bytes of filesystem on device. Returns int """
+ raise NotImplementedError()
+
+ def create(self, opts, dev):
+ if self.module.check_mode:
+ return
+
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ if opts is None:
+ cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev)
+ else:
+ cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev)
+ self.module.run_command(cmd, check_rc=True)
+
+ def wipefs(self, dev):
+ if platform.system() == 'FreeBSD':
+ msg = "module param state=absent is currently not supported on this OS (FreeBSD)."
+ self.module.fail_json(msg=msg)
+
+ if self.module.check_mode:
+ return
+
+ # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above)
+ # so it is not supported on FreeBSD. Even the use of dd as a fallback is
+ # not doable here if it needs get_mountpoint() (to prevent corruption of
+ # a mounted filesystem), since 'findmnt' is not available on FreeBSD.
+ wipefs = self.module.get_bin_path('wipefs', required=True)
+ cmd = [wipefs, "--all", dev.__str__()]
+ self.module.run_command(cmd, check_rc=True)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ return [cmd, str(dev)]
+
+ def grow(self, dev):
+ """Get dev and fs size and compare. Returns stdout of used command."""
+ devsize_in_bytes = dev.size()
+
+ try:
+ fssize_in_bytes = self.get_fs_size(dev)
+ except NotImplementedError:
+ self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype)
+
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+ elif self.module.check_mode:
+ self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev))
+ else:
+ _, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True)
+ return out
+
+
+class Ext(Filesystem):
+ MKFS_FORCE_FLAGS = '-F'
+ GROW = 'resize2fs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('tune2fs', required=True)
+ # Get Block count and Block size
+ _, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ for line in size.splitlines():
+ if 'Block count:' in line:
+ block_count = int(line.split(':')[1].strip())
+ elif 'Block size:' in line:
+ block_size = int(line.split(':')[1].strip())
+ return block_size * block_count
+
+
+class Ext2(Ext):
+ MKFS = 'mkfs.ext2'
+
+
+class Ext3(Ext):
+ MKFS = 'mkfs.ext3'
+
+
+class Ext4(Ext):
+ MKFS = 'mkfs.ext4'
+
+
+class XFS(Filesystem):
+ MKFS = 'mkfs.xfs'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'xfs_growfs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('xfs_info', required=True)
+
+ mountpoint = dev.get_mountpoint()
+ if mountpoint:
+ rc, out, err = self.module.run_command([cmd, str(mountpoint)], environ_update=self.LANG_ENV)
+ else:
+ # Recent GNU/Linux distros support access to unmounted XFS filesystems
+ rc, out, err = self.module.run_command([cmd, str(dev)], environ_update=self.LANG_ENV)
+ if rc != 0:
+ self.module.fail_json(msg="Error while attempting to query size of XFS filesystem: %s" % err)
+
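+ # xfs_info output contains e.g. 'data = bsize=4096 blocks=262144, imaxpct=25';
+ # block size and block count are parsed from that 'data' line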
+ for line in out.splitlines():
+ col = line.split('=')
+ if col[0].strip() == 'data':
+ if col[1].strip() != 'bsize':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "bsize")')
+ if col[2].split()[1] != 'blocks':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "blocks")')
+ block_size = int(col[2].split()[0])
+ block_count = int(col[3].split(',')[0])
+ return block_size * block_count
+
+ def grow_cmd(self, dev):
+ # Check first if growing is needed, and then if it is doable or not.
+ devsize_in_bytes = dev.size()
+ fssize_in_bytes = self.get_fs_size(dev)
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+
+ mountpoint = dev.get_mountpoint()
+ if not mountpoint:
+ # xfs filesystem needs to be mounted
+ self.module.fail_json(msg="%s needs to be mounted for xfs operations" % dev)
+
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+
+ return [cmd, str(mountpoint)]
+
+
+class Reiserfs(Filesystem):
+ MKFS = 'mkfs.reiserfs'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+class Btrfs(Filesystem):
+ MKFS = 'mkfs.btrfs'
+
+ def __init__(self, module):
+ super(Btrfs, self).__init__(module)
+ _, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True)
+ match = re.search(r" v([0-9.]+)", stdout)
+ if not match:
+ # v0.20-rc1 use stderr
+ match = re.search(r" v([0-9.]+)", stderr)
+ if match:
+ # v0.20-rc1 doesn't have --force parameter added in following version v3.12
+ if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
+ self.MKFS_FORCE_FLAGS = '-f'
+ else:
+ self.MKFS_FORCE_FLAGS = ''
+ else:
+ # assume version is greater or equal to 3.12
+ self.MKFS_FORCE_FLAGS = '-f'
+ self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
+
+
+class Ocfs2(Filesystem):
+ MKFS = 'mkfs.ocfs2'
+ MKFS_FORCE_FLAGS = '-Fx'
+
+
+class F2fs(Filesystem):
+ MKFS = 'mkfs.f2fs'
+ GROW = 'resize.f2fs'
+
+ @property
+ def MKFS_FORCE_FLAGS(self):
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ cmd = "%s %s" % (mkfs, os.devnull)
+ _, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV)
+ # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
+ # mkfs.f2fs displays version since v1.2.0
+ match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
+ if match is not None:
+ # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem
+ # before that version -f switch wasn't used
+ if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
+ return '-f'
+
+ return ''
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('dump.f2fs', required=True)
+ # Get sector count and sector size
+ _, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ sector_size = None
+ sector_count = None
+ for line in dump.splitlines():
+ if 'Info: sector size = ' in line:
+ # expected: 'Info: sector size = 512'
+ sector_size = int(line.split()[4])
+ elif 'Info: total FS sectors = ' in line:
+ # expected: 'Info: total FS sectors = 102400 (50 MB)'
+ sector_count = int(line.split()[5])
+
+ if None not in (sector_size, sector_count):
+ break
+ else:
+ self.module.warn("Unable to process dump.f2fs output '%s'", '\n'.join(dump))
+ self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev)
+
+ return sector_size * sector_count
+
+
+class VFAT(Filesystem):
+ if platform.system() == 'FreeBSD':
+ MKFS = "newfs_msdos"
+ else:
+ MKFS = 'mkfs.vfat'
+ GROW = 'fatresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ _, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ for line in output.splitlines()[1:]:
+ param, value = line.split(':', 1)
+ if param.strip() == 'Size':
+ return int(value.strip())
+ self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW)
+ return [cmd, "-s", str(dev.size()), str(dev.path)]
+
+
+class LVM(Filesystem):
+ MKFS = 'pvcreate'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'pvresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('pvs', required=True)
+ _, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
+ block_count = int(size)
+ return block_count
+
+
+class Swap(Filesystem):
+ MKFS = 'mkswap'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+FILESYSTEMS = {
+ 'ext2': Ext2,
+ 'ext3': Ext3,
+ 'ext4': Ext4,
+ 'ext4dev': Ext4,
+ 'f2fs': F2fs,
+ 'reiserfs': Reiserfs,
+ 'xfs': XFS,
+ 'btrfs': Btrfs,
+ 'vfat': VFAT,
+ 'ocfs2': Ocfs2,
+ 'LVM2_member': LVM,
+ 'swap': Swap,
+}
+
+
+def main():
+ friendly_names = {
+ 'lvm': 'LVM2_member',
+ }
+
+ fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
+
+ # There is no "single command" to manipulate filesystems, so we map them all out and their options
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
+ dev=dict(type='path', required=True, aliases=['device']),
+ opts=dict(type='str'),
+ force=dict(type='bool', default=False),
+ resizefs=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ('state', 'present', ['fstype'])
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ dev = module.params['dev']
+ fstype = module.params['fstype']
+ opts = module.params['opts']
+ force = module.params['force']
+ resizefs = module.params['resizefs']
+
+ changed = False
+
+ if not os.path.exists(dev):
+ msg = "Device %s not found." % dev
+ if state == "present":
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(msg=msg)
+
+ dev = Device(module, dev)
+
+ cmd = module.get_bin_path('blkid', required=True)
+ rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
+ # In case blkid isn't able to identify an existing filesystem, the device is considered empty,
+ # and any existing filesystem on it would then be overwritten even if force isn't enabled.
+ fs = raw_fs.strip()
+
+ if state == "present":
+ if fstype in friendly_names:
+ fstype = friendly_names[fstype]
+
+ try:
+ klass = FILESYSTEMS[fstype]
+ except KeyError:
+ module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
+
+ filesystem = klass(module)
+
+ same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
+ if same_fs and not resizefs and not force:
+ module.exit_json(changed=False)
+ elif same_fs and resizefs:
+ if not filesystem.GROW:
+ module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+ out = filesystem.grow(dev)
+
+ module.exit_json(changed=True, msg=out)
+ elif fs and not force:
+ module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
+
+ # create fs
+ filesystem.create(opts, dev)
+ changed = True
+
+ elif fs:
+ # wipe fs signatures
+ filesystem = Filesystem(module)
+ filesystem.wipefs(dev)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
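+# For illustration only, a minimal playbook task driving this module (a
+# hedged sketch; the device path is hypothetical):
+#
+#   - name: Create an ext4 filesystem on /dev/sdb1
+#     community.general.filesystem:
+#       fstype: ext4
+#       dev: /dev/sdb1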
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py
new file mode 100644
index 00000000..1be1a722
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak
+short_description: Manage flatpaks
+description:
+- Allows users to add or remove flatpaks.
+- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: path
+ default: flatpak
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The name of the flatpak to manage.
+ - When used with I(state=present), I(name) can be specified as an C(http(s)) URL to a
+ C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
+ - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
+ to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
+ - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
+ format.
+ - When supplying an C(http(s)) URL with I(state=absent), the module will try to match the
+ installed flatpak based on the name of the flatpakref to remove it. However, there is no
+ guarantee that the names of the flatpakref file and the reverse DNS name of the installed
+ flatpak do match.
+ type: str
+ required: true
+ remote:
+ description:
+ - The flatpak remote (repository) to install the flatpak from.
+ - By default, C(flathub) is assumed, but you need to add the flathub flatpak_remote before
+ you can use it.
+ - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+ type: str
+ default: flathub
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present ]
+ type: str
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Install the spotify flatpak
+ community.general.flatpak:
+ name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+ state: present
+
+- name: Install the gedit flatpak package
+ community.general.flatpak:
+ name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
+ state: present
+
+- name: Install the gedit package from flathub for current user
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: present
+ method: user
+
+- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
+ community.general.flatpak:
+ name: org.gnome.Calendar
+ state: present
+ remote: gnome
+
+- name: Remove the gedit flatpak
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak install --user --nontinteractive flathub org.gnome.Calculator"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
+'''
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.basic import AnsibleModule
+
+OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
+
+
+def install_flat(module, binary, remote, name, method):
+ """Add a new flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+ if name.startswith('http://') or name.startswith('https://'):
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, name]
+ else:
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, remote, name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
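+# For illustration only, how the StrictVersion comparison above selects the
+# non-interactive flag (sample version strings are hypothetical):
+#
+#   from distutils.version import StrictVersion
+#   StrictVersion('1.0.9') < StrictVersion('1.1.3')   # -> True,  use "-y"
+#   StrictVersion('1.2.4') < StrictVersion('1.1.3')   # -> False, use "--noninteractive"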
+
+def uninstall_flat(module, binary, name, method):
+ """Remove an existing flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+ installed_flat_name = _match_installed_flat_name(module, binary, name, method)
+ command = [binary, "uninstall", "--{0}".format(method), noninteractive_arg, name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def flatpak_exists(module, binary, name, method):
+ """Check if the flatpak is installed."""
+ command = [binary, "list", "--{0}".format(method), "--app"]
+ output = _flatpak_command(module, False, command)
+ name = _parse_flatpak_name(name).lower()
+ if name in output.lower():
+ return True
+ return False
+
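+# For illustration only, the case-insensitive substring check above applied
+# to the sample 'flatpak list' output from RETURN:
+#
+#   output = "org.gnome.Calendar/x86_64/stable\tcurrent\n"
+#   'org.gnome.calendar' in output.lower()   # -> True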
+
+def _match_installed_flat_name(module, binary, name, method):
+ # This is a difficult function, since if the user supplies a flatpakref url,
+ # we have to rely on a naming convention:
+ # The flatpakref file name needs to match the flatpak name
+ global result
+ parsed_name = _parse_flatpak_name(name)
+ # Try running flatpak list with columns feature
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ _flatpak_command(module, False, command, ignore_failure=True)
+ if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
+ # Probably flatpak before 1.2
+ matched_flatpak_name = \
+ _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+ else:
+ # Probably flatpak >= 1.2
+ matched_flatpak_name = \
+ _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+
+ if matched_flatpak_name:
+ return matched_flatpak_name
+ else:
+ result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
+ "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
+ "If you used a URL, try using the reverse DNS name of the flatpak"
+ module.fail_json(**result)
+
+
+def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
+ global result
+ command = [binary, "list", "--{0}".format(method), "--app"]
+ output = _flatpak_command(module, False, command)
+ for row in output.split('\n'):
+ if parsed_name.lower() in row.lower():
+ return row.split()[0]
+
+
+def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
+ global result
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ output = _flatpak_command(module, False, command)
+ for row in output.split('\n'):
+ if parsed_name.lower() == row.lower():
+ return row
+
+
+def _parse_flatpak_name(name):
+ if name.startswith('http://') or name.startswith('https://'):
+ file_name = urlparse(name).path.split('/')[-1]
+ file_name_without_extension = file_name.split('.')[0:-1]
+ common_name = ".".join(file_name_without_extension)
+ else:
+ common_name = name
+ return common_name
+
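+# For illustration only, _parse_flatpak_name applied to the gedit flatpakref
+# URL from EXAMPLES:
+#
+#   name = 'https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref'
+#   file_name = urlparse(name).path.split('/')[-1]   # 'gedit.flatpakref'
+#   '.'.join(file_name.split('.')[0:-1])             # -> 'gedit'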
+
+def _flatpak_version(module, binary):
+ global result
+ command = [binary, "--version"]
+ output = _flatpak_command(module, False, command)
+ version_number = output.split()[1]
+ return version_number
+
+
+def _flatpak_command(module, noop, command, ignore_failure=False):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=not ignore_failure
+ )
+ return result['stdout']
+
+
+def main():
+ # This module supports check mode
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ remote=dict(type='str', default='flathub'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ executable=dict(type='path', default='flatpak')
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ remote = module.params['remote']
+ method = module.params['method']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ if state == 'present' and not flatpak_exists(module, binary, name, method):
+ install_flat(module, binary, remote, name, method)
+ elif state == 'absent' and flatpak_exists(module, binary, name, method):
+ uninstall_flat(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py
new file mode 100644
index 00000000..dbb211c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak_remote
+short_description: Manage flatpak repository remotes
+description:
+- Allows users to add or remove flatpak remotes.
+- The flatpak remotes concept is comparable to what is called repositories in other packaging
+ formats.
+- Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+- Existing remotes will not be updated.
+- See the M(community.general.flatpak) module for managing flatpaks.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: str
+ default: flatpak
+ flatpakrepo_url:
+ description:
+ - The URL to the I(flatpakrepo) file representing the repository remote to add.
+ - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
+ is added using the specified installation C(method).
+ - When used with I(state=absent), this is not required.
+ - Required when I(state=present).
+ type: str
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with I(state=present), the remote will be added to the managed host under
+ the specified I(name).
+ - When used with I(state=absent) the remote with that name will be removed.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates the desired package state.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Add the Gnome flatpak remote to the system installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: present
+ flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
+
+- name: Add the flathub flatpak repository remote to the user installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+
+- name: Remove the Gnome flatpak remote from the user installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: absent
+ method: user
+
+- name: Remove the flathub remote from the system installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def add_remote(module, binary, name, flatpakrepo_url, method):
+ """Add a new remote."""
+ global result
+ command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remove_remote(module, binary, name, method):
+ """Remove an existing remote."""
+ global result
+ command = [binary, "remote-delete", "--{0}".format(method), "--force", name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_exists(module, binary, name, method):
+ """Check if the remote exists."""
+ command = [binary, "remote-list", "-d", "--{0}".format(method)]
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return True
+ return False
+
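+# For illustration only, the first-column match above applied to the sample
+# 'flatpak remote-list -d' output from RETURN:
+#
+#   output = "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+#   listed_remote = output.splitlines()[0].split()
+#   listed_remote[0] == 'flathub'   # -> True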
+
+def _flatpak_command(module, noop, command):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=True
+ )
+ return result['stdout']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ flatpakrepo_url=dict(type='str'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default="present",
+ choices=['absent', 'present']),
+ executable=dict(type='str', default="flatpak")
+ ),
+ # This module supports check mode
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ flatpakrepo_url = module.params['flatpakrepo_url']
+ method = module.params['method']
+ state = module.params['state']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ if flatpakrepo_url is None:
+ flatpakrepo_url = ''
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+ if state == 'present' and not remote_already_exists:
+ add_remote(module, binary, name, flatpakrepo_url, method)
+ elif state == 'absent' and remote_already_exists:
+ remove_remote(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py
new file mode 100644
index 00000000..a1842c5d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: flowdock
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+ - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat).
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ type:
+ type: str
+ description:
+ - Whether to post to 'inbox' or 'chat'
+ required: true
+ choices: [ "inbox", "chat" ]
+ msg:
+ type: str
+ description:
+ - Content of the message
+ required: true
+ tags:
+ type: str
+ description:
+ - tags of the message, separated by commas
+ required: false
+ external_user_name:
+ type: str
+ description:
+ - (chat only - required) Name of the "user" sending the message
+ required: false
+ from_address:
+ type: str
+ description:
+ - (inbox only - required) Email address of the message sender
+ required: false
+ source:
+ type: str
+ description:
+ - (inbox only - required) Human readable identifier of the application that uses the Flowdock API
+ required: false
+ subject:
+ type: str
+ description:
+ - (inbox only - required) Subject line of the message
+ required: false
+ from_name:
+ type: str
+ description:
+ - (inbox only) Name of the message sender
+ required: false
+ reply_to:
+ type: str
+ description:
+ - (inbox only) Email address for replies
+ required: false
+ project:
+ type: str
+ description:
+ - (inbox only) Human readable identifier for more detailed message categorization
+ required: false
+ link:
+ type: str
+ description:
+ - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ source: my cool app
+ msg: test from ansible
+ subject: test subject
+
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: chat
+ token: AAAAAA
+ external_user_name: testuser
+ msg: test from ansible
+ tags: tag1,tag2,tag3
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ type=dict(required=True, choices=["inbox", "chat"]),
+ external_user_name=dict(required=False),
+ from_address=dict(required=False),
+ source=dict(required=False),
+ subject=dict(required=False),
+ from_name=dict(required=False),
+ reply_to=dict(required=False),
+ project=dict(required=False),
+ tags=dict(required=False),
+ link=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ type = module.params["type"]
+ token = module.params["token"]
+ if type == 'inbox':
+ url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
+ else:
+ url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
+
+ params = {}
+
+ # required params
+ params['content'] = module.params["msg"]
+
+ # required params for the 'chat' type
+ if module.params['external_user_name']:
+ if type == 'inbox':
+ module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
+ else:
+ params['external_user_name'] = module.params["external_user_name"]
+ elif type == 'chat':
+ module.fail_json(msg="external_user_name is required for the 'chat' type")
+
+ # required params for the 'inbox' type
+ for item in ['from_address', 'source', 'subject']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+ elif type == 'inbox':
+ module.fail_json(msg="%s is required for the 'inbox' type" % item)
+
+ # optional params
+ if module.params["tags"]:
+ params['tags'] = module.params["tags"]
+
+ # optional params for the 'inbox' type
+ for item in ['from_name', 'reply_to', 'project', 'link']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ # Send the data to Flowdock
+ data = urlencode(params)
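+ # For illustration only (hypothetical payload; key order may vary on
+ # Python 2): urlencode() yields a standard form body, e.g.
+ # urlencode({'content': 'test from ansible', 'tags': 'tag1,tag2'})
+ # -> 'content=test+from+ansible&tags=tag1%2Ctag2'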
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send msg: %s" % info['msg'])
+
+ module.exit_json(changed=True, msg=module.params["msg"])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py
new file mode 100644
index 00000000..b209b05a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: foreman
+short_description: Manage Foreman Resources
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Foreman resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ verify_ssl:
+ description:
+ - Whether to verify an SSL connection to Foreman server.
+ type: bool
+ default: False
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ required: true
+ params:
+ description:
+ - Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description).
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create CI Organization
+ community.general.foreman:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: organization
+ params:
+ name: My Cool New Organization
+ delegate_to: localhost
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+try:
+ from nailgun import entities
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module):
+ self._server = server
+ self._entities = entities
+ self._module = module
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+
+ return None
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
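+# For illustration only, a hedged sketch of how main() below drives this
+# class (the organization name is taken from EXAMPLES):
+#
+#   ng = NailGun(server, entities, module)
+#   ng.organization({'name': 'My Cool New Organization'})   # update or create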
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True),
+ verify_ssl=dict(type='bool', default=False),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module)
+
+ # Let's make a connection to the server with username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % to_native(e),
+ exception=traceback.format_exc())
+
+ if entity == 'organization':
+ ng.organization(params)
+ module.exit_json(changed=True, result="%s updated" % entity)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py
new file mode 100644
index 00000000..52ca18fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gc_storage
+short_description: Manage objects/buckets in Google Cloud Storage
+description:
+ - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some
+ canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module
+ requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
+ information about setting the default project.
+
+options:
+ bucket:
+ type: str
+ description:
+ - Bucket name.
+ required: true
+ object:
+ type: path
+ description:
+ - Keyname of the object inside the bucket. Can be also be used to create "virtual directories" (see examples).
+ src:
+ type: str
+ description:
+ - The source file path when performing a PUT operation.
+ dest:
+ type: path
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ overwrite:
+ description:
+ - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ type: bool
+ default: 'yes'
+ aliases: [ 'force' ]
+ permission:
+ type: str
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
+ 'public-read', 'authenticated-read'.
+ default: private
+ choices: ['private', 'public-read', 'authenticated-read']
+ headers:
+ type: dict
+ description:
+ - Headers to attach to object.
+ default: {}
+ expiration:
+ type: int
+ default: 600
+ description:
+ - Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This url is only
+ available when public-read is the acl for the object.
+ aliases: [expiry]
+ mode:
+ type: str
+ description:
+ - Switches the module behaviour between upload, download, get_url (return download url) , get_str (download object as string), create (bucket) and
+ delete (bucket).
+ required: true
+ choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
+ gs_secret_key:
+ type: str
+ description:
+ - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used.
+ required: true
+ gs_access_key:
+ type: str
+ description:
+ - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used.
+ required: true
+ region:
+ type: str
+ description:
+ - The gs region to use. If not defined then the value 'US' will be used. See U(https://cloud.google.com/storage/docs/bucket-locations)
+ default: 'US'
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ default: false
+
+requirements:
+ - "python >= 2.6"
+ - "boto >= 2.9"
+
+author:
+- Benno Joy (@bennojoy)
+- Lukas Beumer (@Nitaco)
+
+'''
+
+EXAMPLES = '''
+- name: Upload some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ permission: public-read
+
+- name: Upload some headers
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ headers: '{"Content-Encoding": "gzip"}'
+
+- name: Download some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Download an object as a string to use else where in your playbook
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_str
+
+- name: Create an empty bucket
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: create
+
+- name: Create a bucket with key as directory
+ community.general.gc_storage:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+
+- name: Delete a bucket and all contents
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: delete
+
+- name: Create a bucket with versioning enabled
+ community.general.gc_storage:
+ bucket: "mybucket"
+ versioning: yes
+ mode: create
+
+- name: Create a bucket located in the eu
+ community.general.gc_storage:
+ bucket: "mybucket"
+ region: "europe-west3"
+ mode: create
+
+'''
+
+import os
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def grant_check(module, gs, obj):
+ try:
+ acp = obj.get_acl()
+ if module.params.get('permission') == 'public-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
+ if not grant:
+ obj.set_acl('public-read')
+ module.exit_json(changed=True, result="The objects permission as been set to public-read")
+ if module.params.get('permission') == 'authenticated-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
+ if not grant:
+ obj.set_acl('authenticated-read')
+ module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ return True
+
+
+def key_check(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if key_check:
+ grant_check(module, gs, key_check)
+ return True
+ else:
+ return False
+
+
+def keysum(module, gs, bucket, obj):
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ if not key_check:
+ return None
+ md5_remote = key_check.etag[1:-1]
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ if etag_multipart is True:
+ module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
+ return md5_remote
+
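+# For illustration only: for single-part uploads the etag is the MD5 hex
+# digest wrapped in double quotes, so stripping the quotes recovers it
+# (hypothetical value):
+#
+#   etag = '"9e107d9d372bb6826bd81d3542a419d6"'
+#   md5_remote = etag[1:-1]
+#   '-' in md5_remote   # -> False, i.e. not a multipart upload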
+
+def bucket_check(module, gs, bucket):
+ try:
+ result = gs.lookup(bucket)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if result:
+ grant_check(module, gs, result)
+ return True
+ else:
+ return False
+
+
+def create_bucket(module, gs, bucket):
+ try:
+ bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region'))
+ bucket.set_acl(module.params.get('permission'))
+ bucket.configure_versioning(module.params.get('versioning'))
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if bucket:
+ return True
+
+
+def delete_bucket(module, gs, bucket):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket_contents = bucket.list()
+ for key in bucket_contents:
+ bucket.delete_key(key.name)
+ bucket.delete()
+ return True
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_key(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket.delete_key(obj)
+ module.exit_json(msg="Object deleted from bucket ", changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def create_dirkey(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_string('')
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+
+def transform_headers(headers):
+ """
+ Boto url-encodes values unless we convert the value to `str`, so doing
+ this prevents 'max-age=100000' from being converted to "max-age%3D100000".
+
+ :param headers: Headers to convert
+ :type headers: dict
+ :rtype: dict
+
+ """
+
+ for key, value in headers.items():
+ headers[key] = str(value)
+ return headers
+
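+# For illustration only, transform_headers applied to the docstring's
+# max-age value plus a header from EXAMPLES:
+#
+#   transform_headers({'Content-Encoding': 'gzip', 'max-age': 100000})
+#   # -> {'Content-Encoding': 'gzip', 'max-age': '100000'}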
+
+def upload_gsfile(module, gs, bucket, obj, src, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_filename(
+ filename=src,
+ headers=transform_headers(module.params.get('headers'))
+ )
+ key.set_acl(module.params.get('permission'))
+ url = key.generate_url(expiry)
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsfile(module, gs, bucket, obj, dest):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ key.get_contents_to_filename(dest)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsstr(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ contents = key.get_contents_as_string()
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def get_download_url(module, gs, bucket, obj, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ url = key.generate_url(expiry)
+ module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def handle_get(module, gs, bucket, obj, overwrite, dest):
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(dest)
+ if md5_local == md5_remote:
+ module.exit_json(changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
+ else:
+ download_gsfile(module, gs, bucket, obj, dest)
+
+
+def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
+ # Let's check whether the bucket exists to get ground truth.
+ bucket_rc = bucket_check(module, gs, bucket)
+ key_rc = key_check(module, gs, bucket, obj)
+
+ # Let's check the key's state. If it exists, compute the etag md5sum.
+ if bucket_rc and key_rc:
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(src)
+ if md5_local == md5_remote:
+ module.exit_json(msg="Local and remote object are identical", changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
+ else:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ if not bucket_rc:
+ create_bucket(module, gs, bucket)
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ # If bucket exists but key doesn't, just upload.
+ if bucket_rc and not key_rc:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
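+# For illustration only, the PUT decision table implemented above:
+#
+#   bucket exists, key exists -> compare md5 sums; re-upload only when they
+#                                differ and overwrite is enabled
+#   bucket missing            -> create the bucket, then upload
+#   bucket exists, key absent -> upload directly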
+
+def handle_delete(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ if bucket and obj:
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, obj):
+ module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
+ else:
+ module.exit_json(msg="Object does not exist.", changed=False)
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ else:
+ module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
+
+
+def handle_create(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ create_dirkey(module, gs, bucket, dirobj)
+ else:
+ create_bucket(module, gs, bucket)
+ create_dirkey(module, gs, bucket, dirobj)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bucket=dict(required=True),
+ object=dict(default=None, type='path'),
+ src=dict(default=None),
+ dest=dict(default=None, type='path'),
+ expiration=dict(type='int', default=600, aliases=['expiry']),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
+ permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
+ headers=dict(type='dict', default={}),
+ gs_secret_key=dict(no_log=True, required=True),
+ gs_access_key=dict(required=True),
+ overwrite=dict(default=True, type='bool', aliases=['force']),
+ region=dict(default='US', type='str'),
+ versioning=dict(default=False, type='bool')
+ ),
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto 2.9+ is required for this module. Try: pip install boto --upgrade')
+
+ bucket = module.params.get('bucket')
+ obj = module.params.get('object')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ mode = module.params.get('mode')
+ expiry = module.params.get('expiration')
+ gs_secret_key = module.params.get('gs_secret_key')
+ gs_access_key = module.params.get('gs_access_key')
+ overwrite = module.params.get('overwrite')
+
+ if mode == 'put':
+ if not src or not obj:
+ module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
+ if mode == 'get':
+ if not dest or not obj:
+ module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
+
+ try:
+ gs = boto.connect_gs(gs_access_key, gs_secret_key)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ if mode == 'get':
+ if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
+ module.fail_json(msg="Target bucket/key cannot be found", failed=True)
+ if not path_check(dest):
+ download_gsfile(module, gs, bucket, obj, dest)
+ else:
+ handle_get(module, gs, bucket, obj, overwrite, dest)
+
+ if mode == 'put':
+ if not path_check(src):
+ module.fail_json(msg="Local object for PUT does not exist", failed=True)
+ handle_put(module, gs, bucket, obj, overwrite, src, expiry)
+
+ # Support for deleting an object if we have both params.
+ if mode == 'delete':
+ handle_delete(module, gs, bucket, obj)
+
+ if mode == 'create':
+ handle_create(module, gs, bucket, obj)
+
+ if mode == 'get_url':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ get_download_url(module, gs, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+ # --------------------------- Get the String contents of an Object -------------------------
+ if mode == 'get_str':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ download_gsstr(module, gs, bucket, obj)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py
new file mode 100644
index 00000000..b97377b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_record
+short_description: Creates or removes resource records in Google Cloud DNS
+description:
+ - Creates or removes resource records in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_resource_record_set) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given resource record should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ record:
+ type: str
+ description:
+ - The fully-qualified domain name of the resource record.
+ required: true
+ aliases: ['name']
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone (e.g., example.com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ zone_id:
+ type: str
+ description:
+ - The Google Cloud ID of the zone (e.g., example-com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - These usually take the form of domain names with the dots replaced
+ with dashes. A zone ID will never have any dots in it.
+ - I(zone_id) can be faster than I(zone) in projects with a large
+ number of zones.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ type:
+ type: str
+ description:
+ - The type of resource record to add.
+ required: true
+ choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+ record_data:
+ type: list
+ description:
+ - The record_data to use for the resource record.
+ - I(record_data) must be specified if I(state) is C(present) or
+ I(overwrite) is C(True), or the module will fail.
+ - Valid record_data vary based on the record's I(type). In addition,
+ resource records that contain a DNS domain name in the value
+ field (e.g., CNAME, PTR, SRV, etc.) MUST include a trailing dot
+ in the value.
+ - Individual string record_data for TXT records must be enclosed in
+ double quotes.
+ - For resource records that have the same name but different
+ record_data (e.g., multiple A records), they must be defined as
+ multiple list entries in a single record.
+ required: false
+ aliases: ['value']
+ ttl:
+ type: int
+ description:
+ - The amount of time in seconds that a resource record will remain
+ cached by a caching resolver.
+ default: 300
+ overwrite:
+ description:
+ - Whether an attempt to overwrite an existing record should succeed
+ or fail. The behavior of this option depends on I(state).
+ - If I(state) is C(present) and I(overwrite) is C(True), this
+ module will replace an existing resource record of the same name
+ with the provided I(record_data). If I(state) is C(present) and
+ I(overwrite) is C(False), this module will fail if there is an
+ existing resource record with the same name and type, but
+ different resource data.
+ - If I(state) is C(absent) and I(overwrite) is C(True), this
+ module will remove the given resource record unconditionally.
+ If I(state) is C(absent) and I(overwrite) is C(False), this
+ module will fail if the provided record_data do not match exactly
+ with the existing resource record's record_data.
+ type: bool
+ default: 'no'
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_zone).
+ - This module's underlying library does not support in-place updates for
+ DNS resource records. Instead, resource records are quickly deleted and
+ recreated.
+ - SOA records are technically supported, but their functionality is limited
+ to verifying that a zone's existing SOA record matches a pre-determined
+ value. The SOA record cannot be updated.
+ - Root NS records cannot be updated.
+ - NAPTR records are not supported.
+'''
+
+EXAMPLES = '''
+- name: Create an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ value: '1.2.3.4'
+
+- name: Update an existing record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ value: '5.6.7.8'
+
+- name: Remove an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ value: '5.6.7.8'
+
+- name: Create a CNAME record. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'www.example.com'
+ zone_id: 'example-com'
+ type: CNAME
+ value: 'www.example.com.'
+
+- name: Create an MX record with a custom TTL. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone: 'example.com'
+ type: MX
+ ttl: 3600
+ value: '10 mail.example.com.'
+
+- name: Create multiple A records with the same name
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ type: A
+ record_data:
+ - '192.0.2.23'
+ - '10.4.5.6'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Change the value of an existing record with multiple record_data
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ record_data: # WARNING: All values in a record will be replaced
+ - '192.0.2.23'
+ - '192.0.2.42' # The changed record
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Safely remove a multi-line record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ record_data: # NOTE: All of the values must match exactly
+ - '192.0.2.23'
+ - '192.0.2.42'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Unconditionally remove a record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ overwrite: true # overwrite is true, so no values are needed
+ type: A
+
+- name: Create an AAAA record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: AAAA
+ value: 'fd00:db8::1'
+
+- name: Create a PTR record
+ community.general.gcdns_record:
+ record: '10.5.168.192.in-addr.arpa'
+ zone: '5.168.192.in-addr.arpa'
+ type: PTR
+ value: 'api.example.com.' # Note the trailing dot.
+
+- name: Create an NS record
+ community.general.gcdns_record:
+ record: 'subdomain.example.com'
+ zone: 'example.com'
+ type: NS
+ ttl: 21600
+ record_data:
+ - 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values
+ - 'ns-cloud-d2.googledomains.com.'
+ - 'ns-cloud-d3.googledomains.com.'
+ - 'ns-cloud-d4.googledomains.com.'
+
+- name: Create a TXT record
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone_id: 'example-com'
+ type: TXT
+ record_data:
+ - '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value
+ - '"hello " "world"' # A multi-string TXT value
+'''
+
+RETURN = '''
+overwrite:
+ description: Whether the module was allowed to overwrite the record
+ returned: success
+ type: bool
+ sample: True
+record:
+ description: Fully-qualified domain name of the resource record
+ returned: success
+ type: str
+ sample: mail.example.com.
+state:
+ description: Whether the record is present or absent
+ returned: success
+ type: str
+ sample: present
+ttl:
+ description: The time-to-live of the resource record
+ returned: success
+ type: int
+ sample: 300
+type:
+ description: The type of the resource record
+ returned: success
+ type: str
+ sample: A
+record_data:
+ description: The resource record values
+ returned: success
+ type: list
+ sample: ['5.6.7.8', '9.10.11.12']
+zone:
+ description: The dns name of the zone
+ returned: success
+ type: str
+ sample: example.com.
+zone_id:
+ description: The Google Cloud DNS ID of the zone
+ returned: success
+ type: str
+ sample: example-com
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+import socket
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.types import LibcloudError
+ from libcloud.dns.types import Provider
+ from libcloud.dns.types import RecordDoesNotExistError
+ from libcloud.dns.types import ZoneDoesNotExistError
+ HAS_LIBCLOUD = True
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
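+# For illustration only, the comparison this constant is meant for (a hedged
+# sketch):
+#
+#   from distutils.version import LooseVersion
+#   LooseVersion(LIBCLOUD_VERSION) >= LooseVersion(MINIMUM_LIBCLOUD_VERSION)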
+# The records that libcloud's Google Cloud DNS provider supports.
+#
+# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
+# this information and is the authoritative source on which records are
+# supported, but accessing the dictionary requires creating a Google Cloud DNS
+# driver object, which is done in a helper module.
+#
+# I'm hard-coding the supported record types here, because they (hopefully!)
+# shouldn't change much, and it allows me to use it as a "choices" parameter
+# in an AnsibleModule argument_spec.
+SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
+
+
+################################################################################
+# Functions
+################################################################################
+
+def create_record(module, gcdns, zone, record):
+ """Creates or overwrites a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ record_name = module.params['record']
+ record_type = module.params['type']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+ data = dict(ttl=ttl, rrdatas=record_data)
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # If we found a record, we need to check if the values match.
+ if record is not None:
+ # If the record matches, we obviously don't have to change anything.
+ if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ return False
+
+ # The record doesn't match, so we need to check if we can overwrite it.
+ if not overwrite:
+ module.fail_json(
+ msg='cannot overwrite existing record, overwrite protection enabled',
+ changed=False
+ )
+
+ # The record either doesn't exist, or it exists and we can overwrite it.
+ if record is None and not module.check_mode:
+ # There's no existing record, so we'll just create it.
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The resource record name and type are valid by themselves, but
+ # not when combined (e.g., an 'A' record with "www.example.com"
+ # as its value).
+ module.fail_json(
+ msg='value is invalid for the given type: ' +
+ "%s, got value: %s" % (record_type, record_data),
+ changed=False
+ )
+
+ elif error.code == 'cnameResourceRecordSetConflict':
+ # We're attempting to create a CNAME resource record when we
+ # already have another type of resource record with the same
+ # domain name.
+ module.fail_json(
+ msg="non-CNAME resource record already exists: %s" % record_name,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+ elif record is not None and not module.check_mode:
+ # The Google provider in libcloud doesn't support updating a record in
+ # place, so if the record already exists, we need to delete it and
+ # recreate it using the new information.
+ gcdns.delete_record(record)
+
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError:
+ # Something blew up when creating the record. This will usually be a
+ # result of invalid value data in the new record. Unfortunately, we
+ # already changed the state of the record by deleting the old one,
+ # so we'll try to roll back before failing out.
+ try:
+ gcdns.create_record(record.name, record.zone, record.type, record.data)
+ module.fail_json(
+ msg='error updating record, the original record was restored',
+ changed=False
+ )
+ except LibcloudError:
+ # We deleted the old record, couldn't create the new record, and
+ # couldn't roll back. That really sucks. We'll dump the original
+ # record to the failure output so the user can restore it if
+ # necessary.
+ module.fail_json(
+ msg='error updating record, and could not restore original record, ' +
+ "original name: %s " % record.name +
+ "original zone: %s " % record.zone +
+ "original type: %s " % record.type +
+ "original data: %s" % record.data,
+ changed=True)
+
+ return True
+
+
+def remove_record(module, gcdns, record):
+ """Remove a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # If there is no record, we're obviously done.
+ if record is None:
+ return False
+
+ # If there is an existing record, do our values match the values of the
+ # existing record?
+ if not overwrite:
+ if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ module.fail_json(
+ msg='cannot delete due to non-matching ttl or record_data: ' +
+ "ttl: %d, record_data: %s " % (ttl, record_data) +
+ "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+ changed=False
+ )
+
+ # If we got to this point, we're okay to delete the record.
+ if not module.check_mode:
+ gcdns.delete_record(record)
+
+ return True
+
+
+def _get_record(gcdns, zone, record_type, record_name):
+ """Gets the record object for a given FQDN."""
+
+ # The record ID is a combination of its type and FQDN. For example, the
+ # ID of an A record for www.example.com would be 'A:www.example.com.'
+ record_id = "%s:%s" % (record_type, record_name)
+
+ try:
+ return gcdns.get_record(zone.id, record_id)
+ except RecordDoesNotExistError:
+ return None
+
+
+def _get_zone(gcdns, zone_name, zone_id):
+ """Gets the zone object for a given domain name."""
+
+ if zone_id is not None:
+ try:
+ return gcdns.get_zone(zone_id)
+ except ZoneDoesNotExistError:
+ return None
+
+ # We were only given the zone's domain name, not its ID. Zone IDs are
+ # often based on domain names, but that's not guaranteed, so we'll
+ # iterate through the list of zones to see if we can find a matching
+ # domain name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
+ """Checks to see if original and new TTL and values match."""
+
+ matches = True
+
+ if old_ttl != new_ttl:
+ matches = False
+ if old_record_data != new_record_data:
+ matches = False
+
+ return matches
+
+
+def _sanity_check(module):
+ """Run sanity checks that don't depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # A negative TTL is not permitted (how would they even work?!).
+ if ttl < 0:
+ module.fail_json(
+ msg='TTL cannot be less than zero, got: %d' % ttl,
+ changed=False
+ )
+
+ # Deleting SOA records is not permitted.
+ if record_type == 'SOA' and state == 'absent':
+ module.fail_json(msg='cannot delete SOA records', changed=False)
+
+ # Updating SOA records is not permitted.
+ if record_type == 'SOA' and state == 'present' and overwrite:
+ module.fail_json(msg='cannot update SOA records', changed=False)
+
+ # Some sanity checks depend on what value was supplied.
+ if record_data is not None and (state == 'present' or not overwrite):
+ # A records must contain valid IPv4 addresses.
+ if record_type == 'A':
+ for value in record_data:
+ try:
+ socket.inet_aton(value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid A record value, got: %s' % value,
+ changed=False
+ )
+
+ # AAAA records must contain valid IPv6 addresses.
+ if record_type == 'AAAA':
+ for value in record_data:
+ try:
+ socket.inet_pton(socket.AF_INET6, value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid AAAA record value, got: %s' % value,
+ changed=False
+ )
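+ # e.g. '203.0.113.10' satisfies the A record check above, while
+ # '2001:db8::1' satisfies this AAAA record check.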
+
+ # CNAME and SOA records can't have multiple values.
+ if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
+ module.fail_json(
+ msg='CNAME or SOA records cannot have more than one value, ' +
+ "got: %s" % record_data,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support wildcard NS records.
+ if record_type == 'NS' and record_name[0] == '*':
+ module.fail_json(
+ msg="wildcard NS records not allowed, got: %s" % record_name,
+ changed=False
+ )
+
+ # Values for TXT records must begin and end with a double quote.
+ if record_type == 'TXT':
+ for value in record_data:
+ if value[0] != '"' or value[-1] != '"':
+ module.fail_json(
+ msg='TXT record_data must be enclosed in double quotes, ' +
+ 'got: %s' % value,
+ changed=False
+ )
+
+
+def _additional_sanity_checks(module, zone):
+ """Run input sanity checks that depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+
+ # CNAME records are not allowed to have the same name as the root domain.
+ if record_type == 'CNAME' and record_name == zone.domain:
+ module.fail_json(
+ msg='CNAME records cannot match the zone name',
+ changed=False
+ )
+
+ # The root domain must always have an NS record.
+ if record_type == 'NS' and record_name == zone.domain and state == 'absent':
+ module.fail_json(
+ msg='cannot delete root NS records',
+ changed=False
+ )
+
+ # Updating NS records with the name as the root domain is not allowed
+ # because libcloud does not support in-place updates and root domain NS
+ # records cannot be removed.
+ if record_type == 'NS' and record_name == zone.domain and overwrite:
+ module.fail_json(
+ msg='cannot update existing root NS records',
+ changed=False
+ )
+
+ # SOA records with names that don't match the root domain are not permitted
+ # (and wouldn't make sense anyway).
+ if record_type == 'SOA' and record_name != zone.domain:
+ module.fail_json(
+ msg='non-root SOA records are not permitted, got: %s' % record_name,
+ changed=False
+ )
+
+
+################################################################################
+# Main
+################################################################################
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ record=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(type='str'),
+ zone_id=dict(type='str'),
+ type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
+ record_data=dict(aliases=['value'], type='list'),
+ ttl=dict(default=300, type='int'),
+ overwrite=dict(default=False, type='bool'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ required_if=[
+ ('state', 'present', ['record_data']),
+ ('overwrite', False, ['record_data'])
+ ],
+ required_one_of=[['zone', 'zone_id']],
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ zone_name = module.params['zone']
+ zone_id = module.params['zone_id']
+
+ json_output = dict(
+ state=state,
+ record=record_name,
+ zone=zone_name,
+ zone_id=zone_id,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl,
+ overwrite=module.boolean(module.params['overwrite'])
+ )
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if zone_name is not None and zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
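+ # e.g. zone 'example.com' becomes 'example.com.' and record
+ # 'www.example.com' becomes 'www.example.com.'.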
+
+ # Build a connection object that we can use to connect with Google Cloud
+ # DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check that the zone we're creating a record for actually
+ # exists.
+ zone = _get_zone(gcdns, zone_name, zone_id)
+ if zone is None and zone_name is not None:
+ module.fail_json(
+ msg='zone name was not found: %s' % zone_name,
+ changed=False
+ )
+ elif zone is None and zone_id is not None:
+ module.fail_json(
+ msg='zone id was not found: %s' % zone_id,
+ changed=False
+ )
+
+ # Populate the returns with the actual zone information.
+ json_output['zone'] = zone.domain
+ json_output['zone_id'] = zone.id
+
+ # We also need to check if the record we want to create or remove actually
+ # exists.
+ try:
+ record = _get_record(gcdns, zone, record_type, record_name)
+ except InvalidRequestError:
+ # We gave Google Cloud DNS an invalid DNS record name.
+ module.fail_json(
+ msg='record name is invalid: %s' % record_name,
+ changed=False
+ )
+
+ _additional_sanity_checks(module, zone)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if record is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ record=record.data['name'],
+ type=record.data['type'],
+ record_data=record.data['rrdatas'],
+ ttl=record.data['ttl']
+ )
+ diff['before_header'] = "%s:%s" % (record_type, record_name)
+
+ # Create, remove, or modify the record.
+ if state == 'present':
+ diff['after'] = dict(
+ record=record_name,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl
+ )
+ diff['after_header'] = "%s:%s" % (record_type, record_name)
+
+ changed = create_record(module, gcdns, zone, record)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_record(module, gcdns, record)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py
new file mode 100644
index 00000000..6f66b5fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_zone
+short_description: Creates or removes zones in Google Cloud DNS
+description:
+ - Creates or removes managed zones in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_managed_zone) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given zone should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone.
+ - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
+ you attempt to specify a zone ID, this module will attempt to
+ create a TLD and will fail.
+ required: true
+ aliases: ['name']
+ description:
+ type: str
+ description:
+ - An arbitrary text string to use for the zone description.
+ default: ""
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_record).
+ - Zones that are newly created must still be set up with a domain registrar
+ before they can be used.
+'''
+
+EXAMPLES = '''
+# Basic zone creation example.
+- name: Create a basic zone with the minimum number of parameters.
+ community.general.gcdns_zone: zone=example.com
+
+# Zone removal example.
+- name: Remove a zone.
+ community.general.gcdns_zone: zone=example.com state=absent
+
+# Zone creation with description
+- name: Creating a zone with a description
+ community.general.gcdns_zone: zone=example.com description="This is an awesome zone"
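+
+# The same zone creation in YAML block form (equivalent to the inline form above):
+- name: Creating a zone with a description in block form
+ community.general.gcdns_zone:
+ zone: example.com
+ description: This is an awesome zone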
+'''
+
+RETURN = '''
+description:
+ description: The zone's description
+ returned: success
+ type: str
+ sample: This is an awesome zone
+state:
+ description: Whether the zone is present or absent
+ returned: success
+ type: str
+ sample: present
+zone:
+ description: The zone's DNS name
+ returned: success
+ type: str
+ sample: example.com.
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ from libcloud.dns.types import Provider
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The URL used to verify ownership of a zone in Google Cloud DNS.
+ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
+
+################################################################################
+# Functions
+################################################################################
+
+
+def create_zone(module, gcdns, zone):
+ """Creates a new Google Cloud DNS zone."""
+
+ description = module.params['description']
+ extra = dict(description=description)
+ zone_name = module.params['zone']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ # If we got a zone back, then the domain exists.
+ if zone is not None:
+ return False
+
+ # The zone doesn't exist yet.
+ try:
+ if not module.check_mode:
+ gcdns.create_zone(domain=zone_name, extra=extra)
+ return True
+
+ except ResourceExistsError:
+ # The zone already exists. We checked for this already, so either
+ # Google is lying, or someone was a ninja and created the zone
+ # within milliseconds of us checking for its existence. In any case,
+ # the zone has already been created, so we have nothing more to do.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The zone name or a parameter might be completely invalid. This is
+ # typically caused by an illegal DNS name (e.g. foo..com).
+ module.fail_json(
+ msg="zone name is not a valid DNS name: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'managedZoneDnsNameNotAvailable':
+ # Google Cloud DNS will refuse to create zones with certain domain
+ # names, such as TLDs, ccTLDs, or special domain names such as
+ # example.com.
+ module.fail_json(
+ msg="zone name is reserved or already in use: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'verifyManagedZoneDnsNameOwnership':
+ # This domain name needs to be verified before Google will create
+ # it. This occurs when a user attempts to create a zone which shares
+ # a domain name with a zone hosted elsewhere in Google Cloud DNS.
+ module.fail_json(
+ msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def remove_zone(module, gcdns, zone):
+ """Removes an existing Google Cloud DNS zone."""
+
+ # If there's no zone, then we're obviously done.
+ if zone is None:
+ return False
+
+ # An empty zone will have two resource records:
+ # 1. An NS record with a list of authoritative name servers
+ # 2. An SOA record
+ # If any additional resource records are present, Google Cloud DNS will
+ # refuse to remove the zone.
+ if len(zone.list_records()) > 2:
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ try:
+ if not module.check_mode:
+ gcdns.delete_zone(zone)
+ return True
+
+ except ResourceNotFoundError:
+ # When we performed our check, the zone existed. It may have been
+ # deleted by something else. It's gone, so whatever.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'containerNotEmpty':
+ # When we performed our check, the zone existed and was empty. In
+ # the milliseconds between the check and the removal command,
+ # records were added to the zone.
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def _get_zone(gcdns, zone_name):
+ """Gets the zone object for a given domain name."""
+
+ # To create a zone, we need to supply a zone name. However, to delete a
+ # zone, we need to supply a zone ID. Zone IDs are often based on zone
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _sanity_check(module):
+ """Run module sanity checks."""
+
+ zone_name = module.params['zone']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support the creation of TLDs (e.g., 'com' or
+ # 'com.' would be rejected here, while 'example.com' is accepted).
+ if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
+ module.fail_json(
+ msg='cannot create top-level domain: %s' % zone_name,
+ changed=False
+ )
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ zone=dict(required=True, aliases=['name'], type='str'),
+ description=dict(default='', type='str'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ zone_name = module.params['zone']
+ state = module.params['state']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ json_output = dict(
+ state=state,
+ zone=zone_name,
+ description=module.params['description']
+ )
+
+ # Build a connection object that we can use to connect with Google
+ # Cloud DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check if the zone we're attempting to create already exists.
+ zone = _get_zone(gcdns, zone_name)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if zone is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ zone=zone.domain,
+ description=zone.extra['description']
+ )
+ diff['before_header'] = zone_name
+
+ # Create or remove the zone.
+ if state == 'present':
+ diff['after'] = dict(
+ zone=zone_name,
+ description=module.params['description']
+ )
+ diff['after_header'] = zone_name
+
+ changed = create_zone(module, gcdns, zone)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_zone(module, gcdns, zone)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py
new file mode 100644
index 00000000..7e658786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py
@@ -0,0 +1,753 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce
+short_description: create or terminate GCE instances
+description:
+ - Creates or terminates Google Compute Engine (GCE) instances. See
+ U(https://cloud.google.com/compute) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_instance) instead.
+options:
+ image:
+ type: str
+ description:
+ - image string to use for the instance (default will follow latest
+ stable debian image)
+ default: "debian-8"
+ image_family:
+ type: str
+ description:
+ - image family from which to select the image. The most recent
+ non-deprecated image in the family will be used.
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+ instance_names:
+ type: str
+ description:
+ - a comma-separated list of instance names to create or destroy
+ machine_type:
+ type: str
+ description:
+ - machine type to use for the instance; 'n1-standard-1' is used by default
+ default: "n1-standard-1"
+ metadata:
+ type: str
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ name:
+ type: str
+ description:
+ - either the name of a single instance or, when used with 'num_instances',
+ the base name of a cluster of nodes
+ aliases: ['base_name']
+ num_instances:
+ type: int
+ description:
+ - can be used with 'name', specifies
+ the number of nodes to provision using 'name'
+ as a base name
+ network:
+ type: str
+ description:
+ - name of the network, 'default' will be used if not specified
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - name of the subnetwork in which the instance should be created
+ persistent_boot_disk:
+ description:
+ - if set, create the instance with a persistent boot disk
+ type: bool
+ default: 'no'
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ zone:
+ type: str
+ description:
+ - the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
+ default: "us-central1-a"
+ ip_forward:
+ description:
+ - set to C(yes) if the instance can forward ip packets (useful for
+ gateways)
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
+ default: "ephemeral"
+ disk_auto_delete:
+ description:
+ - if set, the boot disk will be removed after instance destruction
+ type: bool
+ default: 'yes'
+ preemptible:
+ description:
+ - if set to C(yes), instances will be preemptible and time-limited.
+ (requires libcloud >= 0.20.0)
+ type: bool
+ disk_size:
+ type: int
+ description:
+ - The size of the boot disk created for this instance (in GB)
+ default: 10
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - Either I(instance_names) or I(name) is required.
+ - JSON credentials strongly preferred.
+author:
+ - Eric Johnson (@erjohnso) <erjohnso@google.com>
+ - Tom Melendez (@supertom) <supertom@google.com>
+'''
+
+EXAMPLES = '''
+# Basic provisioning example. Create a single Debian 8 instance in the
+# us-central1-a Zone of the n1-standard-1 machine type.
+# Create multiple instances by specifying multiple names, separated by
+# commas in the instance_names field
+# (e.g. my-test-instance1,my-test-instance2)
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single instance of an image from the "my-base-image" image family
+# in the us-central1-a Zone of the n1-standard-1 machine type.
+# This image family is in the "my-other-project" GCP project.
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image_family: my-base-image
+ external_projects:
+ - my-other-project
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single Debian 8 instance in the us-central1-a Zone
+# Use existing disks, custom network/subnetwork, set service account permissions
+# add tags and metadata.
+ - community.general.gce:
+ instance_names: my-test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ state: present
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+ tags:
+ - http-server
+ - my-other-tag
+ disks:
+ - name: disk-2
+ mode: READ_WRITE
+ - name: disk-3
+ mode: READ_ONLY
+ disk_auto_delete: false
+ network: foobar-network
+ subnetwork: foobar-subnetwork-1
+ preemptible: true
+ ip_forward: true
+ service_account_permissions:
+ - storage-full
+ - taskqueue
+ - bigquery
+ - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+
+---
+# Example Playbook
+- name: Compute Engine Instance Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create multiple instances
+ # Basic provisioning example. Create multiple Debian 8 instances in the
+ # us-central1-a Zone of n1-standard-1 machine type.
+ community.general.gce:
+ instance_names: test1,test2,test3
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ metadata : '{ "startup-script" : "apt-get update" }'
+ register: gce
+
+ - name: Save host data
+ ansible.builtin.add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: gce_instances_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for SSH for instances
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 30
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Configure Hosts
+ hosts: gce_instances_ips
+ become: yes
+ become_method: sudo
+ roles:
+ - my-role-one
+ - my-role-two
+ tags:
+ - config
+
+ - name: Delete test-instances
+ # Basic termination of instance.
+ community.general.gce:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ instance_names: "{{ gce.instance_names }}"
+ zone: us-central1-a
+ state: absent
+ tags:
+ - delete
+'''
+
+import socket
+import logging
+
+try:
+ from ast import literal_eval
+
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+from ansible_collections.community.general.plugins.module_utils.gcp import get_valid_location
+from ansible.module_utils.six.moves import reduce
+
+
+def get_instance_info(inst):
+ """Retrieves instance information from an instance object and returns it
+ as a dictionary.
+
+ """
+ metadata = {}
+ if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+ for md in inst.extra['metadata']['items']:
+ metadata[md['key']] = md['value']
+
+ try:
+ netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ except Exception:
+ netname = None
+ try:
+ subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
+ except Exception:
+ subnetname = None
+ if 'disks' in inst.extra:
+ disk_names = [disk_info['source'].split('/')[-1]
+ for disk_info
+ in sorted(inst.extra['disks'],
+ key=lambda disk_info: disk_info['index'])]
+ else:
+ disk_names = []
+
+ if len(inst.public_ips) == 0:
+ public_ip = None
+ else:
+ public_ip = inst.public_ips[0]
+
+ return ({
+ 'image': inst.image is not None and inst.image.split('/')[-1] or None,
+ 'disks': disk_names,
+ 'machine_type': inst.size,
+ 'metadata': metadata,
+ 'name': inst.name,
+ 'network': netname,
+ 'subnetwork': subnetname,
+ 'private_ip': inst.private_ips[0],
+ 'public_ip': public_ip,
+ 'status': ('status' in inst.extra) and inst.extra['status'] or None,
+ 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+ 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+ })
+
+
+def create_instances(module, gce, instance_names, number, lc_zone):
+ """Creates new instances. Attributes other than instance_names are picked
+ up from 'module'
+
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ instance_names: python list of instance names to create
+ number: number of instances to create
+ lc_zone: GCEZone object
+
+ Returns:
+ A tuple of (changed, list of instance-information dictionaries,
+ list of instance names) for the instances that were launched.
+
+ """
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ disks = module.params.get('disks')
+ tags = module.params.get('tags')
+ ip_forward = module.params.get('ip_forward')
+ external_ip = module.params.get('external_ip')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ preemptible = module.params.get('preemptible')
+ disk_size = module.params.get('disk_size')
+ service_account_permissions = module.params.get('service_account_permissions')
+
+ if external_ip == "none":
+ instance_external_ip = None
+ elif external_ip != "ephemeral":
+ instance_external_ip = external_ip
+ try:
+ # check if instance_external_ip is an ip or a name
+ try:
+ socket.inet_aton(instance_external_ip)
+ instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
+ except socket.error:
+ instance_external_ip = gce.ex_get_address(instance_external_ip)
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
+ else:
+ instance_external_ip = external_ip
+
+ new_instances = []
+ changed = False
+
+ lc_disks = []
+ disk_modes = []
+ for i, disk in enumerate(disks or []):
+ if isinstance(disk, dict):
+ lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
+ disk_modes.append(disk['mode'])
+ else:
+ lc_disks.append(gce.ex_get_volume(disk, lc_zone))
+ # boot disk is implicitly READ_WRITE
+ disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+ lc_network = gce.ex_get_network(network)
+ lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
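+ # For example, a metadata value of '{"db":"postgres"}' becomes
+ # {'items': [{'key': 'db', 'value': 'postgres'}]} on libcloud < 0.15,
+ # and is passed through as {'db': 'postgres'} on newer versions.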
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+
+ lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+
+ # These variables all have default values but check just in case
+ if not lc_network or not lc_machine_type or not lc_zone:
+ module.fail_json(msg='Missing required create instance variable',
+ changed=False)
+
+ gce_args = dict(
+ location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_can_ip_forward=ip_forward,
+ external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ if preemptible is not None:
+ gce_args['ex_preemptible'] = preemptible
+ if subnetwork is not None:
+ gce_args['ex_subnetwork'] = subnetwork
+
+ if isinstance(instance_names, str) and not number:
+ instance_names = [instance_names]
+
+ if isinstance(instance_names, str) and number:
+ instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
+ lc_image(), number, **gce_args)
+ for resp in instance_responses:
+ n = resp
+ if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
+ try:
+ n = gce.ex_get_node(n.name, lc_zone)
+ except ResourceNotFoundError:
+ pass
+ else:
+ # At least one node was created, so mark the run as changed.
+ changed = True
+ new_instances.append(n)
+ else:
+ for instance in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.ex_get_volume("%s" % instance, lc_zone)
+ except ResourceNotFoundError:
+ pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
+ gce_args['ex_boot_disk'] = pd
+
+ inst = None
+ try:
+ inst = gce.ex_get_node(instance, lc_zone)
+ except ResourceNotFoundError:
+ inst = gce.create_node(
+ instance, lc_machine_type, lc_image(), **gce_args
+ )
+ changed = True
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (instance, e.value))
+ if inst:
+ new_instances.append(inst)
+
+ for inst in new_instances:
+ for i, lc_disk in enumerate(lc_disks):
+ # Check whether the disk is already attached
+ if (len(inst.extra['disks']) > i):
+ attached_disk = inst.extra['disks'][i]
+ if attached_disk['source'] != lc_disk.extra['selfLink']:
+ module.fail_json(
+ msg=("Disk at index %d does not match: requested=%s found=%s" % (
+ i, lc_disk.extra['selfLink'], attached_disk['source'])))
+ elif attached_disk['mode'] != disk_modes[i]:
+ module.fail_json(
+ msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+ i, disk_modes[i], attached_disk['mode'])))
+ else:
+ continue
+ gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+ # Work around libcloud bug: attached volumes don't get added
+ # to the instance metadata. get_instance_info() only cares about
+ # source and index.
+ if len(inst.extra['disks']) != i + 1:
+ inst.extra['disks'].append(
+ {'source': lc_disk.extra['selfLink'], 'index': i})
+
+ instance_names = []
+ instance_json_data = []
+ for inst in new_instances:
+ d = get_instance_info(inst)
+ instance_names.append(d['name'])
+ instance_json_data.append(d)
+
+ return (changed, instance_json_data, instance_names)
+
+
+def change_instance_state(module, gce, instance_names, number, zone, state):
+ """Changes the state of a list of instances. For example,
+ change from started to stopped, or started to absent.
+
+ module: Ansible module object
+ gce: authenticated GCE connection object
+ instance_names: a list of instance names (or a base name) to act on
+ number: number of instances when instance_names is a base name
+ zone: GCEZone object where the instances reside prior to termination
+ state: 'state' parameter passed into module as argument
+
+ Returns a tuple of (changed, list of affected instance names).
+
+ """
+ changed = False
+ nodes = []
+ state_instance_names = []
+
+ if isinstance(instance_names, str) and number:
+ node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
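+ # e.g. a base name of 'test' with number=3 expands to
+ # ['test-000', 'test-001', 'test-002'].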
+ elif isinstance(instance_names, str) and not number:
+ node_names = [instance_names]
+ else:
+ node_names = instance_names
+
+ for name in node_names:
+ inst = None
+ try:
+ inst = gce.ex_get_node(name, zone)
+ except ResourceNotFoundError:
+ state_instance_names.append(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ else:
+ nodes.append(inst)
+ state_instance_names.append(name)
+
+ if state in ['absent', 'deleted'] and number:
+ changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
+ changed = reduce(lambda x, y: x or y, changed_nodes)
+ else:
+ for node in nodes:
+ if state in ['absent', 'deleted']:
+ gce.destroy_node(node)
+ changed = True
+ elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
+ gce.ex_start_node(node)
+ changed = True
+ elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
+ gce.ex_stop_node(node)
+ changed = True
+
+ return (changed, state_instance_names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(default='debian-8'),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ instance_names=dict(),
+ machine_type=dict(default='n1-standard-1'),
+ metadata=dict(),
+ name=dict(aliases=['base_name']),
+ num_instances=dict(type='int'),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ persistent_boot_disk=dict(type='bool', default=False),
+ disks=dict(type='list'),
+ state=dict(choices=['active', 'present', 'absent', 'deleted',
+ 'started', 'stopped', 'terminated'],
+ default='present'),
+ tags=dict(type='list'),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ disk_auto_delete=dict(type='bool', default=True),
+ disk_size=dict(type='int', default=10),
+ preemptible=dict(type='bool', default=None),
+ ),
+ mutually_exclusive=[('instance_names', 'name')]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ instance_names = module.params.get('instance_names')
+ name = module.params.get('name')
+ number = module.params.get('num_instances')
+ subnetwork = module.params.get('subnetwork')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+ preemptible = module.params.get('preemptible')
+ changed = False
+
+ inames = None
+ if isinstance(instance_names, list):
+ inames = instance_names
+ elif isinstance(instance_names, str):
+ inames = instance_names.split(',')
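+ # e.g. 'test1,test2,test3' -> ['test1', 'test2', 'test3']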
+ if name:
+ inames = name
+ if not inames:
+ module.fail_json(msg='Must specify a "name" or "instance_names"',
+ changed=False)
+ if not zone:
+ module.fail_json(msg='Must specify a "zone"', changed=False)
+
+ lc_zone = get_valid_location(module, gce, zone)
+ if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
+ module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
+ changed=False)
+
+ if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
+ changed=False)
+
+ json_output = {'zone': zone}
+ if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
+ json_output['state'] = state
+ (changed, state_instance_names) = change_instance_state(
+ module, gce, inames, number, lc_zone, state)
+
+ # based on what user specified, return the same variable, although
+ # value could be different if an instance could not be destroyed
+ if instance_names or (name and number):
+ json_output['instance_names'] = state_instance_names
+ elif name:
+ json_output['name'] = name
+
+ elif state in ['active', 'present']:
+ json_output['state'] = 'present'
+ (changed, instance_data, instance_name_list) = create_instances(
+ module, gce, inames, number, lc_zone)
+ json_output['instance_data'] = instance_data
+ if instance_names:
+ json_output['instance_names'] = instance_name_list
+ elif name:
+ json_output['name'] = name
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+class LazyDiskImage:
+ """
+ Object for lazy instantiation of disk image
+ gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible.
+ """
+
+ def __init__(self, module, gce, name, has_pd, family=None, projects=None):
+ self.image = None
+ self.was_called = False
+ self.gce = gce
+ self.name = name
+ self.has_pd = has_pd
+ self.module = module
+ self.family = family
+ self.projects = projects
+
+ def __call__(self):
+ if not self.was_called:
+ self.was_called = True
+ if not self.has_pd:
+ if self.family:
+ self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
+ else:
+ self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
+ if not self.image:
+ self.module.fail_json(msg='image or disks missing for create instance', changed=False)
+ return self.image
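+
+# Usage sketch (illustrative): the expensive ex_get_image() lookup happens at
+# most once, on the first call, and is skipped entirely when a persistent
+# boot disk is supplied:
+#
+#   lc_image = LazyDiskImage(module, gce, 'debian-8', lc_disks)
+#   node = gce.create_node(name, machine_type, lc_image())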
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py
new file mode 100644
index 00000000..b5fd4bf3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gce_eip
+short_description: Create or Destroy Global or Regional External IP addresses.
+description:
+ - Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
+ U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+notes:
+ - Global addresses can only be used with Global Forwarding Rules.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of Address.
+ required: true
+ region:
+ type: str
+ description:
+ - Region to create the address in. Set to 'global' to create a global address.
+ required: true
+ state:
+ type: str
+ description: The state the address should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Create a Global external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-global-ip
+ region: global
+ state: present
+
+- name: Create a Regional external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-regional-ip
+ region: us-east1
+ state: present
+'''
+
+RETURN = '''
+address:
+ description: IP address being operated on
+ returned: always
+ type: str
+ sample: "35.186.222.233"
+name:
+ description: name of the address being operated on
+ returned: always
+ type: str
+ sample: "my-address"
+region:
+ description: The region the address belongs to.
+ returned: always
+ type: str
+ sample: "global"
+'''
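+
+# Sketch of consuming the return values documented above (the 'eip' variable
+# is illustrative; authentication parameters are omitted for brevity):
+#
+#   - community.general.gce_eip:
+#       name: my-global-ip
+#       region: global
+#       state: present
+#     register: eip
+#
+#   - ansible.builtin.debug:
+#       msg: "Reserved {{ eip.address }} in {{ eip.region }}"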
+
+USER_AGENT_VERSION = 'v1'
+USER_AGENT_PRODUCT = 'Ansible-gce_eip'
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+
+
+def get_address(gce, name, region):
+ """
+ Get an Address from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Address.
+ :type name: ``str``
+
+ :param region: Region of the Address (or 'global').
+ :type region: ``str``
+
+ :return: A GCEAddress object or None.
+ :rtype: :class: `GCEAddress` or None
+ """
+ try:
+ return gce.ex_get_address(name=name, region=region)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_address(gce, params):
+ """
+ Create a new Address.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+
+ address = gce.ex_create_address(
+ name=params['name'], region=params['region'])
+
+ if address:
+ changed = True
+ return_data = address.address
+
+ return (changed, return_data)
+
+
+def delete_address(address):
+ """
+ Delete an Address.
+
+ :param address: The Address object to destroy.
+ :type address: :class: `GCEAddress`
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+ if address.destroy():
+ changed = True
+ return_data = address.address
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ region=dict(required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.19+) required for this module.')
+
+ gce = gcp_connect(module, Provider.GCE, get_driver,
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['name'] = module.params.get('name')
+ params['region'] = module.params.get('region')
+
+ changed = False
+ json_output = {'state': params['state']}
+ address = get_address(gce, params['name'], region=params['region'])
+
+ if params['state'] == 'absent':
+ if not address:
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown address: %s" %
+ (params['name']))
+ else:
+ # Delete
+ (changed, json_output['address']) = delete_address(address)
+ else:
+ if not address:
+ # Create
+ (changed, json_output['address']) = create_address(gce,
+ params)
+ else:
+ changed = False
+ json_output['address'] = address.address
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py
new file mode 100644
index 00000000..c4705098
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""An Ansible module to utilize GCE image resources."""
+
+DOCUMENTATION = '''
+---
+module: gce_img
+short_description: utilize GCE image resources
+description:
+    - This module can create and delete GCE private images from a gzip-compressed
+      tarball containing raw disk data, or from an existing detached disk in
+      any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ type: str
+ description:
+ - the name of the image to create or delete
+ required: true
+ description:
+ type: str
+ description:
+ - an optional description
+ family:
+ type: str
+ description:
+ - an optional family name
+ source:
+ type: str
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ state:
+ type: str
+ description:
+ - desired state of the image
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ type: str
+ description:
+ - the zone of the disk specified by source
+ default: "us-central1-a"
+ timeout:
+ type: int
+ description:
+ - timeout for the operation
+ default: 180
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (@supertom)"
+'''
+
+EXAMPLES = '''
+- name: Create an image named test-image from the disk 'test-disk' in zone us-central1-a
+ community.general.gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+- name: Create an image named test-image from a tarball in Google Cloud Storage
+ community.general.gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+- name: Alternatively use the gs scheme
+ community.general.gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+- name: Delete an image named test-image
+ community.general.gce_img:
+ name: test-image
+ state: absent
+'''
+
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+GCS_URI = 'https://storage.googleapis.com/'
+
+
+def create_image(gce, name, module):
+ """Create an image with the specified name."""
+ source = module.params.get('source')
+ zone = module.params.get('zone')
+ desc = module.params.get('description')
+ timeout = module.params.get('timeout')
+ family = module.params.get('family')
+
+ if not source:
+ module.fail_json(msg='Must supply a source', changed=False)
+
+ if source.startswith(GCS_URI):
+ # source is a Google Cloud Storage URI
+ volume = source
+ elif source.startswith('gs://'):
+ # libcloud only accepts https URI.
+ volume = source.replace('gs://', GCS_URI)
+ else:
+ try:
+ volume = gce.ex_get_volume(source, zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
+ changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
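+
+    # For illustration (hypothetical bucket and path): a source of
+    # 'gs://my-bucket/images/disk.tgz' is rewritten above to
+    # 'https://storage.googleapis.com/my-bucket/images/disk.tgz', since
+    # libcloud only accepts the https form of a GCS URI.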
+
+ gce_extra_args = {}
+ if family is not None:
+ gce_extra_args['family'] = family
+
+ old_timeout = gce.connection.timeout
+ try:
+ gce.connection.timeout = timeout
+ gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
+ return True
+ except ResourceExistsError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+ finally:
+ gce.connection.timeout = old_timeout
+
+
+def delete_image(gce, name, module):
+ """Delete a specific image resource by name."""
+ try:
+ gce.ex_delete_image(name)
+ return True
+ except ResourceNotFoundError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ family=dict(),
+ description=dict(),
+ source=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ timeout=dict(type='int', default=180)
+ )
+ )
+
+ if not has_libcloud:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ gce = gce_connect(module)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ family = module.params.get('family')
+ changed = False
+
+ if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
+ changed=False)
+
+ # user wants to create an image.
+ if state == 'present':
+ changed = create_image(gce, name, module)
+
+ # user wants to delete the image.
+ if state == 'absent':
+ changed = delete_image(gce, name, module)
+
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py
new file mode 100644
index 00000000..04ddacce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py
@@ -0,0 +1,605 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_instance_template
+short_description: create or destroy instance templates in Google Compute Engine.
+description:
+  - Creates or destroys Google Compute Engine instance
+    templates on Google Cloud Platform.
+options:
+ state:
+ type: str
+ description:
+ - The desired state for the instance template.
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ type: str
+ description:
+ - The name of the GCE instance template.
+ required: True
+ aliases: [base_name]
+ size:
+ type: str
+ description:
+ - The desired machine type for the instance template.
+ default: "f1-micro"
+ source:
+ type: str
+ description:
+ - A source disk to attach to the instance.
+ Cannot specify both I(image) and I(source).
+ image:
+ type: str
+ description:
+ - The image to use to create the instance.
+        Cannot specify both I(image) and I(source).
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used I(image_family) is ignored.
+ Cannot specify both I(image) and I(source).
+ default: debian-8
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ choices:
+ - pd-standard
+ - pd-ssd
+ default: pd-standard
+ disk_auto_delete:
+ description:
+ - Indicate that the boot disk should be
+ deleted when the Node is deleted.
+ default: true
+ type: bool
+ network:
+ type: str
+ description:
+ - The network to associate with the instance.
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - The Subnetwork resource name for this instance.
+ can_ip_forward:
+ description:
+ - Set to C(yes) to allow instance to
+ send/receive non-matching src/dst packets.
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - The external IP address to use.
+ If C(ephemeral), a new non-static address will be
+        used. If C(none), then no external address will
+ be used. To use an existing static IP address
+ specify address name.
+ default: "ephemeral"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ automatic_restart:
+ description:
+ - Defines whether the instance should be
+ automatically restarted when it is
+ terminated by Compute Engine.
+ type: bool
+ preemptible:
+ description:
+ - Defines whether the instance is preemptible.
+ type: bool
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ description:
+ type: str
+ description:
+ - description of instance template
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ nic_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted networkInterfaces[] structure.
+ disks_gce_struct:
+ type: list
+ description:
+      - Support passing in the GCE-specific
+        formatted disks[] structure. Case sensitive.
+        See U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information.
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ subnetwork_region:
+ type: str
+ description:
+      - Region that the subnetwork resides in. (Required when I(subnetwork) is specified.)
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - JSON credentials strongly preferred.
+author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
+'''
+
+EXAMPLES = '''
+# Usage
+- name: Create instance template named foo
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "your-project-name"
+ credentials_file: "/path/to/your-key.json"
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+
+# Example Playbook
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ - name: Delete instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: absent
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+
+# Example playbook using disks_gce_struct
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ disks_gce_struct:
+ - device_name: /dev/sda
+ boot: true
+ autoDelete: true
+ initializeParams:
+ diskSizeGb: 30
+ diskType: pd-ssd
+ sourceImage: projects/debian-cloud/global/images/family/debian-8
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible.module_utils._text import to_native
+
+
+def get_info(inst):
+ """Retrieves instance template information
+ """
+    return {
+        'name': inst.name,
+        'extra': inst.extra,
+    }
+
+
+def create_instance_template(module, gce):
+ """Create an instance template
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+        tuple of (changed, instance template info, name)
+ """
+ # get info from module
+ name = module.params.get('name')
+ size = module.params.get('size')
+ source = module.params.get('source')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ disk_type = module.params.get('disk_type')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ subnetwork_region = module.params.get('subnetwork_region')
+ can_ip_forward = module.params.get('can_ip_forward')
+ external_ip = module.params.get('external_ip')
+ service_account_permissions = module.params.get(
+ 'service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+ on_host_maintenance = module.params.get('on_host_maintenance')
+ automatic_restart = module.params.get('automatic_restart')
+ preemptible = module.params.get('preemptible')
+ tags = module.params.get('tags')
+ metadata = module.params.get('metadata')
+ description = module.params.get('description')
+ disks_gce_struct = module.params.get('disks_gce_struct')
+ changed = False
+
+ # args of ex_create_instancetemplate
+ gce_args = dict(
+ name="instance",
+ size="f1-micro",
+ source=None,
+ image=None,
+ disk_type='pd-standard',
+ disk_auto_delete=True,
+ network='default',
+ subnetwork=None,
+ can_ip_forward=None,
+ external_ip='ephemeral',
+ service_accounts=None,
+ on_host_maintenance=None,
+ automatic_restart=None,
+ preemptible=None,
+ tags=None,
+ metadata=None,
+ description=None,
+ disks_gce_struct=None,
+ nic_gce_struct=None
+ )
+
+ gce_args['name'] = name
+ gce_args['size'] = size
+
+ if source is not None:
+ gce_args['source'] = source
+
+ if image:
+ gce_args['image'] = image
+ else:
+ if image_family:
+ image = gce.ex_get_image_from_family(image_family)
+ gce_args['image'] = image
+ else:
+ gce_args['image'] = "debian-8"
+
+ gce_args['disk_type'] = disk_type
+ gce_args['disk_auto_delete'] = disk_auto_delete
+
+ gce_network = gce.ex_get_network(network)
+ gce_args['network'] = gce_network
+
+ if subnetwork is not None:
+ gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)
+
+ if can_ip_forward is not None:
+ gce_args['can_ip_forward'] = can_ip_forward
+
+ if external_ip == "ephemeral":
+ instance_external_ip = external_ip
+ elif external_ip == "none":
+ instance_external_ip = None
+ else:
+ try:
+ instance_external_ip = gce.ex_get_address(external_ip)
+        except GoogleBaseError:
+            # Not a known address resource name; treat external_ip as a
+            # literal IP address and pass it through unchanged.
+            instance_external_ip = external_ip
+ gce_args['external_ip'] = instance_external_ip
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP:
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email is not None:
+ ex_sa_perms.append({'email': str(service_account_email)})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+ gce_args['service_accounts'] = ex_sa_perms
+
+ if on_host_maintenance is not None:
+ gce_args['on_host_maintenance'] = on_host_maintenance
+
+ if automatic_restart is not None:
+ gce_args['automatic_restart'] = automatic_restart
+
+ if preemptible is not None:
+ gce_args['preemptible'] = preemptible
+
+ if tags is not None:
+ gce_args['tags'] = tags
+
+ if disks_gce_struct is not None:
+ gce_args['disks_gce_struct'] = disks_gce_struct
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+            except SyntaxError:
+                module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+ gce_args['metadata'] = metadata
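+        # A minimal illustration with hypothetical values: a playbook value of
+        # "{'env': 'test', 'tier': 'web'}" survives literal_eval as a dict and,
+        # on libcloud < 0.15, is wrapped as
+        # {'items': [{'key': 'env', 'value': 'test'},
+        #            {'key': 'tier', 'value': 'web'}]}.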
+
+ if description is not None:
+ gce_args['description'] = description
+
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ except ResourceNotFoundError:
+ try:
+ instance = gce.ex_create_instancetemplate(**gce_args)
+ changed = True
+ except GoogleBaseError as err:
+ module.fail_json(
+ msg='Unexpected error attempting to create instance {0}, error: {1}'
+ .format(
+ instance,
+ err.value
+ )
+ )
+
+ if instance:
+ json_data = get_info(instance)
+ else:
+ module.fail_json(msg="no instance template!")
+
+ return (changed, json_data, name)
+
+
+def delete_instance_template(module, gce):
+ """ Delete instance template.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+        tuple of (changed, instance template info, name)
+ """
+ name = module.params.get('name')
+ current_state = "absent"
+ changed = False
+
+ # get instance template
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ current_state = "present"
+ except GoogleBaseError as e:
+        json_data = dict(msg='instance template does not exist: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ if current_state == "present":
+ rc = instance.destroy()
+ if rc:
+ changed = True
+ else:
+ module.fail_json(
+ msg='instance template destroy failed'
+ )
+
+ json_data = {}
+ return (changed, json_data, name)
+
+
+def module_controller(module, gce):
+ ''' Control module state parameter.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ nothing
+ Exit:
+ AnsibleModule object exit with json data.
+ '''
+ json_output = dict()
+ state = module.params.get("state")
+ if state == "present":
+ (changed, output, name) = create_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+ elif state == "absent":
+ (changed, output, name) = delete_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+
+ module.exit_json(**json_output)
+
+
+def check_if_system_state_would_be_changed(module, gce):
+    ''' Check whether the requested state differs from the current one.
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    Returns:
+        tuple of (changed, human-readable message)
+    '''
+ changed = False
+ current_state = "absent"
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+
+    try:
+        gce.ex_get_instancetemplate(name)
+        current_state = "present"
+    except ResourceNotFoundError:
+        # The template does not exist; current_state stays "absent".
+        pass
+    except GoogleBaseError as e:
+        module.fail_json(msg='GCE get instancetemplate problem: %s' % to_native(e),
+                         exception=traceback.format_exc())
+
+ if current_state != state:
+ changed = True
+
+ if current_state == "absent":
+ if changed:
+ output = 'instance template {0} will be created'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+ if current_state == "present":
+ if changed:
+ output = 'instance template {0} will be destroyed'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+
+ return (changed, output)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ name=dict(required=True, aliases=['base_name']),
+ size=dict(default='f1-micro'),
+ source=dict(),
+ image=dict(),
+ image_family=dict(default='debian-8'),
+ disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
+ disk_auto_delete=dict(type='bool', default=True),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ can_ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ automatic_restart=dict(type='bool', default=None),
+ preemptible=dict(type='bool', default=None),
+ tags=dict(type='list'),
+ metadata=dict(),
+ description=dict(),
+ disks=dict(type='list'),
+ nic_gce_struct=dict(type='list'),
+ project_id=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ subnetwork_region=dict(),
+ disks_gce_struct=dict(type='list')
+ ),
+ mutually_exclusive=[['source', 'image']],
+ required_one_of=[['image', 'image_family']],
+ supports_check_mode=True
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ try:
+ gce = gce_connect(module)
+ except GoogleBaseError as e:
+ module.fail_json(msg='GCE Connection failed %s' % to_native(e), exception=traceback.format_exc())
+
+ if module.check_mode:
+ (changed, output) = check_if_system_state_would_be_changed(module, gce)
+ module.exit_json(
+ changed=changed,
+ msg=output
+ )
+ else:
+ module_controller(module, gce)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py
new file mode 100644
index 00000000..dced7599
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_labels
+short_description: Create, Update or Destroy GCE Labels.
+description:
+ - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
+    When specifying the GCE resource, users may supply either the full URL for
+    the resource (its 'self_link'), or the individual parameters of the
+ resource (type, location, name). Examples for the two options can be
+ seen in the documentation.
+ See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
+ more information about GCE Labels. Labels are gradually being added to
+ more GCE resources, so this module will need to be updated as new
+ resources are added to the GCE (v1) API.
+requirements:
+ - 'python >= 2.6'
+ - 'google-api-python-client >= 1.6.2'
+ - 'google-auth >= 1.0.0'
+ - 'google-auth-httplib2 >= 0.0.2'
+notes:
+ - Labels support resources such as instances, disks, images, etc. See
+ U(https://cloud.google.com/compute/docs/labeling-resources) for the list
+ of resources available in the GCE v1 API (not alpha or beta).
+author:
+ - 'Eric Johnson (@erjohnso) <erjohnso@google.com>'
+options:
+ labels:
+ type: dict
+ description:
+      - A dictionary of labels (key/value pairs) to add to or remove from the resource.
+ required: false
+ resource_url:
+ type: str
+ description:
+ - The 'self_link' for the resource (instance, disk, snapshot, etc)
+ required: false
+ resource_type:
+ type: str
+ description:
+ - The type of resource (instances, disks, snapshots, images)
+ required: false
+ resource_location:
+ type: str
+ description:
+ - The location of resource (global, us-central1-f, etc.)
+ required: false
+ resource_name:
+ type: str
+ description:
+ - The name of resource.
+ required: false
+ state:
+ type: str
+ description: The state the labels should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+      - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Add labels on an existing instance (using resource_url)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: present
+- name: Add labels on an image (using resource params)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_type: images
+ resource_location: global
+ resource_name: my-custom-image
+ state: present
+- name: Remove specified labels from the GCE instance
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ environment: prod
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: absent
+'''
+
+RETURN = '''
+labels:
+ description: List of labels that exist on the resource.
+ returned: Always.
+ type: dict
+  sample: { 'webserver-frontend': 'homepage', 'environment': 'test', 'environment-name': 'kennedy' }
+resource_url:
+ description: The 'self_link' of the GCE resource.
+ returned: Always.
+ type: str
+ sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+resource_type:
+ description: The type of the GCE resource.
+ returned: Always.
+ type: str
+ sample: instances
+resource_location:
+ description: The location of the GCE resource.
+ returned: Always.
+ type: str
+ sample: us-central1-f
+resource_name:
+ description: The name of the GCE resource.
+ returned: Always.
+ type: str
+ sample: my-happy-little-instance
+state:
+ description: state of the labels
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+
+
+UA_PRODUCT = 'ansible-gce_labels'
+UA_VERSION = '0.0.1'
+GCE_API_VERSION = 'v1'
+
+# TODO(all): As Labels are added to more GCE resources, this list will need to
+# be updated (along with some code changes below). The list can *only* include
+# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
+KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
+
+
+def _fetch_resource(client, module):
+ params = module.params
+ if params['resource_url']:
+ if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
+ module.fail_json(
+ msg='Invalid self_link url: %s' % params['resource_url'])
+ else:
+ parts = params['resource_url'].split('/')[8:]
+ if len(parts) == 2:
+ resource_type, resource_name = parts
+ resource_location = 'global'
+ else:
+ resource_location, resource_type, resource_name = parts
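+            # Illustration with a hypothetical self_link: splitting
+            # '.../projects/p/zones/us-central1-f/instances/vm-1' yields
+            # ['us-central1-f', 'instances', 'vm-1'], while a global resource
+            # such as '.../projects/p/global/images/img-1' yields
+            # ['images', 'img-1'] and the location defaults to 'global'.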
+ else:
+ if not params['resource_type'] or not params['resource_location'] \
+ or not params['resource_name']:
+ module.fail_json(msg='Missing required resource params.')
+ resource_type = params['resource_type'].lower()
+ resource_name = params['resource_name'].lower()
+ resource_location = params['resource_location'].lower()
+
+ if resource_type not in KNOWN_RESOURCES:
+ module.fail_json(msg='Unsupported resource_type: %s' % resource_type)
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if resource_type == 'instances':
+ resource = client.instances().get(project=params['project_id'],
+ zone=resource_location,
+ instance=resource_name).execute()
+ elif resource_type == 'disks':
+ resource = client.disks().get(project=params['project_id'],
+ zone=resource_location,
+ disk=resource_name).execute()
+ elif resource_type == 'snapshots':
+ resource = client.snapshots().get(project=params['project_id'],
+ snapshot=resource_name).execute()
+ elif resource_type == 'images':
+ resource = client.images().get(project=params['project_id'],
+ image=resource_name).execute()
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % resource_type)
+
+ return resource.get('labelFingerprint', ''), {
+ 'resource_name': resource.get('name'),
+ 'resource_url': resource.get('selfLink'),
+ 'resource_type': resource_type,
+ 'resource_location': resource_location,
+ 'labels': resource.get('labels', {})
+ }
+
+
+def _set_labels(client, new_labels, module, ri, fingerprint):
+ params = module.params
+ result = err = None
+ labels = {
+ 'labels': new_labels,
+ 'labelFingerprint': fingerprint
+ }
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if ri['resource_type'] == 'instances':
+ req = client.instances().setLabels(project=params['project_id'],
+ instance=ri['resource_name'],
+ zone=ri['resource_location'],
+ body=labels)
+ elif ri['resource_type'] == 'disks':
+ req = client.disks().setLabels(project=params['project_id'],
+ zone=ri['resource_location'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'snapshots':
+ req = client.snapshots().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'images':
+ req = client.images().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])
+
+ # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
+ # method to poll for the async request/operation to complete before
+ # returning. However, during 'beta', we are in an odd state where
+ # API requests must be sent to the 'compute/beta' API, but the python
+ # client library only allows for *Operations.get() requests to be
+ # sent to 'compute/v1' API. The response operation is in the 'beta'
+ # API-scope, but the client library cannot find the operation (404).
+ # result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ # return result, err
+ result = req.execute()
+ return True, err
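+
+# The request body assembled in _set_labels looks like, for hypothetical
+# values,
+#   {'labels': {'env': 'test'}, 'labelFingerprint': '42WmSpB8rSM='}
+# where labelFingerprint must match the fingerprint most recently read from
+# the resource, or the API rejects the update as stale.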
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ labels=dict(required=False, type='dict', default={}),
+ resource_url=dict(required=False, type='str'),
+ resource_name=dict(required=False, type='str'),
+ resource_location=dict(required=False, type='str'),
+ resource_type=dict(required=False, type='str'),
+ project_id=dict()
+ ),
+ required_together=[
+ ['resource_name', 'resource_location', 'resource_type']
+ ],
+ mutually_exclusive=[
+ ['resource_url', 'resource_name'],
+ ['resource_url', 'resource_location'],
+ ['resource_url', 'resource_type']
+ ]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ client, cparams = get_google_api_client(module, 'compute',
+ user_agent_product=UA_PRODUCT,
+ user_agent_version=UA_VERSION,
+ api_version=GCE_API_VERSION)
+
+ # Get current resource info including labelFingerprint
+ fingerprint, resource_info = _fetch_resource(client, module)
+ new_labels = resource_info['labels'].copy()
+
+ update_needed = False
+ if module.params['state'] == 'absent':
+ for k, v in module.params['labels'].items():
+ if k in new_labels:
+ if new_labels[k] == v:
+ update_needed = True
+ new_labels.pop(k, None)
+ else:
+ module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
+ else:
+ for k, v in module.params['labels'].items():
+ if k not in new_labels:
+ update_needed = True
+ new_labels[k] = v
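+
+    # For example (hypothetical values): with existing labels {'env': 'test'}
+    # and module labels {'tier': 'web'} in state=present, new_labels becomes
+    # {'env': 'test', 'tier': 'web'} and an update is scheduled; in
+    # state=absent, removing {'env': 'test'} empties the dict, while
+    # {'env': 'prod'} would fail as an unmatched label pair.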
+
+ changed = False
+ json_output = {'state': module.params['state']}
+ if update_needed:
+ changed, err = _set_labels(client, new_labels, module, resource_info,
+ fingerprint)
+ json_output['changed'] = changed
+
+ # TODO(erjohnso): probably want to re-fetch the resource to return the
+ # new labelFingerprint, check that desired labels match updated labels.
+ # BUT! Will need to wait for setLabels() to hit v1 API so we can use the
+ # GCPUtils feature to poll for the operation to be complete. For now,
+ # we'll just update the output with what we have from the original
+ # state of the resource.
+ json_output.update(resource_info)
+ json_output.update(module.params)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py
new file mode 100644
index 00000000..50e26a58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_lb
+short_description: create/destroy GCE load-balancer resources
+description:
+ - This module can create and destroy Google Compute Engine C(loadbalancer)
+ and C(httphealthcheck) resources. The primary LB resource is the
+ C(load_balancer) resource and the health check parameters are all
+ prefixed with I(httphealthcheck).
+ The full documentation for Google Compute Engine load balancing is at
+ U(https://developers.google.com/compute/docs/load-balancing/). However,
+    this Ansible module simplifies the configuration by following the
+ libcloud model.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ httphealthcheck_name:
+ type: str
+ description:
+ - the name identifier for the HTTP health check
+ httphealthcheck_port:
+ type: int
+ description:
+ - the TCP port to use for HTTP health checking
+ default: 80
+ httphealthcheck_path:
+ type: str
+ description:
+ - the url path to use for HTTP health checking
+ default: "/"
+ httphealthcheck_interval:
+ type: int
+ description:
+ - the duration in seconds between each health check request
+ default: 5
+ httphealthcheck_timeout:
+ type: int
+ description:
+ - the timeout in seconds before a request is considered a failed check
+ default: 5
+ httphealthcheck_unhealthy_count:
+ type: int
+ description:
+ - number of consecutive failed checks before marking a node unhealthy
+ default: 2
+ httphealthcheck_healthy_count:
+ type: int
+ description:
+ - number of consecutive successful checks before marking a node healthy
+ default: 2
+ httphealthcheck_host:
+ type: str
+ description:
+ - host header to pass through on HTTP check requests
+ name:
+ type: str
+ description:
+ - name of the load-balancer resource
+ protocol:
+ type: str
+ description:
+ - the protocol used for the load-balancer packet forwarding, tcp or udp
+ - "the available choices are: C(tcp) or C(udp)."
+ default: "tcp"
+ region:
+ type: str
+ description:
+ - the GCE region where the load-balancer is defined
+ external_ip:
+ type: str
+ description:
+ - the external static IPv4 (or auto-assigned) address for the LB
+ port_range:
+ type: str
+ description:
+      - the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
+ members:
+ type: list
+ description:
+      - a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
+ state:
+ type: str
+ description:
+ - desired state of the LB
+ - "the available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple example of creating a new LB, adding members, and a health check
+ local_action:
+ module: gce_lb
+ name: testlb
+ region: us-central1
+ members: ["us-central1-a/www-a", "us-central1-b/www-b"]
+ httphealthcheck_name: hc
+ httphealthcheck_port: 80
+ httphealthcheck_path: "/up"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.loadbalancer.types import Provider as Provider_lb
+ from libcloud.loadbalancer.providers import get_driver as get_driver_lb
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import USER_AGENT_PRODUCT, USER_AGENT_VERSION, gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ httphealthcheck_name=dict(),
+ httphealthcheck_port=dict(default=80, type='int'),
+ httphealthcheck_path=dict(default='/'),
+ httphealthcheck_interval=dict(default=5, type='int'),
+ httphealthcheck_timeout=dict(default=5, type='int'),
+ httphealthcheck_unhealthy_count=dict(default=2, type='int'),
+ httphealthcheck_healthy_count=dict(default=2, type='int'),
+ httphealthcheck_host=dict(),
+ name=dict(),
+ protocol=dict(default='tcp'),
+ region=dict(),
+ external_ip=dict(),
+ port_range=dict(),
+ members=dict(type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
+
+ gce = gce_connect(module)
+
+ httphealthcheck_name = module.params.get('httphealthcheck_name')
+ httphealthcheck_port = module.params.get('httphealthcheck_port')
+ httphealthcheck_path = module.params.get('httphealthcheck_path')
+ httphealthcheck_interval = module.params.get('httphealthcheck_interval')
+ httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
+ httphealthcheck_unhealthy_count = module.params.get('httphealthcheck_unhealthy_count')
+ httphealthcheck_healthy_count = module.params.get('httphealthcheck_healthy_count')
+ httphealthcheck_host = module.params.get('httphealthcheck_host')
+ name = module.params.get('name')
+ protocol = module.params.get('protocol')
+ region = module.params.get('region')
+ external_ip = module.params.get('external_ip')
+ port_range = module.params.get('port_range')
+ members = module.params.get('members')
+ state = module.params.get('state')
+
+ try:
+ gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
+ gcelb.connection.user_agent_append("%s/%s" % (
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION))
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ changed = False
+ json_output = {'name': name, 'state': state}
+
+ if not name and not httphealthcheck_name:
+ module.fail_json(msg='Nothing to do, please specify a "name" ' + 'or "httphealthcheck_name" parameter', changed=False)
+
+ if state in ['active', 'present']:
+ # first, create the httphealthcheck if requested
+ hc = None
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
+ host=httphealthcheck_host, path=httphealthcheck_path,
+ port=httphealthcheck_port,
+ interval=httphealthcheck_interval,
+ timeout=httphealthcheck_timeout,
+ unhealthy_threshold=httphealthcheck_unhealthy_count,
+ healthy_threshold=httphealthcheck_healthy_count)
+ changed = True
+ except ResourceExistsError:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if hc is not None:
+ json_output['httphealthcheck_host'] = hc.extra['host']
+ json_output['httphealthcheck_path'] = hc.path
+ json_output['httphealthcheck_port'] = hc.port
+ json_output['httphealthcheck_interval'] = hc.interval
+ json_output['httphealthcheck_timeout'] = hc.timeout
+ json_output['httphealthcheck_unhealthy_count'] = hc.unhealthy_threshold
+ json_output['httphealthcheck_healthy_count'] = hc.healthy_threshold
+
+ # create the forwarding rule (and target pool under the hood)
+ lb = None
+ if name:
+ if not region:
+ module.fail_json(msg='Missing required region name',
+ changed=False)
+ nodes = []
+ output_nodes = []
+ json_output['name'] = name
+ # members is a python list of 'zone/inst' strings
+ if members:
+ for node in members:
+ try:
+ zone, node_name = node.split('/')
+ nodes.append(gce.ex_get_node(node_name, zone))
+ output_nodes.append(node)
+ except Exception:
+ # skip nodes that are badly formatted or don't exist
+ pass
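+            # Illustration (hypothetical member): 'us-central1-a/www-a' splits
+            # into zone 'us-central1-a' and node name 'www-a'; entries that do
+            # not match 'zone/name' or that reference missing nodes are skipped.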
+ try:
+ if hc is not None:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_healthchecks=[hc],
+ ex_address=external_ip)
+ else:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_address=external_ip)
+ changed = True
+ except ResourceExistsError:
+ lb = gcelb.get_balancer(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if lb is not None:
+ json_output['members'] = output_nodes
+ json_output['protocol'] = protocol
+ json_output['region'] = region
+ json_output['external_ip'] = lb.ip
+ json_output['port_range'] = lb.port
+ hc_names = []
+ if 'healthchecks' in lb.extra:
+ for hc in lb.extra['healthchecks']:
+ hc_names.append(hc.name)
+ json_output['httphealthchecks'] = hc_names
+
+ if state in ['absent', 'deleted']:
+ # first, delete the load balancer (forwarding rule and target pool)
+ # if specified.
+ if name:
+ json_output['name'] = name
+ try:
+ lb = gcelb.get_balancer(name)
+ gcelb.destroy_balancer(lb)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # destroy the health check if specified
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ gce.ex_destroy_healthcheck(hc)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py
new file mode 100644
index 00000000..42db08bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py
@@ -0,0 +1,904 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_mig
+short_description: Create, Update or Destroy a Managed Instance Group (MIG).
+description:
+ - Create, Update or Destroy a Managed Instance Group (MIG). See
+ U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.2.0"
+notes:
+  - Resizing and recreating VMs are also supported.
+ - An existing instance template is required in order to create a
+ Managed Instance Group.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of the Managed Instance Group.
+ required: true
+ template:
+ type: str
+ description:
+ - Instance Template to be used in creating the VMs. See
+ U(https://cloud.google.com/compute/docs/instance-templates) to learn more
+ about Instance Templates. Required for creating MIGs.
+ size:
+ type: int
+ description:
+ - Size of Managed Instance Group. If MIG already exists, it will be
+ resized to the number provided here. Required for creating MIGs.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - GCE project ID
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+ zone:
+ type: str
+ description:
+ - The GCE zone to use for this Managed Instance Group.
+ required: true
+ autoscaling:
+ type: dict
+ description:
+      - A dictionary of configuration for the autoscaler. I(enabled) (bool),
+        I(name) (str) and I(policy.max_instances) (int) are required fields if
+        autoscaling is used. See
+        U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
+        on Autoscaling.
+ named_ports:
+ type: list
+ description:
+      - Define named ports that backend services can forward data to. Format is a list of
+ name:port dictionaries.
+ recreate_instances:
+ type: bool
+ default: no
+ description:
+ - Recreate MIG instances.
+'''
+
+EXAMPLES = '''
+# Following playbook creates, rebuilds instances, resizes and then deletes a MIG.
+# Notes:
+# - Two valid Instance Templates must exist in your GCE project in order to run
+# this playbook. Change the fields to match the templates used in your
+# project.
+# - The use of the 'pause' module is not required, it is just for convenience.
+- name: Managed Instance Group Example
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Create MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 1
+ template: my-instance-template-1
+ named_ports:
+ - name: http
+ port: 80
+ - name: foobar
+ port: 82
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Recreate MIG Instances with Instance Template change.
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ template: my-instance-template-2-small
+ recreate_instances: yes
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Resize MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+
+ - name: Update MIG with Autoscaler
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+ template: my-instance-template-2-small
+ recreate_instances: yes
+ autoscaling:
+ enabled: yes
+ name: my-autoscaler
+ policy:
+ min_instances: 2
+ max_instances: 5
+ cool_down_period: 37
+ cpu_utilization:
+ target: .39
+ load_balancing_utilization:
+ target: 0.4
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Delete MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: absent
+ autoscaling:
+ enabled: no
+ name: my-autoscaler
+'''
+RETURN = '''
+zone:
+ description: Zone in which to launch MIG.
+ returned: always
+ type: str
+ sample: "us-central1-b"
+
+template:
+ description: Instance Template to use for VMs. Must exist prior to using with MIG.
+ returned: changed
+ type: str
+ sample: "my-instance-template"
+
+name:
+ description: Name of the Managed Instance Group.
+ returned: changed
+ type: str
+ sample: "my-managed-instance-group"
+
+named_ports:
+ description: list of named ports acted upon
+ returned: when named_ports are initially set or updated
+ type: list
+ sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
+
+size:
+ description: Number of VMs in Managed Instance Group.
+ returned: changed
+ type: int
+ sample: 4
+
+created_instances:
+ description: Names of instances created.
+ returned: When instances are created.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+deleted_instances:
+ description: Names of instances deleted.
+ returned: When instances are deleted.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_created_instances:
+ description: Names of instances created during resizing.
+ returned: When a resize results in the creation of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_deleted_instances:
+ description: Names of instances deleted during resizing.
+ returned: When a resize results in the deletion of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+recreated_instances:
+ description: Names of instances recreated.
+ returned: When instances are recreated.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+created_autoscaler:
+ description: True if Autoscaler was attempted and created. False otherwise.
+ returned: When the creation of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+updated_autoscaler:
+ description: True if an Autoscaler update was attempted and succeeded.
+ False returned if update failed.
+ returned: When the update of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+deleted_autoscaler:
+ description: True if an Autoscaler delete attempted and succeeded.
+ False returned if delete failed.
+ returned: When the delete of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+set_named_ports:
+ description: True if the named_ports have been set
+ returned: named_ports have been set
+ type: bool
+ sample: true
+
+updated_named_ports:
+ description: True if the named_ports have been updated
+ returned: named_ports have been updated
+ type: bool
+ sample: true
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+ [{'name': str, 'required': True/False', 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+    :return: Tuple of (bool, str); (True, '') if the params are valid,
+             otherwise (False, error message).
+    :rtype: ``(``bool``, ``str``)``
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if d['required'] is True:
+ return (False, "%s is required and must be of type: %s" %
+ (d['name'], str(d['type'])))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ return (False,
+ "%s must be of type: %s" % (d['name'], str(d['type'])))
+
+ return (True, '')
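+
+# A minimal usage sketch with hypothetical fields: requiring an integer 'port'
+#   (ok, msg) = _check_params({'port': 80},
+#                             [{'name': 'port', 'required': True, 'type': int}])
+# returns (True, ''), while {'port': '80'} returns
+# (False, "port must be of type: ...") from the isinstance check.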
+
+
+def _validate_autoscaling_params(params):
+ """
+ Validate that the minimum configuration is present for autoscaling.
+
+ :param params: Ansible dictionary containing autoscaling configuration
+ It is expected that autoscaling config will be found at the
+ key 'autoscaling'.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if autoscaler
+ is valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['autoscaling']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['autoscaling'], dict):
+ return (False,
+ 'autoscaling: configuration expected to be a dictionary.')
+
+ # check first-level required fields
+ as_req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'enabled', 'required': True, 'type': bool},
+ {'name': 'policy', 'required': True, 'type': dict}
+ ] # yapf: disable
+
+ (as_req_valid, as_req_msg) = _check_params(params['autoscaling'],
+ as_req_fields)
+ if not as_req_valid:
+ return (False, as_req_msg)
+
+ # check policy configuration
+ as_policy_fields = [
+ {'name': 'max_instances', 'required': True, 'type': int},
+ {'name': 'min_instances', 'required': False, 'type': int},
+ {'name': 'cool_down_period', 'required': False, 'type': int}
+ ] # yapf: disable
+
+ (as_policy_valid, as_policy_msg) = _check_params(
+ params['autoscaling']['policy'], as_policy_fields)
+ if not as_policy_valid:
+ return (False, as_policy_msg)
+
+ # TODO(supertom): check utilization fields
+
+ return (True, '')
+
+
+def _validate_named_port_params(params):
+ """
+ Validate the named ports parameters
+
+ :param params: Ansible dictionary containing named_ports configuration
+ It is expected that autoscaling config will be found at the
+ key 'named_ports'. That key should contain a list of
+ {name : port} dictionaries.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if params
+ are valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['named_ports']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['named_ports'], list):
+ return (False, 'named_ports: expected list of name:port dictionaries.')
+ req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'port', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ for np in params['named_ports']:
+ (valid_named_ports, np_msg) = _check_params(np, req_fields)
+ if not valid_named_ports:
+ return (False, np_msg)
+
+ return (True, '')
+
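+# Illustrative sketch (comment only): a 'named_ports' value that passes
+# _validate_named_port_params above.
+#
+# named_ports = [{'name': 'http', 'port': 80}, {'name': 'https', 'port': 443}]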
+
+def _get_instance_list(mig, field='name', filter_list=None):
+ """
+ Helper to grab field from instances response.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param field: Field name in list_managed_instances response. Defaults
+ to 'name'.
+ :type field: ``str``
+
+ :param filter_list: list of 'currentAction' strings to filter on. Only
+ items that match a currentAction in this list will
+ be returned. Default is "['NONE']".
+ :type filter_list: ``list`` of ``str``
+
+ :return: List of strings from list_managed_instances response.
+ :rtype: ``list``
+ """
+ filter_list = ['NONE'] if filter_list is None else filter_list
+
+ return [x[field] for x in mig.list_managed_instances()
+ if x['currentAction'] in filter_list]
+
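+# Illustrative sketch (comment only): list the names of instances currently
+# being created in a MIG; the returned instance names are hypothetical.
+#
+# _get_instance_list(mig, filter_list=['CREATING'])
+# # -> ['my-mig-1a2b', 'my-mig-3c4d']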
+
+def _gen_gce_as_policy(as_params):
+ """
+ Take Autoscaler params and generate GCE-compatible policy.
+
+ :param as_params: Dictionary in Ansible-playbook format
+ containing policy arguments.
+ :type as_params: ``dict``
+
+ :return: GCE-compatible policy dictionary
+ :rtype: ``dict``
+ """
+ asp_data = {}
+ asp_data['maxNumReplicas'] = as_params['max_instances']
+ if 'min_instances' in as_params:
+ asp_data['minNumReplicas'] = as_params['min_instances']
+ if 'cool_down_period' in as_params:
+ asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+ if 'cpu_utilization' in as_params and 'target' in as_params[
+ 'cpu_utilization']:
+ asp_data['cpuUtilization'] = {'utilizationTarget':
+ as_params['cpu_utilization']['target']}
+ if 'load_balancing_utilization' in as_params and 'target' in as_params[
+ 'load_balancing_utilization']:
+ asp_data['loadBalancingUtilization'] = {
+ 'utilizationTarget':
+ as_params['load_balancing_utilization']['target']
+ }
+
+ return asp_data
+
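+# Illustrative sketch (comment only): Ansible-style policy keys on input map
+# to the GCE-style keys on output.
+#
+# _gen_gce_as_policy({'max_instances': 5, 'min_instances': 2,
+#                     'cool_down_period': 60,
+#                     'cpu_utilization': {'target': 0.6}})
+# # -> {'maxNumReplicas': 5, 'minNumReplicas': 2, 'coolDownPeriodSec': 60,
+# #     'cpuUtilization': {'utilizationTarget': 0.6}}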
+
+def create_autoscaler(gce, mig, params):
+ """
+ Create a new Autoscaler for a MIG.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param mig: An initialized GCEInstanceGroupManager.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if the Autoscaler was created, False otherwise.
+ :rtype: ``bool``
+ """
+ changed = False
+ as_policy = _gen_gce_as_policy(params['policy'])
+ autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone,
+ instance_group=mig, policy=as_policy)
+ if autoscaler:
+ changed = True
+ return changed
+
+
+def update_autoscaler(gce, autoscaler, params):
+ """
+ Update an Autoscaler.
+
+ Takes an existing Autoscaler object, and updates it with
+ the supplied params before calling libcloud's update method.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param autoscaler: An initialized GCEAutoscaler.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if changes, False otherwise.
+ :rtype: ``bool``
+ """
+ as_policy = _gen_gce_as_policy(params['policy'])
+ if autoscaler.policy != as_policy:
+ autoscaler.policy = as_policy
+ autoscaler = gce.ex_update_autoscaler(autoscaler)
+ if autoscaler:
+ return True
+ return False
+
+
+def delete_autoscaler(autoscaler):
+ """
+ Delete an Autoscaler. Does not affect the MIG.
+
+ :param autoscaler: An initialized GCEAutoscaler object.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :return: True if the Autoscaler was deleted, False otherwise.
+ :rtype: ``bool``
+ """
+ changed = False
+ if autoscaler.destroy():
+ changed = True
+ return changed
+
+
+def get_autoscaler(gce, name, zone):
+ """
+ Get an Autoscaler from GCE.
+
+ If the Autoscaler is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Autoscaler.
+ :type name: ``str``
+
+ :param zone: Zone that the Autoscaler is located in.
+ :type zone: ``str``
+
+ :return: A GCEAutoscaler object or None.
+ :rtype: :class: `GCEAutoscaler` or None
+ """
+ try:
+ # Does the Autoscaler already exist?
+ return gce.ex_get_autoscaler(name, zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_mig(gce, params):
+ """
+ Create a new Managed Instance Group.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING']
+
+ mig = gce.ex_create_instancegroupmanager(
+ name=params['name'], size=params['size'], template=params['template'],
+ zone=params['zone'])
+
+ if mig:
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def delete_mig(mig):
+ """
+ Delete a Managed Instance Group. All VMs in that MIG are also deleted.
+
+ :param mig: Managed Instance Group Object from Libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
+ 'ABANDONING', 'RESTARTING', 'REFRESHING']
+ instance_names = _get_instance_list(mig, filter_list=actions_filter)
+ if mig.destroy():
+ changed = True
+ return_data = instance_names
+
+ return (changed, return_data)
+
+
+def recreate_instances_in_mig(mig):
+ """
+ Recreate the instances for a Managed Instance Group.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['RECREATING']
+
+ if mig.recreate_instances():
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def resize_mig(mig, size):
+ """
+ Resize a Managed Instance Group.
+
+ Based on the size provided, GCE will automatically create and delete
+ VMs as needed.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param size: Desired absolute size of the MIG.
+ :type size: ``int``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING', 'DELETING']
+
+ if mig.resize(size):
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def get_mig(gce, name, zone):
+ """
+ Get a Managed Instance Group from GCE.
+
+ If the MIG is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Managed Instance Group.
+ :type name: ``str``
+
+ :param zone: Zone that the Managed Instance Group is located in.
+ :type zone: ``str``
+
+ :return: A GCEInstanceGroupManager object or None.
+ :rtype: :class: `GCEInstanceGroupManager` or None
+ """
+ try:
+ # Does the MIG already exist?
+ return gce.ex_get_instancegroupmanager(name=name, zone=zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def update_named_ports(mig, named_ports):
+ """
+ Set the named ports on a Managed Instance Group.
+
+ Sort the existing and the new named ports; if they differ, update.
+ This also implicitly allows for the removal of named ports.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param named_ports: list of dictionaries in the format of {'name': ..., 'port': ...}
+ :type named_ports: ``list`` of ``dict``
+
+ :return: True if the named ports were updated, False otherwise.
+ :rtype: ``bool``
+ """
+ changed = False
+ existing_ports = []
+ new_ports = []
+ if hasattr(mig.instance_group, 'named_ports'):
+ existing_ports = sorted(mig.instance_group.named_ports,
+ key=lambda x: x['name'])
+ if named_ports is not None:
+ new_ports = sorted(named_ports, key=lambda x: x['name'])
+
+ if existing_ports != new_ports:
+ if mig.instance_group.set_named_ports(named_ports):
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ template=dict(),
+ recreate_instances=dict(type='bool', default=False),
+ # Do not set a default size here. For Create and some update
+ # operations, it is required and should be explicitly set.
+ # Below, we set it to the existing value if it has not been set.
+ size=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ zone=dict(required=True),
+ autoscaling=dict(type='dict', default=None),
+ named_ports=dict(type='list', default=None),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['zone'] = module.params.get('zone')
+ params['name'] = module.params.get('name')
+ params['size'] = module.params.get('size')
+ params['template'] = module.params.get('template')
+ params['recreate_instances'] = module.params.get('recreate_instances')
+ params['autoscaling'] = module.params.get('autoscaling', None)
+ params['named_ports'] = module.params.get('named_ports', None)
+
+ (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
+ if not valid_autoscaling:
+ module.fail_json(msg=as_msg, changed=False)
+
+ if params['named_ports'] is not None and not hasattr(
+ gce, 'ex_instancegroup_set_named_ports'):
+ module.fail_json(
+ msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
+ changed=False)
+
+ (valid_named_ports, np_msg) = _validate_named_port_params(params)
+ if not valid_named_ports:
+ module.fail_json(msg=np_msg, changed=False)
+
+ changed = False
+ json_output = {'state': params['state'], 'zone': params['zone']}
+ mig = get_mig(gce, params['name'], params['zone'])
+
+ if not mig:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown managed instance group: %s" %
+ (params['name']))
+ else:
+ # Create MIG
+ req_create_fields = [
+ {'name': 'template', 'required': True, 'type': str},
+ {'name': 'size', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ (valid_create_fields, valid_create_msg) = _check_params(
+ params, req_create_fields)
+ if not valid_create_fields:
+ module.fail_json(msg=valid_create_msg, changed=False)
+
+ (changed, json_output['created_instances']) = create_mig(gce,
+ params)
+ if params['autoscaling'] and params['autoscaling'][
+ 'enabled'] is True:
+ # Fetch newly-created MIG and create Autoscaler for it.
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create '
+ 'autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler for MIG %s '
+ 'in zone: %s' % (params['name'], params['zone']),
+ changed=False)
+
+ json_output['created_autoscaler'] = True
+ # Add named ports if available
+ if params['named_ports']:
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to set '
+ 'named ports in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+ json_output['set_named_ports'] = update_named_ports(
+ mig, params['named_ports'])
+ if json_output['set_named_ports']:
+ json_output['named_ports'] = params['named_ports']
+
+ elif params['state'] == 'absent':
+ # Delete MIG
+
+ # First, check and remove the autoscaler, if present.
+ # Note: multiple autoscalers can be associated to a single MIG. We
+ # only handle the one that is named, but we might want to think about this.
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ module.fail_json(msg='Unable to fetch autoscaler %s to delete '
+ 'in zone: %s' % (params['autoscaling']['name'], params['zone']),
+ changed=False)
+
+ changed = delete_autoscaler(autoscaler)
+ json_output['deleted_autoscaler'] = changed
+
+ # Now, delete the MIG.
+ (changed, json_output['deleted_instances']) = delete_mig(mig)
+
+ else:
+ # Update MIG
+
+ # If we're going to update a MIG, we need a size and template values.
+ # If not specified, we use the values from the existing MIG.
+ if not params['size']:
+ params['size'] = mig.size
+
+ if not params['template']:
+ params['template'] = mig.template.name
+
+ if params['template'] != mig.template.name:
+ # Update Instance Template.
+ new_template = gce.ex_get_instancetemplate(params['template'])
+ mig.set_instancetemplate(new_template)
+ json_output['updated_instancetemplate'] = True
+ changed = True
+ if params['recreate_instances'] is True:
+ # Recreate Instances.
+ (changed, json_output['recreated_instances']
+ ) = recreate_instances_in_mig(mig)
+
+ if params['size'] != mig.size:
+ # Resize MIG.
+ keystr = 'created' if params['size'] > mig.size else 'deleted'
+ (changed, json_output['resize_%s_instances' %
+ (keystr)]) = resize_mig(mig, params['size'])
+
+ # Update Autoscaler
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ # Try to create autoscaler.
+ # Note: this isn't perfect, if the autoscaler name has changed
+ # we wouldn't know that here.
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler %s for existing MIG %s '
+ 'in zone: %s' % (params['autoscaling']['name'],
+ params['name'], params['zone']),
+ changed=False)
+ json_output['created_autoscaler'] = True
+ changed = True
+ else:
+ if params['autoscaling']['enabled'] is False:
+ # Delete autoscaler
+ changed = delete_autoscaler(autoscaler)
+ json_output['delete_autoscaler'] = changed
+ else:
+ # Update policy, etc.
+ changed = update_autoscaler(gce, autoscaler,
+ params['autoscaling'])
+ json_output['updated_autoscaler'] = changed
+ named_ports = params['named_ports'] or []
+ json_output['updated_named_ports'] = update_named_ports(mig,
+ named_ports)
+ if json_output['updated_named_ports']:
+ json_output['named_ports'] = named_ports
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py
new file mode 100644
index 00000000..48971ae7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_net
+short_description: create/destroy GCE networks and firewall rules
+description:
+ - This module can create and destroy Google Compute Engine networks and
+ firewall rules U(https://cloud.google.com/compute/docs/networking).
+ The I(name) parameter is reserved for referencing a network while the
+ I(fwname) parameter is used to reference firewall rules.
+ IPv4 Address ranges must be specified using the CIDR
+ U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ allowed:
+ type: str
+ description:
+ - the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25)).
+ This parameter is mandatory when creating or updating a firewall rule.
+ ipv4_range:
+ type: str
+ description:
+ - the IPv4 address range in CIDR notation for the network.
+ This parameter is not required when referencing an existing network via the I(name) parameter,
+ but it is required when creating a new network.
+ fwname:
+ type: str
+ description:
+ - name of the firewall rule
+ name:
+ type: str
+ description:
+ - name of the network
+ src_range:
+ type: list
+ description:
+ - the source IPv4 address range in CIDR notation
+ default: []
+ src_tags:
+ type: list
+ description:
+ - the source instance tags for creating a firewall rule
+ default: []
+ target_tags:
+ type: list
+ description:
+ - the target instance tags for creating a firewall rule
+ default: []
+ state:
+ type: str
+ description:
+ - desired state of the network or firewall
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ mode:
+ type: str
+ description:
+ - network mode for Google Cloud.
+ C(legacy) indicates a network with an IP address range;
+ C(auto) automatically generates subnetworks in different regions;
+ C(custom) uses networks to group subnets of user-specified IP address ranges.
+ See U(https://cloud.google.com/compute/docs/networking#network_types).
+ default: "legacy"
+ choices: ["legacy", "auto", "custom"]
+ subnet_name:
+ type: str
+ description:
+ - name of subnet to create
+ subnet_region:
+ type: str
+ description:
+ - region of subnet to create
+ subnet_desc:
+ type: str
+ description:
+ - description of subnet to create
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
+'''
+
+EXAMPLES = '''
+# Create a 'legacy' Network
+- name: Create Legacy Network
+ community.general.gce_net:
+ name: legacynet
+ ipv4_range: '10.24.17.0/24'
+ mode: legacy
+ state: present
+
+# Create an 'auto' Network
+- name: Create Auto Network
+ community.general.gce_net:
+ name: autonet
+ mode: auto
+ state: present
+
+# Create a 'custom' Network
+- name: Create Custom Network
+ community.general.gce_net:
+ name: customnet
+ mode: custom
+ subnet_name: "customsubnet"
+ subnet_region: us-east1
+ ipv4_range: '10.240.16.0/24'
+ state: "present"
+
+# Create Firewall Rule with Source Tags
+- name: Create Firewall Rule w/Source Tags
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_tags: "foo,bar"
+
+# Create Firewall Rule with Source Range
+- name: Create Firewall Rule w/Source Range
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_range: ['10.1.1.1/32']
+
+# Create Custom Subnetwork
+- name: Create Custom Subnetwork
+ community.general.gce_net:
+ name: privatenet
+ mode: custom
+ subnet_name: subnet_example
+ subnet_region: us-central1
+ ipv4_range: '10.0.0.0/16'
+'''
+
+RETURN = '''
+allowed:
+ description: Rules (ports and protocols) specified by this firewall rule.
+ returned: When specified
+ type: str
+ sample: "tcp:80;icmp"
+
+fwname:
+ description: Name of the firewall rule.
+ returned: When specified
+ type: str
+ sample: "my-fwname"
+
+ipv4_range:
+ description: IPv4 range of the specified network or subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "10.0.0.0/16"
+
+name:
+ description: Name of the network.
+ returned: always
+ type: str
+ sample: "my-network"
+
+src_range:
+ description: IP address blocks a firewall rule applies to.
+ returned: when specified
+ type: list
+ sample: [ '10.1.1.12/8' ]
+
+src_tags:
+ description: Instance Tags firewall rule applies to.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+
+state:
+ description: State of the item operated on.
+ returned: always
+ type: str
+ sample: "present"
+
+subnet_name:
+ description: Name of the subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "my-subnetwork"
+
+subnet_region:
+ description: Region of the specified subnet.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "us-east1"
+
+target_tags:
+ description: Instance Tags with these tags receive traffic allowed by firewall rule.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+'''
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def format_allowed_section(allowed):
+ """Format each section of the allowed list"""
+ if allowed.count(":") == 0:
+ protocol = allowed
+ ports = []
+ elif allowed.count(":") == 1:
+ protocol, ports = allowed.split(":")
+ else:
+ return []
+ if ports.count(","):
+ ports = ports.split(",")
+ elif ports:
+ ports = [ports]
+ return_val = {"IPProtocol": protocol}
+ if ports:
+ return_val["ports"] = ports
+ return return_val
+
+
+def format_allowed(allowed):
+ """Format the 'allowed' value so that it is GCE compatible."""
+ return_value = []
+ if allowed.count(";") == 0:
+ return [format_allowed_section(allowed)]
+ else:
+ sections = allowed.split(";")
+ for section in sections:
+ return_value.append(format_allowed_section(section))
+ return return_value
+
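+# Illustrative sketch (comment only): sample conversions performed by the
+# two helpers above.
+#
+# format_allowed_section('tcp:80,443')
+# # -> {'IPProtocol': 'tcp', 'ports': ['80', '443']}
+# format_allowed('tcp:80-800;udp:1-25')
+# # -> [{'IPProtocol': 'tcp', 'ports': ['80-800']},
+# #     {'IPProtocol': 'udp', 'ports': ['1-25']}]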
+
+def sorted_allowed_list(allowed_list):
+ """Sort allowed_list (output of format_allowed) by protocol and port."""
+ # sort by protocol
+ allowed_by_protocol = sorted(allowed_list, key=lambda x: x['IPProtocol'])
+ # sort the ports list
+ return sorted(allowed_by_protocol, key=lambda y: sorted(y.get('ports', [])))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ allowed=dict(),
+ ipv4_range=dict(),
+ fwname=dict(),
+ name=dict(),
+ src_range=dict(default=[], type='list'),
+ src_tags=dict(default=[], type='list'),
+ target_tags=dict(default=[], type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']),
+ subnet_name=dict(),
+ subnet_region=dict(),
+ subnet_desc=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ allowed = module.params.get('allowed')
+ ipv4_range = module.params.get('ipv4_range')
+ fwname = module.params.get('fwname')
+ name = module.params.get('name')
+ src_range = module.params.get('src_range')
+ src_tags = module.params.get('src_tags')
+ target_tags = module.params.get('target_tags')
+ state = module.params.get('state')
+ mode = module.params.get('mode')
+ subnet_name = module.params.get('subnet_name')
+ subnet_region = module.params.get('subnet_region')
+ subnet_desc = module.params.get('subnet_desc')
+
+ changed = False
+ json_output = {'state': state}
+
+ if state in ['active', 'present']:
+ network = None
+ subnet = None
+ try:
+ network = gce.ex_get_network(name)
+ json_output['name'] = name
+ if mode == 'legacy':
+ json_output['ipv4_range'] = network.cidr
+ if network and mode == 'custom' and subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = subnet.cidr
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants to create a new network that doesn't yet exist
+ if name and not network:
+ if not ipv4_range and mode != 'auto':
+ module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required",
+ changed=False)
+ args = [ipv4_range if mode == 'legacy' else None]
+ kwargs = {}
+ if mode != 'legacy':
+ kwargs['mode'] = mode
+
+ try:
+ network = gce.ex_create_network(name, *args, **kwargs)
+ json_output['name'] = name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except TypeError:
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if (subnet_name or ipv4_range) and not subnet and mode == 'custom':
+ if not hasattr(gce, 'ex_create_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ if not subnet_name or not ipv4_range or not subnet_region:
+ module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed)
+
+ try:
+ subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=changed)
+
+ if fwname:
+ # user creating a firewall rule
+ if not allowed and not src_range and not src_tags:
+ if changed and network:
+ module.fail_json(
+ msg="Network created, but missing required " + "firewall rule parameter(s)", changed=True)
+ module.fail_json(
+ msg="Missing required firewall rule parameter(s)",
+ changed=False)
+
+ allowed_list = format_allowed(allowed)
+
+ # Fetch existing rule and if it exists, compare attributes
+ # update if attributes changed. Create if doesn't exist.
+ try:
+ fw_changed = False
+ fw = gce.ex_get_firewall(fwname)
+
+ # If old and new attributes are different, we update the firewall rule.
+ # This implicitly lets us clear out attributes as well.
+ # allowed_list is required and must not be None for firewall rules.
+ if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)):
+ fw.allowed = allowed_list
+ fw_changed = True
+
+ # source_ranges might not be set in the project; cast it to an empty list
+ fw.source_ranges = fw.source_ranges or []
+
+ # If these attributes are lists, we sort them first, then compare.
+ # Otherwise, we update if they differ.
+ if fw.source_ranges != src_range:
+ if isinstance(src_range, list):
+ if sorted(fw.source_ranges) != sorted(src_range):
+ fw.source_ranges = src_range
+ fw_changed = True
+ else:
+ fw.source_ranges = src_range
+ fw_changed = True
+
+ # source_tags might not be set in the project; cast it to an empty list
+ fw.source_tags = fw.source_tags or []
+
+ if fw.source_tags != src_tags:
+ if isinstance(src_tags, list):
+ if sorted(fw.source_tags) != sorted(src_tags):
+ fw.source_tags = src_tags
+ fw_changed = True
+ else:
+ fw.source_tags = src_tags
+ fw_changed = True
+
+ # target_tags might not be set in the project; cast it to an empty list
+ fw.target_tags = fw.target_tags or []
+
+ if fw.target_tags != target_tags:
+ if isinstance(target_tags, list):
+ if sorted(fw.target_tags) != sorted(target_tags):
+ fw.target_tags = target_tags
+ fw_changed = True
+ else:
+ fw.target_tags = target_tags
+ fw_changed = True
+
+ if fw_changed is True:
+ try:
+ gce.ex_update_firewall(fw)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # Firewall rule not found so we try to create it.
+ except ResourceNotFoundError:
+ try:
+ gce.ex_create_firewall(fwname, allowed_list, network=name,
+ source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
+ changed = True
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['fwname'] = fwname
+ json_output['allowed'] = allowed
+ json_output['src_range'] = src_range
+ json_output['src_tags'] = src_tags
+ json_output['target_tags'] = target_tags
+
+ if state in ['absent', 'deleted']:
+ if fwname:
+ json_output['fwname'] = fwname
+ fw = None
+ try:
+ fw = gce.ex_get_firewall(fwname)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if fw:
+ gce.ex_destroy_firewall(fw)
+ changed = True
+ elif subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ json_output['name'] = subnet_name
+ subnet = None
+ try:
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if subnet:
+ gce.ex_destroy_subnetwork(subnet)
+ changed = True
+ elif name:
+ json_output['name'] = name
+ network = None
+ try:
+ network = gce.ex_get_network(name)
+
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if network:
+ try:
+ gce.ex_destroy_network(network)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py
new file mode 100644
index 00000000..7e60285f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_pd
+short_description: utilize GCE persistent disk resources
+description:
+ - This module can create and destroy unformatted GCE persistent disks
+ U(https://developers.google.com/compute/docs/disks#persistentdisks).
+ It also supports attaching and detaching disks from running instances.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ detach_only:
+ description:
+ - do not destroy the disk, merely detach it from an instance
+ type: bool
+ instance_name:
+ type: str
+ description:
+ - instance name if you wish to attach or detach the disk
+ mode:
+ type: str
+ description:
+ - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
+ default: "READ_ONLY"
+ choices: ["READ_WRITE", "READ_ONLY"]
+ name:
+ type: str
+ description:
+ - name of the disk
+ required: true
+ size_gb:
+ type: str
+ description:
+ - whole integer size of disk (in GB) to create, default is 10 GB
+ default: "10"
+ image:
+ type: str
+ description:
+ - the source image to use for the disk
+ snapshot:
+ type: str
+ description:
+ - the source snapshot to use for the disk
+ state:
+ type: str
+ description:
+ - desired state of the persistent disk
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ zone:
+ type: str
+ description:
+ - zone in which to create the disk
+ default: "us-central1-b"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ default: "pd-standard"
+ delete_on_termination:
+ description:
+ - If C(yes), deletes the volume when the instance is terminated
+ type: bool
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used, I(image_family) is ignored.
+ Cannot specify both I(image) and I(snapshot).
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple attachment action to an existing instance
+ local_action:
+ module: gce_pd
+ instance_name: notlocalhost
+ size_gb: 5
+ name: pd
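+
+# Illustrative sketch (not in the original docs): detach a disk without
+# destroying it; instance and disk names are examples only.
+- name: Detach disk from an instance without deleting it
+ local_action:
+ module: gce_pd
+ instance_name: notlocalhost
+ name: pd
+ detach_only: yes
+ state: absent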
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError, ResourceInUseError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ delete_on_termination=dict(type='bool'),
+ detach_only=dict(type='bool'),
+ instance_name=dict(),
+ mode=dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
+ name=dict(required=True),
+ size_gb=dict(default=10),
+ disk_type=dict(default='pd-standard'),
+ image=dict(),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ snapshot=dict(),
+ state=dict(default='present'),
+ zone=dict(default='us-central1-b'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ delete_on_termination = module.params.get('delete_on_termination')
+ detach_only = module.params.get('detach_only')
+ instance_name = module.params.get('instance_name')
+ mode = module.params.get('mode')
+ name = module.params.get('name')
+ size_gb = module.params.get('size_gb')
+ disk_type = module.params.get('disk_type')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+
+ if delete_on_termination and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when requesting delete on termination',
+ changed=False)
+
+ if detach_only and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when detaching a disk',
+ changed=False)
+
+ disk = inst = None
+ changed = is_attached = False
+
+ json_output = {'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type}
+ if detach_only:
+ json_output['detach_only'] = True
+ json_output['detached_from_instance'] = instance_name
+
+ if instance_name:
+ # user wants to attach/detach from an existing instance
+ try:
+ inst = gce.ex_get_node(instance_name, zone)
+ # is the disk attached?
+ for d in inst.extra['disks']:
+ if d['deviceName'] == name:
+ is_attached = True
+ json_output['attached_mode'] = d['mode']
+ json_output['attached_to_instance'] = inst.name
+ except Exception:
+ pass
+
+ # find disk if it already exists
+ try:
+ disk = gce.ex_get_volume(name)
+ json_output['size_gb'] = int(disk.size)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants a disk to exist. If "instance_name" is supplied the user
+ # also wants it attached
+ if state in ['active', 'present']:
+
+ if not size_gb:
+ module.fail_json(msg="Must supply a size_gb", changed=False)
+ try:
+ size_gb = int(round(float(size_gb)))
+ if size_gb < 1:
+ raise Exception
+ except Exception:
+ module.fail_json(msg="Must supply a size_gb larger than 1 GB",
+ changed=False)
+
+ if instance_name and inst is None:
+ module.fail_json(msg='Instance %s does not exist in zone %s' % (
+ instance_name, zone), changed=False)
+
+ if not disk:
+ if image is not None and snapshot is not None:
+ module.fail_json(
+ msg='Cannot give both image (%s) and snapshot (%s)' % (
+ image, snapshot), changed=False)
+ lc_image = None
+ lc_snapshot = None
+ if image_family is not None:
+ lc_image = gce.ex_get_image_from_family(image_family, ex_project_list=external_projects)
+ elif image is not None:
+ lc_image = gce.ex_get_image(image, ex_project_list=external_projects)
+ elif snapshot is not None:
+ lc_snapshot = gce.ex_get_snapshot(snapshot)
+ try:
+ disk = gce.create_volume(
+ size_gb, name, location=zone, image=lc_image,
+ snapshot=lc_snapshot, ex_disk_type=disk_type)
+ except ResourceExistsError:
+ pass
+ except QuotaExceededError:
+ module.fail_json(msg='Requested disk size exceeds quota',
+ changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['size_gb'] = size_gb
+ if image is not None:
+ json_output['image'] = image
+ if snapshot is not None:
+ json_output['snapshot'] = snapshot
+ changed = True
+ if inst and not is_attached:
+ try:
+ gce.attach_volume(inst, disk, device=name, ex_mode=mode,
+ ex_auto_delete=delete_on_termination)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['attached_to_instance'] = inst.name
+ json_output['attached_mode'] = mode
+ if delete_on_termination:
+ json_output['delete_on_termination'] = True
+ changed = True
+
+ # user wants to delete a disk (or perhaps just detach it).
+ if state in ['absent', 'deleted'] and disk:
+
+ if inst and is_attached:
+ try:
+ gce.detach_volume(disk, ex_node=inst)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+ if not detach_only:
+ try:
+ gce.destroy_volume(disk)
+ except ResourceInUseError as e:
+ module.fail_json(msg=str(e.value), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py
new file mode 100644
index 00000000..4fca1b05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_snapshot
+short_description: Create or destroy snapshots for GCE storage volumes
+description:
+ - Manages snapshots for the storage volumes of a GCE compute instance.
+ If there are multiple volumes, each snapshot name will be prefixed
+ with the name of the disk it belongs to.
+options:
+ instance_name:
+ type: str
+ description:
+ - The GCE instance to snapshot
+ required: True
+ snapshot_name:
+ type: str
+ description:
+ - The name of the snapshot to manage
+ required: True
+ disks:
+ type: list
+ description:
+ - A list of disks to create snapshots for. If none is provided,
+ all of the volumes will have snapshots created.
+ required: False
+ state:
+ type: str
+ description:
+ - Whether a snapshot should be C(present) or C(absent)
+ required: false
+ default: present
+ choices: [present, absent]
+ service_account_email:
+ type: str
+ description:
+ - GCP service account email for the project where the instance resides
+ credentials_file:
+ type: path
+ description:
+ - The path to the credentials file associated with the service account
+ project_id:
+ type: str
+ description:
+ - The GCP project ID to use
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+author: Rob Wagner (@robwagner33)
+'''
+
+EXAMPLES = '''
+- name: Create gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+- name: Delete gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: absent
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+# This example creates snapshots for only two of the available disks as
+# disk0-example-snapshot and disk1-example-snapshot
+- name: Create snapshots of specific disks
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ disks:
+ - disk0
+ - disk1
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+'''
+
+RETURN = '''
+snapshots_created:
+ description: List of newly created snapshots
+ returned: When snapshots are created
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_deleted:
+ description: List of destroyed snapshots
+ returned: When snapshots are deleted
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_existing:
+ description: List of snapshots that already existed (no-op)
+ returned: When snapshots were already present
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_absent:
+ description: List of snapshots that were already absent (no-op)
+ returned: When snapshots were already absent
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def find_snapshot(volume, name):
+ '''
+ Check if there is a snapshot already created with the given name for
+ the passed in volume.
+
+ Args:
+ volume: A gce StorageVolume object to manage
+ name: The name of the snapshot to look for
+
+ Returns:
+ The VolumeSnapshot object if one is found
+ '''
+ found_snapshot = None
+ snapshots = volume.list_snapshots()
+ for snapshot in snapshots:
+ if name == snapshot.name:
+ found_snapshot = snapshot
+ return found_snapshot
+
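+# Illustrative sketch (comment only): look up a snapshot by name on a
+# volume; returns the VolumeSnapshot if found, otherwise None.
+#
+# snapshot = find_snapshot(volume_obj, 'disk0-example-snapshot')
+# if snapshot is None:
+#     ...  # no snapshot with that name exists yet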
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(required=True),
+ snapshot_name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ disks=dict(default=None, type='list'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ instance_name = module.params.get('instance_name')
+ snapshot_name = module.params.get('snapshot_name')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+
+ json_output = dict(
+ changed=False,
+ snapshots_created=[],
+ snapshots_deleted=[],
+ snapshots_existing=[],
+ snapshots_absent=[]
+ )
+
+ snapshot = None
+
+ instance = gce.ex_get_node(instance_name, 'all')
+ instance_disks = instance.extra['disks']
+
+ for instance_disk in instance_disks:
+ disk_snapshot_name = snapshot_name
+ disk_info = gce._get_components_from_path(instance_disk['source'])
+ device_name = disk_info['name']
+ device_zone = disk_info['zone']
+ if disks is None or device_name in disks:
+ volume_obj = gce.ex_get_volume(device_name, device_zone)
+
+ # If we have more than one disk to snapshot, prepend the disk name
+ if len(instance_disks) > 1:
+ disk_snapshot_name = device_name + "-" + disk_snapshot_name
+
+ snapshot = find_snapshot(volume_obj, disk_snapshot_name)
+
+ if snapshot and state == 'present':
+ json_output['snapshots_existing'].append(disk_snapshot_name)
+
+ elif snapshot and state == 'absent':
+ snapshot.destroy()
+ json_output['changed'] = True
+ json_output['snapshots_deleted'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'present':
+ volume_obj.snapshot(disk_snapshot_name)
+ json_output['changed'] = True
+ json_output['snapshots_created'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'absent':
+ json_output['snapshots_absent'].append(disk_snapshot_name)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py
new file mode 100644
index 00000000..1e36ed4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+short_description: add or remove tag(s) to/from GCE instances
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
+ to/from GCE instances. Use 'instance_pattern' to update multiple instances in a specified zone.
+options:
+ instance_name:
+ type: str
+ description:
+ - The name of the GCE instance to add tags to or remove tags from.
+ - Required if C(instance_pattern) is not specified.
+ instance_pattern:
+ type: str
+ description:
+ - The pattern of GCE instance names to match for adding/removing tags. The full Python regex syntax is supported.
+ See U(https://docs.python.org/2/library/re.html) for details.
+ - If C(instance_name) is not specified, this field is required.
+ tags:
+ type: list
+ description:
+ - Comma-separated list of tags to add or remove.
+ required: yes
+ state:
+ type: str
+ description:
+ - Desired state of the tags.
+ choices: [ absent, present ]
+ default: present
+ zone:
+ type: str
+ description:
+ - The zone of the disk specified by source.
+ default: us-central1-a
+ service_account_email:
+ type: str
+ description:
+ - Service account email.
+ pem_file:
+ type: path
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - Your GCE project ID.
+requirements:
+ - python >= 2.6
+ - apache-libcloud >= 0.17.0
+notes:
+ - Either I(instance_name) or I(instance_pattern) is required.
+author:
+ - Do Hoang Khiem (@dohoangkhiem) <dohoangkhiem@gmail.com>
+ - Tom Melendez (@supertom)
+'''
+
+EXAMPLES = '''
+- name: Add tags to instance
+ community.general.gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+- name: Remove tags from instance in default zone (us-central1-a)
+ community.general.gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+- name: Add tags to instances in zone that match pattern
+ community.general.gce_tag:
+ instance_pattern: test-server-*
+ tags: foo,bar
+ zone: us-central1-a
+ state: present
+'''
+
+import re
+import traceback
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _union_items(baselist, comparelist):
+ """Combine two lists, removing duplicates."""
+ return list(set(baselist) | set(comparelist))
+
+
+def _intersect_items(baselist, comparelist):
+ """Return matching items in both lists."""
+ return list(set(baselist) & set(comparelist))
+
+
+def _get_changed_items(baselist, comparelist):
+ """Return changed items as they relate to baselist."""
+ return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
+
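+# Illustrative sketch (comment only): behaviour of the set helpers above on
+# sample tag lists (result ordering is unspecified).
+#
+# _union_items(['a', 'b'], ['b', 'c'])        # -> ['a', 'b', 'c']
+# _intersect_items(['a', 'b'], ['b', 'c'])    # -> ['b']
+# _get_changed_items(['a', 'b'], ['b', 'c'])  # -> ['a']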
+
+def modify_tags(gce, module, node, tags, state='present'):
+ """Modify tags on an instance."""
+
+ existing_tags = node.extra['tags']
+ tags = [x.lower() for x in tags]
+ tags_changed = []
+
+ if state == 'absent':
+ # tags changed are any that intersect
+ tags_changed = _intersect_items(existing_tags, tags)
+ if not tags_changed:
+ return False, None
+ # update instance with tags in existing tags that weren't specified
+ node_tags = _get_changed_items(existing_tags, tags)
+ else:
+ # tags changed are any in the new list that weren't in the existing tags
+ tags_changed = _get_changed_items(tags, existing_tags)
+ if not tags_changed:
+ return False, None
+ # update instance with the combined list
+ node_tags = _union_items(existing_tags, tags)
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
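+# Illustrative sketch (comment only): with existing node tags
+# ['http-server', 'env-prod'] and tags=['env-prod', 'staging'],
+# state='present' sets ['http-server', 'env-prod', 'staging'] on the node
+# and reports ['staging'] as changed, while state='absent' sets
+# ['http-server'] and reports ['env-prod'] as changed.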
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(type='str'),
+ instance_pattern=dict(type='str'),
+ tags=dict(type='list', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ zone=dict(type='str', default='us-central1-a'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ project_id=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['instance_name', 'instance_pattern']
+ ],
+ required_one_of=[
+ ['instance_name', 'instance_pattern']
+ ],
+ )
+
+ instance_name = module.params.get('instance_name')
+ instance_pattern = module.params.get('instance_pattern')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ changed = False
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ # Create list of nodes to operate on
+ matching_nodes = []
+ try:
+ if instance_pattern:
+ instances = gce.list_nodes(ex_zone=zone)
+ # no instances in zone
+ if not instances:
+ module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[])
+ try:
+ # Python regex fully supported: https://docs.python.org/2/library/re.html
+ p = re.compile(instance_pattern)
+ matching_nodes = [i for i in instances if p.search(i.name) is not None]
+ except re.error as e:
+ module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False)
+ else:
+ matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
+
+ # Tag nodes
+ instance_pattern_matches = []
+ tags_changed = []
+ for node in matching_nodes:
+ changed, tags_changed = modify_tags(gce, module, node, tags, state)
+ if changed:
+ instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed})
+ if instance_pattern:
+ module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches)
+ else:
+ module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py
new file mode 100644
index 00000000..b1df1da8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gconftool2
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Edit GNOME Configurations
+description:
+ - This module allows for the manipulation of GNOME 2 Configuration via
+ gconftool-2. Please see the gconftool-2(1) man pages for more details.
+options:
+ key:
+ type: str
+ description:
+ - A GConf preference key is an element in the GConf repository
+ that corresponds to an application preference. See man gconftool-2(1)
+ required: yes
+ value:
+ type: str
+ description:
+ - Preference keys typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". See man gconftool-2(1)
+ value_type:
+ type: str
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ choices: [ bool, float, int, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the key/value.
+ required: yes
+ choices: [ absent, get, present ]
+ config_source:
+ type: str
+ description:
+ - Specify a configuration source to use rather than the default path.
+ See man gconftool-2(1)
+ direct:
+ description:
+ - Access the config database directly, bypassing server. If direct is
+ specified then the config_source must be specified as well.
+ See man gconftool-2(1)
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = """
+- name: Change the widget font to "Serif 12"
+ community.general.gconftool2:
+ key: "/desktop/gnome/interface/font_name"
+ value_type: "string"
+ value: "Serif 12"
+"""
+
+RETURN = '''
+ key:
+ description: The key specified in the module parameters
+ returned: success
+ type: str
+ sample: /desktop/gnome/interface/font_name
+ value_type:
+ description: The type of the value that was changed
+ returned: success
+ type: str
+ sample: string
+ value:
+ description: The value of the preference key after executing the module
+ returned: success
+ type: str
+ sample: "Serif 12"
+...
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class GConf2Preference(object):
+ def __init__(self, ansible, key, value_type, value,
+ direct=False, config_source=""):
+ self.ansible = ansible
+ self.key = key
+ self.value_type = value_type
+ self.value = value
+ self.config_source = config_source
+ self.direct = direct
+
+ def value_already_set(self):
+ return False
+
+ def call(self, call_type, fail_onerr=True):
+ """ Helper function to perform gconftool-2 operations """
+ config_source = ''
+ direct = ''
+ changed = False
+ out = ''
+
+ # If the configuration source is different from the default, create
+ # the argument
+ if self.config_source is not None and len(self.config_source) > 0:
+ config_source = "--config-source " + self.config_source
+
+ # If direct is true, create the argument
+ if self.direct:
+ direct = "--direct"
+
+ # Execute the call
+ cmd = "gconftool-2 "
+ try:
+ # If the call is "get", then we don't need as many parameters and
+ # we can ignore some
+ if call_type == 'get':
+ cmd += "--get {0}".format(self.key)
+ # Otherwise, we will use all relevant parameters
+ elif call_type == 'set':
+ cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
+ config_source,
+ self.value_type,
+ call_type,
+ self.key,
+ self.value)
+ elif call_type == 'unset':
+ cmd += "--unset {0}".format(self.key)
+
+ # Start external command
+ rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True)
+
+ if len(err) > 0:
+ if fail_onerr:
+ self.ansible.fail_json(msg='gconftool-2 failed with '
+ 'error: %s' % (str(err)))
+ else:
+ changed = True
+
+ except OSError as exception:
+ self.ansible.fail_json(msg='gconftool-2 failed with exception: '
+ '%s' % exception)
+ return changed, out.rstrip()
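+
+    # Illustrative commands assembled by call(), modulo extra whitespace
+    # (key and value here are placeholders):
+    #   get:   gconftool-2 --get /desktop/gnome/interface/font_name
+    #   set:   gconftool-2 --type string --set /desktop/gnome/interface/font_name "Serif 12"
+    #   unset: gconftool-2 --unset /desktop/gnome/interface/font_name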
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ key=dict(type='str', required=True),
+ value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
+ value=dict(type='str'),
+ state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
+ direct=dict(type='bool', default=False),
+ config_source=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ state_values = {"present": "set", "absent": "unset", "get": "get"}
+
+ # Assign module values to dictionary values
+ key = module.params['key']
+ value_type = module.params['value_type']
+    # Normalise boolean-like strings (gconftool-2 reports booleans as
+    # lowercase "true"/"false") and guard against a missing value, which
+    # is legal for the "get" and "absent" states.
+    value = module.params['value']
+    if value is not None and value.lower() in ("true", "false"):
+        value = value.lower()
+
+ state = state_values[module.params['state']]
+ direct = module.params['direct']
+ config_source = module.params['config_source']
+
+ # Initialize some variables for later
+ change = False
+ new_value = ''
+
+ if state != "get":
+ if value is None or value == "":
+ module.fail_json(msg='State %s requires "value" to be set'
+ % str(state))
+ elif value_type is None or value_type == "":
+ module.fail_json(msg='State %s requires "value_type" to be set'
+ % str(state))
+
+ if direct and config_source is None:
+ module.fail_json(msg='If "direct" is "yes" then the ' +
+ '"config_source" must be specified')
+ elif not direct and config_source is not None:
+ module.fail_json(msg='If the "config_source" is specified ' +
+ 'then "direct" must be "yes"')
+
+ # Create a gconf2 preference
+ gconf_pref = GConf2Preference(module, key, value_type,
+ value, direct, config_source)
+ # Now we get the current value, if not found don't fail
+ _, current_value = gconf_pref.call("get", fail_onerr=False)
+
+ # Check if the current value equals the value we want to set. If not, make
+ # a change
+ if current_value != value:
+ # If check mode, we know a change would have occurred.
+ if module.check_mode:
+ # So we will set the change to True
+ change = True
+ # And set the new_value to the value that would have been set
+ new_value = value
+ # If not check mode make the change.
+ else:
+ change, new_value = gconf_pref.call(state)
+ # If the value we want to set is the same as the current_value, we will
+ # set the new_value to the current_value for reporting
+ else:
+ new_value = current_value
+
+ facts = dict(gconftool2={'changed': change,
+ 'key': key,
+ 'value_type': value_type,
+ 'new_value': new_value,
+ 'previous_value': current_value,
+ 'playbook_value': module.params['value']})
+
+ module.exit_json(changed=change, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py
new file mode 100644
index 00000000..ee564ae0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gcp_backend_service
+short_description: Create or Destroy a Backend Service.
+description:
+ - Create or Destroy a Backend Service. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service) for an overview.
+ Full install/configuration instructions for the Google Cloud modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.3.0"
+notes:
+  - Update is not currently supported.
+  - Only global backend services are currently supported; regional backend services are not.
+  - Internal load balancing is not currently supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_backend_service) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ backend_service_name:
+ type: str
+ description:
+ - Name of the Backend Service.
+ required: true
+ backends:
+ type: list
+ description:
+ - List of backends that make up the backend service. A backend is made up of
+ an instance group and optionally several other parameters. See
+ U(https://cloud.google.com/compute/docs/reference/latest/backendServices)
+ for details.
+ required: true
+ healthchecks:
+ type: list
+ description:
+ - List of healthchecks. Only one healthcheck is supported.
+ required: true
+ enable_cdn:
+ description:
+ - If true, enable Cloud CDN for this Backend Service.
+ type: bool
+ port_name:
+ type: str
+ description:
+ - Name of the port on the managed instance group (MIG) that backend
+ services can forward data to. Required for external load balancing.
+ protocol:
+ type: str
+ description:
+ - The protocol this Backend Service uses to communicate with backends.
+ Possible values are HTTP, HTTPS, TCP, and SSL. The default is TCP.
+ choices: [HTTP, HTTPS, TCP, SSL]
+ default: TCP
+ required: false
+ timeout:
+ type: int
+ description:
+ - How many seconds to wait for the backend before considering it a failed
+ request. Default is 30 seconds. Valid range is 1-86400.
+ required: false
+ service_account_email:
+ type: str
+ description:
+ - Service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email.
+ pem_file:
+ type: str
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - GCE project ID.
+ state:
+ type: str
+ description:
+ - Desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum Backend Service
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+
+- name: Create BES with extended backend parameters
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ max_utilization: 0.6
+ max_rate: 10
+ - instance_group: managed_instance_group_2
+ max_utilization: 0.5
+ max_rate: 4
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+ timeout: 60
+'''
+
+RETURN = '''
+backend_service_created:
+  description: Indicates whether the Backend Service was created.
+ returned: When a Backend Service is created.
+ type: bool
+ sample: "True"
+backend_service_deleted:
+  description: Indicates whether the Backend Service was deleted.
+ returned: When a Backend Service is deleted.
+ type: bool
+ sample: "True"
+backend_service_name:
+ description: Name of the Backend Service.
+ returned: Always.
+ type: str
+ sample: "my-backend-service"
+backends:
+ description: List of backends (comprised of instance_group) that
+ make up a Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ { 'instance_group': 'mig_one', 'zone': 'us-central1-b'} ]"
+enable_cdn:
+ description: If Cloud CDN is enabled. null if not set.
+ returned: When a backend service exists.
+ type: bool
+ sample: "True"
+healthchecks:
+ description: List of healthchecks applied to the Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ 'my-healthcheck' ]"
+protocol:
+ description: Protocol used to communicate with the Backends.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "HTTP"
+port_name:
+ description: Name of Backend Port.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "myhttpport"
+timeout:
+ description: In seconds, how long before a request sent to a backend is
+ considered failed.
+ returned: If specified.
+ type: int
+ sample: "myhttpport"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params
+
+
+def _validate_params(params):
+ """
+ Validate backend_service params.
+
+ This function calls _validate_backend_params to verify
+ the backend-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'timeout', 'type': int, 'min': 1, 'max': 86400},
+ ]
+ try:
+ check_params(params, fields)
+ _validate_backend_params(params['backends'])
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_backend_params(backends):
+ """
+ Validate configuration for backends.
+
+ :param backends: Ansible dictionary containing backends configuration (only).
+ :type backends: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'balancing_mode', 'type': str, 'values': ['UTILIZATION', 'RATE', 'CONNECTION']},
+ {'name': 'max_utilization', 'type': float},
+ {'name': 'max_connections', 'type': int},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+
+ if not backends:
+ raise ValueError('backends should be a list.')
+
+ for backend in backends:
+ try:
+ check_params(backend, fields)
+ except Exception:
+ raise
+
+ if 'max_rate' in backend and 'max_rate_per_instance' in backend:
+            raise ValueError('maxRate and maxRatePerInstance cannot both be set.')
+
+ return (True, '')
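+
+# Illustrative backends list that passes the validation above
+# (instance_group names are placeholders):
+#   [{'instance_group': 'mig-one', 'balancing_mode': 'RATE', 'max_rate': 10},
+#    {'instance_group': 'mig-two', 'balancing_mode': 'UTILIZATION',
+#     'max_utilization': 0.6}]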
+
+
+def get_backend_service(gce, name):
+ """
+ Get a Backend Service from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Backend Service.
+ :type name: ``str``
+
+ :return: A GCEBackendService object or None.
+ :rtype: :class: `GCEBackendService` or None
+ """
+ try:
+ # Does the Backend Service already exist?
+ return gce.ex_get_backendservice(name=name)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def get_healthcheck(gce, name):
+ return gce.ex_get_healthcheck(name)
+
+
+def get_instancegroup(gce, name, zone=None):
+ return gce.ex_get_instancegroup(name=name, zone=zone)
+
+
+def create_backend_service(gce, params):
+ """
+ Create a new Backend Service.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats
+ :rtype: tuple in the format of (bool, bool)
+ """
+ from copy import deepcopy
+
+ changed = False
+ return_data = False
+ # only one healthcheck is currently supported
+ hc_name = params['healthchecks'][0]
+ hc = get_healthcheck(gce, hc_name)
+ backends = []
+ for backend in params['backends']:
+ ig = get_instancegroup(gce, backend['instance_group'],
+ backend.get('zone', None))
+ kwargs = deepcopy(backend)
+ kwargs['instance_group'] = ig
+ backends.append(gce.ex_create_backend(
+ **kwargs))
+
+ bes = gce.ex_create_backendservice(
+ name=params['backend_service_name'], healthchecks=[hc], backends=backends,
+ enable_cdn=params['enable_cdn'], port_name=params['port_name'],
+ timeout_sec=params['timeout'], protocol=params['protocol'])
+
+ if bes:
+ changed = True
+ return_data = True
+
+ return (changed, return_data)
+
+
+def delete_backend_service(bes):
+ """
+ Delete a Backend Service. The Instance Groups are NOT destroyed.
+ """
+ changed = False
+ return_data = False
+ if bes.destroy():
+ changed = True
+ return_data = True
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ backends=dict(type='list', required=True),
+ backend_service_name=dict(required=True),
+ healthchecks=dict(type='list', required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ enable_cdn=dict(type='bool'),
+ port_name=dict(type='str'),
+ protocol=dict(type='str', default='TCP',
+ choices=['HTTP', 'HTTPS', 'SSL', 'TCP']),
+ timeout=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['backend_service_name'] = module.params.get('backend_service_name')
+ params['backends'] = module.params.get('backends')
+ params['healthchecks'] = module.params.get('healthchecks')
+ params['enable_cdn'] = module.params.get('enable_cdn', None)
+ params['port_name'] = module.params.get('port_name', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['timeout'] = module.params.get('timeout', None)
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+        # str(e) rather than the Python 2-only e.message attribute.
+        module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ bes = get_backend_service(gce, params['backend_service_name'])
+
+ if not bes:
+ if params['state'] == 'absent':
+ # Doesn't exist and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown backend service: %s" %
+ (params['backend_service_name']))
+ else:
+ # Create
+ (changed, json_output['backend_service_created']) = create_backend_service(gce,
+ params)
+ elif params['state'] == 'absent':
+ # Delete
+ (changed, json_output['backend_service_deleted']) = delete_backend_service(bes)
+ else:
+ # TODO(supertom): Add update support when it is available in libcloud.
+ changed = False
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py
new file mode 100644
index 00000000..56dbfa7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_forwarding_rule
+short_description: Create, Update or Destroy a Forwarding_Rule.
+description:
+ - Create, Update or Destroy a Forwarding_Rule. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+ More details on the Global Forwarding_Rule API can be found at
+    U(https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules).
+    More details on the Forwarding Rules API can be found at
+    U(https://cloud.google.com/compute/docs/reference/latest/forwardingRules).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_forwarding_rule) or M(google.cloud.gcp_compute_global_forwarding_rule) instead.
+notes:
+ - Currently only supports global forwarding rules.
+ As such, Load Balancing Scheme is always EXTERNAL.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ address:
+ type: str
+ description:
+ - IPv4 or named IP address. Must be of the same scope (regional, global).
+ Reserved addresses can (and probably should) be used for global
+ forwarding rules. You may reserve IPs from the console or
+ via the gce_eip module.
+ required: false
+ forwarding_rule_name:
+ type: str
+ description:
+ - Name of the Forwarding_Rule.
+ required: true
+ port_range:
+ type: str
+ description:
+ - For global forwarding rules, must be set to 80 or 8080 for TargetHttpProxy, and
+ 443 for TargetHttpsProxy or TargetSslProxy.
+ required: false
+ protocol:
+ type: str
+ description:
+ - For global forwarding rules, TCP, UDP, ESP, AH, SCTP or ICMP. Default is TCP.
+ required: false
+ choices: [TCP]
+ default: TCP
+ region:
+ type: str
+ description:
+ - The region for this forwarding rule. Currently, only 'global' is supported.
+ required: true
+ state:
+ type: str
+ description:
+ - The state of the Forwarding Rule. 'present' or 'absent'
+ required: true
+ choices: ["present", "absent"]
+ target:
+ type: str
+ description:
+ - Target resource for forwarding rule. For global proxy, this is a Global
+ TargetProxy resource. Required for external load balancing (including Global load balancing)
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ load_balancing_scheme:
+ type: str
+ choices: [EXTERNAL]
+ default: EXTERNAL
+ description:
+ - Load balancing scheme. At the moment the only choice is EXTERNAL.
+'''
+
+EXAMPLES = '''
+- name: Create Minimum GLOBAL Forwarding_Rule
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ region: global
+ target: my-target-proxy
+ state: present
+
+- name: Create Forwarding_Rule w/reserved static address
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ address: my-reserved-static-address-name
+ region: global
+ target: my-target-proxy
+ state: present
+'''
+
+RETURN = '''
+forwarding_rule_name:
+ description: Name of the Forwarding_Rule
+ returned: Always
+ type: str
+ sample: my-target-proxy
+forwarding_rule:
+ description: GCP Forwarding_Rule dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-forwarding_rule", "target": "..." }
+region:
+ description: Region for Forwarding Rule.
+ returned: Always
+  type: str
+  sample: global
+state:
+ description: state of the Forwarding_Rule
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-forwarding_rule'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_global_forwarding_rule_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP API.
+ :rtype ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'forwarding_rule_name')
+ if 'target' in gcp_dict:
+ gcp_dict['target'] = '%s/global/targetHttpProxies/%s' % (url,
+ gcp_dict['target'])
+ if 'address' in gcp_dict:
+ gcp_dict['IPAddress'] = '%s/global/addresses/%s' % (url,
+ gcp_dict['address'])
+ del gcp_dict['address']
+ if 'protocol' in gcp_dict:
+ gcp_dict['IPProtocol'] = gcp_dict['protocol']
+ del gcp_dict['protocol']
+ return gcp_dict
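+
+# Sketch of the reshaping above (URLs abbreviated, names are placeholders):
+#   {'forwarding_rule_name': 'my-rule', 'protocol': 'TCP', 'target': 'my-proxy'}
+# becomes roughly
+#   {'name': 'my-rule', 'IPProtocol': 'TCP',
+#    'target': '<api-url>/global/targetHttpProxies/my-proxy'}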
+
+
+def get_global_forwarding_rule(client, name, project_id=None):
+ """
+ Get a Global Forwarding Rule from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.globalForwardingRules().get(
+ project=project_id, forwardingRule=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_global_forwarding_rule(client, params, project_id):
+ """
+ Create a new Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+ try:
+ req = client.globalForwardingRules().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_global_forwarding_rule(client, name, project_id):
+ """
+ Delete a Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.globalForwardingRules().delete(
+ project=project_id, forwardingRule=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_global_forwarding_rule(client, forwarding_rule, params, name, project_id):
+ """
+ Update a Global Forwarding_Rule. Currently, only a target can be updated.
+
+ If the forwarding_rule has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param forwarding_rule: Name of the Target Proxy.
+ :type forwarding_rule: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+
+    # Only the target can be updated via setTarget, so the comparison is
+    # deliberately limited to the target URL.
+    if forwarding_rule['target'] == gcp_dict['target']:
+        return (False, 'no update necessary')
+
+ try:
+ req = client.globalForwardingRules().setTarget(project=project_id,
+ forwardingRule=name,
+ body={'target': gcp_dict['target']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ forwarding_rule_name=dict(required=True),
+ region=dict(required=True),
+ target=dict(required=False),
+ address=dict(type='str', required=False),
+ protocol=dict(required=False, default='TCP', choices=['TCP']),
+ port_range=dict(required=False),
+ load_balancing_scheme=dict(
+ required=False, default='EXTERNAL', choices=['EXTERNAL']),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['forwarding_rule_name'] = module.params.get('forwarding_rule_name')
+ params['region'] = module.params.get('region')
+ params['target'] = module.params.get('target', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['port_range'] = module.params.get('port_range')
+ if module.params.get('address', None):
+ params['address'] = module.params.get('address', None)
+
+ if params['region'] != 'global':
+ # This module currently doesn't support regional rules.
+ module.fail_json(
+ msg=("%s - Only global forwarding rules currently supported. "
+ "Be sure to specify 'global' for the region option.") %
+ (params['forwarding_rule_name']))
+
+ changed = False
+ json_output = {'state': params['state']}
+ forwarding_rule = None
+ if params['region'] == 'global':
+ forwarding_rule = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ if not forwarding_rule:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown forwarding_rule: %s" %
+ (params['forwarding_rule_name']))
+ else:
+ # Create
+ changed, json_output['forwarding_rule'] = create_global_forwarding_rule(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['forwarding_rule'] = delete_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['forwarding_rule'] = update_global_forwarding_rule(client,
+ forwarding_rule=forwarding_rule,
+ params=params,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py
new file mode 100644
index 00000000..19b28653
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_healthcheck
+short_description: Create, Update or Destroy a Healthcheck.
+description:
+ - Create, Update or Destroy a Healthcheck. Currently only HTTP and
+ HTTPS Healthchecks are supported. Healthchecks are used to monitor
+ individual instances, managed instance groups and/or backend
+    services. Healthchecks are reusable.
+ - Visit
+ U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
+ for an overview of Healthchecks on GCP.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
+ API details on HTTP Healthchecks.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
+    for more details on the HTTPS Healthcheck API.
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports HTTP and HTTPS Healthchecks currently.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: >
+ Use M(google.cloud.gcp_compute_health_check), M(google.cloud.gcp_compute_http_health_check) or
+ M(google.cloud.gcp_compute_https_health_check) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ check_interval:
+ type: int
+ description:
+ - How often (in seconds) to send a health check.
+ default: 5
+ healthcheck_name:
+ type: str
+ description:
+ - Name of the Healthcheck.
+ required: true
+ healthcheck_type:
+ type: str
+ description:
+ - Type of Healthcheck.
+ required: true
+ choices: ["HTTP", "HTTPS"]
+ host_header:
+ type: str
+ description:
+ - The value of the host header in the health check request. If left
+ empty, the public IP on behalf of which this health
+ check is performed will be used.
+ default: ""
+ port:
+ type: int
+ description:
+ - The TCP port number for the health check request. The default value is
+ 443 for HTTPS and 80 for HTTP.
+ request_path:
+ type: str
+ description:
+ - The request path of the HTTPS health check request.
+ required: false
+ default: "/"
+ state:
+ type: str
+ description: State of the Healthcheck.
+ choices: ["present", "absent"]
+ default: present
+ timeout:
+ type: int
+ description:
+ - How long (in seconds) to wait for a response before claiming
+ failure. It is invalid for timeout
+ to have a greater value than check_interval.
+ default: 5
+ unhealthy_threshold:
+ type: int
+ description:
+ - A so-far healthy instance will be marked unhealthy after this
+ many consecutive failures.
+ default: 2
+ healthy_threshold:
+ type: int
+ description:
+ - A so-far unhealthy instance will be marked healthy after this
+ many consecutive successes.
+ default: 2
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - Your GCP project ID
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+ state: present
+- name: Create HTTP HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+    host_header: my-host
+    request_path: /hc
+    check_interval: 10
+    timeout: 10
+    unhealthy_threshold: 2
+    healthy_threshold: 1
+ state: present
+- name: Create HTTPS HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: "{{ https_healthcheck }}"
+ healthcheck_type: HTTPS
+ host_header: my-host
+ request_path: /hc
+ check_interval: 5
+ timeout: 5
+ unhealthy_threshold: 2
+ healthy_threshold: 1
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Healthcheck
+ returned: Always.
+ type: str
+ sample: present
+healthcheck_name:
+ description: Name of the Healthcheck
+ returned: Always
+ type: str
+ sample: my-url-map
+healthcheck_type:
+ description: Type of the Healthcheck
+ returned: Always
+ type: str
+ sample: HTTP
+healthcheck:
+ description: GCP Healthcheck dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-healthcheck'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_healthcheck_params(params):
+ """
+ Validate healthcheck params.
+
+    Simple validation has already been performed by AnsibleModule.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ if params['timeout'] > params['check_interval']:
+ raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
+ params['timeout'], params['check_interval']))
+
+ return (True, '')
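+
+# Example: timeout=30 with check_interval=10 raises ValueError above, matching
+# the documented constraint that timeout must not exceed check_interval.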
+
+
+def _build_healthcheck_dict(params):
+ """
+ Reformat services in Ansible Params for GCP.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :return: dictionary suitable for submission to GCP
+ HealthCheck (HTTP/HTTPS) API.
+ :rtype ``dict``
+ """
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
+ if 'timeout' in gcp_dict:
+ gcp_dict['timeoutSec'] = gcp_dict['timeout']
+ del gcp_dict['timeout']
+
+ if 'checkInterval' in gcp_dict:
+ gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
+ del gcp_dict['checkInterval']
+
+ if 'hostHeader' in gcp_dict:
+ gcp_dict['host'] = gcp_dict['hostHeader']
+ del gcp_dict['hostHeader']
+
+ if 'healthcheckType' in gcp_dict:
+ del gcp_dict['healthcheckType']
+ return gcp_dict
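+
+# Sketch of the renames above (values are placeholders): a params dict such as
+#   {'healthcheck_name': 'my-hc', 'timeout': 5, 'check_interval': 10,
+#    'host_header': 'my-host'}
+# is keyed for the GCP API roughly as
+#   {'name': 'my-hc', 'timeoutSec': 5, 'checkIntervalSec': 10, 'host': 'my-host'}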
+
+
+def _get_req_resource(client, resource_type):
+ if resource_type == 'HTTPS':
+ return (client.httpsHealthChecks(), 'httpsHealthCheck')
+ else:
+ return (client.httpHealthChecks(), 'httpHealthCheck')
+
+
+def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
+ """
+ Get a Healthcheck from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.get(**args)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_healthcheck(client, params, project_id, resource_type='HTTP'):
+ """
+ Create a new Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ try:
+ resource, _ = _get_req_resource(client, resource_type)
+ args = {'project': project_id, 'body': gcp_dict}
+ req = resource.insert(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
+ """
+ Delete a Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.delete(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_healthcheck(client, healthcheck, params, name, project_id,
+ resource_type='HTTP'):
+ """
+ Update a Healthcheck.
+
+ If the healthcheck has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param healthcheck: Name of the Url Map.
+ :type healthcheck: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name, 'body': gcp_dict}
+ req = resource.update(**args)
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ healthcheck_name=dict(required=True),
+ healthcheck_type=dict(required=True,
+ choices=['HTTP', 'HTTPS']),
+ request_path=dict(required=False, default='/'),
+ check_interval=dict(required=False, type='int', default=5),
+ healthy_threshold=dict(required=False, type='int', default=2),
+ unhealthy_threshold=dict(required=False, type='int', default=2),
+ host_header=dict(required=False, type='str', default=''),
+ timeout=dict(required=False, type='int', default=5),
+ port=dict(required=False, type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+
+ params['healthcheck_name'] = module.params.get('healthcheck_name')
+ params['healthcheck_type'] = module.params.get('healthcheck_type')
+ params['request_path'] = module.params.get('request_path')
+ params['check_interval'] = module.params.get('check_interval')
+ params['healthy_threshold'] = module.params.get('healthy_threshold')
+ params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
+ params['host_header'] = module.params.get('host_header')
+ params['timeout'] = module.params.get('timeout')
+ params['port'] = module.params.get('port', None)
+ params['state'] = module.params.get('state')
+
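+    # Default the port from the healthcheck type (80 for HTTP, 443 for HTTPS)
+    # only when the user did not supply one.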
+ if not params['port']:
+ params['port'] = 80
+ if params['healthcheck_type'] == 'HTTPS':
+ params['port'] = 443
+ try:
+ _validate_healthcheck_params(params)
+ except Exception as e:
+        # str(e) rather than the Python 2-only e.message attribute.
+        module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ healthcheck = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+
+ if not healthcheck:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown healthcheck: %s" %
+ (params['healthcheck_name']))
+ else:
+ # Create
+ changed, json_output['healthcheck'] = create_healthcheck(client,
+ params=params,
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['healthcheck'] = delete_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ else:
+ changed, json_output['healthcheck'] = update_healthcheck(client,
+ healthcheck=healthcheck,
+ params=params,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py
new file mode 100644
index 00000000..611cee04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_target_proxy
+short_description: Create, Update or Destroy a Target_Proxy.
+description:
+ - Create, Update or Destroy a Target_Proxy. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+ More details on the Target_Proxy API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies#resource-representations).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_target_http_proxy) instead.
+notes:
+ - Currently only supports global HTTP proxy.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ target_proxy_name:
+ type: str
+ description:
+ - Name of the Target_Proxy.
+ required: true
+ target_proxy_type:
+ type: str
+ description:
+ - Type of Target_Proxy. HTTP, HTTPS or SSL. Only HTTP is currently supported.
+ required: true
+ choices: [HTTP]
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url Map. Required if type is HTTP or HTTPS proxy.
+ required: false
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: str
+ description:
+      - path to the PEM file associated with the service account email.
+        This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the target proxy should be in. C(present) or C(absent) are the only valid options.
+ required: true
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HTTP Target_Proxy
+ community.general.gcp_target_proxy:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ target_proxy_name: my-target_proxy
+ target_proxy_type: HTTP
+ url_map_name: my-url-map
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Target_Proxy
+ returned: Always.
+ type: str
+ sample: present
+updated_target_proxy:
+ description: True if the target_proxy has been updated. Will not appear on
+ initial target_proxy creation.
+ returned: if the target_proxy has been updated.
+ type: bool
+ sample: true
+target_proxy_name:
+ description: Name of the Target_Proxy
+ returned: Always
+ type: str
+ sample: my-target-proxy
+target_proxy_type:
+ description: Type of Target_Proxy. One of HTTP, HTTPS or SSL.
+ returned: Always
+ type: str
+ sample: HTTP
+target_proxy:
+ description: GCP Target_Proxy dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-target-proxy", "urlMap": "..." }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-target_proxy'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_target_proxy_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP UrlMap API.
+ :rtype ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'target_proxy_name')
+ if 'urlMap' in gcp_dict:
+ gcp_dict['urlMap'] = '%s/global/urlMaps/%s' % (url,
+ gcp_dict['urlMap'])
+ return gcp_dict
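+
+# Sketch: with a project_id, {'target_proxy_name': 'my-proxy', 'url_map': 'my-url-map'}
+# is reshaped roughly into
+#   {'name': 'my-proxy', 'urlMap': '<api-url>/global/urlMaps/my-url-map'}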
+
+
+def get_target_http_proxy(client, name, project_id=None):
+ """
+ Get a Target HTTP Proxy from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ req = client.targetHttpProxies().get(project=project_id,
+ targetHttpProxy=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+
+
+def create_target_http_proxy(client, params, project_id):
+ """
+ Create a new Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+ try:
+ req = client.targetHttpProxies().insert(project=project_id,
+ body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_target_http_proxy(client, name, project_id):
+ """
+ Delete a Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.targetHttpProxies().delete(
+ project=project_id, targetHttpProxy=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_target_http_proxy(client, target_proxy, params, name, project_id):
+ """
+ Update a HTTP Target_Proxy. Currently only the Url Map can be updated.
+
+ If the target_proxy has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param target_proxy: Name of the Target Proxy.
+ :type target_proxy: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+
+    # Only the Url Map can be updated via setUrlMap, so the comparison is
+    # deliberately limited to the urlMap URL.
+    if target_proxy['urlMap'] == gcp_dict['urlMap']:
+ return (False, 'no update necessary')
+
+ try:
+ req = client.targetHttpProxies().setUrlMap(project=project_id,
+ targetHttpProxy=name,
+ body={"urlMap": gcp_dict['urlMap']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ target_proxy_name=dict(required=True),
+ target_proxy_type=dict(required=True, choices=['HTTP']),
+ url_map_name=dict(required=False),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['target_proxy_name'] = module.params.get('target_proxy_name')
+ params['target_proxy_type'] = module.params.get('target_proxy_type')
+ params['url_map'] = module.params.get('url_map_name', None)
+
+ changed = False
+ json_output = {'state': params['state']}
+ target_proxy = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+
+ if not target_proxy:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown target_proxy: %s" %
+ (params['target_proxy_name']))
+ else:
+ # Create
+ changed, json_output['target_proxy'] = create_target_http_proxy(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['target_proxy'] = delete_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['target_proxy'] = update_target_http_proxy(client,
+ target_proxy=target_proxy,
+ params=params,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_target_proxy'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py
new file mode 100644
index 00000000..3fc2c96b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_url_map
+short_description: Create, Update or Destroy a Url_Map.
+description:
+ - Create, Update or Destroy a Url_Map. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/url-map) for an overview.
+ More details on the Url_Map API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/urlMaps#resource).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports global Backend Services.
+ - Url_Map tests are not currently supported.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_url_map) instead.
+options:
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url_Map.
+ required: true
+ default_service:
+ type: str
+ description:
+ - Default Backend Service if no host rules match.
+ required: true
+ host_rules:
+ type: list
+ description:
+ - The list of HostRules to use against the URL. Contains
+ a list of hosts and an associated path_matcher.
+ - The 'hosts' parameter is a list of host patterns to match. They
+ must be valid hostnames, except * will match any string of
+ ([a-z0-9-.]*). In that case, * must be the first character
+      and must be followed in the pattern by either C(-) or C(.).
+ - The 'path_matcher' parameter is name of the PathMatcher to use
+ to match the path portion of the URL if the hostRule matches the URL's
+ host portion.
+ required: false
+ path_matchers:
+ type: list
+ description:
+ - The list of named PathMatchers to use against the URL. Contains
+ path_rules, which is a list of paths and an associated service. A
+ default_service can also be specified for each path_matcher.
+  - The 'name' parameter is the name by which a host_rule refers to this
+      path_matcher.
+ - The 'default_service' parameter is the name of the
+ BackendService resource. This will be used if none of the path_rules
+ defined by this path_matcher is matched by the URL's path portion.
+ - The 'path_rules' parameter is a list of dictionaries containing a
+ list of paths and a service to direct traffic to. Each path item must
+ start with / and the only place a * is allowed is at the end following
+ a /. The string fed to the path matcher does not include any text after
+ the first ? or #, and those chars are not allowed here.
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the URL map should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimal Url_Map
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url_map
+ default_service: my-backend-service
+ state: present
+- name: Create UrlMap with pathmatcher
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url-map-pm
+ default_service: default-backend-service
+ path_matchers:
+ - name: 'path-matcher-one'
+ description: 'path matcher one'
+ default_service: 'bes-pathmatcher-one-default'
+ path_rules:
+ - service: 'my-one-bes'
+ paths:
+ - '/data'
+ - '/aboutus'
+ host_rules:
+ - hosts:
+ - '*.'
+ path_matcher: 'path-matcher-one'
+ state: "present"
+'''
+
+RETURN = '''
+host_rules:
+ description: List of HostRules.
+ returned: If specified.
+  type: list
+  sample: [ { "hosts": ["*."], "path_matcher": "my-pm" } ]
+path_matchers:
+ description: The list of named PathMatchers to use against the URL.
+ returned: If specified.
+  type: list
+ sample: [ { "name": "my-pm", "path_rules": [ { "paths": [ "/data" ] } ], "service": "my-service" } ]
+state:
+ description: state of the Url_Map
+ returned: Always.
+ type: str
+ sample: present
+updated_url_map:
+ description: True if the url_map has been updated. Will not appear on
+ initial url_map creation.
+ returned: if the url_map has been updated.
+ type: bool
+ sample: true
+url_map_name:
+ description: Name of the Url_Map
+ returned: Always
+ type: str
+ sample: my-url-map
+url_map:
+ description: GCP Url_Map dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-url-map", "hostRules": [...], "pathMatchers": [...] }
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+from ansible.module_utils.six import string_types
+
+
+USER_AGENT_PRODUCT = 'ansible-url_map'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_params(params):
+ """
+ Validate url_map params.
+
+ This function calls _validate_host_rules_params to verify
+ the host_rules-specific parameters.
+
+ This function calls _validate_path_matchers_params to verify
+ the path_matchers-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'host_rules', 'type': list},
+ {'name': 'path_matchers', 'type': list},
+ ]
+ try:
+ check_params(params, fields)
+ if 'path_matchers' in params and params['path_matchers'] is not None:
+ _validate_path_matcher_params(params['path_matchers'])
+ if 'host_rules' in params and params['host_rules'] is not None:
+ _validate_host_rules_params(params['host_rules'])
+ except Exception:
+ raise
+
+ return (True, '')
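+
+# A minimal sketch of a params dict that passes the validators below; every
+# value here is illustrative, not taken from a real deployment:
+#
+#   params = {
+#       'default_service': 'my-backend-service',
+#       'host_rules': [{'hosts': ['*.example.com'], 'path_matcher': 'pm-one'}],
+#       'path_matchers': [{'name': 'pm-one',
+#                          'default_service': 'bes-default',
+#                          'path_rules': [{'service': 'my-bes',
+#                                          'paths': ['/data', '/aboutus']}]}],
+#   }
+#   _validate_params(params)  # -> (True, '') or raises ValueError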
+
+
+def _validate_path_matcher_params(path_matchers):
+ """
+ Validate configuration for path_matchers.
+
+    :param path_matchers: Ansible list containing path_matchers
+                          configuration (only).
+    :type path_matchers: ``list``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'name', 'type': str, 'required': True},
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'path_rules', 'type': list, 'required': True},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+ pr_fields = [
+ {'name': 'service', 'type': str, 'required': True},
+ {'name': 'paths', 'type': list, 'required': True},
+ ]
+
+ if not path_matchers:
+ raise ValueError(('path_matchers should be a list. %s (%s) provided'
+ % (path_matchers, type(path_matchers))))
+
+ for pm in path_matchers:
+ try:
+ check_params(pm, fields)
+ for pr in pm['path_rules']:
+ check_params(pr, pr_fields)
+ for path in pr['paths']:
+ if not path.startswith('/'):
+ raise ValueError("path for %s must start with /" % (
+ pm['name']))
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_host_rules_params(host_rules):
+ """
+ Validate configuration for host_rules.
+
+    :param host_rules: Ansible list containing host_rules
+                       configuration (only).
+    :type host_rules: ``list``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'path_matcher', 'type': str, 'required': True},
+ ]
+
+ if not host_rules:
+ raise ValueError('host_rules should be a list.')
+
+ for hr in host_rules:
+ try:
+ check_params(hr, fields)
+ for host in hr['hosts']:
+ if not isinstance(host, string_types):
+ raise ValueError("host in hostrules must be a string")
+ elif '*' in host:
+ if host.index('*') != 0:
+ raise ValueError("wildcard must be first char in host, %s" % (
+ host))
+ else:
+ if host[1] not in ['.', '-', ]:
+ raise ValueError("wildcard be followed by a '.' or '-', %s" % (
+ host))
+
+ except Exception:
+ raise
+
+ return (True, '')
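+
+# Illustrative host patterns under the checks above: '*.example.com' and
+# '*-canary.example.com' pass (wildcard first, followed by '.' or '-');
+# 'www.*.com' fails the position check and '*example.com' fails the
+# follow-on character check.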
+
+
+def _build_path_matchers(path_matcher_list, project_id):
+ """
+ Reformat services in path matchers list.
+
+ Specifically, builds out URLs.
+
+    :param path_matcher_list: List of path matcher configuration dicts.
+    :type path_matcher_list: ``list`` of ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: list suitable for submission to GCP
+ UrlMap API Path Matchers list.
+    :rtype: ``list`` of ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ for pm in path_matcher_list:
+ if 'defaultService' in pm:
+ pm['defaultService'] = '%s/global/backendServices/%s' % (url,
+ pm['defaultService'])
+ if 'pathRules' in pm:
+ for rule in pm['pathRules']:
+ if 'service' in rule:
+ rule['service'] = '%s/global/backendServices/%s' % (url,
+ rule['service'])
+ return path_matcher_list
+
+
+def _build_url_map_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP UrlMap API.
+    :rtype: ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'url_map_name')
+ if 'defaultService' in gcp_dict:
+ gcp_dict['defaultService'] = '%s/global/backendServices/%s' % (url,
+ gcp_dict['defaultService'])
+ if 'pathMatchers' in gcp_dict:
+ gcp_dict['pathMatchers'] = _build_path_matchers(gcp_dict['pathMatchers'], project_id)
+
+ return gcp_dict
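+
+# Sketch of the transformation, assuming params_to_gcp_dict maps snake_case
+# Ansible params onto the camelCase keys the API expects:
+#
+#   {'default_service': 'my-bes'}  becomes
+#   {'defaultService': build_googleapi_url(project_id) + '/global/backendServices/my-bes'}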
+
+
+def get_url_map(client, name, project_id=None):
+ """
+ Get a Url_Map from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.urlMaps().get(project=project_id, urlMap=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_url_map(client, params, project_id):
+ """
+ Create a new Url_Map.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+ try:
+ req = client.urlMaps().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_url_map(client, name, project_id):
+ """
+ Delete a Url_Map.
+
+    :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.urlMaps().delete(project=project_id, urlMap=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_url_map(client, url_map, params, name, project_id):
+ """
+ Update a Url_Map.
+
+ If the url_map has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param url_map: Name of the Url Map.
+ :type url_map: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+
+ ans = GCPUtils.are_params_equal(url_map, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
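+    # The fingerprint gives the API an optimistic-locking token: the update
+    # is rejected if the url_map changed since it was read.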
+ gcp_dict['fingerprint'] = url_map['fingerprint']
+ try:
+ req = client.urlMaps().update(project=project_id,
+ urlMap=name, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ url_map_name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ default_service=dict(required=True),
+ path_matchers=dict(type='list', required=False),
+ host_rules=dict(type='list', required=False),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), required_together=[
+ ['path_matchers', 'host_rules'], ])
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['url_map_name'] = module.params.get('url_map_name')
+ params['default_service'] = module.params.get('default_service')
+ if module.params.get('path_matchers'):
+ params['path_matchers'] = module.params.get('path_matchers')
+ if module.params.get('host_rules'):
+ params['host_rules'] = module.params.get('host_rules')
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+        module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ url_map = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+
+ if not url_map:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown url_map: %s" %
+ (params['url_map_name']))
+ else:
+ # Create
+ changed, json_output['url_map'] = create_url_map(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['url_map'] = delete_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['url_map'] = update_url_map(client,
+ url_map=url_map,
+ params=params,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_url_map'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py
new file mode 100644
index 00000000..de257503
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcpubsub
+short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub
+description:
+ - Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+requirements:
+ - google-auth >= 0.5.0
+ - google-cloud-pubsub >= 0.22.0
+notes:
+ - Subscription pull happens before publish. You cannot publish and pull in the same task.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name.
+ - Only the name, not the full path, is required.
+ required: yes
+ subscription:
+ type: dict
+ description:
+ - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+        For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields.
+ See subfields name, push_endpoint and ack_deadline for more information.
+ suboptions:
+ name:
+ description:
+ - Subfield of subscription. Required if subscription is specified. See examples.
+ ack_deadline:
+ description:
+ - Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
+ pull:
+ description:
+        - Subfield of subscription. Not required. If specified, messages will be retrieved from the topic via the
+          provided subscription name. max_messages (int; default None; max number of messages to pull),
+          message_ack (bool; default False; acknowledge the message) and return_immediately
+          (bool; default False; if true, do not wait for messages to appear) are available as subfields.
+          If the messages are acknowledged, changed is set to True; otherwise, changed is False.
+ push_endpoint:
+ description:
+ - Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
+ See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
+ publish:
+ type: list
+ description:
+ - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ state:
+ type: str
+ description:
+ - State of the topic or queue.
+ - Applies to the most granular resource.
+    - If subscription is specified, we remove it.
+ - If only topic is specified, that is what is removed.
+ - NOTE - A topic can be removed without first removing the subscription.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+# Message will be pushed; there is no check to see if the message was pushed before.
+- name: Create a topic and publish a message to it
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: present
+
+# Subscriptions associated with topic are not deleted.
+- name: Delete Topic
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: absent
+
+# Setting state to absent would keep the messages from being sent.
+- name: Publish multiple messages, with attributes (key:value available with the message)
+ community.general.gcpubsub:
+ topic: '{{ topic_name }}'
+ state: present
+ publish:
+ - message: this is message 1
+ attributes:
+ mykey1: myvalue
+        mykey2: myvalue2
+ mykey3: myvalue3
+ - message: this is message 2
+ attributes:
+ server: prod
+ sla: "99.9999"
+ owner: fred
+
+- name: Create Subscription (pull)
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: present
+
+# pull is default, ack_deadline is not required
+- name: Create Subscription with ack_deadline and push endpoint
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ ack_deadline: "60"
+ push_endpoint: http://pushendpoint.example.com
+ state: present
+
+# Setting push_endpoint to "None" converts subscription to pull.
+- name: Subscription change from push to pull
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: mysub
+ push_endpoint: "None"
+
+# Topic will not be deleted.
+- name: Delete subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: absent
+
+# only pull keyword is required.
+- name: Pull messages from subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: ansible-topic-example-sub
+ pull:
+ message_ack: yes
+ max_messages: "100"
+'''
+
+RETURN = '''
+publish:
+ description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ returned: Only when specified
+ type: list
+ sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
+
+pulled_messages:
+ description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
+ returned: Only when subscription.pull is specified
+ type: list
+ sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
+
+state:
+ description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+subscription:
+ description: Name of subscription.
+ returned: When subscription fields are specified
+ type: str
+ sample: "mysubscription"
+
+topic:
+ description: Name of topic.
+ returned: Always
+ type: str
+ sample: "mytopic"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+CLOUD_CLIENT = 'google-cloud-pubsub'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
+
+
+def publish_messages(message_list, topic):
+ with topic.batch() as batch:
+ for message in message_list:
+ msg = message['message']
+ attrs = {}
+ if 'attributes' in message:
+ attrs = message['attributes']
+            # bytes(msg) fails on Python 3 str; to_bytes handles both Pythons
+            batch.publish(to_bytes(msg), **attrs)
+ return True
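+
+# Rough usage sketch against the pre-1.0 google-cloud-pubsub API used here
+# (topic is a google.cloud.pubsub Topic; values are illustrative):
+#
+#   publish_messages([{'message': 'hello', 'attributes': {'env': 'prod'}}], topic)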
+
+
+def pull_messages(pull_params, sub):
+ """
+ :rtype: tuple (output, changed)
+ """
+ changed = False
+ max_messages = pull_params.get('max_messages', None)
+    message_ack = pull_params.get('message_ack', False)
+ return_immediately = pull_params.get('return_immediately', False)
+
+ output = []
+ pulled = sub.pull(return_immediately=return_immediately, max_messages=max_messages)
+
+ for ack_id, msg in pulled:
+ msg_dict = {'message_id': msg.message_id,
+ 'attributes': msg.attributes,
+ 'data': msg.data,
+ 'ack_id': ack_id}
+ output.append(msg_dict)
+
+ if message_ack:
+ ack_ids = [m['ack_id'] for m in output]
+ if ack_ids:
+ sub.acknowledge(ack_ids)
+ changed = True
+ return (output, changed)
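+
+# Example pull_params shape consumed above (values illustrative):
+#   {'max_messages': 100, 'message_ack': True, 'return_immediately': True}
+# The returned tuple is (list of message dicts, changed).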
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ topic=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ publish=dict(type='list'),
+ subscription=dict(type='dict'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['publish'] = module.params.get('publish')
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['subscription'] = module.params.get('subscription')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
+
+ changed = False
+ json_output = {}
+
+ t = None
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ s = None
+ if mod_params['subscription']:
+ # Note: default ack deadline cannot be changed without deleting/recreating subscription
+ s = t.subscription(mod_params['subscription']['name'],
+ ack_deadline=mod_params['subscription'].get('ack_deadline', None),
+ push_endpoint=mod_params['subscription'].get('push_endpoint', None))
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If subscription is specified
+ # we remove it. If only topic is specified, that is what is removed.
+ # Note that a topic can be removed without first removing the subscription.
+ # TODO(supertom): Enhancement: Provide an option to only delete a topic
+ # if there are no subscriptions associated with it (which the API does not support).
+ if s is not None:
+ if s.exists():
+ s.delete()
+ changed = True
+ else:
+ if t.exists():
+ t.delete()
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not t.exists():
+ t.create()
+ changed = True
+ if s:
+ if not s.exists():
+ s.create()
+ s.reload()
+ changed = True
+ else:
+ # Subscription operations
+ # TODO(supertom): if more 'update' operations arise, turn this into a function.
+ s.reload()
+ push_endpoint = mod_params['subscription'].get('push_endpoint', None)
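+            # The literal string 'None' is a user-facing sentinel (see the
+            # EXAMPLES block): it converts a push subscription back to pull.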
+ if push_endpoint is not None:
+ if push_endpoint != s.push_endpoint:
+ if push_endpoint == 'None':
+ push_endpoint = None
+ s.modify_push_configuration(push_endpoint=push_endpoint)
+ s.reload()
+ changed = push_endpoint == s.push_endpoint
+
+ if 'pull' in mod_params['subscription']:
+ if s.push_endpoint is not None:
+ module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
+ (json_output['pulled_messages'], changed) = pull_messages(
+ mod_params['subscription']['pull'], s)
+
+ # publish messages to the topic
+ if mod_params['publish'] and len(mod_params['publish']) > 0:
+ changed = publish_messages(mod_params['publish'], t)
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+    - The list state enables the user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
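+
+# Example: list_func(pubsub_client.list_topics()) collects the 'name'
+# attribute of each topic object, e.g. ['mytopic', 'mytopic2'].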
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+    - The list state enables the user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py
new file mode 100644
index 00000000..e88fc26b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcspanner
+short_description: Create and Delete Instances/Databases on Spanner
+description:
+ - Create and Delete Instances/Databases on Spanner.
+ See U(https://cloud.google.com/spanner/docs) for an overview.
+requirements:
+ - python >= 2.6
+ - google-auth >= 0.5.0
+ - google-cloud-spanner >= 0.23.0
+notes:
+ - Changing the configuration on an existing instance is not supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_spanner_database) and/or M(google.cloud.gcp_spanner_instance) instead.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ configuration:
+ type: str
+ description:
+ - Configuration the instance should use.
+ - Examples are us-central1, asia-east1 and europe-west1.
+ required: yes
+ instance_id:
+ type: str
+ description:
+ - GCP spanner instance name.
+ required: yes
+ database_name:
+ type: str
+ description:
+ - Name of database contained on the instance.
+ force_instance_delete:
+ description:
+ - To delete an instance, this argument must exist and be true (along with state being equal to absent).
+ type: bool
+ default: 'no'
+ instance_display_name:
+ type: str
+ description:
+ - Name of Instance to display.
+ - If not specified, instance_id will be used instead.
+ node_count:
+ type: int
+ description:
+ - Number of nodes in the instance.
+ default: 1
+ state:
+ type: str
+ description:
+ - State of the instance or database. Applies to the most granular resource.
+ - If a C(database_name) is specified we remove it.
+ - If only C(instance_id) is specified, that is what is removed.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: Create instance
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: present
+ node_count: 1
+
+- name: Create database
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ database_name: '{{ database_name }}'
+ state: present
+
+- name: Delete instance (and all databases)
+  community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: absent
+ force_instance_delete: yes
+'''
+
+RETURN = '''
+state:
+ description: The state of the instance or database. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+database_name:
+ description: Name of database.
+ returned: When database name is specified
+ type: str
+ sample: "mydatabase"
+
+instance_id:
+ description: Name of instance.
+ returned: Always
+ type: str
+ sample: "myinstance"
+
+previous_values:
+ description: List of dictionaries containing previous values prior to update.
+ returned: When an instance update has occurred and a field has been modified.
+ type: dict
+ sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"
+
+updated:
+ description: Boolean field to denote an update has occurred.
+ returned: When an update has occurred.
+ type: bool
+ sample: True
+'''
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import spanner
+ from google.gax.errors import GaxError
+ HAS_GOOGLE_CLOUD_SPANNER = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_SPANNER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+from ansible.module_utils.six import string_types
+
+
+CLOUD_CLIENT = 'google-cloud-spanner'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'
+
+
+def get_spanner_configuration_name(config_name, project_name):
+ config_name = 'projects/%s/instanceConfigs/regional-%s' % (project_name,
+ config_name)
+ return config_name
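+
+# Example: get_spanner_configuration_name('us-central1', 'my-project')
+# returns 'projects/my-project/instanceConfigs/regional-us-central1'.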
+
+
+def instance_update(instance):
+ """
+ Call update method on spanner client.
+
+ Note: A ValueError exception is thrown despite the client succeeding.
+ So, we validate the node_count and instance_display_name parameters and then
+ ignore the ValueError exception.
+
+ :param instance: a Spanner instance object
+ :type instance: class `google.cloud.spanner.Instance`
+
+ :returns True on success, raises ValueError on type error.
+ :rtype ``bool``
+ """
+ errmsg = ''
+ if not isinstance(instance.node_count, int):
+ errmsg = 'node_count must be an integer %s (%s)' % (
+ instance.node_count, type(instance.node_count))
+ if instance.display_name and not isinstance(instance.display_name,
+ string_types):
+        errmsg = 'instance_display_name must be a string %s (%s)' % (
+ instance.display_name, type(instance.display_name))
+ if errmsg:
+ raise ValueError(errmsg)
+
+ try:
+ instance.update()
+ except ValueError:
+ # The ValueError here is the one we 'expect'.
+ pass
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ database_name=dict(type='str'),
+ configuration=dict(type='str', required=True),
+ node_count=dict(type='int', default=1),
+ instance_display_name=dict(type='str'),
+ force_instance_delete=dict(type='bool', default=False),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_SPANNER:
+ module.fail_json(msg="Please install google-cloud-spanner.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" %
+ (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['instance_id'] = module.params.get('instance_id')
+ mod_params['database_name'] = module.params.get('database_name')
+ mod_params['configuration'] = module.params.get('configuration')
+ mod_params['node_count'] = module.params.get('node_count', None)
+ mod_params['instance_display_name'] = module.params.get('instance_display_name')
+ mod_params['force_instance_delete'] = module.params.get('force_instance_delete')
+
+ creds, params = get_google_cloud_credentials(module)
+ spanner_client = spanner.Client(project=params['project_id'],
+ credentials=creds,
+ user_agent=CLOUD_CLIENT_USER_AGENT)
+ changed = False
+ json_output = {}
+
+ i = None
+ if mod_params['instance_id']:
+ config_name = get_spanner_configuration_name(
+ mod_params['configuration'], params['project_id'])
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name)
+ d = None
+ if mod_params['database_name']:
+ # TODO(supertom): support DDL
+ ddl_statements = ''
+ d = i.database(mod_params['database_name'], ddl_statements)
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If database is specified
+ # we remove it. If only instance is specified, that is what is removed.
+ if d is not None and d.exists():
+ d.drop()
+ changed = True
+ else:
+ if i.exists():
+ if mod_params['force_instance_delete']:
+ i.delete()
+ else:
+ module.fail_json(
+ msg=(("Cannot delete Spanner instance: "
+ "'force_instance_delete' argument not specified")))
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not i.exists():
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name,
+ display_name=mod_params['instance_display_name'],
+ node_count=mod_params['node_count'] or 1)
+ i.create()
+ changed = True
+ else:
+ # update instance
+ i.reload()
+ inst_prev_vals = {}
+ if i.display_name != mod_params['instance_display_name']:
+ inst_prev_vals['instance_display_name'] = i.display_name
+ i.display_name = mod_params['instance_display_name']
+ if mod_params['node_count']:
+ if i.node_count != mod_params['node_count']:
+ inst_prev_vals['node_count'] = i.node_count
+ i.node_count = mod_params['node_count']
+ if inst_prev_vals:
+ changed = instance_update(i)
+ json_output['updated'] = changed
+ json_output['previous_values'] = {'instance': inst_prev_vals}
+ if d:
+ if not d.exists():
+ d.create()
+ d.reload()
+ changed = True
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py
new file mode 100644
index 00000000..516c9b0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gem
+short_description: Manage Ruby gems
+description:
+ - Manage installation and uninstallation of Ruby gems.
+options:
+ name:
+ type: str
+ description:
+ - The name of the gem to be managed.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the gem. C(latest) ensures that the latest version is installed.
+ required: false
+ choices: [present, absent, latest]
+ default: present
+ gem_source:
+ type: path
+ description:
+ - The path to a local gem used as installation source.
+ required: false
+ include_dependencies:
+ description:
+ - Whether to include dependencies or not.
+ required: false
+ type: bool
+ default: "yes"
+ repository:
+ type: str
+ description:
+      - The repository from which the gem will be installed.
+ required: false
+ aliases: [source]
+ user_install:
+ description:
+      - Install gem in user's local gems cache or for all users.
+ required: false
+ type: bool
+ default: "yes"
+ executable:
+ type: path
+ description:
+      - Override the path to the gem executable.
+ required: false
+ install_dir:
+ type: path
+ description:
+ - Install the gems into a specific directory.
+ These gems will be independent from the global installed ones.
+ Specifying this requires user_install to be false.
+ required: false
+ env_shebang:
+ description:
+ - Rewrite the shebang line on installed scripts to use /usr/bin/env.
+ required: false
+ default: "no"
+ type: bool
+ version:
+ type: str
+ description:
+ - Version of the gem to be installed/removed.
+ required: false
+ pre_release:
+ description:
+ - Allow installation of pre-release versions of the gem.
+ required: false
+ default: "no"
+ type: bool
+ include_doc:
+ description:
+ - Install with or without docs.
+ required: false
+ default: "no"
+ type: bool
+ build_flags:
+ type: str
+ description:
+      - Allow adding build flags for gem compilation.
+ required: false
+ force:
+ description:
+ - Force gem to install, bypassing dependency checks.
+ required: false
+ default: "no"
+ type: bool
+author:
+ - "Ansible Core Team"
+ - "Johan Wiren (@johanwiren)"
+'''
+
+EXAMPLES = '''
+- name: Install version 1.0 of vagrant
+ community.general.gem:
+ name: vagrant
+ version: 1.0
+ state: present
+
+- name: Install latest available version of rake
+ community.general.gem:
+ name: rake
+ state: latest
+
+- name: Install rake version 1.0 from a local gem on disk
+ community.general.gem:
+ name: rake
+ gem_source: /path/to/gems/rake-1.0.gem
+ state: present
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_rubygems_path(module):
+ if module.params['executable']:
+ result = module.params['executable'].split(' ')
+ else:
+ result = [module.get_bin_path('gem', True)]
+ return result
+
+
+def get_rubygems_version(module):
+ cmd = get_rubygems_path(module) + ['--version']
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+
+ match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
+ if not match:
+ return None
+
+ return tuple(int(x) for x in match.groups())
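+
+# Example: if 'gem --version' prints '3.1.2', this returns (3, 1, 2);
+# unparseable output returns None.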
+
+
+def get_rubygems_environ(module):
+ if module.params['install_dir']:
+ return {'GEM_HOME': module.params['install_dir']}
+ return None
+
+
+def get_installed_versions(module, remote=False):
+
+ cmd = get_rubygems_path(module)
+ cmd.append('query')
+ if remote:
+ cmd.append('--remote')
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ cmd.append('-n')
+ cmd.append('^%s$' % module.params['name'])
+
+ environ = get_rubygems_environ(module)
+ (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True)
+ installed_versions = []
+ for line in out.splitlines():
+ match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
+ if match:
+ versions = match.group(1)
+ for version in versions.split(', '):
+ installed_versions.append(version.split()[0])
+ return installed_versions
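+
+# Example (illustrative 'gem query' output): a line such as
+# 'rake (13.0.1, 10.5.0)' yields ['13.0.1', '10.5.0'], and
+# 'json (default: 2.1.0)' yields ['2.1.0'].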
+
+
+def exists(module):
+ if module.params['state'] == 'latest':
+ remoteversions = get_installed_versions(module, remote=True)
+ if remoteversions:
+ module.params['version'] = remoteversions[0]
+ installed_versions = get_installed_versions(module)
+ if module.params['version']:
+ if module.params['version'] in installed_versions:
+ return True
+ else:
+ if installed_versions:
+ return True
+ return False
+
+
+def uninstall(module):
+
+ if module.check_mode:
+ return
+ cmd = get_rubygems_path(module)
+ environ = get_rubygems_environ(module)
+ cmd.append('uninstall')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ else:
+ cmd.append('--all')
+ cmd.append('--executable')
+ cmd.append(module.params['name'])
+ module.run_command(cmd, environ_update=environ, check_rc=True)
+
+
+def install(module):
+
+ if module.check_mode:
+ return
+
+ ver = get_rubygems_version(module)
+ if ver:
+ major = ver[0]
+ else:
+ major = None
+
+ cmd = get_rubygems_path(module)
+ cmd.append('install')
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ if not module.params['include_dependencies']:
+ cmd.append('--ignore-dependencies')
+ else:
+ if major and major < 2:
+ cmd.append('--include-dependencies')
+ if module.params['user_install']:
+ cmd.append('--user-install')
+ else:
+ cmd.append('--no-user-install')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+ if module.params['pre_release']:
+ cmd.append('--pre')
+ if not module.params['include_doc']:
+ if major and major < 2:
+ cmd.append('--no-rdoc')
+ cmd.append('--no-ri')
+ else:
+ cmd.append('--no-document')
+ if module.params['env_shebang']:
+ cmd.append('--env-shebang')
+ cmd.append(module.params['gem_source'])
+ if module.params['build_flags']:
+ cmd.extend(['--', module.params['build_flags']])
+ if module.params['force']:
+ cmd.append('--force')
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(required=False, type='path'),
+ gem_source=dict(required=False, type='path'),
+ include_dependencies=dict(required=False, default=True, type='bool'),
+ name=dict(required=True, type='str'),
+ repository=dict(required=False, aliases=['source'], type='str'),
+ state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(required=False, default=True, type='bool'),
+ install_dir=dict(required=False, type='path'),
+ pre_release=dict(required=False, default=False, type='bool'),
+ include_doc=dict(required=False, default=False, type='bool'),
+ env_shebang=dict(required=False, default=False, type='bool'),
+ version=dict(required=False, type='str'),
+ build_flags=dict(required=False, type='str'),
+ force=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
+ )
+
+ if module.params['version'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot specify version when state=latest")
+ if module.params['gem_source'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot maintain state=latest when installing from local source")
+ if module.params['user_install'] and module.params['install_dir']:
+ module.fail_json(msg="install_dir requires user_install=false")
+
+ if not module.params['gem_source']:
+ module.params['gem_source'] = module.params['name']
+
+ changed = False
+
+ if module.params['state'] in ['present', 'latest']:
+ if not exists(module):
+ install(module)
+ changed = True
+ elif module.params['state'] == 'absent':
+ if exists(module):
+ uninstall(module)
+ changed = True
+
+ result = {}
+ result['name'] = module.params['name']
+ result['state'] = module.params['state']
+ if module.params['version']:
+ result['version'] = module.params['version']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py
new file mode 100644
index 00000000..66ef45f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Marius Gedminas <marius@pov.lt>
+# (c) 2016, Matthew Gamble <git@matthewgamble.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git_config
+author:
+ - Matthew Gamble (@djmattyg007)
+ - Marius Gedminas (@mgedmin)
+requirements: ['git']
+short_description: Read and write git configuration
+description:
+ - The C(git_config) module changes git configuration by invoking 'git config'.
+ This is needed if you don't want to use M(ansible.builtin.template) for the entire git
+ config file (e.g. because you need to change just C(user.email) in
+ /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or
+ don't work correctly in check mode.
+options:
+ list_all:
+ description:
+ - List all settings (optionally limited to a given I(scope))
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - The name of the setting. If no value is supplied, the value will
+ be read from the config if it has been set.
+ repo:
+ description:
+ - Path to a git repository for reading and writing values from a
+ specific repo.
+ scope:
+ description:
+ - Specify which scope to read/set values from. This is required
+ when setting config values. If this is set to local, you must
+ also specify the repo parameter. It defaults to system only when
+ not using I(list_all)=yes.
+ choices: [ "local", "global", "system" ]
+ state:
+ description:
+ - "Indicates the setting should be set/unset.
+ This parameter has higher precedence than I(value) parameter:
+ when I(state)=absent and I(value) is defined, I(value) is discarded."
+ choices: [ 'present', 'absent' ]
+ default: 'present'
+ value:
+ description:
+ - When specifying the name of a single setting, supply a value to
+ set that setting to the given value.
+'''
+
+EXAMPLES = '''
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ value: commit
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.st
+ scope: global
+ value: status
+
+- name: Remove a setting from ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ state: absent
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: core.editor
+ scope: global
+ value: vim
+
+- name: Add a setting system-wide
+ community.general.git_config:
+ name: alias.remotev
+ scope: system
+ value: remote -v
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: alias.diffc
+ value: diff --cached
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: color.ui
+ value: auto
+
+- name: Make etckeeper not complain when it is invoked by cron
+ community.general.git_config:
+ name: user.email
+ repo: /etc
+ scope: local
+ value: 'root@{{ ansible_fqdn }}'
+
+- name: Read individual values from git config
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+
+- name: Scope system is also assumed when reading values, unless list_all=yes
+ community.general.git_config:
+ name: alias.diffc
+
+- name: Read all values from git config
+ community.general.git_config:
+ list_all: yes
+ scope: global
+
+- name: When list_all is yes and no scope is specified, you get configuration from all scopes
+ community.general.git_config:
+ list_all: yes
+
+- name: Specify a repository to include local settings
+ community.general.git_config:
+ list_all: yes
+ repo: /path/to/repo.git
+'''
+
+RETURN = '''
+---
+config_value:
+ description: When list_all=no and value is not set, a string containing the value of the setting in name
+ returned: success
+ type: str
+ sample: "vim"
+
+config_values:
+ description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
+ returned: success
+ type: dict
+ sample:
+ core.editor: "vim"
+ color.ui: "auto"
+ alias.diffc: "diff --cached"
+ alias.remotev: "remote -v"
+'''
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ list_all=dict(required=False, type='bool', default=False),
+ name=dict(type='str'),
+ repo=dict(type='path'),
+ scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ value=dict(required=False)
+ ),
+ mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
+ required_if=[('scope', 'local', ['repo'])],
+ required_one_of=[['list_all', 'name']],
+ supports_check_mode=True,
+ )
+ git_path = module.get_bin_path('git', True)
+
+ params = module.params
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if params['name']:
+ name = params['name']
+ else:
+ name = None
+
+ if params['scope']:
+ scope = params['scope']
+ elif params['list_all']:
+ scope = None
+ else:
+ scope = 'system'
+
+ if params['state'] == 'absent':
+ unset = 'unset'
+ params['value'] = None
+ else:
+ unset = None
+
+ if params['value']:
+ new_value = params['value']
+ else:
+ new_value = None
+
+ args = [git_path, "config", "--includes"]
+ if params['list_all']:
+ args.append('-l')
+ if scope:
+ args.append("--" + scope)
+ if name:
+ args.append(name)
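+ # For example, scope=global and name=alias.ci build the read command
+ # "git config --includes --global alias.ci"; a value appended later in
+ # main() turns the same argument list into a write.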
+
+ if scope == 'local':
+ dir = params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ dir = params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ dir = "/"
+
+ (rc, out, err) = module.run_command(' '.join(args), cwd=dir)
+ if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
+ # This just means nothing has been set at the given scope
+ module.exit_json(changed=False, msg='', config_values={})
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+
+ if params['list_all']:
+ values = out.rstrip().splitlines()
+ config_values = {}
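+ # Each line of `git config -l` output looks like "alias.ci=commit";
+ # split on the first '=' only, since values may themselves contain '='.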
+ for value in values:
+ k, v = value.split('=', 1)
+ config_values[k] = v
+ module.exit_json(changed=False, msg='', config_values=config_values)
+ elif not new_value and not unset:
+ module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ elif unset and not out:
+ module.exit_json(changed=False, msg='no setting to unset')
+ else:
+ old_value = out.rstrip()
+ if old_value == new_value:
+ module.exit_json(changed=False, msg="")
+
+ if not module.check_mode:
+ if unset:
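+ # Insert "--unset" just before the option name, turning e.g.
+ # "git config --includes --global alias.ci" into
+ # "git config --includes --global --unset alias.ci".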
+ args.insert(len(args) - 1, "--" + unset)
+ cmd = ' '.join(args)
+ else:
+ new_value_quoted = shlex_quote(new_value)
+ cmd = ' '.join(args + [new_value_quoted])
+ try: # try using extra parameter from ansible-base 2.10.4 onwards
+ (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False)
+ except TypeError:
+ # @TODO remove try/except when community.general drop support for 2.10.x
+ if not os.path.isdir(dir):
+ module.fail_json(msg="Cannot find directory '{0}'".format(dir))
+ (rc, out, err) = module.run_command(cmd, cwd=dir)
+ if err:
+ module.fail_json(rc=rc, msg=err, cmd=cmd)
+
+ module.exit_json(
+ msg='setting changed',
+ diff=dict(
+ before_header=' '.join(args),
+ before=old_value + "\n",
+ after_header=' '.join(args),
+ after=(new_value or '') + "\n"
+ ),
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py
new file mode 100644
index 00000000..8836454e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_deploy_key
+author: "Ali (@bincyber)"
+short_description: Manages deploy keys for GitHub repositories.
+description:
+ - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password,
+ username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin
+ rights on the repository are required."
+options:
+ github_url:
+ description:
+ - The base URL of the GitHub API
+ required: false
+ type: str
+ version_added: '0.2.0'
+ default: https://api.github.com
+ owner:
+ description:
+ - The name of the individual account or organization that owns the GitHub repository.
+ required: true
+ aliases: [ 'account', 'organization' ]
+ repo:
+ description:
+ - The name of the GitHub repository.
+ required: true
+ aliases: [ 'repository' ]
+ name:
+ description:
+ - The name for the deploy key.
+ required: true
+ aliases: [ 'title', 'label' ]
+ key:
+ description:
+ - The SSH public key to add to the repository as a deploy key.
+ required: true
+ read_only:
+ description:
+ - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - The state of the deploy key.
+ default: "present"
+ choices: [ "present", "absent" ]
+ force:
+ description:
+ - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
+ type: bool
+ default: 'no'
+ username:
+ description:
+ - The username to authenticate with. Should not be set when using a personal access token.
+ password:
+ description:
+ - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
+ token:
+ description:
+ - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
+ otp:
+ description:
+ - The 6-digit one-time password for 2-factor authentication. Required together with I(username) and I(password).
+ aliases: ['2fa_token']
+notes:
+ - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
+'''
+
+EXAMPLES = '''
+- name: Add a new read-only deploy key to a GitHub repository using basic authentication
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Remove an existing deploy key from a GitHub repository
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ force: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+ state: absent
+
+- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ force: yes
+ token: "ABAQDAwXxn7kIMNWzcDfo..."
+
+- name: Re-add a deploy key to a GitHub repository but with a different name
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "replace-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Add a new deploy key to a GitHub repository using 2FA
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key-2"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ username: "johndoe"
+ password: "supersecretpassword"
+ otp: 123456
+
+- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise
+ community.general.github_deploy_key:
+ github_url: "https://api.example.com"
+ owner: "janedoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "janedoe"
+ password: "supersecretpassword"
+'''
+
+RETURN = '''
+msg:
+ description: the status message describing what occurred
+ returned: always
+ type: str
+ sample: "Deploy key added successfully"
+
+http_status_code:
+ description: the HTTP status code returned by the GitHub API
+ returned: failed
+ type: int
+ sample: 400
+
+error:
+ description: the error message returned by the GitHub API
+ returned: failed
+ type: str
+ sample: "key is already in use"
+
+id:
+ description: the key identifier assigned by GitHub for the deploy key
+ returned: changed
+ type: int
+ sample: 24381901
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from re import findall
+
+
+class GithubDeployKey(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.github_url = self.module.params['github_url']
+ self.name = module.params['name']
+ self.key = module.params['key']
+ self.state = module.params['state']
+ self.read_only = module.params.get('read_only', True)
+ self.force = module.params.get('force', False)
+ self.username = module.params.get('username', None)
+ self.password = module.params.get('password', None)
+ self.token = module.params.get('token', None)
+ self.otp = module.params.get('otp', None)
+
+ @property
+ def url(self):
+ owner = self.module.params['owner']
+ repo = self.module.params['repo']
+ return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo)
+
+ @property
+ def headers(self):
+ if self.username is not None and self.password is not None:
+ self.module.params['url_username'] = self.username
+ self.module.params['url_password'] = self.password
+ self.module.params['force_basic_auth'] = True
+ if self.otp is not None:
+ return {"X-GitHub-OTP": self.otp}
+ elif self.token is not None:
+ return {"Authorization": "token {0}".format(self.token)}
+ else:
+ return None
+
+ def paginate(self, url):
+ while url:
+ resp, info = fetch_url(self.module, url, headers=self.headers, method="GET")
+
+ if info["status"] == 200:
+ yield self.module.from_json(resp.read())
+
+ links = {}
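+ # GitHub paginates results via the Link header, e.g.
+ # <https://api.github.com/repositories/1/keys?page=2>; rel="next"
+ # The regex below maps each rel name to its URL so "next" can be followed.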
+ for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]):
+ links[y] = x
+
+ url = links.get('next')
+ else:
+ self.handle_error(method="GET", info=info)
+
+ def get_existing_key(self):
+ for keys in self.paginate(self.url):
+ if keys:
+ for i in keys:
+ existing_key_id = str(i["id"])
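+ # An existing key from the API is "<type> <data>", while the supplied
+ # key may carry a trailing comment, so compare only the first two fields.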
+ if i["key"].split() == self.key.split()[:2]:
+ return existing_key_id
+ elif i['title'] == self.name and self.force:
+ return existing_key_id
+ else:
+ return None
+
+ def add_new_key(self):
+ request_body = {"title": self.name, "key": self.key, "read_only": self.read_only}
+
+ resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30)
+
+ status_code = info["status"]
+
+ if status_code == 201:
+ response_body = self.module.from_json(resp.read())
+ key_id = response_body["id"]
+ self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id)
+ elif status_code == 422:
+ self.module.exit_json(changed=False, msg="Deploy key already exists")
+ else:
+ self.handle_error(method="POST", info=info)
+
+ def remove_existing_key(self, key_id):
+ resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE")
+
+ status_code = info["status"]
+
+ if status_code == 204:
+ if self.state == 'absent':
+ self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id)
+ else:
+ self.handle_error(method="DELETE", info=info, key_id=key_id)
+
+ def handle_error(self, method, info, key_id=None):
+ status_code = info['status']
+ # Initialise err so the fail_json calls below are safe even when the API returned no body.
+ err = None
+ body = info.get('body')
+ if body:
+ err = self.module.from_json(body)['message']
+
+ if status_code == 401:
+ self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err)
+ elif status_code == 404:
+ self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err)
+ else:
+ if method == "GET":
+ self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err)
+ elif method == "POST":
+ self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err)
+ elif method == "DELETE":
+ self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ github_url=dict(required=False, type='str', default="https://api.github.com"),
+ owner=dict(required=True, type='str', aliases=['account', 'organization']),
+ repo=dict(required=True, type='str', aliases=['repository']),
+ name=dict(required=True, type='str', aliases=['title', 'label']),
+ key=dict(required=True, type='str'),
+ read_only=dict(required=False, type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ force=dict(required=False, type='bool', default=False),
+ username=dict(required=False, type='str'),
+ password=dict(required=False, type='str', no_log=True),
+ otp=dict(required=False, type='int', aliases=['2fa_token'], no_log=True),
+ token=dict(required=False, type='str', no_log=True)
+ ),
+ mutually_exclusive=[
+ ['password', 'token']
+ ],
+ required_together=[
+ ['username', 'password'],
+ ['otp', 'username', 'password']
+ ],
+ required_one_of=[
+ ['username', 'token']
+ ],
+ supports_check_mode=True,
+ )
+
+ deploy_key = GithubDeployKey(module)
+
+ if module.check_mode:
+ key_id = deploy_key.get_existing_key()
+ if deploy_key.state == "present" and key_id is None:
+ module.exit_json(changed=True)
+ elif deploy_key.state == "present" and key_id is not None:
+ module.exit_json(changed=False)
+
+ # to forcefully modify an existing key, the existing key must be deleted first
+ if deploy_key.state == 'absent' or deploy_key.force:
+ key_id = deploy_key.get_existing_key()
+
+ if key_id is not None:
+ deploy_key.remove_existing_key(key_id)
+ elif deploy_key.state == 'absent':
+ module.exit_json(changed=False, msg="Deploy key does not exist")
+
+ if deploy_key.state == "present":
+ deploy_key.add_new_key()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py
new file mode 100644
index 00000000..e326711d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_hooks
+short_description: Manages GitHub service hooks.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Replaced by more granular modules
+ alternative: Use M(community.general.github_webhook) and M(community.general.github_webhook_info) instead.
+description:
+ - Adds service hooks and removes service hooks that have an error status.
+options:
+ user:
+ description:
+ - GitHub username.
+ required: true
+ oauthkey:
+ description:
+ - The OAuth key provided by GitHub. It can be found/generated on GitHub under "Edit Your Profile" >> "Developer settings" >> "Personal Access Tokens".
+ required: true
+ repo:
+ description:
+ - >
+ This is the API URL for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/<user>/<repo>.
+ Note this is different from the normal repo URL.
+ required: true
+ hookurl:
+ description:
+ - When creating a new hook, this is the url that you want GitHub to post to. It is only required when creating a new hook.
+ required: false
+ action:
+ description:
+ - This tells the github_hooks module what you want it to do.
+ required: true
+ choices: [ "create", "cleanall", "list", "clean504" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ content_type:
+ description:
+ - Content type to use for requests made to the webhook
+ required: false
+ default: 'json'
+ choices: ['json', 'form']
+
+author: "Phillip Gentry, CX Inc (@pcgentry)"
+'''
+
+EXAMPLES = '''
+- name: Create a new service hook ignoring duplicates
+ community.general.github_hooks:
+ action: create
+ hookurl: http://11.111.111.111:2222
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
+
+# Cleaning all hooks for this repo that had an error on the last update.
+# Since this works for all hooks in a repo it is probably best that this would be called from a handler.
+- name: Clean all hooks
+ community.general.github_hooks:
+ action: cleanall
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: '{{ repo }}'
+ delegate_to: localhost
+'''
+
+import json
+import base64
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_bytes
+
+
+def request(module, url, user, oauthkey, data='', method='GET'):
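+ # Build an HTTP Basic auth header by hand; b64encode returns bytes, so the
+ # result is decoded to str before being interpolated into the header value.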
+ auth = base64.b64encode(to_bytes('%s:%s' % (user, oauthkey))).decode('ascii')
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method=method)
+ return response, info
+
+
+def _list(module, oauthkey, repo, user):
+ url = "%s/hooks" % repo
+ response, info = request(module, url, user, oauthkey)
+ if info['status'] != 200:
+ return False, ''
+ else:
+ return False, response.read()
+
+
+def _clean504(module, oauthkey, repo, user):
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] == 504:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _cleanall(module, oauthkey, repo, user):
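+ # Remove every hook whose most recent delivery did not return HTTP 200.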
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] != 200:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _create(module, hookurl, oauthkey, repo, user, content_type):
+ url = "%s/hooks" % repo
+ values = {
+ "active": True,
+ "name": "web",
+ "config": {
+ "url": "%s" % hookurl,
+ "content_type": "%s" % content_type
+ }
+ }
+ data = json.dumps(values)
+ response, info = request(module, url, user, oauthkey, data=data, method='POST')
+ if info['status'] != 200:
+ return 0, '[]'
+ else:
+ return 0, response.read()
+
+
+def _delete(module, oauthkey, repo, user, hookid):
+ url = "%s/hooks/%s" % (repo, hookid)
+ response, info = request(module, url, user, oauthkey, method='DELETE')
+ return response.read()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=['list', 'clean504', 'cleanall', 'create']),
+ hookurl=dict(required=False),
+ oauthkey=dict(required=True, no_log=True),
+ repo=dict(required=True),
+ user=dict(required=True),
+ validate_certs=dict(default=True, type='bool'),
+ content_type=dict(default='json', choices=['json', 'form']),
+ )
+ )
+
+ action = module.params['action']
+ hookurl = module.params['hookurl']
+ oauthkey = module.params['oauthkey']
+ repo = module.params['repo']
+ user = module.params['user']
+ content_type = module.params['content_type']
+
+ if action == "list":
+ (rc, out) = _list(module, oauthkey, repo, user)
+
+ if action == "clean504":
+ (rc, out) = _clean504(module, oauthkey, repo, user)
+
+ if action == "cleanall":
+ (rc, out) = _cleanall(module, oauthkey, repo, user)
+
+ if action == "create":
+ (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py
new file mode 100644
index 00000000..9c4b558b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017-18, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_issue
+short_description: View GitHub issue.
+description:
+ - View GitHub issue for a given repository and organization.
+options:
+ repo:
+ description:
+ - Name of repository from which issue needs to be retrieved.
+ required: true
+ organization:
+ description:
+ - Name of the GitHub organization in which the repository is hosted.
+ required: true
+ issue:
+ description:
+ - Issue number for which information is required.
+ required: true
+ action:
+ description:
+ - Get various details about the issue, depending upon the action specified.
+ default: 'get_status'
+ choices:
+ - 'get_status'
+author:
+ - Abhijeet Kasurde (@Akasurde)
+'''
+
+RETURN = '''
+get_status:
+ description: State of the GitHub issue
+ type: str
+ returned: success
+ sample: open, closed
+'''
+
+EXAMPLES = '''
+- name: Check if GitHub issue is closed or not
+ community.general.github_issue:
+ organization: ansible
+ repo: ansible
+ issue: 23642
+ action: get_status
+ register: r
+
+- name: Take action depending upon issue status
+ ansible.builtin.debug:
+ msg: Do something when issue 23642 is open
+ when: r.issue_status == 'open'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ organization=dict(required=True),
+ repo=dict(required=True),
+ issue=dict(type='int', required=True),
+ action=dict(choices=['get_status'], default='get_status'),
+ ),
+ supports_check_mode=True,
+ )
+
+ organization = module.params['organization']
+ repo = module.params['repo']
+ issue = module.params['issue']
+ action = module.params['action']
+
+ result = dict()
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+
+ url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)
+
+ response, info = fetch_url(module, url, headers=headers)
+ if not (200 <= info['status'] < 400):
+ if info['status'] == 404:
+ module.fail_json(msg="Failed to find issue %s" % issue)
+ module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg']))
+
+ gh_obj = json.loads(response.read())
+
+ if action == 'get_status' or action is None:
+ if module.check_mode:
+ result.update(changed=True)
+ else:
+ result.update(changed=True, issue_status=gh_obj['state'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py
new file mode 100644
index 00000000..415065f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys.
+description:
+ - Creates, removes, or updates GitHub access keys.
+options:
+ token:
+ description:
+ - GitHub Access Token with permission to list and create public keys.
+ required: true
+ name:
+ description:
+ - SSH key name
+ required: true
+ pubkey:
+ description:
+ - SSH public key value. Required when C(state=present).
+ state:
+ description:
+ - Whether to remove a key, ensure that it exists, or update its value.
+ choices: ['present', 'absent']
+ default: 'present'
+ force:
+ description:
+ - The default is C(yes), which will replace the existing remote key
+ if it's different from C(pubkey). If C(no), the key will only be
+ set if no key with the given C(name) exists.
+ type: bool
+ default: 'yes'
+
+author: Robert Estelle (@erydo)
+'''
+
+RETURN = '''
+deleted_keys:
+ description: An array of key objects that were deleted. Only present on state=absent
+ type: list
+ returned: When state=absent
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+matching_keys:
+ description: An array of keys matching the specified name. Only present on state=present
+ type: list
+ returned: When state=present
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+key:
+ description: Metadata about the key just created. Only present on state=present
+ type: dict
+ returned: success
+ sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}
+'''
+
+EXAMPLES = '''
+- name: Read SSH public key to authorize
+ ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub
+ register: ssh_pub_key
+
+- name: Authorize key with GitHub
+ local_action:
+ module: github_key
+ name: Access Key for Some Machine
+ token: '{{ github_access_token }}'
+ pubkey: '{{ ssh_pub_key.stdout }}'
+'''
+
+
+import json
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+API_BASE = 'https://api.github.com'
+
+
+class GitHubResponse(object):
+ def __init__(self, response, info):
+ self.content = response.read()
+ self.info = info
+
+ def json(self):
+ return json.loads(self.content)
+
+ def links(self):
+ links = {}
+ if 'link' in self.info:
+ link_header = self.info['link']
+ matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
+ for url, rel in matches:
+ links[rel] = url
+ return links
+
+
+class GitHubSession(object):
+ def __init__(self, module, token):
+ self.module = module
+ self.token = token
+
+ def request(self, method, url, data=None):
+ headers = {
+ 'Authorization': 'token %s' % self.token,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+ response, info = fetch_url(
+ self.module, url, method=method, data=data, headers=headers)
+ if not (200 <= info['status'] < 400):
+ self.module.fail_json(
+ msg=(" failed to send request %s to %s: %s"
+ % (method, url, info['msg'])))
+ return GitHubResponse(response, info)
+
+
+def get_all_keys(session):
+ url = API_BASE + '/user/keys'
+ result = []
+ while url:
+ r = session.request('GET', url)
+ result.extend(r.json())
+ url = r.links().get('next')
+ return result
+
+
+def create_key(session, name, pubkey, check_mode):
+ if check_mode:
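+ # In check mode, fabricate a plausible key object instead of calling the
+ # API, so callers still get a result shaped like a real response.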
+ from datetime import datetime
+ now = datetime.utcnow()
+ return {
+ 'id': 0,
+ 'key': pubkey,
+ 'title': name,
+ 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
+ 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
+ 'read_only': False,
+ 'verified': False
+ }
+ else:
+ return session.request(
+ 'POST',
+ API_BASE + '/user/keys',
+ data=json.dumps({'title': name, 'key': pubkey})).json()
+
+
+def delete_keys(session, to_delete, check_mode):
+ if check_mode:
+ return
+
+ for key in to_delete:
+ session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
+
+
+def ensure_key_absent(session, name, check_mode):
+ to_delete = [key for key in get_all_keys(session) if key['title'] == name]
+ delete_keys(session, to_delete, check_mode=check_mode)
+
+ return {'changed': bool(to_delete),
+ 'deleted_keys': to_delete}
+
+
+def ensure_key_present(module, session, name, pubkey, force, check_mode):
+ all_keys = get_all_keys(session)
+ matching_keys = [k for k in all_keys if k['title'] == name]
+ deleted_keys = []
+
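+ # An OpenSSH public key is "<type> <base64-data> [comment]"; field 1 is the
+ # base64 data, which identifies the key regardless of its comment.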
+ new_signature = pubkey.split(' ')[1]
+ for key in all_keys:
+ existing_signature = key['key'].split(' ')[1]
+ if new_signature == existing_signature and key['title'] != name:
+ module.fail_json(msg=(
+ "another key with the same content is already registered "
+ "under the name |{0}|").format(key['title']))
+
+ if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature:
+ delete_keys(session, matching_keys, check_mode=check_mode)
+ (deleted_keys, matching_keys) = (matching_keys, [])
+
+ if not matching_keys:
+ key = create_key(session, name, pubkey, check_mode=check_mode)
+ else:
+ key = matching_keys[0]
+
+ return {
+ 'changed': bool(deleted_keys or not matching_keys),
+ 'deleted_keys': deleted_keys,
+ 'matching_keys': matching_keys,
+ 'key': key
+ }
+
+
+def main():
+ argument_spec = {
+ 'token': {'required': True, 'no_log': True},
+ 'name': {'required': True},
+ 'pubkey': {},
+ 'state': {'choices': ['present', 'absent'], 'default': 'present'},
+ 'force': {'default': True, 'type': 'bool'},
+ }
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ name = module.params['name']
+ state = module.params['state']
+ force = module.params['force']
+ pubkey = module.params.get('pubkey')
+
+ if pubkey:
+ pubkey_parts = pubkey.split(' ')
+ # Keys consist of a protocol, the key data, and an optional comment.
+ if len(pubkey_parts) < 2:
+ module.fail_json(msg='"pubkey" parameter has an invalid format')
+ elif state == 'present':
+ module.fail_json(msg='"pubkey" is required when state=present')
+
+ session = GitHubSession(module, token)
+ if state == 'present':
+ result = ensure_key_present(module, session, name, pubkey, force=force,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py
new file mode 100644
index 00000000..5372d6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+ - Fetch metadata about GitHub Releases
+options:
+ token:
+ description:
+ - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
+ user:
+ description:
+ - The GitHub account that owns the repository
+ required: true
+ password:
+ description:
+ - The GitHub account password for the user. Mutually exclusive with C(token).
+ repo:
+ description:
+ - Repository name
+ required: true
+ action:
+ description:
+ - Action to perform
+ required: true
+ choices: [ 'latest_release', 'create_release' ]
+ tag:
+ description:
+ - Tag name when creating a release. Required when I(action) is set to C(create_release).
+ target:
+ description:
+ - Target of release when creating a release
+ name:
+ description:
+ - Name of release when creating a release
+ body:
+ description:
+ - Description of the release when creating a release
+ draft:
+ description:
+ - Whether the release is a draft.
+ type: 'bool'
+ default: 'no'
+ prerelease:
+ description:
+ - Whether the release is a prerelease.
+ type: bool
+ default: 'no'
+
+author:
+ - "Adrian Moisey (@adrianmoisey)"
+requirements:
+ - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of a public repository
+ community.general.github_release:
+ user: ansible
+ repo: ansible
+ action: latest_release
+
+- name: Get latest release of testuser/testrepo
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
+
+- name: Get latest release of test repo using username and password (Ansible 2.4+)
+ community.general.github_release:
+ user: testuser
+ password: secret123
+ repo: testrepo
+ action: latest_release
+
+- name: Create a new release
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: create_release
+ tag: test
+ target: master
+ name: My Release
+ body: Some description
+
+'''
+
+RETURN = '''
+create_release:
+ description:
+ - Version of the created release
+ - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged"
+ - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped"
+ type: str
+ returned: success
+ sample: 1.1.0
+
+latest_release:
+ description: Version of the latest release
+ type: str
+ returned: success
+ sample: 1.1.0
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github3
+
+ HAS_GITHUB_API = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(required=True),
+ user=dict(required=True),
+ password=dict(no_log=True),
+ token=dict(no_log=True),
+ action=dict(
+ required=True, choices=['latest_release', 'create_release']),
+ tag=dict(type='str'),
+ target=dict(type='str'),
+ name=dict(type='str'),
+ body=dict(type='str'),
+ draft=dict(type='bool', default=False),
+ prerelease=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('password', 'token'),),
+ required_if=[('action', 'create_release', ['tag']),
+ ('action', 'create_release', ['password', 'token'], True)],
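+ # The trailing True makes the second rule an "any of" requirement:
+ # create_release needs tag plus at least one of password or token.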
+ )
+
+ if not HAS_GITHUB_API:
+ module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
+ exception=GITHUB_IMP_ERR)
+
+ repo = module.params['repo']
+ user = module.params['user']
+ password = module.params['password']
+ login_token = module.params['token']
+ action = module.params['action']
+ tag = module.params.get('tag')
+ target = module.params.get('target')
+ name = module.params.get('name')
+ body = module.params.get('body')
+ draft = module.params.get('draft')
+ prerelease = module.params.get('prerelease')
+
+ # login to github
+ try:
+ if password:
+ gh_obj = github3.login(user, password=password)
+ elif login_token:
+ gh_obj = github3.login(token=login_token)
+ else:
+ gh_obj = github3.GitHub()
+
+ # test if we're actually logged in
+ if password or login_token:
+ gh_obj.me()
+ except github3.exceptions.AuthenticationFailed as e:
+ module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
+ details="Please check username and password or token "
+ "for repository %s" % repo)
+
+ repository = gh_obj.repository(user, repo)
+
+ if not repository:
+ module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+ if action == 'latest_release':
+ release = repository.latest_release()
+ if release:
+ module.exit_json(tag=release.tag_name)
+ else:
+ module.exit_json(tag=None)
+
+ if action == 'create_release':
+ release_exists = repository.release_from_tag(tag)
+ if release_exists:
+ module.exit_json(changed=False, msg="Release for tag %s already exists." % tag)
+
+ release = repository.create_release(
+ tag, target, name, body, draft, prerelease)
+ if release:
+ module.exit_json(changed=True, tag=release.tag_name)
+ else:
+ module.exit_json(changed=False, tag=None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py
new file mode 100644
index 00000000..ac153689
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook
+short_description: Manage GitHub webhooks
+description:
+ - "Create and delete GitHub webhooks"
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ required: true
+ aliases:
+ - repo
+ url:
+ description:
+ - URL to which payloads will be delivered
+ required: true
+ content_type:
+ description:
+ - The media type used to serialize the payloads
+ required: false
+ choices: [ form, json ]
+ default: form
+ secret:
+ description:
+ - The shared secret between GitHub and the payload URL.
+ required: false
+ insecure_ssl:
+ description:
+ - >
+ Flag to indicate that GitHub should skip SSL verification when calling
+ the hook.
+ required: false
+ type: bool
+ default: false
+ events:
+ description:
+ - >
+ A list of GitHub events the hook is triggered for. Events are listed at
+ U(https://developer.github.com/v3/activity/events/types/). Required
+ unless C(state) is C(absent)
+ required: false
+ type: list
+ elements: str
+ active:
+ description:
+ - Whether or not the hook is active
+ required: false
+ type: bool
+ default: true
+ state:
+ description:
+ - Whether the hook should be present or absent
+ required: false
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: Create a new webhook that triggers on push (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ events:
+ - push
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+
+- name: Create a new webhook in a github enterprise installation with multiple event triggers (token auth)
+ community.general.github_webhook:
+ repository: myorg/myrepo
+ url: https://jenkins.example.com/ghprbhook/
+ content_type: json
+ secret: "{{ github_shared_secret }}"
+ insecure_ssl: True
+ events:
+ - issue_comment
+ - pull_request
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com
+
+- name: Delete a webhook (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ state: absent
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+'''
+
+RETURN = '''
+---
+hook_id:
+ description: The GitHub ID of the hook created/updated
+ returned: when state is 'present'
+ type: int
+ sample: 6206
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def _create_hook_config(module):
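+ # GitHub's hook API expects insecure_ssl as the strings "0"/"1", not booleans.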
+ return {
+ "url": module.params["url"],
+ "content_type": module.params["content_type"],
+ "secret": module.params.get("secret"),
+ "insecure_ssl": "1" if module.params["insecure_ssl"] else "0"
+ }
+
+
+def create_hook(repo, module):
+ config = _create_hook_config(module)
+ try:
+ hook = repo.create_hook(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to create hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return True, data
+
+
+def update_hook(repo, hook, module):
+ config = _create_hook_config(module)
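+ # hook.update() re-fetches the hook from the API and returns whether the
+ # stored data differed; it is called before edit() to refresh stale state
+ # and again afterwards to derive the changed flag reported to Ansible.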
+ try:
+ hook.update()
+ hook.edit(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+
+ changed = hook.update()
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to modify hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return changed, data
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=['repo']),
+ url=dict(type='str', required=True),
+ content_type=dict(
+ type='str',
+ choices=('json', 'form'),
+ required=False,
+ default='form'),
+ secret=dict(type='str', required=False, no_log=True),
+ insecure_ssl=dict(type='bool', required=False, default=False),
+ events=dict(type='list', elements='str', required=False),
+ active=dict(type='bool', required=False, default=True),
+ state=dict(
+ type='str',
+ required=False,
+ choices=('absent', 'present'),
+ default='present'),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'),),
+ required_one_of=(("password", "token"),),
+ required_if=(("state", "present", ("events",)),),
+ )
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ hook = None
+ try:
+ for hook in repo.get_hooks():
+ if hook.config.get("url") == module.params["url"]:
+ break
+ else:
+ hook = None
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to get hooks from repository %s: %s" % (
+ module.params["repository"], to_native(err)))
+
+ changed = False
+ data = {}
+ if hook is None and module.params["state"] == "present":
+ changed, data = create_hook(repo, module)
+ elif hook is not None and module.params["state"] == "absent":
+ try:
+ hook.delete()
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to delete hook from repository %s: %s" % (
+ repo.full_name, to_native(err)))
+ else:
+ changed = True
+ elif hook is not None and module.params["state"] == "present":
+ changed, data = update_hook(repo, hook, module)
+ # else, there is no hook and we want there to be no hook
+
+ module.exit_json(changed=changed, **data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py
new file mode 100644
index 00000000..f99a0a03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook_info
+short_description: Query information about GitHub webhooks
+description:
+ - "Query information about GitHub webhooks"
+ - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ required: true
+ aliases:
+ - repo
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: List hooks for a repository (password auth)
+ community.general.github_webhook_info:
+ repository: ansible/ansible
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+ register: ansible_webhooks
+
+- name: List hooks for a repository on GitHub Enterprise (token auth)
+ community.general.github_webhook_info:
+ repository: myorg/myrepo
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com/api/v3/
+ register: myrepo_webhooks
+'''
+
+RETURN = '''
+---
+hooks:
+ description: A list of hooks that exist for the repo
+ returned: always
+ type: list
+ sample: >
+ [{"has_shared_secret": true,
+ "url": "https://jenkins.example.com/ghprbhook/",
+ "events": ["issue_comment", "pull_request"],
+ "insecure_ssl": "1",
+ "content_type": "json",
+ "active": true,
+ "id": 6206,
+ "last_response": {"status": "active", "message": "OK", "code": 200}}]
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def _munge_hook(hook_obj):
+ retval = {
+ "active": hook_obj.active,
+ "events": hook_obj.events,
+ "id": hook_obj.id,
+ "url": hook_obj.url,
+ }
+ retval.update(hook_obj.config)
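+ # The hook config carries the delivery URL, content type and (if set) the
+ # shared secret; report only whether a secret exists, never its value.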
+ retval["has_shared_secret"] = "secret" in retval
+ if "secret" in retval:
+ del retval["secret"]
+
+ retval["last_response"] = hook_obj.last_response.raw_data
+ return retval
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=["repo"]),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'), ),
+ required_one_of=(("password", "token"), ),
+ supports_check_mode=True)
+ if module._name in ('github_webhook_facts', 'community.general.github_webhook_facts'):
+ module.deprecate("The 'github_webhook_facts' module has been renamed to 'github_webhook_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ try:
+ hooks = [_munge_hook(h) for h in repo.get_hooks()]
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to get hooks from repository %s: %s" %
+ (module.params["repository"], to_native(err)),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, hooks=hooks)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
new file mode 100644
index 00000000..c66a6f9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_deploy_key
+short_description: Manages GitLab project deploy keys.
+description:
+ - Adds, updates and removes project deploy keys
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+ - ID or full path of the project in the form of group/name.
+ required: true
+ type: str
+ title:
+ description:
+ - Deploy key's title.
+ required: true
+ type: str
+ key:
+ description:
+ - Deploy key
+ required: true
+ type: str
+ can_push:
+ description:
+ - Whether this key can push to the project.
+ type: bool
+ default: no
+ state:
+ description:
+ - When C(present), the deploy key will be added to the project if it doesn't exist.
+ - When C(absent) it will be removed from the project if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+- name: "Adding a project deploy key"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+- name: "Update the above deploy key to add push access"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ can_push: yes
+
+- name: "Remove the previous deploy key from the project"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ state: absent
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: key is already in use"
+
+deploy_key:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabDeployKey(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.deployKeyObject = None
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ @param key_key String of the key
+ @param options Deploy key options
+ '''
+ def createOrUpdateDeployKey(self, project, key_title, key_key, options):
+ changed = False
+
+ # existsDeployKey() was already called in main(), so deployKeyObject is set whenever the key exists
+ if self.deployKeyObject is None:
+ deployKey = self.createDeployKey(project, {
+ 'title': key_title,
+ 'key': key_key,
+ 'can_push': options['can_push']})
+ changed = True
+ else:
+ changed, deployKey = self.updateDeployKey(self.deployKeyObject, {
+ 'can_push': options['can_push']})
+
+ self.deployKeyObject = deployKey
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)
+
+ try:
+ deployKey.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update deploy key: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the deployKey
+ '''
+ def createDeployKey(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ deployKey = project.keys.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))
+
+ return deployKey
+
+ '''
+ @param deployKey Deploy Key Object
+ @param arguments Attributes of the deployKey
+ '''
+ def updateDeployKey(self, deployKey, arguments):
+ changed = False
+
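+ # Compare each supplied attribute with the current object and only mark
+ # the key as changed when a value actually differs, keeping runs idempotent.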
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(deployKey, arg_key) != arguments[arg_key]:
+ setattr(deployKey, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, deployKey)
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def findDeployKey(self, project, key_title):
+ deployKeys = project.keys.list()
+ for deployKey in deployKeys:
+ if (deployKey.title == key_title):
+ return deployKey
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def existsDeployKey(self, project, key_title):
+ # When the deploy key exists, the object is stored in self.deployKeyObject.
+ deployKey = self.findDeployKey(project, key_title)
+ if deployKey:
+ self.deployKeyObject = deployKey
+ return True
+ return False
+
+ def deleteDeployKey(self):
+ if self._module.check_mode:
+ return True
+
+ return self.deployKeyObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ key=dict(type='str', required=True),
+ can_push=dict(type='bool', default=False),
+ title=dict(type='str', required=True)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ key_title = module.params['title']
+ key_keyfile = module.params['key']
+ key_can_push = module.params['can_push']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier)
+
+ deployKey_exists = gitlab_deploy_key.existsDeployKey(project, key_title)
+
+ if state == 'absent':
+ if deployKey_exists:
+ gitlab_deploy_key.deleteDeployKey()
+ module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
+ else:
+ module.exit_json(changed=False, msg="Deploy key deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_deploy_key.createOrUpdateDeployKey(project, key_title, key_keyfile, {'can_push': key_can_push}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py
new file mode 100644
index 00000000..0c612733
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes GitLab Groups
+description:
+ - When the group does not exist in GitLab, it will be created.
+ - When the group does exist and state=absent, the group will be deleted.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the group you want to create.
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the group you want to create; this will be api_url/group_path.
+ - If not supplied, the group_name will be used.
+ type: str
+ description:
+ description:
+ - A description for the group.
+ type: str
+ state:
+ description:
+ - Create or delete group.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ parent:
+ description:
+ - Allows you to create subgroups.
+ - Id or full path of the parent group in the form of group/name.
+ type: str
+ visibility:
+ description:
+ - Default visibility of the group.
+ choices: ["private", "internal", "public"]
+ default: private
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_group
+ state: absent
+
+- name: "Create GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+
+# The group will be created at https://gitlab.example.com/super_parent/parent/my_first_group
+- name: "Create GitLab SubGroup"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+ parent: "super_parent/parent"
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+group:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.groupObject = None
+
+ '''
+ @param group Group object
+ '''
+ def getGroupId(self, group):
+ if group is not None:
+ return group.id
+ return None
+
+ '''
+ @param name Name of the group
+ @param parent Parent group full path
+ @param options Group options
+ '''
+ def createOrUpdateGroup(self, name, parent, options):
+ changed = False
+
+ # Because existsGroup() has already been called in main(), self.groupObject is set when the group exists
+ if self.groupObject is None:
+ parent_id = self.getGroupId(parent)
+
+ payload = {
+ 'name': name,
+ 'path': options['path'],
+ 'parent_id': parent_id,
+ 'visibility': options['visibility']
+ }
+ if options.get('description'):
+ payload['description'] = options['description']
+ group = self.createGroup(payload)
+ changed = True
+ else:
+ changed, group = self.updateGroup(self.groupObject, {
+ 'name': name,
+ 'description': options['description'],
+ 'visibility': options['visibility']})
+
+ self.groupObject = group
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name)
+
+ try:
+ group.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update group: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the group
+ '''
+ def createGroup(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ group = self._gitlab.groups.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create group: %s " % to_native(e))
+
+ return group
+
+ '''
+ @param group Group Object
+ @param arguments Attributes of the group
+ '''
+ def updateGroup(self, group, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(group, arg_key) != arguments[arg_key]:
+ setattr(group, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, group)
+
+ def deleteGroup(self):
+ group = self.groupObject
+
+ if len(group.projects.list()) >= 1:
+ self._module.fail_json(
+ msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
+ else:
+ if self._module.check_mode:
+ return True
+
+ try:
+ group.delete()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
+
+ '''
+ @param project_identifier Id or full path of the group, including the parent path (<parent_path>/<group_path>)
+ '''
+ def existsGroup(self, project_identifier):
+ # When group/user exists, object will be stored in self.groupObject.
+ group = findGroup(self._gitlab, project_identifier)
+ if group:
+ self.groupObject = group
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ parent=dict(type='str'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_name = module.params['name']
+ group_path = module.params['path']
+ description = module.params['description']
+ state = module.params['state']
+ parent_identifier = module.params['parent']
+ group_visibility = module.params['visibility']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Define default group_path based on group_name
+ if group_path is None:
+ group_path = group_name.replace(" ", "_")
+
+ gitlab_group = GitLabGroup(module, gitlab_instance)
+
+ parent_group = None
+ if parent_identifier:
+ parent_group = findGroup(gitlab_instance, parent_identifier)
+ if not parent_group:
+ module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists")
+
+ group_exists = gitlab_group.existsGroup(parent_group.full_path + '/' + group_path)
+ else:
+ group_exists = gitlab_group.existsGroup(group_path)
+
+ if state == 'absent':
+ if group_exists:
+ gitlab_group.deleteGroup()
+ module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
+ else:
+ module.exit_json(changed=False, msg="Group deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_group.createOrUpdateGroup(group_name, parent_group, {
+ "path": group_path,
+ "description": description,
+ "visibility": group_visibility}):
+ module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
new file mode 100644
index 00000000..8a3da2a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_group_members
+short_description: Manage group members on GitLab Server
+description:
+ - This module allows adding and removing members to/from a group, as well as changing a member's access level in a group on GitLab.
+version_added: '1.2.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - python-gitlab python module <= 1.15.0
+ - administrator rights on the GitLab server
+extends_documentation_fragment: community.general.auth_basic
+options:
+ api_token:
+ description:
+ - A personal access token to authenticate with the GitLab API.
+ required: true
+ type: str
+ gitlab_group:
+ description:
+ - The name of the GitLab group the member is added to/removed from.
+ required: true
+ type: str
+ gitlab_user:
+ description:
+ - The username of the member to add to/remove from the GitLab group.
+ required: true
+ type: str
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state=present).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+ state:
+ description:
+ - State of the member in the group.
+ - On C(present), it adds a user to a GitLab group.
+ - On C(absent), it removes a user from a GitLab group.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Add a user to a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ access_level: developer
+ state: present
+
+- name: Remove a user from a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ state: absent
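+
+# An illustrative sketch: when the user is already a member, re-running with a
+# different I(access_level) updates the membership in place.
+- name: Update a user's access level in a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ access_level: maintainer
+ state: present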
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_PY_GITLAB = True
+except ImportError:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_PY_GITLAB = False
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gl):
+ self._module = module
+ self._gitlab = gl
+
+ # get user id if the user exists
+ def get_user_id(self, gitlab_user):
+ user_exists = self._gitlab.users.list(username=gitlab_user)
+ if user_exists:
+ return user_exists[0].id
+
+ # get group id if group exists
+ def get_group_id(self, gitlab_group):
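+ # note: search matches substrings; the first search result is assumed to be the wanted group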
+ group_exists = self._gitlab.groups.list(search=gitlab_group)
+ if group_exists:
+ return group_exists[0].id
+
+ # get all members in a group
+ def get_members_in_a_group(self, gitlab_group_id):
+ group = self._gitlab.groups.get(gitlab_group_id)
+ return group.members.list()
+
+ # check if the user is a member of the group
+ def is_user_a_member(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return True
+ return False
+
+ # add user to a group
+ def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ add_member = group.members.create(
+ {'user_id': gitlab_user_id, 'access_level': access_level})
+
+ if add_member:
+ return add_member.username
+
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to add member to the Group, Group ID %s: %s" % (gitlab_group_id, e))
+
+ # remove user from a group
+ def remove_user_from_group(self, gitlab_user_id, gitlab_group_id):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ group.members.delete(gitlab_user_id)
+
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ self._module.fail_json(
+ msg="Failed to remove member from GitLab group, ID %s: %s" % (gitlab_group_id, e))
+
+ # get user's access level
+ def get_user_access_level(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return member.access_level
+
+ # update user's access level in a group
+ def update_user_access_level(self, members, gitlab_user_id, access_level):
+ for member in members:
+ if member.id == gitlab_user_id:
+ try:
+ member.access_level = access_level
+ member.save()
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e))
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', required=True, no_log=True),
+ gitlab_group=dict(type='str', required=True),
+ gitlab_user=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'])
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ required_if=[
+ ['state', 'present', ['access_level']],
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PY_GITLAB:
+ module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)
+
+ gitlab_group = module.params['gitlab_group']
+ gitlab_user = module.params['gitlab_user']
+ state = module.params['state']
+ access_level = module.params['access_level']
+
+ # convert access level string input to int
+ if access_level:
+ access_level_int = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS
+ }
+
+ access_level = access_level_int[access_level]
+
+ # connect to gitlab server
+ gl = gitlabAuthentication(module)
+
+ group = GitLabGroup(module, gl)
+
+ gitlab_user_id = group.get_user_id(gitlab_user)
+ gitlab_group_id = group.get_group_id(gitlab_group)
+
+ # group doesn't exist
+ if not gitlab_group_id:
+ module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+ # user doesn't exist
+ if not gitlab_user_id:
+ if state == 'absent':
+ module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the group" % gitlab_user)
+ else:
+ module.fail_json(msg="user '%s' not found." % gitlab_user)
+
+ members = group.get_members_in_a_group(gitlab_group_id)
+ is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+ # check if the user is a member in the group
+ if not is_user_a_member:
+ if state == 'present':
+ # add user to the group
+ if not module.check_mode:
+ group.add_member_to_group(gitlab_user_id, gitlab_group_id, access_level)
+ module.exit_json(changed=True, result="Successfully added user '%s' to the group." % gitlab_user)
+ # state as absent
+ else:
+ module.exit_json(changed=False, result="User, '%s', is not a member in the group. No change to report" % gitlab_user)
+ # in case that a user is a member
+ else:
+ if state == 'present':
+ # compare the access level
+ user_access_level = group.get_user_access_level(members, gitlab_user_id)
+ if user_access_level == access_level:
+ module.exit_json(changed=False, result="User, '%s', is already a member in the group. No change to report" % gitlab_user)
+ else:
+ # update the access level for the user
+ if not module.check_mode:
+ group.update_user_access_level(members, gitlab_user_id, access_level)
+ module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user)
+ else:
+ # remove the user from the group
+ if not module.check_mode:
+ group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
+ module.exit_json(changed=True, result="Successfully removed user, '%s', from the group" % gitlab_user)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
new file mode 100644
index 00000000..dd20a0b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Florent Madiot (scodeman@scode.io)
+# Based on code:
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_group_variable
+short_description: Creates, updates, or deletes GitLab group variables
+version_added: 1.2.0
+description:
+ - Creates a group variable if it does not exist.
+ - When a group variable does exist, its value will be updated if the values are different.
+ - Variables which are not mentioned in the playbook, but do exist in the GitLab group,
+ will either stay untouched (I(purge) is C(false)) or be deleted (I(purge) is C(true)).
+author:
+ - Florent Madiot (@scodeman)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete group variable.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ group:
+ description:
+ - The path and name of the group.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to C(true), delete all variables which are not mentioned in the task.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control about whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+ default: {}
+ type: dict
+notes:
+- Supports C(check_mode).
+'''
+
+
+EXAMPLES = r'''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
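+
+# A sketch with I(purge) enabled: every group variable not listed under I(vars)
+# is removed, so only ACCESS_KEY_ID survives (group path is illustrative, as above).
+- name: Delete all variables except the listed ones
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: true
+ vars:
+ ACCESS_KEY_ID: abc123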
+'''
+
+RETURN = r'''
+group_variable:
+ description: Four lists of the variable names which were added, updated, removed, or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabGroupVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.group = self.get_group(module.params['group'])
+ self._module = module
+
+ def get_group(self, group_name):
+ return self.repo.groups.get(group_name)
+
+ def list_all_group_variables(self):
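+ # walk the paginated variable list page by page until an empty page is returned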
+ page_nb = 1
+ variables = []
+ vars_page = self.group.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.group.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.group.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.group.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_group_variables()
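+ # get_id() yields the variable's key, which is its unique identifier within the group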
+ existing_variables = [x.get_id() for x in gitlab_keys]
+
+ for key in var_list:
+ if not isinstance(var_list[key], (string_types, integer_types, float, dict)):
+ module.fail_json(msg="Value of %s variable must be of type string, integer, float or dict, passed %s" % (key, var_list[key].__class__.__name__))
+
+ for key in var_list:
+
+ if isinstance(var_list[key], (string_types, integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+
+ if key in existing_variables:
+ index = existing_variables.index(key)
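+ # blank out handled keys; whatever is left non-None afterwards is a purge candidate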
+ existing_variables[index] = None
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ group=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance)
+
+ changed, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=changed, group_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py
new file mode 100644
index 00000000..bc4b6ecb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_hook
+short_description: Manages GitLab project hooks.
+description:
+ - Adds, updates and removes project hooks.
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+ - Id or Full path of the project in the form of group/name.
+ required: true
+ type: str
+ hook_url:
+ description:
+ - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion.
+ required: true
+ type: str
+ state:
+ description:
+ - When C(present), the hook will be updated to match the input or created if it doesn't exist.
+ - When C(absent), the hook will be deleted if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ push_events:
+ description:
+ - Trigger hook on push events.
+ type: bool
+ default: yes
+ push_events_branch_filter:
+ description:
+ - Branch name or wildcard to trigger hook on push events.
+ type: str
+ version_added: '0.2.0'
+ issues_events:
+ description:
+ - Trigger hook on issues events.
+ type: bool
+ default: no
+ merge_requests_events:
+ description:
+ - Trigger hook on merge requests events.
+ type: bool
+ default: no
+ tag_push_events:
+ description:
+ - Trigger hook on tag push events.
+ type: bool
+ default: no
+ note_events:
+ description:
+ - Trigger hook on note events or when someone adds a comment.
+ type: bool
+ default: no
+ job_events:
+ description:
+ - Trigger hook on job events.
+ type: bool
+ default: no
+ pipeline_events:
+ description:
+ - Trigger hook on pipeline events.
+ type: bool
+ default: no
+ wiki_page_events:
+ description:
+ - Trigger hook on wiki events.
+ type: bool
+ default: no
+ hook_validate_certs:
+ description:
+ - Whether GitLab will do SSL verification when triggering the hook.
+ type: bool
+ default: no
+ aliases: [ enable_ssl_verification ]
+ token:
+ description:
+ - Secret token to validate hook messages at the receiver.
+ - If this is present it will always result in a change as it cannot be retrieved from GitLab.
+ - Will show up in the X-GitLab-Token HTTP request header.
+ required: false
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Adding a project hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: present
+ push_events: yes
+ tag_push_events: yes
+ hook_validate_certs: no
+ token: "my-super-secret-token-that-my-ci-server-will-check"
+
+- name: "Delete the previous hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+
+- name: "Delete a hook by numeric project id"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: 10
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
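+
+# An illustrative sketch: because I(hook_url) is the primary key, re-running with
+# different event flags updates the existing hook instead of creating a new one.
+- name: "Update the hook to also trigger on merge request events"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: present
+ push_events: yes
+ merge_requests_events: yes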
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+hook:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabHook(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.hookObject = None
+
+ '''
+ @param project Project Object
+ @param hook_url Url to call on event
+ @param options Hook options
+ '''
+ def createOrUpdateHook(self, project, hook_url, options):
+ changed = False
+
+ # Because existsHook() has already been called in main(), self.hookObject is set when the hook exists
+ if self.hookObject is None:
+ hook = self.createHook(project, {
+ 'url': hook_url,
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+ changed = True
+ else:
+ changed, hook = self.updateHook(self.hookObject, {
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+
+ self.hookObject = hook
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)
+
+ try:
+ hook.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update hook: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the hook
+ '''
+ def createHook(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ hook = project.hooks.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create hook: %s " % to_native(e))
+
+ return hook
+
+ '''
+ @param hook Hook Object
+ @param arguments Attributes of the hook
+ '''
+ def updateHook(self, hook, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(hook, arg_key) != arguments[arg_key]:
+ setattr(hook, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, hook)
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def findHook(self, project, hook_url):
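+ # note: without all=True, python-gitlab list() returns only the first page of hooks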
+ hooks = project.hooks.list()
+ for hook in hooks:
+ if hook.url == hook_url:
+ return hook
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def existsHook(self, project, hook_url):
+ # When project exists, object will be stored in self.projectObject.
+ hook = self.findHook(project, hook_url)
+ if hook:
+ self.hookObject = hook
+ return True
+ return False
+
+ def deleteHook(self):
+ if self._module.check_mode:
+ return True
+
+ return self.hookObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ hook_url=dict(type='str', required=True),
+ push_events=dict(type='bool', default=True),
+ push_events_branch_filter=dict(type='str', default=''),
+ issues_events=dict(type='bool', default=False),
+ merge_requests_events=dict(type='bool', default=False),
+ tag_push_events=dict(type='bool', default=False),
+ note_events=dict(type='bool', default=False),
+ job_events=dict(type='bool', default=False),
+ pipeline_events=dict(type='bool', default=False),
+ wiki_page_events=dict(type='bool', default=False),
+ hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
+ token=dict(type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ hook_url = module.params['hook_url']
+ push_events = module.params['push_events']
+ push_events_branch_filter = module.params['push_events_branch_filter']
+ issues_events = module.params['issues_events']
+ merge_requests_events = module.params['merge_requests_events']
+ tag_push_events = module.params['tag_push_events']
+ note_events = module.params['note_events']
+ job_events = module.params['job_events']
+ pipeline_events = module.params['pipeline_events']
+ wiki_page_events = module.params['wiki_page_events']
+ enable_ssl_verification = module.params['hook_validate_certs']
+ hook_token = module.params['token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_hook = GitLabHook(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier)
+
+ hook_exists = gitlab_hook.existsHook(project, hook_url)
+
+ if state == 'absent':
+ if hook_exists:
+ gitlab_hook.deleteHook()
+ module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
+ else:
+ module.exit_json(changed=False, msg="Hook deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_hook.createOrUpdateHook(project, hook_url, {
+ "push_events": push_events,
+ "push_events_branch_filter": push_events_branch_filter,
+ "issues_events": issues_events,
+ "merge_requests_events": merge_requests_events,
+ "tag_push_events": tag_push_events,
+ "note_events": note_events,
+ "job_events": job_events,
+ "pipeline_events": pipeline_events,
+ "wiki_page_events": wiki_page_events,
+ "enable_ssl_verification": enable_ssl_verification,
+ "token": hook_token}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py
new file mode 100644
index 00000000..98631c74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes GitLab Projects
+description:
+ - When the project does not exist in GitLab, it will be created.
+ - When the project does exist and state=absent, the project will be deleted.
+ - When changes are made to the project, the project will be updated.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ group:
+ description:
+ - Id or the full path of the group to which this project belongs.
+ type: str
+ name:
+ description:
+ - The name of the project
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the project you want to create, this will be server_url/<group>/path.
+ - If not supplied, name will be used.
+ type: str
+ description:
+ description:
+ - A description for the project.
+ type: str
+ issues_enabled:
+ description:
+ - Whether you want to create issues or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ merge_requests_enabled:
+ description:
+ - If merge requests can be made or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ wiki_enabled:
+ description:
+ - If a wiki for this project should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ snippets_enabled:
+ description:
+ - If creating snippets should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ visibility:
+ description:
+ - Private. Project access must be granted explicitly for each user.
+ - Internal. The project can be cloned by any logged in user.
+ - Public. The project can be cloned without any authentication.
+ default: private
+ type: str
+ choices: ["private", "internal", "public"]
+ aliases:
+ - visibility_level
+ import_url:
+ description:
+ - Git repository which will be imported into GitLab.
+ - GitLab server needs read access to this git repository.
+ required: false
+ type: str
+ state:
+ description:
+ - Create or delete project.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ merge_method:
+ description:
+ - What requirements are placed upon merges.
+ - Possible values are C(merge) (merge commit), C(rebase_merge) (merge commit with semi-linear history), and C(ff) (fast-forward merges only).
+ type: str
+ choices: ["ff", "merge", "rebase_merge"]
+ default: merge
+ version_added: "1.0.0"
+'''
+
+EXAMPLES = r'''
+- name: Delete GitLab Project
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_project
+ state: absent
+ delegate_to: localhost
+
+- name: Create GitLab Project in group Ansible
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_project
+ group: ansible
+ issues_enabled: False
+ merge_method: rebase_merge
+ wiki_enabled: True
+ snippets_enabled: True
+ import_url: http://git.example.com/example/lab.git
+ state: present
+ delegate_to: localhost
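+
+# An illustrative sketch: when I(group) is omitted, the project is created in the
+# namespace of the authenticated user.
+- name: Create GitLab Project in the user's own namespace
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ name: my_first_project
+ state: present
+ delegate_to: localhost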
+'''
+
+RETURN = r'''
+msg:
+ description: Success or failure message.
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server.
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API.
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+project:
+ description: API object.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication
+
+
+class GitLabProject(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.projectObject = None
+
+ '''
+ @param project_name Name of the project
+ @param namespace Namespace Object (User or Group)
+ @param options Options of the project
+ '''
+ def createOrUpdateProject(self, project_name, namespace, options):
+ changed = False
+
+ # Because existsProject() has already been called in main(), self.projectObject is set when the project exists
+ if self.projectObject is None:
+ project = self.createProject(namespace, {
+ 'name': project_name,
+ 'path': options['path'],
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility'],
+ 'import_url': options['import_url']})
+ changed = True
+ else:
+ changed, project = self.updateProject(self.projectObject, {
+ 'name': project_name,
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility']})
+
+ self.projectObject = project
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
+
+ try:
+ project.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed update project: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param namespace Namespace Object (User or Group)
+ @param arguments Attributes of the project
+ '''
+ def createProject(self, namespace, arguments):
+ if self._module.check_mode:
+ return True
+
+ arguments['namespace_id'] = namespace.id
+ try:
+ project = self._gitlab.projects.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create project: %s " % to_native(e))
+
+ return project
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the project
+ '''
+ def updateProject(self, project, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(project, arg_key) != arguments[arg_key]:
+ setattr(project, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, project)
+
+ def deleteProject(self):
+ if self._module.check_mode:
+ return True
+
+ project = self.projectObject
+
+ return project.delete()
+
+ '''
+ @param namespace User/Group object
+ @param path Path of the project
+ '''
+ def existsProject(self, namespace, path):
+ # When project exists, object will be stored in self.projectObject.
+ project = findProject(self._gitlab, namespace.full_path + '/' + path)
+ if project:
+ self.projectObject = project
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ group=dict(type='str'),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ issues_enabled=dict(type='bool', default=True),
+ merge_requests_enabled=dict(type='bool', default=True),
+ merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]),
+ wiki_enabled=dict(type='bool', default=True),
+ snippets_enabled=dict(default=True, type='bool'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]),
+ import_url=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_identifier = module.params['group']
+ project_name = module.params['name']
+ project_path = module.params['path']
+ project_description = module.params['description']
+ issues_enabled = module.params['issues_enabled']
+ merge_requests_enabled = module.params['merge_requests_enabled']
+ merge_method = module.params['merge_method']
+ wiki_enabled = module.params['wiki_enabled']
+ snippets_enabled = module.params['snippets_enabled']
+ visibility = module.params['visibility']
+ import_url = module.params['import_url']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Set project_path to project_name if it is empty.
+ if project_path is None:
+ project_path = project_name.replace(" ", "_")
+
+ gitlab_project = GitLabProject(module, gitlab_instance)
+
+ if group_identifier:
+ group = findGroup(gitlab_instance, group_identifier)
+ if group is None:
+ module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier)
+
+ namespace = gitlab_instance.namespaces.get(group.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+ else:
+ user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0]
+ namespace = gitlab_instance.namespaces.get(user.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+
+ if state == 'absent':
+ if project_exists:
+ gitlab_project.deleteProject()
+ module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
+ else:
+ module.exit_json(changed=False, msg="Project deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_project.createOrUpdateProject(project_name, namespace, {
+ "path": project_path,
+ "description": project_description,
+ "issues_enabled": issues_enabled,
+ "merge_requests_enabled": merge_requests_enabled,
+ "merge_method": merge_method,
+ "wiki_enabled": wiki_enabled,
+ "snippets_enabled": snippets_enabled,
+ "visibility": visibility,
+ "import_url": import_url}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
new file mode 100644
index 00000000..9803f76b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_project_variable
+short_description: Creates/updates/deletes GitLab Project Variables
+description:
+ - When a project variable does not exist, it will be created.
+ - When a project variable does exist, its value will be updated if the values are different.
+ - Variables which are not mentioned in the playbook, but do exist in the GitLab project,
+ will either stay untouched (I(purge) is C(false)) or be deleted (I(purge) is C(true)).
+author:
+ - "Markus Bergholz (@markuman)"
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete project variable.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ project:
+ description:
+ - The path and name of the project.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to true, all variables which are not mentioned in the task will be deleted.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control about whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+ default: {}
+ type: dict
+'''
+
+
+EXAMPLES = '''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
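+
+# A sketch with I(purge) enabled: every project variable not listed under I(vars)
+# is removed, so only ACCESS_KEY_ID survives (project path is illustrative, as above).
+- name: Delete all variables except the listed ones
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: true
+ vars:
+ ACCESS_KEY_ID: abc123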
+'''
+
+RETURN = '''
+project_variable:
+ description: Four lists of the variable names which were added, updated, removed, or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabProjectVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.project = self.get_project(module.params['project'])
+ self._module = module
+
+ def get_project(self, project_name):
+ return self.repo.projects.get(project_name)
+
+ def list_all_project_variables(self):
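+ # The GitLab API paginates variable listings; keep requesting pages until an empty page comes back.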
+ page_nb = 1
+ variables = []
+ vars_page = self.project.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.project.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.project.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
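+ # When masked, protected or variable_type differ, the variable is recreated rather than saved in place.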
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.project.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ existing_variables = [x.get_id() for x in gitlab_keys]
+
+ for key in var_list:
+
+ if isinstance(var_list[key], string_types) or isinstance(var_list[key], (integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+ else:
+ module.fail_json(msg="value must be of type string, integer or dict")
+
+ if key in existing_variables:
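+ # Mark this key as processed (None) so the purge step below skips it.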
+ index = existing_variables.index(key)
+ existing_variables[index] = None
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
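+ # Entries left non-None exist in GitLab but were not mentioned in the task: purge them or report them as untouched.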
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ project=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance)
+
+ change, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=change, project_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py
new file mode 100644
index 00000000..70384914
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Samy Coenen <samy.coenen@nubera.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_runner
+short_description: Create, modify and delete GitLab Runners.
+description:
+ - Register, update and delete runners with the GitLab API.
+ - All operations are performed using the GitLab API v4.
+ - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
+ - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
+ U(https://$GITLAB_URL/profile/personal_access_tokens).
+ - A valid registration token is required for registering a new runner.
+ To create shared runners, you need to ask your administrator to give you this token.
+ It can be found at U(https://$GITLAB_URL/admin/runners/).
+notes:
+ - To create a new runner, at least the C(api_token), C(description) and C(api_url) options are required.
+ - Runners need to have unique descriptions.
+author:
+ - Samy Coenen (@SamyCoenen)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab >= 1.5.0
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - Your private token to interact with the GitLab API.
+ type: str
+ description:
+ description:
+ - The unique name of the runner.
+ required: True
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - Ensure that a runner with the given description exists with the specified configuration (C(present)), or delete the runner with that description (C(absent)).
+ required: False
+ default: present
+ choices: ["present", "absent"]
+ type: str
+ registration_token:
+ description:
+ - The registration token is used to register new runners.
+ required: True
+ type: str
+ active:
+ description:
+ - Define if the runner is immediately active after creation.
+ required: False
+ default: yes
+ type: bool
+ locked:
+ description:
+ - Determines if the runner is locked or not.
+ required: False
+ default: False
+ type: bool
+ access_level:
+ description:
+ - Determines if a runner can pick up jobs from protected branches.
+ required: False
+ default: ref_protected
+ choices: ["ref_protected", "not_protected"]
+ type: str
+ maximum_timeout:
+ description:
+ - The maximum timeout (in seconds) that limits the amount of time a runner can run a specific job.
+ required: False
+ default: 3600
+ type: int
+ run_untagged:
+ description:
+ - Run untagged jobs or not.
+ required: False
+ default: yes
+ type: bool
+ tag_list:
+ description: The tags that apply to the runner.
+ required: False
+ default: []
+ type: list
+'''
+
+EXAMPLES = '''
+- name: "Register runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ registration_token: 4gfdsg345
+ description: Docker Machine t1
+ state: present
+ active: True
+ tag_list: ['docker']
+ run_untagged: False
+ locked: False
+
+- name: "Delete runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: JSON-parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+runner:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
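+# Python 3 removed the built-in cmp(); recreate it for the list comparison
+# in GitLabRunner.updateRunner below.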
+try:
+ cmp
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+
+class GitLabRunner(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.runnerObject = None
+
+ def createOrUpdateRunner(self, description, options):
+ changed = False
+
+ # Because existsRunner() has already been called in main(), runnerObject is set when the runner exists
+ if self.runnerObject is None:
+ runner = self.createRunner({
+ 'description': description,
+ 'active': options['active'],
+ 'token': options['registration_token'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'tag_list': options['tag_list']})
+ changed = True
+ else:
+ changed, runner = self.updateRunner(self.runnerObject, {
+ 'active': options['active'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'access_level': options['access_level'],
+ 'tag_list': options['tag_list']})
+
+ self.runnerObject = runner
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description)
+
+ try:
+ runner.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update runner: %s " % to_native(e))
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the runner
+ '''
+ def createRunner(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ runner = self._gitlab.runners.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
+
+ return runner
+
+ '''
+ @param runner Runner object
+ @param arguments Attributes of the runner
+ '''
+ def updateRunner(self, runner, arguments):
+ changed = False
+
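+ # Compare list-valued attributes order-insensitively: both lists are sorted in place before cmp() decides whether an update is needed.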
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if isinstance(arguments[arg_key], list):
+ list1 = getattr(runner, arg_key)
+ list1.sort()
+ list2 = arguments[arg_key]
+ list2.sort()
+ if cmp(list1, list2):
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+ else:
+ if getattr(runner, arg_key) != arguments[arg_key]:
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, runner)
+
+ '''
+ @param description Description of the runner
+ '''
+ def findRunner(self, description):
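+ # Descriptions are expected to be unique (see the module notes); the first matching runner is fetched and returned.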
+ runners = self._gitlab.runners.all(as_list=False)
+ for runner in runners:
+ if (runner['description'] == description):
+ return self._gitlab.runners.get(runner['id'])
+
+ '''
+ @param description Description of the runner
+ '''
+ def existsRunner(self, description):
+ # When runner exists, object will be stored in self.runnerObject.
+ runner = self.findRunner(description)
+
+ if runner:
+ self.runnerObject = runner
+ return True
+ return False
+
+ def deleteRunner(self):
+ if self._module.check_mode:
+ return True
+
+ runner = self.runnerObject
+
+ return runner.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ description=dict(type='str', required=True, aliases=["name"]),
+ active=dict(type='bool', default=True),
+ tag_list=dict(type='list', default=[]),
+ run_untagged=dict(type='bool', default=True),
+ locked=dict(type='bool', default=False),
+ access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
+ maximum_timeout=dict(type='int', default=3600),
+ registration_token=dict(type='str', required=True, no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ runner_description = module.params['description']
+ runner_active = module.params['active']
+ tag_list = module.params['tag_list']
+ run_untagged = module.params['run_untagged']
+ runner_locked = module.params['locked']
+ access_level = module.params['access_level']
+ maximum_timeout = module.params['maximum_timeout']
+ registration_token = module.params['registration_token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_runner = GitLabRunner(module, gitlab_instance)
+ runner_exists = gitlab_runner.existsRunner(runner_description)
+
+ if state == 'absent':
+ if runner_exists:
+ gitlab_runner.deleteRunner()
+ module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, msg="Runner already deleted or does not exist")
+
+ if state == 'present':
+ if gitlab_runner.createOrUpdateRunner(runner_description, {
+ "active": runner_active,
+ "tag_list": tag_list,
+ "run_untagged": run_untagged,
+ "locked": runner_locked,
+ "access_level": access_level,
+ "maximum_timeout": maximum_timeout,
+ "registration_token": registration_token}):
+ module.exit_json(changed=True, runner=gitlab_runner.runnerObject._attrs,
+ msg="Successfully created or updated the runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, runner=gitlab_runner.runnerObject._attrs,
+ msg="No need to update the runner %s" % runner_description)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py
new file mode 100644
index 00000000..1e8ee65a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
+description:
+ - When the user does not exist in GitLab, it will be created.
+ - When the user exists and state=absent, the user will be deleted.
+ - When the user exists and state=blocked, the user will be blocked.
+ - When changes are made to the user, the user will be updated.
+notes:
+ - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+ - administrator rights on the GitLab server
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the user you want to create.
+ - Required only if C(state) is set to C(present).
+ type: str
+ username:
+ description:
+ - The username of the user.
+ required: true
+ type: str
+ password:
+ description:
+ - The password of the user.
+ - The GitLab server enforces a minimum password length of 8, so set this value to 8 or more characters.
+ - Required only if C(state) is set to C(present).
+ type: str
+ email:
+ description:
+ - The email that belongs to the user.
+ - Required only if C(state) is set to C(present).
+ type: str
+ sshkey_name:
+ description:
+ - The name of the SSH key.
+ type: str
+ sshkey_file:
+ description:
+ - The SSH key itself.
+ type: str
+ group:
+ description:
+ - ID or full path of the parent group in the form of group/name.
+ - Add the user as a member to this group.
+ type: str
+ access_level:
+ description:
+ - The access level to the group. One of the following can be used.
+ - guest
+ - reporter
+ - developer
+ - master (alias for maintainer)
+ - maintainer
+ - owner
+ default: guest
+ type: str
+ choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"]
+ state:
+ description:
+ - Create, delete or block a user.
+ default: present
+ type: str
+ choices: ["present", "absent", "blocked", "unblocked"]
+ confirm:
+ description:
+ - Require confirmation.
+ type: bool
+ default: yes
+ isadmin:
+ description:
+ - Grant admin privileges to the user.
+ type: bool
+ default: no
+ external:
+ description:
+ - Define external parameter for this user.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: absent
+
+- name: "Create GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: My Name
+ username: myusername
+ password: mysecretpassword
+ email: me@example.com
+ sshkey_name: MySSH
+ sshkey_file: ssh-rsa AAAAB3NzaC1yc...
+ state: present
+ group: super_group/mon_group
+ access_level: owner
+
+- name: "Block GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: blocked
+
+- name: "Unblock GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: unblocked
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: JSON-parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+user:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabUser(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.userObject = None
+ self.ACCESS_LEVEL = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'master': gitlab.MAINTAINER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS}
+
+ '''
+ @param username Username of the user
+ @param options User options
+ '''
+ def createOrUpdateUser(self, username, options):
+ changed = False
+ potentially_changed = False
+
+ # Because existsUser() has already been called in main(), userObject is set when the user exists
+ if self.userObject is None:
+ user = self.createUser({
+ 'name': options['name'],
+ 'username': username,
+ 'password': options['password'],
+ 'email': options['email'],
+ 'skip_confirmation': not options['confirm'],
+ 'admin': options['isadmin'],
+ 'external': options['external']})
+ changed = True
+ else:
+ changed, user = self.updateUser(
+ self.userObject, {
+ # add "normal" parameters here, put uncheckable
+ # params in the dict below
+ 'name': {'value': options['name']},
+ 'email': {'value': options['email']},
+
+ # note: for some attributes like this one, the key read
+ # back from the server is unfortunately different from
+ # the one needed for pushing/writing; in that case use
+ # the optional setter key
+ 'is_admin': {
+ 'value': options['isadmin'], 'setter': 'admin'
+ },
+ 'external': {'value': options['external']},
+ },
+ {
+ # put "uncheckable" params here, meaning params which
+ # GitLab accepts for setting but does not return any
+ # information about
+ 'skip_reconfirmation': {'value': not options['confirm']},
+ 'password': {'value': options['password']},
+ }
+ )
+
+ # note: as we unfortunately have some uncheckable parameters
+ # where it is not possible to determine if the update
+ # changed something or not, we must assume here that a
+ # change happened and that a user object update is needed
+ potentially_changed = True
+
+ # Assign ssh keys
+ if options['sshkey_name'] and options['sshkey_file']:
+ key_changed = self.addSshKeyToUser(user, {
+ 'name': options['sshkey_name'],
+ 'file': options['sshkey_file']})
+ changed = changed or key_changed
+
+ # Assign group
+ if options['group_path']:
+ group_changed = self.assignUserToGroup(user, options['group_path'], options['access_level'])
+ changed = changed or group_changed
+
+ self.userObject = user
+ if (changed or potentially_changed) and not self._module.check_mode:
+ try:
+ user.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
+
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)
+ return True
+ else:
+ return False
+
+ '''
+ @param user User object
+ '''
+ def getUserId(self, user):
+ if user is not None:
+ return user.id
+ return None
+
+ '''
+ @param user User object
+ @param sshkey_name Name of the ssh key
+ '''
+ def sshKeyExists(self, user, sshkey_name):
+ keyList = map(lambda k: k.title, user.keys.list())
+
+ return sshkey_name in keyList
+
+ '''
+ @param user User object
+ @param sshkey Dict containing sshkey infos {"name": "", "file": ""}
+ '''
+ def addSshKeyToUser(self, user, sshkey):
+ if not self.sshKeyExists(user, sshkey['name']):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user.keys.create({
+ 'title': sshkey['name'],
+ 'key': sshkey['file']})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to find
+ '''
+ def findMember(self, group, user_id):
+ try:
+ member = group.members.get(user_id)
+ except gitlab.exceptions.GitlabGetError:
+ return None
+ return member
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ '''
+ def memberExists(self, group, user_id):
+ member = self.findMember(group, user_id)
+
+ return member is not None
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ @param access_level GitLab access_level to check
+ '''
+ def memberAsGoodAccessLevel(self, group, user_id, access_level):
+ member = self.findMember(group, user_id)
+
+ return member.access_level == access_level
+
+ '''
+ @param user User object
+ @param group_path Complete path of the Group including parent group path. <parent_path>/<group_path>
+ @param access_level GitLab access_level to assign
+ '''
+ def assignUserToGroup(self, user, group_identifier, access_level):
+ group = findGroup(self._gitlab, group_identifier)
+
+ if self._module.check_mode:
+ return True
+
+ if group is None:
+ return False
+
+ if self.memberExists(group, self.getUserId(user)):
+ member = self.findMember(group, self.getUserId(user))
+ if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]):
+ member.access_level = self.ACCESS_LEVEL[access_level]
+ member.save()
+ return True
+ else:
+ try:
+ group.members.create({
+ 'user_id': self.getUserId(user),
+ 'access_level': self.ACCESS_LEVEL[access_level]})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param user User object
+ @param arguments User attributes
+ '''
+ def updateUser(self, user, arguments, uncheckable_args):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ av = arg_value['value']
+
+ if av is not None:
+ if getattr(user, arg_key) != av:
+ setattr(user, arg_value.get('setter', arg_key), av)
+ changed = True
+
+ for arg_key, arg_value in uncheckable_args.items():
+ av = arg_value['value']
+
+ if av is not None:
+ setattr(user, arg_value.get('setter', arg_key), av)
+
+ return (changed, user)
+
+ '''
+ @param arguments User attributes
+ '''
+ def createUser(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user = self._gitlab.users.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create user: %s " % to_native(e))
+
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def findUser(self, username):
+ users = self._gitlab.users.list(search=username)
+ for user in users:
+ if (user.username == username):
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def existsUser(self, username):
+ # When user exists, object will be stored in self.userObject.
+ user = self.findUser(username)
+ if user:
+ self.userObject = user
+ return True
+ return False
+
+ '''
+ @param username Username of the user
+ '''
+ def isActive(self, username):
+ user = self.findUser(username)
+ return user.attributes['state'] == 'active'
+
+ def deleteUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.delete()
+
+ def blockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.block()
+
+ def unblockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.unblock()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]),
+ username=dict(type='str', required=True),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str'),
+ sshkey_name=dict(type='str'),
+ sshkey_file=dict(type='str'),
+ group=dict(type='str'),
+ access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]),
+ confirm=dict(type='bool', default=True),
+ isadmin=dict(type='bool', default=False),
+ external=dict(type='bool', default=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['name', 'email', 'password']),
+ )
+ )
+
+ user_name = module.params['name']
+ state = module.params['state']
+ user_username = module.params['username'].lower()
+ user_password = module.params['password']
+ user_email = module.params['email']
+ user_sshkey_name = module.params['sshkey_name']
+ user_sshkey_file = module.params['sshkey_file']
+ group_path = module.params['group']
+ access_level = module.params['access_level']
+ confirm = module.params['confirm']
+ user_isadmin = module.params['isadmin']
+ user_external = module.params['external']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_user = GitLabUser(module, gitlab_instance)
+ user_exists = gitlab_user.existsUser(user_username)
+ if user_exists:
+ user_is_active = gitlab_user.isActive(user_username)
+ else:
+ user_is_active = False
+
+ if state == 'absent':
+ if user_exists:
+ gitlab_user.deleteUser()
+ module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User already deleted or does not exist")
+
+ if state == 'blocked':
+ if user_exists and user_is_active:
+ gitlab_user.blockUser()
+ module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User already blocked or does not exist")
+
+ if state == 'unblocked':
+ if user_exists and not user_is_active:
+ gitlab_user.unblockUser()
+ module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User is not blocked or does not exist")
+
+ if state == 'present':
+ if gitlab_user.createOrUpdateUser(user_username, {
+ "name": user_name,
+ "password": user_password,
+ "email": user_email,
+ "sshkey_name": user_sshkey_name,
+ "sshkey_file": user_sshkey_file,
+ "group_path": group_path,
+ "access_level": access_level,
+ "confirm": confirm,
+ "isadmin": user_isadmin,
+ "external": user_external}):
+ module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py
new file mode 100644
index 00000000..46306585
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_heal_info
+short_description: Gather information on self-heal or rebalance status
+author: "Devyani Kota (@devyanikota)"
+description:
+ - Gather facts about either self-heal or rebalance status.
+ - This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.gluster_heal_info) module no longer returns C(ansible_facts)!
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ status_filter:
+ default: "self-heal"
+ choices: ["self-heal", "rebalance"]
+ description:
+ - Determines which facts are to be returned.
+ - If the C(status_filter) is C(self-heal), the status of self-heal, along with the number of files still in process, is returned.
+ - If the C(status_filter) is C(rebalance), rebalance status is returned.
+requirements:
+ - GlusterFS > 3.2
+'''
+
+EXAMPLES = '''
+- name: Gather self-heal facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: self-heal
+ register: self_heal_status
+- ansible.builtin.debug:
+ var: self_heal_status
+
+- name: Gather rebalance facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: rebalance
+ register: rebalance_status
+- ansible.builtin.debug:
+ var: rebalance_status
+'''
+
+RETURN = '''
+name:
+ description: GlusterFS volume name
+ returned: always
+ type: str
+status_filter:
+ description: Whether self-heal or rebalance status is to be returned
+ returned: always
+ type: str
+heal_info:
+ description: List of files that still need the healing process
+ returned: On success
+ type: list
+rebalance_status:
+ description: Status of rebalance operation
+ returned: On success
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import LooseVersion
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def get_self_heal_status(name):
+ out = run_gluster(['volume', 'heal', name, 'info'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ raw_out = out.split("\n")
+ heal_info = []
+ # return files that still need healing.
+ for line in raw_out:
+ if 'Brick' in line:
+ br_dict = {}
+ # str.strip("Brick") strips characters, not the prefix word; remove the
+ # literal "Brick" prefix instead and trim the remaining whitespace.
+ br_dict['brick'] = line.strip()[len("Brick"):].strip()
+ elif 'Status' in line:
+ br_dict['status'] = line.split(":")[1].strip()
+ elif 'Number' in line:
+ br_dict['no_of_entries'] = line.split(":")[1].strip()
+ elif line.startswith('/') or line.startswith('<') or '\n' in line:
+ continue
+ else:
+ if br_dict:
+ heal_info.append(br_dict)
+ br_dict = {}
+ return heal_info
+
+
+def get_rebalance_status(name):
+ out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ raw_out = out.split("\n")
+ rebalance_status = []
+ # return the files that are either still 'in progress' state or 'completed'.
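+ # Rows are parsed positionally after collapsing whitespace: column 0 is the node, column 1 the rebalanced file count, column 4 the failure count, and column 5 (plus 6 for the two-word 'in progress') the status.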
+ for line in raw_out:
+ line = " ".join(line.split())
+ line_vals = line.split(" ")
+ if line_vals[0].startswith('-') or line_vals[0].startswith('Node'):
+ continue
+ node_dict = {}
+ if len(line_vals) == 1 or len(line_vals) == 4:
+ continue
+ node_dict['node'] = line_vals[0]
+ node_dict['rebalanced_files'] = line_vals[1]
+ node_dict['failures'] = line_vals[4]
+ if 'in progress' in line:
+ node_dict['status'] = line_vals[5] + line_vals[6]
+ rebalance_status.append(node_dict)
+ elif 'completed' in line:
+ node_dict['status'] = line_vals[5]
+ rebalance_status.append(node_dict)
+ return rebalance_status
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than 3.2, it is an invalid version
+ # return True
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+def main():
+ global module
+ global glusterbin
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']),
+ ),
+ )
+ is_old_facts = module._name in ('gluster_heal_facts', 'community.general.gluster_heal_facts')
+ if is_old_facts:
+ module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ glusterbin = module.get_bin_path('gluster', True)
+ required_version = "3.2"
+ status_filter = module.params['status_filter']
+ volume_name = module.params['name']
+ heal_info = ''
+ rebalance_status = ''
+
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+ module.fail_json(msg="GlusterFS version > %s is required" %
+ required_version)
+
+ try:
+ if status_filter == "self-heal":
+ heal_info = get_self_heal_status(volume_name)
+ elif status_filter == "rebalance":
+ rebalance_status = get_rebalance_status(volume_name)
+ except Exception as e:
+ module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc())
+
+ facts = {}
+ facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status}
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=facts)
+ else:
+ module.exit_json(**facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py
new file mode 100644
index 00000000..e9e6fd71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
+# Copyright 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_peer
+short_description: Attach/Detach peers to/from the cluster
+description:
+ - Create or diminish a GlusterFS trusted storage pool. A set of nodes can be
+ added into an existing trusted storage pool, a new storage pool can be
+ formed, or nodes can be removed from an existing trusted storage pool.
+author: Sachidananda Urs (@sac)
+options:
+ state:
+ choices: ["present", "absent"]
+ default: "present"
+ description:
+ - Determines whether the nodes should be attached to the pool or
+ removed from the pool. If the state is present, nodes will be
+ attached to the pool. If state is absent, nodes will be detached
+ from the pool.
+ type: str
+ nodes:
+ description:
+ - List of nodes that have to be probed into the pool.
+ required: true
+ type: list
+ force:
+ type: bool
+ default: false
+ description:
+ - Applicable only while removing nodes from the pool. Gluster
+ will refuse to detach a node from the pool if any one of the nodes
+ is down; in such cases force can be used.
+requirements:
+ - GlusterFS > 3.2
+notes:
+ - This module does not support check mode.
+'''
+
+EXAMPLES = '''
+- name: Create a trusted storage pool
+ community.general.gluster_peer:
+ state: present
+ nodes:
+ - 10.0.1.5
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool by force
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.0.1
+ force: true
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from distutils.version import LooseVersion
+
+
+class Peer(object):
+ def __init__(self, module):
+ self.module = module
+ self.state = self.module.params['state']
+ self.nodes = self.module.params['nodes']
+ self.glustercmd = self.module.get_bin_path('gluster', True)
+ self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ self.action = ''
+ self.force = ''
+
+ def gluster_peer_ops(self):
+ if not self.nodes:
+ self.module.fail_json(msg="nodes list cannot be empty")
+ self.force = 'force' if self.module.params.get('force') else ''
+ if self.state == 'present':
+ self.nodes = self.get_to_be_probed_hosts(self.nodes)
+ self.action = 'probe'
+ # In case of peer probe, we do not need `force'
+ self.force = ''
+ else:
+ self.action = 'detach'
+ self.call_peer_commands()
+
+ def get_to_be_probed_hosts(self, hosts):
+ peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
+ rc, output, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
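+ # Skip the header line of `gluster pool list` and keep the second tab-separated field (the hostname) of every remaining row.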
+ peers_in_cluster = [line.split('\t')[1].strip() for
+ line in filter(None, output.split('\n')[1:])]
+ try:
+ peers_in_cluster.remove('localhost')
+ except ValueError:
+ # It is ok not to have localhost in list
+ pass
+ hosts_to_be_probed = [host for host in hosts if host not in
+ peers_in_cluster]
+ return hosts_to_be_probed
+
+ def call_peer_commands(self):
+ result = {}
+ result['msg'] = ''
+ result['changed'] = False
+
+ for node in self.nodes:
+ peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
+ if self.force:
+ peercmd.append(self.force)
+ rc, out, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
+ if rc:
+ result['rc'] = rc
+ result['msg'] = err
+ # Fail early, do not wait for the loop to finish
+ self.module.fail_json(**result)
+ else:
+ if 'already in peer' in out or \
+ 'localhost not needed' in out:
+ # The node is already part of the pool; nothing changed.
+ pass
+ else:
+ result['changed'] = True
+ self.module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', required=False, default=False),
+ nodes=dict(type='list', required=True),
+ state=dict(type='str', choices=['absent', 'present'],
+ default='present'),
+ ),
+ supports_check_mode=False
+ )
+ pops = Peer(module)
+ required_version = "3.2"
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+ module.fail_json(msg="GlusterFS version > %s is required" %
+ required_version)
+ pops.gluster_peer_ops()
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than 3.2, it is an invalid version
+ # return True
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py
new file mode 100644
index 00000000..d6444ef5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Taneli Leppä <taneli@crasman.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gluster_volume
+short_description: Manage GlusterFS volumes
+description:
+ - Create, remove, start, stop and tune GlusterFS volumes
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ state:
+ description:
+ - Use present/absent to ensure whether a volume exists.
+ Use started/stopped to control its availability.
+ required: true
+ choices: ['absent', 'present', 'started', 'stopped']
+ cluster:
+ description:
+ - List of hosts to use for probing and brick setup.
+ host:
+ description:
+ - Override local hostname (for peer probing purposes).
+ replicas:
+ description:
+ - Replica count for volume.
+ arbiters:
+ description:
+ - Arbiter count for volume.
+ stripes:
+ description:
+ - Stripe count for volume.
+ disperses:
+ description:
+ - Disperse count for volume.
+ redundancies:
+ description:
+ - Redundancy count for volume.
+ transport:
+ description:
+ - Transport type for volume.
+ default: tcp
+ choices: [ tcp, rdma, 'tcp,rdma' ]
+ bricks:
+ description:
+ - Brick paths on servers. Multiple brick paths can be separated by commas.
+ aliases: [ brick ]
+ start_on_create:
+ description:
+ - Controls whether the volume is started after creation or not.
+ type: bool
+ default: 'yes'
+ rebalance:
+ description:
+ - Controls whether the cluster is rebalanced after changes.
+ type: bool
+ default: 'no'
+ directory:
+ description:
+ - Directory for limit-usage.
+ options:
+ description:
+ - A dictionary/hash with options/settings for the volume.
+ quota:
+ description:
+ - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
+ force:
+ description:
+ - If a brick is being created in the root partition, the module will fail.
+ Set force to true to override this behaviour.
+ type: bool
+ default: false
+notes:
+ - Requires cli tools for GlusterFS on servers.
+ - Will add new bricks, but not remove them.
+author:
+- Taneli Leppä (@rosmo)
+'''
+
+EXAMPLES = """
+- name: Create gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ bricks: /bricks/brick1/g1
+ rebalance: yes
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Tune
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ performance.cache-size: 256MB
+
+- name: Set multiple options on GlusterFS volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ { performance.cache-size: 128MB,
+ write-behind: 'off',
+ quick-read: 'on'
+ }
+
+- name: Start gluster volume
+ community.general.gluster_volume:
+ state: started
+ name: test1
+
+- name: Limit usage
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ directory: /foo
+ quota: 20.0MB
+
+- name: Stop gluster volume
+ community.general.gluster_volume:
+ state: stopped
+ name: test1
+
+- name: Remove gluster volume
+ community.general.gluster_volume:
+ state: absent
+ name: test1
+
+- name: Create gluster volume with multiple bricks
+ community.general.gluster_volume:
+ state: present
+ name: test2
+ bricks: /bricks/brick1/g2,/bricks/brick2/g2
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Remove the bricks from gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick1/b1,/bricks/brick2/b2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
+
+- name: Reduce cluster configuration
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick3/b1,/bricks/brick4/b2
+ replicas: 2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
+"""
+
+import re
+import socket
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def run_gluster_nofail(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin]
+ args.extend(gargs)
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ return None
+ return out
+
+
+def get_peers():
+ out = run_gluster(['peer', 'status'])
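+ # Parse the key/value output of `gluster peer status`; hostnames listed under 'Other names:' are recorded as extra peer entries sharing the same uuid and state.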
+ peers = {}
+ hostname = None
+ uuid = None
+ state = None
+ shortNames = False
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'hostname':
+ hostname = value
+ shortNames = False
+ if key.lower() == 'uuid':
+ uuid = value
+ if key.lower() == 'state':
+ state = value
+ peers[hostname] = [uuid, state]
+ elif row.lower() == 'other names:':
+ shortNames = True
+ elif row != '' and shortNames is True:
+ peers[row] = [uuid, state]
+ elif row == '':
+ shortNames = False
+ return peers
+
+
+def get_volumes():
+ out = run_gluster(['volume', 'info'])
+
+ volumes = {}
+ volume = {}
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'volume name':
+ volume['name'] = value
+ volume['options'] = {}
+ volume['quota'] = False
+ if key.lower() == 'volume id':
+ volume['id'] = value
+ if key.lower() == 'status':
+ volume['status'] = value
+ if key.lower() == 'transport-type':
+ volume['transport'] = value
+ if value.lower().endswith(' (arbiter)'):
+ if 'arbiters' not in volume:
+ volume['arbiters'] = []
+ value = value[:-10]
+ volume['arbiters'].append(value)
+ elif key.lower() == 'number of bricks':
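+ # The value typically looks like '1 x 2 = 2'; only the final character is kept, so totals above 9 would be truncated here.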
+ volume['replicas'] = value[-1:]
+ if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
+ if 'bricks' not in volume:
+ volume['bricks'] = []
+ volume['bricks'].append(value)
+ # Volume options
+ if '.' in key:
+ if 'options' not in volume:
+ volume['options'] = {}
+ volume['options'][key] = value
+ if key == 'features.quota' and value == 'on':
+ volume['quota'] = True
+ else:
+ if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
+ if len(volume) > 0:
+ volumes[volume['name']] = volume
+ volume = {}
+ return volumes
+
+
+def get_quotas(name, nofail):
+ quotas = {}
+ if nofail:
+ out = run_gluster_nofail(['volume', 'quota', name, 'list'])
+ if not out:
+ return quotas
+ else:
+ out = run_gluster(['volume', 'quota', name, 'list'])
+ for row in out.split('\n'):
+ if row[:1] == '/':
+ q = re.split(r'\s+', row)
+ quotas[q[0]] = q[1]
+ return quotas
+
+
+def wait_for_peer(host):
+ for x in range(0, 4):
+ peers = get_peers()
+ if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
+ return True
+ time.sleep(1)
+ return False
+
+
+def probe(host, myhostname):
+ global module
+ out = run_gluster(['peer', 'probe', host])
+ if out.find('localhost') == -1 and not wait_for_peer(host):
+ module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
+
+
+def probe_all_peers(hosts, peers, myhostname):
+ for host in hosts:
+ host = host.strip() # Clean up any extra space for exact comparison
+ if host not in peers:
+ probe(host, myhostname)
+
+
+def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
+ args = ['volume', 'create']
+ args.append(name)
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ if arbiter:
+ args.append('arbiter')
+ args.append(str(arbiter))
+ if disperse:
+ args.append('disperse')
+ args.append(str(disperse))
+ if redundancy:
+ args.append('redundancy')
+ args.append(str(redundancy))
+ args.append('transport')
+ args.append(transport)
+ for brick in bricks:
+ for host in hosts:
+ args.append(('%s:%s' % (host, brick)))
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def start_volume(name):
+ run_gluster(['volume', 'start', name])
+
+
+def stop_volume(name):
+ run_gluster(['volume', 'stop', name])
+
+
+def set_volume_option(name, option, parameter):
+ run_gluster(['volume', 'set', name, option, parameter])
+
+
+def add_bricks(name, new_bricks, stripe, replica, force):
+ args = ['volume', 'add-brick', name]
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ args.extend(new_bricks)
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def remove_bricks(name, removed_bricks, force):
+ # max-tries=12 with default_interval=10 secs
+ max_tries = 12
+ retries = 0
+ success = False
+ args = ['volume', 'remove-brick', name]
+ args.extend(removed_bricks)
+ # create a copy of args to use for commit operation
+ args_c = args[:]
+ args.append('start')
+ run_gluster(args)
+ # remove-brick operation needs to be followed by commit operation.
+ if not force:
+ module.fail_json(msg="Force option is mandatory.")
+ else:
+ while retries < max_tries:
+ last_brick = removed_bricks[-1]
+ out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status'])
+ for row in out.split('\n')[1:]:
+ if 'completed' in row:
+ # remove-brick successful, call commit operation.
+ args_c.append('commit')
+ out = run_gluster(args_c)
+ success = True
+ break
+ else:
+ time.sleep(10)
+ if success:
+ break
+ retries += 1
+ if not success:
+ # remove-brick still in process, needs to be committed after completion.
+ module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n"
+ "Commit operation needs to be followed.")
+
+
+def reduce_config(name, removed_bricks, replicas, force):
+ out = run_gluster(['volume', 'heal', name, 'info'])
+ summary = out.split("\n")
+ for line in summary:
+ if 'Number' in line and int(line.split(":")[1].strip()) != 0:
+ module.fail_json(msg="Operation aborted, self-heal in progress.")
+ args = ['volume', 'remove-brick', name, 'replica', replicas]
+ args.extend(removed_bricks)
+ if force:
+ args.append('force')
+ else:
+ module.fail_json(msg="Force option is mandatory")
+ run_gluster(args)
+
+
+def do_rebalance(name):
+ run_gluster(['volume', 'rebalance', name, 'start'])
+
+
+def enable_quota(name):
+ run_gluster(['volume', 'quota', name, 'enable'])
+
+
+def set_quota(name, directory, value):
+ run_gluster(['volume', 'quota', name, 'limit-usage', directory, value])
+
+
+def main():
+ # MAIN
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
+ cluster=dict(type='list'),
+ host=dict(type='str'),
+ stripes=dict(type='int'),
+ replicas=dict(type='int'),
+ arbiters=dict(type='int'),
+ disperses=dict(type='int'),
+ redundancies=dict(type='int'),
+ transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
+ bricks=dict(type='str', aliases=['brick']),
+ start_on_create=dict(type='bool', default=True),
+ rebalance=dict(type='bool', default=False),
+ options=dict(type='dict', default={}),
+ quota=dict(type='str'),
+ directory=dict(type='str'),
+ force=dict(type='bool', default=False),
+ ),
+ )
+
+ global glusterbin
+ glusterbin = module.get_bin_path('gluster', True)
+
+ changed = False
+
+ action = module.params['state']
+ volume_name = module.params['name']
+ cluster = module.params['cluster']
+ brick_paths = module.params['bricks']
+ stripes = module.params['stripes']
+ replicas = module.params['replicas']
+ arbiters = module.params['arbiters']
+ disperses = module.params['disperses']
+ redundancies = module.params['redundancies']
+ transport = module.params['transport']
+ myhostname = module.params['host']
+ start_on_create = module.boolean(module.params['start_on_create'])
+ rebalance = module.boolean(module.params['rebalance'])
+ force = module.boolean(module.params['force'])
+
+ if not myhostname:
+ myhostname = socket.gethostname()
+
+ # Clean up if last element is empty. Consider that yml can look like this:
+ # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
+ if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
+ cluster = cluster[0:-1]
+
+ if cluster is None:
+ cluster = []
+
+ if brick_paths is not None and "," in brick_paths:
+ brick_paths = brick_paths.split(",")
+ else:
+ brick_paths = [brick_paths]
+
+ options = module.params['options']
+ quota = module.params['quota']
+ directory = module.params['directory']
+
+ # get current state info
+ peers = get_peers()
+ volumes = get_volumes()
+ quotas = {}
+ if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
+ quotas = get_quotas(volume_name, True)
+
+ # do the work!
+ if action == 'absent':
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ run_gluster(['volume', 'delete', volume_name])
+ changed = True
+
+ if action == 'present':
+ probe_all_peers(cluster, peers, myhostname)
+
+ # create if it doesn't exist
+ if volume_name not in volumes:
+ create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
+ volumes = get_volumes()
+ changed = True
+
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
+ start_volume(volume_name)
+ changed = True
+
+ # switch bricks
+ new_bricks = []
+ removed_bricks = []
+ all_bricks = []
+ bricks_in_volume = volumes[volume_name]['bricks']
+
+ for node in cluster:
+ for brick_path in brick_paths:
+ brick = '%s:%s' % (node, brick_path)
+ all_bricks.append(brick)
+ if brick not in bricks_in_volume:
+ new_bricks.append(brick)
+
+ if not new_bricks and len(all_bricks) > 0 and \
+ len(all_bricks) < len(bricks_in_volume):
+ for brick in bricks_in_volume:
+ if brick not in all_bricks:
+ removed_bricks.append(brick)
+
+ if new_bricks:
+ add_bricks(volume_name, new_bricks, stripes, replicas, force)
+ changed = True
+
+ if removed_bricks:
+ if replicas and int(replicas) < int(volumes[volume_name]['replicas']):
+ reduce_config(volume_name, removed_bricks, str(replicas), force)
+ else:
+ remove_bricks(volume_name, removed_bricks, force)
+ changed = True
+
+ # handle quotas
+ if quota:
+ if not volumes[volume_name]['quota']:
+ enable_quota(volume_name)
+ quotas = get_quotas(volume_name, False)
+ if directory not in quotas or quotas[directory] != quota:
+ set_quota(volume_name, directory, quota)
+ changed = True
+
+ # set options
+ for option in options.keys():
+ if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
+ set_volume_option(volume_name, option, options[option])
+ changed = True
+
+ else:
+ module.fail_json(msg='failed to create volume %s' % volume_name)
+
+ if action != 'absent' and volume_name not in volumes:
+ module.fail_json(msg='volume %s not found' % volume_name)
+
+ if action == 'started':
+ if volumes[volume_name]['status'].lower() != 'started':
+ start_volume(volume_name)
+ changed = True
+
+ if action == 'stopped':
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ changed = True
+
+ if changed:
+ volumes = get_volumes()
+ if rebalance:
+ do_rebalance(volume_name)
+
+ facts = {}
+ facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}
+
+ module.exit_json(changed=changed, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py
new file mode 100644
index 00000000..c1816e63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: grove
+short_description: Sends a notification to a grove.io channel
+description:
+ - The C(grove) module sends a message for a service to a Grove.io
+ channel.
+options:
+ channel_token:
+ type: str
+ description:
+ - Token of the channel to post to.
+ required: true
+ service:
+ type: str
+ description:
+ - Name of the service (displayed as the "user" in the message).
+ required: false
+ default: ansible
+ message:
+ type: str
+ description:
+ - Message content.
+ required: true
+ url:
+ type: str
+ description:
+ - Service URL for the web client.
+ required: false
+ icon_url:
+ type: str
+ description:
+ - Icon for the service.
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+author: "Jonas Pfenniger (@zimbatm)"
+'''
+
+EXAMPLES = '''
+- name: Send a notification to a grove.io channel
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message: 'deployed {{ target }}'
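+
+# A hedged sketch of the optional fields: url and icon_url are documented
+# options of this module; the values below are illustrative only.
+- name: Send a notification with a link and icon
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message: 'deployed {{ target }}'
+ url: 'https://ci.example.com/builds/42'
+ icon_url: 'https://example.com/icons/deploy.png'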
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+BASE_URL = 'https://grove.io/api/notice/%s/'
+
+# ==============================================================
+# do_notify_grove
+
+
+def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
+ my_url = BASE_URL % (channel_token,)
+
+ my_data = dict(service=service, message=message)
+ if url is not None:
+ my_data['url'] = url
+ if icon_url is not None:
+ my_data['icon_url'] = icon_url
+
+ data = urlencode(my_data)
+ response, info = fetch_url(module, my_url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send notification: %s" % info['msg'])
+
+# ==============================================================
+# main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ channel_token=dict(type='str', required=True, no_log=True),
+ message=dict(type='str', required=True),
+ service=dict(type='str', default='ansible'),
+ url=dict(type='str', default=None),
+ icon_url=dict(type='str', default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ channel_token = module.params['channel_token']
+ service = module.params['service']
+ message = module.params['message']
+ url = module.params['url']
+ icon_url = module.params['icon_url']
+
+ do_notify_grove(module, channel_token, service, message, url, icon_url)
+
+ # Mission complete
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py
new file mode 100644
index 00000000..57030556
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Alejandro Gomez <alexgomez2202@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gunicorn
+short_description: Run gunicorn with various settings.
+description:
+ - Starts gunicorn with the parameters specified. Common settings for gunicorn
+ configuration are supported. For additional configuration, use a config file.
+ See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more
+ options. It is recommended to always use the chdir option to avoid problems
+ with the location of the app.
+requirements: [gunicorn]
+author:
+ - "Alejandro Gomez (@agmezr)"
+options:
+ app:
+ type: str
+ required: true
+ aliases: ['name']
+ description:
+ - The app module. A name refers to a WSGI callable that should be found in the specified module.
+ venv:
+ type: path
+ aliases: ['virtualenv']
+ description:
+ - 'Path to the virtualenv directory.'
+ config:
+ type: path
+ description:
+ - 'Path to the gunicorn configuration file.'
+ aliases: ['conf']
+ chdir:
+ type: path
+ description:
+ - 'Change to the specified directory before loading the app.'
+ pid:
+ type: path
+ description:
+ - 'A filename to use for the PID file. If not set and not found in the configuration file, a temporary
+ PID file will be created to verify that gunicorn started successfully.'
+ worker:
+ type: str
+ choices: ['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ description:
+ - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.'
+ user:
+ type: str
+ description:
+ - 'Switch worker processes to run as this user.'
+notes:
+ - If no error log is specified in the config file, a temporary one will be created
+ in the /tmp directory. Please make sure you have write access to /tmp. This is not
+ required, but it will help you identify any problem with the configuration.
+'''
+
+EXAMPLES = '''
+- name: Simple gunicorn run example
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+
+- name: Run gunicorn on a virtualenv
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ venv: '/workspace/example/venv'
+
+- name: Run gunicorn with a config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+
+- name: Run gunicorn as ansible user with specified pid and config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+ venv: '/workspace/example/venv'
+ pid: '/workspace/example/gunicorn.pid'
+ user: 'ansible'
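+
+# A hedged sketch: 'gevent' is one of the worker classes listed in this
+# module's documentation; the paths are illustrative only.
+- name: Run gunicorn with gevent workers
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ worker: 'gevent'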
+'''
+
+RETURN = '''
+gunicorn:
+ description: process id of gunicorn
+ returned: changed
+ type: str
+ sample: "1234"
+'''
+
+import os
+import time
+
+# import ansible utils
+from ansible.module_utils.basic import AnsibleModule
+
+
+def search_existing_config(config, option):
+ ''' search the config file for the specified option '''
+ if config and os.path.isfile(config):
+ with open(config, 'r') as f:
+ for line in f:
+ if option in line:
+ return line
+ return None
+
+
+def remove_tmp_file(file_path):
+ ''' remove temporary files '''
+ if os.path.isfile(file_path):
+ os.remove(file_path)
+
+
+def main():
+
+ # available gunicorn options on module
+ gunicorn_options = {
+ 'config': '-c',
+ 'chdir': '--chdir',
+ 'worker': '-k',
+ 'user': '-u',
+ }
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ app=dict(required=True, type='str', aliases=['name']),
+ venv=dict(required=False, type='path', default=None, aliases=['virtualenv']),
+ config=dict(required=False, default=None, type='path', aliases=['conf']),
+ chdir=dict(required=False, type='path', default=None),
+ pid=dict(required=False, type='path', default=None),
+ user=dict(required=False, type='str'),
+ worker=dict(required=False,
+ type='str',
+ choices=['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ ),
+ )
+ )
+
+ # temporary files in case no option provided
+ tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log')
+ tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid')
+
+ # remove temp file if exists
+ remove_tmp_file(tmp_pid_file)
+ remove_tmp_file(tmp_error_log)
+
+ # obtain app name and venv
+ params = module.params
+ app = params['app']
+ venv = params['venv']
+ pid = params['pid']
+
+ # use venv path if exists
+ if venv:
+ gunicorn_command = "/".join((venv, 'bin', 'gunicorn'))
+ else:
+ gunicorn_command = 'gunicorn'
+
+ # to daemonize the process
+ options = ["-D"]
+
+ # fill options
+ for option in gunicorn_options:
+ param = params[option]
+ if param:
+ options.append(gunicorn_options[option])
+ options.append(param)
+
+ error_log = search_existing_config(params['config'], 'errorlog')
+ if not error_log:
+ # place error log somewhere in case of fail
+ options.append("--error-logfile")
+ options.append(tmp_error_log)
+
+ pid_file = search_existing_config(params['config'], 'pid')
+ if not params['pid'] and not pid_file:
+ pid = tmp_pid_file
+
+ # add option for pid file if not found on config file
+ if not pid_file:
+ options.append('--pid')
+ options.append(pid)
+
+ # put args together
+ args = [gunicorn_command] + options + [app]
+ rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None)
+
+ if not err:
+ # wait for gunicorn to dump to log
+ time.sleep(0.5)
+ if os.path.isfile(pid):
+ with open(pid, 'r') as f:
+ result = f.readline().strip()
+
+ if not params['pid']:
+ os.remove(pid)
+
+ module.exit_json(changed=True, pid=result, debug=" ".join(args))
+ else:
+ # if user defined own error log, check that
+ if error_log:
+ error = 'Please check your {0}'.format(error_log.strip())
+ else:
+ if os.path.isfile(tmp_error_log):
+ with open(tmp_error_log, 'r') as f:
+ error = f.read()
+ # delete tmp log
+ os.remove(tmp_error_log)
+ else:
+ error = "Log not found"
+
+ module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err)
+
+ else:
+ module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py
new file mode 100644
index 00000000..848cc1fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py
@@ -0,0 +1,479 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ravi Bhure <ravibhure@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: haproxy
+short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands
+author:
+- Ravi Bhure (@ravibhure)
+description:
+ - Enable, disable, drain and set weights for HAProxy backend servers using socket commands.
+notes:
+ - Enable, disable and drain commands are restricted and can only be issued on
+ sockets configured for level 'admin'. For example, you can add the line
+ 'stats socket /var/run/haproxy.sock level admin' to the general section of
+ haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
+ - Depends on netcat (nc) being available; you need to install the appropriate
+ package for your operating system before this module can be used.
+options:
+ backend:
+ description:
+ - Name of the HAProxy backend pool.
+ - If this parameter is unset, it will be auto-detected.
+ type: str
+ drain:
+ description:
+ - Wait until the server has no active connections or until the timeout
+ determined by wait_interval and wait_retries is reached.
+ - Continue only after the status changes to 'MAINT'.
+ - This overrides the shutdown_sessions option.
+ type: bool
+ default: false
+ host:
+ description:
+ - Name of the backend host to change.
+ type: str
+ required: true
+ shutdown_sessions:
+ description:
+ - When disabling a server, immediately terminate all the sessions attached
+ to the specified server.
+ - This can be used to terminate long-running sessions after a server is put
+ into maintenance mode. Overridden by the drain option.
+ type: bool
+ default: no
+ socket:
+ description:
+ - Path to the HAProxy socket file.
+ type: path
+ default: /var/run/haproxy.sock
+ state:
+ description:
+ - Desired state of the provided backend host.
+ - Note that C(drain) state was added in version 2.4.
+ - It is supported only by HAProxy version 1.5 or later.
+ - When used on versions earlier than 1.5, it will be ignored.
+ type: str
+ required: true
+ choices: [ disabled, drain, enabled ]
+ agent:
+ description:
+ - Disable/enable agent checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: 1.0.0
+ health:
+ description:
+ - Disable/enable health checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: "1.0.0"
+ fail_on_not_found:
+ description:
+ - Fail whenever trying to enable/disable a backend host that does not exist
+ type: bool
+ default: no
+ wait:
+ description:
+ - Wait until the server reports a status of 'UP' when C(state=enabled),
+ status of 'MAINT' when C(state=disabled) or status of 'DRAIN' when C(state=drain)
+ type: bool
+ default: no
+ wait_interval:
+ description:
+ - Number of seconds to wait between retries.
+ type: int
+ default: 5
+ wait_retries:
+ description:
+ - Number of times to check for status after changing the state.
+ type: int
+ default: 25
+ weight:
+ description:
+ - The value passed in argument.
+ - If the value ends with the C(%) sign, then the new weight will be
+ relative to the initially configured weight.
+ - Relative weights are only permitted between 0 and 100% and absolute
+ weights are permitted between 0 and 256.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Disable server in 'www' backend pool
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Disable server in 'www' backend pool, also stop health/agent checks
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ health: yes
+ agent: yes
+
+- name: Disable server without backend pool name (apply to all available backend pool)
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+
+- name: Disable server, provide socket file
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+
+- name: Disable server, provide socket file, wait until status reports in maintenance
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+
+# Place server in drain mode, providing a socket file. Then check the server's
+# status every second, up to 60 times (one minute), continuing if the status
+# changes to maintenance mode and failing otherwise.
+- community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+ drain: yes
+ wait_interval: 1
+ wait_retries: 60
+
+- name: Disable backend server in 'www' backend pool and drop open sessions to it
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ socket: /var/run/haproxy.sock
+ shutdown_sessions: yes
+
+- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ fail_on_not_found: yes
+
+- name: Enable server in 'www' backend pool
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Enable server in 'www' backend pool wait until healthy
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+
+- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+ wait_retries: 10
+ wait_interval: 5
+
+- name: Enable server in 'www' backend pool with change server(s) weight
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: 10
+ backend: www
+
+- name: Set the server in 'www' backend pool to drain mode
+ community.general.haproxy:
+ state: drain
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
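+
+# A hedged sketch: per the weight option's documentation, a value ending in
+# C(%) is relative to the initially configured weight; the number is illustrative.
+- name: Set server weight in 'www' backend pool to half of its configured weight
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: '50%'
+ backend: www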
+'''
+
+import csv
+import socket
+import time
+from string import Template
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_text
+
+
+DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
+RECV_SIZE = 1024
+ACTION_CHOICES = ['enabled', 'disabled', 'drain']
+WAIT_RETRIES = 25
+WAIT_INTERVAL = 5
+
+
+######################################################################
+class TimeoutException(Exception):
+ pass
+
+
+class HAProxy(object):
+ """
+ Used for communicating with HAProxy through its local UNIX socket interface.
+ Performs common HAProxy tasks such as enabling and
+ disabling backend servers.
+
+ The complete set of external commands Haproxy handles is documented
+ on their website:
+
+ http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
+ """
+
+ def __init__(self, module):
+ self.module = module
+
+ self.state = self.module.params['state']
+ self.host = self.module.params['host']
+ self.backend = self.module.params['backend']
+ self.weight = self.module.params['weight']
+ self.socket = self.module.params['socket']
+ self.shutdown_sessions = self.module.params['shutdown_sessions']
+ self.fail_on_not_found = self.module.params['fail_on_not_found']
+ self.agent = self.module.params['agent']
+ self.health = self.module.params['health']
+ self.wait = self.module.params['wait']
+ self.wait_retries = self.module.params['wait_retries']
+ self.wait_interval = self.module.params['wait_interval']
+ self._drain = self.module.params['drain']
+ self.command_results = {}
+
+ def execute(self, cmd, timeout=200, capture_output=True):
+ """
+ Executes a HAProxy command by sending a message to a HAProxy's local
+ UNIX socket and waiting up to 'timeout' milliseconds for the response.
+ """
+ self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.client.connect(self.socket)
+ self.client.sendall(to_bytes('%s\n' % cmd))
+
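+ # HAProxy closes the connection once the full response has been sent,
+ # so read until recv() returns an empty buffer.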
+ result = b''
+ buf = self.client.recv(RECV_SIZE)
+ while buf:
+ result += buf
+ buf = self.client.recv(RECV_SIZE)
+ result = to_text(result, errors='surrogate_or_strict')
+
+ if capture_output:
+ self.capture_command_output(cmd, result.strip())
+ self.client.close()
+ return result
+
+ def capture_command_output(self, cmd, output):
+ """
+ Capture the output for a command
+ """
+ if 'command' not in self.command_results:
+ self.command_results['command'] = []
+ self.command_results['command'].append(cmd)
+ if 'output' not in self.command_results:
+ self.command_results['output'] = []
+ self.command_results['output'].append(output)
+
+ def discover_all_backends(self):
+ """
+ Discover all entries with svname = 'BACKEND' and return a list of their corresponding
+ pxnames
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ return tuple(d['pxname'] for d in r if d['svname'] == 'BACKEND')
+
+ def discover_version(self):
+ """
+ Attempt to extract the haproxy version.
+ Return a tuple containing major and minor version.
+ """
+ data = self.execute('show info', 200, False)
+ lines = data.splitlines()
+ line = [x for x in lines if 'Version:' in x]
+ try:
+ version_values = line[0].partition(':')[2].strip().split('.', 3)
+ version = (int(version_values[0]), int(version_values[1]))
+ except (ValueError, TypeError, IndexError):
+ version = None
+
+ return version
+
+ def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
+ """
+ Run some command on the specified backends. If no backends are provided they will
+ be discovered automatically (all backends)
+ """
+ # Discover backends if none are given
+ if pxname is None:
+ backends = self.discover_all_backends()
+ else:
+ backends = [pxname]
+
+ # Run the command for each requested backend
+ for backend in backends:
+ # Fail when backends were not found
+ state = self.get_state_for(backend, svname)
+ if (self.fail_on_not_found) and state is None:
+ self.module.fail_json(
+ msg="The specified backend '%s/%s' was not found!" % (backend, svname))
+
+ if state is not None:
+ self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
+ if self.wait:
+ self.wait_until_status(backend, svname, wait_for_status)
+
+ def get_state_for(self, pxname, svname):
+ """
+ Find the state of specific services. When pxname is not set, get all backends for a specific host.
+ Returns a tuple of dictionaries containing the status, weight and current session count ('scur')
+ for those services, or None if the service was not found.
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ state = tuple(
+ map(
+ lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
+ filter(lambda d: (pxname is None or d['pxname']
+ == pxname) and d['svname'] == svname, r)
+ )
+ )
+ return state or None
+
+ def wait_until_status(self, pxname, svname, status):
+ """
+ Wait for a service to reach the specified status. Try RETRIES times
+ with INTERVAL seconds of sleep in between. If the service has not reached
+ the expected status in that time, the module will fail. If the service was
+ not found, the module will fail.
+ """
+ for dummy in range(self.wait_retries):
+ state = self.get_state_for(pxname, svname)
+
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
+ # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching
+ if status in state[0]['status']:
+ if not self._drain or (state[0]['scur'] == '0' and 'MAINT' in state[0]['status']):
+ return True
+ else:
+ time.sleep(self.wait_interval)
+
+ self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
+ (pxname, svname, status, self.wait_retries))
+
+ def enabled(self, host, backend, weight):
+ """
+ Enabled action: marks the server UP and re-enables checks;
+ also supports getting the current weight for the server (default) and
+ setting the weight for the HAProxy backend server when provided.
+ """
+ cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
+ if self.agent:
+ cmd += "; enable agent $pxname/$svname"
+ if self.health:
+ cmd += "; enable health $pxname/$svname"
+ if weight:
+ cmd += "; set weight $pxname/$svname %s" % weight
+ self.execute_for_backends(cmd, backend, host, 'UP')
+
+ def disabled(self, host, backend, shutdown_sessions):
+ """
+ Disabled action: marks the server DOWN for maintenance. In this mode, no more checks will be
+ performed on the server until it leaves maintenance;
+ optionally, it also shuts down sessions while disabling the backend host server.
+ """
+ cmd = "get weight $pxname/$svname"
+ if self.agent:
+ cmd += "; disable agent $pxname/$svname"
+ if self.health:
+ cmd += "; disable health $pxname/$svname"
+ cmd += "; disable server $pxname/$svname"
+ if shutdown_sessions:
+ cmd += "; shutdown sessions server $pxname/$svname"
+ self.execute_for_backends(cmd, backend, host, 'MAINT')
+
+ def drain(self, host, backend, status='DRAIN'):
+ """
+ Drain action, sets the server to DRAIN mode.
+ In this mode, the server will not accept any new connections
+ other than those that are accepted via persistence.
+ """
+ haproxy_version = self.discover_version()
+
+ # check if haproxy version supports DRAIN state (starting with 1.5)
+ if haproxy_version and (1, 5) <= haproxy_version:
+ cmd = "set server $pxname/$svname state drain"
+ self.execute_for_backends(cmd, backend, host, status)
+
+ def act(self):
+ """
+ Figure out what you want to do from ansible, and then do it.
+ """
+ # Get the state before the run
+ self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
+
+ # toggle enable/disable server
+ if self.state == 'enabled':
+ self.enabled(self.host, self.backend, self.weight)
+ elif self.state == 'disabled' and self._drain:
+ self.drain(self.host, self.backend, status='MAINT')
+ elif self.state == 'disabled':
+ self.disabled(self.host, self.backend, self.shutdown_sessions)
+ elif self.state == 'drain':
+ self.drain(self.host, self.backend)
+ else:
+ self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
+
+ # Get the state after the run
+ self.command_results['state_after'] = self.get_state_for(self.backend, self.host)
+
+ # Report change status
+ self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after'])
+
+ self.module.exit_json(**self.command_results)
+
+
+def main():
+
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=ACTION_CHOICES),
+ host=dict(type='str', required=True),
+ backend=dict(type='str'),
+ weight=dict(type='str'),
+ socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION),
+ shutdown_sessions=dict(type='bool', default=False),
+ fail_on_not_found=dict(type='bool', default=False),
+ health=dict(type='bool', default=False),
+ agent=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_retries=dict(type='int', default=WAIT_RETRIES),
+ wait_interval=dict(type='int', default=WAIT_INTERVAL),
+ drain=dict(type='bool', default=False),
+ ),
+ )
+
+ if not module.params['socket']:
+ module.fail_json(msg="unable to locate haproxy socket")
+
+ ansible_haproxy = HAProxy(module)
+ ansible_haproxy.act()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py
new file mode 100644
index 00000000..dd592d6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# (c) 2016, Flavio Percoco <flavio@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: For more details, see https://github.com/ansible/ansible/issues/61546.
+ alternative: Use M(community.kubernetes.helm) instead.
+module: helm
+short_description: Manages Kubernetes packages with the Helm package manager
+author: "Flavio Percoco (@flaper87)"
+description:
+ - Install, upgrade, delete and list packages with the Helm package manager.
+requirements:
+ - "pyhelm"
+ - "grpcio"
+options:
+ host:
+ description:
+ - Tiller's server host.
+ type: str
+ default: "localhost"
+ port:
+ description:
+ - Tiller's server port.
+ type: int
+ default: 44134
+ namespace:
+ description:
+ - Kubernetes namespace where the chart should be installed.
+ type: str
+ default: "default"
+ name:
+ description:
+ - Release name to manage.
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)), remove (C(absent)), or purge (C(purged)) a package.
+ choices: ['absent', 'purged', 'present']
+ type: str
+ default: "present"
+ chart:
+ description:
+ - A map describing the chart to install. See examples for available options.
+ type: dict
+ default: {}
+ values:
+ description:
+ - A map of value options for the chart.
+ type: dict
+ default: {}
+ disable_hooks:
+ description:
+ - Whether to disable hooks during the uninstall process.
+ type: bool
+ default: 'no'
+'''
+
+RETURN = ''' # '''
+
+EXAMPLES = '''
+- name: Install helm chart
+ community.general.helm:
+ host: localhost
+ chart:
+ name: memcached
+ version: 0.4.0
+ source:
+ type: repo
+ location: https://kubernetes-charts.storage.googleapis.com
+ state: present
+ name: my-memcached
+ namespace: default
+
+- name: Uninstall helm chart
+ community.general.helm:
+ host: localhost
+ state: absent
+ name: my-memcached
+
+- name: Install helm chart from a git repo
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/user/helm-chart.git
+ state: present
+ name: my-example
+ namespace: default
+ values:
+ foo: "bar"
+
+- name: Install helm chart from a git repo specifying path
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/helm/charts.git
+ path: stable/memcached
+ state: present
+ name: my-memcached
+ namespace: default
+ values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}"
+'''
+
+import traceback
+HELM_IMPORT_ERR = None
+try:
+ import grpc
+ from pyhelm import tiller
+ from pyhelm import chartbuilder
+except ImportError:
+ HELM_IMPORT_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def install(module, tserver):
+ changed = False
+ params = module.params
+ name = params['name']
+ values = params['values']
+ chart = module.params['chart']
+ namespace = module.params['namespace']
+
+ chartb = chartbuilder.ChartBuilder(chart)
+ r_matches = (x for x in tserver.list_releases()
+ if x.name == name and x.namespace == namespace)
+ installed_release = next(r_matches, None)
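+ # Upgrade only when the requested chart version differs from the one
+ # currently installed; otherwise leave the release untouched.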
+ if installed_release:
+ if installed_release.chart.metadata.version != chart['version']:
+ tserver.update_release(chartb.get_helm_chart(), False,
+ namespace, name=name, values=values)
+ changed = True
+ else:
+ tserver.install_release(chartb.get_helm_chart(), namespace,
+ dry_run=False, name=name,
+ values=values)
+ changed = True
+
+ return dict(changed=changed)
+
+
+def delete(module, tserver, purge=False):
+ changed = False
+ params = module.params
+
+ if not module.params['name']:
+ module.fail_json(msg='Missing required field name')
+
+ name = module.params['name']
+ disable_hooks = params['disable_hooks']
+
+ try:
+ tserver.uninstall_release(name, disable_hooks, purge)
+ changed = True
+ except grpc._channel._Rendezvous as exc:
+ if 'not found' not in str(exc):
+ raise exc
+
+ return dict(changed=changed)
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=44134),
+ name=dict(type='str', default=''),
+ chart=dict(type='dict'),
+ state=dict(
+ choices=['absent', 'purged', 'present'],
+ default='present'
+ ),
+ # Install options
+ values=dict(type='dict'),
+ namespace=dict(type='str', default='default'),
+
+ # Uninstall options
+ disable_hooks=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True)
+
+ if HELM_IMPORT_ERR:
+ module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR)
+
+ host = module.params['host']
+ port = module.params['port']
+ state = module.params['state']
+ tserver = tiller.Tiller(host, port)
+
+ if state == 'present':
+ rst = install(module, tserver)
+
+ if state == 'absent':
+ rst = delete(module, tserver)
+
+ if state == 'purged':
+ rst = delete(module, tserver, True)
+
+ module.exit_json(**rst)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
new file mode 100644
index 00000000..276b5b12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: heroku_collaborator
+short_description: "Add or delete app collaborators on Heroku"
+description:
+ - Manages collaborators for Heroku apps.
+ - If set to C(present) and the Heroku user is already a collaborator, then do nothing.
+ - If set to C(present) and the Heroku user is not a collaborator, then add the user to the app.
+ - If set to C(absent) and the Heroku user is a collaborator, then delete the user from the app.
+author:
+ - Marcel Arns (@marns93)
+requirements:
+ - heroku3
+options:
+ api_key:
+ type: str
+ description:
+ - Heroku API key.
+ apps:
+ type: list
+ description:
+ - List of Heroku App names.
+ required: true
+ suppress_invitation:
+ description:
+ - Suppress email invitation when creating collaborator.
+ type: bool
+ default: "no"
+ user:
+ type: str
+ description:
+ - User ID or e-mail.
+ required: true
+ state:
+ type: str
+ description:
+ - Create or remove the Heroku collaborator.
+ choices: ["present", "absent"]
+ default: "present"
+notes:
+ - The C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
+ - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
+'''
+
+EXAMPLES = '''
+- name: Create a heroku collaborator
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: max.mustermann@example.com
+ apps: heroku-example-app
+ state: present
+
+- name: An example of using the module in loop
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: '{{ item.user }}'
+ apps: '{{ item.apps | default(apps) }}'
+ suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
+ state: '{{ item.state | default("present") }}'
+ with_items:
+ - { user: 'a.b@example.com' }
+ - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
+ - { user: 'x.y@example.com', apps: ["heroku-example-app"] }
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper
+
+
+def add_or_delete_heroku_collaborator(module, client):
+ user = module.params['user']
+ state = module.params['state']
+ affected_apps = []
+ result_state = False
+
+ for app in module.params['apps']:
+ if app not in client.apps():
+ module.fail_json(msg='App {0} does not exist'.format(app))
+
+ heroku_app = client.apps()[app]
+
+ heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()]
+
+ if state == 'absent' and user in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.remove_collaborator(user)
+ affected_apps += [app]
+ result_state = True
+ elif state == 'present' and user not in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
+ affected_apps += [app]
+ result_state = True
+
+ return result_state, affected_apps
+
+
+def main():
+ argument_spec = HerokuHelper.heroku_argument_spec()
+ argument_spec.update(
+ user=dict(required=True, type='str'),
+ apps=dict(required=True, type='list'),
+ suppress_invitation=dict(default=False, type='bool'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HerokuHelper(module).get_heroku_client()
+
+ has_changed, msg = add_or_delete_heroku_collaborator(module, client)
+ module.exit_json(changed=has_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py
new file mode 100644
index 00000000..a57e0ab8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip
+short_description: Manage Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip_info
+ description: Retrieve information on failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+ state:
+ description:
+ - Defines whether the IP will be routed or not.
+ - If set to C(routed), I(value) must be specified.
+ type: str
+ choices:
+ - routed
+ - unrouted
+ default: routed
+ value:
+ description:
+ - The new value for the failover IP address.
+ - Required when setting I(state) to C(routed).
+ type: str
+ timeout:
+ description:
+ - Timeout to use when routing or unrouting the failover IP.
+ - Note that the API call only returns once the failover IP has been
+ successfully routed to the new address, or successfully unrouted,
+ respectively.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Set value of failover IP 1.2.3.4 to 5.6.7.8
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ value: 5.6.7.8
+
+- name: Set value of failover IP 1.2.3.4 to unrouted
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ state: unrouted
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover,
+ set_failover,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ state=dict(type='str', default='routed', choices=['routed', 'unrouted']),
+ value=dict(type='str'),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'routed', ['value']),
+ ),
+ )
+
+ failover_ip = module.params['failover_ip']
+ value = get_failover(module, failover_ip)
+ changed = False
+ before = get_failover_state(value)
+
+ if module.params['state'] == 'routed':
+ new_value = module.params['value']
+ else:
+ new_value = None
+
+ if value != new_value:
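+ # In check mode, assume the routing change would succeed so that the
+ # reported diff matches what a real run would produce.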
+ if module.check_mode:
+ value = new_value
+ changed = True
+ else:
+ value, changed = set_failover(module, failover_ip, new_value, timeout=module.params['timeout'])
+
+ after = get_failover_state(value)
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ **after
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py
new file mode 100644
index 00000000..4d6f9f37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip_info
+short_description: Retrieve information on Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Retrieve information on Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip
+ description: Manage failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+'''
+
+EXAMPLES = r'''
+- name: Get value of failover IP 1.2.3.4
+ community.general.hetzner_failover_ip_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ register: result
+
+- name: Print value of failover IP 1.2.3.4 in case it is routed
+ ansible.builtin.debug:
+ msg: "1.2.3.4 routes to {{ result.value }}"
+ when: result.state == 'routed'
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+failover_ip:
+ description:
+ - The failover IP.
+ returned: success
+ type: str
+ sample: '1.2.3.4'
+failover_netmask:
+ description:
+ - The netmask for the failover IP.
+ returned: success
+ type: str
+ sample: '255.255.255.255'
+server_ip:
+ description:
+ - The main IP of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: str
+server_number:
+ description:
+ - The number of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: int
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover_record,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ failover = get_failover_record(module, module.params['failover_ip'])
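+ # The record's active_server_ip is the current routing target;
+ # get_failover_state derives the 'routed'/'unrouted' state and value from it.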
+ result = get_failover_state(failover['active_server_ip'])
+ result['failover_ip'] = failover['ip']
+ result['failover_netmask'] = failover['netmask']
+ result['server_ip'] = failover['server_ip']
+ result['server_number'] = failover['server_number']
+ result['changed'] = False
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py
new file mode 100644
index 00000000..ade9bd95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py
@@ -0,0 +1,509 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall
+version_added: '0.2.0'
+short_description: Manage Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's dedicated server firewall.
+ - Note that the idempotency check for TCP flags simply compares strings and does not
+ try to interpret the rules. This might change in the future.
+seealso:
+ - name: Firewall documentation
+ description: Hetzner's documentation on the stateless firewall for dedicated servers
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall_info
+ description: Retrieve information on firewall configuration.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ required: yes
+ type: str
+ port:
+ description:
+ - Switch port of firewall.
+ type: str
+ choices: [ main, kvm ]
+ default: main
+ state:
+ description:
+ - Status of the firewall.
+ - Firewall is active if state is C(present), and disabled if state is C(absent).
+ type: str
+ default: present
+ choices: [ present, absent ]
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ suboptions:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ ip_version:
+ description:
+ - Internet protocol version.
+ - Note that currently, only IPv4 is supported by Hetzner.
+ required: yes
+ type: str
+ choices: [ ipv4, ipv6 ]
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ - Flags supported by Hetzner are C(syn), C(fin), C(rst), C(psh) and C(urg).
+ - They can be combined with C(|) (logical or) and C(&) (logical and).
+ - See L(the documentation,https://wiki.hetzner.de/index.php/Robot_Firewall/en#Parameter)
+ for more information.
+ type: str
+ action:
+ description:
+ - Action if rule matches.
+ required: yes
+ type: str
+ choices: [ accept, discard ]
+ update_timeout:
+ description:
+ - Timeout to use when configuring the firewall.
+ - Note that the API call returns before the firewall has been
+ successfully set up.
+ type: int
+ default: 30
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+ - The API returns status C(in progress) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Configure firewall for server with main IP 1.2.3.4
+ community.general.hetzner_firewall:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ state: present
+ whitelist_hos: yes
+ rules:
+ input:
+ - name: Allow everything to ports 20-23 from 4.3.2.1/24
+ ip_version: ipv4
+ src_ip: 4.3.2.1/24
+ dst_port: '20-23'
+ action: accept
+ - name: Allow everything to port 443
+ ip_version: ipv4
+ dst_port: '443'
+ action: accept
+ - name: Drop everything else
+ ip_version: ipv4
+ action: discard
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result }}"
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+ - Will be C(in process) if the firewall is currently being updated and
+ I(wait_for_configured) is set to C(no), or if I(timeout) is set to a value that is too small.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.compat import ipaddress as compat_ipaddress
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+
+
+RULE_OPTION_NAMES = [
+ 'name', 'ip_version', 'dst_ip', 'dst_port', 'src_ip', 'src_port',
+ 'protocol', 'tcp_flags', 'action',
+]
+
+RULES = ['input']
+
+
+def restrict_dict(dictionary, fields):
+ result = dict()
+ for k, v in dictionary.items():
+ if k in fields:
+ result[k] = v
+ return result
+
+
+def restrict_firewall_config(config):
+ result = restrict_dict(config, ['port', 'status', 'whitelist_hos'])
+ result['rules'] = dict()
+ for ruleset in RULES:
+ result['rules'][ruleset] = [
+ restrict_dict(rule, RULE_OPTION_NAMES)
+ for rule in config['rules'].get(ruleset) or []
+ ]
+ return result
+
+
+def update(before, after, params, name):
+ bv = before.get(name)
+ after[name] = bv
+ changed = False
+ pv = params[name]
+ if pv is not None:
+ changed = pv != bv
+ if changed:
+ after[name] = pv
+ return changed
+
+
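+# A brief note: normalize_ip canonicalizes an address and appends the
+# full-length prefix when none is given, e.g. '1.2.3.4' -> '1.2.3.4/32'
+# for IPv4 and '::1' -> '::1/128' for IPv6.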
+def normalize_ip(ip, ip_version):
+ if ip is None:
+ return ip
+ if '/' in ip:
+ ip, prefix = ip.split('/')
+ else:
+ ip, prefix = ip, ''
+ ip_addr = to_native(compat_ipaddress.ip_address(to_text(ip)).compressed)
+ if prefix == '':
+ prefix = '32' if ip_version.lower() == 'ipv4' else '128'
+ return ip_addr + '/' + prefix
+
+
+def update_rules(before, after, params, ruleset):
+ before_rules = before['rules'][ruleset]
+ after_rules = after['rules'][ruleset]
+ params_rules = params['rules'][ruleset]
+ changed = len(before_rules) != len(params_rules)
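+ # Normalize IPs on both sides before comparing, so that '1.2.3.4' and
+ # '1.2.3.4/32' are treated as the same rule.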
+ for no, rule in enumerate(params_rules):
+ rule['src_ip'] = normalize_ip(rule['src_ip'], rule['ip_version'])
+ rule['dst_ip'] = normalize_ip(rule['dst_ip'], rule['ip_version'])
+ if no < len(before_rules):
+ before_rule = before_rules[no]
+ before_rule['src_ip'] = normalize_ip(before_rule['src_ip'], before_rule['ip_version'])
+ before_rule['dst_ip'] = normalize_ip(before_rule['dst_ip'], before_rule['ip_version'])
+ if before_rule != rule:
+ changed = True
+ after_rules.append(rule)
+ return changed
+
+
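+# The firewall update below is sent as an urlencoded form; encode_rule
+# flattens each rule dict into fields like rules[input][0][ip_version].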
+def encode_rule(output, rulename, rule_input):
+ for i, rule in enumerate(rule_input['rules'][rulename]):
+ for k, v in rule.items():
+ if v is not None:
+ output['rules[{0}][{1}][{2}]'.format(rulename, i, k)] = v
+
+
+def create_default_rules_object():
+ rules = dict()
+ for ruleset in RULES:
+ rules[ruleset] = []
+ return rules
+
+
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ port=dict(type='str', default='main', choices=['main', 'kvm']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ whitelist_hos=dict(type='bool'),
+ rules=dict(type='dict', options=dict(
+ input=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ ip_version=dict(type='str', required=True, choices=['ipv4', 'ipv6']),
+ dst_ip=dict(type='str'),
+ dst_port=dict(type='str'),
+ src_ip=dict(type='str'),
+ src_port=dict(type='str'),
+ protocol=dict(type='str'),
+ tcp_flags=dict(type='str'),
+ action=dict(type='str', required=True, choices=['accept', 'discard']),
+ )),
+ )),
+ update_timeout=dict(type='int', default=30),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Sanitize input
+ module.params['status'] = 'active' if (module.params['state'] == 'present') else 'disabled'
+ if module.params['rules'] is None:
+ module.params['rules'] = {}
+ if module.params['rules'].get('input') is None:
+ module.params['rules']['input'] = []
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+ if not firewall_configured(result, error):
+ module.fail_json(msg='Firewall configuration cannot be read as it is not configured.')
+
+ full_before = result['firewall']
+ if not full_before.get('rules'):
+ full_before['rules'] = create_default_rules_object()
+ before = restrict_firewall_config(full_before)
+
+ # Build wanted (after) state and compare
+ after = dict(before)
+ changed = False
+ changed |= update(before, after, module.params, 'port')
+ changed |= update(before, after, module.params, 'status')
+ changed |= update(before, after, module.params, 'whitelist_hos')
+ after['rules'] = create_default_rules_object()
+ if module.params['status'] == 'active':
+ for ruleset in RULES:
+ changed |= update_rules(before, after, module.params, ruleset)
+
+ # Update if different
+ construct_result = True
+ construct_status = None
+ if changed and not module.check_mode:
+ # https://robot.your-server.de/doc/webservice/en.html#post-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ data = dict(after)
+ data['whitelist_hos'] = str(data['whitelist_hos']).lower()
+ del data['rules']
+ for ruleset in RULES:
+ encode_rule(data, ruleset, after)
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='POST',
+ timeout=module.params['update_timeout'],
+ data=urlencode(data),
+ headers=headers,
+ )
+ if module.params['wait_for_configured'] and not firewall_configured(result, error):
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ skip_first=True,
+ )
+ except CheckDoneTimeoutException as e:
+ result, error = e.result, e.error
+ module.warn('Timeout while waiting for firewall to be configured.')
+
+ full_after = result['firewall']
+ if not full_after.get('rules'):
+ full_after['rules'] = create_default_rules_object()
+ construct_status = full_after['status']
+ if construct_status != 'in process':
+ # Only use result if configuration is done, so that diff will be ok
+ after = restrict_firewall_config(full_after)
+ construct_result = False
+
+ if construct_result:
+        # Construct result (used for check mode, and when configuration is still in process)
+ full_after = dict(full_before)
+ for k, v in after.items():
+ if k != 'rules':
+ full_after[k] = after[k]
+ if construct_status is not None:
+ # We want 'in process' here
+ full_after['status'] = construct_status
+ full_after['rules'] = dict()
+ for ruleset in RULES:
+ full_after['rules'][ruleset] = after['rules'][ruleset]
+
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ firewall=full_after,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py
new file mode 100644
index 00000000..fde06a5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall_info
+version_added: '0.2.0'
+short_description: Query Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+  - Retrieve information about Hetzner's dedicated server firewall.
+seealso:
+ - name: Firewall documentation
+ description: Hetzner's documentation on the stateless firewall for dedicated servers
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall
+ description: Configure firewall.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ type: str
+ required: yes
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+      - The API returns status C(in process) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Get firewall configuration for server with main IP 1.2.3.4
+ community.general.hetzner_firewall_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.firewall }}"
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+        - Will be C(in process) if the firewall is currently being updated, and
+          I(wait_for_configured) is set to C(no) or I(timeout) is set to a value that is too small.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+                - Protocol above IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+
+
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+
+ firewall = result['firewall']
+ if not firewall.get('rules'):
+ firewall['rules'] = dict()
+ for ruleset in ['input']:
+ firewall['rules'][ruleset] = []
+
+ module.exit_json(
+ changed=False,
+ firewall=firewall,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py
new file mode 100644
index 00000000..5c084d3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Yeukhon Wong <yeukhon@acm.org>
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: hg
+short_description: Manages Mercurial (hg) repositories
+description:
+ - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address.
+author: "Yeukhon Wong (@yeukhon)"
+options:
+ repo:
+ description:
+ - The repository address.
+ required: yes
+ aliases: [ name ]
+ dest:
+ description:
+      - Absolute path of where the repository should be cloned to.
+        This parameter is required, unless C(clone) and C(update) are set to C(no).
+ revision:
+ description:
+      - Equivalent to the C(-r) option of the hg command, which can be a changeset,
+        revision number, branch name or even a tag.
+ aliases: [ version ]
+ force:
+ description:
+      - Discards uncommitted changes. Runs C(hg update -C). Prior to
+        1.9, the default was C(yes).
+ type: bool
+ default: 'no'
+ purge:
+ description:
+ - Deletes untracked files. Runs C(hg purge).
+ type: bool
+ default: 'no'
+ update:
+ description:
+      - If C(no), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: 'yes'
+ clone:
+ description:
+ - If C(no), do not clone the repository if it does not exist locally.
+ type: bool
+ default: 'yes'
+ executable:
+ description:
+ - Path to hg executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+notes:
+ - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
+  - As of 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such,
+ if the underlying system still uses a Python version below 2.7.9, you will have issues checking out
+ bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+'''
+
+EXAMPLES = '''
+- name: Ensure the current working copy is inside the stable branch and delete untracked files if any.
+ community.general.hg:
+ repo: https://bitbucket.org/user/repo1
+ dest: /home/user/repo1
+ revision: stable
+ purge: yes
+
+- name: Get information about the repository whether or not it has already been cloned locally.
+ community.general.hg:
+    repo: https://bitbucket.org/user/repo
+ dest: /srv/checkout
+ clone: no
+ update: no
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Hg(object):
+ def __init__(self, module, dest, repo, revision, hg_path):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.hg_path = hg_path
+
+ def _command(self, args_list):
+ (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
+ return (rc, out, err)
+
+ def _list_untracked(self):
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
+ return self._command(args)
+
+ def get_revision(self):
+ """
+ hg id -b -i -t returns a string in the format:
+ "<changeset>[+] <branch_name> <tag>"
+ This format lists the state of the current working copy,
+ and indicates whether there are uncommitted changes by the
+ plus sign. Otherwise, the sign is omitted.
+
+ Read the full description via hg id --help
+ """
+ (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def get_remote_revision(self):
+ (rc, out, err) = self._command(['id', self.repo])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def has_local_mods(self):
+ now = self.get_revision()
+ if '+' in now:
+ return True
+ else:
+ return False
+
+ def discard(self):
+ before = self.has_local_mods()
+ if not before:
+ return False
+
+ args = ['update', '-C', '-R', self.dest, '-r', '.']
+ (rc, out, err) = self._command(args)
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ after = self.has_local_mods()
+        if before != after and not after:  # no more local modifications
+            return True
+        return False
+
+ def purge(self):
+ # before purge, find out if there are any untracked files
+ (rc1, out1, err1) = self._list_untracked()
+ if rc1 != 0:
+ self.module.fail_json(msg=err1)
+
+        # there are some untracked files
+ if out1 != '':
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
+ (rc2, out2, err2) = self._command(args)
+ if rc2 != 0:
+ self.module.fail_json(msg=err2)
+ return True
+ else:
+ return False
+
+ def cleanup(self, force, purge):
+ discarded = False
+ purged = False
+
+ if force:
+ discarded = self.discard()
+ if purge:
+ purged = self.purge()
+ if discarded or purged:
+ return True
+ else:
+ return False
+
+ def pull(self):
+ return self._command(
+ ['pull', '-R', self.dest, self.repo])
+
+ def update(self):
+ if self.revision is not None:
+ return self._command(['update', '-r', self.revision, '-R', self.dest])
+ return self._command(['update', '-R', self.dest])
+
+ def clone(self):
+ if self.revision is not None:
+ return self._command(['clone', self.repo, self.dest, '-r', self.revision])
+ return self._command(['clone', self.repo, self.dest])
+
+ @property
+ def at_revision(self):
+ """
+ There is no point in pulling from a potentially down/slow remote site
+ if the desired changeset is already the current changeset.
+ """
+ if self.revision is None or len(self.revision) < 7:
+ # Assume it's a rev number, tag, or branch
+ return False
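+        # '--debug id -i' prints the full 40-character changeset hash, so a
+        # startswith() check against the requested revision prefix suffices.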
+ (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ if out.startswith(self.revision):
+ return True
+ return False
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True, aliases=['name']),
+ dest=dict(type='path'),
+ revision=dict(type='str', default=None, aliases=['version']),
+ force=dict(type='bool', default=False),
+ purge=dict(type='bool', default=False),
+ update=dict(type='bool', default=True),
+ clone=dict(type='bool', default=True),
+ executable=dict(type='str', default=None),
+ ),
+ )
+ repo = module.params['repo']
+ dest = module.params['dest']
+ revision = module.params['revision']
+ force = module.params['force']
+ purge = module.params['purge']
+ update = module.params['update']
+ clone = module.params['clone']
+ hg_path = module.params['executable'] or module.get_bin_path('hg', True)
+ if dest is not None:
+ hgrc = os.path.join(dest, '.hg/hgrc')
+
+ # initial states
+ before = ''
+ changed = False
+ cleaned = False
+
+ if not dest and (clone or update):
+ module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
+
+ hg = Hg(module, dest, repo, revision, hg_path)
+
+ # If there is no hgrc file, then assume repo is absent
+ # and perform clone. Otherwise, perform pull and update.
+ if not clone and not update:
+ out = hg.get_remote_revision()
+ module.exit_json(after=out, changed=False)
+ if not os.path.exists(hgrc):
+ if clone:
+ (rc, out, err) = hg.clone()
+ if rc != 0:
+ module.fail_json(msg=err)
+ else:
+ module.exit_json(changed=False)
+ elif not update:
+ # Just return having found a repo already in the dest path
+ before = hg.get_revision()
+ elif hg.at_revision:
+ # no update needed, don't pull
+ before = hg.get_revision()
+
+ # but force and purge if desired
+ cleaned = hg.cleanup(force, purge)
+ else:
+        # get the current state before pulling
+ before = hg.get_revision()
+
+ # can perform force and purge
+ cleaned = hg.cleanup(force, purge)
+
+ (rc, out, err) = hg.pull()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ (rc, out, err) = hg.update()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ after = hg.get_revision()
+ if before != after or cleaned:
+ changed = True
+
+ module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py
new file mode 100644
index 00000000..06c9fca4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hipchat
+short_description: Send a message to Hipchat
+description:
+ - Send a message to a Hipchat room, with options to control the formatting.
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - ID or name of the room.
+ required: true
+ msg_from:
+ type: str
+ description:
+      - Name the message will appear to be sent from. Maximum length is 15
+        characters; above this it will be truncated.
+ default: Ansible
+ aliases: [from]
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ color:
+ type: str
+ description:
+ - Background color for the message.
+ default: yellow
+ choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
+ msg_format:
+ type: str
+ description:
+ - Message format.
+ default: text
+ choices: [ "text", "html" ]
+ notify:
+ description:
+ - If true, a notification will be triggered for users in the room.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ api:
+ type: str
+ description:
+      - API URL if using a self-hosted Hipchat server. For Hipchat API version
+        2, use the default URI with C(/v2) instead of C(/v1).
+ default: 'https://api.hipchat.com/v1'
+
+author:
+- Shirou Wakayama (@shirou)
+- Paul Bourdel (@pb8226)
+'''
+
+EXAMPLES = '''
+- name: Send a message to a Hipchat room
+ community.general.hipchat:
+ room: notif
+ msg: Ansible task finished
+
+- name: Send a message to a Hipchat room using Hipchat API version 2
+ community.general.hipchat:
+ api: https://api.hipchat.com/v2/
+ token: OAUTH2_TOKEN
+ room: notify
+ msg: Ansible task finished
+'''
+
+# ===========================================
+# HipChat module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+DEFAULT_URI = "https://api.hipchat.com/v1"
+
+MSG_URI_V1 = "/rooms/message"
+
+NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
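+# e.g. "/room/12345/notification" once {id_or_name} is substituted
+# (illustrative room id).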
+
+
+def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=MSG_URI_V1):
+    '''Send a message to a Hipchat v1 server.'''
+
+ params = {}
+ params['room_id'] = room
+ params['from'] = msg_from[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['api'] = api
+ params['notify'] = int(notify)
+
+ url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
+ data = urlencode(params)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
+    '''Send a message to a Hipchat v2 server.'''
+
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+
+ body = dict()
+ body['message'] = msg
+ body['color'] = color
+ body['message_format'] = msg_format
+ body['notify'] = notify
+
+ POST_URL = api + NOTIFY_URI_V2
+
+ url = POST_URL.replace('{id_or_name}', pathname2url(room))
+ data = json.dumps(body)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+
+ # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
+ # 204 to be the expected result code.
+ if info['status'] in [200, 204]:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ msg_from=dict(default="Ansible", aliases=['from']),
+ color=dict(default="yellow", choices=["yellow", "red", "green",
+ "purple", "gray", "random"]),
+ msg_format=dict(default="text", choices=["text", "html"]),
+ notify=dict(default=True, type='bool'),
+ validate_certs=dict(default=True, type='bool'),
+ api=dict(default=DEFAULT_URI),
+ ),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ room = str(module.params["room"])
+ msg = module.params["msg"]
+ msg_from = module.params["msg_from"]
+ color = module.params["color"]
+ msg_format = module.params["msg_format"]
+ notify = module.params["notify"]
+ api = module.params["api"]
+
+ try:
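+        # Hipchat API v2 endpoints live under /v2; any other URL is treated
+        # as a v1 server.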
+        if '/v2' in api:
+ send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ else:
+ send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py
new file mode 100644
index 00000000..21dea647
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py
@@ -0,0 +1,971 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on macports (Jimmy Tang <jcftang@gmail.com>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Andrew Dunham (@andrew-d)"
+requirements:
+ - "python >= 2.6"
+ - homebrew must already be installed on the target system
+short_description: Package manager for Homebrew
+description:
+ - Manages Homebrew packages
+options:
+ name:
+ description:
+ - A list of names of packages to install/remove.
+ aliases: [ 'formula', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "A ':' separated list of paths to search for 'brew' executable.
+ Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command,
+ providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - state of the package.
+ choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ]
+ default: present
+ type: str
+ update_homebrew:
+ description:
+ - update homebrew itself first.
+ type: bool
+ default: no
+ aliases: ['update-brew']
+ upgrade_all:
+ description:
+ - upgrade all homebrew packages.
+ type: bool
+ default: no
+ aliases: ['upgrade']
+ install_options:
+ description:
+ - options flags to install a package.
+ aliases: ['options']
+ type: list
+ elements: str
+ upgrade_options:
+ description:
+ - Option flags to upgrade.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+notes:
+  - When used with a C(loop:), each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+# Install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- community.general.homebrew:
+ name: foo
+ path: /my/other/location/bin
+ state: present
+
+# Update homebrew first and install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+ update_homebrew: yes
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: latest
+ update_homebrew: yes
+
+# Update homebrew and upgrade all packages
+- community.general.homebrew:
+ update_homebrew: yes
+ upgrade_all: yes
+
+# Miscellaneous other examples
+- community.general.homebrew:
+ name: foo
+ state: head
+
+- community.general.homebrew:
+ name: foo
+ state: linked
+
+- community.general.homebrew:
+ name: foo
+ state: absent
+
+- community.general.homebrew:
+ name: foo,bar
+ state: absent
+
+- community.general.homebrew:
+ name: foo
+ state: present
+ install_options: with-baz,enable-debug
+
+- name: Use ignored-pinned option while upgrading all
+ community.general.homebrew:
+ upgrade_all: yes
+ upgrade_options: ignored-pinned
+'''
+
+RETURN = '''
+msg:
+  description: Summary of the changed and unchanged packages after the module run.
+ returned: always
+ type: str
+ sample: "Changed: 0, Unchanged: 2"
+unchanged_pkgs:
+ description:
+    - List of package names which are unchanged after module run.
+ returned: success
+ type: list
+ sample: ["awscli", "ag"]
+ version_added: '0.2.0'
+changed_pkgs:
+ description:
+    - List of package names which are changed after module run.
+ returned: success
+ type: list
+ sample: ['git', 'git-cola']
+ version_added: '0.2.0'
+'''
+
+import os.path
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
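+    # The compiled pattern matches any character *not* listed in `s`, so a
+    # .search() hit on a name means it contains an invalid character.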
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class Homebrew(object):
+ '''A class to manage Homebrew packages.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_PACKAGE_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \+ # plusses
+ \- # dashes
+ : # colons (for URLs)
+ @ # at-sign
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, string_types):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_package(cls, package):
+        '''A valid package is either None or a string containing only characters from VALID_PACKAGE_CHARS.'''
+
+ if package is None:
+ return True
+
+ return (
+ isinstance(package, string_types)
+ and not cls.INVALID_PACKAGE_REGEX.search(package)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - None
+ - installed
+ - upgraded
+ - head
+ - linked
+ - unlinked
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'upgraded',
+ 'head',
+ 'linked',
+ 'unlinked',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_package(self):
+ return self._current_package
+
+ @current_package.setter
+ def current_package(self, package):
+ if not self.valid_package(package):
+ self._current_package = None
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(package)
+ raise HomebrewException(self.message)
+
+ else:
+ self._current_package = package
+ return package
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path, packages=None, state=None,
+ update_homebrew=False, upgrade_all=False,
+ install_options=None, upgrade_options=None):
+ if not install_options:
+ install_options = list()
+ if not upgrade_options:
+ upgrade_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all,
+ install_options=install_options,
+ upgrade_options=upgrade_options,)
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.changed_pkgs = []
+ self.unchanged_pkgs = []
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_package_is_installed(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ self.current_package,
+ ]
+ rc, out, err = self.module.run_command(cmd)
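+        # 'brew info <formula>' mentions 'Built from source' or 'Poured from
+        # bottle' only for formulae that are actually installed.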
+ for line in out.split('\n'):
+ if (
+ re.search(r'Built from source', line)
+ or re.search(r'Poured from bottle', line)
+ ):
+ return True
+
+ return False
+
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'outdated',
+ self.current_package,
+ ])
+
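+        # 'brew outdated <formula>' exits non-zero when a newer version is
+        # available, so a non-zero rc means the formula is outdated.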
+ return rc != 0
+
+ def _current_package_is_installed_from_head(self):
+ if not Homebrew.valid_package(self.current_package):
+ return False
+ elif not self._current_package_is_installed():
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'info',
+ self.current_package,
+ ])
+
+ try:
+ version_info = [line for line in out.split('\n') if line][0]
+ except IndexError:
+ return False
+
+ return version_info.split(' ')[-1] == 'HEAD'
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+ if self.upgrade_all:
+ self._upgrade_all()
+
+ if self.packages:
+ if self.state == 'installed':
+ return self._install_packages()
+ elif self.state == 'upgraded':
+ return self._upgrade_packages()
+ elif self.state == 'head':
+ return self._install_packages()
+ elif self.state == 'linked':
+ return self._link_packages()
+ elif self.state == 'unlinked':
+ return self._unlink_packages()
+ elif self.state == 'absent':
+ return self._uninstall_packages()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew would be updated.'
+ raise HomebrewException(self.message)
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew packages would be upgraded.'
+ raise HomebrewException(self.message)
+ cmd = [self.brew_path, 'upgrade'] + self.upgrade_options
+
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ if not out:
+ self.message = 'Homebrew packages already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already installed: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be installed: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ if self.state == 'head':
+ head = '--HEAD'
+ else:
+ head = None
+
+ opts = (
+ [self.brew_path, 'install']
+ + self.install_options
+ + [self.current_package, head]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package installed: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _install_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._install_current_package()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_package(self):
+ command = 'upgrade'
+
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ command = 'install'
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.message = 'Package is already upgraded: {0}'.format(
+ self.current_package,
+ )
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be upgraded: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package upgraded: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_all_packages(self):
+ opts = (
+ [self.brew_path, 'upgrade']
+ + self.install_options
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed = True
+ self.message = 'All packages upgraded.'
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_packages(self):
+ if not self.packages:
+ self._upgrade_all_packages()
+ else:
+ for package in self.packages:
+ self.current_package = package
+ self._upgrade_current_package()
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already uninstalled: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be uninstalled: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'uninstall', '--force']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _uninstall_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._uninstall_current_package()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+
+ # linked --------------------------------- {{{
+ def _link_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be linked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'link']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package linked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _link_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._link_current_package()
+
+ return True
+ # /linked -------------------------------- }}}
+
+ # unlinked ------------------------------- {{{
+ def _unlink_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be unlinked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'unlink']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package unlinked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _unlink_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._unlink_current_package()
+
+ return True
+ # /unlinked ------------------------------ }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "formula"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded", "head",
+ "linked", "unlinked",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ upgrade_options=dict(
+ default=None,
+ type='list',
+ elements='str',
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ packages = p['name']
+ else:
+ packages = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+    if state in ('present', 'installed'):
+        state = 'installed'
+    elif state in ('latest', 'upgraded'):
+        state = 'upgraded'
+    elif state in ('absent', 'removed', 'uninstalled'):
+        state = 'absent'
+    # 'head', 'linked' and 'unlinked' are already canonical
+
+ update_homebrew = p['update_homebrew']
+ if not update_homebrew:
+ module.run_command_environ_update.update(
+ dict(HOMEBREW_NO_AUTO_UPDATE="True")
+ )
+ upgrade_all = p['upgrade_all']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ p['upgrade_options'] = p['upgrade_options'] or []
+ upgrade_options = ['--{0}'.format(upgrade_option)
+ for upgrade_option in p['upgrade_options']]
+ brew = Homebrew(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all, install_options=install_options,
+ upgrade_options=upgrade_options)
+ (failed, changed, message) = brew.run()
+ changed_pkgs = brew.changed_pkgs
+ unchanged_pkgs = brew.unchanged_pkgs
+
+ if failed:
+ module.fail_json(msg=message)
+ module.exit_json(
+ changed=changed,
+ msg=message,
+ unchanged_pkgs=unchanged_pkgs,
+ changed_pkgs=changed_pkgs
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py
new file mode 100644
index 00000000..feb1ba68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py
@@ -0,0 +1,875 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# Copyright: (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author:
+- "Indrajit Raychaudhuri (@indrajitr)"
+- "Daniel Jaouen (@danieljaouen)"
+- "Enric Lluelles (@enriclluelles)"
+requirements:
+- "python >= 2.6"
+short_description: Install and uninstall homebrew casks
+description:
+- Manages Homebrew casks.
+options:
+ name:
+ description:
+ - Name of cask to install or remove.
+ aliases: [ 'cask', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - State of the cask.
+ choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ]
+ default: present
+ type: str
+ sudo_password:
+ description:
+ - The sudo password to be passed to SUDO_ASKPASS.
+ required: false
+ type: str
+ update_homebrew:
+ description:
+ - Update homebrew itself first.
+ - Note that C(brew cask update) is a synonym for C(brew update).
+ type: bool
+ default: no
+ aliases: [ 'update-brew' ]
+ install_options:
+ description:
+ - Options flags to install a package.
+ aliases: [ 'options' ]
+ type: list
+ elements: str
+ accept_external_apps:
+ description:
+ - Allow external apps.
+ type: bool
+ default: no
+ upgrade_all:
+ description:
+ - Upgrade all casks.
+ - Mutually exclusive with C(upgraded) state.
+ type: bool
+ default: no
+ aliases: [ 'upgrade' ]
+ greedy:
+ description:
+ - Upgrade casks that auto update.
+      - Passes C(--greedy) to C(brew cask outdated) when checking
+        if an installed cask has a newer version available.
+ type: bool
+ default: no
+'''
+EXAMPLES = '''
+- name: Install cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+
+- name: Remove cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'appdir=/Applications'
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'debug,appdir=/Applications'
+
+- name: Allow external app
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ accept_external_apps: True
+
+- name: Remove cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+ install_options: force
+
+- name: Upgrade all casks
+ community.general.homebrew_cask:
+ upgrade_all: true
+
+- name: Upgrade given cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: upgraded
+ install_options: force
+
+- name: Upgrade cask with greedy option
+ community.general.homebrew_cask:
+ name: 1password
+ state: upgraded
+ greedy: True
+
+- name: Using sudo password for installing cask
+ community.general.homebrew_cask:
+ name: wireshark
+ state: present
+ sudo_password: "{{ ansible_become_pass }}"
+'''
+
+import os
+import re
+import tempfile
+from distutils import version
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
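+    # As in the homebrew module: the compiled pattern matches any character
+    # *not* listed in `s`, so a .search() hit flags an invalid character.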
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \- # dashes
+ @ # at symbol
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, (string_types)):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+        '''A valid cask is either None or a string containing only characters from VALID_CASK_CHARS.'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, string_types)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - installed
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_cask(self):
+ return self._current_cask
+
+ @current_cask.setter
+ def current_cask(self, cask):
+ if not self.valid_cask(cask):
+ self._current_cask = None
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(cask)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._current_cask = cask
+ return cask
+
+ @property
+ def brew_version(self):
+ try:
+ return self._brew_version
+ except AttributeError:
+ return None
+
+ @brew_version.setter
+ def brew_version(self, brew_version):
+ self._brew_version = brew_version
+
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path=path, casks=None, state=None,
+ sudo_password=None, update_homebrew=False,
+ install_options=None, accept_external_apps=False,
+ upgrade_all=False, greedy=False):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy, )
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewCaskException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewCaskException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewCaskException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_cask_is_outdated(self):
+ if not self.valid_cask(self.current_cask):
+ return False
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'outdated', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'outdated']
+
+ cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask]
+
+ rc, out, err = self.module.run_command(cask_is_outdated_command)
+
+ return out != ""
+
+ def _current_cask_is_installed(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, "list", "--cask"]
+ else:
+ base_opts = [self.brew_path, "cask", "list"]
+
+ cmd = base_opts + [self.current_cask]
+ rc, out, err = self.module.run_command(cmd)
+
+ return rc == 0
+
+ def _get_brew_version(self):
+ if self.brew_version:
+ return self.brew_version
+
+ cmd = [self.brew_path, '--version']
+
+ rc, out, err = self.module.run_command(cmd, check_rc=True)
+
+ # get version string from first line of "brew --version" output
+ version = out.split('\n')[0].split(' ')[1]
+ self.brew_version = version
+ return self.brew_version
+
+ def _brew_cask_command_is_deprecated(self):
+ # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/)
+ return version.LooseVersion(self._get_brew_version()) >= version.LooseVersion('2.6.0')
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.upgrade_all:
+ return self._upgrade_all()
+
+ if self.casks:
+ if self.state == 'installed':
+ return self._install_casks()
+ elif self.state == 'upgraded':
+ return self._upgrade_casks()
+ elif self.state == 'absent':
+ return self._uninstall_casks()
+
+ self.failed = True
+ self.message = "You must select a cask to install."
+ raise HomebrewCaskException(self.message)
+
+ # sudo_password fix ---------------------- {{{
+ def _run_command_with_sudo_password(self, cmd):
+ rc, out, err = '', '', ''
+
+ with tempfile.NamedTemporaryFile() as sudo_askpass_file:
+ sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password))
+ os.chmod(sudo_askpass_file.name, 0o700)
+ sudo_askpass_file.file.close()
+
+ rc, out, err = self.module.run_command(
+ cmd,
+ environ_update={'SUDO_ASKPASS': sudo_askpass_file.name}
+ )
+
+ self.module.add_cleanup_file(sudo_askpass_file.name)
+
+ return (rc, out, err)
+ # /sudo_password fix --------------------- }}}
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Casks would be upgraded.'
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ cmd = [self.brew_path, 'upgrade', '--cask']
+ else:
+ cmd = [self.brew_path, 'cask', 'upgrade']
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE):
+ self.message = 'Homebrew casks already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew casks upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be installed: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'install', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'install']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask installed: {0}'.format(self.current_cask)
+ return True
+ elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err):
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _install_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._install_current_cask()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_cask(self):
+ command = 'upgrade'
+
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ command = 'install'
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.message = 'Cask is already upgraded: {0}'.format(
+ self.current_cask,
+ )
+ self.unchanged_count += 1
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be upgraded: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, command, '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', command]
+
+ opts = base_opts + self.install_options + [self.current_cask]
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask upgraded: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _upgrade_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._upgrade_current_cask()
+
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already uninstalled: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be uninstalled: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'uninstall', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'uninstall']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _uninstall_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._uninstall_current_cask()
+
+ return True
+ # /uninstalled --------------------------- }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "cask"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ sudo_password=dict(
+ type="str",
+ required=False,
+ no_log=True,
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ accept_external_apps=dict(
+ default=False,
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ greedy=dict(
+ default=False,
+ type='bool',
+ ),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ casks = p['name']
+ else:
+ casks = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ sudo_password = p['sudo_password']
+
+ update_homebrew = p['update_homebrew']
+ upgrade_all = p['upgrade_all']
+ greedy = p['greedy']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ accept_external_apps = p['accept_external_apps']
+
+ brew_cask = HomebrewCask(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy,
+ )
+ (failed, changed, message) = brew_cask.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+
+if __name__ == '__main__':
+ main()
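
The sudo_password support above hinges on sudo's SUDO_ASKPASS hook: the module writes a throwaway, owner-only shell script that echoes the password, and brew's internal sudo -A calls pick it up. A minimal standalone sketch of the same technique outside AnsibleModule (function name and paths here are hypothetical, not part of the module):

import os
import stat
import subprocess
import tempfile

def run_with_sudo_askpass(cmd, sudo_password):
    # Write a short-lived helper script that prints the password.
    with tempfile.NamedTemporaryFile('w', suffix='.sh', delete=False) as f:
        f.write("#!/bin/sh\necho '%s'\n" % sudo_password)
        askpass = f.name
    os.chmod(askpass, stat.S_IRWXU)  # 0o700, owner-only, as in the module
    try:
        # sudo -A consults $SUDO_ASKPASS instead of prompting on a TTY
        env = dict(os.environ, SUDO_ASKPASS=askpass)
        return subprocess.run(cmd, env=env, capture_output=True, text=True)
    finally:
        os.unlink(askpass)  # never leave the password behind on disk
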
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py
new file mode 100644
index 00000000..d31da485
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_tap
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+short_description: Tap a Homebrew repository.
+description:
+ - Tap external Homebrew repositories.
+options:
+ name:
+ description:
+ - The GitHub user/organization repository to tap.
+ required: true
+ aliases: ['tap']
+ type: list
+ elements: str
+ url:
+ description:
+ - The optional git URL of the repository to tap. The URL is not
+ assumed to be on GitHub, and the protocol doesn't have to be HTTP.
+ Any location and protocol that git can handle is fine.
+ - The I(name) option may not be a list of multiple taps (only a
+ single tap) when this option is provided.
+ required: false
+ type: str
+ state:
+ description:
+ - State of the repository.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: 'present'
+ type: str
+requirements: [ homebrew ]
+'''
+
+EXAMPLES = '''
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+
+- name: Tap a Homebrew repository, state absent
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+ state: absent
+
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes,homebrew/science
+ state: present
+
+- name: Tap a Homebrew repository using url, state present
+ community.general.homebrew_tap:
+ name: telemachus/brew
+ url: 'https://bitbucket.org/telemachus/brew'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def a_valid_tap(tap):
+ '''Returns a truthy regex match if the tap name is valid.'''
+ regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
+ return regex.match(tap)
+
+
+def already_tapped(module, brew_path, tap):
+ '''Returns True if already tapped.'''
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ ])
+
+ taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
+ tap_name = re.sub('homebrew-', '', tap.lower())
+
+ return tap_name in taps
+
+
+def add_tap(module, brew_path, tap, url=None):
+ '''Adds a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif not already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ tap,
+ url,
+ ])
+ if rc == 0:
+ changed = True
+ msg = 'successfully tapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to tap: %s' % tap
+
+ else:
+ msg = 'already tapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def add_taps(module, brew_path, taps):
+ '''Adds one or more taps.'''
+ failed, unchanged, added, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = add_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ added += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'added: %d, unchanged: %d, error: ' + msg
+ msg = msg % (added, unchanged)
+ elif added:
+ changed = True
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+ else:
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+
+ return (failed, changed, msg)
+
+
+def remove_tap(module, brew_path, tap):
+ '''Removes a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'untap',
+ tap,
+ ])
+ if not already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully untapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to untap: %s' % tap
+
+ else:
+ msg = 'already untapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def remove_taps(module, brew_path, taps):
+ '''Removes one or more taps.'''
+ failed, unchanged, removed, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = remove_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ removed += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'removed: %d, unchanged: %d, error: ' + msg
+ msg = msg % (removed, unchanged)
+ elif removed:
+ changed = True
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+ else:
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+ return (failed, changed, msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['tap'], type='list', required=True, elements='str'),
+ url=dict(default=None, required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True,
+ )
+
+ brew_path = module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=['/usr/local/bin', '/opt/homebrew/bin']
+ )
+
+ taps = module.params['name']
+ url = module.params['url']
+
+ if module.params['state'] == 'present':
+ if url is None:
+ # No tap URL provided explicitly, continue with bulk addition
+ # of all the taps.
+ failed, changed, msg = add_taps(module, brew_path, taps)
+ else:
+ # When a tap URL is provided explicitly, we only allow adding a
+ # *single* tap. Validate, then add that one tap.
+ if len(taps) > 1:
+ msg = "List of multiple taps may not be provided with 'url' option."
+ module.fail_json(msg=msg)
+ else:
+ failed, changed, msg = add_tap(module, brew_path, taps[0], url)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+ elif module.params['state'] == 'absent':
+ failed, changed, msg = remove_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
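
For reference, the tap-name rule enforced by a_valid_tap() above is '<user>/<repo>', where the repository part may carry an optional 'homebrew-' prefix that brew strips. A quick standalone check (the sample names are illustrative only):

import re

TAP_RE = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')

for tap in ('homebrew/dupes', 'telemachus/homebrew-brew', 'not a tap'):
    print(tap, '->', bool(TAP_RE.match(tap)))
# homebrew/dupes -> True
# telemachus/homebrew-brew -> True
# not a tap -> False
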
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
new file mode 100644
index 00000000..0b96af04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ environment:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ type: str
+ description:
+ - The username of the person doing the deployment
+ repo:
+ type: str
+ description:
+ - URL of the project repository
+ revision:
+ type: str
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+'''
+
+EXAMPLES = '''
+- name: Notify Honeybadger.io about an app deployment
+ community.general.honeybadger_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: b6826b8
+ repo: 'git@github.com:user/repo.git'
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[environment]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
+
+ url = module.params.get('url')
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
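
The module reports a deploy as a single form-encoded POST; the Rails-style bracketed field names are what the Honeybadger deploys endpoint expects. A stdlib-only sketch of the body it builds (all values hypothetical):

from urllib.parse import urlencode

params = {
    'deploy[environment]': 'staging',
    'deploy[local_username]': 'ansible',
    'deploy[revision]': 'b6826b8',
    'api_key': 'AAAAAA',
}
print(urlencode(params))
# deploy%5Benvironment%5D=staging&deploy%5Blocal_username%5D=ansible&...
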
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py
new file mode 100644
index 00000000..1e37aee3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hpilo_boot
+author: Dag Wieers (@dagwieers)
+short_description: Boot system using specific media through HP iLO interface
+description:
+- "This module boots a system through its HP iLO interface. The boot media
+ can be one of: cdrom, floppy, hdd, network or usb."
+- This module requires the hpilo python module.
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ media:
+ description:
+ - The boot media to boot the system from
+ choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ]
+ image:
+ description:
+ - The URL of a cdrom, floppy or usb boot media image.
+ protocol://username:password@hostname:port/filename
+ - protocol is either 'http' or 'https'
+ - username:password is optional
+ - port is optional
+ state:
+ description:
+ - The state of the boot media.
+ - "no_boot: Do not boot from the device"
+ - "boot_once: Boot from the device once and then notthereafter"
+ - "boot_always: Boot from the device each time the server is rebooted"
+ - "connect: Connect the virtual media device and set to boot_always"
+ - "disconnect: Disconnects the virtual media device and set to no_boot"
+ - "poweroff: Power off the server"
+ default: boot_once
+ choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
+ force:
+ description:
+ - Whether to force a reboot (even when the system is already booted).
+ - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+ default: no
+ type: bool
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- python-hpilo
+notes:
+- To use a USB key image you need to specify floppy as boot media.
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ media: cdrom
+ image: http://some-web-server/iso/boot.iso
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+
+- name: Power off a server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_HOST
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ state: poweroff
+ delegate_to: localhost
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+import time
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
+ image=dict(type='str'),
+ state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
+ force=dict(type='bool', default=False),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ )
+ )
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ media = module.params['media']
+ image = module.params['image']
+ state = module.params['state']
+ force = module.params['force']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+ changed = False
+ status = {}
+ power_status = 'UNKNOWN'
+
+ if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
+
+ # Workaround for: Error communicating with iLO: Problem manipulating EV
+ try:
+ ilo.set_one_time_boot(media)
+ except hpilo.IloError:
+ time.sleep(60)
+ ilo.set_one_time_boot(media)
+
+ # TODO: Verify if image URL exists/works
+ if image:
+ ilo.insert_virtual_media(media, image)
+ changed = True
+
+ if media == 'cdrom':
+ ilo.set_vm_status('cdrom', state, True)
+ status = ilo.get_vm_status()
+ changed = True
+ elif media in ('floppy', 'usb'):
+ ilo.set_vf_status(state, True)
+ status = ilo.get_vf_status()
+ changed = True
+
+ # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
+ if state in ('boot_once', 'boot_always') or force:
+
+ power_status = ilo.get_host_power_status()
+
+ if not force and power_status == 'ON':
+ module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
+
+ if power_status == 'ON':
+ ilo.warm_boot_server()
+# ilo.cold_boot_server()
+ changed = True
+ else:
+ ilo.press_pwr_btn()
+# ilo.reset_server()
+# ilo.set_host_power(host_power=True)
+ changed = True
+
+ elif state == 'poweroff':
+
+ power_status = ilo.get_host_power_status()
+
+ if not power_status == 'OFF':
+ ilo.hold_pwr_btn()
+# ilo.set_host_power(host_power=False)
+ changed = True
+
+ module.exit_json(changed=changed, power=power_status, **status)
+
+
+if __name__ == '__main__':
+ main()
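
The dense getattr() that resolves ssl_version simply normalizes the option value into the name of an ssl PROTOCOL_* constant (hpilo.ssl here being the stdlib ssl module that python-hpilo imports). A standalone sketch of that mapping:

def protocol_name(choice):
    # 'TLSv1_2' -> upper() -> 'TLSV1_2' -> replace('V', 'v') -> 'TLSv1_2'
    return 'PROTOCOL_' + choice.upper().replace('V', 'v')

for choice in ('SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2'):
    print(choice, '->', protocol_name(choice))
# SSLv3 -> PROTOCOL_SSLv3
# SSLv23 -> PROTOCOL_SSLv23
# TLSv1 -> PROTOCOL_TLSv1
# TLSv1_1 -> PROTOCOL_TLSv1_1
# TLSv1_2 -> PROTOCOL_TLSv1_2
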
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+ This information includes hardware- and network-related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from a HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+ - macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info (never reached: same condition as the branch above)
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
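
The hw_memory_total figure at the end is pulled out of strings like '32 GB' that iLO reports per CPU in memory_details_summary. A standalone sketch of that aggregation over a hypothetical payload:

import re

summary = {
    'cpu_1': {'total_memory_size': '32 GB'},
    'cpu_2': {'total_memory_size': '32 GB'},
}
total = 0
for details in summary.values():
    ram = re.search(r'(\d+)\s+(\w+)', details.get('total_memory_size', ''))
    if ram and ram.group(2) == 'GB':
        total += int(ram.group(1))
print('{0} GB'.format(total))  # 64 GB
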
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+ This information includes hardware- and network-related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from a HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+ - macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info (never reached: same condition as the branch above)
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
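
For type-233 entries, parse_flat_interface() turns iLO's 1-based port numbers into hw_eth0, hw_eth1, ... keys and normalizes the MAC into both colon- and dash-separated forms. The helper is repeated below so the sketch runs standalone; the sample entries are hypothetical:

def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
    # Same logic as the module's helper above.
    try:
        infoname = 'hw_eth' + str(int(entry['Port']) - 1)
    except Exception:
        infoname = non_numeric
    return infoname, {'macaddress': entry['MAC'].replace('-', ':'),
                      'macaddress_dash': entry['MAC']}

print(parse_flat_interface({'Port': '1', 'MAC': '00-11-22-33-44-55'}))
# ('hw_eth0', {'macaddress': '00:11:22:33:44:55', 'macaddress_dash': '00-11-22-33-44-55'})
print(parse_flat_interface({'Port': 'iLO', 'MAC': '00-11-22-33-44-BA'}))
# ('hw_eth_ilo', {'macaddress': '00:11:22:33:44:BA', 'macaddress_dash': '00-11-22-33-44-BA'})
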
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py
new file mode 100644
index 00000000..451e4b06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hponcfg
+author: Dag Wieers (@dagwieers)
+short_description: Configure HP iLO interface using hponcfg
+description:
+- This module configures the HP iLO interface using hponcfg.
+options:
+ path:
+ description:
+ - The XML file as accepted by hponcfg.
+ required: true
+ aliases: ['src']
+ minfw:
+ description:
+ - The minimum firmware level needed.
+ required: false
+ executable:
+ description:
+ - Path to the hponcfg executable (`hponcfg` which uses $PATH).
+ default: hponcfg
+ verbose:
+ description:
+ - Run hponcfg in verbose mode (-v).
+ default: no
+ type: bool
+requirements:
+- hponcfg tool
+notes:
+- You need a working hponcfg on the target system.
+'''
+
+EXAMPLES = r'''
+- name: Example hponcfg configuration XML
+ ansible.builtin.copy:
+ content: |
+ <ribcl VERSION="2.0">
+ <login USER_LOGIN="user" PASSWORD="password">
+ <rib_info MODE="WRITE">
+ <mod_global_settings>
+ <session_timeout value="0"/>
+ <ssh_status value="Y"/>
+ <ssh_port value="22"/>
+ <serial_cli_status value="3"/>
+ <serial_cli_speed value="5"/>
+ </mod_global_settings>
+ </rib_info>
+ </login>
+ </ribcl>
+ dest: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO using enable-ssh.xml
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO on VMware ESXi hypervisor
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+ executable: /opt/hp/tools/hponcfg
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ minfw=dict(type='str'),
+ executable=dict(default='hponcfg', type='str'),
+ verbose=dict(default=False, type='bool'),
+ )
+ )
+
+ # Consider every action a change (not idempotent yet!)
+ changed = True
+
+ src = module.params['src']
+ minfw = module.params['minfw']
+ executable = module.params['executable']
+ verbose = module.params['verbose']
+
+ options = ' -f %s' % src
+
+ if verbose:
+ options += ' -v'
+
+ if minfw:
+ options += ' -m %s' % minfw
+
+ rc, stdout, stderr = module.run_command('%s %s' % (executable, options))
+
+ if rc != 0:
+ module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=changed, stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
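
hponcfg runs are treated as always-changed (the module is not idempotent yet), so the interesting part is just the command line it assembles. A sketch with hypothetical values shows what gets executed:

executable, src, minfw, verbose = 'hponcfg', '/tmp/enable-ssh.xml', '1.28', True

options = ' -f %s' % src
if verbose:
    options += ' -v'
if minfw:
    options += ' -m %s' % minfw

print('%s %s' % (executable, options))
# hponcfg  -f /tmp/enable-ssh.xml -v -m 1.28  (doubled space: options already starts with one)
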
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py
new file mode 100644
index 00000000..6ff04131
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Nimbis Services, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: htpasswd
+short_description: manage user files for basic authentication
+description:
+ - Add and remove username/password entries in a password file using htpasswd.
+ - This is used by web servers such as Apache and Nginx for basic authentication.
+options:
+ path:
+ type: path
+ required: true
+ aliases: [ dest, destfile ]
+ description:
+ - Path to the file that contains the usernames and passwords
+ name:
+ type: str
+ required: true
+ aliases: [ username ]
+ description:
+ - User name to add or remove
+ password:
+ type: str
+ required: false
+ description:
+ - Password associated with user.
+ - Must be specified if user does not exist yet.
+ crypt_scheme:
+ type: str
+ required: false
+ default: "apr_md5_crypt"
+ description:
+ - Encryption scheme to be used. As well as the four Apache-compatible
+ choices listed here, you can also use any other hash supported by
+ passlib, such as md5_crypt and sha256_crypt, which are Linux passwd
+ hashes. If you do so, the password file will not be compatible with
+ Apache or Nginx.
+ - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext)'
+ state:
+ type: str
+ required: false
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the user entry should be present or not
+ create:
+ required: false
+ type: bool
+ default: "yes"
+ description:
+ - Used with C(state=present). If specified, the file will be created
+ if it does not already exist. If set to "no", will fail if the
+ file does not exist
+notes:
+ - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
+ - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
+ - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
+requirements: [ passlib>=1.6 ]
+author: "Ansible Core Team"
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = """
+- name: Add a user to a password file and ensure permissions are set
+ community.general.htpasswd:
+ path: /etc/nginx/passwdfile
+ name: janedoe
+ password: '9s36?;fyNp'
+ owner: root
+ group: www-data
+ mode: 0640
+
+- name: Remove a user from a password file
+ community.general.htpasswd:
+ path: /etc/apache2/passwdfile
+ name: foobar
+ state: absent
+
+- name: Add a user to a password file suitable for use by libpam-pwdfile
+ community.general.htpasswd:
+ path: /etc/mail/passwords
+ name: alex
+ password: oedu2eGh
+ crypt_scheme: md5_crypt
+"""
+
+
+import os
+import tempfile
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+PASSLIB_IMP_ERR = None
+try:
+ from passlib.apache import HtpasswdFile, htpasswd_context
+ from passlib.context import CryptContext
+ import passlib
+except ImportError:
+ PASSLIB_IMP_ERR = traceback.format_exc()
+ passlib_installed = False
+else:
+ passlib_installed = True
+
+apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
+
+
+def create_missing_directories(dest):
+ destpath = os.path.dirname(dest)
+ if not os.path.exists(destpath):
+ os.makedirs(destpath)
+
+
+def present(dest, username, password, crypt_scheme, create, check_mode):
+ """ Ensures user is present
+
+ Returns (msg, changed) """
+ if crypt_scheme in apache_hashes:
+ context = htpasswd_context
+ else:
+ context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
+ if not os.path.exists(dest):
+ if not create:
+ raise ValueError('Destination %s does not exist' % dest)
+ if check_mode:
+ return ("Create %s" % dest, True)
+ create_missing_directories(dest)
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Created %s and added %s" % (dest, username), True)
+ else:
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
+
+ found = None
+ if getattr(ht, 'check_password', None):
+ found = ht.check_password(username, password)
+ else:
+ found = ht.verify(username, password)
+
+ if found:
+ return ("%s already present" % username, False)
+ else:
+ if not check_mode:
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Add/update %s" % username, True)
+
+
+def absent(dest, username, check_mode):
+ """ Ensures user is absent
+
+ Returns (msg, changed) """
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False)
+ else:
+ ht = HtpasswdFile(dest)
+
+ if username not in ht.users():
+ return ("%s not present" % username, False)
+ else:
+ if not check_mode:
+ ht.delete(username)
+ ht.save()
+ return ("Remove %s" % username, True)
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ arg_spec = dict(
+ path=dict(type='path', required=True, aliases=["dest", "destfile"]),
+ name=dict(type='str', required=True, aliases=["username"]),
+ password=dict(type='str', required=False, default=None, no_log=True),
+ crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"),
+ state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
+ create=dict(type='bool', default=True),
+
+ )
+ module = AnsibleModule(argument_spec=arg_spec,
+ add_file_common_args=True,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ username = module.params['name']
+ password = module.params['password']
+ crypt_scheme = module.params['crypt_scheme']
+ state = module.params['state']
+ create = module.params['create']
+ check_mode = module.check_mode
+
+ if not passlib_installed:
+ module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
+
+ # Check the file for blank lines in an effort to avoid the "need more than 1 value to unpack" error.
+ try:
+ f = open(path, "r")
+ except IOError:
+ # No preexisting file to remove blank lines from
+ f = None
+ else:
+ try:
+ lines = f.readlines()
+ finally:
+ f.close()
+
+ # Rewriting the file always reports a change, so only rewrite it if it actually contains blank lines.
+ strip = False
+ for line in lines:
+ if not line.strip():
+ strip = True
+ break
+
+ if strip:
+ # In check mode, redirect the rewrite to a temporary file so the real file is left untouched
+ if check_mode:
+ temp = tempfile.NamedTemporaryFile()
+ path = temp.name
+ f = open(path, "w")
+ try:
+ f.writelines(line for line in lines if line.strip())
+ finally:
+ f.close()
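+
+ # Context for the rewrite above: passlib parses each htpasswd line as
+ # "user:hash", and a blank line yields a single field, which is the
+ # presumed source of the "need more than 1 value to unpack" ValueError
+ # this block works around.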
+
+ try:
+ if state == 'present':
+ (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+ elif state == 'absent':
+ if not os.path.exists(path):
+ module.exit_json(msg="%s not present" % username,
+ warnings="%s does not exist" % path, changed=False)
+ (msg, changed) = absent(path, username, check_mode)
+ else:
+ module.fail_json(msg="Invalid state: %s" % state)
+
+ (msg, changed) = check_file_attrs(module, changed, msg)
+ module.exit_json(msg=msg, changed=changed)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
new file mode 100644
index 00000000..3d4ba84b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
@@ -0,0 +1,2135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_ecs_instance
+description:
+ - Manage ECS instances in Huawei Cloud.
+short_description: Creates a resource of Ecs/Instance in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+ - The timeouts for delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ required: true
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ required: true
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the ECS name. The value consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), and periods (.).
+ type: str
+ required: true
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. Constraints: the
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ elements: dict
+ required: true
+ suboptions:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ required: true
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ required: true
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ required: true
+ suboptions:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - NOTE: For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1
+ disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ required: false
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ required: true
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. Password complexity requirements: the password
+ consists of 8 to 26 characters and must contain at least three of
+ the following character types 'uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ required: false
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ required: true
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ required: false
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be
+ assigned.
+ type: str
+ required: false
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ required: false
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this
+ parameter is left blank, the default security group is bound to
+ the ECS by default.
+ type: list
+ elements: str
+ required: false
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ required: false
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ required: false
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ required: false
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with
+ base64. The maximum size of the content to be injected (before
+ encoding) is 32 KB. For Linux ECSs, this parameter does not take
+ effect when adminPass is used.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# Create an ECS instance
+- name: Create a VPC
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create an EIP
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ register: eip
+- name: Create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ register: disk
+- name: Create an instance
+ community.general.hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ returned: success
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the ECS name. Value requirements "Consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), periods (.)".
+ type: str
+ returned: success
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. The
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ returned: success
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID corresponding to the IP address.
+ type: str
+ returned: success
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ returned: success
+ contains:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - NOTE: For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1
+ disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ returned: success
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. Password complexity requirements: the password
+ consists of 8 to 26 characters and must contain at least three of
+ the following character types "uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)". The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ returned: success
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ returned: success
+ contains:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ returned: success
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be assigned.
+ type: str
+ returned: success
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ returned: success
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this parameter is left
+ blank, the default security group is bound to the ECS by default.
+ type: list
+ returned: success
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ returned: success
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ returned: success
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ returned: success
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with base64. The maximum
+ size of the content to be injected (before encoding) is 32 KB. For
+ Linux ECSs, this parameter does not take effect when adminPass is
+ used.
+ type: str
+ returned: success
+ config_drive:
+ description:
+ - Specifies the configuration driver.
+ type: str
+ returned: success
+ created:
+ description:
+ - Specifies the time when an ECS was created.
+ type: str
+ returned: success
+ disk_config_type:
+ description:
+ - Specifies the disk configuration type. MANUAL means the image
+ space is not expanded. AUTO means the image space of the system
+ disk is expanded to match the flavor.
+ type: str
+ returned: success
+ host_name:
+ description:
+ - Specifies the host name of the ECS.
+ type: str
+ returned: success
+ image_name:
+ description:
+ - Specifies the image name of the ECS.
+ type: str
+ returned: success
+ power_state:
+ description:
+ - Specifies the power status of the ECS.
+ type: int
+ returned: success
+ server_alias:
+ description:
+ - Specifies the ECS alias.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
+ REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
+ and DELETED.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ flavor_name=dict(type='str', required=True),
+ image_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ nics=dict(
+ type='list', required=True, elements='dict',
+ options=dict(
+ ip_address=dict(type='str', required=True),
+ subnet_id=dict(type='str', required=True)
+ ),
+ ),
+ root_volume=dict(type='dict', required=True, options=dict(
+ volume_type=dict(type='str', required=True),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ )),
+ vpc_id=dict(type='str', required=True),
+ admin_pass=dict(type='str', no_log=True),
+ data_volumes=dict(type='list', elements='dict', options=dict(
+ volume_id=dict(type='str', required=True),
+ device=dict(type='str')
+ )),
+ description=dict(type='str'),
+ eip_id=dict(type='str'),
+ enable_auto_recovery=dict(type='bool'),
+ enterprise_project_id=dict(type='str'),
+ security_groups=dict(type='list', elements='str'),
+ server_metadata=dict(type='dict'),
+ server_tags=dict(type='dict'),
+ ssh_key_name=dict(type='str'),
+ user_data=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "ecs")
+
+ try:
+ _init(config)
+ is_exist = module.params['id']
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params['id']:
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "admin_pass": module.params.get("admin_pass"),
+ "availability_zone": module.params.get("availability_zone"),
+ "data_volumes": module.params.get("data_volumes"),
+ "description": module.params.get("description"),
+ "eip_id": module.params.get("eip_id"),
+ "enable_auto_recovery": module.params.get("enable_auto_recovery"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "flavor_name": module.params.get("flavor_name"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "nics": module.params.get("nics"),
+ "root_volume": module.params.get("root_volume"),
+ "security_groups": module.params.get("security_groups"),
+ "server_metadata": module.params.get("server_metadata"),
+ "server_tags": module.params.get("server_tags"),
+ "ssh_key_name": module.params.get("ssh_key_name"),
+ "user_data": module.params.get("user_data"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait(config, r, client, timeout)
+
+ sub_job_identity = {
+ "job_type": "createSingleServer",
+ }
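+ # Note on the nested for/else below: the inner loop breaks on the first
+ # mismatched field, so its else branch (all fields matched) captures the
+ # sub job and exits; the outer else branch runs only when no sub job
+ # matched at all.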
+ for item in navigate_value(obj, ["entities", "sub_jobs"]):
+ for k, v in sub_job_identity.items():
+ if item[k] != v:
+ break
+ else:
+ obj = item
+ break
+ else:
+ raise Exception("Can't find the sub job")
+ module.params['id'] = navigate_value(obj, ["entities", "server_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ client = config.client(get_region(module), "ecs", "project")
+
+ params = build_delete_nics_parameters(expect_state)
+ params1 = build_delete_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_delete_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ params = build_set_auto_recovery_parameters(expect_state)
+ params1 = build_set_auto_recovery_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_set_auto_recovery_request(module, params, client)
+
+ params = build_attach_nics_parameters(expect_state)
+ params1 = build_attach_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_attach_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ multi_invoke_delete_volume(config, expect_state, client, timeout)
+
+ multi_invoke_attach_data_disk(config, expect_state, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_delete_parameters(opts)
+ if params:
+ r = send_delete_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ preprocess_read_response(r)
+ res["read"] = fill_read_resp_body(r)
+
+ r = send_read_auto_recovery_request(module, client)
+ res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r)
+
+ return res, None
+
+
+def preprocess_read_response(resp):
+ v = resp.get("os-extended-volumes:volumes_attached")
+ if v and isinstance(v, list):
+ for i in range(len(v)):
+ if v[i].get("bootIndex") == "0":
+ root_volume = v[i]
+
+ if (i + 1) != len(v):
+ v[i] = v[-1]
+
+ v.pop()
+
+ resp["root_volume"] = root_volume
+ break
+
+ v = resp.get("addresses")
+ if v:
+ rv = {}
+ eips = []
+ for val in v.values():
+ for item in val:
+ if item["OS-EXT-IPS:type"] == "floating":
+ eips.append(item)
+ else:
+ rv[item["OS-EXT-IPS:port_id"]] = item
+
+ for item in eips:
+ k = item["OS-EXT-IPS:port_id"]
+ if k in rv:
+ rv[k]["eip_address"] = item.get("addr", "")
+ else:
+ rv[k] = item
+ item["eip_address"] = item.get("addr", "")
+ item["addr"] = ""
+
+ resp["address"] = rv.values()
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ adjust_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "enterprise_project_id=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={offset}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
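+# For example (hypothetical values), opts containing only
+# name="ansible_ecs_instance_test" would yield
+# "?limit=10&offset={offset}&name=ansible_ecs_instance_test"; the {offset}
+# placeholder is then filled page by page in search_resource() below.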
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "cloudservers/detail" + query_link
+
+ result = []
+ p = {'offset': 1}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ adjust_list_resp(identity_obj, item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['offset'] += 1
+
+ return result
+
+
+def build_delete_nics_parameters(opts):
+ params = dict()
+
+ v = expand_delete_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_delete_nics_nics(d, array_index):
+ cv = d["current_state"].get("nics")
+ if not cv:
+ return None
+
+ val = cv
+
+ ev = d.get("nics")
+ if ev:
+ m = [item.get("ip_address") for item in ev]
+ val = [item for item in cv if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("port_id")
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_delete_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics/delete")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_set_auto_recovery_parameters(opts):
+ params = dict()
+
+ v = expand_set_auto_recovery_support_auto_recovery(opts, None)
+ if v is not None:
+ params["support_auto_recovery"] = v
+
+ return params
+
+
+def expand_set_auto_recovery_support_auto_recovery(d, array_index):
+ v = navigate_value(d, ["enable_auto_recovery"], None)
+ return None if v is None else str(v).lower()
+
+
+def send_set_auto_recovery_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(set_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_pass"], None)
+ if not is_empty_value(v):
+ params["adminPass"] = v
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = expand_create_extendparam(opts, None)
+ if not is_empty_value(v):
+ params["extendparam"] = v
+
+ v = navigate_value(opts, ["flavor_name"], None)
+ if not is_empty_value(v):
+ params["flavorRef"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = navigate_value(opts, ["ssh_key_name"], None)
+ if not is_empty_value(v):
+ params["key_name"] = v
+
+ v = navigate_value(opts, ["server_metadata"], None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ v = expand_create_root_volume(opts, None)
+ if not is_empty_value(v):
+ params["root_volume"] = v
+
+ v = expand_create_security_groups(opts, None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ v = expand_create_server_tags(opts, None)
+ if not is_empty_value(v):
+ params["server_tags"] = v
+
+ v = navigate_value(opts, ["user_data"], None)
+ if not is_empty_value(v):
+ params["user_data"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpcid"] = v
+
+ if not params:
+ return params
+
+ params = {"server": params}
+
+ return params
+
+
+def expand_create_extendparam(d, array_index):
+ r = dict()
+
+ r["chargingMode"] = 0
+
+ v = navigate_value(d, ["enterprise_project_id"], array_index)
+ if not is_empty_value(v):
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(d, ["enable_auto_recovery"], array_index)
+ if not is_empty_value(v):
+ r["support_auto_recovery"] = v
+
+ return r
+
+
+def expand_create_nics(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ v = navigate_value(
+ d, ["nics"], new_ai)
+
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_ai["nics"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["nics", "ip_address"], new_ai)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["nics", "subnet_id"], new_ai)
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["eip_id"], array_index)
+ if not is_empty_value(v):
+ r["id"] = v
+
+ return r
+
+
+def expand_create_root_volume(d, array_index):
+ r = dict()
+
+ v = expand_create_root_volume_extendparam(d, array_index)
+ if not is_empty_value(v):
+ r["extendparam"] = v
+
+ v = navigate_value(d, ["root_volume", "size"], array_index)
+ if not is_empty_value(v):
+ r["size"] = v
+
+ v = navigate_value(d, ["root_volume", "volume_type"], array_index)
+ if not is_empty_value(v):
+ r["volumetype"] = v
+
+ return r
+
+
+def expand_create_root_volume_extendparam(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["root_volume", "snapshot_id"], array_index)
+ if not is_empty_value(v):
+ r["snapshotId"] = v
+
+ return r
+
+
+def expand_create_security_groups(d, array_index):
+ v = d.get("security_groups")
+ if not v:
+ return None
+
+ return [{"id": i} for i in v]
+
+
+def expand_create_server_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [{"key": k, "value": v1} for k, v1 in v.items()]
+
+
+def send_create_request(module, params, client):
+ url = "cloudservers"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_nics_parameters(opts):
+ params = dict()
+
+ v = expand_attach_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_attach_nics_nics(d, array_index):
+ ev = d.get("nics")
+ if not ev:
+ return None
+
+ val = ev
+
+ cv = d["current_state"].get("nics")
+ if cv:
+ m = [item.get("ip_address") for item in cv]
+ val = [item for item in ev if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("ip_address")
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = item.get("subnet_id")
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_attach_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_volume_request(module, params, client, info):
+ path_parameters = {
+ "volume_id": ["volume_id"],
+ }
+ data = dict((key, navigate_value(info, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data)
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_volume), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_data_disk_parameters(opts, array_index):
+ params = dict()
+
+ v = expand_attach_data_disk_volume_attachment(opts, array_index)
+ if not is_empty_value(v):
+ params["volumeAttachment"] = v
+
+ return params
+
+
+def expand_attach_data_disk_volume_attachment(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["data_volumes", "device"], array_index)
+ if not is_empty_value(v):
+ r["device"] = v
+
+ v = navigate_value(d, ["data_volumes", "volume_id"], array_index)
+ if not is_empty_value(v):
+ r["volumeId"] = v
+
+ return r
+
+
+def send_attach_data_disk_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/attachvolume")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_data_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_delete_parameters(opts):
+ params = dict()
+
+ params["delete_publicip"] = False
+
+ params["delete_volume"] = False
+
+ v = expand_delete_servers(opts, None)
+ if not is_empty_value(v):
+ params["servers"] = v
+
+ return params
+
+
+def expand_delete_servers(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = expand_delete_servers_id(d, new_ai)
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_delete_servers_id(d, array_index):
+ return d["ansible_module"].params.get("id")
+
+
+def send_delete_request(module, params, client):
+ url = "cloudservers/delete"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "jobs/{job_id}", result)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_ecs_instance): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
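+# Sketch of the polling contract assumed above: wait_to_finish(target,
+# pending, refresh, timeout) calls refresh() repeatedly, returns its result
+# once the reported status is in target ("SUCCESS"), keeps polling while it
+# is in pending ("RUNNING", "INIT"), and raises on timeout or on any other
+# status, which async_wait() converts into fail_json().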
+def multi_invoke_delete_volume(config, opts, client, timeout):
+ module = config.module
+
+ opts1 = None
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in expect]
+ opts1 = {
+ "data_volumes": [
+ i for i in current if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ r = send_delete_volume_request(module, None, client, loop_val[i])
+ async_wait(config, r, client, timeout)
+
+
+def multi_invoke_attach_data_disk(config, opts, client, timeout):
+ module = config.module
+
+ opts1 = opts
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in current]
+ opts1 = {
+ "data_volumes": [
+ i for i in expect if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ params = build_attach_data_disk_parameters(opts1, {"data_volumes": i})
+ r = send_attach_data_disk_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
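+# The {"data_volumes": i} argument above is an "array index" map consumed by
+# navigate_value(): when a path segment refers to a list (here data_volumes),
+# the map supplies the element to descend into, which lets the same
+# build/expand helpers address any element of a list-valued option.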
+def send_read_request(module, client):
+ url = build_path(module, "cloudservers/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["server"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ v = fill_read_resp_address(body.get("address"))
+ result["address"] = v
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_read_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_os_extended_volumes_volumes_attached(
+ body.get("os-extended-volumes:volumes_attached"))
+ result["os-extended-volumes:volumes_attached"] = v
+
+ v = fill_read_resp_root_volume(body.get("root_volume"))
+ result["root_volume"] = v
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_read_resp_address(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id")
+
+ val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type")
+
+ val["addr"] = item.get("addr")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["image_name"] = value.get("image_name")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_os_extended_volumes_volumes_attached(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["bootIndex"] = item.get("bootIndex")
+
+ val["device"] = item.get("device")
+
+ val["id"] = item.get("id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_root_volume(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["device"] = value.get("device")
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def send_read_auto_recovery_request(module, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def fill_read_auto_recovery_resp_body(body):
+ result = dict()
+
+ result["support_auto_recovery"] = body.get("support_auto_recovery")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-AZ:availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "config_drive"], array_index)
+ r["config_drive"] = v
+
+ v = navigate_value(response, ["read", "created"], array_index)
+ r["created"] = v
+
+ v = flatten_data_volumes(response, array_index)
+ r["data_volumes"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index)
+ r["disk_config_type"] = v
+
+ v = flatten_enable_auto_recovery(response, array_index)
+ r["enable_auto_recovery"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "flavor", "id"], array_index)
+ r["flavor_name"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index)
+ r["host_name"] = v
+
+ v = navigate_value(response, ["read", "image", "id"], array_index)
+ r["image_id"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "image_name"], array_index)
+ r["image_name"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = flatten_nics(response, array_index)
+ r["nics"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-STS:power_state"], array_index)
+ r["power_state"] = v
+
+ v = flatten_root_volume(response, array_index)
+ r["root_volume"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index)
+ r["server_alias"] = v
+
+ v = flatten_server_tags(response, array_index)
+ r["server_tags"] = v
+
+ v = navigate_value(response, ["read", "key_name"], array_index)
+ r["ssh_key_name"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index)
+ r["user_data"] = v
+
+ v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def flatten_data_volumes(d, array_index):
+ v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.os-extended-volumes:volumes_attached"] = i
+
+ val = dict()
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai)
+ val["volume_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_auto_recovery(d, array_index):
+ v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"],
+ array_index)
+ return v == "true"
+
+
+def flatten_nics(d, array_index):
+ v = navigate_value(d, ["read", "address"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.address"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "address", "addr"], new_ai)
+ val["ip_address"] = v
+
+ v = navigate_value(
+ d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai)
+ val["port_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_root_volume(d, array_index):
+ result = dict()
+
+ v = navigate_value(d, ["read", "root_volume", "device"], array_index)
+ result["device"] = v
+
+ v = navigate_value(d, ["read", "root_volume", "id"], array_index)
+ result["volume_id"] = v
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return None
+
+
+def flatten_server_tags(d, array_index):
+ v = navigate_value(d, ["read", "tags"], array_index)
+ if not v:
+ return None
+
+ r = dict()
+ for item in v:
+ v1 = item.split("=")
+ if v1:
+ r[v1[0]] = v1[1]
+ return r
+
+
+def adjust_options(opts, states):
+ adjust_data_volumes(opts, states)
+
+ adjust_nics(opts, states)
+
+
+def adjust_data_volumes(parent_input, parent_cur):
+ iv = parent_input.get("data_volumes")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("data_volumes")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["volume_id"] != icv["volume_id"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(data_volumes) failed, "
+ "the array number is not equal")
+
+ parent_cur["data_volumes"] = result
+
+
+def adjust_nics(parent_input, parent_cur):
+ iv = parent_input.get("nics")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("nics")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["ip_address"] != icv["ip_address"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(nics) failed, "
+ "the array number is not equal")
+
+ parent_cur["nics"] = result
+
+
+def set_unreadable_options(opts, states):
+ states["admin_pass"] = opts.get("admin_pass")
+
+ states["eip_id"] = opts.get("eip_id")
+
+ set_unread_nics(
+ opts.get("nics"), states.get("nics"))
+
+ set_unread_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ states["security_groups"] = opts.get("security_groups")
+
+ states["server_metadata"] = opts.get("server_metadata")
+
+
+def set_unread_nics(inputv, curv):
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ if not (curv and isinstance(curv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ cv["subnet_id"] = iv.get("subnet_id")
+
+
+def set_unread_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ curv["size"] = inputv.get("size")
+
+ curv["snapshot_id"] = inputv.get("snapshot_id")
+
+ curv["volume_type"] = inputv.get("volume_type")
+
+
+def set_readonly_options(opts, states):
+ opts["config_drive"] = states.get("config_drive")
+
+ opts["created"] = states.get("created")
+
+ opts["disk_config_type"] = states.get("disk_config_type")
+
+ opts["host_name"] = states.get("host_name")
+
+ opts["image_name"] = states.get("image_name")
+
+ set_readonly_nics(
+ opts.get("nics"), states.get("nics"))
+
+ opts["power_state"] = states.get("power_state")
+
+ set_readonly_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ opts["server_alias"] = states.get("server_alias")
+
+ opts["status"] = states.get("status")
+
+
+def set_readonly_nics(inputv, curv):
+ if not (curv and isinstance(curv, list)):
+ return
+
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ iv["port_id"] = cv.get("port_id")
+
+
+def set_readonly_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ inputv["device"] = curv.get("device")
+
+ inputv["volume_id"] = curv.get("volume_id")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["servers"], None)
+
+
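+# _build_identity_object() below mirrors the shape of a list-response entry:
+# fields derivable from user input are filled in and purely server-generated
+# fields are left as None; search_resource() then keeps a candidate only
+# when are_different_dicts() reports no difference from this template.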
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = None
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["OS-EXT-AZ:availability_zone"] = v
+
+ result["OS-EXT-SRV-ATTR:hostname"] = None
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = None
+
+ v = navigate_value(all_opts, ["user_data"], None)
+ result["OS-EXT-SRV-ATTR:user_data"] = v
+
+ result["OS-EXT-STS:power_state"] = None
+
+ result["config_drive"] = None
+
+ result["created"] = None
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ v = expand_list_flavor(all_opts, None)
+ result["flavor"] = v
+
+ result["id"] = None
+
+ v = expand_list_image(all_opts, None)
+ result["image"] = v
+
+ v = navigate_value(all_opts, ["ssh_key_name"], None)
+ result["key_name"] = v
+
+ v = expand_list_metadata(all_opts, None)
+ result["metadata"] = v
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["status"] = None
+
+ v = expand_list_tags(all_opts, None)
+ result["tags"] = v
+
+ return result
+
+
+def expand_list_flavor(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["flavor_name"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_image(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [k + "=" + v1 for k, v1 in v.items()]
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_list_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_list_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def adjust_list_resp(opts, resp):
+ adjust_list_api_tags(opts, resp)
+
+
+def adjust_list_api_tags(parent_input, parent_cur):
+ iv = parent_input.get("tags")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("tags")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ result = []
+ for iiv in iv:
+ if iiv not in cv:
+ break
+
+ result.append(iiv)
+
+ j = cv.index(iiv)
+ cv[j] = cv[-1]
+ cv.pop()
+
+ if cv:
+ result.extend(cv)
+ parent_cur["tags"] = result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py
new file mode 100644
index 00000000..4aec1b94
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py
@@ -0,0 +1,1210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_evs_disk
+description:
+ - Manage EVS disks (block storage) in Huawei Cloud.
+short_description: Creates a resource of Evs/Disk in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+ - The timeouts for delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ required: true
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, the
+ disk cannot be created. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ required: true
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ required: false
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ required: false
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+ media. SCSI reservation command is supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ required: false
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ required: false
+ encryption_id:
+ description:
+ - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ required: false
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ required: false
+ size:
+ description:
+ - Specifies the disk size, in GB. Its values are as follows: system
+ disk, 1 GB to 1024 GB; data disk, 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# Create a test disk
+- name: Create a disk
+ community.general.hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
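+
+# A hypothetical variant (all values are placeholders): create a shareable
+# SSD disk from a snapshot. Per the option notes above, volume_type must
+# match the snapshot's source disk type.
+- name: Create a shared SSD disk from a snapshot
+ community.general.hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_from_snapshot"
+ volume_type: "SSD"
+ size: 40
+ snapshot_id: "{{ snapshot_id }}"
+ enable_share: true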
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ returned: success
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, disk
+ creation will fail. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ returned: success
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ returned: success
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ returned: success
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+ media. SCSI reservation commands are supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ returned: success
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ returned: success
+ encryption_id:
+ description:
+ - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the disk size, in GB. Its value ranges are as follows:
+ system disk, 1 GB to 1024 GB; data disk, 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ returned: success
+ attachments:
+ description:
+ - Specifies the disk attachment information.
+ type: complex
+ returned: success
+ contains:
+ attached_at:
+ description:
+ - Specifies the time when the disk was attached. Time
+ format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ attachment_id:
+ description:
+ - Specifies the ID of the attachment information.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the device name.
+ type: str
+ returned: success
+ server_id:
+ description:
+ - Specifies the ID of the server to which the disk is
+ attached.
+ type: str
+ returned: success
+ backup_policy_id:
+ description:
+ - Specifies the backup policy ID.
+ type: str
+ returned: success
+ created_at:
+ description:
+ - Specifies the time when the disk was created. Time format is 'UTC
+ YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ is_bootable:
+ description:
+ - Specifies whether the disk is bootable.
+ type: bool
+ returned: success
+ is_readonly:
+ description:
+ - Specifies whether the disk is read-only or read/write. True
+ indicates that the disk is read-only. False indicates that the
+ disk is read/write.
+ type: bool
+ returned: success
+ source_volume_id:
+ description:
+ - Specifies the source disk ID. This parameter has a value if the
+ disk is created from a source disk.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the disk status.
+ type: str
+ returned: success
+ tags:
+ description:
+ - Specifies the disk tags.
+ type: dict
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ volume_type=dict(type='str', required=True),
+ backup_id=dict(type='str'),
+ description=dict(type='str'),
+ enable_full_clone=dict(type='bool'),
+ enable_scsi=dict(type='bool'),
+ enable_share=dict(type='bool'),
+ encryption_id=dict(type='str'),
+ enterprise_project_id=dict(type='str'),
+ image_id=dict(type='str'),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "evs")
+
+ try:
+ _init(config)
+ is_exist = module.params.get('id')
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params.get('id'):
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("find more than one resources(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "backup_id": module.params.get("backup_id"),
+ "description": module.params.get("description"),
+ "enable_full_clone": module.params.get("enable_full_clone"),
+ "enable_scsi": module.params.get("enable_scsi"),
+ "enable_share": module.params.get("enable_share"),
+ "encryption_id": module.params.get("encryption_id"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "size": module.params.get("size"),
+ "snapshot_id": module.params.get("snapshot_id"),
+ "volume_type": module.params.get("volume_type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ obj = async_wait(config, r, client1, timeout)
+ module.params['id'] = navigate_value(obj, ["entities", "volume_id"])
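+
+
+# Illustrative sketch (not called by the module): how the '30m'-style
+# strings from the timeouts option, used by create() above and by
+# update()/delete() below, are converted to seconds. It assumes the value
+# always ends in 'm' (minutes), as the documented defaults do.
+def _example_parse_timeout(value='30m'):
+ """Convert a '<minutes>m' string to seconds, e.g. '30m' -> 1800."""
+ return 60 * int(value.rstrip('m'))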
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+
+ params = build_update_parameters(expect_state)
+ params1 = build_update_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_update_request(module, params, client)
+
+ params = build_extend_disk_parameters(expect_state)
+ params1 = build_extend_disk_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ client1 = config.client(get_region(module), "evsv2.1", "project")
+ r = send_extend_disk_request(module, params, client1)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client1, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ r = send_delete_request(module, None, client)
+
+ client = config.client(get_region(module), "volume", "project")
+ client.endpoint = client.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return res, None
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enable_share"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "multiattach=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["availability_zone"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "availability_zone=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={start}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
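+
+# For example (illustrative values only): with opts containing
+# enable_share=True, name='disk1' and availability_zone='cn-north-1a',
+# the function above returns
+# "?limit=10&offset={start}&multiattach=True&name=disk1&availability_zone=cn-north-1a"
+# and search_resource below fills in {start} while paging.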
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ opts = user_input_parameters(module)
+ name = module.params.get("name")
+ query_link = _build_query_link(opts)
+ link = "os-vendor-volumes/detail" + query_link
+
+ result = []
+ p = {'start': 0}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ if name == item.get("name"):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['start'] += len(r)
+
+ return result
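+
+
+# Pagination note (informal): the list API is paged with limit=10 and an
+# offset start index; each loop advances 'start' by the number of items
+# returned and stops on an empty page, or early once two name matches
+# exist, which is all _init needs to detect a duplicate resource.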
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["backup_id"], None)
+ if not is_empty_value(v):
+ params["backup_id"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = expand_create_metadata(opts, None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["enable_share"], None)
+ if not is_empty_value(v):
+ params["multiattach"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["size"], None)
+ if not is_empty_value(v):
+ params["size"] = v
+
+ v = navigate_value(opts, ["snapshot_id"], None)
+ if not is_empty_value(v):
+ params["snapshot_id"] = v
+
+ v = navigate_value(opts, ["volume_type"], None)
+ if not is_empty_value(v):
+ params["volume_type"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def expand_create_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ if not is_empty_value(v):
+ r["__system__cmkid"] = v
+
+ v = expand_create_metadata_system_encrypted(d, array_index)
+ if not is_empty_value(v):
+ r["__system__encrypted"] = v
+
+ v = expand_create_metadata_full_clone(d, array_index)
+ if not is_empty_value(v):
+ r["full_clone"] = v
+
+ v = expand_create_metadata_hw_passthrough(d, array_index)
+ if not is_empty_value(v):
+ r["hw:passthrough"] = v
+
+ return r
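+
+
+# Worked example (values are illustrative): for opts with
+# encryption_id='<36-byte id>', enable_full_clone=True and enable_scsi=False,
+# expand_create_metadata returns
+# {'__system__cmkid': '<36-byte id>', '__system__encrypted': '1',
+#  'full_clone': '0', 'hw:passthrough': 'false'}.
+# Note the asymmetric encodings in the helpers below: full_clone uses '0'
+# when linked cloning is requested, while hw:passthrough uses 'true'/'false'.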
+
+
+def expand_create_metadata_system_encrypted(d, array_index):
+ v = navigate_value(d, ["encryption_id"], array_index)
+ return "1" if v else ""
+
+
+def expand_create_metadata_full_clone(d, array_index):
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ return "0" if v else ""
+
+
+def expand_create_metadata_hw_passthrough(d, array_index):
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ if v is None:
+ return v
+ return "true" if v else "false"
+
+
+def send_create_request(module, params, client):
+ url = "cloudvolumes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if v is not None:
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
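+
+
+# Note (informal): 'description' is sent whenever it is not None, so an
+# empty string can clear an existing description, while 'name' is only
+# sent when non-empty, presumably because the API rejects an empty name.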
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_extend_disk_parameters(opts):
+ params = dict()
+
+ v = expand_extend_disk_os_extend(opts, None)
+ if not is_empty_value(v):
+ params["os-extend"] = v
+
+ return params
+
+
+def expand_extend_disk_os_extend(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["size"], array_index)
+ if not is_empty_value(v):
+ r["new_size"] = v
+
+ return r
+
+
+def send_extend_disk_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}/action")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(extend_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "job_id": ["job_id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "jobs/{job_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_evs_disk): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
+def send_read_request(module, client):
+ url = build_path(module, "os-vendor-volumes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volume"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_read_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_read_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = flatten_attachments(response, array_index)
+ r["attachments"] = v
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "metadata", "policy"], array_index)
+ r["backup_policy_id"] = v
+
+ v = navigate_value(response, ["read", "created_at"], array_index)
+ r["created_at"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = flatten_enable_full_clone(response, array_index)
+ r["enable_full_clone"] = v
+
+ v = flatten_enable_scsi(response, array_index)
+ r["enable_scsi"] = v
+
+ v = navigate_value(response, ["read", "multiattach"], array_index)
+ r["enable_share"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "__system__cmkid"], array_index)
+ r["encryption_id"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(
+ response, ["read", "volume_image_metadata", "id"], array_index)
+ r["image_id"] = v
+
+ v = flatten_is_bootable(response, array_index)
+ r["is_bootable"] = v
+
+ v = flatten_is_readonly(response, array_index)
+ r["is_readonly"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "size"], array_index)
+ r["size"] = v
+
+ v = navigate_value(response, ["read", "snapshot_id"], array_index)
+ r["snapshot_id"] = v
+
+ v = navigate_value(response, ["read", "source_volid"], array_index)
+ r["source_volume_id"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(response, ["read", "tags"], array_index)
+ r["tags"] = v
+
+ v = navigate_value(response, ["read", "volume_type"], array_index)
+ r["volume_type"] = v
+
+ return r
+
+
+def flatten_attachments(d, array_index):
+ v = navigate_value(d, ["read", "attachments"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.attachments"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai)
+ val["attached_at"] = v
+
+ v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai)
+ val["attachment_id"] = v
+
+ v = navigate_value(d, ["read", "attachments", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(d, ["read", "attachments", "server_id"], new_ai)
+ val["server_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_full_clone(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "full_clone"],
+ array_index)
+ if v is None:
+ return v
+ return True if v == "0" else False
+
+
+def flatten_enable_scsi(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "hw:passthrough"],
+ array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def flatten_is_bootable(d, array_index):
+ v = navigate_value(d, ["read", "bootable"], array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def flatten_is_readonly(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "readonly"],
+ array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def set_unreadable_options(opts, states):
+ states["backup_id"] = opts.get("backup_id")
+
+
+def set_readonly_options(opts, states):
+ opts["attachments"] = states.get("attachments")
+
+ opts["backup_policy_id"] = states.get("backup_policy_id")
+
+ opts["created_at"] = states.get("created_at")
+
+ opts["is_bootable"] = states.get("is_bootable")
+
+ opts["is_readonly"] = states.get("is_readonly")
+
+ opts["source_volume_id"] = states.get("source_volume_id")
+
+ opts["status"] = states.get("status")
+
+ opts["tags"] = states.get("tags")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volumes"], None)
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ r["__system__cmkid"] = v
+
+ r["attached_mode"] = None
+
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ r["full_clone"] = v
+
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ r["hw:passthrough"] = v
+
+ r["policy"] = None
+
+ r["readonly"] = None
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_volume_image_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_list_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_list_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py
new file mode 100644
index 00000000..f53369ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2018 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_network_vpc
+description:
+ - Represents a VPC resource.
+short_description: Creates a Huawei Cloud VPC
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+ - The timeout for the update operation.
+ type: str
+ default: '15m'
+ delete:
+ description:
+ - The timeout for the delete operation.
+ type: str
+ default: '15m'
+ name:
+ description:
+ - The name of the VPC.
+ type: str
+ required: true
+ cidr:
+ description:
+ - The range of available subnets in the VPC.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create a vpc
+ community.general.hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
+'''
+
+RETURN = '''
+ id:
+ description:
+ - The ID of the VPC.
+ type: str
+ returned: success
+ name:
+ description:
+ - The name of the VPC.
+ type: str
+ returned: success
+ cidr:
+ description:
+ - The range of available subnets in the VPC.
+ type: str
+ returned: success
+ status:
+ description:
+ - The status of the VPC.
+ type: str
+ returned: success
+ routes:
+ description:
+ - The route information.
+ type: complex
+ returned: success
+ contains:
+ destination:
+ description:
+ - The destination network segment of a route.
+ type: str
+ returned: success
+ next_hop:
+ description:
+ - The next hop of a route. If the route type is peering, this
+ is the VPC peering connection ID.
+ type: str
+ returned: success
+ enable_shared_snat:
+ description:
+ - Shows whether shared SNAT is enabled.
+ type: bool
+ returned: success
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcClientException404, HwcModule,
+ are_different_dicts, is_empty_value,
+ wait_to_finish, get_region,
+ build_path, navigate_value)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(
+ default='present', choices=['present', 'absent'], type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ delete=dict(default='15m', type='str'),
+ ), default=dict()),
+ name=dict(required=True, type='str'),
+ cidr=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+ config = Config(module, 'vpc')
+
+ state = module.params['state']
+
+ if (not module.params.get("id")) and module.params.get("name"):
+ module.params['id'] = get_id_by_name(config)
+
+ fetch = None
+ link = self_link(module)
+ # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "vpc", "project")
+ fetch = fetch_resource(module, client, link)
+ if fetch:
+ fetch = fetch.get('vpc')
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {"cidr": current_state["cidr"]}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config, self_link(module))
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config, self_link(module))
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config, "vpcs")
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.post(link, resource_to_create(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_done = wait_for_operation(config, 'create', r)
+ v = ""
+ try:
+ v = navigate_value(wait_done, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, 'vpcs/{op_id}', {'op_id': v})
+ return fetch_resource(module, client, url)
+
+
+def update(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.put(link, resource_to_update(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_operation(config, 'update', r)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_delete(module, client, link)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_id_by_name(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ name = module.params.get("name")
+ link = "vpcs"
+ query_link = "?marker={marker}&limit=10"
+ link += query_link
+ not_format_keys = re.findall("={marker}", link)
+ none_values = re.findall("=None", link)
+
+ if not (not_format_keys or none_values):
+ r = None
+ try:
+ r = client.get(link)
+ except Exception:
+ pass
+ if r is None:
+ return None
+ r = r.get('vpcs', [])
+ ids = [
+ i.get('id') for i in r if i.get('name', '') == name
+ ]
+ if not ids:
+ return None
+ elif len(ids) == 1:
+ return ids[0]
+ else:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+ elif none_values:
+ module.fail_json(
+ msg="Can not find id by name because url includes None.")
+ else:
+ p = {'marker': ''}
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('vpcs', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == name:
+ ids.add(i.get('id'))
+ if len(ids) >= 2:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+
+ p['marker'] = r[-1].get('id')
+
+ return ids.pop() if ids else None
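+
+
+# Pagination here is marker-based (informal sketch): each request appends
+# ?marker=<id of the last item on the previous page>&limit=10 and the loop
+# stops on an empty page; collecting ids in a set lets the module fail as
+# soon as two VPCs share the requested name.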
+
+
+def self_link(module):
+ return build_path(module, "vpcs/{id}")
+
+
+def resource_to_create(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def resource_to_update(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def _get_editable_properties(module):
+ return {
+ "cidr": module.params.get("cidr"),
+ }
+
+
+def response_to_hash(module, response):
+ """ Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'id': response.get(u'id'),
+ u'name': response.get(u'name'),
+ u'cidr': response.get(u'cidr'),
+ u'status': response.get(u'status'),
+ u'routes': VpcRoutesArray(
+ response.get(u'routes', []), module).from_response(),
+ u'enable_shared_snat': response.get(u'enable_shared_snat')
+ }
+
+
+def wait_for_operation(config, op_type, op_result):
+ module = config.module
+ op_id = ""
+ try:
+ op_id = navigate_value(op_result, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, "vpcs/{op_id}", {'op_id': op_id})
+ timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m'))
+ states = {
+ 'create': {
+ 'allowed': ['CREATING', 'DONW', 'OK'],
+ 'complete': ['OK'],
+ },
+ 'update': {
+ 'allowed': ['PENDING_UPDATE', 'DONW', 'OK'],
+ 'complete': ['OK'],
+ }
+ }
+
+ return wait_for_completion(url, timeout, states[op_type]['allowed'],
+ states[op_type]['complete'], config)
+
+
+def wait_for_completion(op_uri, timeout, allowed_states,
+ complete_states, config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ def _refresh_status():
+ r = None
+ try:
+ r = fetch_resource(module, client, op_uri)
+ except Exception:
+ return None, ""
+
+ status = ""
+ try:
+ status = navigate_value(r, ['vpc', 'status'])
+ except Exception:
+ return None, ""
+
+ return r, status
+
+ try:
+ return wait_to_finish(complete_states, allowed_states,
+ _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def wait_for_delete(module, client, link):
+
+ def _refresh_status():
+ try:
+ client.get(link)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+ try:
+ return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+class VpcRoutesArray(object):
+ def __init__(self, request, module):
+ self.module = module
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return {
+ u'destination': item.get('destination'),
+ u'nexthop': item.get('next_hop')
+ }
+
+ def _response_from_item(self, item):
+ return {
+ u'destination': item.get(u'destination'),
+ u'next_hop': item.get(u'nexthop')
+ }
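+
+
+# Example (illustrative): the API names the hop field 'nexthop' while the
+# module exposes 'next_hop', so a response item
+# {'destination': '0.0.0.0/0', 'nexthop': '<peering-id>'}
+# becomes {'destination': '0.0.0.0/0', 'next_hop': '<peering-id>'}
+# after _response_from_item, and _request_for_item reverses the renaming.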
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
new file mode 100644
index 00000000..f7fb4fae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
@@ -0,0 +1,338 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_smn_topic
+description:
+ - Represents an SMN notification topic resource.
+short_description: Creates a resource of SMNTopic in Huawei Cloud
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ type: str
+ required: false
+ name:
+ description:
+ - Name of the topic to be created. The topic name is a string of 1
+ to 256 characters. It can contain only upper- or lower-case
+ letters, digits, hyphens (-), and underscores C(_), and must
+ start with a letter or digit.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create a smn topic
+ community.general.hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user_name: "{{ user_name }}"
+ password: "{{ password }}"
+ domain_name: "{{ domain_name }}"
+ project_name: "{{ project_name }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+'''
+
+RETURN = '''
+create_time:
+ description:
+ - Time when the topic was created.
+ returned: success
+ type: str
+display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ returned: success
+ type: str
+name:
+ description:
+ - Name of the topic to be created. The topic name is a string of 1
+ to 256 characters. It can contain only upper- or lower-case
+ letters, digits, hyphens (-), and underscores C(_), and must
+ start with a letter or digit.
+ returned: success
+ type: str
+push_policy:
+ description:
+ - Message pushing policy. 0 indicates that the message sending
+ fails and the message is cached in the queue. 1 indicates that
+ the failed message is discarded.
+ returned: success
+ type: int
+topic_urn:
+ description:
+ - Resource identifier of a topic, which is unique.
+ returned: success
+ type: str
+update_time:
+ description:
+ - Time when the topic was updated.
+ returned: success
+ type: str
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcModule, navigate_value,
+ are_different_dicts, is_empty_value,
+ build_path, get_region)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ display_name=dict(type='str'),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ config = Config(module, "smn")
+
+ state = module.params['state']
+
+ if not module.params.get("id"):
+ module.params['id'] = get_resource_id(config)
+
+ fetch = None
+ link = self_link(module)
+ # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "smn", "project")
+ fetch = fetch_resource(module, client, link)
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_resource_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {'display_name': current_state['display_name']}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config)
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ r = None
+ try:
+ r = client.post(link, create_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return get_resource(config, r)
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.put(link, update_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_resource(config, result):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ v = ""
+ try:
+ v = navigate_value(result, ['topic_urn'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ d = {'topic_urn': v}
+ url = build_path(module, 'notifications/topics/{topic_urn}', d)
+
+ return fetch_resource(module, client, url)
+
+
+def get_resource_id(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ query_link = "?offset={offset}&limit=10"
+ link += query_link
+
+ p = {'offset': 0}
+ v = module.params.get('name')
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('topics', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == v:
+ ids.add(i.get('topic_urn'))
+ if len(ids) >= 2:
+ module.fail_json(msg="Multiple resources are found")
+
+ p['offset'] += 1
+
+ return ids.pop() if ids else None
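+
+
+# Lookup sketch (informal): topics are listed page by page via
+# ?offset={offset}&limit=10, matching on 'name' and collecting 'topic_urn'
+# values; the module fails once two topics share the name, since the URN
+# is what identifies the resource in all later requests.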
+
+
+def self_link(module):
+ return build_path(module, "notifications/topics/{id}")
+
+
+def create_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ return params
+
+
+def update_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ return params
+
+
+def _get_resource_editable_properties(module):
+ return {
+ "display_name": module.params.get("display_name"),
+ }
+
+
+def response_to_hash(module, response):
+ """Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'create_time': response.get(u'create_time'),
+ u'display_name': response.get(u'display_name'),
+ u'name': response.get(u'name'),
+ u'push_policy': _push_policy_convert_from_response(
+ response.get('push_policy')),
+ u'topic_urn': response.get(u'topic_urn'),
+ u'update_time': response.get(u'update_time')
+ }
+
+
+def _push_policy_convert_from_response(value):
+ return {
+ 0: "the message sending fails and is cached in the queue",
+ 1: "the failed message is discarded",
+ }.get(int(value))
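+
+
+# Example (illustrative): a push_policy of 0 from the API is rendered as
+# "the message sending fails and is cached in the queue" and 1 as
+# "the failed message is discarded"; any other value maps to None.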
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
new file mode 100644
index 00000000..b53395f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
@@ -0,0 +1,877 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_eip
+description:
+ - Elastic IP management.
+short_description: Creates a resource of Vpc/EIP in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '5m'
+ update:
+ description:
+ - The timeout for the update operation.
+ type: str
+ default: '5m'
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ required: true
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ required: false
+ suboptions:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+ If this parameter is left blank or set to an empty string,
+ the default value bandwidth is used. For IPv6 addresses,
+ the default value is bandwidth outside China and traffic
+ within China.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+ details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+ size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
+ included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ required: false
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ required: false
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ required: false
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ required: false
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create an eip and bind it to a port
+- name: Create vpc
+ community.general.hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ community.general.hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ community.general.hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ register: port
+- name: Create an eip and bind it to a port
+ community.general.hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+'''
+
+RETURN = '''
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ returned: success
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ returned: success
+ contains:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+ If this parameter is left blank or set to an empty string,
+ the default value bandwidth is used. For IPv6 addresses,
+ the default value is bandwidth outside China and traffic
+ within China.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+ details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+ size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
+ included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ returned: success
+ id:
+ description:
+ - Specifies the ID of dedicated bandwidth.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ returned: success
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ returned: success
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ returned: success
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ returned: success
+ create_time:
+ description:
+ - Specifies the time (UTC time) when the EIP was assigned.
+ type: str
+ returned: success
+ ipv6_address:
+ description:
+ - Specifies the obtained IPv6 EIP.
+ type: str
+ returned: success
+ private_ip_address:
+ description:
+ - Specifies the private IP address bound with the EIP. This
+ parameter is returned only when a private IP address is bound
+ with the EIP.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='5m', type='str'),
+ update=dict(default='5m', type='str'),
+ ), default=dict()),
+ type=dict(type='str', required=True),
+ dedicated_bandwidth=dict(type='dict', options=dict(
+ charge_mode=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ size=dict(type='int', required=True)
+ )),
+ enterprise_project_id=dict(type='str'),
+ ip_version=dict(type='int'),
+ ipv4_address=dict(type='str'),
+ port_id=dict(type='str'),
+ shared_bandwidth_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
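+
+# Editor's note on main() above: the module resolves the target EIP either
+# from an explicit 'id' or by searching with the user-supplied filters;
+# state=present then creates the resource, or converges it via update()
+# when the read-back differs from the inputs, while state=absent deletes
+# it. Check mode skips the write calls but still reports 'changed'.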
+
+
+def user_input_parameters(module):
+ return {
+ "dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "ip_version": module.params.get("ip_version"),
+ "ipv4_address": module.params.get("ipv4_address"),
+ "port_id": module.params.get("port_id"),
+ "shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
+ "type": module.params.get("type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["publicip", "id"])
+
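+
+# Editor's sketch (illustrative, not called anywhere): timeouts such as
+# '5m' are parsed throughout this file as 60 * int(value.rstrip('m')),
+# so only whole minutes are supported. A more defensive equivalent:
+def _timeout_seconds_sketch(value):
+    if not value.endswith('m'):
+        raise ValueError("timeout %r must be whole minutes, e.g. '5m'" % value)
+    return 60 * int(value[:-1])
+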
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ if module.params["port_id"]:
+ module.params["port_id"] = ""
+ update(config)
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "publicips/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
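+    # Editor's note: no separate delete timeout option exists, so the
+    # create timeout is reused here (and in the other hwc modules below).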
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["ip_version"])
+ if v:
+ query_params.append("ip_version=" + str(v))
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
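+
+# For example (editor's note): with ip_version=4 and no enterprise
+# project, this returns "?marker={marker}&limit=10&ip_version=4";
+# search_resource() below fills in {marker} while paging through results
+# ten at a time.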
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "publicips" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_bandwidth(opts, None)
+ if not is_empty_value(v):
+ params["bandwidth"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ return params
+
+
+def expand_create_bandwidth(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ if not (v or sbwid):
+ raise Exception("must input shared_bandwidth_id or "
+ "dedicated_bandwidth")
+
+ if sbwid:
+ return {
+ "id": sbwid,
+ "share_type": "WHOLE"}
+
+ return {
+ "charge_mode": v["charge_mode"],
+ "name": v["name"],
+ "share_type": "PER",
+ "size": v["size"]}
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["ipv4_address"], array_index)
+ if not is_empty_value(v):
+ r["ip_address"] = v
+
+ v = navigate_value(d, ["ip_version"], array_index)
+ if not is_empty_value(v):
+ r["ip_version"] = v
+
+ v = navigate_value(d, ["type"], array_index)
+ if not is_empty_value(v):
+ r["type"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "publicips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "publicip_id": ["publicip", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "publicips/{publicip_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
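+
+# Editor's sketch (an assumption about hwc_utils, not its actual code):
+# wait_to_finish(ok_states, pending_states, refresh_fn, timeout) is used
+# throughout this file as a polling loop whose refresh_fn returns a
+# (object, status) pair. A minimal loop honouring that contract:
+def _wait_to_finish_sketch(ok_states, pending_states, refresh_fn, timeout):
+    import time
+
+    deadline = time.time() + timeout
+    while time.time() < deadline:
+        obj, status = refresh_fn()
+        if status in ok_states:
+            return obj
+        # any other status (pending, or the "" returned above on transient
+        # errors) simply means: sleep and poll again
+        time.sleep(5)
+    raise Exception("timed out waiting for one of %s" % (ok_states,))
+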
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_version"], None)
+ if not is_empty_value(v):
+ params["ip_version"] = v
+
+ v = navigate_value(opts, ["port_id"], None)
+ if v is not None:
+ params["port_id"] = v
+
+ if not params:
+ return params
+
+ params = {"publicip": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "publicips/{id}")
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "publicips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "create_time"], array_index)
+ r["create_time"] = v
+
+ v = r.get("dedicated_bandwidth")
+ v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
+ r["dedicated_bandwidth"] = v
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "ip_version"], array_index)
+ r["ip_version"] = v
+
+ v = navigate_value(response, ["read", "public_ip_address"], array_index)
+ r["ipv4_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "public_ipv6_address"],
+ array_index)
+ r["ipv6_address"] = v
+
+ v = navigate_value(response, ["read", "port_id"], array_index)
+ r["port_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "private_ip_address"],
+ array_index)
+ r["private_ip_address"] = v
+
+ v = r.get("shared_bandwidth_id")
+ v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
+ r["shared_bandwidth_id"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ return r
+
+
+def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+ if not (v and v == "PER"):
+ return current_value
+
+ result = current_value
+ if not result:
+ result = dict()
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+ if v is not None:
+ result["id"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_name"], array_index)
+ if v is not None:
+ result["name"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_size"], array_index)
+ if v is not None:
+ result["size"] = v
+
+ return result if result else current_value
+
+
+def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+
+ v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+
+ return v if (v1 and v1 == "WHOLE") else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_bandwidth_id(all_opts, None)
+ result["bandwidth_id"] = v
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None)
+ result["bandwidth_name"] = v
+
+ result["bandwidth_share_type"] = None
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None)
+ result["bandwidth_size"] = v
+
+ result["create_time"] = None
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_version"], None)
+ result["ip_version"] = v
+
+ v = navigate_value(all_opts, ["port_id"], None)
+ result["port_id"] = v
+
+ result["private_ip_address"] = None
+
+ v = navigate_value(all_opts, ["ipv4_address"], None)
+ result["public_ip_address"] = v
+
+ result["public_ipv6_address"] = None
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ return result
+
+
+def expand_list_bandwidth_id(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ return sbwid
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py
new file mode 100644
index 00000000..a4d5921b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_peering_connect
+description:
+ - vpc peering management.
+short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ local_vpc_id:
+ description:
+ - Specifies the ID of local VPC.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ required: true
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ required: true
+ suboptions:
+ vpc_id:
+ description:
+ - Specifies the ID of peering VPC.
+ type: str
+ required: true
+ project_id:
+ description:
+                    - Specifies the ID of the project to which the peering
+                      VPC belongs.
+ type: str
+ required: false
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a peering connect
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ community.general.hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+'''
+
+RETURN = '''
+ local_vpc_id:
+ description:
+ - Specifies the ID of local VPC.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ returned: success
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ returned: success
+ contains:
+ vpc_id:
+ description:
+ - Specifies the ID of peering VPC.
+ type: str
+ returned: success
+ project_id:
+ description:
+                    - Specifies the ID of the project to which the peering
+                      VPC belongs.
+ type: str
+ returned: success
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ local_vpc_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ peering_vpc=dict(type='dict', required=True, options=dict(
+ vpc_id=dict(type='str', required=True),
+ project_id=dict(type='str')
+ )),
+ description=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "local_vpc_id": module.params.get("local_vpc_id"),
+ "name": module.params.get("name"),
+ "peering_vpc": module.params.get("peering_vpc"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["peering", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["local_vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/peerings" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_accept_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["accept_vpc_info"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_request_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["request_vpc_info"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def expand_create_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ if not is_empty_value(v):
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def expand_create_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = ""
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
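+
+# Editor's note (assumption): request_vpc_info.tenant_id is deliberately
+# sent as an empty string, presumably so the API fills in the caller's
+# own project, whereas accept_vpc_info.tenant_id above comes from the
+# optional peering_vpc.project_id parameter.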
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/peerings"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "peering_id": ["peering", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["peering", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["PENDING_ACCEPTANCE"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peering"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_read_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"],
+ array_index)
+ r["local_vpc_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = r.get("peering_vpc")
+ v = flatten_peering_vpc(response, array_index, v, exclude_output)
+ r["peering_vpc"] = v
+
+ return r
+
+
+def flatten_peering_vpc(d, array_index, current_value, exclude_output):
+ result = current_value
+ has_init_value = True
+ if not result:
+ result = dict()
+ has_init_value = False
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"],
+ array_index)
+ result["project_id"] = v
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index)
+ result["vpc_id"] = v
+
+ if has_init_value:
+ return result
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peerings"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_accept_vpc_info(all_opts, None)
+ result["accept_vpc_info"] = v
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = expand_list_request_vpc_info(all_opts, None)
+ result["request_vpc_info"] = v
+
+ result["status"] = None
+
+ return result
+
+
+def expand_list_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = None
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_list_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py
new file mode 100644
index 00000000..cf0718f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py
@@ -0,0 +1,1160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_port
+description:
+ - vpc port management.
+short_description: Creates a resource of Vpc/Port in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ required: true
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ required: false
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description:
+                    - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ required: false
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ required: false
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ required: false
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ required: false
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ required: false
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ required: false
+ security_groups:
+ description:
+            - Specifies the IDs of the security groups.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ community.general.hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ returned: success
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ returned: success
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+                    - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ returned: success
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ returned: success
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ returned: success
+ contains:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ returned: success
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ returned: success
+ security_groups:
+ description:
+            - Specifies the IDs of the security groups.
+ type: list
+ returned: success
+ mac_address:
+ description:
+ - Specifies the port MAC address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ subnet_id=dict(type='str', required=True),
+ admin_state_up=dict(type='bool'),
+ allowed_address_pairs=dict(
+ type='list', elements='dict',
+ options=dict(
+ ip_address=dict(type='str'),
+ mac_address=dict(type='str')
+ ),
+ ),
+ extra_dhcp_opts=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ value=dict(type='str')
+ )),
+ ip_address=dict(type='str'),
+ name=dict(type='str'),
+ security_groups=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "admin_state_up": module.params.get("admin_state_up"),
+ "allowed_address_pairs": module.params.get("allowed_address_pairs"),
+ "extra_dhcp_opts": module.params.get("extra_dhcp_opts"),
+ "ip_address": module.params.get("ip_address"),
+ "name": module.params.get("name"),
+ "security_groups": module.params.get("security_groups"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["port", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "ports/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ array_index = {
+ "read.fixed_ips": 0,
+ }
+
+ return update_properties(module, res, array_index, exclude_output)
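+
+# Editor's note (assumption about navigate_value): the array_index mapping
+# above, e.g. {"read.fixed_ips": 0}, appears to tell navigate_value which
+# element to take when a path segment is a list -- here, the port's first
+# (and only) fixed IP is surfaced as the flat ip_address property.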
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["subnet_id"])
+ if v:
+ query_params.append("network_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ v = navigate_value(opts, ["admin_state_up"])
+ if v:
+ query_params.append("admin_state_up=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "ports" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_state_up"], None)
+ if not is_empty_value(v):
+ params["admin_state_up"] = v
+
+ v = expand_create_allowed_address_pairs(opts, None)
+ if not is_empty_value(v):
+ params["allowed_address_pairs"] = v
+
+ v = expand_create_extra_dhcp_opts(opts, None)
+ if not is_empty_value(v):
+ params["extra_dhcp_opts"] = v
+
+ v = expand_create_fixed_ips(opts, None)
+ if not is_empty_value(v):
+ params["fixed_ips"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["network_id"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_create_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
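+
+# For example (editor's note):
+#     expand_create_fixed_ips({"ip_address": "192.168.100.33"}, None)
+# yields [{"ip_address": "192.168.100.33"}] -- the port API takes fixed
+# IPs as a list even though this module accepts at most one.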
+
+
+def send_create_request(module, params, client):
+ url = "ports"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "port_id": ["port", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "ports/{port_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["port", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ ["BUILD"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = expand_update_allowed_address_pairs(opts, None)
+ if v is not None:
+ params["allowed_address_pairs"] = v
+
+ v = expand_update_extra_dhcp_opts(opts, None)
+ if v is not None:
+ params["extra_dhcp_opts"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_update_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_update_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "ports/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["port"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_read_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_read_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "admin_state_up"], array_index)
+ r["admin_state_up"] = v
+
+ v = r.get("allowed_address_pairs")
+ v = flatten_allowed_address_pairs(response, array_index, v, exclude_output)
+ r["allowed_address_pairs"] = v
+
+ v = r.get("extra_dhcp_opts")
+ v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output)
+ r["extra_dhcp_opts"] = v
+
+ v = navigate_value(response, ["read", "fixed_ips", "ip_address"],
+ array_index)
+ r["ip_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "mac_address"], array_index)
+ r["mac_address"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "security_groups"], array_index)
+ r["security_groups"] = v
+
+ v = navigate_value(response, ["read", "network_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def flatten_allowed_address_pairs(d, array_index,
+ current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "allowed_address_pairs"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.allowed_address_pairs"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"],
+ new_array_index)
+ val["ip_address"] = v
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"],
+ new_array_index)
+ val["mac_address"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "extra_dhcp_opts"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.extra_dhcp_opts"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"],
+ new_array_index)
+ val["name"] = v
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"],
+ new_array_index)
+ val["value"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
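+
+# Editor's note on the two flatten_* helpers above: they merge the API
+# read-back into the user's original list in place, so user-supplied
+# entries keep their positions, server-side entries are appended only
+# when they carry data, and the user's value is returned unchanged when
+# the server reports nothing.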
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["ports"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["admin_state_up"], None)
+ result["admin_state_up"] = v
+
+ v = expand_list_allowed_address_pairs(all_opts, None)
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = None
+
+ result["binding_vnic_type"] = None
+
+ result["device_id"] = None
+
+ result["device_owner"] = None
+
+ result["dns_name"] = None
+
+ v = expand_list_extra_dhcp_opts(all_opts, None)
+ result["extra_dhcp_opts"] = v
+
+ v = expand_list_fixed_ips(all_opts, None)
+ result["fixed_ips"] = v
+
+ result["id"] = None
+
+ result["mac_address"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["network_id"] = v
+
+ v = navigate_value(all_opts, ["security_groups"], None)
+ result["security_groups"] = v
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ return result
+
+
+def expand_list_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ transformed["mac_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ transformed["opt_value"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ transformed["ip_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_list_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_list_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
new file mode 100644
index 00000000..901755f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_private_ip
+description:
+ - vpc private ip management.
+short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
+notes:
+    - If I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private IP selection.
+    - I(subnet_id) and I(ip_address) are used for private IP selection. If more than one private IP matches these options, execution is aborted.
+    - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned. Cannot be changed after creating the private IP.
+ type: str
+ required: true
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address. Cannot be changed after
+ creating the private IP.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a private ip
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a private ip
+ community.general.hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ subnet_id=dict(type='str', required=True),
+ ip_address=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s)of an"
+ " existing resource.(%s)" % (current, expect, module.params.get('id')))
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "ip_address": module.params.get("ip_address"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["privateips", "id"],
+ {"privateips": 0})
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = build_path(module, "subnets/{subnet_id}/privateips") + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
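+
+# Pagination sketch (ids hypothetical): the loop above substitutes the last
+# seen id into the marker placeholder on each pass, e.g.
+#   subnets/{subnet_id}/privateips?marker=&limit=10
+#   subnets/{subnet_id}/privateips?marker=<id-of-last-item>&limit=10
+# and stops early once more than one match accumulates, since the caller
+# treats multiple matches as an error.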
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_address"], None)
+ if not is_empty_value(v):
+ params["ip_address"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["subnet_id"] = v
+
+ if not params:
+ return params
+
+ params = {"privateips": [params]}
+
+ return params
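+
+# Illustrative request body produced above (values hypothetical):
+#   {"privateips": [{"subnet_id": "<subnet-id>", "ip_address": "192.168.100.33"}]}
+# The endpoint takes a list of private IPs; this module always sends a single
+# element, which is why create() reads the id back at index 0.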
+
+
+def send_create_request(module, params, client):
+ url = "privateips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "privateips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "privateips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "ip_address"], array_index)
+ r["ip_address"] = v
+
+ v = navigate_value(response, ["read", "subnet_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_address"], None)
+ result["ip_address"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["subnet_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
new file mode 100644
index 00000000..31829dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_route
+description:
+ - VPC route management.
+short_description: Creates a resource of Vpc/Route in Huawei Cloud
+notes:
+ - If the I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection.
+ - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route matching these options exists, execution is aborted.
+ - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ required: true
+ next_hop:
+ description:
+ - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the VPC ID to which the route is added.
+ type: str
+ required: true
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ required: false
+ default: 'peering'
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a peering connect
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ register: connect
+- name: Create a route
+ community.general.hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+'''
+
+RETURN = '''
+ id:
+ description:
+ - UUID of the route.
+ type: str
+ returned: success
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ returned: success
+ next_hop:
+ description:
+ - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the VPC ID to which the route is added.
+ type: str
+ returned: success
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ destination=dict(type='str', required=True),
+ next_hop=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ type=dict(type='str', default='peering'),
+ id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = get_resource_by_id(config)
+ if module.params['state'] == 'present':
+ opts = user_input_parameters(module)
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing route.(%s)" % (resource, opts,
+ config.module.params.get(
+ 'id')))
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "destination": module.params.get("destination"),
+ "next_hop": module.params.get("next_hop"),
+ "type": module.params.get("type"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["route", "id"])
+
+ result = update_properties(module, {"read": fill_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+
+def get_resource_by_id(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_resp_body(r)
+
+ result = update_properties(module, res, None, exclude_output)
+ return result
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["type"])
+ if v:
+ query_params.append("type=" + str(v))
+
+ v = navigate_value(opts, ["destination"])
+ if v:
+ query_params.append("destination=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
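+
+# Illustrative query link when all filters are set (values hypothetical):
+#   ?marker={marker}&limit=10&type=peering&destination=192.168.0.0/16&vpc_id=<vpc-id>
+# The {marker} placeholder is filled in by search_resource() while paging.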
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/routes" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["destination"], None)
+ if not is_empty_value(v):
+ params["destination"] = v
+
+ v = navigate_value(opts, ["next_hop"], None)
+ if not is_empty_value(v):
+ params["nexthop"] = v
+
+ v = navigate_value(opts, ["type"], None)
+ if not is_empty_value(v):
+ params["type"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"route": params}
+
+ return params
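+
+# Illustrative request body (values hypothetical). Note the module option
+# next_hop maps to the API field "nexthop":
+#   {"route": {"destination": "192.168.0.0/16", "nexthop": "<peering-id>",
+#              "type": "peering", "vpc_id": "<vpc-id>"}}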
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/routes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["route"], None)
+
+
+def fill_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "destination"], array_index)
+ r["destination"] = v
+
+ v = navigate_value(response, ["read", "nexthop"], array_index)
+ r["next_hop"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["routes"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["destination"], None)
+ result["destination"] = v
+
+ v = navigate_value(all_opts, ["id"], None)
+ result["id"] = v
+
+ v = navigate_value(all_opts, ["next_hop"], None)
+ result["nexthop"] = v
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
new file mode 100644
index 00000000..60351815
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
@@ -0,0 +1,645 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group
+description:
+ - VPC security group management.
+short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+notes:
+ - If the I(id) option is provided, it takes precedence over I(name),
+ I(enterprise_project_id) and I(vpc_id) for security group selection.
+ - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
+ group selection. If more than one security group matching these options
+ exists, execution is aborted.
+ - No parameter supports updating. If any option is changed, the module
+ will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ required: false
+ default: 0
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group
+- name: Create a security group
+ community.general.hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+'''
+
+RETURN = '''
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ returned: success
+ rules:
+ description:
+ - Specifies the security group rule, which ensures that resources
+ in the security group can communicate with one another.
+ type: complex
+ returned: success
+ contains:
+ description:
+ description:
+ - Provides supplementary information about the security
+ group rule.
+ type: str
+ returned: success
+ direction:
+ description:
+ - Specifies the direction of access control. The value can
+ be egress or ingress.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4
+ or IPv6.
+ type: str
+ returned: success
+ id:
+ description:
+ - Specifies the security group rule ID.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to
+ 65535. If the protocol is not icmp, the value cannot be
+ smaller than the port_range_min value. An empty value
+ indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1
+ to 65535. The value cannot be greater than the
+ port_range_max value. An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp,
+ udp, or others. If the parameter is left blank, the
+ security group supports all protocols.
+ type: str
+ returned: success
+ remote_address_group_id:
+ description:
+ - Specifies the ID of remote IP address group.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control
+ direction is set to egress, the parameter specifies the
+ source IP address. If the access control direction is set
+ to ingress, the parameter specifies the destination IP
+ address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ name=dict(type='str', required=True),
+ enterprise_project_id=dict(type='str'),
+ vpc_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = read_resource(config)
+ if module.params['state'] == 'present':
+ check_resource_option(resource, module)
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def check_resource_option(resource, module):
+ opts = user_input_parameters(module)
+
+ resource = {
+ "enterprise_project_id": resource.get("enterprise_project_id"),
+ "name": resource.get("name"),
+ "vpc_id": resource.get("vpc_id"),
+ "id": resource.get("id"),
+ }
+
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (resource, opts,
+ module.params.get('id')))
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group", "id"])
+
+ result = update_properties(module, {"read": fill_read_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-groups" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group": params}
+
+ return params
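+
+# Illustrative request body for the minimal EXAMPLES task above: empty
+# options are skipped, so only the name is sent:
+#   {"security_group": {"name": "ansible_network_security_group_test"}}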
+
+
+def send_create_request(module, params, client):
+ url = "security-groups"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-groups/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-groups/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ if not exclude_output:
+ v = r.get("rules")
+ v = flatten_rules(response, array_index, v, exclude_output)
+ r["rules"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def flatten_rules(d, array_index, current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "security_group_rules"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.security_group_rules"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "description"],
+ new_array_index)
+ val["description"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "direction"],
+ new_array_index)
+ val["direction"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "ethertype"],
+ new_array_index)
+ val["ethertype"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "id"],
+ new_array_index)
+ val["id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_max"],
+ new_array_index)
+ val["port_range_max"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_min"],
+ new_array_index)
+ val["port_range_min"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "protocol"],
+ new_array_index)
+ val["protocol"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"],
+ new_array_index)
+ val["remote_address_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"],
+ new_array_index)
+ val["remote_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"],
+ new_array_index)
+ val["remote_ip_prefix"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
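+
+# flatten_rules() merges API-returned rules into any existing "rules" value:
+# entry i of the existing list is updated in place, while surplus API entries
+# are appended only when at least one of their fields is not None. With no
+# initial value and no data it returns current_value unchanged, so an empty
+# read never clobbers previously flattened state.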
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_groups"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["security_group_rules"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
new file mode 100644
index 00000000..f92c8276
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
@@ -0,0 +1,570 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group_rule
+description:
+ - VPC security group rule management.
+short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+notes:
+ - If the I(id) option is provided, it takes precedence over
+ I(security_group_id) for security group rule selection.
+ - I(security_group_id) is used for security group rule selection. If more
+ than one security group rule matching this option exists, execution is
+ aborted.
+ - No parameter supports updating. If any option is changed, the module
+ will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ required: true
+ security_group_id:
+ description:
+ - Specifies the security group ID, which uniquely identifies the
+ security group to which the rule belongs.
+ type: str
+ required: true
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ required: false
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ required: false
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ required: false
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ required: false
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ required: false
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ required: false
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group rule
+- name: Create a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ register: sg
+- name: Create a security group rule
+ community.general.hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 22
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+'''
+
+RETURN = '''
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ returned: success
+ security_group_id:
+ description:
+ - Specifies the security group ID, which uniquely identifies the
+ security group to which the rule belongs.
+ type: str
+ returned: success
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ direction=dict(type='str', required=True),
+ security_group_id=dict(type='str', required=True),
+ description=dict(type='str'),
+ ethertype=dict(type='str'),
+ port_range_max=dict(type='int'),
+ port_range_min=dict(type='int'),
+ protocol=dict(type='str'),
+ remote_group_id=dict(type='str'),
+ remote_ip_prefix=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (current, expect, module.params.get('id')))
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "direction": module.params.get("direction"),
+ "ethertype": module.params.get("ethertype"),
+ "port_range_max": module.params.get("port_range_max"),
+ "port_range_min": module.params.get("port_range_min"),
+ "protocol": module.params.get("protocol"),
+ "remote_group_id": module.params.get("remote_group_id"),
+ "remote_ip_prefix": module.params.get("remote_ip_prefix"),
+ "security_group_id": module.params.get("security_group_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group_rule", "id"])
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["security_group_id"])
+ if v:
+ query_link += "&security_group_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-group-rules" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["direction"], None)
+ if not is_empty_value(v):
+ params["direction"] = v
+
+ v = navigate_value(opts, ["ethertype"], None)
+ if not is_empty_value(v):
+ params["ethertype"] = v
+
+ v = navigate_value(opts, ["port_range_max"], None)
+ if not is_empty_value(v):
+ params["port_range_max"] = v
+
+ v = navigate_value(opts, ["port_range_min"], None)
+ if not is_empty_value(v):
+ params["port_range_min"] = v
+
+ v = navigate_value(opts, ["protocol"], None)
+ if not is_empty_value(v):
+ params["protocol"] = v
+
+ v = navigate_value(opts, ["remote_group_id"], None)
+ if not is_empty_value(v):
+ params["remote_group_id"] = v
+
+ v = navigate_value(opts, ["remote_ip_prefix"], None)
+ if not is_empty_value(v):
+ params["remote_ip_prefix"] = v
+
+ v = navigate_value(opts, ["security_group_id"], None)
+ if not is_empty_value(v):
+ params["security_group_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group_rule": params}
+
+ return params
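+
+# Illustrative request body for the EXAMPLES task above (the security group
+# id is hypothetical):
+#   {"security_group_rule": {"direction": "ingress", "ethertype": "IPv4",
+#       "port_range_min": 22, "port_range_max": 22, "protocol": "tcp",
+#       "remote_ip_prefix": "0.0.0.0/0", "security_group_id": "<sg-id>"}}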
+
+
+def send_create_request(module, params, client):
+ url = "security-group-rules"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rule"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "direction"], array_index)
+ r["direction"] = v
+
+ v = navigate_value(response, ["read", "ethertype"], array_index)
+ r["ethertype"] = v
+
+ v = navigate_value(response, ["read", "port_range_max"], array_index)
+ r["port_range_max"] = v
+
+ v = navigate_value(response, ["read", "port_range_min"], array_index)
+ r["port_range_min"] = v
+
+ v = navigate_value(response, ["read", "protocol"], array_index)
+ r["protocol"] = v
+
+ v = navigate_value(response, ["read", "remote_group_id"], array_index)
+ r["remote_group_id"] = v
+
+ v = navigate_value(response, ["read", "remote_ip_prefix"], array_index)
+ r["remote_ip_prefix"] = v
+
+ v = navigate_value(response, ["read", "security_group_id"], array_index)
+ r["security_group_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rules"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["direction"], None)
+ result["direction"] = v
+
+ v = navigate_value(all_opts, ["ethertype"], None)
+ result["ethertype"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["port_range_max"], None)
+ result["port_range_max"] = v
+
+ v = navigate_value(all_opts, ["port_range_min"], None)
+ result["port_range_min"] = v
+
+ v = navigate_value(all_opts, ["protocol"], None)
+ result["protocol"] = v
+
+ result["remote_address_group_id"] = None
+
+ v = navigate_value(all_opts, ["remote_group_id"], None)
+ result["remote_group_id"] = v
+
+ v = navigate_value(all_opts, ["remote_ip_prefix"], None)
+ result["remote_ip_prefix"] = v
+
+ v = navigate_value(all_opts, ["security_group_id"], None)
+ result["security_group_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
new file mode 100644
index 00000000..ccf18050
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
@@ -0,0 +1,734 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_subnet
+description:
+ - VPC subnet management.
+short_description: Creates a resource of Vpc/Subnet in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '15m'
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs. Cannot
+ be changed after creating the subnet.
+ type: str
+ required: true
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs. Cannot be changed
+ after creating the subnet.
+ type: str
+ required: false
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+ be true (enabled) or false (disabled); the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ required: false
+ dns_address:
+ description:
+ - Specifies the DNS server addresses for the subnet. The address
+ at the head of the list is used first.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create subnet
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ community.general.hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+'''
+
+RETURN = '''
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28.
+ type: str
+ returned: success
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs.
+ type: str
+ returned: success
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs.
+ type: str
+ returned: success
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+ be true (enabled) or false (disabled); the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ returned: success
+ dns_address:
+ description:
+ - Specifies the DNS server addresses for the subnet. The address
+ at the head of the list is used first.
+ type: list
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ ), default=dict()),
+ cidr=dict(type='str', required=True),
+ gateway_ip=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ availability_zone=dict(type='str'),
+ dhcp_enable=dict(type='bool'),
+ dns_address=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
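+    # Locate any existing subnet first: an explicit 'id' parameter is trusted
+    # as-is, otherwise the identity fields from the playbook are matched
+    # against the API listing, failing when more than one subnet matches.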
+ try:
+ resource = None
+ if module.params.get('id'):
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
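+        # Converge on the requested state; in check mode the change is
+        # reported without calling the create/update/delete APIs.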
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "cidr": module.params.get("cidr"),
+ "dhcp_enable": module.params.get("dhcp_enable"),
+ "dns_address": module.params.get("dns_address"),
+ "gateway_ip": module.params.get("gateway_ip"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
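+# Create the subnet via POST, then poll it until it reaches ACTIVE (or the
+# create timeout expires) before recording its id in module.params.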
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["subnet", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "subnets/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
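+    # No separate delete timeout is defined, so the create timeout is reused
+    # while polling for the subnet to disappear (a 404 means it is gone).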
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_link += "&vpc_id=" + str(v)
+
+ return query_link
+
+
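+# Page through the subnet listing (10 per request, advancing the marker) and
+# collect every subnet whose attributes match the module parameters,
+# stopping early once more than one match is found.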
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "subnets" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["cidr"], None)
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_create_dns_list(opts, None)
+ if not is_empty_value(v):
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["gateway_ip"], None)
+ if not is_empty_value(v):
+ params["gateway_ip"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_primary_dns(opts, None)
+ if not is_empty_value(v):
+ params["primary_dns"] = v
+
+ v = expand_create_secondary_dns(opts, None)
+ if not is_empty_value(v):
+ params["secondary_dns"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
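+# The VPC API takes the first DNS address as primary_dns and the second as
+# secondary_dns; the full dnsList field is only sent when more than two
+# addresses are supplied. The expand_* helpers below implement that split.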
+def expand_create_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v if (v and len(v) > 2) else []
+
+
+def expand_create_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_create_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_create_request(module, params, client):
+ url = "subnets"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_update_dns_list(opts, None)
+ if v is not None:
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_update_primary_dns(opts, None)
+ if v is not None:
+ params["primary_dns"] = v
+
+ v = expand_update_secondary_dns(opts, None)
+ if v is not None:
+ params["secondary_dns"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
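+# The update variant differs from the create one: returning None leaves
+# dnsList untouched (one or two addresses travel as primary/secondary
+# instead), while an empty dns_address clears dnsList with [].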
+def expand_update_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ if v:
+ if len(v) > 2:
+ return v
+ return None
+ return []
+
+
+def expand_update_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_update_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "subnets/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnet"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
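+# Map the API response back onto the module's parameter names (for example
+# the API field dnsList becomes dns_address) so that it can be compared
+# directly against the user input.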
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "cidr"], array_index)
+ r["cidr"] = v
+
+ v = navigate_value(response, ["read", "dhcp_enable"], array_index)
+ r["dhcp_enable"] = v
+
+ v = navigate_value(response, ["read", "dnsList"], array_index)
+ r["dns_address"] = v
+
+ v = navigate_value(response, ["read", "gateway_ip"], array_index)
+ r["gateway_ip"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnets"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["availability_zone"] = v
+
+ v = navigate_value(all_opts, ["cidr"], None)
+ result["cidr"] = v
+
+ v = navigate_value(all_opts, ["dhcp_enable"], None)
+ result["dhcp_enable"] = v
+
+ v = navigate_value(all_opts, ["dns_address"], None)
+ result["dnsList"] = v
+
+ v = navigate_value(all_opts, ["gateway_ip"], None)
+ result["gateway_ip"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["neutron_network_id"] = None
+
+ result["neutron_subnet_id"] = None
+
+ result["primary_dns"] = None
+
+ result["secondary_dns"] = None
+
+ result["status"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py
new file mode 100644
index 00000000..29690497
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_domain
+short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ domain:
+ description:
+ - Name of the domain to be managed.
+ required: true
+ state:
+ description:
+ - The desired state of the domain.
+ default: "present"
+ choices: [ "present", "absent" ]
+ ldap_id:
+ description:
+            - LDAP ID to add to the domain.
+ required: false
+ size:
+ description:
+ - Size of the domain.
+ required: false
+ hard_capacity:
+ description:
+ - Hard capacity of the domain.
+ required: false
+ soft_capacity:
+ description:
+ - Soft capacity of the domain.
+ required: false
+ max_cgs:
+ description:
+            - Maximum number of consistency groups (CGs).
+ required: false
+ max_dms:
+ description:
+            - Maximum number of data migrations (DMs).
+ required: false
+ max_mirrors:
+ description:
+            - Maximum number of mirrors.
+ required: false
+ max_pools:
+ description:
+            - Maximum number of pools.
+ required: false
+ max_volumes:
+ description:
+            - Maximum number of volumes.
+ required: false
+ perf_class:
+ description:
+ - Add the domain to a performance class.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ size: domain_size
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+msg:
+ description: module return status.
+ returned: as needed
+ type: str
+ sample: "domain 'domain_name' created successfully."
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ domain=dict(required=True),
+ size=dict(),
+ max_dms=dict(),
+ max_cgs=dict(),
+ ldap_id=dict(),
+ max_mirrors=dict(),
+ max_pools=dict(),
+ max_volumes=dict(),
+ perf_class=dict(),
+ hard_capacity=dict(),
+ soft_capacity=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ domain = xcli_client.cmd.domain_list(
+ domain=module.params['domain']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ msg = 'Domain \'{0}\''.format(module.params['domain'])
+ if state == 'present' and not domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_create', xcli_client)
+ msg += " created successfully."
+ elif state == 'absent' and domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_delete', xcli_client)
+ msg += " deleted successfully."
+ else:
+ msg += " state unchanged."
+
+ module.exit_json(changed=state_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py
new file mode 100644
index 00000000..5ce12992
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host
+short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ cluster:
+ description:
+ - The name of the cluster to include the host.
+ required: false
+ domain:
+ description:
+            - The domains the host will be attached to.
+              To include more than one domain, separate domain names
+              with commas. To include all existing domains, use an
+              asterisk ("*").
+ required: false
+ iscsi_chap_name:
+ description:
+            - The host's CHAP name identifier.
+ required: false
+ iscsi_chap_secret:
+ description:
+            - The password of the initiator used to
+              authenticate to the system when CHAP is enabled.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ cluster=dict(),
+ domain=dict(),
+ iscsi_chap_name=dict(),
+ iscsi_chap_secret=dict(no_log=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ host = xcli_client.cmd.host_list(
+ host=module.params['host']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_define', xcli_client)
+ elif state == 'absent' and host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py
new file mode 100644
index 00000000..981bc553
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host_ports
+short_description: Add or remove host ports on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds ports to or removes them from the hosts
+ on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host ports state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ iscsi_name:
+ description:
+ - iSCSI initiator name.
+ required: false
+ fcaddress:
+ description:
+            - Fibre Channel address.
+ required: false
+ num_of_visible_targets:
+ description:
+ - Number of visible targets.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Add ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Remove ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl,
+ spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ iscsi_name=dict(),
+ fcaddress=dict(),
+ num_of_visible_targets=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ ports = []
+ try:
+ ports = xcli_client.cmd.host_list_ports(
+ host=module.params.get('host')).as_list
+ except Exception:
+ pass
+ state = module.params['state']
+ port_exists = False
+ ports = [port.get('port_name') for port in ports]
+
+ fc_ports = (module.params.get('fcaddress')
+ if module.params.get('fcaddress') else [])
+ iscsi_ports = (module.params.get('iscsi_name')
+ if module.params.get('iscsi_name') else [])
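+    # fcaddress and iscsi_name arrive as plain strings here, so the
+    # membership tests below are effectively substring matches against the
+    # supplied value rather than list lookups.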
+ for port in ports:
+ if port in iscsi_ports or port in fc_ports:
+ port_exists = True
+ break
+ state_changed = False
+ if state == 'present' and not port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_add_port', xcli_client)
+ if state == 'absent' and port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_remove_port', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
new file mode 100644
index 00000000..812904eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_pool
+short_description: Handles pools on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems"
+
+options:
+ pool:
+ description:
+ - Pool name.
+ required: true
+ state:
+ description:
+ - Pool state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+            - Pool size in GB.
+ required: false
+ snapshot_size:
+ description:
+            - Pool snapshot size in GB.
+ required: false
+ domain:
+ description:
+ - Adds the pool to the specified domain.
+ required: false
+ perf_class:
+ description:
+ - Assigns a perf_class to the pool.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create new pool.
+ community.general.ibm_sa_pool:
+ name: pool_name
+ size: 300
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete pool.
+ community.general.ibm_sa_pool:
+ name: pool_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ pool=dict(required=True),
+ size=dict(),
+ snapshot_size=dict(),
+ domain=dict(),
+ perf_class=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ pool = xcli_client.cmd.pool_list(
+ pool=module.params['pool']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_create', xcli_client)
+ if state == 'absent' and pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
new file mode 100644
index 00000000..bf578cee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol
+short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ pool:
+ description:
+ - Volume pool.
+ required: false
+ state:
+ description:
+ - Volume state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+ - Volume size.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create a new volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ pool: pool_name
+ size: 17
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete an existing volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ pool=dict(),
+ size=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ volume = xcli_client.cmd.vol_list(
+ vol=module.params.get('vol')).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_create', xcli_client)
+ elif state == 'absent' and volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py
new file mode 100644
index 00000000..f1f5a807
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol_map
+short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module maps volumes to or unmaps them from the hosts on
+ IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ state:
+ default: "present"
+ choices: [ "present", "absent" ]
+ description:
+            - When the state is present, the volume is mapped;
+              when the state is absent, the volume is unmapped.
+ cluster:
+ description:
+ - Maps the volume to a cluster.
+ required: false
+ host:
+ description:
+ - Maps the volume to a host.
+ required: false
+ lun:
+ description:
+ - The LUN identifier.
+ required: false
+ override:
+ description:
+ - Overrides the existing volume mapping.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Map volume to host.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Map volume to cluster.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ cluster: cluster_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Unmap volume.
+ community.general.ibm_sa_vol_map:
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command,
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ lun=dict(),
+ cluster=dict(),
+ host=dict(),
+ override=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ mapping = False
+ try:
+ mapped_hosts = xcli_client.cmd.vol_mapping_list(
+ vol=module.params.get('vol')).as_list
+ for host in mapped_hosts:
+ if host['host'] == module.params.get("host", ""):
+ mapping = True
+ except Exception:
+ pass
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not mapping:
+ state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client)
+ if state == 'absent' and mapping:
+ state_changed = execute_pyxcli_command(
+ module, 'unmap_vol', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py
new file mode 100644
index 00000000..b59c0e11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Copyright (c) 2018, Ansible Project
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_feature
+
+short_description: Manage Icinga2 feature
+description:
+ - This module can be used to enable or disable an Icinga2 feature.
+author: "Loic Blot (@nerzhul)"
+options:
+ name:
+ type: str
+ description:
+ - This is the feature name to enable or disable.
+ required: True
+ state:
+ type: str
+ description:
+ - If set to C(present) and feature is disabled, then feature is enabled.
+ - If set to C(present) and feature is already enabled, then nothing is changed.
+ - If set to C(absent) and feature is enabled, then feature is disabled.
+ - If set to C(absent) and feature is already disabled, then nothing is changed.
+ choices: [ "present", "absent" ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Enable ido-pgsql feature
+ community.general.icinga2_feature:
+ name: ido-pgsql
+ state: present
+
+- name: Disable api feature
+ community.general.icinga2_feature:
+ name: api
+ state: absent
+'''
+
+RETURN = '''
+#
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Icinga2FeatureHelper:
+ def __init__(self, module):
+ self.module = module
+ self._icinga2 = module.get_bin_path('icinga2', True)
+ self.feature_name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ def _exec(self, args):
+ cmd = [self._icinga2, 'feature']
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return rc, out
+
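+    # manage() drives the whole lifecycle: list the current features,
+    # short-circuit when the requested state already holds, then run
+    # enable/disable and inspect the CLI output to decide whether a
+    # change was actually applied.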
+ def manage(self):
+ rc, out = self._exec(["list"])
+ if rc != 0:
+ self.module.fail_json(msg="Unable to list icinga2 features. "
+ "Ensure icinga2 is installed and present in binary path.")
+
+ # If feature is already in good state, just exit
+ if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \
+ (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"):
+ self.module.exit_json(changed=False)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ feature_enable_str = "enable" if self.state == "present" else "disable"
+
+ rc, out = self._exec([feature_enable_str, self.feature_name])
+
+ change_applied = False
+ if self.state == "present":
+ if rc != 0:
+ self.module.fail_json(msg="Failed to %s feature %s."
+ " icinga2 command returned %s" % (feature_enable_str,
+ self.feature_name,
+ out))
+
+ if re.search("already enabled", out) is None:
+ change_applied = True
+ else:
+ if rc == 0:
+ change_applied = True
+            # rc is non-zero when the feature is already disabled; treat it as no change applied
+ elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out):
+ change_applied = False
+ else:
+ self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out)
+
+ self.module.exit_json(changed=change_applied)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=["present", "absent"], default="present")
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+ Icinga2FeatureHelper(module).manage()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py
new file mode 100644
index 00000000..65c95812
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This module is proudly sponsored by CGI (www.cgi.com) and
+# KPN (www.kpn.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_host
+short_description: Manage a host in Icinga2
+description:
+ - "Add or remove a host to Icinga2 through the API."
+ - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
+author: "Jurgen Brand (@t794104)"
+options:
+ url:
+ type: str
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ url_username:
+ type: str
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ url_password:
+ type: str
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ force_basic_auth:
+ description:
+      - httplib2, the library used by the uri module, only sends authentication information when a webservice
+        responds to an initial request with a 401 status. Since some basic auth services do not properly
+        send a 401, logins will fail. This option forces the sending of the Basic authentication header
+        upon the initial request.
+ type: bool
+ default: 'no'
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client
+ authentication. This file can also include the key as well, and if
+ the key is included, C(client_key) is not required.
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL
+ client authentication. If C(client_cert) contains both the certificate
+ and key, this option is not required.
+ state:
+ type: str
+ description:
+      - Apply host state.
+ choices: [ "present", "absent" ]
+ default: present
+ name:
+ type: str
+ description:
+      - Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique.
+ required: true
+ zone:
+ type: str
+ description:
+ - The zone from where this host should be polled.
+ template:
+ type: str
+ description:
+ - The template used to define the host.
+ - Template cannot be modified after object creation.
+ check_command:
+ type: str
+ description:
+ - The command used to check if the host is alive.
+ default: "hostalive"
+ display_name:
+ type: str
+ description:
+ - The name used to display the host.
+ - If not specified, it defaults to the value of the I(name) parameter.
+ ip:
+ type: str
+ description:
+ - The IP address of the host.
+ required: true
+ variables:
+ type: dict
+ description:
+ - Dictionary of variables.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Add host to icinga
+ community.general.icinga2_host:
+ url: "https://icinga2.example.com"
+ url_username: "ansible"
+ url_password: "a_secret"
+ state: present
+ name: "{{ ansible_fqdn }}"
+ ip: "{{ ansible_default_ipv4.address }}"
+ variables:
+ foo: "bar"
+ delegate_to: 127.0.0.1
+'''
+
+RETURN = '''
+name:
+ description: The name used to create, modify or delete the host
+ type: str
+ returned: always
+data:
+ description: The data structure used for create, modify or delete of the host
+ type: dict
+ returned: always
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+# ===========================================
+# Icinga2 API class
+#
+class icinga2_api:
+ module = None
+
+ def __init__(self, module):
+ self.module = module
+
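+    # All API traffic funnels through fetch_url; the X-HTTP-Method-Override
+    # header mirrors the HTTP method, which lets lookups issued as GET (such
+    # as the filter query in exists()) still carry a JSON body.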
+ def call_url(self, path, data='', method='GET'):
+ headers = {
+ 'Accept': 'application/json',
+ 'X-HTTP-Method-Override': method,
+ }
+ url = self.module.params.get("url") + "/" + path
+ rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy'])
+ body = ''
+ if rsp:
+ body = json.loads(rsp.read())
+ if info['status'] >= 400:
+ body = info['body']
+ return {'code': info['status'], 'data': body}
+
+ def check_connection(self):
+ ret = self.call_url('v1/status')
+ if ret['code'] == 200:
+ return True
+ return False
+
+ def exists(self, hostname):
+ data = {
+ "filter": "match(\"" + hostname + "\", host.name)",
+ }
+ ret = self.call_url(
+ path="v1/objects/hosts",
+ data=self.module.jsonify(data)
+ )
+ if ret['code'] == 200:
+ if len(ret['data']['results']) == 1:
+ return True
+ return False
+
+ def create(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="PUT"
+ )
+ return ret
+
+ def delete(self, hostname):
+ data = {"cascade": 1}
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="DELETE"
+ )
+ return ret
+
+ def modify(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="POST"
+ )
+ return ret
+
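+    # One-way comparison: a change is reported when any attribute managed by
+    # the module differs from, or is missing in, the object stored in
+    # Icinga2; extra server-side attributes are ignored.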
+ def diff(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ method="GET"
+ )
+ changed = False
+ ic_data = ret['data']['results'][0]
+ for key in data['attrs']:
+ if key not in ic_data['attrs'].keys():
+ changed = True
+ elif data['attrs'][key] != ic_data['attrs'][key]:
+ changed = True
+ return changed
+
+
+# ===========================================
+# Module execution.
+#
+def main():
+ # use the predefined argument spec for url
+ argument_spec = url_argument_spec()
+ # add our own arguments
+ argument_spec.update(
+ state=dict(default="present", choices=["absent", "present"]),
+ name=dict(required=True, aliases=['host']),
+ zone=dict(),
+ template=dict(default=None),
+ check_command=dict(default="hostalive"),
+ display_name=dict(default=None),
+ ip=dict(required=True),
+ variables=dict(type='dict', default=None),
+ )
+
+ # Define the main module
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ name = module.params["name"]
+ zone = module.params["zone"]
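+    # The host object is built from a template list that starts with its own
+    # name, optionally followed by the user-supplied template.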
+ template = [name]
+ if module.params["template"]:
+ template.append(module.params["template"])
+ check_command = module.params["check_command"]
+ ip = module.params["ip"]
+ display_name = module.params["display_name"]
+ if not display_name:
+ display_name = name
+ variables = module.params["variables"]
+
+ try:
+ icinga = icinga2_api(module=module)
+ icinga.check_connection()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
+
+ data = {
+ 'attrs': {
+ 'address': ip,
+ 'display_name': display_name,
+ 'check_command': check_command,
+ 'zone': zone,
+ 'vars': {
+ 'made_by': "ansible",
+ },
+ 'templates': template,
+ }
+ }
+
+ if variables:
+ data['attrs']['vars'].update(variables)
+
+ changed = False
+ if icinga.exists(name):
+ if state == "absent":
+ if module.check_mode:
+ module.exit_json(changed=True, name=name, data=data)
+ else:
+ try:
+ ret = icinga.delete(name)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception deleting host: " + str(e))
+
+ elif icinga.diff(name, data):
+ if module.check_mode:
+ module.exit_json(changed=False, name=name, data=data)
+
+ # Template attribute is not allowed in modification
+ del data['attrs']['templates']
+
+ ret = icinga.modify(name, data)
+
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data']))
+
+ else:
+ if state == "present":
+ if module.check_mode:
+ changed = True
+ else:
+ try:
+ ret = icinga.create(name, data)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception creating host: " + str(e))
+
+ module.exit_json(changed=changed, name=name, data=data)
+
+
+# import module snippets
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py
new file mode 100644
index 00000000..756b6cf9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_config
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage Global FreeIPA Configuration Settings
+description:
+- Modify global configuration settings of a FreeIPA Server.
+options:
+ ipadefaultloginshell:
+ description: Default shell for new users.
+ aliases: ["loginshell"]
+ type: str
+ ipadefaultemaildomain:
+ description: Default e-mail domain for new users.
+ aliases: ["emaildomain"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure the default login shell is bash.
+ community.general.ipa_config:
+ ipadefaultloginshell: /bin/bash
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default e-mail domain is ansible.com.
+ community.general.ipa_config:
+ ipadefaultemaildomain: ansible.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+'''
+
+RETURN = r'''
+config:
+ description: Configuration as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ConfigIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ConfigIPAClient, self).__init__(module, host, port, protocol)
+
+ def config_show(self):
+ return self._post_json(method='config_show', name=None)
+
+ def config_mod(self, name, item):
+ return self._post_json(method='config_mod', name=name, item=item)
+
+
+def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None):
+ config = {}
+ if ipadefaultloginshell is not None:
+ config['ipadefaultloginshell'] = ipadefaultloginshell
+ if ipadefaultemaildomain is not None:
+ config['ipadefaultemaildomain'] = ipadefaultemaildomain
+
+ return config
+
+
+def get_config_diff(client, ipa_config, module_config):
+ return client.get_diff(ipa_data=ipa_config, module_data=module_config)
+
+
+def ensure(module, client):
+ module_config = get_config_dict(
+ ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
+ ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
+ )
+ ipa_config = client.config_show()
+ diff = get_config_diff(client, ipa_config, module_config)
+
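+    # Only the keys that actually differ are collected into new_config, so
+    # config_mod never rewrites a setting that is already correct.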
+ changed = False
+ new_config = {}
+ for module_key in diff:
+ if module_config.get(module_key) != ipa_config.get(module_key, None):
+ changed = True
+ new_config.update({module_key: module_config.get(module_key)})
+
+ if changed and not module.check_mode:
+ client.config_mod(name=None, item=new_config)
+
+ return changed, client.config_show()
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
+ ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = ConfigIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py
new file mode 100644
index 00000000..635bf2ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnsrecord
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA DNS records
+description:
+- Add, modify, and delete an IPA DNS record using the IPA API.
+options:
+ zone_name:
+ description:
+    - The DNS zone name in which the DNS record is managed.
+ required: true
+ type: str
+ record_name:
+ description:
+ - The DNS record name to manage.
+ required: true
+ aliases: ["name"]
+ type: str
+ record_type:
+ description:
+ - The type of DNS record name.
+ - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported.
+ - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5."
+ - "'SRV' and 'MX' are added in version 2.8."
+ required: false
+ default: 'A'
+ choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT']
+ type: str
+ record_value:
+ description:
+ - Manage DNS record name with this value.
+ - In the case of 'A' or 'AAAA' record types, this will be the IP address.
+ - In the case of 'A6' record type, this will be the A6 Record data.
+ - In the case of 'CNAME' record type, this will be the hostname.
+ - In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'PTR' record type, this will be the hostname.
+ - In the case of 'TXT' record type, this will be a text.
+ - In the case of 'SRV' record type, this will be a service record.
+ - In the case of 'MX' record type, this will be a mail exchanger record.
+ required: true
+ type: str
+ record_ttl:
+ description:
+ - Set the TTL for the record.
+    - Applies only when adding a new record or changing the value of record_value.
+ required: false
+ type: int
+ state:
+    description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: vm-001
+ record_type: 'AAAA'
+ record_value: '::1'
+
+- name: Ensure that dns record exists with a TTL
+ community.general.ipa_dnsrecord:
+ name: host02
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ record_ttl: 300
+ ipa_host: ipa.example.com
+ ipa_pass: topsecret
+ state: present
+
+- name: Ensure a PTR record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: 2.168.192.in-addr.arpa
+ record_name: 5
+ record_type: 'PTR'
+ record_value: 'internal.ipa.example.com'
+
+- name: Ensure a TXT record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos
+ record_type: 'TXT'
+ record_value: 'EXAMPLE.COM'
+
+- name: Ensure an SRV record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos._udp.example.com
+ record_type: 'SRV'
+ record_value: '10 50 88 ipa.example.com'
+
+- name: Ensure an MX record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: '@'
+ record_type: 'MX'
+ record_value: '1 mailserver.example.com'
+
+- name: Ensure that dns record is removed
+ community.general.ipa_dnsrecord:
+ name: host01
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+'''
+
+RETURN = r'''
+dnsrecord:
+ description: DNS record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSRecordIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSRecordIPAClient, self).__init__(module, host, port, protocol)
+
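+    # The zone apex ('@') is fetched directly with dnsrecord_show on the
+    # zone itself; ordinary record names go through dnsrecord_find.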
+ def dnsrecord_find(self, zone_name, record_name):
+ if record_name == '@':
+ return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True})
+ else:
+ return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True})
+
+ def dnsrecord_add(self, zone_name=None, record_name=None, details=None):
+ item = dict(idnsname=record_name)
+ if details['record_type'] == 'A':
+ item.update(a_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'AAAA':
+ item.update(aaaa_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'A6':
+ item.update(a6_part_data=details['record_value'])
+ elif details['record_type'] == 'CNAME':
+ item.update(cname_part_hostname=details['record_value'])
+ elif details['record_type'] == 'DNAME':
+ item.update(dname_part_target=details['record_value'])
+ elif details['record_type'] == 'PTR':
+ item.update(ptr_part_hostname=details['record_value'])
+ elif details['record_type'] == 'TXT':
+ item.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV':
+ item.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX':
+ item.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+
+ return self._post_json(method='dnsrecord_add', name=zone_name, item=item)
+
+ def dnsrecord_mod(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+ return self._post_json(method='dnsrecord_mod', name=zone_name, item=item)
+
+ def dnsrecord_del(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ return self._post_json(method='dnsrecord_del', name=zone_name, item=item)
+
+
+def get_dnsrecord_dict(details=None):
+ module_dnsrecord = dict()
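+ # Map the module's record_type/record_value pair onto the raw IPA attribute used by dnsrecord_mod and dnsrecord_del.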
+ if details['record_type'] == 'A' and details['record_value']:
+ module_dnsrecord.update(arecord=details['record_value'])
+ elif details['record_type'] == 'AAAA' and details['record_value']:
+ module_dnsrecord.update(aaaarecord=details['record_value'])
+ elif details['record_type'] == 'A6' and details['record_value']:
+ module_dnsrecord.update(a6record=details['record_value'])
+ elif details['record_type'] == 'CNAME' and details['record_value']:
+ module_dnsrecord.update(cnamerecord=details['record_value'])
+ elif details['record_type'] == 'DNAME' and details['record_value']:
+ module_dnsrecord.update(dnamerecord=details['record_value'])
+ elif details['record_type'] == 'PTR' and details['record_value']:
+ module_dnsrecord.update(ptrrecord=details['record_value'])
+ elif details['record_type'] == 'TXT' and details['record_value']:
+ module_dnsrecord.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV' and details['record_value']:
+ module_dnsrecord.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX' and details['record_value']:
+ module_dnsrecord.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ module_dnsrecord.update(dnsttl=details['record_ttl'])
+
+ return module_dnsrecord
+
+
+def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord):
+ details = get_dnsrecord_dict(module_dnsrecord)
+ return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details)
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ record_name = module.params['record_name']
+ record_ttl = module.params.get('record_ttl')
+ state = module.params['state']
+
+ ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name)
+
+ module_dnsrecord = dict(
+ record_type=module.params['record_type'],
+ record_value=module.params['record_value'],
+ record_ttl=to_native(record_ttl, nonstring='passthru'),
+ )
+
+ # ttl is not required to change records
+ if module_dnsrecord['record_ttl'] is None:
+ module_dnsrecord.pop('record_ttl')
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_add(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_mod(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ if ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_del(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+
+ return changed, client.dnsrecord_find(zone_name, record_name)
+
+
+def main():
+ record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX']
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ zone_name=dict(type='str', required=True),
+ record_name=dict(type='str', aliases=['name'], required=True),
+ record_type=dict(type='str', default='A', choices=record_types),
+ record_value=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ record_ttl=dict(type='int', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = DNSRecordIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py
new file mode 100644
index 00000000..1536866c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com)
+# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnszone
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage FreeIPA DNS Zones
+description:
+- Add and delete IPA DNS zones using IPA API.
+options:
+ zone_name:
+ description:
+ - The DNS zone name that needs to be managed.
+ required: true
+ type: str
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ dynamicupdate:
+ description: Apply dynamic updates to the zone
+ required: false
+ default: "false"
+ choices: ["false", "true"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns zone is present
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+
+- name: Ensure dns zone is present and has dynamic updates enabled
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ dynamicupdate: 'true'
+
+- name: Ensure that dns zone is removed
+ community.general.ipa_dnszone:
+ zone_name: example.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+'''
+
+RETURN = r'''
+zone:
+ description: DNS zone as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSZoneIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnszone_find(self, zone_name, details=None):
+ items = {'idnsname': zone_name}
+ if details is not None:
+ items.update(details)
+
+ return self._post_json(
+ method='dnszone_find',
+ name=zone_name,
+ item=items
+ )
+
+ def dnszone_add(self, zone_name=None, details=None):
+ items = {}
+ if details is not None:
+ items.update(details)
+
+ return self._post_json(
+ method='dnszone_add',
+ name=zone_name,
+ item=items
+ )
+
+ def dnszone_del(self, zone_name=None, record_name=None, details=None):
+ return self._post_json(
+ method='dnszone_del', name=zone_name, item={})
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ state = module.params['state']
+ dynamicupdate = module.params['dynamicupdate']
+
+ ipa_dnszone = client.dnszone_find(zone_name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate})
+ else:
+ if ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_del(zone_name=zone_name)
+
+ return changed, client.dnszone_find(zone_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(zone_name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ dynamicupdate=dict(type='str', required=False, default='false', choices=['true', 'false']),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = DNSZoneIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, zone = ensure(module, client)
+ module.exit_json(changed=changed, zone=zone)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py
new file mode 100644
index 00000000..84ff443a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_group
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA group
+description:
+- Add, modify and delete a group within IPA server.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - Description of the group.
+ type: str
+ external:
+ description:
+ - Allow adding external non-IPA members from trusted domains.
+ type: bool
+ gidnumber:
+ description:
+ - GID (use this option to set it manually).
+ aliases: ['gid']
+ type: str
+ group:
+ description:
+ - List of group names assigned to this group.
+ - If an empty list is passed all groups will be removed from this group.
+ - If option is omitted assigned groups will not be checked or changed.
+ - Groups that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ nonposix:
+ description:
+ - Create as a non-POSIX group.
+ type: bool
+ user:
+ description:
+ - List of user names assigned to this group.
+ - If an empty list is passed all users will be removed from this group.
+ - If option is omitted assigned users will not be checked or changed.
+ - Users that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure group is present
+ community.general.ipa_group:
+ name: oinstall
+ gidnumber: '54321'
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that groups sysops and appops are assigned to ops but no other group
+ community.general.ipa_group:
+ name: ops
+ group:
+ - sysops
+ - appops
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that users linus and larry are assigned to the group, but no other user
+ community.general.ipa_group:
+ name: sysops
+ user:
+ - linus
+ - larry
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure group is absent
+ community.general.ipa_group:
+ name: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
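+
+# Illustrative sketch based on the external option documented above; the
+# group name is an assumption.
+- name: Ensure a group allowing external members from trusted domains is present
+ community.general.ipa_group:
+ name: ad_admins_external
+ external: true
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret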
+'''
+
+RETURN = r'''
+group:
+ description: Group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class GroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(GroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def group_find(self, name):
+ return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
+
+ def group_add(self, name, item):
+ return self._post_json(method='group_add', name=name, item=item)
+
+ def group_mod(self, name, item):
+ return self._post_json(method='group_mod', name=name, item=item)
+
+ def group_del(self, name):
+ return self._post_json(method='group_del', name=name)
+
+ def group_add_member(self, name, item):
+ return self._post_json(method='group_add_member', name=name, item=item)
+
+ def group_add_member_group(self, name, item):
+ return self.group_add_member(name=name, item={'group': item})
+
+ def group_add_member_user(self, name, item):
+ return self.group_add_member(name=name, item={'user': item})
+
+ def group_remove_member(self, name, item):
+ return self._post_json(method='group_remove_member', name=name, item=item)
+
+ def group_remove_member_group(self, name, item):
+ return self.group_remove_member(name=name, item={'group': item})
+
+ def group_remove_member_user(self, name, item):
+ return self.group_remove_member(name=name, item={'user': item})
+
+
+def get_group_dict(description=None, external=None, gid=None, nonposix=None):
+ group = {}
+ if description is not None:
+ group['description'] = description
+ if external is not None:
+ group['external'] = external
+ if gid is not None:
+ group['gidnumber'] = gid
+ if nonposix is not None:
+ group['nonposix'] = nonposix
+ return group
+
+
+def get_group_diff(client, ipa_group, module_group):
+ data = []
+ # group_add accepts the nonposix attribute, whereas group_mod only accepts posix.
+ if 'nonposix' in module_group:
+ # Only non-posix groups can be changed to posix
+ if not module_group['nonposix'] and ipa_group.get('nonposix'):
+ module_group['posix'] = True
+ del module_group['nonposix']
+
+ if 'external' in module_group:
+ if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'):
+ del module_group['external']
+
+ return client.get_diff(ipa_data=ipa_group, module_data=module_group)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ user = module.params['user']
+
+ module_group = get_group_dict(description=module.params['description'], external=module.params['external'],
+ gid=module.params['gidnumber'], nonposix=module.params['nonposix'])
+ ipa_group = client.group_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_group:
+ changed = True
+ if not module.check_mode:
+ ipa_group = client.group_add(name, item=module_group)
+ else:
+ diff = get_group_diff(client, ipa_group, module_group)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_group.get(key)
+ client.group_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group,
+ client.group_add_member_group,
+ client.group_remove_member_group) or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user,
+ client.group_add_member_user,
+ client.group_remove_member_user) or changed
+
+ else:
+ if ipa_group:
+ changed = True
+ if not module.check_mode:
+ client.group_del(name)
+
+ return changed, client.group_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ external=dict(type='bool'),
+ gidnumber=dict(type='str', aliases=['gid']),
+ group=dict(type='list', elements='str'),
+ nonposix=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = GroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, group = ensure(module, client)
+ module.exit_json(changed=changed, group=group)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py
new file mode 100644
index 00000000..cb49fd53
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hbacrule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA HBAC rule
+description:
+- Add, modify or delete an IPA HBAC rule using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description: Description
+ type: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ required: false
+ type: list
+ elements: str
+ hostcategory:
+ description: Host category
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of hostgroup names to assign.
+ - If an empty list is passed all hostgroups will be removed from the rule.
+ - If option is omitted hostgroups will not be checked or changed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all services will be removed from the rule.
+ - If option is omitted services will not be checked or changed.
+ type: list
+ elements: str
+ servicecategory:
+ description: Service category
+ choices: ['all']
+ type: str
+ servicegroup:
+ description:
+ - List of service group names to assign.
+ - If an empty list is passed all assigned service groups will be removed from the rule.
+ - If option is omitted service groups will not be checked or changed.
+ type: list
+ elements: str
+ sourcehost:
+ description:
+ - List of source host names to assign.
+ - If an empty list is passed all assigned source hosts will be removed from the rule.
+ - If option is omitted source hosts will not be checked or changed.
+ type: list
+ elements: str
+ sourcehostcategory:
+ description: Source host category
+ choices: ['all']
+ type: str
+ sourcehostgroup:
+ description:
+ - List of source host group names to assign.
+ - If an empty list is passed all assigned source host groups will be removed from the rule.
+ - If option is omitted source host groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure
+ default: "present"
+ choices: ["absent", "disabled", "enabled","present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description: User category
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user group names to assign.
+ - If an empty list is passed all assigned user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure rule to allow all users to access any host from any host
+ community.general.ipa_hbacrule:
+ name: allow_all
+ description: Allow all users to access any host from any host
+ hostcategory: all
+ servicecategory: all
+ usercategory: all
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule with certain limitations
+ community.general.ipa_hbacrule:
+ name: allow_all_developers_access_to_db
+ description: Allow all developers to access any database from any host
+ hostgroup:
+ - db-server
+ usergroup:
+ - developers
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule is absent
+ community.general.ipa_hbacrule:
+ name: rule_to_be_deleted
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
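+
+# Illustrative sketch: state=disabled keeps the rule but sets ipaenabledflag
+# to FALSE, as implemented in ensure() below.
+- name: Ensure rule is present but disabled
+ community.general.ipa_hbacrule:
+ name: allow_all
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret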
+'''
+
+RETURN = r'''
+hbacrule:
+ description: HBAC rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HBACRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def hbacrule_find(self, name):
+ return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
+
+ def hbacrule_add(self, name, item):
+ return self._post_json(method='hbacrule_add', name=name, item=item)
+
+ def hbacrule_mod(self, name, item):
+ return self._post_json(method='hbacrule_mod', name=name, item=item)
+
+ def hbacrule_del(self, name):
+ return self._post_json(method='hbacrule_del', name=name)
+
+ def hbacrule_add_host(self, name, item):
+ return self._post_json(method='hbacrule_add_host', name=name, item=item)
+
+ def hbacrule_remove_host(self, name, item):
+ return self._post_json(method='hbacrule_remove_host', name=name, item=item)
+
+ def hbacrule_add_service(self, name, item):
+ return self._post_json(method='hbacrule_add_service', name=name, item=item)
+
+ def hbacrule_remove_service(self, name, item):
+ return self._post_json(method='hbacrule_remove_service', name=name, item=item)
+
+ def hbacrule_add_user(self, name, item):
+ return self._post_json(method='hbacrule_add_user', name=name, item=item)
+
+ def hbacrule_remove_user(self, name, item):
+ return self._post_json(method='hbacrule_remove_user', name=name, item=item)
+
+ def hbacrule_add_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
+
+ def hbacrule_remove_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
+
+
+def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
+ sourcehostcategory=None,
+ usercategory=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if servicecategory is not None:
+ data['servicecategory'] = servicecategory
+ if sourcehostcategory is not None:
+ data['sourcehostcategory'] = sourcehostcategory
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ return data
+
+
+def get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule):
+ return client.get_diff(ipa_data=ipa_hbacrule, module_data=module_hbacrule)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+
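+ # 'present' and 'enabled' both keep the rule enforced; 'disabled' keeps the rule but sets ipaenabledflag to FALSE.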
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ service = module.params['service']
+ servicecategory = module.params['servicecategory']
+ servicegroup = module.params['servicegroup']
+ sourcehost = module.params['sourcehost']
+ sourcehostcategory = module.params['sourcehostcategory']
+ sourcehostgroup = module.params['sourcehostgroup']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_hbacrule = get_hbacrule_dict(description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ servicecategory=servicecategory,
+ sourcehostcategory=sourcehostcategory,
+ usercategory=usercategory)
+ ipa_hbacrule = client.hbacrule_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
+ else:
+ diff = get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hbacrule.get(key)
+ client.hbacrule_mod(name=name, item=data)
+
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'host') or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'hostgroup') or changed
+
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvc') or changed
+
+ if servicegroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
+ servicegroup,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvcgroup') or changed
+
+ if sourcehost is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'host') or changed
+
+ if sourcehostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'hostgroup') or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'user') or changed
+
+ if usergroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'group') or changed
+ else:
+ if ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ client.hbacrule_del(name=name)
+
+ return changed, client.hbacrule_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ servicecategory=dict(type='str', choices=['all']),
+ servicegroup=dict(type='list', elements='str'),
+ sourcehost=dict(type='list', elements='str'),
+ sourcehostcategory=dict(type='str', choices=['all']),
+ sourcehostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HBACRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hbacrule = ensure(module, client)
+ module.exit_json(changed=changed, hbacrule=hbacrule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py
new file mode 100644
index 00000000..80892c01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using IPA API.
+options:
+ fqdn:
+ description:
+ - Fully qualified domain name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - A description of this host.
+ type: str
+ force:
+ description:
+ - Force host name even if not in DNS.
+ required: false
+ type: bool
+ ip_address:
+ description:
+ - Add the host to DNS with this IP address.
+ type: str
+ mac_address:
+ description:
+ - List of hardware MAC address(es) of this host.
+ - If option is omitted MAC addresses will not be checked or changed.
+ - If an empty list is passed all assigned MAC addresses will be removed.
+ - MAC addresses that are already assigned but not passed will be removed.
+ aliases: ["macaddress"]
+ type: list
+ elements: str
+ ns_host_location:
+ description:
+ - Host location (e.g. "Lab 2").
+ aliases: ["nshostlocation"]
+ type: str
+ ns_hardware_platform:
+ description:
+ - Host hardware platform (e.g. "Lenovo T61").
+ aliases: ["nshardwareplatform"]
+ type: str
+ ns_os_version:
+ description:
+ - Host operating system and version (e.g. "Fedora 9").
+ aliases: ["nsosversion"]
+ type: str
+ user_certificate:
+ description:
+ - List of Base-64 encoded server certificates.
+ - If option is omitted certificates will not be checked or changed.
+ - If an empty list is passed all assigned certificates will be removed.
+ - Certificates already assigned but not passed will be removed.
+ aliases: ["usercertificate"]
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ update_dns:
+ description:
+ - If set C("True") with state as C("absent"), then removes DNS records of the host managed by FreeIPA DNS.
+ - This option has no effect for states other than "absent".
+ type: bool
+ random_password:
+ description: Generate a random password to be used in bulk enrollment.
+ type: bool
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host is present
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ ns_host_location: Lab
+ ns_os_version: CentOS 7
+ ns_hardware_platform: Lenovo T61
+ mac_address:
+ - "08:00:27:E3:B1:2D"
+ - "52:54:00:BD:97:1E"
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Generate a random password for bulk enrollment
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: False
+ random_password: True
+
+- name: Ensure host is disabled
+ community.general.ipa_host:
+ name: host01.example.com
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that all user certificates are removed
+ community.general.ipa_host:
+ name: host01.example.com
+ user_certificate: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host and its DNS record is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_dns: True
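+
+# Illustrative sketch based on the force option documented above; the host
+# name is an assumption.
+- name: Ensure host without a DNS entry is present
+ community.general.ipa_host:
+ name: host02.example.com
+ force: true
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret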
+'''
+
+RETURN = r'''
+host:
+ description: Host as returned by IPA API.
+ returned: always
+ type: dict
+host_diff:
+ description: List of options that differ and would be changed
+ returned: if check mode and a difference is found
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostIPAClient, self).__init__(module, host, port, protocol)
+
+ def host_show(self, name):
+ return self._post_json(method='host_show', name=name)
+
+ def host_find(self, name):
+ return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
+
+ def host_add(self, name, host):
+ return self._post_json(method='host_add', name=name, item=host)
+
+ def host_mod(self, name, host):
+ return self._post_json(method='host_mod', name=name, item=host)
+
+ def host_del(self, name, update_dns):
+ return self._post_json(method='host_del', name=name, item={'updatedns': update_dns})
+
+ def host_disable(self, name):
+ return self._post_json(method='host_disable', name=name)
+
+
+def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
+ ns_os_version=None, user_certificate=None, mac_address=None, random_password=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if force is not None:
+ data['force'] = force
+ if ip_address is not None:
+ data['ip_address'] = ip_address
+ if ns_host_location is not None:
+ data['nshostlocation'] = ns_host_location
+ if ns_hardware_platform is not None:
+ data['nshardwareplatform'] = ns_hardware_platform
+ if ns_os_version is not None:
+ data['nsosversion'] = ns_os_version
+ if user_certificate is not None:
+ data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
+ if mac_address is not None:
+ data['macaddress'] = mac_address
+ if random_password is not None:
+ data['random'] = random_password
+ return data
+
+
+def get_host_diff(client, ipa_host, module_host):
+ non_updateable_keys = ['force', 'ip_address']
+ if not module_host.get('random'):
+ non_updateable_keys.append('random')
+ for key in non_updateable_keys:
+ if key in module_host:
+ del module_host[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_host)
+
+
+def ensure(module, client):
+ name = module.params['fqdn']
+ state = module.params['state']
+
+ ipa_host = client.host_find(name=name)
+ module_host = get_host_dict(description=module.params['description'],
+ force=module.params['force'], ip_address=module.params['ip_address'],
+ ns_host_location=module.params['ns_host_location'],
+ ns_hardware_platform=module.params['ns_hardware_platform'],
+ ns_os_version=module.params['ns_os_version'],
+ user_certificate=module.params['user_certificate'],
+ mac_address=module.params['mac_address'],
+ random_password=module.params.get('random_password'),
+ )
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_host:
+ changed = True
+ if not module.check_mode:
+ # OTP password generated by FreeIPA is visible only for host_add command
+ # so, return directly from here.
+ return changed, client.host_add(name=name, host=module_host)
+ else:
+ diff = get_host_diff(client, ipa_host, module_host)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_host.get(key)
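+ # A host that already has a keytab is unenrolled via host_disable first, since FreeIPA will not issue a new random one-time password to an enrolled host.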
+ ipa_host_show = client.host_show(name=name)
+ if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'):
+ client.host_disable(name=name)
+ return changed, client.host_mod(name=name, host=data)
+
+ else:
+ if ipa_host:
+ changed = True
+ update_dns = module.params.get('update_dns', False)
+ if not module.check_mode:
+ client.host_del(name=name, update_dns=update_dns)
+
+ return changed, client.host_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ fqdn=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool'),
+ ip_address=dict(type='str'),
+ ns_host_location=dict(type='str', aliases=['nshostlocation']),
+ ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']),
+ ns_os_version=dict(type='str', aliases=['nsosversion']),
+ user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'),
+ mac_address=dict(type='list', aliases=['macaddress'], elements='str'),
+ update_dns=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ random_password=dict(type='bool', no_log=False),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py
new file mode 100644
index 00000000..ae1f1a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using IPA API.
+options:
+ cn:
+ description:
+ - Name of host-group.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ host:
+ description:
+ - List of hosts that belong to the host-group.
+ - If an empty list is passed all hosts will be removed from the group.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host-groups that belong to that host-group.
+ - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host-group databases is present
+ community.general.ipa_hostgroup:
+ name: databases
+ state: present
+ host:
+ - db.example.com
+ hostgroup:
+ - mysql-server
+ - oracle-server
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host-group databases is absent
+ community.general.ipa_hostgroup:
+ name: databases
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+hostgroup:
+ description: Hostgroup as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def hostgroup_find(self, name):
+ return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def hostgroup_add(self, name, item):
+ return self._post_json(method='hostgroup_add', name=name, item=item)
+
+ def hostgroup_mod(self, name, item):
+ return self._post_json(method='hostgroup_mod', name=name, item=item)
+
+ def hostgroup_del(self, name):
+ return self._post_json(method='hostgroup_del', name=name)
+
+ def hostgroup_add_member(self, name, item):
+ return self._post_json(method='hostgroup_add_member', name=name, item=item)
+
+ def hostgroup_add_host(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'host': item})
+
+ def hostgroup_add_hostgroup(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'hostgroup': item})
+
+ def hostgroup_remove_member(self, name, item):
+ return self._post_json(method='hostgroup_remove_member', name=name, item=item)
+
+ def hostgroup_remove_host(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'host': item})
+
+ def hostgroup_remove_hostgroup(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'hostgroup': item})
+
+
+def get_hostgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup):
+ return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+
+ ipa_hostgroup = client.hostgroup_find(name=name)
+ module_hostgroup = get_hostgroup_dict(description=module.params['description'])
+
+ changed = False
+ if state == 'present':
+ if not ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup)
+ else:
+ diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hostgroup.get(key)
+ client.hostgroup_mod(name=name, item=data)
+
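+ # IPA returns member names in lower case, so normalize the requested names before diffing.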
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host],
+ client.hostgroup_add_host, client.hostgroup_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
+ [item.lower() for item in hostgroup],
+ client.hostgroup_add_hostgroup,
+ client.hostgroup_remove_hostgroup) or changed
+
+ else:
+ if ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ client.hostgroup_del(name=name)
+
+ return changed, client.hostgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hostgroup = ensure(module, client)
+ module.exit_json(changed=changed, hostgroup=hostgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py
new file mode 100644
index 00000000..589a6d5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API.
+options:
+ cn:
+ description:
+ - Role name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - A description of this role-group.
+ type: str
+ group:
+ description:
+ - List of group names assigned to this role.
+ - If an empty list is passed all assigned groups will be unassigned from the role.
+ - If option is omitted groups will not be checked or changed.
+ - If option is passed all assigned groups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all assigned hosts will be unassigned from the role.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host group names to assign.
+ - If an empty list is passed all assigned host groups will be removed from the role.
+ - If option is omitted host groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ privilege:
+ description:
+ - List of privileges granted to the role.
+ - If an empty list is passed all assigned privileges will be removed.
+ - If option is omitted privileges will not be checked or changed.
+ - If option is passed all assigned privileges that are not passed will be removed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all assigned services will be removed from the role.
+ - If option is omitted services will not be checked or changed.
+ - If option is passed all assigned services that are not passed will be removed from the role.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the role.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure role is present
+ community.general.ipa_role:
+ name: dba
+ description: Database Administrators
+ state: present
+ user:
+ - pinky
+ - brain
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure role with certain details
+ community.general.ipa_role:
+ name: another-role
+ description: Just another role
+ group:
+ - editors
+ host:
+ - host01.example.com
+ hostgroup:
+ - hostgroup01
+ privilege:
+ - Group Administrators
+ - User Administrators
+ service:
+ - service01
+
+- name: Ensure role is absent
+ community.general.ipa_role:
+ name: dba
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+role:
+ description: Role as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class RoleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(RoleIPAClient, self).__init__(module, host, port, protocol)
+
+ def role_find(self, name):
+ return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name})
+
+ def role_add(self, name, item):
+ return self._post_json(method='role_add', name=name, item=item)
+
+ def role_mod(self, name, item):
+ return self._post_json(method='role_mod', name=name, item=item)
+
+ def role_del(self, name):
+ return self._post_json(method='role_del', name=name)
+
+ def role_add_member(self, name, item):
+ return self._post_json(method='role_add_member', name=name, item=item)
+
+ def role_add_group(self, name, item):
+ return self.role_add_member(name=name, item={'group': item})
+
+ def role_add_host(self, name, item):
+ return self.role_add_member(name=name, item={'host': item})
+
+ def role_add_hostgroup(self, name, item):
+ return self.role_add_member(name=name, item={'hostgroup': item})
+
+ def role_add_service(self, name, item):
+ return self.role_add_member(name=name, item={'service': item})
+
+ def role_add_user(self, name, item):
+ return self.role_add_member(name=name, item={'user': item})
+
+ def role_remove_member(self, name, item):
+ return self._post_json(method='role_remove_member', name=name, item=item)
+
+ def role_remove_group(self, name, item):
+ return self.role_remove_member(name=name, item={'group': item})
+
+ def role_remove_host(self, name, item):
+ return self.role_remove_member(name=name, item={'host': item})
+
+ def role_remove_hostgroup(self, name, item):
+ return self.role_remove_member(name=name, item={'hostgroup': item})
+
+ def role_remove_service(self, name, item):
+ return self.role_remove_member(name=name, item={'service': item})
+
+ def role_remove_user(self, name, item):
+ return self.role_remove_member(name=name, item={'user': item})
+
+ def role_add_privilege(self, name, item):
+ return self._post_json(method='role_add_privilege', name=name, item={'privilege': item})
+
+ def role_remove_privilege(self, name, item):
+ return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item})
+
+
+def get_role_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_role_diff(client, ipa_role, module_role):
+ return client.get_diff(ipa_data=ipa_role, module_data=module_role)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+ privilege = module.params['privilege']
+ service = module.params['service']
+ user = module.params['user']
+
+ module_role = get_role_dict(description=module.params['description'])
+ ipa_role = client.role_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_role:
+ changed = True
+ if not module.check_mode:
+ ipa_role = client.role_add(name=name, item=module_role)
+ else:
+ diff = get_role_diff(client, ipa_role, module_role)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_role.get(key)
+ client.role_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group,
+ client.role_add_group,
+ client.role_remove_group) or changed
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host,
+ client.role_add_host,
+ client.role_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup,
+ client.role_add_hostgroup,
+ client.role_remove_hostgroup) or changed
+
+ if privilege is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege,
+ client.role_add_privilege,
+ client.role_remove_privilege) or changed
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service,
+ client.role_add_service,
+ client.role_remove_service) or changed
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user,
+ client.role_add_user,
+ client.role_remove_user) or changed
+
+ else:
+ if ipa_role:
+ changed = True
+ if not module.check_mode:
+ client.role_del(name)
+
+ return changed, client.role_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ group=dict(type='list', elements='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ privilege=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = RoleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, role = ensure(module, client)
+ module.exit_json(changed=changed, role=role)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py
new file mode 100644
index 00000000..c13f7ab6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_service
+author: Cédric Parent (@cprh)
+short_description: Manage FreeIPA service
+description:
+- Add and delete an IPA service using IPA API.
+options:
+ krbcanonicalname:
+ description:
+ - Principal of the service.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ hosts:
+ description:
+ - Defines the list of 'ManagedBy' hosts.
+ required: false
+ type: list
+ elements: str
+ force:
+ description:
+ - Force principal name even if host is not in DNS.
+ required: false
+ type: bool
+ state:
+ description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure service is present
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure service is absent
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure the list of ManagedBy hosts
+ community.general.ipa_service:
+ name: http/host01.example.com
+ hosts:
+ - host01.example.com
+ - host02.example.com
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+service:
+ description: Service as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ServiceIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ServiceIPAClient, self).__init__(module, host, port, protocol)
+
+ def service_find(self, name):
+ return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
+
+ def service_add(self, name, service):
+ return self._post_json(method='service_add', name=name, item=service)
+
+ def service_mod(self, name, service):
+ return self._post_json(method='service_mod', name=name, item=service)
+
+ def service_del(self, name):
+ return self._post_json(method='service_del', name=name)
+
+ def service_disable(self, name):
+ return self._post_json(method='service_disable', name=name)
+
+ def service_add_host(self, name, item):
+ return self._post_json(method='service_add_host', name=name, item={'host': item})
+
+ def service_remove_host(self, name, item):
+ return self._post_json(method='service_remove_host', name=name, item={'host': item})
+
+
+def get_service_dict(force=None, krbcanonicalname=None):
+ data = {}
+ if force is not None:
+ data['force'] = force
+ if krbcanonicalname is not None:
+ data['krbcanonicalname'] = krbcanonicalname
+ return data
+
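+# A comment-only sketch (hypothetical values) of what get_service_dict builds;
+# only explicitly passed parameters end up in the request payload:
+#
+#   get_service_dict(force=True)  ->  {'force': True}
+#   get_service_dict()            ->  {}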
+
+def get_service_diff(client, ipa_host, module_service):
+ non_updateable_keys = ['force', 'krbcanonicalname']
+ for key in non_updateable_keys:
+ if key in module_service:
+ del module_service[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_service)
+
+
+def ensure(module, client):
+ name = module.params['krbcanonicalname']
+ state = module.params['state']
+ hosts = module.params['hosts']
+
+ ipa_service = client.service_find(name=name)
+ module_service = get_service_dict(force=module.params['force'])
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_add(name=name, service=module_service)
+ else:
+ diff = get_service_diff(client, ipa_service, module_service)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_service.get(key)
+ client.service_mod(name=name, service=data)
+ if hosts is not None:
+ if 'managedby_host' in ipa_service:
+ for host in ipa_service['managedby_host']:
+ if host not in hosts:
+ if not module.check_mode:
+ client.service_remove_host(name=name, item=host)
+ changed = True
+ for host in hosts:
+ if host not in ipa_service['managedby_host']:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+ else:
+ for host in hosts:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+
+ else:
+ if ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_del(name=name)
+
+ return changed, client.service_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ krbcanonicalname=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool', required=False),
+ hosts=dict(type='list', required=False, elements='str'),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = ServiceIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+        changed, service = ensure(module, client)
+        module.exit_json(changed=changed, service=service)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py
new file mode 100644
index 00000000..218951a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_subca
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA Lightweight Sub Certificate Authorities
+description:
+- Add, modify, enable, disable and delete IPA Lightweight Sub Certificate Authorities using the IPA API.
+options:
+ subca_name:
+ description:
+ - The Sub Certificate Authority name which needs to be managed.
+ required: true
+ aliases: ["name"]
+ type: str
+ subca_subject:
+ description:
+    - The Sub Certificate Authority's Subject, for example C(CN=SampleSubCA1,O=testrelm.test).
+ required: true
+ type: str
+ subca_desc:
+ description:
+ - The Sub Certificate Authority's description.
+ type: str
+ state:
+ description:
+ - State to ensure.
+    - States C(disabled) and C(enabled) are available for FreeIPA version 4.4.2 and onwards.
+ required: false
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = '''
+- name: Ensure IPA Sub CA is present
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ subca_name: AnsibleSubCA1
+ subca_subject: 'CN=AnsibleSubCA1,O=example.com'
+ subca_desc: Ansible Sub CA
+
+- name: Ensure that IPA Sub CA is removed
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: absent
+ subca_name: AnsibleSubCA1
+
+- name: Ensure that IPA Sub CA is disabled
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+    state: disabled
+ subca_name: AnsibleSubCA1
+'''
+
+RETURN = r'''
+subca:
+ description: IPA Sub CA record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SubCAIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SubCAIPAClient, self).__init__(module, host, port, protocol)
+
+ def subca_find(self, subca_name):
+ return self._post_json(method='ca_find', name=subca_name, item=None)
+
+ def subca_add(self, subca_name=None, subject_dn=None, details=None):
+ item = dict(ipacasubjectdn=subject_dn)
+ subca_desc = details.get('description', None)
+ if subca_desc is not None:
+ item.update(description=subca_desc)
+ return self._post_json(method='ca_add', name=subca_name, item=item)
+
+ def subca_mod(self, subca_name=None, diff=None, details=None):
+ item = get_subca_dict(details)
+ for change in diff:
+ update_detail = dict()
+ if item[change] is not None:
+ update_detail.update(setattr="{0}={1}".format(change, item[change]))
+ self._post_json(method='ca_mod', name=subca_name, item=update_detail)
+
+ def subca_del(self, subca_name=None):
+ return self._post_json(method='ca_del', name=subca_name)
+
+ def subca_disable(self, subca_name=None):
+ return self._post_json(method='ca_disable', name=subca_name)
+
+ def subca_enable(self, subca_name=None):
+ return self._post_json(method='ca_enable', name=subca_name)
+
+
+def get_subca_dict(details=None):
+ module_subca = dict()
+ if details['description'] is not None:
+ module_subca['description'] = details['description']
+ if details['subca_subject'] is not None:
+ module_subca['ipacasubjectdn'] = details['subca_subject']
+ return module_subca
+
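+# Comment-only sketch of the parameter-to-attribute mapping above (values are
+# illustrative): the module option subca_subject is stored under the IPA
+# attribute name ipacasubjectdn:
+#
+#   get_subca_dict({'description': 'test CA', 'subca_subject': 'CN=Sub,O=x'})
+#   ->  {'description': 'test CA', 'ipacasubjectdn': 'CN=Sub,O=x'}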
+
+def get_subca_diff(client, ipa_subca, module_subca):
+ details = get_subca_dict(module_subca)
+ return client.get_diff(ipa_data=ipa_subca, module_data=details)
+
+
+def ensure(module, client):
+ subca_name = module.params['subca_name']
+ subca_subject_dn = module.params['subca_subject']
+ subca_desc = module.params['subca_desc']
+
+ state = module.params['state']
+
+ ipa_subca = client.subca_find(subca_name)
+ module_subca = dict(description=subca_desc,
+ subca_subject=subca_subject_dn)
+
+ changed = False
+ if state == 'present':
+ if not ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca)
+ else:
+ diff = get_subca_diff(client, ipa_subca, module_subca)
+            # IPA does not allow modifying the Sub CA's subject DN,
+            # so skip it for now.
+ if 'ipacasubjectdn' in diff:
+ diff.remove('ipacasubjectdn')
+ del module_subca['subca_subject']
+
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca)
+ elif state == 'absent':
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_del(subca_name=subca_name)
+    elif state == 'disabled':
+        ipa_version = client.get_ipa_version()
+        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+            module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. Please upgrade to "
+                                 "version 4.4.2 or greater" % ipa_version)
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_disable(subca_name=subca_name)
+    elif state == 'enabled':
+        ipa_version = client.get_ipa_version()
+        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+            module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to "
+                                 "version 4.4.2 or greater" % ipa_version)
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_enable(subca_name=subca_name)
+
+ return changed, client.subca_find(subca_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']),
+ subca_subject=dict(type='str', required=True),
+ subca_desc=dict(type='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+
+ client = SubCAIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py
new file mode 100644
index 00000000..aa09e0e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmd
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command
+description:
+- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
+options:
+ sudocmd:
+ description:
+ - Sudo command.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - A description of this command.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command exists
+ community.general.ipa_sudocmd:
+ name: su
+ description: Allow to run su via sudo
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command does not exist
+ community.general.ipa_sudocmd:
+ name: su
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmd:
+  description: Sudo command as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmd_find(self, name):
+ return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
+
+ def sudocmd_add(self, name, item):
+ return self._post_json(method='sudocmd_add', name=name, item=item)
+
+ def sudocmd_mod(self, name, item):
+ return self._post_json(method='sudocmd_mod', name=name, item=item)
+
+ def sudocmd_del(self, name):
+ return self._post_json(method='sudocmd_del', name=name)
+
+
+def get_sudocmd_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
+ return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
+
+
+def ensure(module, client):
+ name = module.params['sudocmd']
+ state = module.params['state']
+
+ module_sudocmd = get_sudocmd_dict(description=module.params['description'])
+ ipa_sudocmd = client.sudocmd_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_add(name=name, item=module_sudocmd)
+ else:
+ diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmd.get(key)
+ client.sudocmd_mod(name=name, item=data)
+ else:
+ if ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_del(name=name)
+
+ return changed, client.sudocmd_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='str', required=True, aliases=['name']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmd = ensure(module, client)
+ module.exit_json(changed=changed, sudocmd=sudocmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py
new file mode 100644
index 00000000..96eb6559
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmdgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command group
+description:
+- Add, modify or delete sudo command group within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Sudo Command Group.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - Group description.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+ sudocmd:
+ description:
+ - List of sudo commands to assign to the group.
+ - If an empty list is passed all assigned commands will be removed from the group.
+ - If option is omitted sudo commands will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command group exists
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ description: Group of important commands
+ sudocmd:
+ - su
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command group does not exist
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmdgroup:
+ description: Sudo command group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmdgroup_find(self, name):
+ return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def sudocmdgroup_add(self, name, item):
+ return self._post_json(method='sudocmdgroup_add', name=name, item=item)
+
+ def sudocmdgroup_mod(self, name, item):
+ return self._post_json(method='sudocmdgroup_mod', name=name, item=item)
+
+ def sudocmdgroup_del(self, name):
+ return self._post_json(method='sudocmdgroup_del', name=name)
+
+ def sudocmdgroup_add_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_add_member', name=name, item=item)
+
+ def sudocmdgroup_add_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item})
+
+ def sudocmdgroup_remove_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item)
+
+ def sudocmdgroup_remove_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item})
+
+
+def get_sudocmdgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup):
+ return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ sudocmd = module.params['sudocmd']
+
+ module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
+ ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
+ else:
+ diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmdgroup.get(key)
+ client.sudocmdgroup_mod(name=name, item=data)
+
+ if sudocmd is not None:
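+            # modify_if_diff (from the shared ipa module_utils) compares the
+            # current and desired member lists, calls the add/remove callbacks
+            # for the difference (honouring check mode) and returns True if
+            # anything changed.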
+ changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
+ client.sudocmdgroup_add_member_sudocmd,
+ client.sudocmdgroup_remove_member_sudocmd)
+ else:
+ if ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ client.sudocmdgroup_del(name=name)
+
+ return changed, client.sudocmdgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmdgroup = ensure(module, client)
+        module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py
new file mode 100644
index 00000000..9a0259bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py
@@ -0,0 +1,400 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudorule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo rule
+description:
+- Add, modify or delete sudo rule within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+    - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ cmdcategory:
+ description:
+ - Command category the rule applies to.
+ choices: ['all']
+ type: str
+ cmd:
+ description:
+ - List of commands assigned to the rule.
+ - If an empty list is passed all commands will be removed from the rule.
+ - If option is omitted commands will not be checked or changed.
+ type: list
+ elements: str
+ description:
+ description:
+ - Description of the sudo rule.
+ type: str
+ host:
+ description:
+ - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign hosts.
+ type: list
+ elements: str
+ hostcategory:
+ description:
+ - Host category the rule applies to.
+ - If 'all' is passed one must omit C(host) and C(hostgroup).
+ - Option C(host) and C(hostgroup) must be omitted to assign 'all'.
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of host groups assigned to the rule.
+ - If an empty list is passed all host groups will be removed from the rule.
+ - If option is omitted host groups will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign host groups.
+ type: list
+ elements: str
+ runasusercategory:
+ description:
+ - RunAs User category the rule applies to.
+ choices: ['all']
+ type: str
+ runasgroupcategory:
+ description:
+ - RunAs Group category the rule applies to.
+ choices: ['all']
+ type: str
+ sudoopt:
+ description:
+ - List of options to add to the sudo rule.
+ type: list
+ elements: str
+ user:
+ description:
+ - List of users assigned to the rule.
+ - If an empty list is passed all users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description:
+ - User category the rule applies to.
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user groups assigned to the rule.
+ - If an empty list is passed all user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
+ community.general.ipa_sudorule:
+ name: sudo_all_nopasswd
+ cmdcategory: all
+ description: Allow to run every command with sudo without password
+ hostcategory: all
+ sudoopt:
+ - '!authenticate'
+ usercategory: all
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+ community.general.ipa_sudorule:
+ name: sudo_dev_dbserver
+    description: Allow developers to run every command with sudo on all database servers
+ cmdcategory: all
+ host:
+ - db01.example.com
+ hostgroup:
+ - db-server
+ sudoopt:
+ - '!authenticate'
+ usergroup:
+ - developers
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudorule:
+ description: Sudorule as returned by IPA
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudorule_find(self, name):
+ return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
+
+ def sudorule_add(self, name, item):
+ return self._post_json(method='sudorule_add', name=name, item=item)
+
+ def sudorule_mod(self, name, item):
+ return self._post_json(method='sudorule_mod', name=name, item=item)
+
+ def sudorule_del(self, name):
+ return self._post_json(method='sudorule_del', name=name)
+
+ def sudorule_add_option(self, name, item):
+ return self._post_json(method='sudorule_add_option', name=name, item=item)
+
+ def sudorule_add_option_ipasudoopt(self, name, item):
+ return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_remove_option(self, name, item):
+ return self._post_json(method='sudorule_remove_option', name=name, item=item)
+
+ def sudorule_remove_option_ipasudoopt(self, name, item):
+ return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_add_host(self, name, item):
+ return self._post_json(method='sudorule_add_host', name=name, item=item)
+
+ def sudorule_add_host_host(self, name, item):
+ return self.sudorule_add_host(name=name, item={'host': item})
+
+ def sudorule_add_host_hostgroup(self, name, item):
+ return self.sudorule_add_host(name=name, item={'hostgroup': item})
+
+ def sudorule_remove_host(self, name, item):
+ return self._post_json(method='sudorule_remove_host', name=name, item=item)
+
+ def sudorule_remove_host_host(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'host': item})
+
+ def sudorule_remove_host_hostgroup(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'hostgroup': item})
+
+ def sudorule_add_allow_command(self, name, item):
+ return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item})
+
+ def sudorule_remove_allow_command(self, name, item):
+        return self._post_json(method='sudorule_remove_allow_command', name=name, item={'sudocmd': item})
+
+ def sudorule_add_user(self, name, item):
+ return self._post_json(method='sudorule_add_user', name=name, item=item)
+
+ def sudorule_add_user_user(self, name, item):
+ return self.sudorule_add_user(name=name, item={'user': item})
+
+ def sudorule_add_user_group(self, name, item):
+ return self.sudorule_add_user(name=name, item={'group': item})
+
+ def sudorule_remove_user(self, name, item):
+ return self._post_json(method='sudorule_remove_user', name=name, item=item)
+
+ def sudorule_remove_user_user(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'user': item})
+
+ def sudorule_remove_user_group(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'group': item})
+
+
+def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None,
+ runasgroupcategory=None, runasusercategory=None):
+ data = {}
+ if cmdcategory is not None:
+ data['cmdcategory'] = cmdcategory
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ if runasusercategory is not None:
+ data['ipasudorunasusercategory'] = runasusercategory
+ if runasgroupcategory is not None:
+ data['ipasudorunasgroupcategory'] = runasgroupcategory
+ return data
+
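+# Illustrative sketch (hypothetical values) of the option-to-attribute renaming
+# above: the two RunAs categories are stored under their 'ipasudorunas*'
+# attribute names, while all other keys keep their option name:
+#
+#   get_sudorule_dict(cmdcategory='all', runasusercategory='all')
+#   ->  {'cmdcategory': 'all', 'ipasudorunasusercategory': 'all'}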
+
+def category_changed(module, client, category_name, ipa_sudorule):
+ if ipa_sudorule.get(category_name, None) == ['all']:
+ if not module.check_mode:
+ # cn is returned as list even with only a single value.
+ client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
+ return True
+ return False
+
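+# Sketch of the intended flow (hypothetical data): if a rule currently has
+# hostcategory == ['all'] and the playbook now lists explicit hosts, the
+# category is cleared first so that explicit members can be assigned:
+#
+#   category_changed(module, client, 'hostcategory',
+#                    {'cn': ['rule1'], 'hostcategory': ['all']})
+#   ->  True (and, outside check mode, a sudorule_mod(name='rule1',
+#             item={'hostcategory': None}) call)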
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ cmd = module.params['cmd']
+ cmdcategory = module.params['cmdcategory']
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ runasusercategory = module.params['runasusercategory']
+ runasgroupcategory = module.params['runasgroupcategory']
+
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ sudoopt = module.params['sudoopt']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
+ description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ usercategory=usercategory,
+ runasusercategory=runasusercategory,
+ runasgroupcategory=runasgroupcategory)
+ ipa_sudorule = client.sudorule_find(name=name)
+
+ changed = False
+ if state in ['present', 'disabled', 'enabled']:
+ if not ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
+ else:
+ diff = client.get_diff(ipa_sudorule, module_sudorule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ if 'hostcategory' in diff:
+ if ipa_sudorule.get('memberhost_host', None) is not None:
+ client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
+ if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
+ client.sudorule_remove_host_hostgroup(name=name,
+ item=ipa_sudorule.get('memberhost_hostgroup'))
+
+ client.sudorule_mod(name=name, item=module_sudorule)
+
+ if cmd is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_allow_command(name=name, item=cmd)
+
+ if runasusercategory is not None:
+ changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed
+
+ if runasgroupcategory is not None:
+ changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed
+
+ if host is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
+ client.sudorule_add_host_host,
+ client.sudorule_remove_host_host) or changed
+
+ if hostgroup is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
+ client.sudorule_add_host_hostgroup,
+ client.sudorule_remove_host_hostgroup) or changed
+ if sudoopt is not None:
+        # client.modify_if_diff does not work here as each option must be removed/added on its own
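+        # Sketch with hypothetical values: if ipa_list == ['!authenticate'] and
+        # module_list == ['!authenticate', 'env_keep=PATH'], nothing is removed
+        # and a single sudorule_add_option_ipasudoopt(name, 'env_keep=PATH')
+        # call is made.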
+ ipa_list = ipa_sudorule.get('ipasudoopt', [])
+ module_list = sudoopt
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_remove_option_ipasudoopt(name, item)
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_add_option_ipasudoopt(name, item)
+
+ if user is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
+ client.sudorule_add_user_user,
+ client.sudorule_remove_user_user) or changed
+ if usergroup is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
+ client.sudorule_add_user_group,
+ client.sudorule_remove_user_group) or changed
+ else:
+ if ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ client.sudorule_del(name)
+
+ return changed, client.sudorule_find(name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cmd=dict(type='list', elements='str'),
+ cmdcategory=dict(type='str', choices=['all']),
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ runasusercategory=dict(type='str', choices=['all']),
+ runasgroupcategory=dict(type='str', choices=['all']),
+ sudoopt=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[['cmdcategory', 'cmd'],
+ ['hostcategory', 'host'],
+ ['hostcategory', 'hostgroup'],
+ ['usercategory', 'user'],
+ ['usercategory', 'usergroup']],
+ supports_check_mode=True)
+
+ client = SudoRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudorule = ensure(module, client)
+ module.exit_json(changed=changed, sudorule=sudorule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py
new file mode 100644
index 00000000..fa7b3abb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_user
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA users
+description:
+- Add, modify and delete user within IPA server.
+options:
+ displayname:
+ description: Display name.
+ type: str
+ update_password:
+ description:
+        - Whether to set the password for a user: C(always) will update it on every run, C(on_create) only when the user is created.
+ type: str
+ default: 'always'
+ choices: [ always, on_create ]
+ givenname:
+ description: First name.
+ type: str
+ krbpasswordexpiration:
+ description:
+ - Date at which the user password will expire.
+ - In the format YYYYMMddHHmmss.
+    - For example, C(20180121182022) will expire on 21 January 2018 at 18:20:22.
+ type: str
+ loginshell:
+ description: Login shell.
+ type: str
+ mail:
+ description:
+ - List of mail addresses assigned to the user.
+ - If an empty list is passed all assigned email addresses will be deleted.
+ - If None is passed email addresses will not be checked or changed.
+ type: list
+ elements: str
+ password:
+ description:
+ - Password for a user.
+ - Will not be set for an existing user unless I(update_password=always), which is the default.
+ type: str
+ sn:
+ description: Surname.
+ type: str
+ sshpubkey:
+ description:
+    - List of public SSH keys.
+ - If an empty list is passed all assigned public keys will be deleted.
+ - If None is passed SSH public keys will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ telephonenumber:
+ description:
+ - List of telephone numbers assigned to the user.
+ - If an empty list is passed all assigned telephone numbers will be deleted.
+ - If None is passed telephone numbers will not be checked or changed.
+ type: list
+ elements: str
+ title:
+ description: Title.
+ type: str
+ uid:
+ description: uid of the user.
+ required: true
+ aliases: ["name"]
+ type: str
+ uidnumber:
+ description:
+ - Account Settings UID/Posix User ID number.
+ type: str
+ gidnumber:
+ description:
+ - Posix Group ID.
+ type: str
+ homedirectory:
+ description:
+ - Default home directory of the user.
+ type: str
+ version_added: '0.2.0'
+ userauthtype:
+ description:
+ - The authentication type to use for the user.
+ choices: ["password", "radius", "otp", "pkinit", "hardened"]
+ type: str
+ version_added: '1.2.0'
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+requirements:
+- base64
+- hashlib
+'''
+
+EXAMPLES = r'''
+- name: Ensure pinky is present and always reset password
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ krbpasswordexpiration: 20200119235959
+ givenname: Pinky
+ sn: Acme
+ mail:
+ - pinky@acme.com
+ telephonenumber:
+ - '+555123456'
+ sshpubkey:
+ - ssh-rsa ....
+ - ssh-dsa ....
+ uidnumber: '1001'
+ gidnumber: '100'
+ homedirectory: /home/pinky
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure brain is absent
+ community.general.ipa_user:
+ name: brain
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure pinky is present but don't reset password if already exists
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ givenname: Pinky
+ sn: Acme
+ password: zounds
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_password: on_create
+
+- name: Ensure pinky is present and using one time password authentication
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ userauthtype: otp
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+user:
+ description: User as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import base64
+import hashlib
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class UserIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(UserIPAClient, self).__init__(module, host, port, protocol)
+
+ def user_find(self, name):
+ return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
+
+ def user_add(self, name, item):
+ return self._post_json(method='user_add', name=name, item=item)
+
+ def user_mod(self, name, item):
+ return self._post_json(method='user_mod', name=name, item=item)
+
+ def user_del(self, name):
+ return self._post_json(method='user_del', name=name)
+
+ def user_disable(self, name):
+ return self._post_json(method='user_disable', name=name)
+
+ def user_enable(self, name):
+ return self._post_json(method='user_enable', name=name)
+
+
+def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None,
+ mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None,
+ title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None,
+ userauthtype=None):
+ user = {}
+ if displayname is not None:
+ user['displayname'] = displayname
+ if krbpasswordexpiration is not None:
+ user['krbpasswordexpiration'] = krbpasswordexpiration + "Z"
+ if givenname is not None:
+ user['givenname'] = givenname
+ if loginshell is not None:
+ user['loginshell'] = loginshell
+ if mail is not None:
+ user['mail'] = mail
+ user['nsaccountlock'] = nsaccountlock
+ if sn is not None:
+ user['sn'] = sn
+ if sshpubkey is not None:
+ user['ipasshpubkey'] = sshpubkey
+ if telephonenumber is not None:
+ user['telephonenumber'] = telephonenumber
+ if title is not None:
+ user['title'] = title
+ if userpassword is not None:
+ user['userpassword'] = userpassword
+ if gidnumber is not None:
+ user['gidnumber'] = gidnumber
+ if uidnumber is not None:
+ user['uidnumber'] = uidnumber
+ if homedirectory is not None:
+ user['homedirectory'] = homedirectory
+ if userauthtype is not None:
+ user['ipauserauthtype'] = userauthtype
+
+ return user
+
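+# Comment-only sketch (hypothetical values): krbpasswordexpiration gets a
+# trailing 'Z' appended to form an LDAP generalized time value, and
+# nsaccountlock is always present in the result:
+#
+#   get_user_dict(givenname='Pinky', krbpasswordexpiration='20200119235959')
+#   ->  {'krbpasswordexpiration': '20200119235959Z', 'givenname': 'Pinky',
+#        'nsaccountlock': False}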
+
+def get_user_diff(client, ipa_user, module_user):
+ """
+    Return the keys whose values differ between the two dicts. Unfortunately the IPA
+    API returns everything as a list, even if only a single value is possible.
+    Therefore some more complexity is needed.
+    The method will check if the value of module_user.attr is not a list and
+    wrap that element in a list if the same attribute in ipa_user is a list. This way the
+    method should not need to be changed if the format of the returned API dict changes.
+ :param ipa_user:
+ :param module_user:
+ :return:
+ """
+ # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints.
+ # These are used for comparison.
+ sshpubkey = None
+ if 'ipasshpubkey' in module_user:
+ hash_algo = 'md5'
+ if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
+ hash_algo = 'sha256'
+ module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
+    # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on
+ sshpubkey = module_user['ipasshpubkey']
+ del module_user['ipasshpubkey']
+
+ result = client.get_diff(ipa_data=ipa_user, module_data=module_user)
+
+ # If there are public keys, remove the fingerprints and add them back to the dict
+ if sshpubkey is not None:
+ del module_user['sshpubkeyfp']
+ module_user['ipasshpubkey'] = sshpubkey
+ return result
+
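+# In short: public keys are compared by fingerprint (the only form IPA returns),
+# then the original keys are restored on module_user so that a later user_mod
+# call still sends the full keys rather than the fingerprints.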
+
+def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
+ """
+ Return the public key fingerprint of a given public SSH key
+ in format "[fp] [user@host] (ssh-rsa)" where fp is of the format:
+ FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
+ for md5 or
+ SHA256:[base64]
+ for sha256
+ :param ssh_key:
+ :param hash_algo:
+ :return:
+ """
+ parts = ssh_key.strip().split()
+ if len(parts) == 0:
+ return None
+ key_type = parts[0]
+ key = base64.b64decode(parts[1].encode('ascii'))
+
+ if hash_algo == 'md5':
+ fp_plain = hashlib.md5(key).hexdigest()
+ key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
+ elif hash_algo == 'sha256':
+ fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=')
+ key_fp = 'SHA256:{fp}'.format(fp=fp_plain)
+ if len(parts) < 3:
+ return "%s (%s)" % (key_fp, key_type)
+ else:
+ user_host = parts[2]
+ return "%s %s (%s)" % (key_fp, user_host, key_type)
+
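+# Format sketch only (key and fingerprint below are made up, not real values):
+# an md5 fingerprint is colon-separated uppercase hex, a sha256 fingerprint is
+# 'SHA256:' plus unpadded base64; a trailing comment in the key is preserved:
+#
+#   get_ssh_key_fingerprint('ssh-rsa AAAA... pinky@acme', 'md5')
+#   ->  'FB:0C:AC:0A:... pinky@acme (ssh-rsa)'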
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['uid']
+ nsaccountlock = state == 'disabled'
+
+ module_user = get_user_dict(displayname=module.params.get('displayname'),
+ krbpasswordexpiration=module.params.get('krbpasswordexpiration'),
+ givenname=module.params.get('givenname'),
+ loginshell=module.params['loginshell'],
+ mail=module.params['mail'], sn=module.params['sn'],
+ sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
+ telephonenumber=module.params['telephonenumber'], title=module.params['title'],
+ userpassword=module.params['password'],
+ gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'),
+ homedirectory=module.params.get('homedirectory'),
+ userauthtype=module.params.get('userauthtype'))
+
+ update_password = module.params.get('update_password')
+ ipa_user = client.user_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_user:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_add(name=name, item=module_user)
+ else:
+ if update_password == 'on_create':
+ module_user.pop('userpassword', None)
+ diff = get_user_diff(client, ipa_user, module_user)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_mod(name=name, item=module_user)
+ else:
+ if ipa_user:
+ changed = True
+ if not module.check_mode:
+ client.user_del(name)
+
+ return changed, ipa_user
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(displayname=dict(type='str'),
+ givenname=dict(type='str'),
+ update_password=dict(type='str', default="always",
+ choices=['always', 'on_create'],
+ no_log=False),
+ krbpasswordexpiration=dict(type='str', no_log=False),
+ loginshell=dict(type='str'),
+ mail=dict(type='list', elements='str'),
+ sn=dict(type='str'),
+ uid=dict(type='str', required=True, aliases=['name']),
+ gidnumber=dict(type='str'),
+ uidnumber=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ sshpubkey=dict(type='list', elements='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ telephonenumber=dict(type='list', elements='str'),
+ title=dict(type='str'),
+ homedirectory=dict(type='str'),
+ userauthtype=dict(type='str',
+ choices=['password', 'radius', 'otp', 'pkinit', 'hardened']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = UserIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+    # If sshpubkey is defined as None then module.params['sshpubkey'] is [None]. IPA itself returns None (not a list).
+    # Therefore a small check here replaces a list holding a single empty value by None. Otherwise get_user_diff()
+    # would report sshpubkey as different, which should be avoided.
+ if module.params['sshpubkey'] is not None:
+ if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
+ module.params['sshpubkey'] = None
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py
new file mode 100644
index 00000000..3376b8c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Juan Manuel Parrilla <jparrill@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_vault
+author: Juan Manuel Parrilla (@jparrill)
+short_description: Manage FreeIPA vaults
+description:
+- Add, modify and delete vaults and secret vaults.
+- KRA service should be enabled to use this module.
+options:
+ cn:
+ description:
+ - Vault name.
+    - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ ipavaulttype:
+ description:
+ - Vault types are based on security level.
+ default: "symmetric"
+ choices: ["asymmetric", "standard", "symmetric"]
+ aliases: ["vault_type"]
+ type: str
+ ipavaultpublickey:
+ description:
+ - Public key.
+ aliases: ["vault_public_key"]
+ type: str
+ ipavaultsalt:
+ description:
+ - Vault Salt.
+ aliases: ["vault_salt"]
+ type: str
+ username:
+ description:
+ - Any user can own one or more user vaults.
+ - Mutually exclusive with service.
+ aliases: ["user"]
+ type: list
+ elements: str
+ service:
+ description:
+ - Any service can own one or more service vaults.
+ - Mutually exclusive with user.
+ type: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ replace:
+ description:
+    - Force replace the existing vault on the IPA server.
+ type: bool
+ default: False
+ choices: ["True", "False"]
+ validate_certs:
+ description:
+ - Validate IPA server certificates.
+ type: bool
+ default: true
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure vault is present
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: false
+
+- name: Ensure vault is present for Admin user
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure vault is absent
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Modify vault if already exists
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ description: "Vault for test"
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ replace: True
+
+- name: Get vault info if already exists
+ community.general.ipa_vault:
+ name: vault01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+vault:
+ description: Vault as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VaultIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(VaultIPAClient, self).__init__(module, host, port, protocol)
+
+ def vault_find(self, name):
+ return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name})
+
+ def vault_add_internal(self, name, item):
+ return self._post_json(method='vault_add_internal', name=name, item=item)
+
+ def vault_mod_internal(self, name, item):
+ return self._post_json(method='vault_mod_internal', name=name, item=item)
+
+ def vault_del(self, name):
+ return self._post_json(method='vault_del', name=name)
+
+
+def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None):
+ vault = {}
+
+ if description is not None:
+ vault['description'] = description
+ if vault_type is not None:
+ vault['ipavaulttype'] = vault_type
+ if vault_salt is not None:
+ vault['ipavaultsalt'] = vault_salt
+ if vault_public_key is not None:
+ vault['ipavaultpublickey'] = vault_public_key
+ if service is not None:
+ vault['service'] = service
+ return vault
+
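+# Comment-only sketch (illustrative values): the vault_* module options are
+# stored under the 'ipavault*' attribute names expected by the API:
+#
+#   get_vault_dict(description='demo', vault_type='standard')
+#   ->  {'description': 'demo', 'ipavaulttype': 'standard'}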
+
+def get_vault_diff(client, ipa_vault, module_vault, module):
+ return client.get_diff(ipa_data=ipa_vault, module_data=module_vault)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ user = module.params['username']
+ replace = module.params['replace']
+
+ module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'],
+ vault_salt=module.params['ipavaultsalt'],
+ vault_public_key=module.params['ipavaultpublickey'],
+ service=module.params['service'])
+ ipa_vault = client.vault_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_vault:
+ # New vault
+ changed = True
+ if not module.check_mode:
+ ipa_vault = client.vault_add_internal(name, item=module_vault)
+ else:
+ # Already exists
+ if replace:
+ diff = get_vault_diff(client, ipa_vault, module_vault, module)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_vault.get(key)
+ client.vault_mod_internal(name=name, item=data)
+
+ else:
+ if ipa_vault:
+ changed = True
+ if not module.check_mode:
+ client.vault_del(name)
+
+ return changed, client.vault_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ ipavaulttype=dict(type='str', default='symmetric',
+ choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']),
+ ipavaultsalt=dict(type='str', aliases=['vault_salt']),
+ ipavaultpublickey=dict(type='str', aliases=['vault_public_key']),
+ service=dict(type='str'),
+                         replace=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ username=dict(type='list', elements='str', aliases=['user']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['username', 'service']])
+
+ client = VaultIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, vault = ensure(module, client)
+ module.exit_json(changed=changed, vault=vault)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py
new file mode 100644
index 00000000..b27155ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py
@@ -0,0 +1,879 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_client
+
+short_description: Allows administration of Keycloak clients via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak clients via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ Aliases are provided so camelCased versions can be used as well.
+
+    - The Keycloak API does not always sanity check inputs, e.g. you can set
+      SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+      If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+ state:
+ description:
+ - State of the client.
+ - On C(present), the client will be created (or updated if it exists already).
+ - On C(absent), the client will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ realm:
+ description:
+ - The realm to create the client in.
+ type: str
+ default: master
+
+ client_id:
+ description:
+ - Client id of client to be worked on. This is usually an alphanumeric name chosen by
+ you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
+ This is 'clientId' in the Keycloak REST API.
+ aliases:
+ - clientId
+ type: str
+
+ id:
+ description:
+ - Id of client to be worked on. This is usually a UUID. Either this or I(client_id)
+ is required. If you specify both, this takes precedence.
+ type: str
+
+ name:
+ description:
+ - Name of the client (this is not the same as I(client_id)).
+ type: str
+
+ description:
+ description:
+ - Description of the client in Keycloak
+ type: str
+
+ root_url:
+ description:
+ - Root URL appended to relative URLs for this client.
+ This is 'rootUrl' in the Keycloak REST API.
+ aliases:
+ - rootUrl
+ type: str
+
+ admin_url:
+ description:
+ - URL to the admin interface of the client.
+ This is 'adminUrl' in the Keycloak REST API.
+ aliases:
+ - adminUrl
+ type: str
+
+ base_url:
+ description:
+ - Default URL to use when the auth server needs to redirect or link back to the client.
+ This is 'baseUrl' in the Keycloak REST API.
+ aliases:
+ - baseUrl
+ type: str
+
+ enabled:
+ description:
+ - Is this client enabled or not?
+ type: bool
+
+ client_authenticator_type:
+ description:
+ - How do clients authenticate with the auth server? Either C(client-secret) or
+ C(client-jwt) can be chosen. When using C(client-secret), the module parameter
+ I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
+ C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
+ to configure its behavior.
+ This is 'clientAuthenticatorType' in the Keycloak REST API.
+ choices: ['client-secret', 'client-jwt']
+ aliases:
+ - clientAuthenticatorType
+ type: str
+
+ secret:
+ description:
+ - When using I(client_authenticator_type) C(client-secret) (the default), you can
+ specify a secret here (otherwise one will be generated if it does not exist). If
+ changing this secret, the module will not register a change currently (but the
+ changed secret will be saved).
+ type: str
+
+ registration_access_token:
+ description:
+ - The registration access token provides access for clients to the client registration
+ service.
+ This is 'registrationAccessToken' in the Keycloak REST API.
+ aliases:
+ - registrationAccessToken
+ type: str
+
+ default_roles:
+ description:
+ - list of default roles for this client. If the client roles referenced do not exist
+ yet, they will be created.
+ This is 'defaultRoles' in the Keycloak REST API.
+ aliases:
+ - defaultRoles
+ type: list
+
+ redirect_uris:
+ description:
+ - Acceptable redirect URIs for this client.
+ This is 'redirectUris' in the Keycloak REST API.
+ aliases:
+ - redirectUris
+ type: list
+
+ web_origins:
+ description:
+ - List of allowed CORS origins.
+ This is 'webOrigins' in the Keycloak REST API.
+ aliases:
+ - webOrigins
+ type: list
+
+ not_before:
+ description:
+ - Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
+ This is 'notBefore' in the Keycloak REST API.
+ type: int
+ aliases:
+ - notBefore
+
+ bearer_only:
+ description:
+ - The access type of this client is bearer-only.
+ This is 'bearerOnly' in the Keycloak REST API.
+ aliases:
+ - bearerOnly
+ type: bool
+
+ consent_required:
+ description:
+ - If enabled, users have to consent to client access.
+ This is 'consentRequired' in the Keycloak REST API.
+ aliases:
+ - consentRequired
+ type: bool
+
+ standard_flow_enabled:
+ description:
+ - Enable standard flow for this client or not (OpenID connect).
+ This is 'standardFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - standardFlowEnabled
+ type: bool
+
+ implicit_flow_enabled:
+ description:
+ - Enable implicit flow for this client or not (OpenID connect).
+ This is 'implicitFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - implicitFlowEnabled
+ type: bool
+
+ direct_access_grants_enabled:
+ description:
+ - Are direct access grants enabled for this client or not (OpenID connect).
+ This is 'directAccessGrantsEnabled' in the Keycloak REST API.
+ aliases:
+ - directAccessGrantsEnabled
+ type: bool
+
+ service_accounts_enabled:
+ description:
+ - Are service accounts enabled for this client or not (OpenID connect).
+ This is 'serviceAccountsEnabled' in the Keycloak REST API.
+ aliases:
+ - serviceAccountsEnabled
+ type: bool
+
+ authorization_services_enabled:
+ description:
+ - Are authorization services enabled for this client or not (OpenID connect).
+ This is 'authorizationServicesEnabled' in the Keycloak REST API.
+ aliases:
+ - authorizationServicesEnabled
+ type: bool
+
+ public_client:
+ description:
+ - Is the access type for this client public or not.
+ This is 'publicClient' in the Keycloak REST API.
+ aliases:
+ - publicClient
+ type: bool
+
+ frontchannel_logout:
+ description:
+ - Is frontchannel logout enabled for this client or not.
+ This is 'frontchannelLogout' in the Keycloak REST API.
+ aliases:
+ - frontchannelLogout
+ type: bool
+
+ protocol:
+ description:
+ - Type of client (either C(openid-connect) or C(saml)).
+ type: str
+ choices: ['openid-connect', 'saml']
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ aliases:
+ - fullScopeAllowed
+ type: bool
+
+ node_re_registration_timeout:
+ description:
+ - Cluster node re-registration timeout for this client.
+ This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
+ type: int
+ aliases:
+ - nodeReRegistrationTimeout
+
+ registered_nodes:
+ description:
+ - dict of registered cluster nodes (with C(nodename) as the key and last registration
+ time as the value).
+ This is 'registeredNodes' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - registeredNodes
+
+ client_template:
+ description:
+ - Client template to use for this client. If it does not exist, this field will silently
+ be dropped.
+ This is 'clientTemplate' in the Keycloak REST API.
+ type: str
+ aliases:
+ - clientTemplate
+
+ use_template_config:
+ description:
+ - Whether or not to use configuration from the I(client_template).
+ This is 'useTemplateConfig' in the Keycloak REST API.
+ aliases:
+ - useTemplateConfig
+ type: bool
+
+ use_template_scope:
+ description:
+ - Whether or not to use scope configuration from the I(client_template).
+ This is 'useTemplateScope' in the Keycloak REST API.
+ aliases:
+ - useTemplateScope
+ type: bool
+
+ use_template_mappers:
+ description:
+ - Whether or not to use mapper configuration from the I(client_template).
+ This is 'useTemplateMappers' in the Keycloak REST API.
+ aliases:
+ - useTemplateMappers
+ type: bool
+
+ surrogate_auth_required:
+ description:
+ - Whether or not surrogate auth is required.
+ This is 'surrogateAuthRequired' in the Keycloak REST API.
+ aliases:
+ - surrogateAuthRequired
+ type: bool
+
+ authorization_settings:
+ description:
+ - a data structure defining the authorization settings for this client. For reference,
+ please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation).
+ This is 'authorizationSettings' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - authorizationSettings
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client.
+ This is 'protocolMappers' in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is asked to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the I(existing) field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client. This can contain various configuration
+ settings; an example is given in the examples section. While an exhaustive list of
+ permissible options is not available, possible options as of Keycloak 3.4 are listed below. The Keycloak
+ API does not validate whether a given option is appropriate for the protocol used; if specified
+ anyway, Keycloak will simply not use it.
+ type: dict
+ suboptions:
+ saml.authnstatement:
+ description:
+ - For SAML clients, boolean specifying whether or not a statement containing method and timestamp
+ should be included in the login response.
+
+ saml.client.signature:
+ description:
+ - For SAML clients, boolean specifying whether a client signature is required and validated.
+
+ saml.encrypt:
+ description:
+ - Boolean specifying whether SAML assertions should be encrypted with the client's public key.
+
+ saml.force.post.binding:
+ description:
+ - For SAML clients, boolean specifying whether always to use POST binding for responses.
+
+ saml.onetimeuse.condition:
+ description:
+ - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
+
+ saml.server.signature:
+ description:
+ - Boolean specifying whether SAML documents should be signed by the realm.
+
+ saml.server.signature.keyinfo.ext:
+ description:
+ - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
+ of the signing key id in the SAML Extensions element.
+
+ saml.signature.algorithm:
+ description:
+ - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
+
+ saml.signing.certificate:
+ description:
+ - SAML signing key certificate, base64-encoded.
+
+ saml.signing.private.key:
+ description:
+ - SAML signing key private key, base64-encoded.
+
+ saml_assertion_consumer_url_post:
+ description:
+ - SAML POST Binding URL for the client's assertion consumer service (login responses).
+
+ saml_assertion_consumer_url_redirect:
+ description:
+ - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
+
+ saml_force_name_id_format:
+ description:
+ - For SAML clients, boolean specifying whether to ignore the requested NameID subject format and use the configured one instead.
+
+ saml_name_id_format:
+ description:
+ - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)).
+
+ saml_signature_canonicalization_method:
+ description:
+ - SAML signature canonicalization method. This is one of four values, namely
+ C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
+ C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+
+ saml_single_logout_service_url_post:
+ description:
+ - SAML POST binding url for the client's single logout service.
+
+ saml_single_logout_service_url_redirect:
+ description:
+ - SAML redirect binding url for the client's single logout service.
+
+ user.info.response.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
+
+ request.object.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
+ OIDC request object. One of C(any), C(none), C(RS256).
+
+ use.jwks.url:
+ description:
+ - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
+ public keys.
+
+ jwks.url:
+ description:
+ - For OpenID-Connect clients, URL where client keys in JWK are stored.
+
+ jwt.credential.certificate:
+ description:
+ - For OpenID-Connect clients, client certificate for validating JWT issued by
+ client and signed by its key, base64-encoded.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client (minimal example)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: present
+
+- name: Delete a Keycloak client
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: absent
+
+- name: Create or update a Keycloak client (with all the bells and whistles)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ realm: master
+ client_id: test
+ id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
+ name: this_is_a_test
+ description: Description of this wonderful client
+ root_url: https://www.example.com/
+ admin_url: https://www.example.com/admin_url
+ base_url: basepath
+ enabled: True
+ client_authenticator_type: client-secret
+ secret: REALLYWELLKEPTSECRET
+ redirect_uris:
+ - https://www.example.com/*
+ - http://localhost:8888/
+ web_origins:
+ - https://www.example.com/*
+ not_before: 1507825725
+ bearer_only: False
+ consent_required: False
+ standard_flow_enabled: True
+ implicit_flow_enabled: False
+ direct_access_grants_enabled: False
+ service_accounts_enabled: False
+ authorization_services_enabled: False
+ public_client: False
+ frontchannel_logout: False
+ protocol: openid-connect
+ full_scope_allowed: false
+ node_re_registration_timeout: -1
+ client_template: test
+ use_template_config: False
+ use_template_scope: false
+ use_template_mappers: no
+ registered_nodes:
+ node01.example.com: 1507828202
+ registration_access_token: eyJWT_TOKEN
+ surrogate_auth_required: false
+ default_roles:
+ - test01
+ - test02
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ - config:
+ attribute.name: Role
+ attribute.nameformat: Basic
+ single: false
+ consentRequired: false
+ name: role list
+ protocol: saml
+ protocolMapper: saml-role-list-mapper
+ attributes:
+ saml.authnstatement: True
+ saml.client.signature: True
+ saml.force.post.binding: True
+ saml.server.signature: True
+ saml.signature.algorithm: RSA_SHA256
+ saml.signing.certificate: CERTIFICATEHERE
+ saml.signing.private.key: PRIVATEKEYHERE
+ saml_force_name_id_format: False
+ saml_name_id_format: username
+ saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
+ user.info.response.signature.alg: RS256
+ request.object.signature.alg: RS256
+ use.jwks.url: true
+ jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
+ jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client testclient has been updated"
+
+proposed:
+ description: client representation of proposed changes to client
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+existing:
+ description: client representation of existing client (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+end_state:
+ description: client representation of client after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sanitize_cr(clientrep):
+ """ Removes probably sensitive details from a client representation
+
+ :param clientrep: the clientrep dict to be sanitized
+ :return: sanitized clientrep dict
+ """
+ result = clientrep.copy()
+ if 'secret' in result:
+ result['secret'] = 'no_log'
+ if 'attributes' in result:
+ if 'saml.signing.private.key' in result['attributes']:
+ result['attributes']['saml.signing.private.key'] = 'no_log'
+ return result
+
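+# A minimal usage sketch of sanitize_cr() with hypothetical values:
+#
+#     rep = {'clientId': 'test', 'secret': 'hunter2',
+#            'attributes': {'saml.signing.private.key': 'PEM...'}}
+#     sanitize_cr(rep)
+#     # -> {'clientId': 'test', 'secret': 'no_log',
+#     #     'attributes': {'saml.signing.private.key': 'no_log'}}
+#
+# Note that dict.copy() is shallow, so masking the nested 'attributes' entry
+# also mutates the caller's dict; only top-level keys such as 'secret' are
+# rebound on the copy alone.
+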
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(type='str', default='master'),
+
+ id=dict(type='str'),
+ client_id=dict(type='str', aliases=['clientId']),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ root_url=dict(type='str', aliases=['rootUrl']),
+ admin_url=dict(type='str', aliases=['adminUrl']),
+ base_url=dict(type='str', aliases=['baseUrl']),
+ surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
+ enabled=dict(type='bool'),
+ client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
+ secret=dict(type='str', no_log=True),
+ registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True),
+ default_roles=dict(type='list', aliases=['defaultRoles']),
+ redirect_uris=dict(type='list', aliases=['redirectUris']),
+ web_origins=dict(type='list', aliases=['webOrigins']),
+ not_before=dict(type='int', aliases=['notBefore']),
+ bearer_only=dict(type='bool', aliases=['bearerOnly']),
+ consent_required=dict(type='bool', aliases=['consentRequired']),
+ standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
+ implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
+ direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
+ service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
+ authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
+ public_client=dict(type='bool', aliases=['publicClient']),
+ frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
+ node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
+ registered_nodes=dict(type='dict', aliases=['registeredNodes']),
+ client_template=dict(type='str', aliases=['clientTemplate']),
+ use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
+ use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
+ use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+ authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['client_id', 'id']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ cid = module.params.get('id')
+ state = module.params.get('state')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ client_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
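+ # client_params now contains only the option names that belong in the client
+ # representation, e.g. (hypothetically) ['client_id', 'name', 'enabled'].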
+ # See whether the client already exists in Keycloak
+ if cid is None:
+ before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
+ if before_client is not None:
+ cid = before_client['id']
+ else:
+ before_client = kc.get_client_by_id(cid, realm=realm)
+
+ if before_client is None:
+ before_client = dict()
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
+ for client_param in client_params:
+ new_param_value = module.params.get(client_param)
+
+ # some lists in the Keycloak API are sorted, some are not.
+ if isinstance(new_param_value, list):
+ if client_param in ['attributes']:
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ # Unfortunately, the ansible argument spec checker introduces variables with null values when
+ # they are not specified
+ if client_param == 'protocol_mappers':
+ new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
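+ # e.g. a mapper supplied with only 'name' and 'protocol' would otherwise
+ # carry consentRequired=None and similar; the comprehension above drops
+ # those None-valued entries before comparison.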
+
+ changeset[camel(client_param)] = new_param_value
+
+ # Whether creating or updating a client, take the before-state and merge the changeset into it
+ updated_client = before_client.copy()
+ updated_client.update(changeset)
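+ # e.g. (hypothetical values): before_client = {'clientId': 'test', 'enabled': True}
+ # merged with changeset = {'enabled': False} yields
+ # updated_client = {'clientId': 'test', 'enabled': False}.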
+
+ result['proposed'] = sanitize_cr(changeset)
+ result['existing'] = sanitize_cr(before_client)
+
+ # If the client does not exist yet, before_client is still empty
+ if before_client == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client
+ result['changed'] = True
+ if 'clientId' not in updated_client:
+ module.fail_json(msg='client_id needs to be specified when creating a new client')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize_cr(updated_client))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client(updated_client, realm=realm)
+ after_client = kc.get_client_by_clientid(updated_client['clientId'], realm=realm)
+
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been created.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(updated_client))
+ result['changed'] = (before_client != updated_client)
+
+ module.exit_json(**result)
+
+ kc.update_client(cid, updated_client, realm=realm)
+
+ after_client = kc.get_client_by_id(cid, realm=realm)
+ if before_client == after_client:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(after_client))
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been updated.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ # Delete existing client
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = sanitize_cr(before_client)
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py
new file mode 100644
index 00000000..d68198d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clienttemplate
+
+short_description: Allows administration of Keycloak client templates via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak client templates via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html)
+
+ - The Keycloak API does not always enforce that only sensible settings are used -- you can set
+ SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+ state:
+ description:
+ - State of the client template.
+ - On C(present), the client template will be created (or updated if it exists already).
+ - On C(absent), the client template will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ id:
+ description:
+ - Id of client template to be worked on. This is usually a UUID.
+ type: str
+
+ realm:
+ description:
+ - Realm this client template is found in.
+ type: str
+ default: master
+
+ name:
+ description:
+ - Name of the client template
+ type: str
+
+ description:
+ description:
+ - Description of the client template in Keycloak
+ type: str
+
+ protocol:
+ description:
+ - Type of client template (either C(openid-connect) or C(saml)).
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client template or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ type: bool
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client template.
+ This is 'protocolMappers' in the Keycloak REST API.
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is asked to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the I(existing) field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client template. This can contain various
+ configuration settings, though in the default installation of Keycloak as of 3.4, none
+ are documented or known, so this is usually empty.
+ type: dict
+
+notes:
+- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled),
+ I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and
+ I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
+ Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
+ they are not available through this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client template (minimal)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+
+- name: Delete Keycloak client template
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ state: absent
+ name: test01
+
+- name: Create or update Keycloak client template (with a protocol mapper)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ full_scope_allowed: false
+ id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client template testclient has been updated"
+
+proposed:
+ description: client template representation of proposed changes to client template
+ returned: always
+ type: dict
+ sample: {
+ name: "test01"
+ }
+existing:
+ description: client template representation of existing client template (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+end_state:
+ description: client template representation of client template after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ realm=dict(type='str', default='master'),
+ state=dict(default='present', choices=['present', 'absent']),
+
+ id=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool'),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('id')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ clientt_params = [x for x in module.params
+ if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm',
+ 'auth_client_secret', 'auth_username', 'auth_password',
+ 'validate_certs', 'realm'] and module.params.get(x) is not None]
+
+ # See whether the client template already exists in Keycloak
+ if cid is None:
+ before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm)
+ if before_clientt is not None:
+ cid = before_clientt['id']
+ else:
+ before_clientt = kc.get_client_template_by_id(cid, realm=realm)
+
+ if before_clientt is None:
+ before_clientt = dict()
+
+ result['existing'] = before_clientt
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
+ for clientt_param in clientt_params:
+ # lists in the Keycloak API are sorted
+ new_param_value = module.params.get(clientt_param)
+ if isinstance(new_param_value, list):
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ changeset[camel(clientt_param)] = new_param_value
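+ # camel() maps the snake_cased option names onto the camelCase keys the
+ # Keycloak API expects, e.g. camel('full_scope_allowed') -> 'fullScopeAllowed'.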
+
+ # Whether creating or updating a client template, take the before-state and merge the changeset into it
+ updated_clientt = before_clientt.copy()
+ updated_clientt.update(changeset)
+
+ result['proposed'] = changeset
+
+ # If the client template does not exist yet, before_clientt is still empty
+ if before_clientt == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client template does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client template
+ result['changed'] = True
+ if 'name' not in updated_clientt:
+ module.fail_json(msg='name needs to be specified when creating a new client template')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_clientt)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client_template(updated_clientt, realm=realm)
+ after_clientt = kc.get_client_template_by_name(updated_clientt['name'], realm=realm)
+
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been created.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client template
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client template with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=updated_clientt)
+
+ module.exit_json(**result)
+
+ kc.update_client_template(cid, updated_clientt, realm=realm)
+
+ after_clientt = kc.get_client_template_by_id(cid, realm=realm)
+ if before_clientt == after_clientt:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=after_clientt)
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been updated.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+ # Delete existing client template
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = before_clientt
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client_template(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client template %s has been deleted.' % before_clientt['name']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py
new file mode 100644
index 00000000..45b5c290
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Adam Goossens <adam.goossens@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_group
+
+short_description: Allows administration of Keycloak groups via Keycloak API
+
+description:
+ - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a group, where possible provide the group ID to the module. This removes a lookup
+ to the API to translate the name into the group ID.
+
+
+options:
+ state:
+ description:
+ - State of the group.
+ - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the group will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ name:
+ type: str
+ description:
+ - Name of the group.
+ - This parameter is required only when creating or updating the group.
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this group resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this group.
+ - This parameter is not required for updating or deleting a group but
+ providing it will reduce the number of API calls required.
+
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the group.
+ - Values may be single values (e.g. a string) or a list of strings.
+
+notes:
+ - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+ are read-only for groups. This limitation will be removed in a later version of this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Adam Goossens (@adamgoossens)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak group
+ community.general.keycloak_group:
+ name: my-new-kc-group
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ state: absent
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a Keycloak group based on name
+ community.general.keycloak_group:
+ name: my-group-for-deletion
+ state: absent
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Update the name of a Keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ name: an-updated-kc-group-name
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a keycloak group with some custom attributes
+ community.general.keycloak_group:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ name: my-new_group
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
+ delegate_to: localhost
+'''
+
+RETURN = '''
+group:
+ description: Group representation of the group after module execution (sample is truncated).
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: GUID that identifies the group
+ type: str
+ returned: always
+ sample: 23f38145-3195-462c-97e7-97041ccea73e
+ name:
+ description: Name of the group
+ type: str
+ returned: always
+ sample: grp-test-123
+ attributes:
+ description: Attributes applied to this group
+ type: dict
+ returned: always
+ sample:
+ attr1: ["val1", "val2", "val3"]
+ path:
+ description: URI path to the group
+ type: str
+ returned: always
+ sample: /grp-test-123
+ realmRoles:
+ description: An array of the realm-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ subGroups:
+ description: A list of groups that are children of this group. These groups will have the same parameters as
+ documented here.
+ type: list
+ returned: always
+ clientRoles:
+ description: A list of client-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ access:
+ description: A dict describing the accesses you have to this group based on the credentials used.
+ type: dict
+ returned: always
+ sample:
+ manage: true
+ manageMembership: true
+ view: true
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ attributes=dict(type='dict')
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, group='')
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ gid = module.params.get('id')
+ name = module.params.get('name')
+ attributes = module.params.get('attributes')
+
+ before_group = None # current state of the group, for merging.
+
+ # does the group already exist?
+ if gid is None:
+ before_group = kc.get_group_by_name(name, realm=realm)
+ else:
+ before_group = kc.get_group_by_groupid(gid, realm=realm)
+
+ before_group = {} if before_group is None else before_group
+
+ # attributes in Keycloak have their values returned as lists
+ # via the API. attributes is a dict, so we'll transparently convert
+ # the values to lists.
+ if attributes is not None:
+ for key, val in module.params['attributes'].items():
+ module.params['attributes'][key] = [val] if not isinstance(val, list) else val
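+ # e.g. (hypothetical) attributes={'attrib1': 'value1'} becomes
+ # {'attrib1': ['value1']}, matching the list-valued representation the
+ # Keycloak API returns for group attributes.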
+
+ group_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+
+ # build a changeset
+ changeset = {}
+ for param in group_params:
+ new_param_value = module.params.get(param)
+ old_value = before_group[param] if param in before_group else None
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
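+ # e.g. (hypothetical): if the existing group is named 'old-name' and the
+ # module was called with name='new-name', the changeset is {'name': 'new-name'}.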
+
+ # prepare the new group
+ updated_group = before_group.copy()
+ updated_group.update(changeset)
+
+ # if before_group is empty, the group does not exist yet.
+ if before_group == {}:
+ if state == 'absent':
+ # nothing to do.
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Group does not exist; doing nothing.'
+ result['group'] = dict()
+ module.exit_json(**result)
+
+ # for 'present', create a new group.
+ result['changed'] = True
+ if name is None:
+ module.fail_json(msg='name must be specified when creating a new group')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do it for real!
+ kc.create_group(updated_group, realm=realm)
+ after_group = kc.get_group_by_name(name, realm)
+
+ result['group'] = after_group
+ result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'],
+ id=after_group['id'])
+
+ else:
+ if state == 'present':
+ # no changes
+ if updated_group == before_group:
+ result['changed'] = False
+ result['group'] = updated_group
+ result['msg'] = "No changes required to group {name}.".format(name=before_group['name'])
+ module.exit_json(**result)
+
+ # update the existing group
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_group(updated_group, realm=realm)
+
+ after_group = kc.get_group_by_groupid(updated_group['id'], realm=realm)
+
+ result['group'] = after_group
+ result['msg'] = "Group {id} has been updated".format(id=after_group['id'])
+
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ result['group'] = dict()
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete for real
+ gid = before_group['id']
+ kc.delete_group(groupid=gid, realm=realm)
+
+ result['changed'] = True
+ result['msg'] = "Group {name} has been deleted".format(name=before_group['name'])
+
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for cannot be found.
+ - It is recommended to use the C(no_log) option to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: True # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms. 'My 1Password item' is retrieved twice:
+# once fetching 'password', as no field name is specified (default behaviour), and once fetching
+# 'Custom field name', as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
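+
+# Illustrative only: sign in automatically via 'auto_login'. The subdomain,
+# username and vaulted variable names below are placeholders, not defaults.
+- name: Get a password, signing in to 1Password automatically
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ auto_login:
+ subdomain: my-team
+ username: user@example.com
+ master_password: "{{ vaulted_master_password }}"
+ secret_key: "{{ vaulted_secret_key }}"
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!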
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1Password item matching the given search terms; shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
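+ # Illustrative shape of the assembled command, e.g.
+ # ['op', 'get', 'item', 'My 1Password item', '--session=<token>'].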
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+ # We haven't found it yet, so now let's see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections; otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
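+ # Illustrative normalization: a terms list such as
+ # ['My item', {'name': 'Other item', 'field': 'username'}]
+ # becomes
+ # [{'name': 'My item', 'field': 'password', 'section': None, 'vault': None},
+ # {'name': 'Other item', 'field': 'username', 'section': None, 'vault': None}]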
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+ module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s sigin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for can not be found.
+ - It is recommended to use the C(no_log) option to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: True # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two
+# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the
+# second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
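+
+# Illustrative only: sign in automatically via 'auto_login'. The subdomain,
+# username and vaulted variable names below are placeholders, not defaults.
+- name: Get a password, signing in to 1Password automatically
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ auto_login:
+ subdomain: my-team
+ username: user@example.com
+ master_password: "{{ vaulted_master_password }}"
+ secret_key: "{{ vaulted_secret_key }}"
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!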
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1Password item matching the given search terms; shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
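+ # Illustrative shape of the assembled command, e.g.
+ # ['op', 'get', 'item', 'My 1Password item', '--session=<token>'].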
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+ # We haven't found it yet, so now let's see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections; otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
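+ # Illustrative normalization: a terms list such as
+ # ['My item', {'name': 'Other item', 'field': 'username'}]
+ # becomes
+ # [{'name': 'My item', 'field': 'password', 'section': None, 'vault': None},
+ # {'name': 'Other item', 'field': 'username', 'section': None, 'vault': None}]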
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+ module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s sigin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py
new file mode 100644
index 00000000..aa477e42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
+description:
+ - This module will update settings for OpenDJ with the command set-backend-prop.
+ - It will first check via the get-backend-prop command whether the configuration needs to be applied.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+options:
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ username:
+ description:
+ - The username to connect to.
+ required: false
+ default: cn=Directory Manager
+ password:
+ description:
+ - The password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ passwordfile:
+ description:
+ - Location to the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ state:
+ description:
+ - If the configuration needs to be added/updated.
+ required: false
+ default: "present"
+'''
+
+EXAMPLES = '''
+ - name: Add or update OpenDJ backend properties
+ action: opendj_backendprop
+ hostname=localhost
+ port=4444
+ username="cn=Directory Manager"
+ password=password
+ backend=userRoot
+ name=index-entry-limit
+ value=5000
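+
+ # The same task written in YAML dict syntax (illustrative):
+ - name: Add or update OpenDJ backend properties
+ community.general.opendj_backendprop:
+ hostname: localhost
+ port: 4444
+ username: "cn=Directory Manager"
+ password: password
+ backend: userRoot
+ name: index-entry-limit
+ value: 5000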
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BackendProp(object):
+
+ def __init__(self, module):
+ self._module = module
+
+ def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'get-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '-n', '-X', '-s'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return stdout
+ else:
+ self._module.fail_json(msg="Error message: " + str(stderr))
+
+ def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'set-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '--set', name + ":" + value,
+ '-n', '-X'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return True
+ else:
+ self._module.fail_json(msg="Error message: " + stderr)
+
+ def validate_data(self, data=None, name=None, value=None):
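+ # 'data' is the stdout of 'dsconfig get-backend-prop ... -s'; as assumed
+ # here, each non-empty line is a whitespace-separated 'property value'
+ # pair, e.g. 'index-entry-limit 4000'. Return True only when the requested
+ # property already holds the requested value, so set-backend-prop can be skipped.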
+ for config_line in data.split('\n'):
+ if config_line:
+ split_line = config_line.split()
+ if split_line[0] == name:
+ if split_line[1] == value:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
+ hostname=dict(required=True),
+ port=dict(required=True),
+ username=dict(default="cn=Directory Manager", required=False),
+ password=dict(required=False, no_log=True),
+ passwordfile=dict(required=False, type="path"),
+ backend=dict(required=True),
+ name=dict(required=True),
+ value=dict(required=True),
+ state=dict(default="present"),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['password', 'passwordfile']],
+ required_one_of=[['password', 'passwordfile']]
+ )
+
+ opendj_bindir = module.params['opendj_bindir']
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ passwordfile = module.params['passwordfile']
+ backend_name = module.params['backend']
+ name = module.params['name']
+ value = module.params['value']
+ state = module.params['state']
+
+ if module.params["password"] is not None:
+ password_method = ['-w', password]
+ elif module.params["passwordfile"] is not None:
+ password_method = ['-j', passwordfile]
+
+ opendj = BackendProp(module)
+ validate = opendj.get_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name)
+
+ if validate:
+ if not opendj.validate_data(data=validate, name=name, value=value):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if opendj.set_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name,
+ name=name,
+ value=value):
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py
new file mode 100644
index 00000000..fa8ac66c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_firmware
+short_description: Firmware update from a repository on a network share (CIFS, NFS).
+description:
+ - Update the Firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of
+ available updates.
+ - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs.
+ - All applicable updates contained in the repository are applied to the system.
+ - This feature is available only with iDRAC Enterprise License.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ share_name:
+ description: CIFS or NFS Network share.
+ type: str
+ required: True
+ share_user:
+ description: Network share user in the format 'user@domain' or 'domain\\user' if the user is
+ part of a domain, else 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ share_mnt:
+ description: Local mount path of the network share with read-write permission for the ansible user.
+ This option is mandatory for Network Share.
+ type: str
+ required: True
+ reboot:
+ description: Whether to reboot after applying the updates or not.
+ type: bool
+ default: false
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ default: true
+ catalog_file_name:
+ required: False
+ description: Catalog file name relative to the I(share_name).
+ type: str
+ default: 'Catalog.xml'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+'''
+
+EXAMPLES = """
+---
+- name: Update firmware from repository on a Network Share
+ community.general.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.0:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_pwd"
+ share_mnt: "/mnt/share"
+ reboot: True
+ job_wait: True
+ catalog_file_name: "Catalog.xml"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall firmware update status.
+ returned: always
+ sample: "Successfully updated the firmware."
+update_status:
+ type: dict
+ description: Firmware Update job and progress details from the iDRAC.
+ returned: success
+ sample: {
+ 'InstanceID': 'JID_XXXXXXXXXXXX',
+ 'JobState': 'Completed',
+ 'Message': 'Job completed successfully.',
+ 'MessageId': 'REDXXX',
+ 'Name': 'Repository Update',
+ 'JobStartTime': 'NA',
+ 'Status': 'Success',
+ }
+"""
+
+
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+def _validate_catalog_file(catalog_file_name):
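+ # Illustrative: 'Catalog.xml' and 'catalog.XML' pass, while '' or
+ # 'Catalog.json' raise ValueError before any connection is attempted.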
+ normalized_file_name = catalog_file_name.lower()
+ if not normalized_file_name:
+ raise ValueError('catalog_file_name should be a non-empty string.')
+ elif not normalized_file_name.endswith("xml"):
+ raise ValueError('catalog_file_name should be an XML file.')
+
+
+def update_firmware(idrac, module):
+ """Update firmware from a network share and return the job details."""
+ msg = {}
+ msg['changed'] = False
+ msg['update_status'] = {}
+
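+ # The repository catalog is addressed as '<share_name>/<catalog_file_name>',
+ # e.g. '192.168.0.0:/share/Catalog.xml' for an NFS share (illustrative).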
+ try:
+ upd_share = FileOnShare(remote=module.params['share_name'] + "/" + module.params['catalog_file_name'],
+ mount_point=module.params['share_mnt'],
+ isFolder=False,
+ creds=UserCredentials(
+ module.params['share_user'],
+ module.params['share_password'])
+ )
+
+ idrac.use_redfish = True
+ if '12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration:
+ idrac.use_redfish = False
+
+ apply_update = True
+ msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share,
+ apply_update,
+ module.params['reboot'],
+ module.params['job_wait'])
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+
+ if "Status" in msg['update_status']:
+ if msg['update_status']['Status'] == "Success":
+ if module.params['job_wait']:
+ msg['changed'] = True
+ else:
+ module.fail_json(msg='Failed to update firmware.', update_status=msg['update_status'])
+ return msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_mnt": {"required": True, "type": 'str'},
+
+ "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
+ "reboot": {"required": False, "type": 'bool', "default": False},
+ "job_wait": {"required": False, "type": 'bool', "default": True},
+ },
+
+ supports_check_mode=False)
+
+ try:
+ # Validate the catalog file
+ _validate_catalog_file(module.params['catalog_file_name'])
+ # Connect to iDRAC and update firmware
+ with iDRACConnection(module.params) as idrac:
+ update_status = update_firmware(idrac, module)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(msg='Successfully updated the firmware.', update_status=update_status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
new file mode 100644
index 00000000..ea97ecdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_command
+short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+ - For use with Dell iDRAC operations that require Redfish OEM extensions
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Create BIOS configuration job (schedule BIOS setting update)
+ community.general.idrac_redfish_command:
+ category: Systems
+ command: CreateBiosConfigJob
+ resource_id: System.Embedded.1
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def create_bios_config_job(self):
+ result = {}
+ key = "Bios"
+ jobs = "Jobs"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uris[0])
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
+ "@odata.id"]
+
+ payload = {"TargetSettingsURI": set_bios_attr_uri}
+ response = self.post_request(
+ self.root_uri + self.manager_uri + "/" + jobs, payload)
+ if response['ret'] is False:
+ return response
+
+ response_output = response['resp'].__dict__
+ job_id = response_output["headers"]["Location"]
+ job_id = re.search("JID_.+", job_id).group()
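+ # Illustrative: a Location header such as '.../Jobs/JID_123456789012'
+ # yields job_id 'JID_123456789012'.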
+ # Currently not passing job_id back to user but patch is coming
+ return {'ret': True, 'msg': "Config job %s created" % job_id}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["CreateBiosConfigJob"],
+ "Accounts": [],
+ "Manager": []
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "CreateBiosConfigJob":
+ # execute only if we find a Managers resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ result = rf_utils.create_bios_config_job()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ module.exit_json(changed=True, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
new file mode 100644
index 00000000..485d54cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_config
+short_description: Manages servers through iDRAC using Dell Redfish APIs
+description:
+ - For use with Dell iDRAC operations that require Redfish OEM extensions
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ set or update a configuration attribute.
+options:
+ category:
+ required: true
+ type: str
+ description:
+ - Category to execute on iDRAC
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC
+ - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
+ I(SetSystemAttributes) are mutually exclusive commands when C(category)
+ is I(Manager)
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC
+ type: str
+ manager_attribute_name:
+ required: false
+ description:
+ - (deprecated) name of iDRAC attribute to update
+ type: str
+ manager_attribute_value:
+ required: false
+ description:
+ - (deprecated) value of iDRAC attribute to update
+ type: str
+ manager_attributes:
+ required: false
+ description:
+ - dictionary of iDRAC attribute name and value pairs to update
+ default: {}
+ type: 'dict'
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to iDRAC controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Enable NTP and set NTP server and Time zone attributes in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ NTPConfigGroup.1.NTPEnable: "Enabled"
+ NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
+ Time.1.Timezone: "{{ timezone }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable Syslog and set Syslog servers in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SysLog.1.SysLogEnable: "Enabled"
+ SysLog.1.Server1: "{{ syslog_server1 }}"
+ SysLog.1.Server2: "{{ syslog_server2 }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Configure SNMP community string, port, protocol and trap format
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SNMP.1.AgentEnable: "Enabled"
+ SNMP.1.AgentCommunity: "public_community_string"
+ SNMP.1.TrapFormat: "SNMPv1"
+ SNMP.1.SNMPProtocol: "All"
+ SNMP.1.DiscoveryPort: 161
+ SNMP.1.AlertPort: 162
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable CSIOR
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetLifecycleControllerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Set Power Supply Redundancy Policy to A/B Grid Redundant
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetSystemAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments
+)
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def set_manager_attributes(self, command):
+
+ result = {}
+ required_arg_spec = {'manager_attributes': {'required': True}}
+
+ try:
+ check_required_arguments(required_arg_spec, self.module.params)
+
+ except TypeError as e:
+ msg = to_native(e)
+ self.module.fail_json(msg=msg)
+
+ key = "Attributes"
+ command_manager_attributes_uri_map = {
+ "SetManagerAttributes": self.manager_uri,
+ "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1",
+ "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1"
+ }
+ manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri)
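+ # Illustrative: command 'SetLifecycleControllerAttributes' patches
+ # '/redfish/v1/Managers/LifecycleController.Embedded.1/Attributes', per the map above.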
+
+ attributes = self.module.params['manager_attributes']
+ manager_attr_name = self.module.params.get('manager_attribute_name')
+ manager_attr_value = self.module.params.get('manager_attribute_value')
+
+ # manager attributes to update
+ if manager_attr_name:
+ attributes.update({manager_attr_name: manager_attr_value})
+
+ attrs_to_patch = {}
+ attrs_skipped = {}
+
+ # Search for key entry and extract URI from it
+ response = self.get_request(self.root_uri + manager_uri + "/" + key)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False,
+ 'msg': "%s: Key %s not found" % (command, key)}
+
+ for attr_name, attr_value in attributes.items():
+ # Check if attribute exists
+ if attr_name not in data[u'Attributes']:
+ return {'ret': False,
+ 'msg': "%s: Manager attribute %s not found" % (command, attr_name)}
+
+ # Find out if value is already set to what we want. If yes, exclude
+ # those attributes
+ if data[u'Attributes'][attr_name] == attr_value:
+ attrs_skipped.update({attr_name: attr_value})
+ else:
+ attrs_to_patch.update({attr_name: attr_value})
+
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "Manager attributes already set"}
+
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]
+}
+
+# list of mutually exclusive commands for a category
+CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = {
+ "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ manager_attribute_name=dict(default=None),
+ manager_attribute_value=dict(default=None),
+ manager_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # check for mutually exclusive commands
+ try:
+ # check_mutually_exclusive accepts a single list or list of lists that
+ # are groups of terms that should be mutually exclusive with one another
+ # and checks that against a dictionary
+ check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category],
+ dict.fromkeys(command_list, True))
+
+ except TypeError as e:
+ module.fail_json(msg=to_native(e))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]:
+ result = rf_utils.set_manager_attributes(command)
+
+ if any((module.params['manager_attribute_name'], module.params['manager_attribute_value'])):
+ module.deprecate(msg='Arguments `manager_attribute_name` and '
+ '`manager_attribute_value` are deprecated. '
+ 'Use `manager_attributes` instead for passing in '
+ 'the manager attribute name and value pairs',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+ - For use with Dell EMC iDRAC operations that require Redfish OEM extensions
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get Manager attributes with a timeout of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+  description: Different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+  - For use with Dell EMC iDRAC operations that require Redfish OEM extensions.
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get Manager attributes with a timeout of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+  description: Different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py
new file mode 100644
index 00000000..39857fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_server_config_profile
+short_description: Export or import iDRAC Server Configuration Profile (SCP).
+description:
+  - Export the Server Configuration Profile (SCP) from the iDRAC, or import an SCP from a network share or a local file.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ command:
+ description:
+ - If C(import), will perform SCP import operations.
+ - If C(export), will perform SCP export operations.
+ choices: ['import', 'export']
+ default: 'export'
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ required: True
+ share_name:
+ description: CIFS or NFS Network Share or a local path.
+ type: str
+ required: True
+ share_user:
+    description: Network share user in the format 'user@domain' or 'domain\\user' if the user is
+      part of a domain; otherwise 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ scp_file:
+ description: Server Configuration Profile file name. This option is mandatory for C(import) command.
+ type: str
+ scp_components:
+ description:
+      - If C(ALL), this module will import all component configurations from the SCP file.
+ - If C(IDRAC), this module will import iDRAC configuration from SCP file.
+ - If C(BIOS), this module will import BIOS configuration from SCP file.
+ - If C(NIC), this module will import NIC configuration from SCP file.
+ - If C(RAID), this module will import RAID configuration from SCP file.
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ default: 'ALL'
+ shutdown_type:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(Graceful), it gracefully shuts down the server.
+ - If C(Forced), it forcefully shuts down the server.
+ - If C(NoReboot), it does not reboot the server.
+ choices: ['Graceful', 'Forced', 'NoReboot']
+ default: 'Graceful'
+ end_host_power_state:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(On), End host power state is on.
+ - If C(Off), End host power state is off.
+    choices: ['On', 'Off']
+ default: 'On'
+ export_format:
+ description: Specify the output file format. This option is applicable for C(export) command.
+ choices: ['JSON', 'XML']
+ default: 'XML'
+ export_use:
+ description: Specify the type of server configuration profile (SCP) to be exported.
+ This option is applicable for C(export) command.
+ choices: ['Default', 'Clone', 'Replace']
+ default: 'Default'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Jagadeesh N V(@jagadeeshnv)"
+
+'''
+
+EXAMPLES = r'''
+---
+- name: Import Server Configuration Profile from a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Import Server Configuration Profile from a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Export Server Configuration Profile to a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+
+- name: Export Server Configuration Profile to a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the import or export SCP job.
+ returned: always
+ sample: "Successfully imported the Server Configuration Profile"
+scp_status:
+ type: dict
+ description: SCP operation job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "XXX123",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+'''
+
+import os
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+ from omdrivers.enums.iDRAC.iDRACEnums import (SCPTargetEnum, EndHostPowerStateEnum,
+ ShutdownTypeEnum, ExportFormatEnum, ExportUseEnum)
+except ImportError:
+ pass
+
+
+def run_import_server_config_profile(idrac, module):
+ """Import Server Configuration Profile from a network share."""
+ target = SCPTargetEnum[module.params['scp_components']]
+ job_wait = module.params['job_wait']
+ end_host_power_state = EndHostPowerStateEnum[module.params['end_host_power_state']]
+ shutdown_type = ShutdownTypeEnum[module.params['shutdown_type']]
+ idrac.use_redfish = True
+
+ try:
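+        # Illustrative: share_name '192.168.0.2:/share' and scp_file 'scp_filename.xml'
+        # combine into share_path '192.168.0.2:/share/scp_filename.xml' (joined with os.sep).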
+ myshare = file_share_manager.create_share_obj(
+ share_path="{0}{1}{2}".format(module.params['share_name'], os.sep, module.params['scp_file']),
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']), isFolder=False)
+ import_status = idrac.config_mgr.scp_import(myshare,
+ target=target, shutdown_type=shutdown_type,
+ end_host_power_state=end_host_power_state,
+ job_wait=job_wait)
+ if not import_status or import_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to import scp.', scp_status=import_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return import_status
+
+
+def run_export_server_config_profile(idrac, module):
+ """Export Server Configuration Profile to a network share."""
+ export_format = ExportFormatEnum[module.params['export_format']]
+ scp_file_name_format = "%ip_%Y%m%d_%H%M%S_scp.{0}".format(module.params['export_format'].lower())
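+    # Illustrative: omsdk expands these tokens to something like
+    # '192.168.0.1_20190321_143000_scp.xml' for an XML export.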
+ target = SCPTargetEnum[module.params['scp_components']]
+ export_use = ExportUseEnum[module.params['export_use']]
+ idrac.use_redfish = True
+
+ try:
+ myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'],
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']),
+ isFolder=True)
+ scp_file_name = myshare.new_file(scp_file_name_format)
+ export_status = idrac.config_mgr.scp_export(scp_file_name,
+ target=target,
+ export_format=export_format,
+ export_use=export_use,
+ job_wait=module.params['job_wait'])
+ if not export_status or export_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to export scp.', scp_status=export_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return export_status
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str',
+ "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "command": {"required": False, "type": 'str',
+ "choices": ['export', 'import'], "default": 'export'},
+ "job_wait": {"required": True, "type": 'bool'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str',
+ "aliases": ['share_pwd'], "no_log": True},
+ "scp_components": {"required": False,
+ "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
+ "default": 'ALL'},
+
+ "scp_file": {"required": False, "type": 'str'},
+ "shutdown_type": {"required": False,
+ "choices": ['Graceful', 'Forced', 'NoReboot'],
+ "default": 'Graceful'},
+ "end_host_power_state": {"required": False,
+ "choices": ['On', 'Off'],
+ "default": 'On'},
+
+ "export_format": {"required": False, "type": 'str',
+ "choices": ['JSON', 'XML'], "default": 'XML'},
+ "export_use": {"required": False, "type": 'str',
+ "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
+ },
+ required_if=[
+ ["command", "import", ["scp_file"]]
+ ],
+ supports_check_mode=False)
+
+ try:
+ changed = False
+ with iDRACConnection(module.params) as idrac:
+ command = module.params['command']
+ if command == 'import':
+ scp_status = run_import_server_config_profile(idrac, module)
+ if "No changes were applied" not in scp_status.get('Message', ""):
+ changed = True
+ else:
+ scp_status = run_export_server_config_profile(idrac, module)
+ module.exit_json(changed=changed, msg="Successfully {0}ed the Server Configuration Profile.".format(command),
+ scp_status=scp_status)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py
new file mode 100644
index 00000000..ca318b4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: imc_rest
+short_description: Manage Cisco IMC hardware through its REST API
+description:
+- Provides direct access to the Cisco IMC REST API.
+- Perform any configuration changes and actions that the Cisco IMC supports.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- lxml
+- xmljson >= 0.1.8
+options:
+ hostname:
+ description:
+ - IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
+ required: true
+ aliases: [ host, ip ]
+ username:
+ description:
+    - Username used to log in to the IMC.
+ default: admin
+ aliases: [ user ]
+ password:
+ description:
+ - The password to use for authentication.
+ default: password
+ path:
+ description:
+ - Name of the absolute path of the filename that includes the body
+ of the http request being sent to the Cisco IMC REST API.
+    - Parameter C(path) is mutually exclusive with parameter C(content).
+ aliases: [ 'src', 'config_file' ]
+ content:
+ description:
+ - When used instead of C(path), sets the content of the API requests directly.
+ - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module.
+    - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream;
+      the Cisco IMC output is subsequently merged.
+    - Parameter C(content) is mutually exclusive with parameter C(path).
+ protocol:
+ description:
+ - Connection protocol to use.
+ default: https
+ choices: [ http, https ]
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+ - This is the time that every single connection (every fragment) can spend.
+ If this C(timeout) is reached, the module will fail with a
+ C(Connection failure) indicating that C(The read operation timed out).
+ default: 60
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+notes:
+- The XML fragments don't need an authentication cookie; this is injected by the module automatically.
+- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
+- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
+ from the previous configuration. As a result, this module will always report a change on subsequent runs.
+ In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
+- If you get a C(Connection failure) related to C(The read operation timed out), increase the C(timeout)
+ parameter. Some XML fragments can take longer than the default timeout.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+'''
+
+EXAMPLES = r'''
+- name: Power down server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Configure IMC using multiple XML fragments
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <!-- Configure Serial-on-LAN -->
+ <configConfMo><inConfig>
+ <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed=="115200" comport="com0"/>
+ </inConfig></configConfMo>
+
+ <!-- Configure Console Redirection -->
+ <configConfMo><inConfig>
+ <biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
+ vpBaudRate="115200"
+ vpConsoleRedirection="com-0"
+ vpFlowControl="none"
+ vpTerminalType="vt100"
+ vpPuttyKeyPad="LINUX"
+ vpRedirectionAfterPOST="Always Enable"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Enable PXE boot and power-cycle server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <!-- Configure PXE boot -->
+ <configConfMo><inConfig>
+ <lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
+ </inConfig></configConfMo>
+
+ <!-- Power cycle server -->
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Reconfigure IMC to boot from storage
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Add customer description to server
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Disable HTTP and increase session timeout to max value 10800 secs
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <configConfMo><inConfig>
+ <commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
+ </inConfig></configConfMo>
+
+ <configConfMo><inConfig>
+ <commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+aaLogin:
+ description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
+ "outPriv": "admin",
+ "outRefreshPeriod": "600",
+ "outSessionId": "114",
+ "outVersion": "2.0(13e)",
+ "response": "yes"
+ }
+configConfMo:
+ description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+elapsed:
+ description: Elapsed time in seconds
+ returned: always
+ type: int
+ sample: 31
+response:
+ description: HTTP response message, including content length
+ returned: always
+ type: str
+ sample: OK (729 bytes)
+status:
+ description: The HTTP response status code
+ returned: always
+  type: int
+ sample: 200
+error:
+ description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
+ returned: failed
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "errorCode": "ERR-xml-parse-error",
+ "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
+ "invocationResult": "594",
+ "response": "yes"
+ }
+error_code:
+ description: Cisco IMC error code
+ returned: failed
+ type: str
+ sample: ERR-xml-parse-error
+error_text:
+ description: Cisco IMC error message
+ returned: failed
+ type: str
+ sample: |
+ XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
+input:
+ description: RAW XML input sent to the Cisco IMC, causing the error
+ returned: failed
+ type: str
+ sample: |
+ <configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
+output:
+ description: RAW XML output received from the Cisco IMC, with error details
+ returned: failed
+ type: str
+ sample: >
+ <error cookie=""
+ response="yes"
+ errorCode="ERR-xml-parse-error"
+ invocationResult="594"
+ errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
+'''
+
+import atexit
+import datetime
+import itertools
+import os
+import traceback
+
+LXML_ETREE_IMP_ERR = None
+try:
+ import lxml.etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+XMLJSON_COBRA_IMP_ERR = None
+try:
+ from xmljson import cobra
+ HAS_XMLJSON_COBRA = True
+except ImportError:
+ XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
+ HAS_XMLJSON_COBRA = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import zip_longest
+from ansible.module_utils.urls import fetch_url
+
+
+def imc_response(module, rawoutput, rawinput=''):
+ ''' Handle IMC returned data '''
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ result = cobra.data(xmloutput)
+
+ # Handle errors
+ if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
+ if rawinput:
+ result['input'] = rawinput
+ result['output'] = rawoutput
+ result['error_code'] = xmloutput.get('errorCode')
+ result['error_text'] = xmloutput.get('errorDescr')
+ module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
+
+ return result
+
+
+def logout(module, url, cookie, timeout):
+ ''' Perform a logout, if needed '''
+ data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
+ resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
+
+
+def merge(one, two):
+    ''' Merge two complex nested data structures into one '''
+    if isinstance(one, dict) and isinstance(two, dict):
+        copy = dict(one)
+        # copy.update({key: merge(one.get(key, None), two[key]) for key in two})
+        copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
+        return copy
+
+    elif isinstance(one, list) and isinstance(two, list):
+        # zip_longest comes from six.moves; itertools.izip_longest exists on Python 2 only
+        return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
+
+ return one if two is None else two
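+    # Illustrative behaviour of merge():
+    #   merge({'a': 1, 'c': 3}, {'b': 2})  -> {'a': 1, 'b': 2, 'c': 3}
+    #   merge([1, 2], [None, 5, 6])        -> [1, 5, 6]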
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ hostname=dict(type='str', required=True, aliases=['host', 'ip']),
+ username=dict(type='str', default='admin', aliases=['user']),
+ password=dict(type='str', default='password', no_log=True),
+ content=dict(type='str'),
+ path=dict(type='path', aliases=['config_file', 'src']),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ timeout=dict(type='int', default=60),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['content', 'path']],
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if not HAS_XMLJSON_COBRA:
+ module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ content = module.params['content']
+ path = module.params['path']
+
+ protocol = module.params['protocol']
+ timeout = module.params['timeout']
+
+ result = dict(
+ failed=False,
+ changed=False,
+ )
+
+ # Report missing file
+ file_exists = False
+ if path:
+ if os.path.isfile(path):
+ file_exists = True
+ else:
+ module.fail_json(msg='Cannot find/access path:\n%s' % path)
+
+ start = datetime.datetime.utcnow()
+
+ # Perform login first
+ url = '%s://%s/nuova' % (protocol, hostname)
+ data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
+ resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or auth['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
+ result.update(imc_response(module, resp.read()))
+
+ # Store cookie for future requests
+ try:
+ cookie = result['aaaLogin']['attributes']['outCookie']
+ except Exception:
+ module.fail_json(msg='Could not find cookie in output', **result)
+
+    # If we do not log out properly, we quickly run out of sessions
+ atexit.register(logout, module, url, cookie, timeout)
+
+ # Prepare request data
+ if content:
+ rawdata = content
+ elif file_exists:
+ with open(path, 'r') as config_object:
+ rawdata = config_object.read()
+
+ # Wrap the XML documents in a <root> element
+ xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
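+    # e.g. two '<configConfMo>...</configConfMo>' fragments parse as one
+    # '<root>' tree, so each document can be handled in turn below.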
+
+ # Handle each XML document separately in the same session
+ for xmldoc in list(xmldata):
+ if xmldoc.tag is lxml.etree.Comment:
+ continue
+ # Add cookie to XML
+ xmldoc.set('cookie', cookie)
+ data = lxml.etree.tostring(xmldoc)
+
+ # Perform actual request
+ resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or info['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
+
+ # Merge results with previous results
+ rawoutput = resp.read()
+ result = merge(result, imc_response(module, rawoutput, rawinput=data))
+ result['response'] = info['msg']
+ result['status'] = info['status']
+
+ # Check for any changes
+ # NOTE: Unfortunately IMC API always report status as 'modified'
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
+ result['changed'] = ('modified' in results)
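+        # Illustrative: output such as
+        # '<configConfMo><outConfig><computeRackUnit status="modified"/></outConfig></configConfMo>'
+        # yields results == ['modified'] and marks the task as changed.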
+
+ # Report success
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py
new file mode 100644
index 00000000..18a67d01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, 2017 Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: imgadm
+short_description: Manage SmartOS images
+description:
+ - Manage SmartOS virtual machine images through imgadm(1M)
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ force:
+ required: false
+ type: bool
+ description:
+ - Force a given operation (where supported by imgadm(1M)).
+ pool:
+ required: false
+ default: zones
+ description:
+ - zpool to import to or delete images from.
+ type: str
+ source:
+ required: false
+ description:
+ - URI for the image source.
+ type: str
+ state:
+ required: true
+ choices: [ present, absent, deleted, imported, updated, vacuumed ]
+ description:
+      - State the object operated on should be in. C(imported) is an alias
+        for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
+ and C(uuid) to C(*), it will remove all unused images.
+ type: str
+
+ type:
+ required: false
+ choices: [ imgapi, docker, dsapi ]
+ default: imgapi
+ description:
+ - Type for image sources.
+ type: str
+
+ uuid:
+ required: false
+ description:
+ - Image UUID. Can either be a full UUID or C(*) for all images.
+ type: str
+
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Import an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: imported
+
+- name: Delete an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: deleted
+
+- name: Update all images
+ community.general.imgadm:
+ uuid: '*'
+ state: updated
+
+- name: Update a single image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: updated
+
+- name: Add a source
+ community.general.imgadm:
+ source: 'https://datasets.project-fifo.net'
+ state: present
+
+- name: Add a Docker source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ type: docker
+ state: present
+
+- name: Remove a source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ state: absent
+'''
+
+RETURN = '''
+source:
+ description: Source that is managed.
+ returned: When not managing an image.
+ type: str
+ sample: https://datasets.project-fifo.net
+uuid:
+ description: UUID for an image operated on.
+ returned: When not managing an image source.
+ type: str
+ sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'present'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Note on error handling: while imgadm(1M) supports a
+# -E option to return any errors in JSON, the generated JSON does not play well
+# with the JSON parsers of Python. The returned message contains '\n' as part of
+# the stacktrace, which breaks the parsers.
+
+
+class Imgadm(object):
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.cmd = module.get_bin_path('imgadm', required=True)
+ self.changed = False
+ self.uuid = module.params['uuid']
+
+ # Since there are a number of (natural) aliases, prevent having to look
+        # them up every time we operate on `state`.
+ if self.params['state'] in ['present', 'imported', 'updated']:
+ self.present = True
+ else:
+ self.present = False
+
+ # Perform basic UUID validation upfront.
+ if self.uuid and self.uuid != '*':
+ if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
+ module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
+
+ # Helper method to massage stderr
+ def errmsg(self, stderr):
+ match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+ if match:
+ return match.groups()[0]
+ else:
+ return 'Unexpected failure'
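+    # Illustrative: stderr such as
+    #   'imgadm import: error (ActiveImageNotFound): image not found: details'
+    # yields 'image not found' via the regex above.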
+
+ def update_images(self):
+ if self.uuid == '*':
+ cmd = '{0} update'.format(self.cmd)
+ else:
+ cmd = '{0} update {1}'.format(self.cmd, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
+
+ # There is no feedback from imgadm(1M) to determine if anything
+ # was actually changed. So treat this as an 'always-changes' operation.
+ # Note that 'imgadm -v' produces unparseable JSON...
+ self.changed = True
+
+ def manage_sources(self):
+ force = self.params['force']
+ source = self.params['source']
+ imgtype = self.params['type']
+
+ cmd = '{0} sources'.format(self.cmd)
+
+ if force:
+ cmd += ' -f'
+
+ if self.present:
+ cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
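+            # e.g. 'imgadm sources -a https://datasets.project-fifo.net -t imgapi'
+            # (with ' -f' inserted after 'sources' when force is set)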
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
+
+ # Check the various responses.
+ # Note that trying to add a source with the wrong type is handled
+ # above as it results in a non-zero status.
+
+ regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Added "%s" image source "%s"' % (imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = True
+ else:
+ # Type is ignored by imgadm(1M) here
+ cmd += ' -d %s' % source
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
+
+ regex = 'Do not have image source "%s", no change' % source
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Deleted ".*" image source "%s"' % source
+ if re.match(regex, stdout):
+ self.changed = True
+
+ def manage_images(self):
+ pool = self.params['pool']
+ state = self.params['state']
+
+ if state == 'vacuumed':
+ # Unconditionally pass '--force', otherwise we're prompted with 'y/N'
+ cmd = '{0} vacuum -f'.format(self.cmd)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
+ else:
+ if stdout == '':
+ self.changed = False
+ else:
+ self.changed = True
+ if self.present:
+ cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
+
+ regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = '.*ActiveImageNotFound.*'
+ if re.match(regex, stderr):
+ self.changed = False
+
+ regex = 'Imported image {0}.*'.format(self.uuid)
+ if re.match(regex, stdout.splitlines()[-1]):
+ self.changed = True
+ else:
+ cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ regex = '.*ImageNotInstalled.*'
+ if re.match(regex, stderr):
+ # Even if the 'rc' was non-zero (3), we handled the situation
+ # in order to determine if there was a change.
+ self.changed = False
+
+ regex = 'Deleted image {0}'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool'),
+ pool=dict(default='zones'),
+ source=dict(),
+ state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
+ type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
+ uuid=dict()
+ ),
+ # This module relies largely on imgadm(1M) to enforce idempotency, which does not
+ # provide a "noop" (or equivalent) mode to do a dry-run.
+ supports_check_mode=False,
+ )
+
+ imgadm = Imgadm(module)
+
+ uuid = module.params['uuid']
+ source = module.params['source']
+ state = module.params['state']
+
+ result = {'state': state}
+
+ # Either manage sources or images.
+ if source:
+ result['source'] = source
+ imgadm.manage_sources()
+ else:
+ result['uuid'] = uuid
+
+ if state == 'updated':
+ imgadm.update_images()
+ else:
+            # Make sure we operate on a single image for the following actions
+ if (uuid == '*') and (state != 'vacuumed'):
+                module.fail_json(msg='Can only specify uuid as "*" when updating or vacuuming images')
+ imgadm.manage_images()
+
+ result['changed'] = imgadm.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py
new file mode 100644
index 00000000..ab41f680
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py
@@ -0,0 +1,565 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, <meiliu@fusionlayer.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: infinity
+short_description: Manage Infinity IPAM using REST API
+description:
+ - Manage Infinity IPAM using REST API.
+author:
+ - Meirong Liu (@MeganLiu)
+options:
+ server_ip:
+ description:
+ - Infinity server_ip with IP address.
+ type: str
+ required: true
+ username:
+ description:
+ - Username to access Infinity.
+ - The user must have REST API privileges.
+ type: str
+ required: true
+ password:
+ description:
+ - Infinity password.
+ type: str
+ required: true
+ action:
+ description:
+ - Action to perform
+ type: str
+ required: true
+ choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ]
+ network_id:
+ description:
+ - Network ID.
+ type: str
+ default: ''
+ ip_address:
+ description:
+ - IP Address for a reservation or a release.
+ type: str
+ default: ''
+ network_address:
+ description:
+      - Network address in CIDR format (e.g., 192.168.10.0).
+ type: str
+ default: ''
+ network_size:
+ description:
+      - Network bitmask (e.g., 255.255.255.192) or CIDR format (e.g., /26).
+ type: str
+ default: ''
+ network_name:
+ description:
+ - The name of a network.
+ type: str
+ default: ''
+ network_location:
+ description:
+ - The parent network id for a given network.
+ type: int
+ default: -1
+ network_type:
+ description:
+ - Network type defined by Infinity
+ type: str
+ choices: [ lan, shared_lan, supernet ]
+ default: lan
+ network_family:
+ description:
+ - Network family defined by Infinity, e.g. IPv4, IPv6 and Dual stack
+ type: str
+ choices: [ 4, 6, dual ]
+ default: 4
+'''
+
+EXAMPLES = r'''
+---
+- hosts: localhost
+ connection: local
+ strategy: debug
+ tasks:
+ - name: Reserve network into Infinity IPAM
+ community.general.infinity:
+ server_ip: 80.75.107.12
+ username: username
+ password: password
+ action: reserve_network
+ network_name: reserve_new_ansible_network
+ network_family: 4
+ network_type: lan
+ network_id: 1201
+ network_size: /28
+ register: infinity
+'''
+
+RETURN = r'''
+network_id:
+ description: id for a given network
+ returned: success
+ type: str
+ sample: '1501'
+ip_info:
+  description: When reserving the next available IP address from a network, the IP address info is returned.
+ returned: success
+ type: str
+ sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}'
+network_info:
+  description: When reserving a LAN network from an Infinity supernet by providing network_size, the information about the reserved network is returned.
+ returned: success
+ type: str
+ sample: {"network_address": "192.168.10.32/28","network_family": "4", "network_id": 3102,
+ "network_size": null,"description": null,"network_location": "3085",
+ "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null},
+ "network_type": "lan","network_name": "'reserve_new_ansible_network'"}
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, json
+from ansible.module_utils.urls import open_url
+
+
+class Infinity(object):
+ """
+    Class to manage REST API calls to Infinity.
+ """
+
+ def __init__(self, module, server_ip, username, password):
+ self.module = module
+ self.auth_user = username
+ self.auth_pass = password
+ self.base_url = "https://%s/rest/v1/" % (str(server_ip))
+
+ def _get_api_call_ansible_handler(
+ self,
+ method='get',
+ resource_url='',
+ stat_codes=None,
+ params=None,
+ payload_data=None):
+ """
+        Perform the HTTPS request with the given HTTP method via open_url
+ """
+ stat_codes = [200] if stat_codes is None else stat_codes
+ request_url = str(self.base_url) + str(resource_url)
+ response = None
+ headers = {'Content-Type': 'application/json'}
+ if not request_url:
+ self.module.exit_json(
+ msg="When sending Rest api call , the resource URL is empty, please check.")
+ if payload_data and not isinstance(payload_data, str):
+ payload_data = json.dumps(payload_data)
+ response_raw = open_url(
+ str(request_url),
+ method=method,
+ timeout=20,
+ headers=headers,
+ url_username=self.auth_user,
+ url_password=self.auth_pass,
+ validate_certs=False,
+ force_basic_auth=True,
+ data=payload_data)
+
+ response = response_raw.read()
+ payload = ''
+ if response_raw.code not in stat_codes:
+ self.module.exit_json(
+ changed=False,
+ meta=" openurl response_raw.code show error and error code is %r" %
+ (response_raw.code))
+ else:
+ if isinstance(response, str) and len(response) > 0:
+ payload = response
+ elif method.lower() == 'delete' and response_raw.code == 204:
+ payload = 'Delete is done.'
+ if isinstance(payload, dict) and "text" in payload:
+ self.module.exit_json(
+ changed=False,
+ meta="when calling rest api, returned data is not json ")
+ raise Exception(payload["text"])
+ return payload
+
+ # ---------------------------------------------------------------------------
+ # get_network()
+ # ---------------------------------------------------------------------------
+ def get_network(self, network_id, network_name, limit=-1):
+ """
+        Search for a network inside Infinity by using the REST API.
+        Either network_id or network_name needs to be provided.
+        Returns the details of the network with the given network_id or name.
+ """
+ if network_name is None and network_id is None:
+ self.module.exit_json(
+ msg="You must specify one of the options 'network_name' or 'network_id'.")
+ method = "get"
+ resource_url = ''
+ params = {}
+ response = None
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if network_id is None and network_name:
+ method = "get"
+ resource_url = "search"
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list) and len(
+ response) > 1 and limit == 1:
+ response = response[0]
+ response = json.dumps(response)
+ return response
+
+ # ---------------------------------------------------------------------------
+ # get_network_id()
+ # ---------------------------------------------------------------------------
+ def get_network_id(self, network_name="", network_type='lan'):
+ """
+        Query the network_id from Infinity via the REST API, based on the given network_name.
+ """
+ method = 'get'
+ resource_url = 'search'
+ response = None
+ if network_name is None:
+ self.module.exit_json(
+ msg="You must specify the option 'network_name'")
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ network_id = ""
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list):
+ response = response[0]
+ network_id = response['id']
+ return network_id
+
+ # ---------------------------------------------------------------------------
+ # reserve_next_available_ip()
+ # ---------------------------------------------------------------------------
+ def reserve_next_available_ip(self, network_id=""):
+ """
+        Reserve an IP address via Infinity by using the REST API.
+        network_id: the id of the network to reserve the next available IP from.
+        Returns the next available IP address from that given network.
+ """
+ method = "post"
+ resource_url = ''
+ response = None
+ ip_info = ''
+ if not network_id:
+ self.module.exit_json(
+ msg="You must specify the option 'network_id'.")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_ip"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if response and response.find(
+ "[") >= 0 and response.find("]") >= 0:
+ start_pos = response.find("{")
+ end_pos = response.find("}")
+ ip_info = response[start_pos: (end_pos + 1)]
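+            # Illustrative: a response like '[{"address": "192.168.10.3", "id": 3229}]'
+            # leaves ip_info == '{"address": "192.168.10.3", "id": 3229}'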
+ return ip_info
+
+ # -------------------------
+ # release_ip()
+ # -------------------------
+ def release_ip(self, network_id="", ip_address=""):
+ """
+        Release an IP address via Infinity by using the REST API.
+ """
+ method = "get"
+ resource_url = ''
+ response = None
+ if ip_address is None or network_id is None:
+ self.module.exit_json(
+ msg="You must specify those two options: 'network_id' and 'ip_address'.")
+
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg="There is an error in release ip %s from network %s." %
+ (ip_address, network_id))
+
+ ip_list = json.loads(response)
+ ip_idlist = []
+ for ip_item in ip_list:
+ ip_id = ip_item['id']
+ ip_idlist.append(ip_id)
+ deleted_ip_id = ''
+ for ip_id in ip_idlist:
+ ip_response = ''
+ resource_url = "ip_addresses/" + str(ip_id)
+ ip_response = self._get_api_call_ansible_handler(
+ method,
+ resource_url,
+ stat_codes=[200])
+ if ip_response and json.loads(
+ ip_response)['address'] == str(ip_address):
+ deleted_ip_id = ip_id
+ break
+ if deleted_ip_id:
+ method = 'delete'
+ resource_url = "ip_addresses/" + str(deleted_ip_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release ip, could not find the ip address %r from the given network %r' ." %
+ (ip_address, network_id))
+
+ return response
+
+ # -------------------
+ # delete_network()
+ # -------------------
+ def delete_network(self, network_id="", network_name=""):
+ """
+        Delete a network from Infinity by using the REST API.
+ """
+ method = 'delete'
+ resource_url = ''
+ response = None
+ if network_id is None and network_name is None:
+ self.module.exit_json(
+ msg="You must specify one of those options: 'network_id','network_name' .")
+ if network_id is None and network_name:
+ network_id = self.get_network_id(network_name=network_name)
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ return response
+
+ # reserve_network()
+ # ---------------------------------------------------------------------------
+ def reserve_network(self, network_id="",
+ reserved_network_name="", reserved_network_description="",
+ reserved_network_size="", reserved_network_family='4',
+ reserved_network_type='lan', reserved_network_address="",):
+ """
+ Reserves the first available network of specified size from a given supernet
+ <dt>network_name (required)</dt><dd>Name of the network</dd>
+ <dt>description (optional)</dt><dd>Free description</dd>
+ <dt>network_family (required)</dt><dd>Address family of the network. One of '4', '6', 'IPv4', 'IPv6', 'dual'</dd>
+ <dt>network_address (optional)</dt><dd>Address of the new network. If not given, the first network available will be created.</dd>
+ <dt>network_size (required)</dt><dd>Size of the new network in /&lt;prefix&gt; notation.</dd>
+ <dt>network_type (required)</dt><dd>Type of network. One of 'supernet', 'lan', 'shared_lan'</dd>
+
+ """
+ method = 'post'
+ resource_url = ''
+ network_info = None
+ if network_id is None or reserved_network_name is None or reserved_network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_network"
+ if not reserved_network_family:
+ reserved_network_family = '4'
+ if not reserved_network_type:
+ reserved_network_type = 'lan'
+ payload_data = {
+ "network_name": reserved_network_name,
+ 'description': reserved_network_description,
+ 'network_size': reserved_network_size,
+ 'network_family': reserved_network_family,
+ 'network_type': reserved_network_type,
+ 'network_location': int(network_id)}
+ if reserved_network_address:
+ payload_data.update({'network_address': reserved_network_address})
+
+ network_info = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[200, 201], payload_data=payload_data)
+
+ return network_info
+
+ # ---------------------------------------------------------------------------
+ # release_network()
+ # ---------------------------------------------------------------------------
+ def release_network(
+ self,
+ network_id="",
+ released_network_name="",
+ released_network_type='lan'):
+ """
+ Release the network with name 'released_network_name' from the given supernet network_id
+ """
+ method = 'get'
+ response = None
+ if network_id is None or released_network_name is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ matched_network_id = ""
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg=" there is an error in releasing network %r from network %s." %
+ (network_id, released_network_name))
+ if response:
+ response = json.loads(response)
+ for child_net in response:
+ if child_net['network'] and child_net['network']['network_name'] == released_network_name:
+ matched_network_id = child_net['network']['network_id']
+ break
+ response = None
+ if matched_network_id:
+ method = 'delete'
+ resource_url = "networks/" + str(matched_network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release network , could not find the network %r from the given superent %r' " %
+ (released_network_name, network_id))
+
+ return response
+
+ # ---------------------------------------------------------------------------
+ # add_network()
+ # ---------------------------------------------------------------------------
+ def add_network(
+ self, network_name="", network_address="",
+ network_size="", network_family='4',
+ network_type='lan', network_location=-1):
+ """
+ Add a new LAN network into a given supernet (or the default supernet) in Fusionlayer Infinity via the REST API.
+ required fields=['network_name', 'network_family', 'network_type', 'network_address', 'network_size']
+ """
+ method = 'post'
+ resource_url = 'networks'
+ response = None
+ if network_name is None or network_address is None or network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_name', 'network_address' and 'network_size'")
+
+ if not network_family:
+ network_family = '4'
+ if not network_type:
+ network_type = 'lan'
+ if not network_location:
+ network_location = -1
+ payload_data = {
+ "network_name": network_name,
+ 'network_address': network_address,
+ 'network_size': network_size,
+ 'network_family': network_family,
+ 'network_type': network_type,
+ 'network_location': network_location}
+ response = self._get_api_call_ansible_handler(
+ method='post', resource_url=resource_url,
+ stat_codes=[200], payload_data=payload_data)
+ return response
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_ip=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ network_id=dict(type='str'),
+ ip_address=dict(type='str'),
+ network_name=dict(type='str'),
+ network_location=dict(type='int', default=-1),
+ network_family=dict(type='str', default='4', choices=['4', '6', 'dual']),
+ network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']),
+ network_address=dict(type='str'),
+ network_size=dict(type='str'),
+ action=dict(type='str', required=True, choices=[
+ 'add_network',
+ 'delete_network',
+ 'get_network',
+ 'get_network_id',
+ 'release_ip',
+ 'release_network',
+ 'reserve_network',
+ 'reserve_next_available_ip',
+ ],),
+ ),
+ required_together=(
+ ['username', 'password'],
+ ),
+ )
+ server_ip = module.params["server_ip"]
+ username = module.params["username"]
+ password = module.params["password"]
+ action = module.params["action"]
+ network_id = module.params["network_id"]
+ released_ip = module.params["ip_address"]
+ network_name = module.params["network_name"]
+ network_family = module.params["network_family"]
+ network_type = module.params["network_type"]
+ network_address = module.params["network_address"]
+ network_size = module.params["network_size"]
+ network_location = module.params["network_location"]
+ my_infinity = Infinity(module, server_ip, username, password)
+ result = ''
+ if action == "reserve_next_available_ip":
+ if network_id:
+ result = my_infinity.reserve_next_available_ip(network_id)
+ if not result:
+ result = 'There was an error calling reserve_next_available_ip'
+ module.exit_json(changed=False, meta=result)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_ip":
+ if network_id and released_ip:
+ result = my_infinity.release_ip(
+ network_id=network_id, ip_address=released_ip)
+ module.exit_json(changed=True, meta=result)
+ elif action == "delete_network":
+ result = my_infinity.delete_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "get_network_id":
+ result = my_infinity.get_network_id(
+ network_name=network_name, network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+ elif action == "get_network":
+ result = my_infinity.get_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+ elif action == "reserve_network":
+ result = my_infinity.reserve_network(
+ network_id=network_id,
+ reserved_network_name=network_name,
+ reserved_network_size=network_size,
+ reserved_network_family=network_family,
+ reserved_network_type=network_type,
+ reserved_network_address=network_address)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_network":
+ result = my_infinity.release_network(
+ network_id=network_id,
+ released_network_name=network_name,
+ released_network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "add_network":
+ result = my_infinity.add_network(
+ network_name=network_name,
+ network_location=network_location,
+ network_address=network_address,
+ network_size=network_size,
+ network_family=network_family,
+ network_type=network_type)
+
+ module.exit_json(changed=True, meta=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py
new file mode 100644
index 00000000..7b798c36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_database
+short_description: Manage InfluxDB databases
+description:
+ - Manage InfluxDB databases.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ state:
+ description:
+ - Determines if the database should be created or destroyed.
+ choices: [ absent, present ]
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_database command from Ansible Playbooks
+- name: Create database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+
+- name: Destroy database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ state: absent
+
+- name: Create database using custom credentials
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ username: "{{influxdb_username}}"
+ password: "{{influxdb_password}}"
+ database_name: "{{influxdb_database_name}}"
+ ssl: yes
+ validate_certs: yes
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+def find_database(module, client, database_name):
+ database = None
+
+ try:
+ databases = client.get_list_database()
+ for db in databases:
+ if db['name'] == database_name:
+ database = db
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+ return database
+
+
+def create_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.create_database(database_name)
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=True)
+
+
+def drop_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.drop_database(database_name)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent'])
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ database_name = influxdb.database_name
+ database = find_database(module, client, database_name)
+
+ if state == 'present':
+ if database:
+ module.exit_json(changed=False)
+ else:
+ create_database(module, client, database_name)
+
+ if state == 'absent':
+ if database:
+ drop_database(module, client, database_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py
new file mode 100644
index 00000000..d9cf5007
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_query
+short_description: Query data points from InfluxDB
+description:
+ - Query data points from InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ query:
+ description:
+ - Query to be executed.
+ required: true
+ type: str
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Query connections
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections"
+ register: connection
+
+- name: Query connections with tags filters
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections where region='zue01' and host='server01'"
+ register: connection
+
+- name: Print results from the query
+ ansible.builtin.debug:
+ var: connection.query_results
+'''
+
+RETURN = r'''
+query_results:
+ description: Result from the query
+ returned: success
+ type: list
+ sample:
+ - mean: 1245.5333333333333
+ time: "1970-01-01T00:00:00Z"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBRead(InfluxDb):
+
+ def read_by_query(self, query):
+ client = self.connect_to_influxdb()
+ try:
+ rs = client.query(query)
+ if rs:
+ return list(rs.get_points())
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ query=dict(type='str', required=True),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influx = AnsibleInfluxDBRead(module)
+ query = module.params.get('query')
+ results = influx.read_by_query(query)
+ module.exit_json(changed=True, query_results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
new file mode 100644
index 00000000..0774915f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_retention_policy
+short_description: Manage InfluxDB retention policies
+description:
+ - Manage InfluxDB retention policies.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - Name of the retention policy.
+ required: true
+ type: str
+ duration:
+ description:
+ - Determines how long InfluxDB should keep the data.
+ required: true
+ type: str
+ replication:
+ description:
+ - Determines how many independent copies of each point are stored in the cluster.
+ required: true
+ type: int
+ default:
+ description:
+ - Sets the retention policy as the default retention policy.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_retention_policy command from Ansible Playbooks
+- name: Create 1 hour retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1h
+ replication: 1
+ ssl: yes
+ validate_certs: yes
+
+- name: Create 1 day retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+
+- name: Create 1 week retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1w
+ replication: 1
+
+- name: Create infinite retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: INF
+ replication: 1
+ ssl: no
+ validate_certs: no
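+
+# A hedged sketch, not part of the original examples: the 'default' option
+# documented above marks the policy as the database's default retention policy.
+- name: Create 1 day retention policy and set it as the default
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+ default: yes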
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+import re
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+from ansible.module_utils._text import to_native
+
+
+def find_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ hostname = module.params['hostname']
+ retention_policy = None
+
+ try:
+ retention_policies = client.get_list_retention_policies(database=database_name)
+ for policy in retention_policies:
+ if policy['name'] == policy_name:
+ retention_policy = policy
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
+ return retention_policy
+
+
+def create_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+
+ if not module.check_mode:
+ try:
+ client.create_retention_policy(policy_name, duration, replication, database_name, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ module.exit_json(changed=True)
+
+
+def alter_retention_policy(module, client, retention_policy):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+ duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
+ changed = False
+
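+ # InfluxDB reports durations normalized to hours/minutes/seconds (for
+ # example '1d' comes back as '24h0m0s' and 'INF' as '0'), so convert the
+ # requested duration before comparing it with the existing policy.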
+ duration_lookup = duration_regexp.search(duration)
+
+ if duration_lookup.group(2) == 'h':
+ influxdb_duration_format = '%s0m0s' % duration
+ elif duration_lookup.group(2) == 'd':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
+ elif duration_lookup.group(2) == 'w':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
+ elif duration == 'INF':
+ influxdb_duration_format = '0'
+
+ if (not retention_policy['duration'] == influxdb_duration_format or
+ not retention_policy['replicaN'] == int(replication) or
+ not retention_policy['default'] == default):
+ if not module.check_mode:
+ try:
+ client.alter_retention_policy(policy_name, database_name, duration, replication, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ duration=dict(required=True, type='str'),
+ replication=dict(required=True, type='int'),
+ default=dict(default=False, type='bool')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+
+ retention_policy = find_retention_policy(module, client)
+
+ if retention_policy:
+ alter_retention_policy(module, client, retention_policy)
+ else:
+ create_retention_policy(module, client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py
new file mode 100644
index 00000000..e17e3753
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
+# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_user
+short_description: Manage InfluxDB users
+description:
+ - Manage InfluxDB users.
+author: "Vitaliy Zhhuta (@zhhuta)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ user_name:
+ description:
+ - Name of the user.
+ required: True
+ type: str
+ user_password:
+ description:
+ - Password to be set for the user.
+ required: false
+ type: str
+ admin:
+ description:
+ - Whether the user should be in the admin role or not.
+ - Since version 2.8, the role will also be updated.
+ default: no
+ type: bool
+ state:
+ description:
+ - State of the user.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ grants:
+ description:
+ - Privileges to grant to this user.
+ - Takes a list of dicts containing the "database" and "privilege" keys.
+ - If this argument is not provided, the current grants will be left alone.
+ - If an empty list is provided, all grants for the user will be removed.
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Create a user on localhost using default login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+
+- name: Create a user on localhost using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create an admin user on a remote host using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ admin: yes
+ hostname: "{{ influxdb_hostname }}"
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create a user on localhost with privileges
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ grants:
+ - database: 'collectd'
+ privilege: 'WRITE'
+ - database: 'graphite'
+ privilege: 'READ'
+
+- name: Destroy a user using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ state: absent
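+
+# A hedged sketch, not part of the original examples: per the grants
+# description above, passing an empty list revokes all of the user's grants.
+- name: Remove all privileges from a user
+ community.general.influxdb_user:
+ user_name: john
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ grants: []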
+'''
+
+RETURN = r'''
+#only defaults
+'''
+
+from ansible.module_utils.urls import ConnectionError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils.influxdb as influx
+
+
+def find_user(module, client, user_name):
+ user_result = None
+
+ try:
+ users = client.get_list_users()
+ for user in users:
+ if user['user'] == user_name:
+ user_result = user
+ break
+ except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
+ module.fail_json(msg=to_native(e))
+ return user_result
+
+
+def check_user_password(module, client, user_name, user_password):
+ try:
+ client.switch_user(user_name, user_password)
+ client.get_list_users()
+ except influx.exceptions.InfluxDBClientError as e:
+ if e.code == 401:
+ return False
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+ finally:
+ # restore previous user
+ client.switch_user(module.params['username'], module.params['password'])
+ return True
+
+
+def set_user_password(module, client, user_name, user_password):
+ if not module.check_mode:
+ try:
+ client.set_user_password(user_name, user_password)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def create_user(module, client, user_name, user_password, admin):
+ if not module.check_mode:
+ try:
+ client.create_user(user_name, user_password, admin)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def drop_user(module, client, user_name):
+ if not module.check_mode:
+ try:
+ client.drop_user(user_name)
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def set_user_grants(module, client, user_name, grants):
+ changed = False
+
+ try:
+ current_grants = client.get_list_privileges(user_name)
+ # Normalize privilege wording and drop 'NO PRIVILEGES' entries; build a
+ # new list rather than deleting from the list being iterated over
+ normalized_grants = []
+ for v in current_grants:
+ if v['privilege'] == 'ALL PRIVILEGES':
+ v['privilege'] = 'ALL'
+ if v['privilege'] != 'NO PRIVILEGES':
+ normalized_grants.append(v)
+ current_grants = normalized_grants
+
+ # check if the current grants are included in the desired ones
+ for current_grant in current_grants:
+ if current_grant not in grants:
+ if not module.check_mode:
+ client.revoke_privilege(current_grant['privilege'],
+ current_grant['database'],
+ user_name)
+ changed = True
+
+ # check if the desired grants are included in the current ones
+ for grant in grants:
+ if grant not in current_grants:
+ if not module.check_mode:
+ client.grant_privilege(grant['privilege'],
+ grant['database'],
+ user_name)
+ changed = True
+
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ return changed
+
+
+def main():
+ argument_spec = influx.InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ user_name=dict(required=True, type='str'),
+ user_password=dict(required=False, type='str', no_log=True),
+ admin=dict(default=False, type='bool'),
+ grants=dict(type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ user_name = module.params['user_name']
+ user_password = module.params['user_password']
+ admin = module.params['admin']
+ grants = module.params['grants']
+ influxdb = influx.InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ user = find_user(module, client, user_name)
+
+ changed = False
+
+ if state == 'present':
+ if user:
+ if not check_user_password(module, client, user_name, user_password) and user_password is not None:
+ set_user_password(module, client, user_name, user_password)
+ changed = True
+
+ try:
+ if admin and not user['admin']:
+ if not module.check_mode:
+ client.grant_admin_privileges(user_name)
+ changed = True
+ elif not admin and user['admin']:
+ if not module.check_mode:
+ client.revoke_admin_privileges(user_name)
+ changed = True
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=to_native(e))
+
+ else:
+ user_password = user_password or ''
+ create_user(module, client, user_name, user_password, admin)
+ changed = True
+
+ if grants is not None:
+ if set_user_grants(module, client, user_name, grants):
+ changed = True
+
+ module.exit_json(changed=changed)
+
+ if state == 'absent':
+ if user:
+ drop_user(module, client, user_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py
new file mode 100644
index 00000000..0dc063a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_write
+short_description: Write data points into InfluxDB
+description:
+ - Write data points into InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ data_points:
+ description:
+ - Data points as dict to write into the database.
+ required: true
+ type: list
+ elements: dict
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Write points into database
+ community.general.influxdb_write:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ data_points:
+ - measurement: connections
+ tags:
+ host: server01
+ region: us-west
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 2000
+ - measurement: connections
+ tags:
+ host: server02
+ region: us-east
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 3000
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBWrite(InfluxDb):
+
+ def write_data_point(self, data_points):
+ client = self.connect_to_influxdb()
+
+ try:
+ client.write_points(data_points)
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ data_points=dict(required=True, type='list', elements='dict'),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ influx = AnsibleInfluxDBWrite(module)
+ data_points = module.params.get('data_points')
+ influx.write_data_point(data_points)
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py
new file mode 100644
index 00000000..0beaca9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py
@@ -0,0 +1,334 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ini_file
+short_description: Tweak settings in INI files
+extends_documentation_fragment: files
+description:
+ - Manage (add, remove, change) individual settings in an INI-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
+ - Adds missing sections if they don't exist.
+ - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
+ - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
+ no other modifications need to be applied.
+options:
+ path:
+ description:
+ - Path to the INI-style file; this file is created if required.
+ - Before Ansible 2.3 this option was only usable as I(dest).
+ type: path
+ required: true
+ aliases: [ dest ]
+ section:
+ description:
+ - Section name in INI file. This is added automatically if C(state=present) and
+ a single value is being set.
+ - If left empty or set to C(null), the I(option) will be placed before the first I(section).
+ - Using C(null) is also required if the config format does not support sections.
+ type: str
+ required: true
+ option:
+ description:
+ - If set (required for changing a I(value)), this is the name of the option.
+ - May be omitted if adding/removing a whole I(section).
+ type: str
+ value:
+ description:
+ - The string value to be associated with an I(option).
+ - May be omitted when removing an I(option).
+ type: str
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ state:
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ no_extra_spaces:
+ description:
+ - Do not insert spaces before and after '=' symbol.
+ type: bool
+ default: no
+ create:
+ description:
+ - If set to C(no), the module will fail if the file does not already exist.
+ - By default it will create the file if it is missing.
+ type: bool
+ default: yes
+ allow_no_value:
+ description:
+ - Allow an option without a value and without an '=' symbol.
+ type: bool
+ default: no
+notes:
+ - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+author:
+ - Jan-Piet Mens (@jpmens)
+ - Ales Nosek (@noseka1)
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' was used instead of 'path'
+- name: Ensure "fav=lemonade is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ section: drinks
+ option: fav
+ value: lemonade
+ mode: '0600'
+ backup: yes
+
+- name: Ensure "temperature=cold is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/anotherconf
+ section: drinks
+ option: temperature
+ value: cold
+ backup: yes
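+
+# A hedged sketch, not part of the original examples: per the allow_no_value
+# description above, an option can be written without an '=' and a value.
+- name: Ensure "skip-networking" is present in section "[mysqld]" without a value
+ community.general.ini_file:
+ path: /etc/my.cnf
+ section: mysqld
+ option: skip-networking
+ allow_no_value: yes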
+'''
+
+import os
+import re
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def match_opt(option, line):
+ option = re.escape(option)
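+ # match the option either active, or commented out with '#' or ';'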
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def match_active_opt(option, line):
+ option = re.escape(option)
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def do_ini(module, filename, section=None, option=None, value=None,
+ state='present', backup=False, no_extra_spaces=False, create=True,
+ allow_no_value=False):
+
+ diff = dict(
+ before='',
+ after='',
+ before_header='%s (content)' % filename,
+ after_header='%s (content)' % filename,
+ )
+
+ if not os.path.exists(filename):
+ if not create:
+ module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
+ destpath = os.path.dirname(filename)
+ if not os.path.exists(destpath) and not module.check_mode:
+ os.makedirs(destpath)
+ ini_lines = []
+ else:
+ ini_file = open(filename, 'r')
+ try:
+ ini_lines = ini_file.readlines()
+ finally:
+ ini_file.close()
+
+ if module._diff:
+ diff['before'] = ''.join(ini_lines)
+
+ changed = False
+
+ # ini file could be empty
+ if not ini_lines:
+ ini_lines.append('\n')
+
+ # last line of file may not contain a trailing newline
+ if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
+ ini_lines[-1] += '\n'
+ changed = True
+
+ # append fake section lines to simplify the logic
+ # At top:
+ # A fake random section name that will not match any real section in the file
+ # (a commit hash is used as the fake section name)
+ fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5"
+
+ # Insert it at the beginning
+ ini_lines.insert(0, '[%s]' % fake_section_name)
+
+ # At bottom:
+ ini_lines.append('[')
+
+ # If no section is defined, fake section is used
+ if not section:
+ section = fake_section_name
+
+ within_section = not section
+ section_start = 0
+ msg = 'OK'
+ if no_extra_spaces:
+ assignment_format = '%s=%s\n'
+ else:
+ assignment_format = '%s = %s\n'
+
+ for index, line in enumerate(ini_lines):
+ if line.startswith('[%s]' % section):
+ within_section = True
+ section_start = index
+ elif line.startswith('['):
+ if within_section:
+ if state == 'present':
+ # insert missing option line at the end of the section
+ for i in range(index, 0, -1):
+ # search backwards for previous non-blank or non-comment line
+ if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
+ if not value and allow_no_value:
+ ini_lines.insert(i, '%s\n' % option)
+ else:
+ ini_lines.insert(i, assignment_format % (option, value))
+ msg = 'option added'
+ changed = True
+ break
+ elif state == 'absent' and not option:
+ # remove the entire section
+ del ini_lines[section_start:index]
+ msg = 'section removed'
+ changed = True
+ break
+ else:
+ if within_section and option:
+ if state == 'present':
+ # change the existing option line
+ if match_opt(option, line):
+ if not value and allow_no_value:
+ newline = '%s\n' % option
+ else:
+ newline = assignment_format % (option, value)
+ option_changed = ini_lines[index] != newline
+ changed = changed or option_changed
+ if option_changed:
+ msg = 'option changed'
+ ini_lines[index] = newline
+ if option_changed:
+ # remove all possible option occurrences from the rest of the section
+ index = index + 1
+ while index < len(ini_lines):
+ line = ini_lines[index]
+ if line.startswith('['):
+ break
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ else:
+ index = index + 1
+ break
+ elif state == 'absent':
+ # delete the existing line
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ changed = True
+ msg = 'option changed'
+ break
+
+ # remove the fake section line
+ del ini_lines[0]
+ del ini_lines[-1:]
+
+ if not within_section and option and state == 'present':
+ ini_lines.append('[%s]\n' % section)
+ if not value and allow_no_value:
+ ini_lines.append('%s\n' % option)
+ else:
+ ini_lines.append(assignment_format % (option, value))
+ changed = True
+ msg = 'section and option added'
+
+ if module._diff:
+ diff['after'] = ''.join(ini_lines)
+
+ backup_file = None
+ if changed and not module.check_mode:
+ if backup:
+ backup_file = module.backup_local(filename)
+
+ try:
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'w')
+ f.writelines(ini_lines)
+ f.close()
+ except IOError:
+ module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc())
+
+ try:
+ module.atomic_move(tmpfile, filename)
+ except IOError:
+ module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
+ traceback=traceback.format_exc())
+
+ return (changed, backup_file, diff, msg)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest']),
+ section=dict(type='str', required=True),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ no_extra_spaces=dict(type='bool', default=False),
+ allow_no_value=dict(type='bool', default=False),
+ create=dict(type='bool', default=True)
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ section = module.params['section']
+ option = module.params['option']
+ value = module.params['value']
+ state = module.params['state']
+ backup = module.params['backup']
+ no_extra_spaces = module.params['no_extra_spaces']
+ allow_no_value = module.params['allow_no_value']
+ create = module.params['create']
+
+ (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
+
+ if not module.check_mode and os.path.exists(path):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ results = dict(
+ changed=changed,
+ diff=diff,
+ msg=msg,
+ path=path,
+ )
+ if backup_file is not None:
+ results['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py
new file mode 100644
index 00000000..af7a950a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: installp
+author:
+- Kairo Araujo (@kairoaraujo)
+short_description: Manage packages on AIX
+description:
+ - Manage packages using 'installp' on AIX
+options:
+ accept_license:
+ description:
+ - Whether to accept the license for the package(s).
+ type: bool
+ default: no
+ name:
+ description:
+ - One or more packages to install or remove.
+ - Use C(all) to install all packages available on informed C(repository_path).
+ type: list
+ elements: str
+ required: true
+ aliases: [ pkg ]
+ repository_path:
+ description:
+ - Path with AIX packages (required to install).
+ type: path
+ state:
+ description:
+ - Whether the package needs to be present on or absent from the system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+- If the package is already installed, even if the package/fileset is newer, the module will not install it.
+'''
+
+EXAMPLES = r'''
+- name: Install package foo
+ community.general.installp:
+ name: foo
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master only
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Remove packages bos.sysmgt.nim.master
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ state: absent
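+
+# A hedged sketch, not part of the original examples: per the name
+# description above, C(all) installs every package available on the
+# informed repository path.
+- name: Install all packages available on the repository path
+ community.general.installp:
+ name: all
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present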
+'''
+
+RETURN = r''' # '''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_new_pkg(module, package, repository_path):
+ """
+ Check that the package or fileset name and the repository path are valid.
+
+ :param module: Ansible module arguments spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package information.
+ """
+
+ if os.path.isdir(repository_path):
+ installp_cmd = module.get_bin_path('installp', True)
+ rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+
+ if package == 'all':
+ pkg_info = "All packages on dir"
+ return True, pkg_info
+
+ else:
+ pkg_info = {}
+ for line in package_result.splitlines():
+ if re.findall(package, line):
+ pkg_name = line.split()[0].strip()
+ pkg_version = line.split()[1].strip()
+ pkg_info[pkg_name] = pkg_version
+
+ return True, pkg_info
+
+ return False, None
+
+ else:
+ module.fail_json(msg="Repository path %s is not valid." % repository_path)
+
+
+def _check_installed_pkg(module, package, repository_path):
+ """
+ Check the package on AIX.
+ It verifies whether the package is installed and gathers its information.
+
+ :param module: Ansible module parameters spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package data.
+ """
+
+ lslpp_cmd = module.get_bin_path('lslpp', True)
+ rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
+
+ if rc == 1:
+ package_state = ' '.join(err.split()[-2:])
+ if package_state == 'not installed.':
+ return False, None
+ else:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ pkg_data = {}
+ full_pkg_data = lslpp_result.splitlines()
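+ # lslpp -lcq prints colon-separated records; the first three fields are
+ # taken here as package name, fileset and level.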
+ for line in full_pkg_data:
+ pkg_name, fileset, level = line.split(':')[0:3]
+ pkg_data[pkg_name] = fileset, level
+
+ return True, pkg_data
+
+
+def remove(module, installp_cmd, packages):
+ repository_path = None
+ remove_count = 0
+ removed_pkgs = []
+ not_found_pkg = []
+ for package in packages:
+ pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
+
+ if pkg_check:
+ if not module.check_mode:
+ rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+ remove_count += 1
+ removed_pkgs.append(package)
+
+ else:
+ not_found_pkg.append(package)
+
+ if remove_count > 0:
+ if len(not_found_pkg) > 0:
+ not_found_pkg.insert(0, "Package(s) not found: ")
+
+ changed = True
+ msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
+
+ else:
+ changed = False
+ msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
+
+ return changed, msg
+
+
+def install(module, installp_cmd, packages, repository_path, accept_license):
+ installed_pkgs = []
+ not_found_pkgs = []
+ already_installed_pkgs = {}
+
+ accept_license_param = {
+ True: '-Y',
+ False: '',
+ }
+
+ # Validate if package exists on repository path.
+ for package in packages:
+ pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
+
+ # If package exists on repository path, check if package is installed.
+ if pkg_check:
+ pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
+
+ # If package is already installed.
+ if pkg_check_current:
+ # Check if package is a package and not a fileset, get version
+ # and add the package into already installed list
+ if package in pkg_info.keys():
+ already_installed_pkgs[package] = pkg_info[package][1]
+
+ else:
+ # If the package is not a package but a fileset, confirm
+ # and add the fileset/package into already installed list
+ for key in pkg_info.keys():
+ if package in pkg_info[key]:
+ already_installed_pkgs[package] = pkg_info[key][1]
+
+ else:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp", rc=rc, err=err)
+ installed_pkgs.append(package)
+
+ else:
+ not_found_pkgs.append(package)
+
+ if len(installed_pkgs) > 0:
+ installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
+ else:
+ installed_msg = ''
+
+ if len(not_found_pkgs) > 0:
+ not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
+ else:
+ not_found_msg = ''
+
+ if len(already_installed_pkgs) > 0:
+ already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
+ else:
+ already_installed_msg = ''
+
+ if len(installed_pkgs) > 0:
+ changed = True
+ msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+ else:
+ changed = False
+ msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ repository_path=dict(type='path'),
+ accept_license=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ repository_path = module.params['repository_path']
+ accept_license = module.params['accept_license']
+ state = module.params['state']
+
+ installp_cmd = module.get_bin_path('installp', True)
+
+ if state == 'present':
+ if repository_path is None:
+ module.fail_json(msg="repository_path is required to install package")
+
+ changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
+
+ elif state == 'absent':
+ changed, msg = remove(module, installp_cmd, name)
+
+ else:
+ module.fail_json(changed=False, msg="Unexpected state.")
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py
new file mode 100644
index 00000000..d1e37573
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: interfaces_file
+short_description: Tweak settings in /etc/network/interfaces files
+extends_documentation_fragment: files
+description:
+ - Manage (add, remove, change) individual interface options in an interfaces-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). The interface has to be present in the file.
+ - Read information about interfaces from interfaces-style files.
+options:
+ dest:
+ type: path
+ description:
+ - Path to the interfaces file
+ default: /etc/network/interfaces
+ iface:
+ type: str
+ description:
+ - Name of the interface, required for value changes or option remove
+ address_family:
+ type: str
+ description:
+ - Address family of the interface, useful if same interface name is used for both inet and inet6
+ option:
+ type: str
+ description:
+ - Name of the option, required for value changes or option remove
+ value:
+ type: str
+ description:
+ - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
+ If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
+ C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; only adding new options, removing existing
+ ones or cleaning the whole option set are supported.
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ state:
+ type: str
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ default: "present"
+ choices: [ "present", "absent" ]
+
+notes:
+ - If an option is defined multiple times, the last one will be updated, but all of them will be deleted when I(state) is C(absent).
+requirements: []
+author: "Roman Belyakovsky (@hryamzik)"
+'''
+
+RETURN = '''
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: "/etc/network/interfaces"
+ifaces:
+ description: interfaces dictionary
+ returned: success
+ type: complex
+ contains:
+ ifaces:
+ description: interface dictionary
+ returned: success
+ type: dict
+ contains:
+ eth0:
+ description: Name of the interface
+ returned: success
+ type: dict
+ contains:
+ address_family:
+ description: interface address family
+ returned: success
+ type: str
+ sample: "inet"
+ method:
+ description: interface method
+ returned: success
+ type: str
+ sample: "manual"
+ mtu:
+ description: other options, all values returned as strings
+ returned: success
+ type: str
+ sample: "1500"
+ pre-up:
+ description: list of C(pre-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ up:
+ description: list of C(up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ post-up:
+ description: list of C(post-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ down:
+ description: list of C(down) scripts
+ returned: success
+ type: list
+ sample:
+ - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+...
+'''
+
+EXAMPLES = '''
+- name: Set eth1 mtu configuration value to 8000
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: mtu
+ value: 8000
+ backup: yes
+ state: present
+ register: eth1_cfg
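+
+# A hedged sketch, not part of the original examples: per the value
+# description above, pre-up/up/post-up/down entries cannot be edited in
+# place; a specific entry is removed by matching its exact value.
+- name: Remove a specific pre-up script from eth1
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: pre-up
+ value: "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ state: absent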
+'''
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def lineDict(line):
+ return {'line': line, 'line_type': 'unknown'}
+
+
+def optionDict(line, iface, option, value, address_family):
+ return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}
+
+
+def getValueFromLine(s):
+ spaceRe = re.compile(r'\s+')
+ for m in spaceRe.finditer(s):
+ pass
+ valueEnd = m.start()
+ option = s.split()[0]
+ optionStart = s.find(option)
+ optionLen = len(option)
+ valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
+ return s[valueStart:valueEnd]
+
+
+def read_interfaces_file(module, filename):
+ f = open(filename, 'r')
+ return read_interfaces_lines(module, f)
+
+
+def read_interfaces_lines(module, line_strings):
+ lines = []
+ ifaces = {}
+ currently_processing = None
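+ # simple line-oriented parser: currently_processing tracks whether the
+ # current line belongs to an iface stanza, a mapping stanza, or neither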
+ i = 0
+ for line in line_strings:
+ i += 1
+ words = line.split()
+ if len(words) < 1:
+ lines.append(lineDict(line))
+ continue
+ if words[0][0] == "#":
+ lines.append(lineDict(line))
+ continue
+ if words[0] == "mapping":
+ # currmap = calloc(1, sizeof *currmap);
+ lines.append(lineDict(line))
+ currently_processing = "MAPPING"
+ elif words[0] == "source":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-dir":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-directory":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "iface":
+ currif = {
+ "pre-up": [],
+ "up": [],
+ "down": [],
+ "post-up": []
+ }
+ iface_name = words[1]
+ try:
+ currif['address_family'] = words[2]
+ except IndexError:
+ currif['address_family'] = None
+ address_family = currif['address_family']
+ try:
+ currif['method'] = words[3]
+ except IndexError:
+ currif['method'] = None
+
+ ifaces[iface_name] = currif
+ lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
+ currently_processing = "IFACE"
+ elif words[0] == "auto":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0].startswith("allow-"):
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-auto-down":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-scripts":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ else:
+ if currently_processing == "IFACE":
+ option_name = words[0]
+ # TODO: if option_name in currif.options
+ value = getValueFromLine(line)
+ lines.append(optionDict(line, iface_name, option_name, value, address_family))
+ if option_name in ["pre-up", "up", "down", "post-up"]:
+ currif[option_name].append(value)
+ else:
+ currif[option_name] = value
+ elif currently_processing == "MAPPING":
+ lines.append(lineDict(line))
+ elif currently_processing == "NONE":
+ lines.append(lineDict(line))
+ else:
+ module.fail_json(msg="misplaced option %s in line %d" % (line, i))
+ return None, None
+ return lines, ifaces
+
+
+def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None):
+ value = str(raw_value)
+ changed = False
+
+ iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
+ if address_family is not None:
+ iface_lines = [item for item in iface_lines
+ if "address_family" in item and item["address_family"] == address_family]
+
+ if len(iface_lines) < 1:
+ # interface not found
+ module.fail_json(msg="Error: interface %s not found" % iface)
+ return changed, None
+
+ iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
+ target_options = list(filter(lambda i: i['option'] == option, iface_options))
+
+ if state == "present":
+ if len(target_options) < 1:
+ changed = True
+ # add new option
+ last_line_dict = iface_lines[-1]
+ changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
+ else:
+ if option in ["pre-up", "up", "down", "post-up"]:
+ if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
+ changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
+ else:
+ # if more than one option found edit the last one
+ if target_options[-1]['value'] != value:
+ changed = True
+ target_option = target_options[-1]
+ old_line = target_option['line']
+ old_value = target_option['value']
+ address_family = target_option['address_family']
+ prefix_start = old_line.find(option)
+ optionLen = len(option)
+ old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
+ start = old_value_position.start() + prefix_start + optionLen
+ end = old_value_position.end() + prefix_start + optionLen
+ line = old_line[:start] + value + old_line[end:]
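+                    # locate the last occurrence of target_option by indexing
+                    # into the reversed list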
+ index = len(lines) - lines[::-1].index(target_option) - 1
+ lines[index] = optionDict(line, iface, option, value, address_family)
+ elif state == "absent":
+ if len(target_options) >= 1:
+ if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
+ for target_option in filter(lambda i: i['value'] == value, target_options):
+ changed = True
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ changed = True
+ for target_option in target_options:
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
+
+ return changed, lines
+
+
+def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
+ # Changing method of interface is not an addition
+ if option == 'method':
+ changed = False
+ for ln in lines:
+ if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
+ changed = True
+ ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
+ ln['params']['method'] = value
+ return changed, lines
+
+ last_line = last_line_dict['line']
+ prefix_start = last_line.find(last_line.split()[0])
+ suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
+ prefix = last_line[:prefix_start]
+
+ if len(iface_options) < 1:
+        # interface has no options yet, indent the new option
+ prefix += " "
+
+ line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
+ option_dict = optionDict(line, iface, option, value, address_family)
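+    # insert the new option line directly after the last occurrence of
+    # last_line_dict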
+ index = len(lines) - lines[::-1].index(last_line_dict)
+ lines.insert(index, option_dict)
+ return True, lines
+
+
+def write_changes(module, lines, dest):
+
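+    # Write to a temporary file and atomically move it over the destination,
+    # so an interrupted write cannot leave a truncated interfaces file behind.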
+ tmpfd, tmpfile = tempfile.mkstemp()
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
+ f.close()
+ module.atomic_move(tmpfile, os.path.realpath(dest))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', default='/etc/network/interfaces'),
+ iface=dict(type='str'),
+ address_family=dict(type='str'),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ required_by=dict(
+ option=('iface',),
+ ),
+ )
+
+ dest = module.params['dest']
+ iface = module.params['iface']
+ address_family = module.params['address_family']
+ option = module.params['option']
+ value = module.params['value']
+ backup = module.params['backup']
+ state = module.params['state']
+
+ if option is not None and state == "present" and value is None:
+ module.fail_json(msg="Value must be set if option is defined and state is 'present'")
+
+ lines, ifaces = read_interfaces_file(module, dest)
+
+ changed = False
+
+ if option is not None:
+ changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family)
+
+ if changed:
+ _, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(dest)
+ write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
+
+ module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py
new file mode 100644
index 00000000..50aec392
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# (c) 2017, Arie Bregman <abregman@redhat.com>
+#
+# This file is a module for Ansible that manages network namespaces with the ip command
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ip_netns
+author: "Arie Bregman (@bregman-arie)"
+short_description: Manage network namespaces
+requirements: [ ip ]
+description:
+ - Create or delete network namespaces using the ip command.
+options:
+ name:
+ required: false
+ description:
+            - Name of the namespace.
+ type: str
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+            - Whether the namespace should exist.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create a namespace named mario
+ community.general.ip_netns:
+ name: mario
+ state: present
+
+- name: Delete a namespace named luigi
+ community.general.ip_netns:
+ name: luigi
+ state: absent
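+
+# Illustrative addition, not from the original documentation: create a
+# namespace and confirm it afterwards. The verification task and register
+# variable name are assumptions.
+- name: Create a namespace named peach (illustrative)
+  community.general.ip_netns:
+    name: peach
+    state: present
+
+- name: List namespaces to confirm
+  ansible.builtin.command: ip netns list
+  register: netns_list
+  changed_when: false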
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+class Namespace(object):
+ """Interface to network namespaces. """
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+
+ def _netns(self, command):
+        '''Run an ip netns command'''
+ return self.module.run_command(['ip', 'netns'] + command)
+
+ def exists(self):
+ '''Check if the namespace already exists'''
+ rc, out, err = self.module.run_command('ip netns list')
+ if rc != 0:
+ self.module.fail_json(msg=to_text(err))
+        # Match names exactly; a plain substring test would also match
+        # e.g. 'mario2' when looking for 'mario'.
+        return any(ln.split()[0] == self.name for ln in out.splitlines() if ln.strip())
+
+ def add(self):
+ '''Create network namespace'''
+ rtc, out, err = self._netns(['add', self.name])
+
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def delete(self):
+ '''Delete network namespace'''
+ rtc, out, err = self._netns(['del', self.name])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ changed = False
+
+        # 'present' only changes anything when the namespace is missing;
+        # 'absent' only changes anything when it exists.
+        if self.state == 'present' and not self.exists():
+            changed = True
+        elif self.state == 'absent' and self.exists():
+            changed = True
+
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+ if not self.exists():
+ self.add()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """Entry point."""
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': None},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ },
+ supports_check_mode=True,
+ )
+
+ network_namespace = Namespace(module)
+ if module.check_mode:
+ network_namespace.check()
+ else:
+ network_namespace.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py
new file mode 100644
index 00000000..756b6cf9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_config
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage Global FreeIPA Configuration Settings
+description:
+- Modify global configuration settings of a FreeIPA Server.
+options:
+ ipadefaultloginshell:
+ description: Default shell for new users.
+ aliases: ["loginshell"]
+ type: str
+ ipadefaultemaildomain:
+ description: Default e-mail domain for new users.
+ aliases: ["emaildomain"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure the default login shell is bash.
+ community.general.ipa_config:
+ ipadefaultloginshell: /bin/bash
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default e-mail domain is ansible.com.
+ community.general.ipa_config:
+ ipadefaultemaildomain: ansible.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
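+
+# Hedged combined example (illustrative, not from the original docs): both
+# settings can be managed in one task; host and credentials are placeholders.
+- name: Ensure both global defaults in a single task (illustrative)
+  community.general.ipa_config:
+    ipadefaultloginshell: /bin/bash
+    ipadefaultemaildomain: ansible.com
+    ipa_host: localhost
+    ipa_user: admin
+    ipa_pass: supersecret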
+'''
+
+RETURN = r'''
+config:
+ description: Configuration as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ConfigIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ConfigIPAClient, self).__init__(module, host, port, protocol)
+
+ def config_show(self):
+ return self._post_json(method='config_show', name=None)
+
+ def config_mod(self, name, item):
+ return self._post_json(method='config_mod', name=name, item=item)
+
+
+def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None):
+ config = {}
+ if ipadefaultloginshell is not None:
+ config['ipadefaultloginshell'] = ipadefaultloginshell
+ if ipadefaultemaildomain is not None:
+ config['ipadefaultemaildomain'] = ipadefaultemaildomain
+
+ return config
+
+
+def get_config_diff(client, ipa_config, module_config):
+ return client.get_diff(ipa_data=ipa_config, module_data=module_config)
+
+
+def ensure(module, client):
+ module_config = get_config_dict(
+ ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
+ ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
+ )
+ ipa_config = client.config_show()
+ diff = get_config_diff(client, ipa_config, module_config)
+
+ changed = False
+ new_config = {}
+ for module_key in diff:
+ if module_config.get(module_key) != ipa_config.get(module_key, None):
+ changed = True
+ new_config.update({module_key: module_config.get(module_key)})
+
+ if changed and not module.check_mode:
+ client.config_mod(name=None, item=new_config)
+
+ return changed, client.config_show()
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
+ ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = ConfigIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+        changed, config = ensure(module, client)
+        module.exit_json(changed=changed, config=config)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
new file mode 100644
index 00000000..635bf2ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnsrecord
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA DNS records
+description:
+- Add, modify and delete an IPA DNS Record using IPA API.
+options:
+ zone_name:
+ description:
+    - The name of the DNS zone in which the record is managed.
+ required: true
+ type: str
+ record_name:
+ description:
+ - The DNS record name to manage.
+ required: true
+ aliases: ["name"]
+ type: str
+ record_type:
+ description:
+    - The type of DNS record.
+ - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported.
+ - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5."
+ - "'SRV' and 'MX' are added in version 2.8."
+ required: false
+ default: 'A'
+ choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT']
+ type: str
+ record_value:
+ description:
+ - Manage DNS record name with this value.
+ - In the case of 'A' or 'AAAA' record types, this will be the IP address.
+ - In the case of 'A6' record type, this will be the A6 Record data.
+ - In the case of 'CNAME' record type, this will be the hostname.
+ - In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'PTR' record type, this will be the hostname.
+ - In the case of 'TXT' record type, this will be a text.
+ - In the case of 'SRV' record type, this will be a service record.
+ - In the case of 'MX' record type, this will be a mail exchanger record.
+ required: true
+ type: str
+ record_ttl:
+ description:
+ - Set the TTL for the record.
+    - Applies only when adding a new record or when changing the value of record_value.
+ required: false
+ type: int
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: vm-001
+ record_type: 'AAAA'
+ record_value: '::1'
+
+- name: Ensure that dns record exists with a TTL
+ community.general.ipa_dnsrecord:
+ name: host02
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ record_ttl: 300
+ ipa_host: ipa.example.com
+ ipa_pass: topsecret
+ state: present
+
+- name: Ensure a PTR record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: 2.168.192.in-addr.arpa
+ record_name: 5
+ record_type: 'PTR'
+ record_value: 'internal.ipa.example.com'
+
+- name: Ensure a TXT record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos
+ record_type: 'TXT'
+ record_value: 'EXAMPLE.COM'
+
+- name: Ensure an SRV record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos._udp.example.com
+ record_type: 'SRV'
+ record_value: '10 50 88 ipa.example.com'
+
+- name: Ensure an MX record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: '@'
+ record_type: 'MX'
+ record_value: '1 mailserver.example.com'
+
+- name: Ensure that dns record is removed
+ community.general.ipa_dnsrecord:
+ name: host01
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
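+
+# Illustrative addition: a plain A record relying on the default
+# record_type of 'A'. Host, password, record name and address are
+# placeholder assumptions.
+- name: Ensure an A record is present (illustrative)
+  community.general.ipa_dnsrecord:
+    ipa_host: spider.example.com
+    ipa_pass: Passw0rd!
+    state: present
+    zone_name: example.com
+    record_name: vm-002
+    record_value: '192.168.0.50'
+    record_ttl: 300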
+'''
+
+RETURN = r'''
+dnsrecord:
+ description: DNS record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSRecordIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSRecordIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnsrecord_find(self, zone_name, record_name):
+ if record_name == '@':
+ return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True})
+ else:
+ return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True})
+
+ def dnsrecord_add(self, zone_name=None, record_name=None, details=None):
+ item = dict(idnsname=record_name)
+ if details['record_type'] == 'A':
+ item.update(a_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'AAAA':
+ item.update(aaaa_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'A6':
+ item.update(a6_part_data=details['record_value'])
+ elif details['record_type'] == 'CNAME':
+ item.update(cname_part_hostname=details['record_value'])
+ elif details['record_type'] == 'DNAME':
+ item.update(dname_part_target=details['record_value'])
+ elif details['record_type'] == 'PTR':
+ item.update(ptr_part_hostname=details['record_value'])
+ elif details['record_type'] == 'TXT':
+ item.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV':
+ item.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX':
+ item.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+
+ return self._post_json(method='dnsrecord_add', name=zone_name, item=item)
+
+ def dnsrecord_mod(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+ return self._post_json(method='dnsrecord_mod', name=zone_name, item=item)
+
+ def dnsrecord_del(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ return self._post_json(method='dnsrecord_del', name=zone_name, item=item)
+
+
+def get_dnsrecord_dict(details=None):
+ module_dnsrecord = dict()
+ if details['record_type'] == 'A' and details['record_value']:
+ module_dnsrecord.update(arecord=details['record_value'])
+ elif details['record_type'] == 'AAAA' and details['record_value']:
+ module_dnsrecord.update(aaaarecord=details['record_value'])
+ elif details['record_type'] == 'A6' and details['record_value']:
+ module_dnsrecord.update(a6record=details['record_value'])
+ elif details['record_type'] == 'CNAME' and details['record_value']:
+ module_dnsrecord.update(cnamerecord=details['record_value'])
+ elif details['record_type'] == 'DNAME' and details['record_value']:
+ module_dnsrecord.update(dnamerecord=details['record_value'])
+ elif details['record_type'] == 'PTR' and details['record_value']:
+ module_dnsrecord.update(ptrrecord=details['record_value'])
+ elif details['record_type'] == 'TXT' and details['record_value']:
+ module_dnsrecord.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV' and details['record_value']:
+ module_dnsrecord.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX' and details['record_value']:
+ module_dnsrecord.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ module_dnsrecord.update(dnsttl=details['record_ttl'])
+
+ return module_dnsrecord
+
+
+def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord):
+ details = get_dnsrecord_dict(module_dnsrecord)
+ return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details)
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ record_name = module.params['record_name']
+ record_ttl = module.params.get('record_ttl')
+ state = module.params['state']
+
+ ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name)
+
+ module_dnsrecord = dict(
+ record_type=module.params['record_type'],
+ record_value=module.params['record_value'],
+ record_ttl=to_native(record_ttl, nonstring='passthru'),
+ )
+
+ # ttl is not required to change records
+ if module_dnsrecord['record_ttl'] is None:
+ module_dnsrecord.pop('record_ttl')
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_add(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_mod(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ if ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_del(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+
+ return changed, client.dnsrecord_find(zone_name, record_name)
+
+
+def main():
+ record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX']
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ zone_name=dict(type='str', required=True),
+ record_name=dict(type='str', aliases=['name'], required=True),
+ record_type=dict(type='str', default='A', choices=record_types),
+ record_value=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ record_ttl=dict(type='int', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = DNSRecordIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
new file mode 100644
index 00000000..1536866c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com)
+# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnszone
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage FreeIPA DNS Zones
+description:
+- Add and delete IPA DNS zones using the IPA API.
+options:
+ zone_name:
+ description:
+    - The name of the DNS zone to be managed.
+ required: true
+ type: str
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ dynamicupdate:
+    description: Apply dynamic update to the zone.
+ required: false
+ default: "false"
+ choices: ["false", "true"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns zone is present
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+
+- name: Ensure dns zone is present and allows dynamic updates
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ dynamicupdate: true
+
+- name: Ensure that dns zone is removed
+ community.general.ipa_dnszone:
+ zone_name: example.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
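+
+# Illustrative addition: the module supports check mode, so a dry run can
+# preview whether a zone would change. The zone name is a placeholder.
+- name: Preview zone creation without applying it (illustrative)
+  community.general.ipa_dnszone:
+    ipa_host: spider.example.com
+    ipa_pass: Passw0rd!
+    state: present
+    zone_name: staging.example.com
+  check_mode: true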
+'''
+
+RETURN = r'''
+zone:
+ description: DNS zone as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSZoneIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnszone_find(self, zone_name, details=None):
+        items = {'idnsname': zone_name}
+        if details is not None:
+            items.update(details)
+
+        return self._post_json(
+            method='dnszone_find',
+            name=zone_name,
+            item=items
+ )
+
+ def dnszone_add(self, zone_name=None, details=None):
+        items = {}
+        if details is not None:
+            items.update(details)
+
+        return self._post_json(
+            method='dnszone_add',
+            name=zone_name,
+            item=items
+ )
+
+ def dnszone_del(self, zone_name=None, record_name=None, details=None):
+ return self._post_json(
+ method='dnszone_del', name=zone_name, item={})
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ state = module.params['state']
+ dynamicupdate = module.params['dynamicupdate']
+
+ ipa_dnszone = client.dnszone_find(zone_name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate})
+ else:
+ changed = False
+ else:
+ if ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_del(zone_name=zone_name)
+
+ return changed, client.dnszone_find(zone_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(zone_name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ dynamicupdate=dict(type='str', required=False, default='false', choices=['true', 'false']),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = DNSZoneIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, zone = ensure(module, client)
+ module.exit_json(changed=changed, zone=zone)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py
new file mode 100644
index 00000000..84ff443a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_group
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA group
+description:
+- Add, modify and delete a group within the IPA server.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - Description of the group.
+ type: str
+ external:
+ description:
+ - Allow adding external non-IPA members from trusted domains.
+ type: bool
+ gidnumber:
+ description:
+ - GID (use this option to set it manually).
+ aliases: ['gid']
+ type: str
+ group:
+ description:
+ - List of group names assigned to this group.
+ - If an empty list is passed all groups will be removed from this group.
+ - If option is omitted assigned groups will not be checked or changed.
+ - Groups that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ nonposix:
+ description:
+ - Create as a non-POSIX group.
+ type: bool
+ user:
+ description:
+ - List of user names assigned to this group.
+ - If an empty list is passed all users will be removed from this group.
+ - If option is omitted assigned users will not be checked or changed.
+ - Users that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure group is present
+ community.general.ipa_group:
+ name: oinstall
+ gidnumber: '54321'
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that groups sysops and appops are assigned to ops but no other group
+ community.general.ipa_group:
+ name: ops
+ group:
+ - sysops
+ - appops
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that users linus and larry are assigned to the group, but no other user
+ community.general.ipa_group:
+ name: sysops
+ user:
+ - linus
+ - larry
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure group is absent
+ community.general.ipa_group:
+ name: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
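+
+# Illustrative addition: a non-POSIX group with a description; all values
+# below are placeholder assumptions.
+- name: Ensure a non-POSIX group is present (illustrative)
+  community.general.ipa_group:
+    name: external-partners
+    description: Non-POSIX group for trusted-domain members
+    nonposix: true
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret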
+'''
+
+RETURN = r'''
+group:
+ description: Group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class GroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(GroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def group_find(self, name):
+ return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
+
+ def group_add(self, name, item):
+ return self._post_json(method='group_add', name=name, item=item)
+
+ def group_mod(self, name, item):
+ return self._post_json(method='group_mod', name=name, item=item)
+
+ def group_del(self, name):
+ return self._post_json(method='group_del', name=name)
+
+ def group_add_member(self, name, item):
+ return self._post_json(method='group_add_member', name=name, item=item)
+
+ def group_add_member_group(self, name, item):
+ return self.group_add_member(name=name, item={'group': item})
+
+ def group_add_member_user(self, name, item):
+ return self.group_add_member(name=name, item={'user': item})
+
+ def group_remove_member(self, name, item):
+ return self._post_json(method='group_remove_member', name=name, item=item)
+
+ def group_remove_member_group(self, name, item):
+ return self.group_remove_member(name=name, item={'group': item})
+
+ def group_remove_member_user(self, name, item):
+ return self.group_remove_member(name=name, item={'user': item})
+
+
+def get_group_dict(description=None, external=None, gid=None, nonposix=None):
+ group = {}
+ if description is not None:
+ group['description'] = description
+ if external is not None:
+ group['external'] = external
+ if gid is not None:
+ group['gidnumber'] = gid
+ if nonposix is not None:
+ group['nonposix'] = nonposix
+ return group
+
+
+def get_group_diff(client, ipa_group, module_group):
+ data = []
+    # group_add accepts the attribute nonposix, whereas group_mod only accepts posix.
+ if 'nonposix' in module_group:
+ # Only non-posix groups can be changed to posix
+ if not module_group['nonposix'] and ipa_group.get('nonposix'):
+ module_group['posix'] = True
+ del module_group['nonposix']
+
+ if 'external' in module_group:
+ if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'):
+ del module_group['external']
+
+ return client.get_diff(ipa_data=ipa_group, module_data=module_group)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ user = module.params['user']
+
+ module_group = get_group_dict(description=module.params['description'], external=module.params['external'],
+ gid=module.params['gidnumber'], nonposix=module.params['nonposix'])
+ ipa_group = client.group_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_group:
+ changed = True
+ if not module.check_mode:
+ ipa_group = client.group_add(name, item=module_group)
+ else:
+ diff = get_group_diff(client, ipa_group, module_group)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_group.get(key)
+ client.group_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group,
+ client.group_add_member_group,
+ client.group_remove_member_group) or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user,
+ client.group_add_member_user,
+ client.group_remove_member_user) or changed
+
+ else:
+ if ipa_group:
+ changed = True
+ if not module.check_mode:
+ client.group_del(name)
+
+ return changed, client.group_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ external=dict(type='bool'),
+ gidnumber=dict(type='str', aliases=['gid']),
+ group=dict(type='list', elements='str'),
+ nonposix=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = GroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, group = ensure(module, client)
+ module.exit_json(changed=changed, group=group)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
new file mode 100644
index 00000000..cb49fd53
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hbacrule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA HBAC rule
+description:
+- Add, modify or delete an IPA HBAC rule using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description: Description
+ type: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ required: false
+ type: list
+ elements: str
+ hostcategory:
+ description: Host category
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of hostgroup names to assign.
+    - If an empty list is passed all hostgroups will be removed from the rule.
+ - If option is omitted hostgroups will not be checked or changed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all services will be removed from the rule.
+ - If option is omitted services will not be checked or changed.
+ type: list
+ elements: str
+ servicecategory:
+ description: Service category
+ choices: ['all']
+ type: str
+ servicegroup:
+ description:
+ - List of service group names to assign.
+ - If an empty list is passed all assigned service groups will be removed from the rule.
+ - If option is omitted service groups will not be checked or changed.
+ type: list
+ elements: str
+ sourcehost:
+ description:
+ - List of source host names to assign.
+    - If an empty list is passed all assigned source hosts will be removed from the rule.
+ - If option is omitted source hosts will not be checked or changed.
+ type: list
+ elements: str
+ sourcehostcategory:
+ description: Source host category
+ choices: ['all']
+ type: str
+ sourcehostgroup:
+ description:
+ - List of source host group names to assign.
+    - If an empty list is passed all assigned source host groups will be removed from the rule.
+ - If option is omitted source host groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure
+ default: "present"
+ choices: ["absent", "disabled", "enabled","present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+    - If an empty list is passed all assigned users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description: User category
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user group names to assign.
+    - If an empty list is passed all assigned user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure rule to allow all users to access any host from any host
+ community.general.ipa_hbacrule:
+ name: allow_all
+ description: Allow all users to access any host from any host
+ hostcategory: all
+ servicecategory: all
+ usercategory: all
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule with certain limitations
+ community.general.ipa_hbacrule:
+ name: allow_all_developers_access_to_db
+ description: Allow all developers to access any database from any host
+ hostgroup:
+ - db-server
+ usergroup:
+ - developers
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule is absent
+ community.general.ipa_hbacrule:
+ name: rule_to_be_deleted
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
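+
+# Illustrative addition: a rule can also be kept but temporarily disabled
+# via state=disabled. The rule name is a placeholder assumption.
+- name: Ensure rule exists but is disabled (illustrative)
+  community.general.ipa_hbacrule:
+    name: allow_all
+    state: disabled
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret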
+'''
+
+RETURN = r'''
+hbacrule:
+ description: HBAC rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HBACRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def hbacrule_find(self, name):
+ return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
+
+ def hbacrule_add(self, name, item):
+ return self._post_json(method='hbacrule_add', name=name, item=item)
+
+ def hbacrule_mod(self, name, item):
+ return self._post_json(method='hbacrule_mod', name=name, item=item)
+
+ def hbacrule_del(self, name):
+ return self._post_json(method='hbacrule_del', name=name)
+
+ def hbacrule_add_host(self, name, item):
+ return self._post_json(method='hbacrule_add_host', name=name, item=item)
+
+ def hbacrule_remove_host(self, name, item):
+ return self._post_json(method='hbacrule_remove_host', name=name, item=item)
+
+ def hbacrule_add_service(self, name, item):
+ return self._post_json(method='hbacrule_add_service', name=name, item=item)
+
+ def hbacrule_remove_service(self, name, item):
+ return self._post_json(method='hbacrule_remove_service', name=name, item=item)
+
+ def hbacrule_add_user(self, name, item):
+ return self._post_json(method='hbacrule_add_user', name=name, item=item)
+
+ def hbacrule_remove_user(self, name, item):
+ return self._post_json(method='hbacrule_remove_user', name=name, item=item)
+
+ def hbacrule_add_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
+
+ def hbacrule_remove_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
+
+
+def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
+ sourcehostcategory=None,
+ usercategory=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if servicecategory is not None:
+ data['servicecategory'] = servicecategory
+ if sourcehostcategory is not None:
+ data['sourcehostcategory'] = sourcehostcategory
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ return data
+
+
+def get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule):
+    return client.get_diff(ipa_data=ipa_hbacrule, module_data=module_hbacrule)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ service = module.params['service']
+ servicecategory = module.params['servicecategory']
+ servicegroup = module.params['servicegroup']
+ sourcehost = module.params['sourcehost']
+ sourcehostcategory = module.params['sourcehostcategory']
+ sourcehostgroup = module.params['sourcehostgroup']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_hbacrule = get_hbacrule_dict(description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ servicecategory=servicecategory,
+ sourcehostcategory=sourcehostcategory,
+ usercategory=usercategory)
+ ipa_hbacrule = client.hbacrule_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
+ else:
+            diff = get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hbacrule.get(key)
+ client.hbacrule_mod(name=name, item=data)
+
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'host') or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'hostgroup') or changed
+
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvc') or changed
+
+ if servicegroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
+ servicegroup,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvcgroup') or changed
+
+ if sourcehost is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'host') or changed
+
+ if sourcehostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'hostgroup') or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'user') or changed
+
+ if usergroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'group') or changed
+ else:
+ if ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ client.hbacrule_del(name=name)
+
+ return changed, client.hbacrule_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ servicecategory=dict(type='str', choices=['all']),
+ servicegroup=dict(type='list', elements='str'),
+ sourcehost=dict(type='list', elements='str'),
+ sourcehostcategory=dict(type='str', choices=['all']),
+ sourcehostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HBACRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hbacrule = ensure(module, client)
+ module.exit_json(changed=changed, hbacrule=hbacrule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py
new file mode 100644
index 00000000..80892c01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using IPA API.
+options:
+ fqdn:
+ description:
+    - Fully qualified domain name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - A description of this host.
+ type: str
+ force:
+ description:
+ - Force host name even if not in DNS.
+ required: false
+ type: bool
+ ip_address:
+ description:
+ - Add the host to DNS with this IP address.
+ type: str
+ mac_address:
+ description:
+    - List of hardware MAC address(es) of this host.
+ - If option is omitted MAC addresses will not be checked or changed.
+ - If an empty list is passed all assigned MAC addresses will be removed.
+ - MAC addresses that are already assigned but not passed will be removed.
+ aliases: ["macaddress"]
+ type: list
+ elements: str
+ ns_host_location:
+ description:
+ - Host location (e.g. "Lab 2")
+ aliases: ["nshostlocation"]
+ type: str
+ ns_hardware_platform:
+ description:
+ - Host hardware platform (e.g. "Lenovo T61")
+ aliases: ["nshardwareplatform"]
+ type: str
+ ns_os_version:
+ description:
+ - Host operating system and version (e.g. "Fedora 9")
+ aliases: ["nsosversion"]
+ type: str
+ user_certificate:
+ description:
+ - List of Base-64 encoded server certificates.
+ - If option is omitted certificates will not be checked or changed.
+ - If an empty list is passed all assigned certificates will be removed.
+ - Certificates already assigned but not passed will be removed.
+ aliases: ["usercertificate"]
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ update_dns:
+ description:
+ - If set C("True") with state as C("absent"), then removes DNS records of the host managed by FreeIPA DNS.
+ - This option has no effect for states other than "absent".
+ type: bool
+ random_password:
+ description: Generate a random password to be used in bulk enrollment.
+ type: bool
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host is present
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ ns_host_location: Lab
+ ns_os_version: CentOS 7
+ ns_hardware_platform: Lenovo T61
+ mac_address:
+ - "08:00:27:E3:B1:2D"
+ - "52:54:00:BD:97:1E"
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Generate a random password for bulk enrolment
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: False
+ random_password: True
+
+- name: Ensure host is disabled
+ community.general.ipa_host:
+ name: host01.example.com
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that all user certificates are removed
+ community.general.ipa_host:
+ name: host01.example.com
+ user_certificate: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host and its DNS record is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_dns: True
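+
+# Illustrative addition: force can enroll a host whose name does not (yet)
+# resolve in DNS. The host name is a placeholder assumption.
+- name: Ensure a host outside DNS is present (illustrative)
+  community.general.ipa_host:
+    name: lab-temp.example.com
+    force: true
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret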
+'''
+
+RETURN = r'''
+host:
+ description: Host as returned by IPA API.
+ returned: always
+ type: dict
+host_diff:
+ description: List of options that differ and would be changed
+ returned: if check mode and a difference is found
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostIPAClient, self).__init__(module, host, port, protocol)
+
+ def host_show(self, name):
+ return self._post_json(method='host_show', name=name)
+
+ def host_find(self, name):
+ return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
+
+ def host_add(self, name, host):
+ return self._post_json(method='host_add', name=name, item=host)
+
+ def host_mod(self, name, host):
+ return self._post_json(method='host_mod', name=name, item=host)
+
+ def host_del(self, name, update_dns):
+ return self._post_json(method='host_del', name=name, item={'updatedns': update_dns})
+
+ def host_disable(self, name):
+ return self._post_json(method='host_disable', name=name)
+
+
+def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
+ ns_os_version=None, user_certificate=None, mac_address=None, random_password=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if force is not None:
+ data['force'] = force
+ if ip_address is not None:
+ data['ip_address'] = ip_address
+ if ns_host_location is not None:
+ data['nshostlocation'] = ns_host_location
+ if ns_hardware_platform is not None:
+ data['nshardwareplatform'] = ns_hardware_platform
+ if ns_os_version is not None:
+ data['nsosversion'] = ns_os_version
+ if user_certificate is not None:
+ data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
+ if mac_address is not None:
+ data['macaddress'] = mac_address
+ if random_password is not None:
+ data['random'] = random_password
+ return data
+
+
+def get_host_diff(client, ipa_host, module_host):
+ non_updateable_keys = ['force', 'ip_address']
+ if not module_host.get('random'):
+ non_updateable_keys.append('random')
+ for key in non_updateable_keys:
+ if key in module_host:
+ del module_host[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_host)
+
+
+def ensure(module, client):
+ name = module.params['fqdn']
+ state = module.params['state']
+
+ ipa_host = client.host_find(name=name)
+ module_host = get_host_dict(description=module.params['description'],
+ force=module.params['force'], ip_address=module.params['ip_address'],
+ ns_host_location=module.params['ns_host_location'],
+ ns_hardware_platform=module.params['ns_hardware_platform'],
+ ns_os_version=module.params['ns_os_version'],
+ user_certificate=module.params['user_certificate'],
+ mac_address=module.params['mac_address'],
+ random_password=module.params.get('random_password'),
+ )
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_host:
+ changed = True
+ if not module.check_mode:
+ # OTP password generated by FreeIPA is visible only for host_add command
+ # so, return directly from here.
+ return changed, client.host_add(name=name, host=module_host)
+ else:
+ diff = get_host_diff(client, ipa_host, module_host)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_host.get(key)
+ ipa_host_show = client.host_show(name=name)
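+ # An already enrolled host (one with a keytab) has to be disabled before
+ # IPA will issue a new random OTP for it.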
+ if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'):
+ client.host_disable(name=name)
+ return changed, client.host_mod(name=name, host=data)
+
+ else:
+ if ipa_host:
+ changed = True
+ update_dns = module.params.get('update_dns', False)
+ if not module.check_mode:
+ client.host_del(name=name, update_dns=update_dns)
+
+ return changed, client.host_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ fqdn=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool'),
+ ip_address=dict(type='str'),
+ ns_host_location=dict(type='str', aliases=['nshostlocation']),
+ ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']),
+ ns_os_version=dict(type='str', aliases=['nsosversion']),
+ user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'),
+ mac_address=dict(type='list', aliases=['macaddress'], elements='str'),
+ update_dns=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ random_password=dict(type='bool', no_log=False),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
new file mode 100644
index 00000000..ae1f1a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using IPA API.
+options:
+ cn:
+ description:
+ - Name of host-group.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ host:
+ description:
+ - List of hosts that belong to the host-group.
+ - If an empty list is passed all hosts will be removed from the group.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host-groups that belong to that host-group.
+ - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host-group databases is present
+ community.general.ipa_hostgroup:
+ name: databases
+ state: present
+ host:
+ - db.example.com
+ hostgroup:
+ - mysql-server
+ - oracle-server
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host-group databases is absent
+ community.general.ipa_hostgroup:
+ name: databases
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+hostgroup:
+ description: Hostgroup as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def hostgroup_find(self, name):
+ return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def hostgroup_add(self, name, item):
+ return self._post_json(method='hostgroup_add', name=name, item=item)
+
+ def hostgroup_mod(self, name, item):
+ return self._post_json(method='hostgroup_mod', name=name, item=item)
+
+ def hostgroup_del(self, name):
+ return self._post_json(method='hostgroup_del', name=name)
+
+ def hostgroup_add_member(self, name, item):
+ return self._post_json(method='hostgroup_add_member', name=name, item=item)
+
+ def hostgroup_add_host(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'host': item})
+
+ def hostgroup_add_hostgroup(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'hostgroup': item})
+
+ def hostgroup_remove_member(self, name, item):
+ return self._post_json(method='hostgroup_remove_member', name=name, item=item)
+
+ def hostgroup_remove_host(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'host': item})
+
+ def hostgroup_remove_hostgroup(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'hostgroup': item})
+
+
+def get_hostgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup):
+ return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+
+ ipa_hostgroup = client.hostgroup_find(name=name)
+ module_hostgroup = get_hostgroup_dict(description=module.params['description'])
+
+ changed = False
+ if state == 'present':
+ if not ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup)
+ else:
+ diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hostgroup.get(key)
+ client.hostgroup_mod(name=name, item=data)
+
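+ # modify_if_diff (from module_utils.ipa, not shown here) diffs the current
+ # member list against the desired one and calls the given add/remove
+ # callbacks for the delta, returning True if anything changed; members are
+ # lowercased because IPA returns member names in lower case.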
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host],
+ client.hostgroup_add_host, client.hostgroup_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
+ [item.lower() for item in hostgroup],
+ client.hostgroup_add_hostgroup,
+ client.hostgroup_remove_hostgroup) or changed
+
+ else:
+ if ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ client.hostgroup_del(name=name)
+
+ return changed, client.hostgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hostgroup = ensure(module, client)
+ module.exit_json(changed=changed, hostgroup=hostgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py
new file mode 100644
index 00000000..589a6d5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API.
+options:
+ cn:
+ description:
+ - Role name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - A description of this role.
+ type: str
+ group:
+ description:
+ - List of group names to assign to this role.
+ - If an empty list is passed all assigned groups will be unassigned from the role.
+ - If option is omitted groups will not be checked or changed.
+ - If option is passed all assigned groups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all assigned hosts will be unassigned from the role.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host group names to assign.
+ - If an empty list is passed all assigned host groups will be removed from the role.
+ - If option is omitted host groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ privilege:
+ description:
+ - List of privileges granted to the role.
+ - If an empty list is passed all assigned privileges will be removed.
+ - If option is omitted privileges will not be checked or changed.
+ - If option is passed all assigned privileges that are not passed will be removed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all assigned services will be removed from the role.
+ - If option is omitted services will not be checked or changed.
+ - If option is passed all assigned services that are not passed will be removed from the role.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the role.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure role is present
+ community.general.ipa_role:
+ name: dba
+ description: Database Administrators
+ state: present
+ user:
+ - pinky
+ - brain
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure role with certain details is present
+ community.general.ipa_role:
+ name: another-role
+ description: Just another role
+ group:
+ - editors
+ host:
+ - host01.example.com
+ hostgroup:
+ - hostgroup01
+ privilege:
+ - Group Administrators
+ - User Administrators
+ service:
+ - service01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure role is absent
+ community.general.ipa_role:
+ name: dba
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+role:
+ description: Role as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class RoleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(RoleIPAClient, self).__init__(module, host, port, protocol)
+
+ def role_find(self, name):
+ return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name})
+
+ def role_add(self, name, item):
+ return self._post_json(method='role_add', name=name, item=item)
+
+ def role_mod(self, name, item):
+ return self._post_json(method='role_mod', name=name, item=item)
+
+ def role_del(self, name):
+ return self._post_json(method='role_del', name=name)
+
+ def role_add_member(self, name, item):
+ return self._post_json(method='role_add_member', name=name, item=item)
+
+ def role_add_group(self, name, item):
+ return self.role_add_member(name=name, item={'group': item})
+
+ def role_add_host(self, name, item):
+ return self.role_add_member(name=name, item={'host': item})
+
+ def role_add_hostgroup(self, name, item):
+ return self.role_add_member(name=name, item={'hostgroup': item})
+
+ def role_add_service(self, name, item):
+ return self.role_add_member(name=name, item={'service': item})
+
+ def role_add_user(self, name, item):
+ return self.role_add_member(name=name, item={'user': item})
+
+ def role_remove_member(self, name, item):
+ return self._post_json(method='role_remove_member', name=name, item=item)
+
+ def role_remove_group(self, name, item):
+ return self.role_remove_member(name=name, item={'group': item})
+
+ def role_remove_host(self, name, item):
+ return self.role_remove_member(name=name, item={'host': item})
+
+ def role_remove_hostgroup(self, name, item):
+ return self.role_remove_member(name=name, item={'hostgroup': item})
+
+ def role_remove_service(self, name, item):
+ return self.role_remove_member(name=name, item={'service': item})
+
+ def role_remove_user(self, name, item):
+ return self.role_remove_member(name=name, item={'user': item})
+
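+ # Privileges are not ordinary role members: IPA provides dedicated
+ # role_add_privilege/role_remove_privilege methods, and current assignments
+ # appear under 'memberof_privilege' instead of a 'member_*' key.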
+ def role_add_privilege(self, name, item):
+ return self._post_json(method='role_add_privilege', name=name, item={'privilege': item})
+
+ def role_remove_privilege(self, name, item):
+ return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item})
+
+
+def get_role_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_role_diff(client, ipa_role, module_role):
+ return client.get_diff(ipa_data=ipa_role, module_data=module_role)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+ privilege = module.params['privilege']
+ service = module.params['service']
+ user = module.params['user']
+
+ module_role = get_role_dict(description=module.params['description'])
+ ipa_role = client.role_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_role:
+ changed = True
+ if not module.check_mode:
+ ipa_role = client.role_add(name=name, item=module_role)
+ else:
+ diff = get_role_diff(client, ipa_role, module_role)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_role.get(key)
+ client.role_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group,
+ client.role_add_group,
+ client.role_remove_group) or changed
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host,
+ client.role_add_host,
+ client.role_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup,
+ client.role_add_hostgroup,
+ client.role_remove_hostgroup) or changed
+
+ if privilege is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege,
+ client.role_add_privilege,
+ client.role_remove_privilege) or changed
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service,
+ client.role_add_service,
+ client.role_remove_service) or changed
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user,
+ client.role_add_user,
+ client.role_remove_user) or changed
+
+ else:
+ if ipa_role:
+ changed = True
+ if not module.check_mode:
+ client.role_del(name)
+
+ return changed, client.role_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ group=dict(type='list', elements='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ privilege=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = RoleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, role = ensure(module, client)
+ module.exit_json(changed=changed, role=role)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py
new file mode 100644
index 00000000..c13f7ab6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_service
+author: Cédric Parent (@cprh)
+short_description: Manage FreeIPA service
+description:
+- Add and delete an IPA service using IPA API.
+options:
+ krbcanonicalname:
+ description:
+ - Principal of the service.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ hosts:
+ description:
+ - Defines the list of 'ManagedBy' hosts.
+ required: false
+ type: list
+ elements: str
+ force:
+ description:
+ - Force principal name even if host is not in DNS.
+ required: false
+ type: bool
+ state:
+ description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure service is present
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure service is absent
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure the list of ManagedBy hosts is as specified
+ community.general.ipa_service:
+ name: http/host01.example.com
+ hosts:
+ - host01.example.com
+ - host02.example.com
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+service:
+ description: Service as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ServiceIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ServiceIPAClient, self).__init__(module, host, port, protocol)
+
+ def service_find(self, name):
+ return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
+
+ def service_add(self, name, service):
+ return self._post_json(method='service_add', name=name, item=service)
+
+ def service_mod(self, name, service):
+ return self._post_json(method='service_mod', name=name, item=service)
+
+ def service_del(self, name):
+ return self._post_json(method='service_del', name=name)
+
+ def service_disable(self, name):
+ return self._post_json(method='service_disable', name=name)
+
+ def service_add_host(self, name, item):
+ return self._post_json(method='service_add_host', name=name, item={'host': item})
+
+ def service_remove_host(self, name, item):
+ return self._post_json(method='service_remove_host', name=name, item={'host': item})
+
+
+def get_service_dict(force=None, krbcanonicalname=None):
+ data = {}
+ if force is not None:
+ data['force'] = force
+ if krbcanonicalname is not None:
+ data['krbcanonicalname'] = krbcanonicalname
+ return data
+
+
+def get_service_diff(client, ipa_host, module_service):
+ non_updateable_keys = ['force', 'krbcanonicalname']
+ for key in non_updateable_keys:
+ if key in module_service:
+ del module_service[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_service)
+
+
+def ensure(module, client):
+ name = module.params['krbcanonicalname']
+ state = module.params['state']
+ hosts = module.params['hosts']
+
+ ipa_service = client.service_find(name=name)
+ module_service = get_service_dict(force=module.params['force'])
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_add(name=name, service=module_service)
+ else:
+ diff = get_service_diff(client, ipa_service, module_service)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_service.get(key)
+ client.service_mod(name=name, service=data)
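+ # The ManagedBy host list is reconciled manually instead of via
+ # modify_if_diff: current hosts missing from the desired list are removed
+ # one by one, then desired hosts not yet present are added; for a service
+ # without any 'managedby_host' entry every requested host is simply added.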
+ if hosts is not None:
+ if 'managedby_host' in ipa_service:
+ for host in ipa_service['managedby_host']:
+ if host not in hosts:
+ if not module.check_mode:
+ client.service_remove_host(name=name, item=host)
+ changed = True
+ for host in hosts:
+ if host not in ipa_service['managedby_host']:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+ else:
+ for host in hosts:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+
+ else:
+ if ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_del(name=name)
+
+ return changed, client.service_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ krbcanonicalname=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool', required=False),
+ hosts=dict(type='list', required=False, elements='str'),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = ServiceIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py
new file mode 100644
index 00000000..218951a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_subca
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA Lightweight Sub Certificate Authorities
+description:
+- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authorities using IPA API.
+options:
+ subca_name:
+ description:
+ - The Sub Certificate Authority name which needs to be managed.
+ required: true
+ aliases: ["name"]
+ type: str
+ subca_subject:
+ description:
+ - The Sub Certificate Authority's Subject, for example 'CN=SampleSubCA1,O=testrelm.test'.
+ required: true
+ type: str
+ subca_desc:
+ description:
+ - The Sub Certificate Authority's description.
+ type: str
+ state:
+ description:
+ - State to ensure.
+ - States 'disabled' and 'enabled' are available for FreeIPA version 4.4.2 and onwards.
+ required: false
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure IPA Sub CA is present
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ subca_name: AnsibleSubCA1
+ subca_subject: 'CN=AnsibleSubCA1,O=example.com'
+ subca_desc: Ansible Sub CA
+
+- name: Ensure that IPA Sub CA is removed
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: absent
+ subca_name: AnsibleSubCA1
+
+- name: Ensure that IPA Sub CA is disabled
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: disabled
+ subca_name: AnsibleSubCA1
+'''
+
+RETURN = r'''
+subca:
+ description: IPA Sub CA record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SubCAIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SubCAIPAClient, self).__init__(module, host, port, protocol)
+
+ def subca_find(self, subca_name):
+ return self._post_json(method='ca_find', name=subca_name, item=None)
+
+ def subca_add(self, subca_name=None, subject_dn=None, details=None):
+ item = dict(ipacasubjectdn=subject_dn)
+ subca_desc = details.get('description', None)
+ if subca_desc is not None:
+ item.update(description=subca_desc)
+ return self._post_json(method='ca_add', name=subca_name, item=item)
+
+ def subca_mod(self, subca_name=None, diff=None, details=None):
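+ # Each differing attribute is applied through a separate ca_mod call using
+ # the generic 'setattr' option ("attribute=value").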
+ item = get_subca_dict(details)
+ for change in diff:
+ update_detail = dict()
+ if item[change] is not None:
+ update_detail.update(setattr="{0}={1}".format(change, item[change]))
+ self._post_json(method='ca_mod', name=subca_name, item=update_detail)
+
+ def subca_del(self, subca_name=None):
+ return self._post_json(method='ca_del', name=subca_name)
+
+ def subca_disable(self, subca_name=None):
+ return self._post_json(method='ca_disable', name=subca_name)
+
+ def subca_enable(self, subca_name=None):
+ return self._post_json(method='ca_enable', name=subca_name)
+
+
+def get_subca_dict(details=None):
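+ # Map module parameters to IPA attribute names. A sketch of the result:
+ # get_subca_dict({'description': 'test CA', 'subca_subject': 'CN=Sub,O=example.com'})
+ # -> {'description': 'test CA', 'ipacasubjectdn': 'CN=Sub,O=example.com'}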
+ module_subca = dict()
+ if details['description'] is not None:
+ module_subca['description'] = details['description']
+ if details['subca_subject'] is not None:
+ module_subca['ipacasubjectdn'] = details['subca_subject']
+ return module_subca
+
+
+def get_subca_diff(client, ipa_subca, module_subca):
+ details = get_subca_dict(module_subca)
+ return client.get_diff(ipa_data=ipa_subca, module_data=details)
+
+
+def ensure(module, client):
+ subca_name = module.params['subca_name']
+ subca_subject_dn = module.params['subca_subject']
+ subca_desc = module.params['subca_desc']
+
+ state = module.params['state']
+
+ ipa_subca = client.subca_find(subca_name)
+ module_subca = dict(description=subca_desc,
+ subca_subject=subca_subject_dn)
+
+ changed = False
+ if state == 'present':
+ if not ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca)
+ else:
+ diff = get_subca_diff(client, ipa_subca, module_subca)
+ # IPA does not allow modifying a Sub CA's subject DN,
+ # so skip it for now.
+ if 'ipacasubjectdn' in diff:
+ diff.remove('ipacasubjectdn')
+ del module_subca['subca_subject']
+
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca)
+ elif state == 'absent':
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_del(subca_name=subca_name)
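+ # Disabling/enabling a Sub CA is only supported from FreeIPA 4.4.2 onwards,
+ # so the server version is checked before either operation.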
+ elif state == 'disabled':
+ ipa_version = client.get_ipa_version()
+ if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+ module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. Please upgrade to "
+ "version 4.4.2 or greater." % ipa_version)
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_disable(subca_name=subca_name)
+ elif state == 'enabled':
+ ipa_version = client.get_ipa_version()
+ if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+ module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to "
+ "version 4.4.2 or greater." % ipa_version)
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_enable(subca_name=subca_name)
+
+ return changed, client.subca_find(subca_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']),
+ subca_subject=dict(type='str', required=True),
+ subca_desc=dict(type='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+
+ client = SubCAIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py
new file mode 100644
index 00000000..aa09e0e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmd
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command
+description:
+- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
+options:
+ sudocmd:
+ description:
+ - Sudo command.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - A description of this command.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command exists
+ community.general.ipa_sudocmd:
+ name: su
+ description: Allow to run su via sudo
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command does not exist
+ community.general.ipa_sudocmd:
+ name: su
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmd:
+ description: Sudo command as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmd_find(self, name):
+ return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
+
+ def sudocmd_add(self, name, item):
+ return self._post_json(method='sudocmd_add', name=name, item=item)
+
+ def sudocmd_mod(self, name, item):
+ return self._post_json(method='sudocmd_mod', name=name, item=item)
+
+ def sudocmd_del(self, name):
+ return self._post_json(method='sudocmd_del', name=name)
+
+
+def get_sudocmd_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
+ return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
+
+
+def ensure(module, client):
+ name = module.params['sudocmd']
+ state = module.params['state']
+
+ module_sudocmd = get_sudocmd_dict(description=module.params['description'])
+ ipa_sudocmd = client.sudocmd_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_add(name=name, item=module_sudocmd)
+ else:
+ diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmd.get(key)
+ client.sudocmd_mod(name=name, item=data)
+ else:
+ if ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_del(name=name)
+
+ return changed, client.sudocmd_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='str', required=True, aliases=['name']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmd = ensure(module, client)
+ module.exit_json(changed=changed, sudocmd=sudocmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
new file mode 100644
index 00000000..96eb6559
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmdgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command group
+description:
+- Add, modify or delete sudo command group within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Sudo Command Group.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - Group description.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+ sudocmd:
+ description:
+ - List of sudo commands to assign to the group.
+ - If an empty list is passed all assigned commands will be removed from the group.
+ - If option is omitted sudo commands will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command group exists
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ description: Group of important commands
+ sudocmd:
+ - su
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command group does not exist
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmdgroup:
+ description: Sudo command group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmdgroup_find(self, name):
+ return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def sudocmdgroup_add(self, name, item):
+ return self._post_json(method='sudocmdgroup_add', name=name, item=item)
+
+ def sudocmdgroup_mod(self, name, item):
+ return self._post_json(method='sudocmdgroup_mod', name=name, item=item)
+
+ def sudocmdgroup_del(self, name):
+ return self._post_json(method='sudocmdgroup_del', name=name)
+
+ def sudocmdgroup_add_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_add_member', name=name, item=item)
+
+ def sudocmdgroup_add_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item})
+
+ def sudocmdgroup_remove_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item)
+
+ def sudocmdgroup_remove_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item})
+
+
+def get_sudocmdgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup):
+ return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ sudocmd = module.params['sudocmd']
+
+ module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
+ ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
+ else:
+ diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmdgroup.get(key)
+ client.sudocmdgroup_mod(name=name, item=data)
+
+ if sudocmd is not None:
+ changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
+ client.sudocmdgroup_add_member_sudocmd,
+ client.sudocmdgroup_remove_member_sudocmd) or changed
+ else:
+ if ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ client.sudocmdgroup_del(name=name)
+
+ return changed, client.sudocmdgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmdgroup = ensure(module, client)
+ module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
new file mode 100644
index 00000000..9a0259bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
@@ -0,0 +1,400 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudorule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo rule
+description:
+- Add, modify or delete sudo rule within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ cmdcategory:
+ description:
+ - Command category the rule applies to.
+ choices: ['all']
+ type: str
+ cmd:
+ description:
+ - List of commands assigned to the rule.
+ - If an empty list is passed all commands will be removed from the rule.
+ - If option is omitted commands will not be checked or changed.
+ type: list
+ elements: str
+ description:
+ description:
+ - Description of the sudo rule.
+ type: str
+ host:
+ description:
+ - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign hosts.
+ type: list
+ elements: str
+ hostcategory:
+ description:
+ - Host category the rule applies to.
+ - If 'all' is passed one must omit C(host) and C(hostgroup).
+ - Options C(host) and C(hostgroup) must be omitted to assign 'all'.
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of host groups assigned to the rule.
+ - If an empty list is passed all host groups will be removed from the rule.
+ - If option is omitted host groups will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign host groups.
+ type: list
+ elements: str
+ runasusercategory:
+ description:
+ - RunAs User category the rule applies to.
+ choices: ['all']
+ type: str
+ runasgroupcategory:
+ description:
+ - RunAs Group category the rule applies to.
+ choices: ['all']
+ type: str
+ sudoopt:
+ description:
+ - List of options to add to the sudo rule.
+ type: list
+ elements: str
+ user:
+ description:
+ - List of users assigned to the rule.
+ - If an empty list is passed all users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description:
+ - User category the rule applies to.
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user groups assigned to the rule.
+ - If an empty list is passed all user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password
+ community.general.ipa_sudorule:
+ name: sudo_all_nopasswd
+ cmdcategory: all
+ description: Allow to run every command with sudo without password
+ hostcategory: all
+ sudoopt:
+ - '!authenticate'
+ usercategory: all
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+ community.general.ipa_sudorule:
+ name: sudo_dev_dbserver
+ description: Allow developers to run every command with sudo on all database servers
+ cmdcategory: all
+ host:
+ - db01.example.com
+ hostgroup:
+ - db-server
+ sudoopt:
+ - '!authenticate'
+ usergroup:
+ - developers
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudorule:
+ description: Sudo rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudorule_find(self, name):
+ return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
+
+ def sudorule_add(self, name, item):
+ return self._post_json(method='sudorule_add', name=name, item=item)
+
+ def sudorule_mod(self, name, item):
+ return self._post_json(method='sudorule_mod', name=name, item=item)
+
+ def sudorule_del(self, name):
+ return self._post_json(method='sudorule_del', name=name)
+
+ def sudorule_add_option(self, name, item):
+ return self._post_json(method='sudorule_add_option', name=name, item=item)
+
+ def sudorule_add_option_ipasudoopt(self, name, item):
+ return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_remove_option(self, name, item):
+ return self._post_json(method='sudorule_remove_option', name=name, item=item)
+
+ def sudorule_remove_option_ipasudoopt(self, name, item):
+ return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_add_host(self, name, item):
+ return self._post_json(method='sudorule_add_host', name=name, item=item)
+
+ def sudorule_add_host_host(self, name, item):
+ return self.sudorule_add_host(name=name, item={'host': item})
+
+ def sudorule_add_host_hostgroup(self, name, item):
+ return self.sudorule_add_host(name=name, item={'hostgroup': item})
+
+ def sudorule_remove_host(self, name, item):
+ return self._post_json(method='sudorule_remove_host', name=name, item=item)
+
+ def sudorule_remove_host_host(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'host': item})
+
+ def sudorule_remove_host_hostgroup(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'hostgroup': item})
+
+ def sudorule_add_allow_command(self, name, item):
+ return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item})
+
+ def sudorule_remove_allow_command(self, name, item):
+ return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
+
+ def sudorule_add_user(self, name, item):
+ return self._post_json(method='sudorule_add_user', name=name, item=item)
+
+ def sudorule_add_user_user(self, name, item):
+ return self.sudorule_add_user(name=name, item={'user': item})
+
+ def sudorule_add_user_group(self, name, item):
+ return self.sudorule_add_user(name=name, item={'group': item})
+
+ def sudorule_remove_user(self, name, item):
+ return self._post_json(method='sudorule_remove_user', name=name, item=item)
+
+ def sudorule_remove_user_user(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'user': item})
+
+ def sudorule_remove_user_group(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'group': item})
+
+
+def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None,
+ runasgroupcategory=None, runasusercategory=None):
+ data = {}
+ if cmdcategory is not None:
+ data['cmdcategory'] = cmdcategory
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ if runasusercategory is not None:
+ data['ipasudorunasusercategory'] = runasusercategory
+ if runasgroupcategory is not None:
+ data['ipasudorunasgroupcategory'] = runasgroupcategory
+ return data
+
+
+def category_changed(module, client, category_name, ipa_sudorule):
+ if ipa_sudorule.get(category_name, None) == ['all']:
+ if not module.check_mode:
+ # cn is returned as list even with only a single value.
+ client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
+ return True
+ return False
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ cmd = module.params['cmd']
+ cmdcategory = module.params['cmdcategory']
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ runasusercategory = module.params['runasusercategory']
+ runasgroupcategory = module.params['runasgroupcategory']
+
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ sudoopt = module.params['sudoopt']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
+ description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ usercategory=usercategory,
+ runasusercategory=runasusercategory,
+ runasgroupcategory=runasgroupcategory)
+ ipa_sudorule = client.sudorule_find(name=name)
+
+ changed = False
+ if state in ['present', 'disabled', 'enabled']:
+ if not ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
+ else:
+ diff = client.get_diff(ipa_sudorule, module_sudorule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ if 'hostcategory' in diff:
+ if ipa_sudorule.get('memberhost_host', None) is not None:
+ client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
+ if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
+ client.sudorule_remove_host_hostgroup(name=name,
+ item=ipa_sudorule.get('memberhost_hostgroup'))
+
+ client.sudorule_mod(name=name, item=module_sudorule)
+
+ if cmd is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_allow_command(name=name, item=cmd)
+
+ if runasusercategory is not None:
+ changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed
+
+ if runasgroupcategory is not None:
+ changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed
+
+ if host is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
+ client.sudorule_add_host_host,
+ client.sudorule_remove_host_host) or changed
+
+ if hostgroup is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
+ client.sudorule_add_host_hostgroup,
+ client.sudorule_remove_host_hostgroup) or changed
+ if sudoopt is not None:
+ # client.modify_if_diff does not work here, as each option must be removed/added individually
+ ipa_list = ipa_sudorule.get('ipasudoopt', [])
+ module_list = sudoopt
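+ # A sketch of the reconciliation: with ipa_list=['!authenticate'] and
+ # module_list=['logfile=/var/log/sudo.log'], '!authenticate' is removed and
+ # 'logfile=/var/log/sudo.log' is added, each via its own API call.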
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_remove_option_ipasudoopt(name, item)
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_add_option_ipasudoopt(name, item)
+
+ if user is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
+ client.sudorule_add_user_user,
+ client.sudorule_remove_user_user) or changed
+ if usergroup is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
+ client.sudorule_add_user_group,
+ client.sudorule_remove_user_group) or changed
+ else:
+ if ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ client.sudorule_del(name)
+
+ return changed, client.sudorule_find(name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cmd=dict(type='list', elements='str'),
+ cmdcategory=dict(type='str', choices=['all']),
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ runasusercategory=dict(type='str', choices=['all']),
+ runasgroupcategory=dict(type='str', choices=['all']),
+ sudoopt=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[['cmdcategory', 'cmd'],
+ ['hostcategory', 'host'],
+ ['hostcategory', 'hostgroup'],
+ ['usercategory', 'user'],
+ ['usercategory', 'usergroup']],
+ supports_check_mode=True)
+
+ client = SudoRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudorule = ensure(module, client)
+ module.exit_json(changed=changed, sudorule=sudorule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py
new file mode 100644
index 00000000..fa7b3abb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_user
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA users
+description:
+- Add, modify and delete user within IPA server.
+options:
+ displayname:
+ description: Display name.
+ type: str
+ update_password:
+ description:
+ - Set password for a user.
+ type: str
+ default: 'always'
+ choices: [ always, on_create ]
+ givenname:
+ description: First name.
+ type: str
+ krbpasswordexpiration:
+ description:
+ - Date at which the user password will expire.
+ - In the format YYYYMMddHHmmss.
+    - e.g. 20180121182022 means the password will expire on 21 January 2018 at 18:20:22.
+ type: str
+ loginshell:
+ description: Login shell.
+ type: str
+ mail:
+ description:
+ - List of mail addresses assigned to the user.
+ - If an empty list is passed all assigned email addresses will be deleted.
+ - If None is passed email addresses will not be checked or changed.
+ type: list
+ elements: str
+ password:
+ description:
+ - Password for a user.
+ - Will not be set for an existing user unless I(update_password=always), which is the default.
+ type: str
+ sn:
+ description: Surname.
+ type: str
+ sshpubkey:
+ description:
+    - List of public SSH keys.
+ - If an empty list is passed all assigned public keys will be deleted.
+ - If None is passed SSH public keys will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ telephonenumber:
+ description:
+ - List of telephone numbers assigned to the user.
+ - If an empty list is passed all assigned telephone numbers will be deleted.
+ - If None is passed telephone numbers will not be checked or changed.
+ type: list
+ elements: str
+ title:
+ description: Title.
+ type: str
+ uid:
+ description: uid of the user.
+ required: true
+ aliases: ["name"]
+ type: str
+ uidnumber:
+ description:
+ - Account Settings UID/Posix User ID number.
+ type: str
+ gidnumber:
+ description:
+ - Posix Group ID.
+ type: str
+ homedirectory:
+ description:
+ - Default home directory of the user.
+ type: str
+ version_added: '0.2.0'
+ userauthtype:
+ description:
+ - The authentication type to use for the user.
+ choices: ["password", "radius", "otp", "pkinit", "hardened"]
+ type: str
+ version_added: '1.2.0'
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+requirements:
+- base64
+- hashlib
+'''
+
+EXAMPLES = r'''
+- name: Ensure pinky is present and always reset password
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ krbpasswordexpiration: 20200119235959
+ givenname: Pinky
+ sn: Acme
+ mail:
+ - pinky@acme.com
+ telephonenumber:
+ - '+555123456'
+ sshpubkey:
+ - ssh-rsa ....
+ - ssh-dsa ....
+ uidnumber: '1001'
+ gidnumber: '100'
+ homedirectory: /home/pinky
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure brain is absent
+ community.general.ipa_user:
+ name: brain
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure pinky is present but don't reset password if user already exists
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ givenname: Pinky
+ sn: Acme
+ password: zounds
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_password: on_create
+
+- name: Ensure pinky is present and using one time password authentication
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ userauthtype: otp
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+user:
+ description: User as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import base64
+import hashlib
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class UserIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(UserIPAClient, self).__init__(module, host, port, protocol)
+
+ def user_find(self, name):
+ return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
+
+ def user_add(self, name, item):
+ return self._post_json(method='user_add', name=name, item=item)
+
+ def user_mod(self, name, item):
+ return self._post_json(method='user_mod', name=name, item=item)
+
+ def user_del(self, name):
+ return self._post_json(method='user_del', name=name)
+
+ def user_disable(self, name):
+ return self._post_json(method='user_disable', name=name)
+
+ def user_enable(self, name):
+ return self._post_json(method='user_enable', name=name)
+
+
+def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None,
+ mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None,
+ title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None,
+ userauthtype=None):
+ user = {}
+ if displayname is not None:
+ user['displayname'] = displayname
+ if krbpasswordexpiration is not None:
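+        # IPA stores this as an LDAP GeneralizedTime value, hence the trailing 'Z' (UTC).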
+ user['krbpasswordexpiration'] = krbpasswordexpiration + "Z"
+ if givenname is not None:
+ user['givenname'] = givenname
+ if loginshell is not None:
+ user['loginshell'] = loginshell
+ if mail is not None:
+ user['mail'] = mail
+ user['nsaccountlock'] = nsaccountlock
+ if sn is not None:
+ user['sn'] = sn
+ if sshpubkey is not None:
+ user['ipasshpubkey'] = sshpubkey
+ if telephonenumber is not None:
+ user['telephonenumber'] = telephonenumber
+ if title is not None:
+ user['title'] = title
+ if userpassword is not None:
+ user['userpassword'] = userpassword
+ if gidnumber is not None:
+ user['gidnumber'] = gidnumber
+ if uidnumber is not None:
+ user['uidnumber'] = uidnumber
+ if homedirectory is not None:
+ user['homedirectory'] = homedirectory
+ if userauthtype is not None:
+ user['ipauserauthtype'] = userauthtype
+
+ return user
+
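+# Illustrative sketch (hypothetical values): get_user_dict(givenname='Pinky', sn='Acme')
+# returns {'givenname': 'Pinky', 'nsaccountlock': False, 'sn': 'Acme'}; nsaccountlock
+# is always included, so the account lock state is always part of the comparison.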
+
+def get_user_diff(client, ipa_user, module_user):
+ """
+    Return the keys whose values differ between the two dicts. Unfortunately the IPA
+    API returns everything as a list, even when only a single value is possible.
+    Therefore some more complexity is needed.
+    If the value of an attribute in module_user is not a list while the same
+    attribute in ipa_user is a list, the value is wrapped in a list before the
+    comparison. This way the method should not need to change if the dict
+    returned by the API changes.
+    :param client:
+    :param ipa_user:
+    :param module_user:
+    :return:
+    """
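+    # Illustrative sketch (hypothetical values): with ipa_user={'sn': ['Acme']} and
+    # module_user={'sn': 'Acme'}, the scalar 'Acme' is compared as ['Acme'], so no
+    # difference is reported for 'sn'.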
+    # sshpubkeyfp is the list of SSH key fingerprints. IPA doesn't return the keys
+    # themselves but only their fingerprints; these are used for comparison.
+ sshpubkey = None
+ if 'ipasshpubkey' in module_user:
+ hash_algo = 'md5'
+ if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
+ hash_algo = 'sha256'
+ module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
+    # Remove the ipasshpubkey element as it is not returned from IPA, but save its value to be used later on
+ sshpubkey = module_user['ipasshpubkey']
+ del module_user['ipasshpubkey']
+
+ result = client.get_diff(ipa_data=ipa_user, module_data=module_user)
+
+ # If there are public keys, remove the fingerprints and add them back to the dict
+ if sshpubkey is not None:
+ del module_user['sshpubkeyfp']
+ module_user['ipasshpubkey'] = sshpubkey
+ return result
+
+
+def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
+ """
+ Return the public key fingerprint of a given public SSH key
+ in format "[fp] [user@host] (ssh-rsa)" where fp is of the format:
+ FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
+ for md5 or
+ SHA256:[base64]
+ for sha256
+ :param ssh_key:
+ :param hash_algo:
+ :return:
+ """
+ parts = ssh_key.strip().split()
+ if len(parts) == 0:
+ return None
+ key_type = parts[0]
+ key = base64.b64decode(parts[1].encode('ascii'))
+
+ if hash_algo == 'md5':
+ fp_plain = hashlib.md5(key).hexdigest()
+ key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
+ elif hash_algo == 'sha256':
+ fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=')
+ key_fp = 'SHA256:{fp}'.format(fp=fp_plain)
+ if len(parts) < 3:
+ return "%s (%s)" % (key_fp, key_type)
+ else:
+ user_host = parts[2]
+ return "%s %s (%s)" % (key_fp, user_host, key_type)
+
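+# Illustrative sketch (hypothetical key material): for 'ssh-rsa AAAAB3Nza... pinky@acme',
+# the default sha256 algorithm yields 'SHA256:<base64-digest> pinky@acme (ssh-rsa)',
+# while hash_algo='md5' yields 'FB:0C:...:D7 pinky@acme (ssh-rsa)'.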
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['uid']
+ nsaccountlock = state == 'disabled'
+
+ module_user = get_user_dict(displayname=module.params.get('displayname'),
+ krbpasswordexpiration=module.params.get('krbpasswordexpiration'),
+ givenname=module.params.get('givenname'),
+ loginshell=module.params['loginshell'],
+ mail=module.params['mail'], sn=module.params['sn'],
+ sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
+ telephonenumber=module.params['telephonenumber'], title=module.params['title'],
+ userpassword=module.params['password'],
+ gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'),
+ homedirectory=module.params.get('homedirectory'),
+ userauthtype=module.params.get('userauthtype'))
+
+ update_password = module.params.get('update_password')
+ ipa_user = client.user_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_user:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_add(name=name, item=module_user)
+ else:
+ if update_password == 'on_create':
+ module_user.pop('userpassword', None)
+ diff = get_user_diff(client, ipa_user, module_user)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_mod(name=name, item=module_user)
+ else:
+ if ipa_user:
+ changed = True
+ if not module.check_mode:
+ client.user_del(name)
+
+ return changed, ipa_user
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(displayname=dict(type='str'),
+ givenname=dict(type='str'),
+ update_password=dict(type='str', default="always",
+ choices=['always', 'on_create'],
+ no_log=False),
+ krbpasswordexpiration=dict(type='str', no_log=False),
+ loginshell=dict(type='str'),
+ mail=dict(type='list', elements='str'),
+ sn=dict(type='str'),
+ uid=dict(type='str', required=True, aliases=['name']),
+ gidnumber=dict(type='str'),
+ uidnumber=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ sshpubkey=dict(type='list', elements='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ telephonenumber=dict(type='list', elements='str'),
+ title=dict(type='str'),
+ homedirectory=dict(type='str'),
+ userauthtype=dict(type='str',
+ choices=['password', 'radius', 'otp', 'pkinit', 'hardened']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = UserIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+    # If sshpubkey is defined as None then module.params['sshpubkey'] is [None]. IPA itself returns None (not a list).
+    # Therefore a small check here to replace list(None) by None. Otherwise get_user_diff() would report sshpubkey
+    # as different, which should be avoided.
+ if module.params['sshpubkey'] is not None:
+ if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
+ module.params['sshpubkey'] = None
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py
new file mode 100644
index 00000000..3376b8c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Juan Manuel Parrilla <jparrill@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_vault
+author: Juan Manuel Parrilla (@jparrill)
+short_description: Manage FreeIPA vaults
+description:
+- Add, modify and delete vaults and secret vaults.
+- KRA service should be enabled to use this module.
+options:
+ cn:
+ description:
+ - Vault name.
+    - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ ipavaulttype:
+ description:
+ - Vault types are based on security level.
+ default: "symmetric"
+ choices: ["asymmetric", "standard", "symmetric"]
+ aliases: ["vault_type"]
+ type: str
+ ipavaultpublickey:
+ description:
+ - Public key.
+ aliases: ["vault_public_key"]
+ type: str
+ ipavaultsalt:
+ description:
+ - Vault Salt.
+ aliases: ["vault_salt"]
+ type: str
+ username:
+ description:
+ - Any user can own one or more user vaults.
+ - Mutually exclusive with service.
+ aliases: ["user"]
+ type: list
+ elements: str
+ service:
+ description:
+ - Any service can own one or more service vaults.
+ - Mutually exclusive with user.
+ type: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ replace:
+ description:
+    - Force replacement of the existing vault on the IPA server.
+ type: bool
+    default: false
+ validate_certs:
+ description:
+ - Validate IPA server certificates.
+ type: bool
+ default: true
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure vault is present
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: false
+
+- name: Ensure vault is present for Admin user
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure vault is absent
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Modify vault if already exists
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ description: "Vault for test"
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ replace: True
+
+- name: Get vault info if already exists
+ community.general.ipa_vault:
+ name: vault01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+vault:
+ description: Vault as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VaultIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(VaultIPAClient, self).__init__(module, host, port, protocol)
+
+ def vault_find(self, name):
+ return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name})
+
+ def vault_add_internal(self, name, item):
+ return self._post_json(method='vault_add_internal', name=name, item=item)
+
+ def vault_mod_internal(self, name, item):
+ return self._post_json(method='vault_mod_internal', name=name, item=item)
+
+ def vault_del(self, name):
+ return self._post_json(method='vault_del', name=name)
+
+
+def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None):
+ vault = {}
+
+ if description is not None:
+ vault['description'] = description
+ if vault_type is not None:
+ vault['ipavaulttype'] = vault_type
+ if vault_salt is not None:
+ vault['ipavaultsalt'] = vault_salt
+ if vault_public_key is not None:
+ vault['ipavaultpublickey'] = vault_public_key
+ if service is not None:
+ vault['service'] = service
+ return vault
+
+
+def get_vault_diff(client, ipa_vault, module_vault, module):
+ return client.get_diff(ipa_data=ipa_vault, module_data=module_vault)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ user = module.params['username']
+ replace = module.params['replace']
+
+ module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'],
+ vault_salt=module.params['ipavaultsalt'],
+ vault_public_key=module.params['ipavaultpublickey'],
+ service=module.params['service'])
+ ipa_vault = client.vault_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_vault:
+ # New vault
+ changed = True
+ if not module.check_mode:
+ ipa_vault = client.vault_add_internal(name, item=module_vault)
+ else:
+ # Already exists
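+            # Note: without replace=true an existing vault is left untouched, so the
+            # module can also be used to simply fetch vault info (see EXAMPLES).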
+ if replace:
+ diff = get_vault_diff(client, ipa_vault, module_vault, module)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_vault.get(key)
+ client.vault_mod_internal(name=name, item=data)
+
+ else:
+ if ipa_vault:
+ changed = True
+ if not module.check_mode:
+ client.vault_del(name)
+
+ return changed, client.vault_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ ipavaulttype=dict(type='str', default='symmetric',
+ choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']),
+ ipavaultsalt=dict(type='str', aliases=['vault_salt']),
+ ipavaultpublickey=dict(type='str', aliases=['vault_public_key']),
+ service=dict(type='str'),
+                         replace=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ username=dict(type='list', elements='str', aliases=['user']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['username', 'service']])
+
+ client = VaultIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, vault = ensure(module, client)
+ module.exit_json(changed=changed, vault=vault)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py
new file mode 100644
index 00000000..dcdc5ef8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ipify_facts
+short_description: Retrieve the public IP of your internet gateway
+description:
+  - If you are behind NAT and need to know the public IP of your internet
+    gateway, this module can retrieve it from the ipify.org API.
+author:
+- René Moser (@resmo)
+options:
+ api_url:
+ description:
+ - URL of the ipify.org API service.
+    - C(?format=json) will be appended automatically.
+ type: str
+ default: https://api.ipify.org/
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ type: int
+ default: 10
+ validate_certs:
+ description:
+    - When set to C(no), SSL certificates will not be validated.
+ type: bool
+ default: yes
+notes:
+ - Visit https://www.ipify.org to get more information.
+'''
+
+EXAMPLES = r'''
+# Gather IP facts from ipify.org
+- name: Get my public IP
+ community.general.ipify_facts:
+
+# Gather IP facts from your own ipify service endpoint with a custom timeout
+- name: Get my public IP
+ community.general.ipify_facts:
+ api_url: http://api.example.com/ipify
+ timeout: 20
+'''
+
+RETURN = r'''
+---
+ipify_public_ip:
+ description: Public IP of the internet gateway.
+ returned: success
+ type: str
+ sample: 1.2.3.4
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class IpifyFacts(object):
+
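+    # NOTE: this class relies on the module-level 'module' global that main()
+    # sets up before instantiating it.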
+ def __init__(self):
+ self.api_url = module.params.get('api_url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'ipify_public_ip': None
+ }
+ (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
+
+ if not response:
+            module.fail_json(msg="No valid response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
+
+ data = json.loads(to_text(response.read()))
+ result['ipify_public_ip'] = data.get('ip')
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_url=dict(type='str', default='https://api.ipify.org/'),
+ timeout=dict(type='int', default=10),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipify_facts = IpifyFacts().run()
+ ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
+ module.exit_json(**ipify_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py
new file mode 100644
index 00000000..f4186cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Aleksei Kostiuk <unitoff@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipinfoio_facts
+short_description: "Retrieve IP geolocation facts of a host's IP address"
+description:
+ - "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
+author: "Aleksei Kostiuk (@akostyuk)"
+options:
+ timeout:
+ description:
+ - HTTP connection timeout in seconds
+ required: false
+ default: 10
+ type: int
+ http_agent:
+ description:
+ - Set http user agent
+ required: false
+ default: "ansible-ipinfoio-module/0.0.1"
+ type: str
+notes:
+ - "Check http://ipinfo.io/ for more information"
+'''
+
+EXAMPLES = '''
+# Retrieve geolocation data of a host's IP address
+- name: Get IP geolocation data
+ community.general.ipinfoio_facts:
+'''
+
+RETURN = '''
+ansible_facts:
+ description: "Dictionary of ip geolocation facts for a host's IP address"
+ returned: changed
+ type: complex
+ contains:
+ ip:
+ description: "Public IP address of a host"
+ type: str
+ sample: "8.8.8.8"
+ hostname:
+ description: Domain name
+ type: str
+ sample: "google-public-dns-a.google.com"
+ country:
+ description: ISO 3166-1 alpha-2 country code
+ type: str
+ sample: "US"
+ region:
+ description: State or province name
+ type: str
+ sample: "California"
+ city:
+ description: City name
+ type: str
+ sample: "Mountain View"
+ loc:
+ description: Latitude and Longitude of the location
+ type: str
+ sample: "37.3860,-122.0838"
+ org:
+      description: "Organization name"
+ type: str
+ sample: "AS3356 Level 3 Communications, Inc."
+ postal:
+ description: Postal code
+ type: str
+ sample: "94035"
+'''
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import fetch_url
+
+
+USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
+
+
+class IpinfoioFacts(object):
+
+ def __init__(self, module):
+ self.url = 'https://ipinfo.io/json'
+ self.timeout = module.params.get('timeout')
+ self.module = module
+
+ def get_geo_data(self):
+ response, info = fetch_url(self.module, self.url, force=True, # NOQA
+ timeout=self.timeout)
+ try:
+            # a bare comparison never raises; assert makes the status check effective
+            assert info['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg='Could not get {0} page, '
+ 'check for connectivity!'.format(self.url))
+ else:
+ try:
+ content = response.read()
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(
+ msg='Failed to parse the ipinfo.io response: '
+ '{0} {1}'.format(self.url, content))
+ else:
+ return result
+
+
+def main():
+ module = AnsibleModule( # NOQA
+ argument_spec=dict(
+ http_agent=dict(default=USER_AGENT),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipinfoio = IpinfoioFacts(module)
+ ipinfoio_result = dict(
+ changed=False, ansible_facts=ipinfoio.get_geo_data())
+ module.exit_json(**ipinfoio_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py
new file mode 100644
index 00000000..6509ca21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of order of boot devices
+description:
+  - Use this module to manage the order of boot devices.
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ bootdev:
+ description:
+      - Set the boot device to use on next reboot.
+ - "The choices for the device are:
+ - network -- Request network boot
+ - floppy -- Boot from floppy
+ - hd -- Boot from hard drive
+ - safe -- Boot from hard drive, requesting 'safe mode'
+ - optical -- boot from CD/DVD/BD drive
+ - setup -- Boot into setup utility
+ - default -- remove any IPMI directed boot device request"
+ required: true
+ choices:
+ - network
+ - floppy
+ - hd
+ - safe
+ - optical
+ - setup
+ - default
+ state:
+ description:
+      - Whether to ensure that the given boot device is set.
+      - "The choices for the state are:
+            - present -- Ensure the given boot device is requested for the next boot
+            - absent -- Ensure the given boot device is not requested for the next boot"
+ default: present
+ choices: [ present, absent ]
+ persistent:
+ description:
+ - If set, ask that system firmware uses this device beyond next boot.
+ Be aware many systems do not honor this.
+ type: bool
+ default: 'no'
+ uefiboot:
+ description:
+ - If set, request UEFI boot explicitly.
+ Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+ In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ type: bool
+ default: 'no'
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+bootdev:
+ description: The boot device name which will be used beyond next boot.
+ returned: success
+ type: str
+ sample: default
+persistent:
+ description: If True, system firmware will use this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+uefimode:
+ description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+'''
+
+EXAMPLES = '''
+- name: Ensure bootdevice is HD
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: hd
+
+- name: Ensure bootdevice is not Network
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: network
+ state: absent
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']),
+ persistent=dict(default=False, type='bool'),
+ uefiboot=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ bootdev = module.params['bootdev']
+ persistent = module.params['persistent']
+ uefiboot = module.params['uefiboot']
+ request = dict()
+
+ if state == 'absent' and bootdev == 'default':
+ module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+ current = ipmi_cmd.get_bootdev()
+        # uefimode may not be supported by the BMC, so use the desired value as default
+ current.setdefault('uefimode', uefiboot)
+ if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
+ request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
+ elif state == 'absent' and current['bootdev'] == bootdev:
+ request = dict(bootdev='default')
+ else:
+ module.exit_json(changed=False, **current)
+
+ if module.check_mode:
+ response = dict(bootdev=request['bootdev'])
+ else:
+ response = ipmi_cmd.set_bootdev(**request)
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ if 'persist' in request:
+ response['persistent'] = request['persist']
+ if 'uefiboot' in request:
+ response['uefimode'] = request['uefiboot']
+
+ module.exit_json(changed=True, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py
new file mode 100644
index 00000000..47840154
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py
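+        # Illustrative (hypothetical record): 'arecord host.example.com 127.0.0.1 -set ttl=3600;container=ZoneOne'
+        # becomes 'list arecord host.example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne'.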
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_power
+short_description: Power management for machine
+description:
+ - Use this module for power management
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ state:
+ description:
+      - Whether to ensure that the machine is in the desired state.
+ - "The choices for state are:
+ - on -- Request system turn on
+ - off -- Request system turn off without waiting for OS to shutdown
+ - shutdown -- Have system request OS proper shutdown
+ - reset -- Request system reset without waiting for OS
+ - boot -- If system is off, then 'on', else 'reset'"
+ choices: ['on', 'off', shutdown, reset, boot]
+ required: true
+ timeout:
+ description:
+ - Maximum number of seconds before interrupt request.
+ default: 300
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+powerstate:
+ description: The current power state of the machine.
+ returned: success
+ type: str
+  sample: 'on'
+'''
+
+EXAMPLES = '''
+- name: Ensure machine is powered on
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+    state: 'on'
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(default=300, type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+
+ current = ipmi_cmd.get_power()
+ if current['powerstate'] != state:
+ response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout)
+ changed = True
+ else:
+ response = current
+ changed = False
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ module.exit_json(changed=changed, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py
new file mode 100644
index 00000000..56475268
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py
@@ -0,0 +1,649 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables_state
+short_description: Save iptables state into a file or restore it from a file
+version_added: '1.1.0'
+author: quidame (@quidame)
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP
+ packet filter rules in the Linux kernel.
+ - This module handles the saving and/or loading of rules. This is the same
+ as the behaviour of the C(iptables-save) and C(iptables-restore) (or
+ C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this
+ module uses internally.
+  - Modifying the state of the firewall remotely may lead to losing access to
+    the host if the new ruleset contains a mistake. This module embeds a
+    rollback feature to avoid this, by telling the host to restore the previous
+    rules if a cookie is still there after a given delay, while the controller
+    keeps trying to remove this cookie on the host through a new connection.
+notes:
+  - The rollback feature is not a module option and depends on task
+    attributes. To enable it, the module must be played asynchronously, i.e.
+    by setting task attributes I(poll) to C(0), and I(async) to a value less
+    than or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the rollback
+    will still happen when it has to, but you will experience a connection
+    timeout instead of the more relevant information returned by the module
+    after its failure.
+ - This module supports I(check_mode).
+options:
+ counters:
+ description:
+ - Save or restore the values of all packet and byte counters.
+ - When C(true), the module is not idempotent.
+ type: bool
+ default: false
+ ip_version:
+ description:
+ - Which version of the IP protocol this module should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ modprobe:
+ description:
+ - Specify the path to the C(modprobe) program internally used by iptables
+ related commands to load kernel modules.
+ - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the
+ executable's path.
+ type: path
+ noflush:
+ description:
+ - For I(state=restored), ignored otherwise.
+ - If C(false), restoring iptables rules from a file flushes (deletes)
+ all previous contents of the respective table(s). If C(true), the
+ previous rules are left untouched (but policies are updated anyway,
+ for all built-in chains).
+ type: bool
+ default: false
+ path:
+ description:
+ - The file the iptables state should be saved to.
+ - The file the iptables state should be restored from.
+ type: path
+ required: yes
+ state:
+ description:
+ - Whether the firewall state should be saved (into a file) or restored
+ (from a file).
+ type: str
+ choices: [ saved, restored ]
+ required: yes
+ table:
+ description:
+ - When I(state=restored), restore only the named table even if the input
+ file contains other tables. Fail if the named table is not declared in
+ the file.
+ - When I(state=saved), restrict output to the specified table. If not
+ specified, output includes all active tables.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent instant failure in case
+ multiple instances of the program are running concurrently.
+ type: int
+requirements: [iptables, ip6tables]
+'''
+
+EXAMPLES = r'''
+# This will apply to all loaded/active IPv4 tables.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ state: saved
+ path: /etc/sysconfig/iptables
+
+# This will apply only to IPv6 filter table.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ ip_version: ipv6
+ table: filter
+ state: saved
+ path: /etc/iptables/rules.v6
+
+# This will load a state from a file, with a rollback in case of access loss
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will load new rules by appending them to the current ones
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ noflush: true
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will only retrieve information
+- name: Get current state of the firewall
+ community.general.iptables_state:
+ state: saved
+ path: /tmp/iptables
+ check_mode: yes
+ changed_when: false
+ register: iptables_state
+
+- name: Show current state of the firewall
+ ansible.builtin.debug:
+ var: iptables_state.initial_state
+'''
+
+RETURN = r'''
+applied:
+ description: Whether or not the wanted state has been successfully restored.
+ type: bool
+ returned: always
+ sample: true
+initial_state:
+ description: The current state of the firewall when module starts.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD ACCEPT [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+restored:
+  description: The state the module restored, whether or not it was finally applied.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT DROP [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
+ "-A INPUT -m conntrack --ctstate INVALID -j DROP",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "COMMIT",
+ "# Completed"
+ ]
+saved:
+ description: The iptables state the module saved.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+tables:
+  description: The tables we are interested in when the module starts.
+ type: dict
+ contains:
+ table:
+ description: Policies and rules for all chains of the named table.
+ type: list
+ elements: str
+ sample: |-
+ {
+ "filter": [
+ ":INPUT ACCEPT",
+ ":FORWARD ACCEPT",
+ ":OUTPUT ACCEPT",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "-A INPUT -j REJECT --reject-with icmp-host-prohibited"
+ ],
+ "nat": [
+ ":PREROUTING ACCEPT",
+ ":INPUT ACCEPT",
+ ":OUTPUT ACCEPT",
+ ":POSTROUTING ACCEPT"
+ ]
+ }
+ returned: always
+'''
+
+
+import re
+import os
+import time
+import tempfile
+import filecmp
+import shutil
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+IPTABLES = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+SAVE = dict(
+ ipv4='iptables-save',
+ ipv6='ip6tables-save',
+)
+
+RESTORE = dict(
+ ipv4='iptables-restore',
+ ipv6='ip6tables-restore',
+)
+
+TABLES = ['filter', 'mangle', 'nat', 'raw', 'security']
+
+
+def read_state(b_path):
+ '''
+ Read a file and store its content in a variable as a list.
+ '''
+ with open(b_path, 'r') as f:
+ text = f.read()
+ lines = text.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def write_state(b_path, lines, changed):
+ '''
+ Write given contents to the given path, and return changed status.
+ '''
+ # Populate a temporary file
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in lines:
+ f.write('%s\n' % line)
+
+ # Prepare to copy temporary file to the final destination
+ if not os.path.exists(b_path):
+ b_destdir = os.path.dirname(b_path)
+ destdir = to_native(b_destdir, errors='surrogate_or_strict')
+ if b_destdir and not os.path.exists(b_destdir) and not module.check_mode:
+ try:
+ os.makedirs(b_destdir)
+ except Exception as e:
+ module.fail_json(
+                    msg='Error creating %s. Error description: %s' % (destdir, to_native(e)),
+ initial_state=lines)
+ changed = True
+
+ elif not filecmp.cmp(tmpfile, b_path):
+ changed = True
+
+ # Do it
+ if changed and not module.check_mode:
+ try:
+ shutil.copyfile(tmpfile, b_path)
+ except Exception as e:
+ path = to_native(b_path, errors='surrogate_or_strict')
+ module.fail_json(
+                msg='Error saving state into %s. Error description: %s' % (path, to_native(e)),
+ initial_state=lines)
+
+ return changed
+
+
+def initialize_from_null_state(initializer, initcommand, table):
+ '''
+ This ensures iptables-state output is suitable for iptables-restore to roll
+ back to it, i.e. iptables-save output is not empty. This also works for the
+ iptables-nft-save alternative.
+ '''
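+    # Illustrative: for table='nat' the temporary file holds '*nat\nCOMMIT\n';
+    # loading it with iptables-restore materializes the (empty) table so that
+    # the subsequent iptables-save output is no longer empty.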
+ if table is None:
+ table = 'filter'
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ f.write('*%s\nCOMMIT\n' % table)
+
+ initializer.append(tmpfile)
+ (rc, out, err) = module.run_command(initializer, check_rc=True)
+ (rc, out, err) = module.run_command(initcommand, check_rc=True)
+ return (rc, out, err)
+
+
+def filter_and_format_state(string):
+ '''
+    Remove timestamps to ensure idempotence between runs, remove counters by
+    default, and return the result as a list.
+ '''
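+    # Illustrative: '# Generated by xtables-save v1.8.2 on Thu Jan  1 00:00:00 2021'
+    # becomes '# Generated by xtables-save v1.8.2', and counters such as '[1234:5678]'
+    # are normalized to '[0:0]' unless counters=true.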
+ string = re.sub('((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', '\\1', string)
+ if not module.params['counters']:
+ string = re.sub('[[][0-9]+:[0-9]+[]]', '[0:0]', string)
+ lines = string.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def per_table_state(command, state):
+ '''
+ Convert raw iptables-save output into usable datastructure, for reliable
+ comparisons between initial and final states.
+ '''
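+    # Illustrative: if the state lists '*filter' and '*nat', the result looks like
+    # {'filter': [':INPUT ACCEPT', '-A INPUT -i lo -j ACCEPT', ...], 'nat': [...]},
+    # with counters, headers and COMMIT lines stripped.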
+ tables = dict()
+ for t in TABLES:
+ COMMAND = list(command)
+ if '*%s' % t in state.splitlines():
+ COMMAND.extend(['--table', t])
+ (rc, out, err) = module.run_command(COMMAND, check_rc=True)
+ out = re.sub('(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, '', out)
+ out = re.sub(' *[[][0-9]+:[0-9]+[]] *', '', out)
+ table = out.splitlines()
+ while '' in table:
+ table.remove('')
+ tables[t] = table
+ return (tables)
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ state=dict(type='str', choices=['saved', 'restored'], required=True),
+ table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ noflush=dict(type='bool', default=False),
+ counters=dict(type='bool', default=False),
+ modprobe=dict(type='path'),
+ ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'),
+ wait=dict(type='int'),
+ _timeout=dict(type='int'),
+ _back=dict(type='path'),
+ ),
+ required_together=[
+ ['_timeout', '_back'],
+ ],
+ supports_check_mode=True,
+ )
+
+ # We'll parse iptables-restore stderr
+ module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
+
+ path = module.params['path']
+ state = module.params['state']
+ table = module.params['table']
+ noflush = module.params['noflush']
+ counters = module.params['counters']
+ modprobe = module.params['modprobe']
+ ip_version = module.params['ip_version']
+ wait = module.params['wait']
+ _timeout = module.params['_timeout']
+ _back = module.params['_back']
+
+ bin_iptables = module.get_bin_path(IPTABLES[ip_version], True)
+ bin_iptables_save = module.get_bin_path(SAVE[ip_version], True)
+ bin_iptables_restore = module.get_bin_path(RESTORE[ip_version], True)
+
+ os.umask(0o077)
+ changed = False
+ COMMANDARGS = []
+ INITCOMMAND = [bin_iptables_save]
+ INITIALIZER = [bin_iptables_restore]
+ TESTCOMMAND = [bin_iptables_restore, '--test']
+
+ if counters:
+ COMMANDARGS.append('--counters')
+
+ if table is not None:
+ COMMANDARGS.extend(['--table', table])
+
+ if wait is not None:
+ TESTCOMMAND.extend(['--wait', '%s' % wait])
+
+ if modprobe is not None:
+ b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict')
+ if not os.path.exists(b_modprobe):
+ module.fail_json(msg="modprobe %s not found" % modprobe)
+ if not os.path.isfile(b_modprobe):
+ module.fail_json(msg="modprobe %s not a file" % modprobe)
+ if not os.access(b_modprobe, os.R_OK):
+ module.fail_json(msg="modprobe %s not readable" % modprobe)
+ if not os.access(b_modprobe, os.X_OK):
+ module.fail_json(msg="modprobe %s not executable" % modprobe)
+ COMMANDARGS.extend(['--modprobe', modprobe])
+ INITIALIZER.extend(['--modprobe', modprobe])
+ INITCOMMAND.extend(['--modprobe', modprobe])
+ TESTCOMMAND.extend(['--modprobe', modprobe])
+
+ SAVECOMMAND = list(COMMANDARGS)
+ SAVECOMMAND.insert(0, bin_iptables_save)
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if state == 'restored':
+ if not os.path.exists(b_path):
+ module.fail_json(msg="Source %s not found" % path)
+ if not os.path.isfile(b_path):
+ module.fail_json(msg="Source %s not a file" % path)
+ if not os.access(b_path, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % path)
+ state_to_restore = read_state(b_path)
+ else:
+ cmd = ' '.join(SAVECOMMAND)
+
+ (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True)
+
+    # The issue comes when wanting to restore state from empty iptables-save
+    # output... what happens when, say:
+    # - no table is specified, and iptables-save's output is only the nat table;
+    # - we give filter's ruleset to iptables-restore, which locks ourselves out
+    #   of the host;
+    # then trying to roll the iptables state back to the previous (working)
+    # setup doesn't override the current filter table because no filter table
+    # is stored in the backup! So we have to ensure the tables to be restored
+    # have a backup in case of rollback.
+ if table is None:
+ if state == 'restored':
+ for t in TABLES:
+ if '*%s' % t in state_to_restore:
+ if len(stdout) == 0 or '*%s' % t not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, t)
+ elif len(stdout) == 0:
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, 'filter')
+
+ elif state == 'restored' and '*%s' % table not in state_to_restore:
+ module.fail_json(msg="Table %s to restore not defined in %s" % (table, path))
+
+ elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, table)
+
+ initial_state = filter_and_format_state(stdout)
+ if initial_state is None:
+ module.fail_json(msg="Unable to initialize firewall from NULL state.")
+
+ # Depending on the value of 'table', initref_state may differ from
+ # initial_state.
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_before = per_table_state(SAVECOMMAND, stdout)
+ initref_state = filter_and_format_state(stdout)
+
+ if state == 'saved':
+ changed = write_state(b_path, initref_state, changed)
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ saved=initref_state)
+
+ #
+ # All remaining code is for state=restored
+ #
+
+ MAINCOMMAND = list(COMMANDARGS)
+ MAINCOMMAND.insert(0, bin_iptables_restore)
+
+ if wait is not None:
+ MAINCOMMAND.extend(['--wait', '%s' % wait])
+
+ if _back is not None:
+ b_back = to_bytes(_back, errors='surrogate_or_strict')
+ garbage = write_state(b_back, initref_state, changed)
+ BACKCOMMAND = list(MAINCOMMAND)
+ BACKCOMMAND.append(_back)
+
+ if noflush:
+ MAINCOMMAND.append('--noflush')
+
+ MAINCOMMAND.append(path)
+ cmd = ' '.join(MAINCOMMAND)
+
+ TESTCOMMAND = list(MAINCOMMAND)
+ TESTCOMMAND.insert(1, '--test')
+ error_msg = "Source %s is not suitable for input to %s" % (path, os.path.basename(bin_iptables_restore))
+
+ # Due to a bug in iptables-nft-restore --test, we have to validate tables
+ # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003).
+ for t in tables_before:
+ testcommand = list(TESTCOMMAND)
+ testcommand.extend(['--table', t])
+ (rc, stdout, stderr) = module.run_command(testcommand)
+
+ if 'Another app is currently holding the xtables lock' in stderr:
+ error_msg = stderr
+
+ if rc != 0:
+ cmd = ' '.join(testcommand)
+ module.fail_json(
+ msg=error_msg,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ if module.check_mode:
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in initial_state:
+ f.write('%s\n' % line)
+
+ if filecmp.cmp(tmpfile, b_path):
+ restored_state = initial_state
+ else:
+ restored_state = state_to_restore
+
+ else:
+        # Leave the action plugin enough time to retrieve the async status of
+        # the module in case of bad option type/value and the like.
+ if _back is not None:
+ b_starter = to_bytes('%s.starter' % _back, errors='surrogate_or_strict')
+ while True:
+ if os.path.exists(b_starter):
+ os.remove(b_starter)
+ break
+ else:
+ time.sleep(0.01)
+ continue
+
+ (rc, stdout, stderr) = module.run_command(MAINCOMMAND)
+ if 'Another app is currently holding the xtables lock' in stderr:
+ module.fail_json(
+ msg=stderr,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ restored_state = filter_and_format_state(stdout)
+
+ if restored_state != initref_state and restored_state != initial_state:
+ if module.check_mode:
+ changed = True
+ else:
+ tables_after = per_table_state(SAVECOMMAND, stdout)
+ if tables_after != tables_before:
+ changed = True
+
+ if _back is None or module.check_mode:
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # The rollback implementation currently needs:
+ # Here:
+ # * test existence of the backup file, exit with success if it doesn't exist
+ # * otherwise, restore iptables from this file and return failure
+ # Action plugin:
+ # * try to remove the backup file
+    # * wait until the async task is finished and retrieve its final status
+ # * modify it and return the result
+ # Task:
+    # * task attribute 'async' set to the same value as (or lower than) the
+    #   ansible timeout
+ # * task attribute 'poll' equals 0
+ #
+ for x in range(_timeout):
+ if os.path.exists(b_back):
+ time.sleep(1)
+ continue
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # Here we are: for whatever reason, but probably due to the current ruleset,
+ # the action plugin (i.e. on the controller) was unable to remove the backup
+ # cookie, so we restore initial state from it.
+ (rc, stdout, stderr) = module.run_command(BACKCOMMAND, check_rc=True)
+ os.remove(b_back)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_rollback = per_table_state(SAVECOMMAND, stdout)
+
+ msg = (
+ "Failed to confirm state restored from %s after %ss. "
+ "Firewall has been rolled back to its initial state." % (path, _timeout)
+ )
+
+ module.fail_json(
+ changed=(tables_before != tables_rollback),
+ msg=msg,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
new file mode 100644
index 00000000..355c7034
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Christian Wollinger <cwollinger@web.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ipwcli_dns
+
+short_description: Manage DNS Records for Ericsson IPWorks via ipwcli
+
+version_added: '0.2.0'
+
+description:
+ - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records."
+
+requirements:
+ - ipwcli (installed on Ericsson IPWorks)
+
+notes:
+ - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli.
+
+options:
+ dnsname:
+ description:
+ - Name of the record.
+ required: true
+ type: str
+ type:
+ description:
+ - Type of the record.
+ required: true
+ type: str
+ choices: [ NAPTR, SRV, A, AAAA ]
+ container:
+ description:
+ - Sets the container zone for the record.
+ required: true
+ type: str
+ address:
+ description:
+ - The IP address for the A or AAAA record.
+      - Required for C(type=A) or C(type=AAAA).
+ type: str
+ ttl:
+ description:
+ - Sets the TTL of the record.
+ type: int
+ default: 3600
+ state:
+ description:
+ - Whether the record should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ priority:
+ description:
+ - Sets the priority of the SRV record.
+ type: int
+ default: 10
+ weight:
+ description:
+ - Sets the weight of the SRV record.
+ type: int
+ default: 10
+ port:
+ description:
+ - Sets the port of the SRV record.
+      - Required for C(type=SRV).
+ type: int
+ target:
+ description:
+ - Sets the target of the SRV record.
+      - Required for C(type=SRV).
+ type: str
+ order:
+ description:
+ - Sets the order of the NAPTR record.
+      - Required for C(type=NAPTR).
+ type: int
+ preference:
+ description:
+ - Sets the preference of the NAPTR record.
+      - Required for C(type=NAPTR).
+ type: int
+ flags:
+ description:
+      - Sets one of the possible flags of the NAPTR record.
+      - Required for C(type=NAPTR).
+ type: str
+ choices: ['S', 'A', 'U', 'P']
+ service:
+ description:
+ - Sets the service of the NAPTR record.
+      - Required for C(type=NAPTR).
+ type: str
+ replacement:
+ description:
+ - Sets the replacement of the NAPTR record.
+      - Required for C(type=NAPTR).
+ type: str
+ username:
+ description:
+ - Username to login on ipwcli.
+ type: str
+ required: true
+ password:
+ description:
+ - Password to login on ipwcli.
+ type: str
+ required: true
+
+author:
+ - Christian Wollinger (@cwollinger)
+'''
+
+EXAMPLES = '''
+- name: Create A record
+ community.general.ipwcli_dns:
+ dnsname: example.com
+ type: A
+ container: ZoneOne
+ address: 127.0.0.1
+
+- name: Remove SRV record if exists
+ community.general.ipwcli_dns:
+ dnsname: _sip._tcp.test.example.com
+ type: SRV
+ container: ZoneOne
+ ttl: 100
+ state: absent
+ target: example.com
+ port: 5060
+
+- name: Create NAPTR record
+ community.general.ipwcli_dns:
+ dnsname: test.example.com
+ type: NAPTR
+ preference: 10
+ container: ZoneOne
+ ttl: 100
+ order: 10
+ service: 'SIP+D2T'
+ replacement: '_sip._tcp.test.example.com.'
+ flags: S
+'''
+
+RETURN = '''
+record:
+  description: The created record from the input params.
+ type: str
+ returned: always
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+class ResourceRecord(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.dnsname = module.params['dnsname']
+ self.dnstype = module.params['type']
+ self.container = module.params['container']
+ self.address = module.params['address']
+ self.ttl = module.params['ttl']
+ self.state = module.params['state']
+ self.priority = module.params['priority']
+ self.weight = module.params['weight']
+ self.port = module.params['port']
+ self.target = module.params['target']
+ self.order = module.params['order']
+ self.preference = module.params['preference']
+ self.flags = module.params['flags']
+ self.service = module.params['service']
+ self.replacement = module.params['replacement']
+ self.user = module.params['username']
+ self.password = module.params['password']
+
+ def create_naptrrecord(self):
+ # create NAPTR record with the given params
+ record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"'
+ % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement))
+ return record
+
+ def create_srvrecord(self):
+ # create SRV record with the given params
+ record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s'
+ % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target))
+ return record
+
+ def create_arecord(self):
+ # create A record with the given params
+ if self.dnstype == 'AAAA':
+ record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+ else:
+ record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+
+ return record
+
+ def list_record(self, record):
+ # check if the record exists via list on ipwcli
+ search = 'list %s' % (record.replace(';', '&&').replace('set', 'where'))
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=search)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or
+ ('NAPTRRecord %s' % self.dnsname in out and rc == 0)):
+ return True, rc, out, err
+
+ return False, rc, out, err
+
+ def deploy_record(self, record):
+ # create the record on ipworks via ipwcli
+ stdin = 'create %s' % (record)
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) created.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record creation failed', stderr=out)
+
+ def delete_record(self, record):
+ # delete the record on ipworks via ipwcli
+ stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where'))
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) were updated.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record deletion failed', stderr=out)
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ dnsname=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']),
+ container=dict(type='str', required=True),
+ address=dict(type='str', required=False),
+ ttl=dict(type='int', required=False, default=3600),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priority=dict(type='int', required=False, default=10),
+ weight=dict(type='int', required=False, default=10),
+ port=dict(type='int', required=False),
+ target=dict(type='str', required=False),
+ order=dict(type='int', required=False),
+ preference=dict(type='int', required=False),
+ flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']),
+ service=dict(type='str', required=False),
+ replacement=dict(type='str', required=False),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True)
+ )
+
+ # define result
+ result = dict(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ record=''
+ )
+
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_if=[
+ ['type', 'A', ['address']],
+ ['type', 'AAAA', ['address']],
+ ['type', 'SRV', ['port', 'target']],
+ ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']],
+ ],
+ supports_check_mode=True
+ )
+
+ user = ResourceRecord(module)
+
+ if user.dnstype == 'NAPTR':
+ record = user.create_naptrrecord()
+ elif user.dnstype == 'SRV':
+ record = user.create_srvrecord()
+ elif user.dnstype == 'A' or user.dnstype == 'AAAA':
+ record = user.create_arecord()
+
+ found, rc, out, err = user.list_record(record)
+
+ if found and user.state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.delete_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ elif not found and user.state == 'present':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.deploy_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ else:
+ result['changed'] = False
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
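
The list_record() and delete_record() methods above derive their ipwcli queries mechanically from the "create" form of the record: the parameter separator ";" becomes "&&" and the "set" keyword becomes "where". A standalone illustration of that rewrite (the record values are made up):

record = 'arecord example.com 127.0.0.1 -set ttl=3600;container=ZoneOne'

list_query = 'list %s' % record.replace(';', '&&').replace('set', 'where')
delete_query = 'delete %s' % record.replace(';', '&&').replace('set', 'where')

print(list_query)    # list arecord example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne
print(delete_query)  # delete arecord example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne

Note that the plain string replace would also rewrite any other occurrence of "set" in the record, so record names containing that substring would produce malformed queries.
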
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py
new file mode 100644
index 00000000..1c050fc1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: irc
+short_description: Send a message to an IRC channel or a nick
+description:
+ - Send a message to an IRC channel or a nick. This is a very simplistic implementation.
+options:
+ server:
+ type: str
+ description:
+ - IRC server name/address
+ default: localhost
+ port:
+ type: int
+ description:
+ - IRC server port number
+ default: 6667
+ nick:
+ type: str
+ description:
+ - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
+ default: ansible
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ topic:
+ type: str
+ description:
+ - Set the channel topic
+ color:
+ type: str
+ description:
+ - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
+ Added 11 more colors in version 2.0.
+ default: "none"
+ choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray", "light_gray"]
+ aliases: [colour]
+ channel:
+ type: str
+ description:
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+ nick_to:
+ type: list
+ elements: str
+ description:
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ key:
+ type: str
+ description:
+ - Channel key
+ passwd:
+ type: str
+ description:
+ - Server password
+ timeout:
+ type: int
+ description:
+      - Timeout to use while waiting for successful registration and join
+        messages; this is to prevent an endless loop.
+ default: 30
+ use_ssl:
+ description:
+ - Designates whether TLS/SSL should be used when connecting to the IRC server
+ type: bool
+ default: 'no'
+ part:
+ description:
+ - Designates whether user should part from channel after sending message or not.
+ Useful for when using a faux bot and not wanting join/parts between messages.
+ type: bool
+ default: 'yes'
+ style:
+ type: str
+ description:
+      - Text style for the message. Note that italic does not work on some clients.
+ choices: [ "bold", "underline", "reverse", "italic", "none" ]
+ default: none
+
+# informational: requirements for nodes
+requirements: [ socket ]
+author:
+ - "Jan-Piet Mens (@jpmens)"
+ - "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to an IRC channel from nick ansible
+ community.general.irc:
+ server: irc.example.net
+    channel: '#t1'
+ msg: Hello world
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ nick_to:
+ - nick1
+ - nick2
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+'''
+
+# ===========================================
+# IRC module support methods.
+#
+
+import re
+import socket
+import ssl
+import time
+import traceback
+
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
+ nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+ '''send message to IRC'''
+ nick_to = [] if nick_to is None else nick_to
+
+ colornumbers = {
+ 'white': "00",
+ 'black': "01",
+ 'blue': "02",
+ 'green': "03",
+ 'red': "04",
+ 'brown': "05",
+ 'purple': "06",
+ 'orange': "07",
+ 'yellow': "08",
+ 'light_green': "09",
+ 'teal': "10",
+ 'light_cyan': "11",
+ 'light_blue': "12",
+ 'pink': "13",
+ 'gray': "14",
+ 'light_gray': "15",
+ }
+
+ stylechoices = {
+ 'bold': "\x02",
+ 'underline': "\x1F",
+ 'reverse': "\x16",
+ 'italic': "\x1D",
+ }
+
+ try:
+ styletext = stylechoices[style]
+ except Exception:
+ styletext = ""
+
+ try:
+ colornumber = colornumbers[color]
+ colortext = "\x03" + colornumber
+ except Exception:
+ colortext = ""
+
+ message = styletext + colortext + msg
+
+ irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if use_ssl:
+ irc = ssl.wrap_socket(irc)
+ irc.connect((server, int(port)))
+
+ if passwd:
+ irc.send(to_bytes('PASS %s\r\n' % passwd))
+ irc.send(to_bytes('NICK %s\r\n' % nick))
+ irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
+ motd = ''
+ start = time.time()
+ while 1:
+ motd += to_native(irc.recv(1024))
+ # The server might send back a shorter nick than we specified (due to NICKLEN),
+ # so grab that and use it from now on (assuming we find the 00[1-4] response).
+ match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+ if match:
+ nick = match.group('nick')
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC server welcome response')
+ time.sleep(0.5)
+
+ if channel:
+ if key:
+ irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
+ else:
+ irc.send(to_bytes('JOIN %s\r\n' % channel))
+
+ join = ''
+ start = time.time()
+ while 1:
+ join += to_native(irc.recv(1024))
+ if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC JOIN response')
+ time.sleep(0.5)
+
+ if topic is not None:
+ irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
+ time.sleep(1)
+
+ if nick_to:
+ for target_nick in nick_to:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (target_nick, message)))
+ if channel:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
+ time.sleep(1)
+ if part:
+ if channel:
+ irc.send(to_bytes('PART %s\r\n' % channel))
+ irc.send(to_bytes('QUIT\r\n'))
+ time.sleep(1)
+ irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(type='int', default=6667),
+ nick=dict(default='ansible'),
+ nick_to=dict(required=False, type='list', elements='str'),
+ msg=dict(required=True),
+ color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
+ style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+ channel=dict(required=False),
+ key=dict(no_log=True),
+ topic=dict(),
+ passwd=dict(no_log=True),
+ timeout=dict(type='int', default=30),
+ part=dict(type='bool', default=True),
+ use_ssl=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ required_one_of=[['channel', 'nick_to']]
+ )
+
+ server = module.params["server"]
+ port = module.params["port"]
+ nick = module.params["nick"]
+ nick_to = module.params["nick_to"]
+ msg = module.params["msg"]
+ color = module.params["color"]
+ channel = module.params["channel"]
+ topic = module.params["topic"]
+ if topic and not channel:
+ module.fail_json(msg="When topic is specified, a channel is required.")
+ key = module.params["key"]
+ passwd = module.params["passwd"]
+ timeout = module.params["timeout"]
+ use_ssl = module.params["use_ssl"]
+ part = module.params["part"]
+ style = module.params["style"]
+
+ try:
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ except Exception as e:
+ module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, channel=channel, nick=nick,
+ msg=msg)
+
+
+if __name__ == '__main__':
+ main()
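
The color and style handling in send_msg() above is plain mIRC in-band formatting: a \x02 byte toggles bold and \x03 followed by a two-digit number selects a color. A minimal standalone sketch of the same prefixing (the channel and message text are examples, and the helper name is invented):

colornumbers = {'red': "04", 'green': "03"}

def format_irc(msg, color=None, bold=False):
    prefix = "\x02" if bold else ""
    if color in colornumbers:
        prefix += "\x03" + colornumbers[color]
    return prefix + msg

wire = 'PRIVMSG %s :%s\r\n' % ('#t1', format_irc('deploy done', color='red', bold=True))
print(repr(wire))  # 'PRIVMSG #t1 :\x02\x0304deploy done\r\n'
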
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py
new file mode 100644
index 00000000..bf6359b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2020, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: iso_create
+short_description: Generate ISO file with specified files or folders
+description:
+ - This module is used to generate ISO file with specified path of files.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+requirements:
+- "pycdlib"
+- "python >= 2.7"
+version_added: '0.2.0'
+
+options:
+ src_files:
+ description:
+ - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
+    - Will fail if a specified file or folder in C(src_files) does not exist on the local machine.
+ - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and
+ underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path
+ names are limited to 255 characters.'
+ type: list
+ required: yes
+ elements: path
+ dest_iso:
+ description:
+ - The absolute path with file name of the new generated ISO file on local machine.
+    - Will create intermediate folders when they do not exist.
+ type: path
+ required: yes
+ interchange_level:
+ description:
+    - The ISO9660 interchange level to use; it dictates the rules on the names of files.
+    - Levels C(1), C(2), C(3) and C(4) are supported.
+    - The default is level C(1), which is the most conservative; level C(3) is recommended.
+ - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension.
+ type: int
+ default: 1
+ choices: [1, 2, 3, 4]
+ vol_ident:
+ description:
+ - The volume identification string to use on the new generated ISO image.
+ type: str
+ rock_ridge:
+ description:
+ - Whether to make this ISO have the Rock Ridge extensions or not.
+    - Valid values are C(1.09), C(1.10) or C(1.12); the specified Rock Ridge version is added to the ISO.
+    - If unsure, set C(1.09) to ensure maximum compatibility.
+    - If not specified, no Rock Ridge extension is added to the ISO.
+ type: str
+ choices: ['1.09', '1.10', '1.12']
+ joliet:
+ description:
+ - Support levels and valid values are C(1), C(2), or C(3).
+ - Level C(3) is by far the most common.
+ - If not specified, then no Joliet support is added.
+ type: int
+ choices: [1, 2, 3]
+ udf:
+ description:
+ - Whether to add UDF support to this ISO.
+ - If set to C(True), then version 2.60 of the UDF spec is used.
+ - If not specified or set to C(False), then no UDF support is added.
+ type: bool
+ default: False
+'''
+
+EXAMPLES = r'''
+- name: Create an ISO file
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ interchange_level: 3
+
+- name: Create an ISO file with Rock Ridge extension
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ rock_ridge: 1.09
+
+- name: Create an ISO file with Joliet support
+ community.general.iso_create:
+ src_files:
+ - ./windows_config/Autounattend.xml
+ dest_iso: ./test.iso
+ interchange_level: 3
+ joliet: 3
+ vol_ident: WIN_AUTOINSTALL
+'''
+
+RETURN = r'''
+source_file:
+ description: Configured source files or directories list.
+ returned: on success
+ type: list
+ elements: path
+ sample: ["/path/to/file.txt", "/path/to/folder"]
+created_iso:
+ description: Created iso file path.
+ returned: on success
+ type: str
+ sample: "/path/to/test.iso"
+interchange_level:
+ description: Configured interchange level.
+ returned: on success
+ type: int
+ sample: 3
+vol_ident:
+ description: Configured volume identification string.
+ returned: on success
+ type: str
+ sample: "OEMDRV"
+joliet:
+ description: Configured Joliet support level.
+ returned: on success
+ type: int
+ sample: 3
+rock_ridge:
+ description: Configured Rock Ridge version.
+ returned: on success
+ type: str
+ sample: "1.09"
+udf:
+ description: Configured UDF support.
+ returned: on success
+ type: bool
+ sample: False
+'''
+
+import os
+import traceback
+
+PYCDLIB_IMP_ERR = None
+try:
+ import pycdlib
+ HAS_PYCDLIB = True
+except ImportError:
+ PYCDLIB_IMP_ERR = traceback.format_exc()
+ HAS_PYCDLIB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot,
+ # followed by a maximum 3 character extension, followed by a semicolon and a version
+ file_name = os.path.basename(file_path)
+ if '.' not in file_name:
+ file_in_iso_path = file_path.upper() + '.;1'
+ else:
+ file_in_iso_path = file_path.upper() + ';1'
+ if rock_ridge:
+ rr_name = file_name
+ if use_joliet:
+ joliet_path = file_path
+ if use_udf:
+ udf_path = file_path
+ try:
+ iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err)))
+
+
+def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ iso_dir_path = dir_path.upper()
+ if rock_ridge:
+ rr_name = os.path.basename(dir_path)
+ if use_joliet:
+ joliet_path = iso_dir_path
+ if use_udf:
+ udf_path = iso_dir_path
+ try:
+ iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to directory %s to ISO file due to %s" % (dir_path, to_native(err)))
+
+
+def main():
+ argument_spec = dict(
+ src_files=dict(type='list', required=True, elements='path'),
+ dest_iso=dict(type='path', required=True),
+ interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ vol_ident=dict(type='str'),
+ rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']),
+ joliet=dict(type='int', choices=[1, 2, 3]),
+ udf=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if not HAS_PYCDLIB:
+ module.fail_json(missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR)
+
+ src_file_list = module.params.get('src_files')
+ if not src_file_list:
+ module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.')
+ for src_file in src_file_list:
+ if not os.path.exists(src_file):
+ module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file)
+
+ dest_iso = module.params.get('dest_iso')
+ if not dest_iso:
+ module.fail_json(msg='Please specify the absolute path of the new created ISO file using dest_iso parameter.')
+
+ dest_iso_dir = os.path.dirname(dest_iso)
+ if dest_iso_dir and not os.path.exists(dest_iso_dir):
+ # will create intermediate dir for new ISO file
+ try:
+ os.makedirs(dest_iso_dir)
+ except OSError as err:
+ module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err)))
+
+ volume_id = module.params.get('vol_ident')
+ if volume_id is None:
+ volume_id = ''
+ inter_level = module.params.get('interchange_level')
+ rock_ridge = module.params.get('rock_ridge')
+ use_joliet = module.params.get('joliet')
+ use_udf = None
+ if module.params['udf']:
+ use_udf = '2.60'
+
+ result = dict(
+ changed=False,
+ source_file=src_file_list,
+ created_iso=dest_iso,
+ interchange_level=inter_level,
+ vol_ident=volume_id,
+ rock_ridge=rock_ridge,
+ joliet=use_joliet,
+ udf=use_udf
+ )
+ if not module.check_mode:
+ iso_file = pycdlib.PyCdlib()
+ iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf)
+
+ for src_file in src_file_list:
+ # if a directory is specified, walk it to add its files and subdirectories
+ if os.path.isdir(src_file):
+ dir_list = []
+ file_list = []
+ src_file = src_file.rstrip('/')
+ dir_name = os.path.basename(src_file)
+ add_directory(module, iso_file=iso_file, dir_path='/' + dir_name, rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+
+ # get dir list and file list
+ for path, dirs, files in os.walk(src_file):
+ for filename in files:
+ file_list.append(os.path.join(path, filename))
+ for sub_dir in dirs:
+ dir_list.append(os.path.join(path, sub_dir))
+ for new_dir in dir_list:
+ add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1],
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+ for new_file in file_list:
+ add_file(module, iso_file=iso_file, src_file=new_file,
+ file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+ # if a single file is specified, add it directly under '/' in the ISO
+ else:
+ add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file),
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+
+ iso_file.write(dest_iso)
+ iso_file.close()
+
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
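
The naming restrictions called out in the documentation show up concretely in add_file() above: ISO9660 level-1 paths are uppercased and carry a ";1" version suffix, with a dot inserted for extension-less names. A small standalone rerun of that mangling (the paths are examples only):

import os

def iso9660_path(file_path):
    file_name = os.path.basename(file_path)
    if '.' not in file_name:
        return file_path.upper() + '.;1'
    return file_path.upper() + ';1'

print(iso9660_path('/testfolder/readme'))      # /TESTFOLDER/README.;1
print(iso9660_path('/testfolder/config.yml'))  # /TESTFOLDER/CONFIG.YML;1
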
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py
new file mode 100644
index 00000000..0c73ac96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# Copyright: (c) 2016, Matt Robinson <git@nerdoftheherd.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Jeroen Hoekx (@jhoekx)
+- Matt Robinson (@ribbons)
+- Dag Wieers (@dagwieers)
+module: iso_extract
+short_description: Extract files from an ISO image
+description:
+- This module has two possible ways of operation.
+- If 7zip is installed on the system, this module extracts files from an ISO
+ into a temporary directory and copies files to a given destination,
+ if needed.
+- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
+ mounts the ISO image to a temporary location, and copies files to a given
+ destination, if needed.
+requirements:
+- Either 7z (from I(7zip) or I(p7zip) package)
+- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
+options:
+ image:
+ description:
+ - The ISO image to extract files from.
+ type: path
+ required: yes
+ aliases: [ path, src ]
+ dest:
+ description:
+ - The destination directory to extract files to.
+ type: path
+ required: yes
+ files:
+ description:
+ - A list of files to extract from the image.
+ - Extracting directories does not work.
+ type: list
+ elements: str
+ required: yes
+ force:
+ description:
+    - If C(yes), the remote file will be replaced when its contents differ from the source.
+ - If C(no), the file will only be extracted and copied if the destination does not already exist.
+ - Alias C(thirsty) has been deprecated and will be removed in community.general 3.0.0.
+ type: bool
+ default: yes
+ aliases: [ thirsty ]
+ executable:
+ description:
+ - The path to the C(7z) executable to use for extracting files from the ISO.
+ type: path
+ default: '7z'
+notes:
+- Only the file checksum (content) is taken into account when extracting files
+  from the ISO image. If C(force=no), only the presence of the file is checked.
+- In Ansible 2.3 this module was using C(mount) and C(umount) commands only,
+ requiring root access. This is no longer needed with the introduction of 7zip
+ for extraction.
+'''
+
+EXAMPLES = r'''
+- name: Extract kernel and ramdisk from a LiveCD
+ community.general.iso_extract:
+ image: /tmp/rear-test.iso
+ dest: /tmp/virt-rear/
+ files:
+ - isolinux/kernel
+ - isolinux/initrd.cgz
+'''
+
+RETURN = r'''
+#
+'''
+
+import os.path
+import shutil
+import tempfile
+
+try: # python 3.3+
+ from shlex import quote
+except ImportError: # older python
+ from pipes import quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(type='path', required=True, aliases=['path', 'src']),
+ dest=dict(type='path', required=True),
+ files=dict(type='list', elements='str', required=True),
+ force=dict(type='bool', default=True, aliases=['thirsty']),
+ executable=dict(type='path'), # No default on purpose
+ ),
+ supports_check_mode=True,
+ )
+ image = module.params['image']
+ dest = module.params['dest']
+ files = module.params['files']
+ force = module.params['force']
+ executable = module.params['executable']
+
+ if module.params.get('thirsty'):
+ module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = dict(
+ changed=False,
+ dest=dest,
+ image=image,
+ )
+
+ # We want to know if the user provided it or not, so we set default here
+ if executable is None:
+ executable = '7z'
+
+ binary = module.get_bin_path(executable, None)
+
+ # When executable was provided and binary not found, warn user !
+ if module.params['executable'] is not None and not binary:
+ module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)
+
+ if not os.path.exists(dest):
+ module.fail_json(msg="Directory '%s' does not exist" % dest)
+
+    if not os.path.exists(image):
+ module.fail_json(msg="ISO image '%s' does not exist" % image)
+
+ result['files'] = []
+ extract_files = list(files)
+
+ if not force:
+ # Check if we have to process any files based on existence
+ for f in files:
+ dest_file = os.path.join(dest, os.path.basename(f))
+ if os.path.exists(dest_file):
+ result['files'].append(dict(
+ checksum=None,
+ dest=dest_file,
+ src=f,
+ ))
+ extract_files.remove(f)
+
+ if not extract_files:
+ module.exit_json(**result)
+
+ tmp_dir = tempfile.mkdtemp()
+
+ # Use 7zip when we have a binary, otherwise try to mount
+ if binary:
+ cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files]))
+ else:
+ cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ result.update(dict(
+ cmd=cmd,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ ))
+ shutil.rmtree(tmp_dir)
+
+ if binary:
+ module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
+ else:
+ module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)
+
+ try:
+ for f in extract_files:
+ tmp_src = os.path.join(tmp_dir, f)
+ if not os.path.exists(tmp_src):
+ module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)
+
+ src_checksum = module.sha1(tmp_src)
+
+ dest_file = os.path.join(dest, os.path.basename(f))
+
+ if os.path.exists(dest_file):
+ dest_checksum = module.sha1(dest_file)
+ else:
+ dest_checksum = None
+
+ result['files'].append(dict(
+ checksum=src_checksum,
+ dest=dest_file,
+ src=f,
+ ))
+
+ if src_checksum != dest_checksum:
+ if not module.check_mode:
+ shutil.copy(tmp_src, dest_file)
+
+ result['changed'] = True
+ finally:
+ if not binary:
+ module.run_command('umount "%s"' % tmp_dir)
+
+ shutil.rmtree(tmp_dir)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
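
The copy step above is idempotent because it keys off file checksums rather than timestamps. A hedged sketch of that core comparison outside the module, using SHA-1 as the module does (the helper names are invented):

import hashlib
import os
import shutil

def copy_if_changed(src, dest):
    def sha1(path):
        h = hashlib.sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                h.update(chunk)
        return h.hexdigest()

    if os.path.exists(dest) and sha1(dest) == sha1(src):
        return False          # identical contents: no change to report
    shutil.copy(src, dest)    # missing or different: copy and report a change
    return True
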
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py
new file mode 100644
index 00000000..68e2c593
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jabber
+short_description: Send a message to a Jabber user or chat room
+description:
+  - Send a message to a Jabber user or chat room.
+options:
+ user:
+ type: str
+ description:
+      - User to connect as.
+ required: true
+ password:
+ type: str
+ description:
+      - Password for the user to connect with.
+ required: true
+ to:
+ type: str
+ description:
+      - User ID or name of the room; when sending to a room, append a slash and your nick.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ host:
+ type: str
+ description:
+      - Host to connect to; overrides the domain from the user ID.
+ port:
+ type: int
+ description:
+      - Port to connect to; overrides the default.
+ default: 5222
+ encoding:
+ type: str
+ description:
+      - Message encoding.
+
+# informational: requirements for nodes
+requirements:
+ - python xmpp (xmpppy)
+author: "Brian Coca (@bcoca)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to a user
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: friend@example.net
+ msg: Ansible task finished
+
+- name: Send a message to a room
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: mychaps@conference.example.net/ansiblebot
+ msg: Ansible task finished
+
+- name: Send a message, specifying the host and port
+ community.general.jabber:
+ user: mybot@example.net
+ host: talk.example.net
+ port: 5223
+ password: secret
+ to: mychaps@example.net
+ msg: Ansible task finished
+'''
+
+import time
+import traceback
+
+HAS_XMPP = True
+XMPP_IMP_ERR = None
+try:
+ import xmpp
+except ImportError:
+ XMPP_IMP_ERR = traceback.format_exc()
+ HAS_XMPP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ password=dict(required=True, no_log=True),
+ to=dict(required=True),
+ msg=dict(required=True),
+ host=dict(required=False),
+ port=dict(required=False, default=5222, type='int'),
+ encoding=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_XMPP:
+ module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR)
+
+ jid = xmpp.JID(module.params['user'])
+ user = jid.getNode()
+ server = jid.getDomain()
+ port = module.params['port']
+ password = module.params['password']
+ try:
+ to, nick = module.params['to'].split('/', 1)
+ except ValueError:
+ to, nick = module.params['to'], None
+
+ if module.params['host']:
+ host = module.params['host']
+ else:
+ host = server
+ if module.params['encoding']:
+ xmpp.simplexml.ENCODING = module.params['encoding']
+
+ msg = xmpp.protocol.Message(body=module.params['msg'])
+
+ try:
+ conn = xmpp.Client(server, debug=[])
+ if not conn.connect(server=(host, port)):
+ module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
+ if not conn.auth(user, password, 'Ansible'):
+ module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
+ # some old servers require this, also the sleep following send
+ conn.sendInitPresence(requestRoster=0)
+
+ if nick: # sending to room instead of user, need to join
+ msg.setType('groupchat')
+ msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
+ join = xmpp.Presence(to=module.params['to'])
+ join.setTag('x', namespace='http://jabber.org/protocol/muc')
+ conn.send(join)
+ time.sleep(1)
+ else:
+ msg.setType('chat')
+
+ msg.setTo(to)
+ if not module.check_mode:
+ conn.send(msg)
+ time.sleep(1)
+ conn.disconnect()
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
+
+
+if __name__ == '__main__':
+ main()
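
The only routing decision the module makes comes from the to parameter: a "/" suffix carries the nick and flags the target as a chat room rather than a user. A standalone rerun of that split (the addresses are examples):

def parse_target(to_param):
    try:
        to, nick = to_param.split('/', 1)
    except ValueError:
        to, nick = to_param, None
    return to, nick

print(parse_target('friend@example.net'))                  # ('friend@example.net', None)
print(parse_target('mychaps@conference.example.net/bot'))  # ('mychaps@conference.example.net', 'bot')
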
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py
new file mode 100644
index 00000000..7333397b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, RSD Services S.A
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: java_cert
+short_description: Uses keytool to import/remove key from java keystore (cacerts)
+description:
+ - This is a wrapper module around keytool, which can be used to import/remove
+ certificates from a given java keystore.
+options:
+ cert_url:
+ description:
+ - Basic URL to fetch SSL certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: str
+ cert_port:
+ description:
+ - Port to connect to URL.
+ - This will be used to create server URL:PORT.
+ type: int
+ default: 443
+ cert_path:
+ description:
+ - Local path to load certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: path
+ cert_alias:
+ description:
+ - Imported certificate alias.
+ - The alias is used when checking for the presence of a certificate in the keystore.
+ type: str
+ trust_cacert:
+ description:
+ - Trust imported cert as CAcert.
+ type: bool
+ default: False
+ version_added: '0.2.0'
+ pkcs12_path:
+ description:
+ - Local path to load PKCS12 keystore from.
+ type: path
+ pkcs12_password:
+ description:
+ - Password for importing from PKCS12 keystore.
+ type: str
+ default: ''
+ pkcs12_alias:
+ description:
+ - Alias in the PKCS12 keystore.
+ type: str
+ keystore_path:
+ description:
+ - Path to keystore.
+ type: path
+ keystore_pass:
+ description:
+ - Keystore password.
+ type: str
+ required: true
+ keystore_create:
+ description:
+ - Create keystore if it does not exist.
+ type: bool
+ default: false
+ keystore_type:
+ description:
+ - Keystore type (JCEKS, JKS).
+ type: str
+ executable:
+ description:
+    - Path to the keytool binary; if not provided, it is searched for in PATH.
+ type: str
+ default: keytool
+ state:
+ description:
+ - Defines action which can be either certificate import or removal.
+ type: str
+ choices: [ absent, present ]
+ default: present
+author:
+- Adam Hamsik (@haad)
+'''
+
+EXAMPLES = r'''
+- name: Import SSL certificate from google.com to a given cacerts keystore
+ community.general.java_cert:
+ cert_url: google.com
+ cert_port: 443
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ state: present
+
+- name: Remove certificate with given alias from a keystore
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ executable: /usr/lib/jvm/jre7/bin/keytool
+ state: absent
+
+- name: Import trusted CA from SSL certificate
+ community.general.java_cert:
+ cert_path: /opt/certs/rootca.crt
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+ cert_alias: LE_RootCA
+ trust_cacert: True
+
+- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ cert_alias: default
+ keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import SSL certificate to JCEKS keystore
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ pkcs12_alias: default
+ pkcs12_password: somepass
+ cert_alias: default
+ keystore_path: /opt/someapp/security/keystore.jceks
+ keystore_type: "JCEKS"
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+'''
+
+RETURN = r'''
+msg:
+ description: Output from stdout of keytool command after execution of given command.
+ returned: success
+ type: str
+ sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
+
+rc:
+ description: Keytool command execution return value.
+ returned: success
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done.
+ returned: success
+ type: str
+ sample: "keytool -importcert -noprompt -keystore"
+'''
+
+import os
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_keystore_type(keystore_type):
+ ''' Return the -storetype option when a custom keystore type is given '''
+ if keystore_type:
+ return " -storetype '%s'" % keystore_type
+ return ''
+
+
+def check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Check if certificate with alias is present in keystore
+ located at keystore_path '''
+ test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ (check_rc, _, _) = module.run_command(test_cmd)
+ if check_rc == 0:
+ return True
+ return False
+
+
+def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from URL into keystore located at keystore_path '''
+
+ https_proxy = os.getenv("https_proxy")
+ no_proxy = os.getenv("no_proxy")
+
+ proxy_opts = ''
+ if https_proxy is not None:
+ (proxy_host, proxy_port) = https_proxy.split(':')
+ proxy_opts = "-J-Dhttps.proxyHost=%s -J-Dhttps.proxyPort=%s" % (proxy_host, proxy_port)
+
+ if no_proxy is not None:
+ # For Java's nonProxyHosts property, items are separated by '|',
+ # and patterns have to start with "*".
+ non_proxy_hosts = no_proxy.replace(',', '|')
+ non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts)
+
+ # The property name is http.nonProxyHosts, there is no
+ # separate setting for HTTPS.
+ proxy_opts += " -J-Dhttp.nonProxyHosts='%s'" % non_proxy_hosts
+
+ fetch_cmd = "%s -printcert -rfc -sslserver %s %s:%d" % (executable, proxy_opts, url, port)
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, alias,
+ get_keystore_type(keystore_type))
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Fetch SSL certificate from remote host.
+ (_, fetch_out, _) = module.run_command(fetch_cmd, check_rc=True)
+
+ # Use remote certificate from remote host and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ data=fetch_out,
+ check_rc=False)
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd,
+ error=import_err)
+
+
+def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -file '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, path, alias,
+ get_keystore_type(keystore_type))
+
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def import_pkcs12_path(module, executable, path, keystore_path, keystore_pass, pkcs12_pass, pkcs12_alias, alias, keystore_type):
+ ''' Import pkcs12 from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = ("%s -importkeystore -noprompt -destkeystore '%s' -srcstoretype PKCS12 "
+ "-deststorepass '%s' -destkeypass '%s' -srckeystore '%s' -srcstorepass '%s' "
+ "-srcalias '%s' -destalias '%s' %s") % (executable, keystore_path, keystore_pass,
+ keystore_pass, path, pkcs12_pass, pkcs12_alias,
+ alias, get_keystore_type(keystore_type))
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Delete certificate identified with alias from keystore on keystore_path '''
+ del_cmd = ("%s -delete -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ # Delete SSL certificate from keystore
+ (del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True)
+
+ diff = {'before': '%s\n' % alias, 'after': None}
+
+ module.exit_json(changed=True, msg=del_out,
+ rc=del_rc, cmd=del_cmd, stdout=del_out,
+ error=del_err, diff=diff)
+
+
+def test_keytool(module, executable):
+ ''' Test if keytool is actually executable or not '''
+ module.run_command("%s" % executable, check_rc=True)
+
+
+def test_keystore(module, keystore_path):
+ ''' Check if we can access keystore as file or not '''
+ if keystore_path is None:
+ keystore_path = ''
+
+ if not os.path.isfile(keystore_path):
+ # Keystore does not exist and keystore_create is not set, so fail
+ module.fail_json(changed=False, msg="Module requires existing keystore at keystore_path '%s'" % keystore_path)
+
+
+def main():
+ argument_spec = dict(
+ cert_url=dict(type='str'),
+ cert_path=dict(type='path'),
+ pkcs12_path=dict(type='path'),
+ pkcs12_password=dict(type='str', no_log=True),
+ pkcs12_alias=dict(type='str'),
+ cert_alias=dict(type='str'),
+ cert_port=dict(type='int', default=443),
+ keystore_path=dict(type='path'),
+ keystore_pass=dict(type='str', required=True, no_log=True),
+ trust_cacert=dict(type='bool', default=False),
+ keystore_create=dict(type='bool', default=False),
+ keystore_type=dict(type='str'),
+ executable=dict(type='str', default='keytool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[['cert_path', 'cert_url', 'pkcs12_path']],
+ required_together=[['keystore_path', 'keystore_pass']],
+ mutually_exclusive=[
+ ['cert_url', 'cert_path', 'pkcs12_path']
+ ],
+ supports_check_mode=True,
+ )
+
+ url = module.params.get('cert_url')
+ path = module.params.get('cert_path')
+ port = module.params.get('cert_port')
+
+ pkcs12_path = module.params.get('pkcs12_path')
+ pkcs12_pass = module.params.get('pkcs12_password', '')
+ pkcs12_alias = module.params.get('pkcs12_alias', '1')
+
+ cert_alias = module.params.get('cert_alias') or url
+ trust_cacert = module.params.get('trust_cacert')
+
+ keystore_path = module.params.get('keystore_path')
+ keystore_pass = module.params.get('keystore_pass')
+ keystore_create = module.params.get('keystore_create')
+ keystore_type = module.params.get('keystore_type')
+ executable = module.params.get('executable')
+ state = module.params.get('state')
+
+ if path and not cert_alias:
+ module.fail_json(changed=False,
+ msg="Using local path import from %s requires alias argument."
+ % keystore_path)
+
+ test_keytool(module, executable)
+
+ if not keystore_create:
+ test_keystore(module, keystore_path)
+
+ cert_present = check_cert_present(module, executable, keystore_path,
+ keystore_pass, cert_alias, keystore_type)
+
+ if state == 'absent' and cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
+
+ elif state == 'present' and not cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if pkcs12_path:
+ import_pkcs12_path(module, executable, pkcs12_path, keystore_path,
+ keystore_pass, pkcs12_pass, pkcs12_alias, cert_alias, keystore_type)
+
+ if path:
+ import_cert_path(module, executable, path, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ if url:
+ import_cert_url(module, executable, url, port, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
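
import_cert_url() above bridges two proxy conventions: the environment's comma-separated no_proxy list and Java's pipe-separated http.nonProxyHosts property, where leading-dot suffix patterns need an explicit "*" wildcard. The translation in isolation (the no_proxy value is an example):

import re

def to_non_proxy_hosts(no_proxy):
    hosts = no_proxy.replace(',', '|')
    return re.sub(r'(^|\|)\.', r'\1*.', hosts)

print(to_non_proxy_hosts('.example.com,localhost,.internal'))
# *.example.com|localhost|*.internal
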
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py
new file mode 100644
index 00000000..db37bdee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Guillaume Grossetie <ggrossetie@yuzutech.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: java_keystore
+short_description: Create or delete a Java keystore in JKS format
+description:
+ - Create or delete a Java keystore in JKS format for a given certificate.
+options:
+ name:
+ type: str
+ description:
+ - Name of the certificate.
+ required: true
+ certificate:
+ type: str
+ description:
+ - Certificate that should be used to create the key store.
+ required: true
+ private_key:
+ type: str
+ description:
+ - Private key that should be used to create the key store.
+ required: true
+ private_key_passphrase:
+ description:
+ - Pass phrase for reading the private key, if required.
+ type: str
+ required: false
+ version_added: '0.2.0'
+ password:
+ type: str
+ description:
+ - Password that should be used to secure the key store.
+ required: true
+ dest:
+ type: path
+ description:
+ - Absolute path where the jks should be generated.
+ required: true
+ owner:
+ description:
+      - Name of the user that should own the jks file.
+ required: false
+ group:
+ description:
+      - Name of the group that should own the jks file.
+ required: false
+ mode:
+ description:
+      - Mode the file should have.
+ required: false
+ force:
+ description:
+ - Key store will be created even if it already exists.
+ required: false
+ type: bool
+ default: 'no'
+requirements: [openssl, keytool]
+author: Guillaume Grossetie (@Mogztter)
+extends_documentation_fragment:
+- files
+
+'''
+
+EXAMPLES = '''
+- name: Create a key store for the given certificate (inline)
+ community.general.java_keystore:
+ name: example
+ certificate: |
+ -----BEGIN CERTIFICATE-----
+ h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69
+ MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB
+ -----END CERTIFICATE-----
+ private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3
+ GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99
+ -----END RSA PRIVATE KEY-----
+ password: changeit
+ dest: /etc/security/keystore.jks
+
+- name: Create a key store for the given certificate (lookup)
+ community.general.java_keystore:
+ name: example
+ certificate: "{{lookup('file', '/path/to/certificate.crt') }}"
+ private_key: "{{lookup('file', '/path/to/private.key') }}"
+ password: changeit
+ dest: /etc/security/keystore.jks
+'''
+
+RETURN = '''
+msg:
+ description: Output from stdout of keytool/openssl command after execution of given command or an error.
+ returned: changed and failure
+ type: str
+ sample: "Unable to find the current certificate fingerprint in ..."
+
+rc:
+ description: keytool/openssl command execution return value
+ returned: changed and failure
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done
+ returned: changed and failure
+ type: str
+ sample: "openssl x509 -noout -in /tmp/cert.crt -fingerprint -sha256"
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+
+
+def read_certificate_fingerprint(module, openssl_bin, certificate_path):
+ current_certificate_fingerprint_cmd = [openssl_bin, "x509", "-noout", "-in", certificate_path, "-fingerprint", "-sha256"]
+ (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = run_commands(module, current_certificate_fingerprint_cmd)
+ if rc != 0:
+ return module.fail_json(msg=current_certificate_fingerprint_out,
+ err=current_certificate_fingerprint_err,
+ rc=rc,
+ cmd=current_certificate_fingerprint_cmd)
+
+ current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out)
+ if not current_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out,
+ rc=rc,
+ cmd=current_certificate_fingerprint_err
+ )
+
+ return current_certificate_match.group(1)
+
+
+def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password):
+ stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass", keystore_password, "-v"]
+ (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands(module, stored_certificate_fingerprint_cmd)
+ if rc != 0:
+ if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias not in stored_certificate_fingerprint_out:
+ return module.fail_json(msg=stored_certificate_fingerprint_out,
+ err=stored_certificate_fingerprint_err,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd)
+ else:
+ return None
+ else:
+ stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out)
+ if not stored_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd
+ )
+
+ return stored_certificate_match.group(1)
+
+
+def run_commands(module, cmd, data=None, check_rc=True):
+ return module.run_command(cmd, check_rc=check_rc, data=data)
+
+
+def create_file(path, content):
+ with open(path, 'w') as f:
+ f.write(content)
+ return path
+
+
+def create_tmp_certificate(module):
+ return create_file("/tmp/%s.crt" % module.params['name'], module.params['certificate'])
+
+
+def create_tmp_private_key(module):
+ return create_file("/tmp/%s.key" % module.params['name'], module.params['private_key'])
+
+
+def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias):
+ certificate_path = create_tmp_certificate(module)
+ try:
+ current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path)
+ stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass)
+ return current_certificate_fingerprint != stored_certificate_fingerprint
+ finally:
+ os.remove(certificate_path)
+
+
+def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ certificate_path = create_tmp_certificate(module)
+ private_key_path = create_tmp_private_key(module)
+ try:
+ if os.path.exists(keystore_path):
+ os.remove(keystore_path)
+
+ keystore_p12_path = "/tmp/keystore.p12"
+ if os.path.exists(keystore_p12_path):
+ os.remove(keystore_p12_path)
+
+ export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path,
+ "-inkey", private_key_path, "-out",
+ keystore_p12_path, "-passout", "stdin"]
+
+ # when keypass is provided, add -passin
+ cmd_stdin = ""
+ if keypass:
+ export_p12_cmd.append("-passin")
+ export_p12_cmd.append("stdin")
+ cmd_stdin = "%s\n" % keypass
+
+ cmd_stdin += "%s\n%s" % (password, password)
+ (rc, export_p12_out, export_p12_err) = run_commands(module, export_p12_cmd, data=cmd_stdin)
+ if rc != 0:
+ return module.fail_json(msg=export_p12_out,
+ rc=rc,
+ cmd=export_p12_cmd)
+
+ import_keystore_cmd = [keytool_bin, "-importkeystore",
+ "-destkeystore", keystore_path,
+ "-srckeystore", keystore_p12_path,
+ "-srcstoretype", "pkcs12",
+ "-alias", name,
+ "-deststorepass", password,
+ "-srcstorepass", password,
+ "-noprompt"]
+ (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd, data=None)
+ if rc == 0:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=True,
+ msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd,
+ stdout_lines=import_keystore_out)
+ else:
+ return module.fail_json(msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd)
+ finally:
+ os.remove(certificate_path)
+ os.remove(private_key_path)
+
+
+def update_jks_perm(module, keystore_path):
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=keystore_path)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = keystore_path
+ file_args = module.load_file_common_arguments(module.params)
+ module.set_fs_attributes_if_different(file_args, False)
+
+
+def process_jks(module):
+ name = module.params['name']
+ password = module.params['password']
+ keypass = module.params['private_key_passphrase']
+ keystore_path = module.params['dest']
+ force = module.params['force']
+ openssl_bin = module.get_bin_path('openssl', True)
+ keytool_bin = module.get_bin_path('keytool', True)
+
+ if os.path.exists(keystore_path):
+ if force:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name):
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if not module.check_mode:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=False)
+ else:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+
+
+class ArgumentSpec(object):
+ def __init__(self):
+ self.supports_check_mode = True
+ self.add_file_common_args = True
+ argument_spec = dict(
+ name=dict(required=True),
+ certificate=dict(required=True, no_log=True),
+ private_key=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ dest=dict(required=True, type='path'),
+ force=dict(required=False, default=False, type='bool'),
+ private_key_passphrase=dict(required=False, no_log=True, type='str')
+ )
+ self.argument_spec = argument_spec
+
+
+def main():
+ spec = ArgumentSpec()
+ module = AnsibleModule(
+ argument_spec=spec.argument_spec,
+ add_file_common_args=spec.add_file_common_args,
+ supports_check_mode=spec.supports_check_mode
+ )
+ process_jks(module)
+
+
+if __name__ == '__main__':
+ main()
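
The module's idempotence hinges on the fingerprint comparison above: openssl computes the SHA-256 fingerprint of the candidate certificate, keytool prints the one already stored under the alias, and the keystore is rebuilt only when they differ. Below is a minimal standalone sketch of that comparison outside the module machinery; the paths, alias and store password are placeholder values, and it assumes openssl and keytool on PATH plus Python 3.7+ for text-mode subprocess output.

import re
import subprocess

def openssl_fingerprint(cert_path):
    # openssl prints e.g. "SHA256 Fingerprint=AA:BB:..."; keep the part after "="
    out = subprocess.check_output(
        ["openssl", "x509", "-noout", "-in", cert_path,
         "-fingerprint", "-sha256"], text=True)
    match = re.search(r"=([\w:]+)", out)
    return match.group(1) if match else None

def keytool_fingerprint(keystore_path, alias, storepass):
    # "keytool -list -v" prints a "SHA256: AA:BB:..." line for the alias
    out = subprocess.check_output(
        ["keytool", "-list", "-alias", alias, "-keystore", keystore_path,
         "-storepass", storepass, "-v"], text=True)
    match = re.search(r"SHA256: ([\w:]+)", out)
    return match.group(1) if match else None

if __name__ == "__main__":
    changed = (openssl_fingerprint("/tmp/example.crt")
               != keytool_fingerprint("/tmp/example.jks", "example", "changeit"))
    print("keystore would be rebuilt:", changed)
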
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py
new file mode 100644
index 00000000..4c077a1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: jboss
+short_description: Deploy applications to JBoss
+description:
+ - Deploy applications to JBoss standalone using the filesystem.
+options:
+ deployment:
+ required: true
+ description:
+ - The name of the deployment.
+ type: str
+ src:
+ description:
+ - The remote path of the application ear or war to deploy.
+ - Required when I(state=present).
+ - Ignored when I(state=absent).
+ type: path
+ deploy_path:
+ default: /var/lib/jbossas/standalone/deployments
+ description:
+ - The location in the filesystem where the deployment scanner listens.
+ type: path
+ state:
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the application should be deployed or undeployed.
+ type: str
+notes:
+ - The JBoss standalone deployment-scanner has to be enabled in standalone.xml.
+ - The module waits until the I(deployment) file is deployed/undeployed by the deployment-scanner.
+ The waiting time depends on the scan-interval parameter in standalone.xml.
+ - Ensure no identically named application is deployed through the JBoss CLI.
+seealso:
+- name: WildFly reference
+ description: Complete reference of the WildFly documentation.
+ link: https://docs.wildfly.org
+author:
+ - Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r"""
+- name: Deploy a hello world application to the default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.0-SNAPSHOT.war
+ deployment: hello.war
+ state: present
+
+- name: Update the hello world application to the non-default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.1-SNAPSHOT.war
+ deploy_path: /opt/wildfly/deployment
+ deployment: hello.war
+ state: present
+
+- name: Undeploy the hello world application from the default deploy_path
+ community.general.jboss:
+ deployment: hello.war
+ state: absent
+"""
+
+RETURN = r""" # """
+
+import os
+import shutil
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+
+DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments'
+
+
+def is_deployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
+
+
+def is_undeployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
+
+
+def is_failed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path'),
+ deployment=dict(type='str', required=True),
+ deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH),
+ state=dict(type='str', choices=['absent', 'present'], default='present'),
+ ),
+ required_if=[('state', 'present', ('src',))],
+ supports_check_mode=True
+ )
+
+ result = dict(changed=False)
+
+ src = module.params['src']
+ deployment = module.params['deployment']
+ deploy_path = module.params['deploy_path']
+ state = module.params['state']
+
+ if not os.path.exists(deploy_path):
+ module.fail_json(msg="deploy_path does not exist.")
+
+ if state == 'absent' and src:
+ module.warn('Parameter src is ignored when state=absent')
+ elif state == 'present' and not os.path.exists(src):
+ module.fail_json(msg='Source file %s does not exist.' % src)
+
+ deployed = is_deployed(deploy_path, deployment)
+
+ # === when check_mode ===
+ if module.check_mode:
+ if state == 'present':
+ if not deployed:
+ result['changed'] = True
+
+ elif deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ result['changed'] = True
+
+ elif state == 'absent' and deployed:
+ result['changed'] = True
+
+ module.exit_json(**result)
+ # =======================
+
+ if state == 'present' and not deployed:
+ if is_failed(deploy_path, deployment):
+ # Clean up old failed deployment
+ os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
+
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
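+ # Poll until the deployment scanner reports a verdict through its marker files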
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'present' and deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
+ deployed = False
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'absent' and deployed:
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ while deployed:
+ deployed = not is_undeployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Undeploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
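
All three transitions in main() above rely on the same marker-file protocol: the module copies or removes the archive, then polls the deploy directory for the scanner's verdict (NAME.deployed on success, NAME.failed on error). Here is a sketch of that polling loop in isolation; unlike the module, which polls indefinitely, this version gives up after a timeout, and the 60-second default is an assumption that should exceed the scanner's scan-interval.

import os
import time

def wait_for_deployment(deploy_path, deployment, timeout=60):
    # Poll for the marker files the JBoss deployment scanner writes
    # next to the copied archive.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment)):
            return True
        if os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment)):
            raise RuntimeError("Deploying %s failed." % deployment)
        time.sleep(1)
    raise RuntimeError("Scanner gave no verdict on %s within %ss" % (deployment, timeout))
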
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py
new file mode 100644
index 00000000..0e06b5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_job
+short_description: Manage Jenkins jobs
+description:
+ - Manage Jenkins jobs by using Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author: "Sergio Millan Rodriguez (@sermilrod)"
+options:
+ config:
+ type: str
+ description:
+ - Job configuration in XML format.
+ - Required if job does not yet exist.
+ - Mutually exclusive with C(enabled).
+ - Considered if C(state=present).
+ required: false
+ enabled:
+ description:
+ - Whether the job should be enabled or disabled.
+ - Mutually exclusive with C(config).
+ - Considered if C(state=present).
+ type: bool
+ required: false
+ name:
+ type: str
+ description:
+ - Name of the Jenkins job.
+ required: true
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ required: false
+ state:
+ type: str
+ description:
+ - Attribute that specifies whether the job has to be created or deleted.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ token:
+ type: str
+ description:
+ - API token used to authenticate with, as an alternative to I(password).
+ required: false
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ required: false
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a jenkins job using basic authentication
+ community.general.jenkins_job:
+ config: "{{ lookup('file', 'templates/test.xml') }}"
+ name: test
+ password: admin
+ url: http://localhost:8080
+ user: admin
+
+- name: Create a jenkins job using the token
+ community.general.jenkins_job:
+ config: "{{ lookup('template', 'templates/test.xml.j2') }}"
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ enabled: False
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: False
+ url: http://localhost:8080
+ user: admin
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the Jenkins job.
+ returned: success
+ type: str
+ sample: test-job
+state:
+ description: State of the Jenkins job.
+ returned: success
+ type: str
+ sample: present
+enabled:
+ description: Whether the Jenkins job is enabled or not.
+ returned: success
+ type: bool
+ sample: true
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: admin
+url:
+ description: URL used to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+'''
+
+import traceback
+import xml.etree.ElementTree as ET
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ python_jenkins_installed = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class JenkinsJob:
+
+ def __init__(self, module):
+ self.module = module
+
+ self.config = module.params.get('config')
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.state = module.params.get('state')
+ self.enabled = module.params.get('enabled')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ 'diff': {
+ 'before': "",
+ 'after': ""
+ }
+ }
+
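+ # Sentinel returned by get_job_status() for jobs that expose no status color (folders, for example)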
+ self.EXCL_STATE = "excluded state"
+
+ def get_jenkins_connection(self):
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc())
+
+ def get_job_status(self):
+ try:
+ response = self.server.get_job_info(self.name)
+ if "color" not in response:
+ return self.EXCL_STATE
+ else:
+ return to_native(response['color'])
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc())
+
+ def job_exists(self):
+ try:
+ return bool(self.server.job_exists(self.name))
+ except Exception as e:
+ self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_config(self):
+ return job_config_to_string(self.config)
+
+ def get_current_config(self):
+ return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
+
+ def has_config_changed(self):
+ # config is optional; if not provided we keep the current config as-is
+ if self.config is None:
+ return False
+
+ config_file = self.get_config()
+ machine_file = self.get_current_config()
+
+ self.result['diff']['after'] = config_file
+ self.result['diff']['before'] = machine_file
+
+ if machine_file != config_file:
+ return True
+ return False
+
+ def present_job(self):
+ if self.config is None and self.enabled is None:
+ self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
+
+ if not self.job_exists():
+ self.create_job()
+ else:
+ self.update_job()
+
+ def has_state_changed(self, status):
+ # Keep the current state if the 'enabled' parameter is not given
+ if self.enabled is None:
+ return False
+
+ if ((self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")):
+ return True
+ return False
+
+ def switch_state(self):
+ if self.enabled is False:
+ self.server.disable_job(self.name)
+ else:
+ self.server.enable_job(self.name)
+
+ def update_job(self):
+ try:
+ status = self.get_job_status()
+
+ # Handle job config
+ if self.has_config_changed():
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.server.reconfig_job(self.name, self.get_config())
+
+ # Handle job disable/enable
+ elif (status != self.EXCL_STATE and self.has_state_changed(status)):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.switch_state()
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def create_job(self):
+ if self.config is None:
+ self.module.fail_json(msg='missing required param: config')
+
+ self.result['changed'] = True
+ try:
+ config_file = self.get_config()
+ self.result['diff']['after'] = config_file
+ if not self.module.check_mode:
+ self.server.create_job(self.name, config_file)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def absent_job(self):
+ if self.job_exists():
+ self.result['changed'] = True
+ self.result['diff']['before'] = self.get_current_config()
+ if not self.module.check_mode:
+ try:
+ self.server.delete_job(self.name)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_result(self):
+ result = self.result
+ if self.job_exists():
+ result['enabled'] = self.get_job_status() != "disabled"
+ else:
+ result['enabled'] = None
+ return result
+
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def job_config_to_string(xml_str):
+ return ET.tostring(ET.fromstring(xml_str)).decode('ascii')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ config=dict(type='str', required=False),
+ name=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
+ enabled=dict(required=False, type='bool'),
+ token=dict(type='str', required=False, no_log=True),
+ url=dict(type='str', required=False, default="http://localhost:8080"),
+ user=dict(type='str', required=False)
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['config', 'enabled'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_job = JenkinsJob(module)
+
+ if module.params.get('state') == "present":
+ jenkins_job.present_job()
+ else:
+ jenkins_job.absent_job()
+
+ result = jenkins_job.get_result()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
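
The change detection in has_config_changed() works because job_config_to_string() round-trips both XML documents through ElementTree, so purely lexical differences such as an XML declaration or self-closing tags do not register as a change (whitespace between elements, however, still counts). A toy demonstration with made-up job configs:

import xml.etree.ElementTree as ET

def job_config_to_string(xml_str):
    return ET.tostring(ET.fromstring(xml_str)).decode('ascii')

local = '<?xml version="1.0" encoding="UTF-8"?><project><description></description></project>'
remote = '<project><description/></project>'

# Both serialize to '<project><description /></project>'
print(job_config_to_string(local) == job_config_to_string(remote))  # True
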
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+ - This module can be used to query information about Jenkins jobs that already exist.
+ - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter, if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter, if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+ - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria.
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
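
The filtering in get_jobs() composes two narrowing passes: a shell glob on the full job name via fnmatch, then an equality test on the status color. The same pipeline run against a fabricated job list (in Jenkins, blue means passing and red failing):

import fnmatch

jobs = [
    {"fullname": "deploy-prod", "color": "blue"},
    {"fullname": "deploy-staging", "color": "red"},
    {"fullname": "lint", "color": "blue"},
]

matched = [j for j in jobs if fnmatch.fnmatch(j["fullname"], "deploy-*")]
passing = [j for j in matched if j["color"] == "blue"]
print(passing)  # [{'fullname': 'deploy-prod', 'color': 'blue'}]
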
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+ - This module can be used to query information about Jenkins jobs that already exist.
+ - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter, if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter, if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+ - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria.
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
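
The validate_certs=False handling in get_jenkins_connection() works by swapping Python's default HTTPS context factory for an unverified one. python-jenkins exposes no per-connection toggle, so the patch is necessarily process-global: every later HTTPS connection in the interpreter skips verification, which is why the module applies it only on explicit request. A minimal sketch of the mechanism; the URL is a placeholder for a host with a self-signed certificate.

import ssl
from urllib.request import urlopen

# Process-global: http.client, urllib and python-jenkins all build their
# sockets from this default context factory.
ssl._create_default_https_context = ssl._create_unverified_context

# Certificate errors (self-signed, hostname mismatch) are now ignored.
print(urlopen("https://self-signed.example.com/").status)
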
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
new file mode 100644
index 00000000..e2adf7a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_plugin
+author: Jiri Tyr (@jtyr)
+short_description: Add or remove Jenkins plugin
+description:
+ - Ansible module which helps to manage Jenkins plugins.
+
+options:
+ group:
+ type: str
+ description:
+ - Name of the Jenkins group on the OS.
+ default: jenkins
+ jenkins_home:
+ type: path
+ description:
+ - Home directory of the Jenkins user.
+ default: /var/lib/jenkins
+ mode:
+ type: raw
+ description:
+ - File mode applied on versioned plugins.
+ default: '0644'
+ name:
+ type: str
+ description:
+ - Plugin name.
+ required: yes
+ owner:
+ type: str
+ description:
+ - Name of the Jenkins user on the OS.
+ default: jenkins
+ state:
+ type: str
+ description:
+ - Desired plugin state.
+ - If C(latest) is set, the check for a new version will be performed
+ every time. This is suitable for keeping the plugin up-to-date.
+ choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+ default: present
+ timeout:
+ type: int
+ description:
+ - Server connection timeout in seconds.
+ default: 30
+ updates_expiration:
+ type: int
+ description:
+ - Number of seconds after which a new copy of the I(update-center.json)
+ file is downloaded. This is used to avoid the need to download the
+ plugin to calculate its checksum when C(latest) is specified.
+ - Set it to C(0) if no cache file should be used. In that case, the
+ plugin file will always be downloaded to calculate its checksum when
+ C(latest) is specified.
+ default: 86400
+ updates_url:
+ type: str
+ description:
+ - URL of the Update Centre.
+ - Used as the base URL to download the plugins and the
+ I(update-center.json) JSON file.
+ default: https://updates.jenkins.io
+ url:
+ type: str
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ version:
+ type: str
+ description:
+ - Plugin version number.
+ - If this option is specified, all plugin dependencies must be installed
+ manually.
+ - It might take longer to verify that the correct version is installed.
+ This is especially true if a specific version number is specified.
+ - Quote the version to prevent it from being interpreted as a float. For
+ example, if C(1.20) were unquoted, it would become C(1.2).
+ with_dependencies:
+ description:
+ - Defines whether to install plugin dependencies.
+ - This option takes effect only if the I(version) is not defined.
+ type: bool
+ default: yes
+
+notes:
+ - Plugin installation should be run under root or the same user that owns
+ the plugin files on disk. Only if the plugin is not installed yet and
+ no version is specified is the installation performed through the API,
+ which requires only the Web UI credentials.
+ - It is necessary to notify the handler or call the I(service) module to
+ restart the Jenkins service after a new plugin has been installed.
+ - Pinning works only if the plugin is installed and the Jenkins service was
+ successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the I(url)
+ parameter to point to the Jenkins server. The module must be used on the
+ host where Jenkins runs as it needs direct access to the plugin files.
+extends_documentation_fragment:
+ - url
+ - files
+'''
+
+EXAMPLES = '''
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+
+- name: Install plugin without its dependencies
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ with_dependencies: no
+
+- name: Make sure the plugin is always up-to-date
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: latest
+
+- name: Install specific version of the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ version: "1.15"
+
+- name: Pin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: pinned
+
+- name: Unpin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: unpinned
+
+- name: Enable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: enabled
+
+- name: Disable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: disabled
+
+- name: Uninstall plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ state: absent
+
+#
+# Example of how to authenticate
+#
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ url_username: admin
+ url_password: p4ssw0rd
+ url: http://localhost:8888
+
+#
+# Example of a Play which handles Jenkins restarts during the state changes
+#
+- name: Jenkins Master play
+ hosts: jenkins-master
+ vars:
+ my_jenkins_plugins:
+ token-macro:
+ enabled: yes
+ build-pipeline-plugin:
+ version: "1.4.9"
+ pinned: no
+ enabled: yes
+ tasks:
+ - name: Install plugins without a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ register: my_jenkins_plugin_unversioned
+ when: >
+ 'version' not in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Install plugins with a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ version: "{{ item.value['version'] }}"
+ register: my_jenkins_plugin_versioned
+ when: >
+ 'version' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Initiate the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+
+ - name: Check if restart is required by any of the versioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_versioned.results }}"
+
+ - name: Check if restart is required by any of the unversioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_unversioned.results }}"
+
+ - name: Restart Jenkins if required
+ ansible.builtin.service:
+ name: jenkins
+ state: restarted
+ when: jenkins_restart_required
+
+ - name: Wait for Jenkins to start up
+ ansible.builtin.uri:
+ url: http://localhost:8080
+ status_code: 200
+ timeout: 5
+ register: jenkins_service_status
+ # Keep trying for 5 mins in 5 sec intervals
+ retries: 60
+ delay: 5
+ until: >
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
+ when: jenkins_restart_required
+
+ - name: Reset the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+ when: jenkins_restart_required
+
+ - name: Plugin pinning
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
+ when: >
+ 'pinned' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Plugin enabling
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
+ when: >
+ 'enabled' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+'''
+
+RETURN = '''
+plugin:
+ description: plugin name
+ returned: success
+ type: str
+ sample: build-pipeline-plugin
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+from ansible.module_utils.basic import AnsibleModule, to_bytes
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils._text import to_native
+import base64
+import binascii
+import hashlib
+import io
+import json
+import os
+import tempfile
+import time
+
+
+class JenkinsPlugin(object):
+ def __init__(self, module):
+ # To be able to call fail_json
+ self.module = module
+
+ # Shortcuts for the params
+ self.params = self.module.params
+ self.url = self.params['url']
+ self.timeout = self.params['timeout']
+
+ # Crumb
+ self.crumb = {}
+ # Cookie jar for crumb session
+ self.cookies = None
+
+ if self._csrf_enabled():
+ self.cookies = cookiejar.LWPCookieJar()
+ self.crumb = self._get_crumb()
+
+ # Get list of installed plugins
+ self._get_installed_plugins()
+
+ def _csrf_enabled(self):
+ csrf_data = self._get_json_data(
+ "%s/%s" % (self.url, "api/json"), 'CSRF')
+
+ if 'useCrumbs' not in csrf_data:
+ self.module.fail_json(
+ msg="Required fields not found in the Crumbs response.",
+ details=csrf_data)
+
+ return csrf_data['useCrumbs']
+
+ def _get_json_data(self, url, what, **kwargs):
+ # Get the JSON data
+ r = self._get_url_data(url, what, **kwargs)
+
+ # Parse the JSON data
+ try:
+ json_data = json.loads(to_native(r.read()))
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot parse %s JSON data." % what,
+ details=to_native(e))
+
+ return json_data
+
+ def _get_url_data(
+ self, url, what=None, msg_status=None, msg_exception=None,
+ **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ # Get the URL data
+ try:
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, cookies=self.cookies,
+ headers=self.crumb, **kwargs)
+
+ if info['status'] != 200:
+ self.module.fail_json(msg=msg_status, details=info['msg'])
+ except Exception as e:
+ self.module.fail_json(msg=msg_exception, details=to_native(e))
+
+ return response
+
+ def _get_crumb(self):
+ crumb_data = self._get_json_data(
+ "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+ if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+ ret = {
+ crumb_data['crumbRequestField']: crumb_data['crumb']
+ }
+ else:
+ self.module.fail_json(
+ msg="Required fields not found in the Crum response.",
+ details=crumb_data)
+
+ return ret
+
+ def _get_installed_plugins(self):
+ plugins_data = self._get_json_data(
+ "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+ 'list of plugins')
+
+ # Check if we got valid data
+ if 'plugins' not in plugins_data:
+ self.module.fail_json(msg="No valid plugin data found.")
+
+ # Create final list of installed/pinned plugins
+ self.is_installed = False
+ self.is_pinned = False
+ self.is_enabled = False
+
+ for p in plugins_data['plugins']:
+ if p['shortName'] == self.params['name']:
+ self.is_installed = True
+
+ if p['pinned']:
+ self.is_pinned = True
+
+ if p['enabled']:
+ self.is_enabled = True
+
+ break
+
+ def install(self):
+ changed = False
+ plugin_file = (
+ '%s/plugins/%s.jpi' % (
+ self.params['jenkins_home'],
+ self.params['name']))
+
+ if not self.is_installed and self.params['version'] in [None, 'latest']:
+ if not self.module.check_mode:
+ # Install the plugin (with dependencies)
+ install_script = (
+ 'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.deploy(); d.get();' % self.params['name'])
+
+ if self.params['with_dependencies']:
+ install_script = (
+ 'Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.getNeededDependencies().each{it.deploy()}; %s' % (
+ self.params['name'], install_script))
+
+ script_data = {
+ 'script': install_script
+ }
+ data = urlencode(script_data)
+
+ # Send the installation request
+ r = self._get_url_data(
+ "%s/scriptText" % self.url,
+ msg_status="Cannot install plugin.",
+ msg_exception="Plugin installation has failed.",
+ data=data)
+
+ hpi_file = '%s/plugins/%s.hpi' % (
+ self.params['jenkins_home'],
+ self.params['name'])
+
+ if os.path.isfile(hpi_file):
+ os.remove(hpi_file)
+
+ changed = True
+ else:
+ # Check if the plugin directory exists
+ if not os.path.isdir(self.params['jenkins_home']):
+ self.module.fail_json(
+ msg="Jenkins home directory doesn't exist.")
+
+ checksum_old = None
+ if os.path.isfile(plugin_file):
+ # Make the checksum of the currently installed plugin
+ with open(plugin_file, 'rb') as plugin_fh:
+ plugin_content = plugin_fh.read()
+ checksum_old = hashlib.sha1(plugin_content).hexdigest()
+
+ if self.params['version'] in [None, 'latest']:
+ # Take latest version
+ plugin_url = (
+ "%s/latest/%s.hpi" % (
+ self.params['updates_url'],
+ self.params['name']))
+ else:
+ # Take specific version
+ plugin_url = (
+ "{0}/download/plugins/"
+ "{1}/{2}/{1}.hpi".format(
+ self.params['updates_url'],
+ self.params['name'],
+ self.params['version']))
+
+ if (
+ self.params['updates_expiration'] == 0 or
+ self.params['version'] not in [None, 'latest'] or
+ checksum_old is None):
+
+ # Download the plugin file directly
+ r = self._download_plugin(plugin_url)
+
+ # Write downloaded plugin into file if checksums don't match
+ if checksum_old is None:
+ # No previously installed plugin
+ if not self.module.check_mode:
+ self._write_file(plugin_file, r)
+
+ changed = True
+ else:
+ # Read the downloaded plugin data so we can checksum it
+ data = r.read()
+
+ # Make new checksum
+ checksum_new = hashlib.sha1(data).hexdigest()
+
+ # If the checksum is different from the currently installed
+ # plugin, store the new plugin
+ if checksum_old != checksum_new:
+ if not self.module.check_mode:
+ self._write_file(plugin_file, data)
+
+ changed = True
+ elif self.params['version'] == 'latest':
+ # Check for update from the updates JSON file
+ plugin_data = self._download_updates()
+
+ # The update centre publishes the SHA-1 digest Base64-encoded,
+ # while checksum_old is hex, so normalize before comparing
+ sha1_latest = binascii.hexlify(base64.b64decode(plugin_data['sha1'])).decode('ascii')
+
+ # If the latest version changed, download it
+ if checksum_old != sha1_latest:
+ if not self.module.check_mode:
+ r = self._download_plugin(plugin_url)
+ self._write_file(plugin_file, r)
+
+ changed = True
+
+ # Change file attributes if needed
+ if os.path.isfile(plugin_file):
+ params = {
+ 'dest': plugin_file
+ }
+ params.update(self.params)
+ file_args = self.module.load_file_common_arguments(params)
+
+ if not self.module.check_mode:
+ # Not sure how to run this in the check mode
+ changed = self.module.set_fs_attributes_if_different(
+ file_args, changed)
+ else:
+ # See the comment above
+ changed = True
+
+ return changed
+
+ def _download_updates(self):
+ updates_filename = 'jenkins-plugin-cache.json'
+ updates_dir = os.path.expanduser('~/.ansible/tmp')
+ updates_file = "%s/%s" % (updates_dir, updates_filename)
+ download_updates = True
+
+ # Check if we need to download new updates file
+ if os.path.isfile(updates_file):
+ # Get timestamp when the file was changed last time
+ ts_file = os.stat(updates_file).st_mtime
+ ts_now = time.time()
+
+ if ts_now - ts_file < self.params['updates_expiration']:
+ download_updates = False
+
+ updates_file_orig = updates_file
+
+ # Download the updates file if needed
+ if download_updates:
+ url = "%s/update-center.json" % self.params['updates_url']
+
+ # Get the data
+ r = self._get_url_data(
+ url,
+ msg_status="Remote updates not found.",
+ msg_exception="Updates download failed.")
+
+ # Write the updates file
+ update_fd, updates_file = tempfile.mkstemp()
+ os.write(update_fd, r.read())
+
+ try:
+ os.close(update_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot close the tmp updates file %s." % updates_file,
+ details=to_native(e))
+
+ # Open the updates file
+ try:
+ f = io.open(updates_file, encoding='utf-8')
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot open temporal updates file.",
+ details=to_native(e))
+
+ i = 0
+ for line in f:
+ # Read only the second line
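+ # (update-center.json is a JSONP-style wrapper; the JSON document itself sits on line 2)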
+ if i == 1:
+ try:
+ data = json.loads(line)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot load JSON data from the tmp updates file.",
+ details=to_native(e))
+
+ break
+
+ i += 1
+
+ # Move the freshly downloaded updates file into place
+ if download_updates:
+ # Make sure the destination directory exists
+ if not os.path.isdir(updates_dir):
+ try:
+ os.makedirs(updates_dir, int('0700', 8))
+ except OSError as e:
+ self.module.fail_json(
+ msg="Cannot create temporal directory.",
+ details=to_native(e))
+
+ self.module.atomic_move(updates_file, updates_file_orig)
+
+ # Check if we have the plugin data available
+ if 'plugins' not in data or self.params['name'] not in data['plugins']:
+ self.module.fail_json(
+ msg="Cannot find plugin data in the updates file.")
+
+ return data['plugins'][self.params['name']]
+
+ def _download_plugin(self, plugin_url):
+ # Download the plugin
+ r = self._get_url_data(
+ plugin_url,
+ msg_status="Plugin not found.",
+ msg_exception="Plugin download failed.")
+
+ return r
+
+ def _write_file(self, f, data):
+ # Store the plugin into a temp file and then move it
+ tmp_f_fd, tmp_f = tempfile.mkstemp()
+
+ if isinstance(data, (text_type, binary_type)):
+ os.write(tmp_f_fd, data)
+ else:
+ os.write(tmp_f_fd, data.read())
+
+ try:
+ os.close(tmp_f_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg='Cannot close the temporary plugin file %s.' % tmp_f,
+ details=to_native(e))
+
+ # Move the file into place
+ self.module.atomic_move(tmp_f, f)
+
+ def uninstall(self):
+ changed = False
+
+ # Perform the action
+ if self.is_installed:
+ if not self.module.check_mode:
+ self._pm_query('doUninstall', 'Uninstallation')
+
+ changed = True
+
+ return changed
+
+ def pin(self):
+ return self._pinning('pin')
+
+ def unpin(self):
+ return self._pinning('unpin')
+
+ def _pinning(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'pin' and not self.is_pinned or
+ action == 'unpin' and self.is_pinned):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(action, "%sning" % action.capitalize())
+
+ changed = True
+
+ return changed
+
+ def enable(self):
+ return self._enabling('enable')
+
+ def disable(self):
+ return self._enabling('disable')
+
+ def _enabling(self, action):
+ changed = False
+
+ # Check if the plugin is enabled/disabled
+ if (
+ action == 'enable' and not self.is_enabled or
+ action == 'disable' and self.is_enabled):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(
+ "make%sd" % action.capitalize(),
+ "%sing" % action[:-1].capitalize())
+
+ changed = True
+
+ return changed
+
+ def _pm_query(self, action, msg):
+ url = "%s/pluginManager/plugin/%s/%s" % (
+ self.params['url'], self.params['name'], action)
+
+ # Send the request
+ self._get_url_data(
+ url,
+ msg_status="Plugin not found. %s" % url,
+ msg_exception="%s has failed." % msg)
+
+
+def main():
+ # Module arguments
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ group=dict(type='str', default='jenkins'),
+ jenkins_home=dict(type='path', default='/var/lib/jenkins'),
+ mode=dict(default='0644', type='raw'),
+ name=dict(type='str', required=True),
+ owner=dict(type='str', default='jenkins'),
+ state=dict(
+ choices=[
+ 'present',
+ 'absent',
+ 'pinned',
+ 'unpinned',
+ 'enabled',
+ 'disabled',
+ 'latest'],
+ default='present'),
+ timeout=dict(default=30, type="int"),
+ updates_expiration=dict(default=86400, type="int"),
+ updates_url=dict(default='https://updates.jenkins.io'),
+ url=dict(default='http://localhost:8080'),
+ url_password=dict(no_log=True),
+ version=dict(),
+ with_dependencies=dict(default=True, type='bool'),
+ )
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Force basic authentication
+ module.params['force_basic_auth'] = True
+
+ # Convert timeout to float
+ try:
+ module.params['timeout'] = float(module.params['timeout'])
+ except ValueError as e:
+ module.fail_json(
+ msg='Cannot convert %s to float.' % module.params['timeout'],
+ details=to_native(e))
+
+ # Set version to latest if state is latest
+ if module.params['state'] == 'latest':
+ module.params['state'] = 'present'
+ module.params['version'] = 'latest'
+
+ # Create some shortcuts
+ name = module.params['name']
+ state = module.params['state']
+
+ # Initial change state of the task
+ changed = False
+
+ # Instantiate the JenkinsPlugin object
+ jp = JenkinsPlugin(module)
+
+ # Perform action depending on the requested state
+ if state == 'present':
+ changed = jp.install()
+ elif state == 'absent':
+ changed = jp.uninstall()
+ elif state == 'pinned':
+ changed = jp.pin()
+ elif state == 'unpinned':
+ changed = jp.unpin()
+ elif state == 'enabled':
+ changed = jp.enable()
+ elif state == 'disabled':
+ changed = jp.disable()
+
+ # Print status of the change
+ module.exit_json(changed=changed, plugin=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
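
Two details of _download_updates() deserve unpacking. First, the update centre serves update-center.json in a JSONP-style envelope ("updateCenter.post(" on line one, ");" on the last line) with the bare JSON document on line two, which is why the module parses only the second line. Second, each plugin entry carries a sha1 field used for checksum verification. A sketch of both steps, assuming network access and that token-macro (an arbitrary example) is present in the feed:

import base64
import hashlib
import json
from urllib.request import urlopen

raw = urlopen("https://updates.jenkins.io/update-center.json").read().decode("utf-8")
data = json.loads(raw.splitlines()[1])  # line 2 holds the bare JSON document

plugin = data["plugins"]["token-macro"]
print(plugin["version"], plugin["sha1"])

# The sha1 field is the Base64-encoded digest of the plugin archive
hpi = urlopen("https://updates.jenkins.io/latest/token-macro.hpi").read()
digest = base64.b64encode(hashlib.sha1(hpi).digest()).decode("ascii")
print("checksum matches:", digest == plugin["sha1"])
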
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py
new file mode 100644
index 00000000..68f06c27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# encoding: utf-8
+
+# (c) 2016, James Hogarth <james.hogarth@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: James Hogarth (@hogarthj)
+module: jenkins_script
+short_description: Executes a Groovy script in the Jenkins instance
+description:
+ - The C(jenkins_script) module takes a script plus a dict of values
+ to use within the script and returns the result of the script being run.
+
+options:
+ script:
+ type: str
+ description:
+ - The Groovy script to be executed.
+ This gets passed as a string Template if I(args) is defined.
+ required: true
+ url:
+ type: str
+ description:
+ - The Jenkins server to execute the script against. The default is a local
+ Jenkins instance that is not being proxied through a webserver.
+ default: http://localhost:8080
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+ This should only be set to C(no) when used on personally controlled sites
+ using self-signed certificates as it avoids verifying the source site.
+ type: bool
+ default: 'yes'
+ user:
+ type: str
+ description:
+ - The username to connect to the Jenkins server with.
+ password:
+ type: str
+ description:
+ - The password to connect to the Jenkins server with.
+ timeout:
+ type: int
+ description:
+ - The request timeout in seconds.
+ default: 10
+ args:
+ type: dict
+ description:
+ - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
+
+notes:
+  - Since the script can do anything, this module does not report on changes.
+    Because an arbitrary script is being run, it is important to set C(changed_when)
+    for the Ansible output to be clear on any alterations made.
+
+'''
+
+EXAMPLES = '''
+- name: Obtaining a list of plugins
+ community.general.jenkins_script:
+ script: 'println(Jenkins.instance.pluginManager.plugins)'
+ user: admin
+ password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+ ansible.builtin.set_fact:
+ setmaster_mode: |
+ import jenkins.model.*
+ instance = Jenkins.getInstance()
+ instance.setMode(${jenkins_mode})
+ instance.save()
+
+- name: Use the variable as the script
+ community.general.jenkins_script:
+ script: "{{ setmaster_mode }}"
+ args:
+ jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+ community.general.jenkins_script:
+ script: "println(Jenkins.instance.pluginManager.plugins)"
+ user: admin
+ password: admin
+ url: https://localhost
+ validate_certs: no
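+
+# As the notes say, the module cannot detect what a script changed; a sketch
+# (illustrative read-only script) of controlling the changed status yourself:
+- name: Run a read-only script and report no change
+  community.general.jenkins_script:
+    script: 'println(Jenkins.instance.pluginManager.plugins)'
+    user: admin
+    password: admin
+  register: script_result
+  changed_when: false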
+'''
+
+RETURN = '''
+output:
+ description: Result of script
+ returned: success
+ type: str
+ sample: 'Result: true'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_csrf_protection_enabled(module):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/api/json',
+ timeout=module.params['timeout'],
+ method='GET')
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content).get('useCrumbs', False)
+
+
+def get_crumb(module, cookies):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/crumbIssuer/api/json',
+ method='GET',
+ timeout=module.params['timeout'],
+ cookies=cookies)
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content)
+
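+# For reference, a crumb response resembles (shape per the Jenkins
+# crumbIssuer API): {"crumb": "1234abcd", "crumbRequestField": "Jenkins-Crumb"}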
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ script=dict(required=True, type="str"),
+ url=dict(required=False, type="str", default="http://localhost:8080"),
+ validate_certs=dict(required=False, type="bool", default=True),
+ user=dict(required=False, type="str", default=None),
+ password=dict(required=False, no_log=True, type="str", default=None),
+ timeout=dict(required=False, type="int", default=10),
+ args=dict(required=False, type="dict", default=None)
+ )
+ )
+
+ if module.params['user'] is not None:
+ if module.params['password'] is None:
+ module.fail_json(msg="password required when user provided", output='')
+ module.params['url_username'] = module.params['user']
+ module.params['url_password'] = module.params['password']
+ module.params['force_basic_auth'] = True
+
+ if module.params['args'] is not None:
+ from string import Template
+ try:
+ script_contents = Template(module.params['script']).substitute(module.params['args'])
+ except KeyError as err:
+ module.fail_json(msg="Error with templating variable: %s" % err, output='')
+ else:
+ script_contents = module.params['script']
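+    # For illustration: Template('instance.setMode(${jenkins_mode})')
+    # .substitute({'jenkins_mode': 'Node.Mode.EXCLUSIVE'}) yields
+    # 'instance.setMode(Node.Mode.EXCLUSIVE)'.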
+
+ headers = {}
+ cookies = None
+ if is_csrf_protection_enabled(module):
+ cookies = cookiejar.LWPCookieJar()
+ crumb = get_crumb(module, cookies)
+ headers = {crumb['crumbRequestField']: crumb['crumb']}
+
+ resp, info = fetch_url(module,
+ module.params['url'] + "/scriptText",
+ data=urlencode({'script': script_contents}),
+ headers=headers,
+ method="POST",
+ timeout=module.params['timeout'],
+ cookies=cookies)
+
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ result = to_native(resp.read())
+
+ if 'Exception:' in result and 'at java.lang.Thread' in result:
+ module.fail_json(msg="script failed with stacktrace:\n " + result, output='')
+
+ module.exit_json(
+ output=result,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py
new file mode 100644
index 00000000..d10be9ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Steve Smith <ssmith@atlassian.com>
+# Atlassian open-source approval reference OSR-76.
+#
+# (c) 2020, Per Abildgaard Toft <per@minfejl.dk> Search and update function
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: jira
+short_description: Create and modify issues in a JIRA instance
+description:
+ - Create and modify issues in a JIRA instance.
+
+options:
+ uri:
+ type: str
+ required: true
+ description:
+ - Base URI for the JIRA instance.
+
+ operation:
+ type: str
+ required: true
+ aliases: [ command ]
+ choices: [ comment, create, edit, fetch, link, search, transition, update ]
+ description:
+ - The operation to perform.
+
+ username:
+ type: str
+ required: true
+ description:
+      - The username to log in with.
+
+ password:
+ type: str
+ required: true
+ description:
+      - The password to log in with.
+
+ project:
+ type: str
+ required: false
+ description:
+ - The project for this operation. Required for issue creation.
+
+ summary:
+ type: str
+ required: false
+ description:
+ - The issue summary, where appropriate.
+
+ description:
+ type: str
+ required: false
+ description:
+ - The issue description, where appropriate.
+
+ issuetype:
+ type: str
+ required: false
+ description:
+ - The issue type, for issue creation.
+
+ issue:
+ type: str
+ required: false
+ description:
+ - An existing issue key to operate on.
+ aliases: ['ticket']
+
+ comment:
+ type: str
+ required: false
+ description:
+ - The comment text to add.
+
+ status:
+ type: str
+ required: false
+ description:
+ - The desired status; only relevant for the transition operation.
+
+ assignee:
+ type: str
+ required: false
+ description:
+      - Sets the assignee on create or transition operations. Note that not all transitions allow this.
+
+ linktype:
+ type: str
+ required: false
+ description:
+      - Set the type of link, when I(operation) is C(link).
+
+ inwardissue:
+ type: str
+ required: false
+ description:
+      - Set the issue from which the link will be created.
+
+ outwardissue:
+ type: str
+ required: false
+ description:
+      - Set the issue to which the link will be created.
+
+ fields:
+ type: dict
+ required: false
+ description:
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
+ (possibly after merging with other required data, as when passed to create). See examples for more information,
+ and the JIRA REST API for the structure required for various fields.
+
+ jql:
+ required: false
+ description:
+ - Query JIRA in JQL Syntax, e.g. 'CMDB Hostname'='test.example.com'.
+ type: str
+ version_added: '0.2.0'
+
+ maxresults:
+ required: false
+ description:
+      - Limit the result of I(operation=search). If no value is specified, the default JIRA limit will be used.
+ - Used when I(operation=search) only, ignored otherwise.
+ type: int
+ version_added: '0.2.0'
+
+ timeout:
+ type: float
+ required: false
+ description:
+ - Set timeout, in seconds, on requests to JIRA API.
+ default: 10
+
+ validate_certs:
+ required: false
+ description:
+      - Require valid SSL certificates (set to C(false) if you would like to use self-signed certificates).
+ default: true
+ type: bool
+
+notes:
+ - "Currently this only works with basic-auth."
+
+author:
+- "Steve Smith (@tarka)"
+- "Per Abildgaard Toft (@pertoft)"
+"""
+
+EXAMPLES = r"""
+# Create a new issue and add a comment to it:
+- name: Create an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Example Issue
+ description: Created using Ansible
+ issuetype: Task
+ args:
+ fields:
+ customfield_13225: "test"
+ customfield_12931: '{"value": "Test"}'
+ register: issue
+
+- name: Comment on issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key}}'
+ operation: edit
+ assignee: ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Assigned issue
+ description: Created and assigned using Ansible
+ issuetype: Task
+ assignee: ssmith
+
+# Edit an issue
+- name: Set the labels on an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: edit
+ args:
+ fields:
+ labels:
+ - autocreated
+ - ansible
+
+# Updating a field using operations: add, set & remove
+- name: Change the value of a Select dropdown
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: update
+ args:
+ fields:
+ customfield_12931: [ {'set': {'value': 'Virtual'}} ]
+ customfield_13820: [ {'set': {'value':'Manually'}} ]
+ register: cmdb_issue
+ delegate_to: localhost
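+
+# The update operation also accepts 'add' and 'remove' verbs
+# (field and value below are illustrative):
+- name: Remove a label using an update operation
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: update
+    args:
+      fields:
+        labels: [ {'remove': 'autocreated'} ]
+  delegate_to: localhost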
+
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: fetch
+ issue: ANS-63
+ register: issue
+
+# Search for an issue
+# You can limit the search to specific fields by adding optional args. Note: it must be a dict, hence lastViewed: null
+- name: Search for an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: search
+ maxresults: 10
+ jql: project=cmdb AND cf[13225]="test"
+ args:
+ fields:
+ lastViewed: null
+ register: issue
+
+- name: Create a unix account for the reporter
+ become: true
+ user:
+ name: '{{ issue.meta.fields.creator.name }}'
+ comment: '{{ issue.meta.fields.creator.displayName }}'
+
+# You can get a list of valid link types at the /rest/api/2/issueLinkType
+# URL of your JIRA installation.
+- name: Create link from HSP-1 to MKY-1
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ operation: link
+ linktype: Relates
+ inwardissue: HSP-1
+ outwardissue: MKY-1
+
+# Transition an issue by target status
+- name: Close the issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: transition
+ status: Done
+ args:
+ fields:
+ customfield_14321: [ {'set': {'value': 'Value of Select' }} ]
+ comment: [ { 'add': { 'body' : 'Test' } }]
+
+"""
+
+import base64
+import json
+import sys
+import traceback
+
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+
+from ansible.module_utils._text import to_text, to_bytes, to_native
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(url, user, passwd, timeout, data=None, method=None):
+ if data:
+ data = json.dumps(data)
+
+ # NOTE: fetch_url uses a password manager, which follows the
+# standard request-then-challenge basic-auth semantics. However, as
+# JIRA allows some unauthorised operations, it doesn't necessarily
+# send the challenge, so the request occurs as the anonymous user,
+# leading to unexpected results. To work around this we manually
+ # inject the basic-auth header up-front to ensure that JIRA treats
+ # the requests as authorized for this user.
+ auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
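+    # For example (assumed credentials), user='admin' and passwd='secret'
+    # produce the header 'Authorization: Basic YWRtaW46c2VjcmV0'.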
+ response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
+ headers={'Content-Type': 'application/json',
+ 'Authorization': "Basic %s" % auth})
+
+ if info['status'] not in (200, 201, 204):
+ error = None
+ try:
+ error = json.loads(info['body'])
+ except Exception:
+ module.fail_json(msg=to_native(info['body']), exception=traceback.format_exc())
+ if error:
+ msg = []
+ for key in ('errorMessages', 'errors'):
+ if error.get(key):
+ msg.append(to_native(error[key]))
+ if msg:
+ module.fail_json(msg=', '.join(msg))
+ module.fail_json(msg=to_native(error))
+        # Fall back to printing the body if it cannot be decoded
+ module.fail_json(msg=to_native(info['body']))
+
+ body = response.read()
+
+ if body:
+ return json.loads(to_text(body, errors='surrogate_or_strict'))
+ return {}
+
+
+def post(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='POST')
+
+
+def put(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='PUT')
+
+
+def get(url, user, passwd, timeout):
+ return request(url, user, passwd, timeout)
+
+
+def create(restbase, user, passwd, params):
+ createfields = {
+ 'project': {'key': params['project']},
+ 'summary': params['summary'],
+ 'issuetype': {'name': params['issuetype']}}
+
+ if params['description']:
+ createfields['description'] = params['description']
+
+ # Merge in any additional or overridden fields
+ if params['fields']:
+ createfields.update(params['fields'])
+
+ data = {'fields': createfields}
+
+ url = restbase + '/issue/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def comment(restbase, user, passwd, params):
+ data = {
+ 'body': params['comment']
+ }
+ url = restbase + '/issue/' + params['issue'] + '/comment'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def edit(restbase, user, passwd, params):
+ data = {
+ 'fields': params['fields']
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def update(restbase, user, passwd, params):
+ data = {
+ "update": params['fields'],
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def fetch(restbase, user, passwd, params):
+ url = restbase + '/issue/' + params['issue']
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def search(restbase, user, passwd, params):
+ url = restbase + '/search?jql=' + pathname2url(params['jql'])
+ if params['fields']:
+ fields = params['fields'].keys()
+ url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields])
+ if params['maxresults']:
+ url = url + '&maxResults=' + str(params['maxresults'])
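+    # For illustration (assumed values), the assembled URL resembles:
+    #   <uri>/rest/api/2/search?jql=project%3Dcmdb&fields=lastViewed&maxResults=10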
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def transition(restbase, user, passwd, params):
+ # Find the transition id
+ turl = restbase + '/issue/' + params['issue'] + "/transitions"
+ tmeta = get(turl, user, passwd, params['timeout'])
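+    # tmeta resembles (abridged; shape as returned by the JIRA transitions API):
+    #   {'transitions': [{'id': '31', 'name': 'Done', ...}, ...]}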
+
+ target = params['status']
+ tid = None
+ for t in tmeta['transitions']:
+ if t['name'] == target:
+ tid = t['id']
+ break
+
+ if not tid:
+ raise ValueError("Failed find valid transition for '%s'" % target)
+
+ # Perform it
+ url = restbase + '/issue/' + params['issue'] + "/transitions"
+ data = {'transition': {"id": tid},
+ 'update': params['fields']}
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def link(restbase, user, passwd, params):
+ data = {
+ 'type': {'name': params['linktype']},
+ 'inwardIssue': {'key': params['inwardissue']},
+ 'outwardIssue': {'key': params['outwardissue']},
+ }
+
+ url = restbase + '/issueLink/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def main():
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ uri=dict(type='str', required=True),
+ operation=dict(type='str', choices=['create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'],
+ aliases=['command'], required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ project=dict(type='str', ),
+ summary=dict(type='str', ),
+ description=dict(type='str', ),
+ issuetype=dict(type='str', ),
+ issue=dict(type='str', aliases=['ticket']),
+ comment=dict(type='str', ),
+ status=dict(type='str', ),
+ assignee=dict(type='str', ),
+ fields=dict(default={}, type='dict'),
+ linktype=dict(type='str', ),
+ inwardissue=dict(type='str', ),
+ outwardissue=dict(type='str', ),
+ jql=dict(type='str', ),
+ maxresults=dict(type='int'),
+ timeout=dict(type='float', default=10),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_if=(
+ ('operation', 'create', ['project', 'issuetype', 'summary']),
+ ('operation', 'comment', ['issue', 'comment']),
+ ('operation', 'fetch', ['issue']),
+ ('operation', 'transition', ['issue', 'status']),
+ ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
+ ('operation', 'search', ['jql']),
+ ),
+ supports_check_mode=False
+ )
+
+ op = module.params['operation']
+
+ # Handle rest of parameters
+ uri = module.params['uri']
+ user = module.params['username']
+ passwd = module.params['password']
+ if module.params['assignee']:
+ module.params['fields']['assignee'] = {'name': module.params['assignee']}
+
+ if not uri.endswith('/'):
+ uri = uri + '/'
+ restbase = uri + 'rest/api/2'
+
+ # Dispatch
+ try:
+
+ # Lookup the corresponding method for this operation. This is
+ # safe as the AnsibleModule should remove any unknown operations.
+ thismod = sys.modules[__name__]
+ method = getattr(thismod, op)
+
+ changed, ret = method(restbase, user, passwd, module.params)
+
+ except Exception as e:
+ return module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, meta=ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py
new file mode 100644
index 00000000..732c4723
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py
@@ -0,0 +1,615 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: katello
+short_description: Manage Katello Resources
+deprecated:
+ removed_in: '2.0.0' # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Katello resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ choices:
+
+ - repository
+ - manifest
+ - repository_set
+ - sync_plan
+ - content_view
+ - lifecycle_environment
+ - activation_key
+ - product
+
+ required: true
+ action:
+ description:
+      - The action associated with the entity resource, to set or edit in dictionary format.
+      - Possible actions in relation to entities.
+ - "sync (available when entity=product or entity=repository)"
+ - "publish (available when entity=content_view)"
+ - "promote (available when entity=content_view)"
+ choices:
+ - sync
+ - publish
+ - promote
+ required: false
+ params:
+ description:
+      - Parameters associated with the entity resource and action, to set or edit in dictionary format.
+      - Each choice may only be available with specific entities and actions.
+      - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,..],...)."
+ - The action "None" means no action specified.
+ - Possible Params in relation to entity and action.
+ - "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "content ([manifest,None])"
+ - "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
+ - "basearch ([repository_set,None])"
+ - "releaserver ([repository_set,None])"
+ - "sync_date ([sync_plan,None])"
+ - "interval ([sync_plan,None])"
+ - "repositories ([content_view,None])"
+ - "from_environment ([content_view,promote])"
+ - "to_environment([content_view,promote])"
+ - "prior ([lifecycle_environment,None])"
+ - "content_view ([activation_key,None])"
+ - "lifecycle_environment ([activation_key,None])"
+ required: true
+ task_timeout:
+ description:
+ - The timeout in seconds to wait for the started Foreman action to finish.
+      - If the timeout is reached and the Foreman action did not complete, the Ansible task fails. However, the Foreman action does not get canceled.
+ default: 1000
+ required: false
+ verify_ssl:
+ description:
+      - Verify the SSL/HTTPS connection (e.g. for a valid certificate).
+ default: false
+ type: bool
+ required: false
+'''
+
+EXAMPLES = '''
+---
+# Simple Example:
+
+- name: Create Product
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: product
+ params:
+ name: Centos 7
+ delegate_to: localhost
+
+# Abstraction Example:
+# katello.yml
+---
+- name: "{{ name }}"
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: "{{ entity }}"
+ params: "{{ params }}"
+ delegate_to: localhost
+
+# tasks.yml
+---
+- include: katello.yml
+ vars:
+ name: Create Dev Environment
+ entity: lifecycle_environment
+ params:
+ name: Dev
+ prior: Library
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create Centos Product
+ entity: product
+ params:
+ name: Centos 7
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create 7.2 Repository
+ entity: repository
+ params:
+ name: Centos 7.2
+ product: Centos 7
+ organization: Default Organization
+ content_type: yum
+ url: http://mirror.centos.org/centos/7/os/x86_64/
+
+- include: katello.yml
+ vars:
+ name: Create Centos 7 View
+ entity: content_view
+ params:
+ name: Centos 7 View
+ organization: Default Organization
+ repositories:
+ - name: Centos 7.2
+ product: Centos 7
+
+- include: katello.yml
+ vars:
+ name: Enable RHEL Product
+ entity: repository_set
+ params:
+ name: Red Hat Enterprise Linux 7 Server (RPMs)
+ product: Red Hat Enterprise Linux Server
+ organization: Default Organization
+ basearch: x86_64
+ releasever: 7
+
+- include: katello.yml
+ vars:
+ name: Promote Contentview Environment with longer timeout
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+
+# Best Practices
+
+# In Foreman, things can be done in parallel.
+# When a conflicting action is already running,
+# the task will fail instantly instead of waiting for the already running action to complete.
+# So you should use a "until success" loop to catch this.
+
+- name: Promote Contentview Environment with increased Timeout
+ community.general.katello:
+ username: ansibleuser
+ password: supersecret
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+ register: task_result
+ until: task_result is success
+ retries: 9
+ delay: 120
+
+'''
+
+RETURN = '''# '''
+
+import datetime
+import os
+import traceback
+
+try:
+ from nailgun import entities, entity_fields, entity_mixins
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module, task_timeout):
+ self._server = server
+ self._entities = entities
+ self._module = module
+ entity_mixins.TASK_TIMEOUT = task_timeout
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No organization found for %s" % name)
+
+ def find_lifecycle_environment(self, name, organization):
+ org = self.find_organization(organization)
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
+
+ def find_product(self, name, organization):
+ org = self.find_organization(organization)
+
+ product = self._entities.Product(self._server, name=name, organization=org)
+ response = product.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Product found for %s" % name)
+
+ def find_repository(self, name, product, organization):
+ product = self.find_product(product, organization)
+
+ repository = self._entities.Repository(self._server, name=name, product=product)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Repository found for %s" % name)
+
+ def find_content_view(self, name, organization):
+ org = self.find_organization(organization)
+
+ content_view = self._entities.ContentView(self._server, name=name, organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View found for %s" % name)
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+ def manifest(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+        # params['content'] is resolved relative to the current working
+        # directory, so the value is expected to begin with a path separator.
+        with open(os.getcwd() + params['content'], 'r') as content_file:
+            content = content_file.read()
+
+ manifest = self._entities.Subscription(self._server)
+
+ try:
+ manifest.upload(
+ data={'organization_id': org.id},
+ files={'content': content}
+ )
+ return True
+ except Exception as e:
+
+ if "Import is the same as existing data" in e.message:
+ return False
+ else:
+ self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def product(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+ product = self._entities.Product(self._server, **params)
+ response = product.search()
+
+ if len(response) == 1:
+ product.id = response[0].id
+ product.update()
+ else:
+ product.create()
+
+ return True
+
+ def sync_product(self, params):
+ org = self.find_organization(params['organization'])
+ product = self.find_product(params['name'], org.name)
+
+ return product.sync()
+
+ def repository(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ params['product'] = product.id
+ del params['organization']
+
+ repository = self._entities.Repository(self._server, **params)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ repository.id = response[0].id
+ repository.update()
+ else:
+ repository.create()
+
+ return True
+
+ def sync_repository(self, params):
+ org = self.find_organization(params['organization'])
+ repository = self.find_repository(params['name'], params['product'], org.name)
+
+ return repository.sync()
+
+ def repository_set(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ del params['product']
+ del params['organization']
+
+ if not product:
+ return False
+ else:
+ reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
+ reposet = reposet.search()[0]
+
+ formatted_name = [params['name'].replace('(', '').replace(')', '')]
+ formatted_name.append(params['basearch'])
+
+ if 'releasever' in params:
+ formatted_name.append(params['releasever'])
+
+ formatted_name = ' '.join(formatted_name)
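+            # e.g. 'Red Hat Enterprise Linux 7 Server (RPMs)' with basearch
+            # 'x86_64' and releasever '7' becomes
+            # 'Red Hat Enterprise Linux 7 Server RPMs x86_64 7'.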
+
+ repository = self._entities.Repository(self._server, product=product, name=formatted_name)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ repository = repository.search()
+
+ if len(repository) == 0:
+ if 'releasever' in params:
+ reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
+ else:
+ reposet.enable(data={'basearch': params['basearch']})
+
+ return True
+
+ def sync_plan(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+ params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
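+        # Per the strptime format above, sync_date is expected as 'HH:MM',
+        # e.g. sync_date: '03:00'.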
+
+ products = params['products']
+ del params['products']
+
+ sync_plan = self._entities.SyncPlan(
+ self._server,
+ name=params['name'],
+ organization=org
+ )
+ response = sync_plan.search()
+
+ sync_plan.sync_date = params['sync_date']
+ sync_plan.interval = params['interval']
+
+ if len(response) == 1:
+ sync_plan.id = response[0].id
+ sync_plan.update()
+ else:
+ response = sync_plan.create()
+ sync_plan.id = response[0].id
+
+ if products:
+ ids = []
+
+ for name in products:
+ product = self.find_product(name, org.name)
+ ids.append(product.id)
+
+ sync_plan.add_products(data={'product_ids': ids})
+
+ return True
+
+ def content_view(self, params):
+ org = self.find_organization(params['organization'])
+
+ content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ content_view.id = response[0].id
+ content_view.update()
+ else:
+ content_view = content_view.create()
+
+ if params['repositories']:
+ repos = []
+
+ for repository in params['repositories']:
+ repository = self.find_repository(repository['name'], repository['product'], org.name)
+ repos.append(repository)
+
+ content_view.repository = repos
+ content_view.update(['repository'])
+
+ def find_content_view_version(self, name, organization, environment):
+ env = self.find_lifecycle_environment(environment, organization)
+ content_view = self.find_content_view(name, organization)
+
+ content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
+ response = content_view_version.search(['content_view'], {'environment_id': env.id})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View version found for %s" % response)
+
+ def publish(self, params):
+ content_view = self.find_content_view(params['name'], params['organization'])
+
+ return content_view.publish()
+
+ def promote(self, params):
+ to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
+ version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
+
+ data = {'environment_id': to_environment.id}
+ return version.promote(data=data)
+
+ def lifecycle_environment(self, params):
+ org = self.find_organization(params['organization'])
+ prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ lifecycle_env.id = response[0].id
+ lifecycle_env.update()
+ else:
+ lifecycle_env.create()
+
+ return True
+
+ def activation_key(self, params):
+ org = self.find_organization(params['organization'])
+
+ activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
+ response = activation_key.search()
+
+ if len(response) == 1:
+ activation_key.id = response[0].id
+ activation_key.update()
+ else:
+ activation_key.create()
+
+ if params['content_view']:
+ content_view = self.find_content_view(params['content_view'], params['organization'])
+ lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
+
+ activation_key.content_view = content_view
+ activation_key.environment = lifecycle_environment
+ activation_key.update()
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True,
+ choices=['repository', 'manifest', 'repository_set', 'sync_plan',
+ 'content_view', 'lifecycle_environment', 'activation_key', 'product']),
+ action=dict(type='str', choices=['sync', 'publish', 'promote']),
+ verify_ssl=dict(type='bool', default=False),
+ task_timeout=dict(type='int', default=1000),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ action = module.params['action']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+ task_timeout = module.params['task_timeout']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module, task_timeout)
+
+    # Let's make a connection to the server with username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
+
+ result = False
+
+ if entity == 'product':
+ if action == 'sync':
+ result = ng.sync_product(params)
+ else:
+ result = ng.product(params)
+ elif entity == 'repository':
+ if action == 'sync':
+ result = ng.sync_repository(params)
+ else:
+ result = ng.repository(params)
+ elif entity == 'manifest':
+ result = ng.manifest(params)
+ elif entity == 'repository_set':
+ result = ng.repository_set(params)
+ elif entity == 'sync_plan':
+ result = ng.sync_plan(params)
+ elif entity == 'content_view':
+ if action == 'publish':
+ result = ng.publish(params)
+ elif action == 'promote':
+ result = ng.promote(params)
+ else:
+ result = ng.content_view(params)
+ elif entity == 'lifecycle_environment':
+ result = ng.lifecycle_environment(params)
+ elif entity == 'activation_key':
+ result = ng.activation_key(params)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+ module.exit_json(changed=result, result="%s updated" % entity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
new file mode 100644
index 00000000..ff6f9c22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kernel_blacklist
+author:
+- Matthias Vogelgesang (@matze)
+short_description: Blacklist kernel modules
+description:
+ - Add or remove kernel modules from blacklist.
+options:
+ name:
+ type: str
+ description:
+ - Name of kernel module to black- or whitelist.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the module should be present in the blacklist or absent.
+ choices: [ absent, present ]
+ default: present
+ blacklist_file:
+ type: str
+ description:
+ - If specified, use this blacklist file instead of
+ C(/etc/modprobe.d/blacklist-ansible.conf).
+'''
+
+EXAMPLES = '''
+- name: Blacklist the nouveau driver module
+ community.general.kernel_blacklist:
+ name: nouveau
+ state: present
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Blacklist(object):
+ def __init__(self, module, filename, checkmode):
+ self.filename = filename
+ self.module = module
+ self.checkmode = checkmode
+
+ def create_file(self):
+ if not self.checkmode and not os.path.exists(self.filename):
+ open(self.filename, 'a').close()
+ return True
+ elif self.checkmode and not os.path.exists(self.filename):
+ self.filename = os.devnull
+ return True
+ else:
+ return False
+
+ def get_pattern(self):
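+        # Matches lines such as 'blacklist nouveau'; the module name is
+        # escaped so regex metacharacters in it are taken literally.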
+        return r'^blacklist\s+' + re.escape(self.module) + r'$'
+
+ def readlines(self):
+ f = open(self.filename, 'r')
+ lines = f.readlines()
+ f.close()
+ return lines
+
+ def module_listed(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ for line in lines:
+ stripped = line.strip()
+ if stripped.startswith('#'):
+ continue
+
+ if re.match(pattern, stripped):
+ return True
+
+ return False
+
+ def remove_module(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ if self.checkmode:
+ f = open(os.devnull, 'w')
+ else:
+ f = open(self.filename, 'w')
+
+ for line in lines:
+ if not re.match(pattern, line.strip()):
+ f.write(line)
+
+ f.close()
+
+ def add_module(self):
+ if self.checkmode:
+ f = open(os.devnull, 'a')
+ else:
+ f = open(self.filename, 'a')
+
+ f.write('blacklist %s\n' % self.module)
+
+ f.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ blacklist_file=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ args = dict(changed=False, failed=False,
+ name=module.params['name'], state=module.params['state'])
+
+ filename = '/etc/modprobe.d/blacklist-ansible.conf'
+
+ if module.params['blacklist_file']:
+ filename = module.params['blacklist_file']
+
+ blacklist = Blacklist(args['name'], filename, module.check_mode)
+
+    args['changed'] = blacklist.create_file()
+
+ if blacklist.module_listed():
+ if args['state'] == 'absent':
+ blacklist.remove_module()
+ args['changed'] = True
+ else:
+ if args['state'] == 'present':
+ blacklist.add_module()
+ args['changed'] = True
+
+ module.exit_json(**args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py
new file mode 100644
index 00000000..b27155ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py
@@ -0,0 +1,879 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_client
+
+short_description: Allows administration of Keycloak clients via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak clients via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ Aliases are provided so camelCased versions can be used as well.
+
+    - The Keycloak API does not always sanity check inputs, e.g. you can set
+      SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+ state:
+ description:
+            - State of the client.
+            - On C(present), the client will be created (or updated if it exists already).
+            - On C(absent), the client will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ realm:
+ description:
+ - The realm to create the client in.
+ type: str
+ default: master
+
+ client_id:
+ description:
+ - Client id of client to be worked on. This is usually an alphanumeric name chosen by
+ you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
+ This is 'clientId' in the Keycloak REST API.
+ aliases:
+ - clientId
+ type: str
+
+ id:
+ description:
+            - Id of client to be worked on. This is usually a UUID. Either this or I(client_id)
+ is required. If you specify both, this takes precedence.
+ type: str
+
+ name:
+ description:
+            - Name of the client (this is not the same as I(client_id)).
+ type: str
+
+ description:
+ description:
+ - Description of the client in Keycloak
+ type: str
+
+ root_url:
+ description:
+            - Root URL appended to relative URLs for this client.
+ This is 'rootUrl' in the Keycloak REST API.
+ aliases:
+ - rootUrl
+ type: str
+
+ admin_url:
+ description:
+            - URL to the admin interface of the client.
+ This is 'adminUrl' in the Keycloak REST API.
+ aliases:
+ - adminUrl
+ type: str
+
+ base_url:
+ description:
+            - Default URL to use when the auth server needs to redirect or link back to the client.
+ This is 'baseUrl' in the Keycloak REST API.
+ aliases:
+ - baseUrl
+ type: str
+
+ enabled:
+ description:
+ - Is this client enabled or not?
+ type: bool
+
+ client_authenticator_type:
+ description:
+ - How do clients authenticate with the auth server? Either C(client-secret) or
+ C(client-jwt) can be chosen. When using C(client-secret), the module parameter
+ I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
+ C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
+ to configure its behavior.
+ This is 'clientAuthenticatorType' in the Keycloak REST API.
+ choices: ['client-secret', 'client-jwt']
+ aliases:
+ - clientAuthenticatorType
+ type: str
+
+ secret:
+ description:
+ - When using I(client_authenticator_type) C(client-secret) (the default), you can
+              specify a secret here (otherwise one will be generated if it does not exist). If
+ changing this secret, the module will not register a change currently (but the
+ changed secret will be saved).
+ type: str
+
+ registration_access_token:
+ description:
+ - The registration access token provides access for clients to the client registration
+ service.
+ This is 'registrationAccessToken' in the Keycloak REST API.
+ aliases:
+ - registrationAccessToken
+ type: str
+
+ default_roles:
+ description:
+ - list of default roles for this client. If the client roles referenced do not exist
+ yet, they will be created.
+ This is 'defaultRoles' in the Keycloak REST API.
+ aliases:
+ - defaultRoles
+ type: list
+
+ redirect_uris:
+ description:
+ - Acceptable redirect URIs for this client.
+ This is 'redirectUris' in the Keycloak REST API.
+ aliases:
+ - redirectUris
+ type: list
+
+ web_origins:
+ description:
+ - List of allowed CORS origins.
+ This is 'webOrigins' in the Keycloak REST API.
+ aliases:
+ - webOrigins
+ type: list
+
+ not_before:
+ description:
+ - Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
+ This is 'notBefore' in the Keycloak REST API.
+ type: int
+ aliases:
+ - notBefore
+
+ bearer_only:
+ description:
+ - The access type of this client is bearer-only.
+ This is 'bearerOnly' in the Keycloak REST API.
+ aliases:
+ - bearerOnly
+ type: bool
+
+ consent_required:
+ description:
+ - If enabled, users have to consent to client access.
+ This is 'consentRequired' in the Keycloak REST API.
+ aliases:
+ - consentRequired
+ type: bool
+
+ standard_flow_enabled:
+ description:
+ - Enable standard flow for this client or not (OpenID connect).
+ This is 'standardFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - standardFlowEnabled
+ type: bool
+
+ implicit_flow_enabled:
+ description:
+ - Enable implicit flow for this client or not (OpenID connect).
+ This is 'implicitFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - implicitFlowEnabled
+ type: bool
+
+ direct_access_grants_enabled:
+ description:
+ - Are direct access grants enabled for this client or not (OpenID connect).
+ This is 'directAccessGrantsEnabled' in the Keycloak REST API.
+ aliases:
+ - directAccessGrantsEnabled
+ type: bool
+
+ service_accounts_enabled:
+ description:
+ - Are service accounts enabled for this client or not (OpenID connect).
+ This is 'serviceAccountsEnabled' in the Keycloak REST API.
+ aliases:
+ - serviceAccountsEnabled
+ type: bool
+
+ authorization_services_enabled:
+ description:
+ - Are authorization services enabled for this client or not (OpenID connect).
+ This is 'authorizationServicesEnabled' in the Keycloak REST API.
+ aliases:
+ - authorizationServicesEnabled
+ type: bool
+
+ public_client:
+ description:
+ - Is the access type for this client public or not.
+ This is 'publicClient' in the Keycloak REST API.
+ aliases:
+ - publicClient
+ type: bool
+
+ frontchannel_logout:
+ description:
+ - Is frontchannel logout enabled for this client or not.
+ This is 'frontchannelLogout' in the Keycloak REST API.
+ aliases:
+ - frontchannelLogout
+ type: bool
+
+ protocol:
+ description:
+            - Type of client (either C(openid-connect) or C(saml)).
+ type: str
+ choices: ['openid-connect', 'saml']
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ aliases:
+ - fullScopeAllowed
+ type: bool
+
+ node_re_registration_timeout:
+ description:
+ - Cluster node re-registration timeout for this client.
+ This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
+ type: int
+ aliases:
+ - nodeReRegistrationTimeout
+
+ registered_nodes:
+ description:
+ - dict of registered cluster nodes (with C(nodename) as the key and last registration
+ time as the value).
+ This is 'registeredNodes' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - registeredNodes
+
+ client_template:
+ description:
+            - Client template to use for this client. If it does not exist, this field will silently
+ be dropped.
+ This is 'clientTemplate' in the Keycloak REST API.
+ type: str
+ aliases:
+ - clientTemplate
+
+ use_template_config:
+ description:
+ - Whether or not to use configuration from the I(client_template).
+ This is 'useTemplateConfig' in the Keycloak REST API.
+ aliases:
+ - useTemplateConfig
+ type: bool
+
+ use_template_scope:
+ description:
+ - Whether or not to use scope configuration from the I(client_template).
+ This is 'useTemplateScope' in the Keycloak REST API.
+ aliases:
+ - useTemplateScope
+ type: bool
+
+ use_template_mappers:
+ description:
+ - Whether or not to use mapper configuration from the I(client_template).
+ This is 'useTemplateMappers' in the Keycloak REST API.
+ aliases:
+ - useTemplateMappers
+ type: bool
+
+ surrogate_auth_required:
+ description:
+ - Whether or not surrogate auth is required.
+ This is 'surrogateAuthRequired' in the Keycloak REST API.
+ aliases:
+ - surrogateAuthRequired
+ type: bool
+
+ authorization_settings:
+ description:
+ - a data structure defining the authorization settings for this client. For reference,
+ please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation).
+ This is 'authorizationSettings' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - authorizationSettings
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client.
+ This is 'protocolMappers' in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the I(existing) field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client. This can contain various configuration
+ settings; an example is given in the examples section. While an exhaustive list of
+              permissible options is not available, possible options as of Keycloak 3.4 are listed below. The Keycloak
+ API does not validate whether a given option is appropriate for the protocol used; if specified
+ anyway, Keycloak will simply not use it.
+ type: dict
+ suboptions:
+ saml.authnstatement:
+ description:
+ - For SAML clients, boolean specifying whether or not a statement containing method and timestamp
+ should be included in the login response.
+
+ saml.client.signature:
+ description:
+ - For SAML clients, boolean specifying whether a client signature is required and validated.
+
+ saml.encrypt:
+ description:
+ - Boolean specifying whether SAML assertions should be encrypted with the client's public key.
+
+ saml.force.post.binding:
+ description:
+ - For SAML clients, boolean specifying whether always to use POST binding for responses.
+
+ saml.onetimeuse.condition:
+ description:
+ - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
+
+ saml.server.signature:
+ description:
+ - Boolean specifying whether SAML documents should be signed by the realm.
+
+ saml.server.signature.keyinfo.ext:
+ description:
+ - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
+ of the signing key id in the SAML Extensions element.
+
+ saml.signature.algorithm:
+ description:
+ - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
+
+ saml.signing.certificate:
+ description:
+ - SAML signing key certificate, base64-encoded.
+
+ saml.signing.private.key:
+ description:
+ - SAML signing key private key, base64-encoded.
+
+ saml_assertion_consumer_url_post:
+ description:
+ - SAML POST Binding URL for the client's assertion consumer service (login responses).
+
+ saml_assertion_consumer_url_redirect:
+ description:
+ - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
+
+ saml_force_name_id_format:
+ description:
+                    - For SAML clients, boolean specifying whether to ignore the requested NameID subject format and use the configured one instead.
+
+ saml_name_id_format:
+ description:
+                    - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)).
+
+ saml_signature_canonicalization_method:
+ description:
+ - SAML signature canonicalization method. This is one of four values, namely
+ C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
+ C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+
+            saml_single_logout_service_url_post:
+                description:
+                    - SAML POST binding URL for the client's single logout service.
+
+            saml_single_logout_service_url_redirect:
+                description:
+                    - SAML redirect binding URL for the client's single logout service.
+
+ user.info.response.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
+
+ request.object.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
+ OIDC request object. One of C(any), C(none), C(RS256).
+
+ use.jwks.url:
+ description:
+ - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
+ public keys.
+
+ jwks.url:
+ description:
+ - For OpenID-Connect clients, URL where client keys in JWK are stored.
+
+ jwt.credential.certificate:
+ description:
+ - For OpenID-Connect clients, client certificate for validating JWT issued by
+ client and signed by its key, base64-encoded.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client (minimal example)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: present
+
+- name: Delete a Keycloak client
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: absent
+
+- name: Create or update a Keycloak client (with all the bells and whistles)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ realm: master
+ client_id: test
+ id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
+ name: this_is_a_test
+ description: Description of this wonderful client
+ root_url: https://www.example.com/
+ admin_url: https://www.example.com/admin_url
+ base_url: basepath
+ enabled: True
+ client_authenticator_type: client-secret
+ secret: REALLYWELLKEPTSECRET
+ redirect_uris:
+ - https://www.example.com/*
+ - http://localhost:8888/
+ web_origins:
+ - https://www.example.com/*
+ not_before: 1507825725
+ bearer_only: False
+ consent_required: False
+ standard_flow_enabled: True
+ implicit_flow_enabled: False
+ direct_access_grants_enabled: False
+ service_accounts_enabled: False
+ authorization_services_enabled: False
+ public_client: False
+ frontchannel_logout: False
+ protocol: openid-connect
+ full_scope_allowed: false
+ node_re_registration_timeout: -1
+ client_template: test
+ use_template_config: False
+ use_template_scope: false
+ use_template_mappers: no
+ registered_nodes:
+ node01.example.com: 1507828202
+ registration_access_token: eyJWT_TOKEN
+ surrogate_auth_required: false
+ default_roles:
+ - test01
+ - test02
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ - config:
+ attribute.name: Role
+ attribute.nameformat: Basic
+ single: false
+ consentRequired: false
+ name: role list
+ protocol: saml
+ protocolMapper: saml-role-list-mapper
+ attributes:
+ saml.authnstatement: True
+ saml.client.signature: True
+ saml.force.post.binding: True
+ saml.server.signature: True
+ saml.signature.algorithm: RSA_SHA256
+ saml.signing.certificate: CERTIFICATEHERE
+ saml.signing.private.key: PRIVATEKEYHERE
+ saml_force_name_id_format: False
+ saml_name_id_format: username
+ saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
+ user.info.response.signature.alg: RS256
+ request.object.signature.alg: RS256
+ use.jwks.url: true
+ jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
+ jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client testclient has been updated"
+
+proposed:
+ description: client representation of proposed changes to client
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+existing:
+ description: client representation of existing client (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+end_state:
+ description: client representation of client after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sanitize_cr(clientrep):
+ """ Removes probably sensitive details from a client representation
+
+ :param clientrep: the clientrep dict to be sanitized
+ :return: sanitized clientrep dict
+ """
+ result = clientrep.copy()
+ if 'secret' in result:
+ result['secret'] = 'no_log'
+ if 'attributes' in result:
+ if 'saml.signing.private.key' in result['attributes']:
+ result['attributes']['saml.signing.private.key'] = 'no_log'
+ return result
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(type='str', default='master'),
+
+ id=dict(type='str'),
+ client_id=dict(type='str', aliases=['clientId']),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ root_url=dict(type='str', aliases=['rootUrl']),
+ admin_url=dict(type='str', aliases=['adminUrl']),
+ base_url=dict(type='str', aliases=['baseUrl']),
+ surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
+ enabled=dict(type='bool'),
+ client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
+ secret=dict(type='str', no_log=True),
+ registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True),
+ default_roles=dict(type='list', aliases=['defaultRoles']),
+ redirect_uris=dict(type='list', aliases=['redirectUris']),
+ web_origins=dict(type='list', aliases=['webOrigins']),
+ not_before=dict(type='int', aliases=['notBefore']),
+ bearer_only=dict(type='bool', aliases=['bearerOnly']),
+ consent_required=dict(type='bool', aliases=['consentRequired']),
+ standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
+ implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
+ direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
+ service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
+ authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
+ public_client=dict(type='bool', aliases=['publicClient']),
+ frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
+ node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
+ registered_nodes=dict(type='dict', aliases=['registeredNodes']),
+ client_template=dict(type='str', aliases=['clientTemplate']),
+ use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
+ use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
+ use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+ authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['client_id', 'id']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ cid = module.params.get('id')
+ state = module.params.get('state')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ client_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+ # See whether the client already exists in Keycloak
+ if cid is None:
+ before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
+ if before_client is not None:
+ cid = before_client['id']
+ else:
+ before_client = kc.get_client_by_id(cid, realm=realm)
+
+ if before_client is None:
+ before_client = dict()
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
+ for client_param in client_params:
+ new_param_value = module.params.get(client_param)
+
+ # some lists in the Keycloak API are sorted, some are not.
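+ # (sorting where the API returns a sorted list keeps the comparison with the
+ # existing state order-insensitive, so an equal but reordered value is not
+ # reported as a change)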
+ if isinstance(new_param_value, list):
+ if client_param in ['attributes']:
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ # Unfortunately, the ansible argument spec checker introduces variables with null values when
+ # they are not specified
+ if client_param == 'protocol_mappers':
+ new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
+
+ changeset[camel(client_param)] = new_param_value
+
+ # Whether creating or updating a client, take the before-state and merge the changeset into it
+ updated_client = before_client.copy()
+ updated_client.update(changeset)
+
+ result['proposed'] = sanitize_cr(changeset)
+ result['existing'] = sanitize_cr(before_client)
+
+ # If the client does not exist yet, before_client is still empty
+ if before_client == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client
+ result['changed'] = True
+ if 'clientId' not in updated_client:
+ module.fail_json(msg='client_id needs to be specified when creating a new client')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize_cr(updated_client))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client(updated_client, realm=realm)
+ after_client = kc.get_client_by_clientid(updated_client['clientId'], realm=realm)
+
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been created.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(updated_client))
+ result['changed'] = (before_client != updated_client)
+
+ module.exit_json(**result)
+
+ kc.update_client(cid, updated_client, realm=realm)
+
+ after_client = kc.get_client_by_id(cid, realm=realm)
+ if before_client == after_client:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(after_client))
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been updated.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ # Delete existing client
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = sanitize_cr(before_client)
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
new file mode 100644
index 00000000..d68198d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clienttemplate
+
+short_description: Allows administration of Keycloak client templates via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak client templates via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html)
+
+ - The Keycloak API does not always enforce that only sensible settings are used -- you can,
+ for instance, set SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+ If you do not specify a setting, a sensible default is usually chosen.
+
+options:
+ state:
+ description:
+ - State of the client template
+ - On C(present), the client template will be created (or updated if it exists already).
+ - On C(absent), the client template will be removed if it exists
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ id:
+ description:
+ - Id of client template to be worked on. This is usually a UUID.
+ type: str
+
+ realm:
+ description:
+ - Realm this client template is found in.
+ type: str
+ default: master
+
+ name:
+ description:
+ - Name of the client template
+ type: str
+
+ description:
+ description:
+ - Description of the client template in Keycloak
+ type: str
+
+ protocol:
+ description:
+ - Type of client template (either C(openid-connect) or C(saml)).
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client template or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ type: bool
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client template.
+ This is 'protocolMappers' in the Keycloak REST API.
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is asked to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - Either C(openid-connect) or C(saml); this specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the "existing" field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client template. This can contain various
+ configuration settings, though in the default installation of Keycloak as of 3.4, none
+ are documented or known, so this is usually empty.
+ type: dict
+
+notes:
+- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled),
+ I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and
+ I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
+ Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
+ they are not available through this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client template (minimal)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+
+- name: Delete Keycloak client template
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ state: absent
+ name: test01
+
+- name: Create or update Keycloak client template (with a protocol mapper)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ full_scope_allowed: false
+ id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client template testclient has been updated"
+
+proposed:
+ description: client template representation of proposed changes to client template
+ returned: always
+ type: dict
+ sample: {
+ name: "test01"
+ }
+existing:
+ description: client template representation of existing client template (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+end_state:
+ description: client template representation of client template after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ realm=dict(type='str', default='master'),
+ state=dict(default='present', choices=['present', 'absent']),
+
+ id=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool'),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('id')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ clientt_params = [x for x in module.params
+ if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm',
+ 'auth_client_secret', 'auth_username', 'auth_password',
+ 'validate_certs', 'realm'] and module.params.get(x) is not None]
+
+ # See whether the client template already exists in Keycloak
+ if cid is None:
+ before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm)
+ if before_clientt is not None:
+ cid = before_clientt['id']
+ else:
+ before_clientt = kc.get_client_template_by_id(cid, realm=realm)
+
+ if before_clientt is None:
+ before_clientt = dict()
+
+ result['existing'] = before_clientt
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
+ for clientt_param in clientt_params:
+ # lists in the Keycloak API are sorted
+ new_param_value = module.params.get(clientt_param)
+ if isinstance(new_param_value, list):
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ changeset[camel(clientt_param)] = new_param_value
+
+ # Whether creating or updating a client, take the before-state and merge the changeset into it
+ updated_clientt = before_clientt.copy()
+ updated_clientt.update(changeset)
+
+ result['proposed'] = changeset
+
+ # If the client template does not exist yet, before_client is still empty
+ if before_clientt == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client template does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client template
+ result['changed'] = True
+ if 'name' not in updated_clientt:
+ module.fail_json(msg='name needs to be specified when creating a new client template')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_clientt)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client_template(updated_clientt, realm=realm)
+ after_clientt = kc.get_client_template_by_name(updated_clientt['name'], realm=realm)
+
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been created.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client template
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client template with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=updated_clientt)
+
+ module.exit_json(**result)
+
+ kc.update_client_template(cid, updated_clientt, realm=realm)
+
+ after_clientt = kc.get_client_template_by_id(cid, realm=realm)
+ if before_clientt == after_clientt:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=after_clientt)
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been updated.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+ # Delete existing client
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = before_clientt
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client_template(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client template %s has been deleted.' % before_clientt['name']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py
new file mode 100644
index 00000000..45b5c290
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Adam Goossens <adam.goossens@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_group
+
+short_description: Allows administration of Keycloak groups via Keycloak API
+
+description:
+ - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a group, where possible provide the group ID to the module. This avoids an extra
+ API lookup to translate the name into the group ID.
+
+
+options:
+ state:
+ description:
+ - State of the group.
+ - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the group will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ name:
+ type: str
+ description:
+ - Name of the group.
+ - This parameter is required only when creating or updating the group.
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this group resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this group.
+ - This parameter is not required for updating or deleting a group but
+ providing it will reduce the number of API calls required.
+
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the group.
+ - Values may be single values (e.g. a string) or a list of strings.
+
+notes:
+ - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+ are read-only for groups. This limitation will be removed in a later version of this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Adam Goossens (@adamgoossens)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak group
+ community.general.keycloak_group:
+ name: my-new-kc-group
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ state: absent
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a Keycloak group based on name
+ community.general.keycloak_group:
+ name: my-group-for-deletion
+ state: absent
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Update the name of a Keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ name: an-updated-kc-group-name
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a keycloak group with some custom attributes
+ community.general.keycloak_group:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ name: my-new_group
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
+ delegate_to: localhost
+'''
+
+RETURN = '''
+group:
+ description: Group representation of the group after module execution (sample is truncated).
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: GUID that identifies the group
+ type: str
+ returned: always
+ sample: 23f38145-3195-462c-97e7-97041ccea73e
+ name:
+ description: Name of the group
+ type: str
+ returned: always
+ sample: grp-test-123
+ attributes:
+ description: Attributes applied to this group
+ type: dict
+ returned: always
+ sample:
+ attr1: ["val1", "val2", "val3"]
+ path:
+ description: URI path to the group
+ type: str
+ returned: always
+ sample: /grp-test-123
+ realmRoles:
+ description: An array of the realm-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ subGroups:
+ description: A list of groups that are children of this group. These groups will have the same parameters as
+ documented here.
+ type: list
+ returned: always
+ clientRoles:
+ description: A list of client-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ access:
+ description: A dict describing the accesses you have to this group based on the credentials used.
+ type: dict
+ returned: always
+ sample:
+ manage: true
+ manageMembership: true
+ view: true
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ attributes=dict(type='dict')
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, group='')
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ gid = module.params.get('id')
+ name = module.params.get('name')
+ attributes = module.params.get('attributes')
+
+ before_group = None # current state of the group, for merging.
+
+ # does the group already exist?
+ if gid is None:
+ before_group = kc.get_group_by_name(name, realm=realm)
+ else:
+ before_group = kc.get_group_by_groupid(gid, realm=realm)
+
+ before_group = {} if before_group is None else before_group
+
+ # attributes in Keycloak have their values returned as lists
+ # via the API. attributes is a dict, so we'll transparently convert
+ # the values to lists.
+ if attributes is not None:
+ for key, val in module.params['attributes'].items():
+ module.params['attributes'][key] = [val] if not isinstance(val, list) else val
+
+ group_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+
+ # build a changeset
+ changeset = {}
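+ # (only parameters that differ from the group's current state are recorded,
+ # with keys camelCased to match the Keycloak group representation)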
+ for param in group_params:
+ new_param_value = module.params.get(param)
+ old_value = before_group[param] if param in before_group else None
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
+
+ # prepare the new group
+ updated_group = before_group.copy()
+ updated_group.update(changeset)
+
+ # if before_group is none, the group doesn't exist.
+ if before_group == {}:
+ if state == 'absent':
+ # nothing to do.
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Group does not exist; doing nothing.'
+ result['group'] = dict()
+ module.exit_json(**result)
+
+ # for 'present', create a new group.
+ result['changed'] = True
+ if name is None:
+ module.fail_json(msg='name must be specified when creating a new group')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do it for real!
+ kc.create_group(updated_group, realm=realm)
+ after_group = kc.get_group_by_name(name, realm)
+
+ result['group'] = after_group
+ result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'],
+ id=after_group['id'])
+
+ else:
+ if state == 'present':
+ # no changes
+ if updated_group == before_group:
+ result['changed'] = False
+ result['group'] = updated_group
+ result['msg'] = "No changes required to group {name}.".format(name=before_group['name'])
+ module.exit_json(**result)
+
+ # update the existing group
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_group(updated_group, realm=realm)
+
+ after_group = kc.get_group_by_groupid(updated_group['id'], realm=realm)
+
+ result['group'] = after_group
+ result['msg'] = "Group {id} has been updated".format(id=after_group['id'])
+
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ result['group'] = dict()
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete for real
+ gid = before_group['id']
+ kc.delete_group(groupid=gid, realm=realm)
+
+ result['changed'] = True
+ result['msg'] = "Group {name} has been deleted".format(name=before_group['name'])
+
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py
new file mode 100644
index 00000000..e84d8a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Thierno IB. BARRY @barryib
+# Sponsored by Polyconseil http://polyconseil.fr.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+ - This module can be used to manage Kibana plugins.
+author: Thierno IB. BARRY (@barryib)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from.
+ - For a local file, prefix its absolute path with file://
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h etc."
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the Kibana binary.
+ default: /opt/kibana/bin/kibana
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Kibana.
+ default: /opt/kibana/installedPlugins/
+ version:
+ description:
+ - Version of the plugin to be installed.
+ - If the plugin exists with a previous version, it will NOT be updated unless C(force) is set to C(yes).
+ force:
+ description:
+ - Delete and re-install the plugin. Can be useful when updating plugins.
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+
+- name: Install specific version of a plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+ version: '2.3.3'
+
+- name: Uninstall Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: absent
+ name: elasticsearch/marvel
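+
+# The task below is an illustrative sketch, not part of the module's original
+# examples; the version number is hypothetical. It forces a delete-and-reinstall,
+# which is how an already-installed plugin is moved to another version.
+- name: Force reinstall of a specific plugin version
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+ version: '2.4.0'
+ force: yes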
+'''
+
+RETURN = '''
+cmd:
+ description: the command launched during plugin management (install or remove)
+ returned: success
+ type: str
+name:
+ description: the plugin name to install or remove
+ returned: success
+ type: str
+url:
+ description: the URL from which the plugin was installed
+ returned: success
+ type: str
+timeout:
+ description: the timeout for plugin download
+ returned: success
+ type: str
+stdout:
+ description: the command stdout
+ returned: success
+ type: str
+stderr:
+ description: the command stderr
+ returned: success
+ type: str
+state:
+ description: the state for the managed plugin
+ returned: success
+ type: str
+'''
+
+import os
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="--install",
+ absent="--remove"
+)
+
+
+def parse_plugin_repo(string):
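+ # e.g. "elastic/elasticsearch-marvel" -> "marvel"; a bare "x-pack" stays "x-pack"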
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # strip an "elasticsearch-" or "es-" prefix, if present
+ for prefix in ("elasticsearch-", "es-"):
+ if repo.startswith(prefix):
+ return repo[len(prefix):]
+
+ return repo
+
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
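+ # Kibana 5.x and later ships a separate kibana-plugin helper next to the main
+ # binary and takes "install <name-or-url>"; 4.x handles plugins through the
+ # kibana binary itself via "plugin --install".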
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "install"]
+ if url:
+ cmd_args.append(url)
+ else:
+ cmd_args.append(plugin_name)
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "remove", plugin_name]
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def get_kibana_version(module, plugin_bin):
+ cmd_args = [plugin_bin, '--version']
+ cmd = " ".join(cmd_args)
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to get Kibana version: %s" % err)
+
+ return out.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
+ plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
+ version=dict(default=None),
+ force=dict(default="no", type="bool")
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
+
+ changed, cmd, out, err = False, '', '', ''
+
+ kibana_version = get_kibana_version(module, plugin_bin)
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present" and not force) or (state == "absent" and not present and not force):
+ module.exit_json(changed=False, name=name, state=state)
+
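+ # pin a release by appending the requested version to the plugin spec
+ # (e.g. "elasticsearch/marvel" with version "2.3.3" becomes "elasticsearch/marvel/2.3.3")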
+ if version:
+ name = name + '/' + version
+
+ if state == "present":
+ if force:
+ remove_plugin(module, plugin_bin, name)
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py
new file mode 100644
index 00000000..f25d7d70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_cdi_upload
+
+short_description: Upload local VM images to CDI Upload Proxy.
+
+
+author: KubeVirt Team (@kubevirt)
+
+
+description:
+ - Use the OpenShift Python SDK to create UploadTokenRequest objects.
+ - Transfer contents of local files to the CDI Upload Proxy.
+
+options:
+ pvc_name:
+ description:
+ - Use to specify the name of the target PersistentVolumeClaim.
+ required: true
+ pvc_namespace:
+ description:
+ - Use to specify the namespace of the target PersistentVolumeClaim.
+ required: true
+ upload_host:
+ description:
+ - URL containing the host and port on which the CDI Upload Proxy is available.
+ - "More info: U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/upload.md#expose-cdi-uploadproxy-service)"
+ upload_host_validate_certs:
+ description:
+ - Whether or not to verify the CDI Upload Proxy's SSL certificates against your system's CA trust store.
+ default: true
+ type: bool
+ aliases: [ upload_host_verify_ssl ]
+ path:
+ description:
+ - Path of local image file to transfer.
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the
+ strategic merge is typically used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+ - requests >= 2.0.0
+'''
+
+EXAMPLES = '''
+- name: Upload local image to pvc-vm1
+ community.general.kubevirt_cdi_upload:
+ pvc_namespace: default
+ pvc_name: pvc-vm1
+ upload_host: https://localhost:8443
+ upload_host_validate_certs: false
+ path: /tmp/cirros-0.4.0-x86_64-disk.img
+'''
+
+RETURN = '''# '''
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+
+SERVICE_ARG_SPEC = {
+ 'pvc_name': {'required': True},
+ 'pvc_namespace': {'required': True},
+ 'upload_host': {'required': True},
+ 'upload_host_validate_certs': {
+ 'type': 'bool',
+ 'default': True,
+ 'aliases': ['upload_host_verify_ssl']
+ },
+ 'path': {'required': True},
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+}
+
+
+class KubeVirtCDIUpload(KubernetesRawModule):
+ def __init__(self, *args, **kwargs):
+ super(KubeVirtCDIUpload, self).__init__(*args, k8s_kind='UploadTokenRequest', **kwargs)
+
+ if not HAS_REQUESTS:
+ self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(SERVICE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ """ Module execution """
+
+ API = 'v1alpha1'
+ KIND = 'UploadTokenRequest'
+
+ self.client = self.get_api_client()
+
+ api_version = 'upload.cdi.kubevirt.io/{0}'.format(API)
+ pvc_name = self.params.get('pvc_name')
+ pvc_namespace = self.params.get('pvc_namespace')
+ upload_host = self.params.get('upload_host')
+ upload_host_verify_ssl = self.params.get('upload_host_validate_certs')
+ path = self.params.get('path')
+
+ definition = defaultdict(defaultdict)
+
+ definition['kind'] = KIND
+ definition['apiVersion'] = api_version
+
+ def_meta = definition['metadata']
+ def_meta['name'] = pvc_name
+ def_meta['namespace'] = pvc_namespace
+
+ def_spec = definition['spec']
+ def_spec['pvcName'] = pvc_name
+
+ # Open the image up front so a missing or unreadable file fails before any API calls
+ imgfile = open(path, 'rb')
+
+ resource = self.find_resource(KIND, api_version, fail=True)
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+
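+ # the UploadTokenRequest created above returns a short-lived bearer token in
+ # its status; presenting it to the upload proxy authorizes the file upload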
+ headers = {'Authorization': "Bearer {0}".format(result['result']['status']['token'])}
+ url = "{0}/{1}/upload".format(upload_host, API)
+ ret = requests.post(url, data=imgfile, headers=headers, verify=upload_host_verify_ssl)
+
+ if ret.status_code != 200:
+ self.fail_request("Something went wrong while uploading data", method='POST', url=url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ self.exit_json(changed=True)
+
+ def fail_request(self, msg, **kwargs):
+ req_info = {}
+ for k, v in kwargs.items():
+ req_info['req_' + k] = v
+ self.fail_json(msg=msg, **req_info)
+
+
+def main():
+ module = KubeVirtCDIUpload()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py
new file mode 100644
index 00000000..7e0776c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_preset
+
+short_description: Manage KubeVirt virtual machine presets
+
+description:
+ - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machine presets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine presets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine preset.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine preset exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine presets."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: present
+ name: vmi-preset-small
+ namespace: vms
+ memory: 64M
+ selector:
+ matchLabels:
+ kubevirt.io/vmPreset: vmi-preset-small
+
+- name: Remove virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: absent
+ name: vmi-preset-small
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_preset:
+ description:
+ - The virtual machine preset managed by the user.
+ - "This dictionary contains all values returned by the KubeVirt API; all options
+ are described here: U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC
+)
+
+
+KIND = 'VirtualMachineInstancePreset'
+VMP_ARG_SPEC = {
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMPreset(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VMP_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ # FIXME: Devices must be set, but we don't yet support any
+ # attributes there, remove when we do:
+ definition['spec']['domain']['devices'] = dict()
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+ # Execute the CRUD of the VM:
+ dummy, definition = self.construct_vm_definition(KIND, definition, definition, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_preset': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMPreset()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py
new file mode 100644
index 00000000..5687c23d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_pvc
+
+short_description: Manage PVCs on Kubernetes
+
+
+author: KubeVirt Team (@kubevirt)
+
+description:
+ - Use the OpenShift Python SDK to manage PVCs on Kubernetes
+ - Supports the Containerized Data Importer out of the box
+
+options:
+ resource_definition:
+ description:
+ - "A partial YAML definition of the PVC object being created/updated. Here you can define Kubernetes
+ PVC Resource parameters not covered by this module's parameters."
+ - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+ I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+ aliases:
+ - definition
+ - inline
+ type: dict
+ state:
+ description:
+ - "Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
+ created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
+ C(present), an existing object will be patched, if its attributes differ from those specified using
+ module options and I(resource_definition)."
+ default: present
+ choices:
+ - present
+ - absent
+ force:
+ description:
+ - If set to C(True), and I(state) is C(present), an existing object will be replaced.
+ default: false
+ type: bool
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type.
+ - "This defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources."
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - If more than one merge_type is given, the merge_types will be tried in order
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ name:
+ description:
+ - Use to specify a PVC object name.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Use to specify a PVC object namespace.
+ required: true
+ type: str
+ annotations:
+ description:
+ - Annotations attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ type: dict
+ labels:
+ description:
+ - Labels attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ selector:
+ description:
+ - A label query over volumes to consider for binding.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ access_modes:
+ description:
+ - Contains the desired access modes the volume should have.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes)"
+ type: list
+ size:
+ description:
+ - How much storage to allocate to the PVC.
+ type: str
+ aliases:
+ - storage
+ storage_class_name:
+ description:
+ - Name of the StorageClass required by the claim.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1)"
+ type: str
+ volume_mode:
+ description:
+ - "This defines what type of volume is required by the claim. Value of Filesystem is implied when not
+ included in claim spec. This is an alpha feature of kubernetes and may change in the future."
+ type: str
+ volume_name:
+ description:
+ - This is the binding reference to the PersistentVolume backing this claim.
+ type: str
+ cdi_source:
+ description:
+ - "If data is to be copied onto the PVC using the Containerized Data Importer you can specify the source of
+ the data (along with any additional configuration) as well as it's format."
+ - "Valid source types are: blank, http, s3, registry, pvc and upload. The last one requires using the
+ M(community.general.kubevirt_cdi_upload) module to actually perform an upload."
+ - "Source data format is specified using the optional I(content_type). Valid options are C(kubevirt)
+ (default; raw image) and C(archive) (tar.gz)."
+ - "This uses the DataVolume source syntax:
+ U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/datavolumes.md#https3registry-source)"
+ type: dict
+ wait:
+ description:
+ - "If set, this module will wait for the PVC to become bound and CDI (if enabled) to finish its operation
+ before returning."
+ - "Used only if I(state) set to C(present)."
+ - "Unless used in conjunction with I(cdi_source), this might result in a timeout, as clusters may be configured
+ to not bind PVCs until first usage."
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - Specifies how much time in seconds to wait for PVC creation to complete if I(wait) option is enabled.
+ - Default value is reasonably high due to an expectation that CDI might take a while to finish its operation.
+ type: int
+ default: 300
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create a PVC and import data from an external source
+ community.general.kubevirt_pvc:
+ name: pvc1
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ http:
+ url: https://www.source.example/path/of/data/vm.img
+ # If the URL points to a tar.gz containing the disk image, uncomment the line below:
+ #content_type: archive
+
+- name: Create a PVC as a clone from a different PVC
+ community.general.kubevirt_pvc:
+ name: pvc2
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ pvc:
+ namespace: source-ns
+ name: source-pvc
+
+- name: Create a PVC ready for data upload
+ community.general.kubevirt_pvc:
+ name: pvc3
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ upload: yes
+ # You need the kubevirt_cdi_upload module to actually upload something
+
+- name: Create a PVC with a blank raw image
+ community.general.kubevirt_pvc:
+ name: pvc4
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ blank: yes
+
+- name: Create a PVC and fill it with data from a container
+ community.general.kubevirt_pvc:
+ name: pvc5
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ registry:
+ url: "docker://kubevirt/fedora-cloud-registry-disk-demo"
+
+'''
+
+RETURN = '''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+      description: The REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+      description: Returned only when multiple YAML documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+from ansible_collections.community.general.plugins.module_utils.kubevirt import virtdict, KubeVirtRawModule
+
+
+PVC_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent'
+ ],
+ 'default': 'present'
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'resource_definition': {
+ 'type': 'dict',
+ 'aliases': ['definition', 'inline']
+ },
+ 'labels': {'type': 'dict'},
+ 'annotations': {'type': 'dict'},
+ 'selector': {'type': 'dict'},
+ 'access_modes': {'type': 'list'},
+ 'size': {
+ 'type': 'str',
+ 'aliases': ['storage']
+ },
+ 'storage_class_name': {'type': 'str'},
+ 'volume_mode': {'type': 'str'},
+ 'volume_name': {'type': 'str'},
+ 'cdi_source': {'type': 'dict'},
+ 'wait': {
+ 'type': 'bool',
+ 'default': False
+ },
+ 'wait_timeout': {
+ 'type': 'int',
+ 'default': 300
+ }
+}
+
+
+class CreatePVCFailed(Exception):
+ pass
+
+
+class KubevirtPVC(KubernetesRawModule):
+ def __init__(self):
+ super(KubevirtPVC, self).__init__()
+
+ @property
+ def argspec(self):
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(PVC_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _parse_cdi_source(self, _cdi_src, metadata):
+ cdi_src = copy.deepcopy(_cdi_src)
+ annotations = metadata['annotations']
+ labels = metadata['labels']
+
+ valid_content_types = ('kubevirt', 'archive')
+ valid_sources = ('http', 's3', 'pvc', 'upload', 'blank', 'registry')
+
+ if 'content_type' in cdi_src:
+ content_type = cdi_src.pop('content_type')
+ if content_type not in valid_content_types:
+ raise ValueError("cdi_source.content_type must be one of {0}, not: '{1}'".format(
+ valid_content_types, content_type))
+ annotations['cdi.kubevirt.io/storage.contentType'] = content_type
+
+ if len(cdi_src) != 1:
+ raise ValueError("You must specify exactly one valid CDI source, not {0}: {1}".format(len(cdi_src), tuple(cdi_src.keys())))
+
+ src_type = tuple(cdi_src.keys())[0]
+ src_spec = cdi_src[src_type]
+
+ if src_type not in valid_sources:
+ raise ValueError("Got an invalid CDI source type: '{0}', must be one of {1}".format(src_type, valid_sources))
+
+ # True for all cases save one
+ labels['app'] = 'containerized-data-importer'
+
+ if src_type == 'upload':
+ annotations['cdi.kubevirt.io/storage.upload.target'] = ''
+ elif src_type == 'blank':
+ annotations['cdi.kubevirt.io/storage.import.source'] = 'none'
+ elif src_type == 'pvc':
+ if not isinstance(src_spec, dict) or sorted(src_spec.keys()) != ['name', 'namespace']:
+ raise ValueError("CDI Source 'pvc' requires specifying 'name' and 'namespace' (and nothing else)")
+ labels['app'] = 'host-assisted-cloning'
+ annotations['k8s.io/CloneRequest'] = '{0}/{1}'.format(src_spec['namespace'], src_spec['name'])
+ elif src_type in ('http', 's3', 'registry'):
+ if not isinstance(src_spec, dict) or 'url' not in src_spec:
+ raise ValueError("CDI Source '{0}' requires specifying 'url'".format(src_type))
+ unknown_params = set(src_spec.keys()).difference(set(('url', 'secretRef', 'certConfigMap')))
+ if unknown_params:
+ raise ValueError("CDI Source '{0}' does not know recognize params: {1}".format(src_type, tuple(unknown_params)))
+ annotations['cdi.kubevirt.io/storage.import.source'] = src_type
+ annotations['cdi.kubevirt.io/storage.import.endpoint'] = src_spec['url']
+ if 'secretRef' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.secretName'] = src_spec['secretRef']
+ if 'certConfigMap' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.certConfigMap'] = src_spec['certConfigMap']
+
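+    # Illustrative sketch (comments only, not executed), assuming an http
+    # source: for cdi_source={'http': {'url': 'https://www.source.example/disk.qcow2'}}
+    # the parser above leaves the PVC metadata with roughly:
+    #   labels['app'] = 'containerized-data-importer'
+    #   annotations['cdi.kubevirt.io/storage.import.source'] = 'http'
+    #   annotations['cdi.kubevirt.io/storage.import.endpoint'] = 'https://www.source.example/disk.qcow2'
+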
+ def _wait_for_creation(self, resource, uid):
+ return_obj = None
+ desired_cdi_status = 'Succeeded'
+        use_cdi = bool(self.params.get('cdi_source'))
+ if use_cdi and 'upload' in self.params['cdi_source']:
+ desired_cdi_status = 'Running'
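+            # For 'upload' sources the CDI pod is expected to keep 'Running'
+            # while it waits for data (pushed later via the
+            # community.general.kubevirt_cdi_upload module), so waiting for
+            # 'Succeeded' here would never finish.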
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ metadata = entity.metadata
+ if not hasattr(metadata, 'uid') or metadata.uid != uid:
+ continue
+ if entity.status.phase == 'Bound':
+ if use_cdi and hasattr(metadata, 'annotations'):
+ import_status = metadata.annotations.get('cdi.kubevirt.io/storage.pod.phase')
+ if import_status == desired_cdi_status:
+ return_obj = entity
+ break
+ elif import_status == 'Failed':
+ raise CreatePVCFailed("PVC creation incomplete; importing data failed")
+ else:
+ return_obj = entity
+ break
+ elif entity.status.phase == 'Failed':
+ raise CreatePVCFailed("PVC creation failed")
+
+ if not return_obj:
+ raise CreatePVCFailed("PVC creation timed out")
+
+ return self.fix_serialization(return_obj)
+
+ def execute_module(self):
+ KIND = 'PersistentVolumeClaim'
+ API = 'v1'
+
+ definition = virtdict()
+ definition['kind'] = KIND
+ definition['apiVersion'] = API
+
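+        # Note (assumption): virtdict() from module_utils.kubevirt behaves as a
+        # recursively nesting defaultdict, which is why deep assignments such as
+        # spec['resources']['requests']['storage'] below work without creating
+        # the intermediate dicts first.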
+ metadata = definition['metadata']
+ metadata['name'] = self.params.get('name')
+ metadata['namespace'] = self.params.get('namespace')
+ if self.params.get('annotations'):
+ metadata['annotations'] = self.params.get('annotations')
+ if self.params.get('labels'):
+ metadata['labels'] = self.params.get('labels')
+ if self.params.get('cdi_source'):
+ self._parse_cdi_source(self.params.get('cdi_source'), metadata)
+
+ spec = definition['spec']
+ if self.params.get('access_modes'):
+ spec['accessModes'] = self.params.get('access_modes')
+ if self.params.get('size'):
+ spec['resources']['requests']['storage'] = self.params.get('size')
+ if self.params.get('storage_class_name'):
+ spec['storageClassName'] = self.params.get('storage_class_name')
+ if self.params.get('selector'):
+ spec['selector'] = self.params.get('selector')
+ if self.params.get('volume_mode'):
+ spec['volumeMode'] = self.params.get('volume_mode')
+ if self.params.get('volume_name'):
+ spec['volumeName'] = self.params.get('volume_name')
+
+ # 'resource_definition:' has lower priority than module parameters
+ definition = dict(KubeVirtRawModule.merge_dicts(definition, self.resource_definitions[0]))
+
+ self.client = self.get_api_client()
+ resource = self.find_resource(KIND, API, fail=True)
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+ if self.params.get('wait') and self.params.get('state') == 'present':
+ result['result'] = self._wait_for_creation(resource, result['result']['metadata']['uid'])
+
+ self.exit_json(**result)
+
+
+def main():
+ module = KubevirtPVC()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py
new file mode 100644
index 00000000..d1fdc394
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_rs
+
+short_description: Manage KubeVirt virtual machine replica sets
+
+description:
+    - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machine replica sets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine replica sets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine replica set.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine replica set exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine."
+ required: true
+ type: dict
+ replicas:
+ description:
+ - Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
+            - Replicas defaults to 1 for a newly created replica set.
+ type: int
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: present
+ name: myvmir
+ namespace: vms
+ wait: true
+ replicas: 3
+ memory: 64M
+ labels:
+ myvmi: myvmi
+ selector:
+ matchLabels:
+ myvmi: myvmi
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Remove virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: absent
+ name: myvmir
+ namespace: vms
+ wait: true
+'''
+
+RETURN = '''
+kubevirt_rs:
+ description:
+    - The virtual machine replica set managed by the user.
+    - "This dictionary contains all values returned by the KubeVirt API; all options
+       are described at U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+)
+
+
+KIND = 'VirtualMachineInstanceReplicaSet'
+VMIR_ARG_SPEC = {
+ 'replicas': {'type': 'int'},
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMIRS(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
+ argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
+ return argument_spec
+
+ def wait_for_replicas(self, replicas):
+ """ Wait for ready_replicas to equal the requested number of replicas. """
+ resource = self.find_supported_resource(KIND)
+ return_obj = None
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ readyReplicas = status.get('readyReplicas', 0)
+ if readyReplicas == replicas:
+ return_obj = entity
+ break
+
+ if not return_obj:
+ self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas is None:
+ self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas != replicas:
+ self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
+ "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
+ return return_obj.to_dict()
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+ replicas = self.params.get('replicas')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ if replicas is not None:
+ definition['spec']['replicas'] = replicas
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+        # Execute the CRUD of the VM:
+ template = definition['spec']['template']
+ dummy, definition = self.construct_vm_definition(KIND, definition, template, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # When creating a new VMIRS object without specifying `replicas`, assume it's '1' to make the
+ # wait logic work correctly
+ if changed and result_crud['method'] == 'create' and replicas is None:
+ replicas = 1
+
+ # Wait for the new number of ready replicas after a CRUD update
+ # Note1: doesn't work correctly when reducing number of replicas due to how VMIRS works (as of kubevirt 1.5.0)
+ # Note2: not the place to wait for the VMIs to get deleted when deleting the VMIRS object; that *might* be
+ # achievable in execute_crud(); keywords: orphanDependents, propagationPolicy, DeleteOptions
+ if self.params.get('wait') and replicas is not None and self.params.get('state') == 'present':
+ result = self.wait_for_replicas(replicas)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_rs': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMIRS()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py
new file mode 100644
index 00000000..3054b1a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_template
+
+short_description: Manage KubeVirt templates
+
+description:
+    - Use the OpenShift Python SDK to manage the state of KubeVirt templates.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ name:
+ description:
+ - Name of the Template object.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the Template object exists.
+ required: true
+ type: str
+ objects:
+ description:
+ - List of any valid API objects, such as a I(DeploymentConfig), I(Service), etc. The object
+ will be created exactly as defined here, with any parameter values substituted in prior to creation.
+ The definition of these objects can reference parameters defined earlier.
+    - The list can also contain objects of the I(VirtualMachine) kind. When passing a I(VirtualMachine),
+      you must use the Ansible parameter structure, not the Kubernetes API structure. For more information,
+      see the M(community.general.kubevirt_vm) module and the EXAMPLES section below.
+ type: list
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+ display_name:
+ description:
+ - "A brief, user-friendly name, which can be employed by user interfaces."
+ type: str
+ description:
+ description:
+ - A description of the template.
+    - "Include enough detail that the user will understand what is being deployed
+      and any caveats they need to know before deploying. It should also provide links to additional information,
+      such as a README file."
+ type: str
+ long_description:
+ description:
+ - "Additional template description. This may be displayed by the service catalog, for example."
+ type: str
+ provider_display_name:
+ description:
+ - "The name of the person or organization providing the template."
+ type: str
+ documentation_url:
+ description:
+ - "A URL referencing further documentation for the template."
+ type: str
+ support_url:
+ description:
+ - "A URL where support can be obtained for the template."
+ type: str
+ editable:
+ description:
+ - "Extension for hinting at which elements should be considered editable.
+ List of jsonpath selectors. The jsonpath root is the objects: element of the template."
+    - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: list
+ default_disk:
+ description:
+ - "The goal of default disk is to define what kind of disk is supported by the OS mainly in
+ terms of bus (ide, scsi, sata, virtio, ...)"
+    - The C(default_disk) parameter defines a configuration overlay that is applied on top of disks
+      during virtual machine creation to provide global compatibility and/or performance defaults.
+    - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: dict
+ default_volume:
+ description:
+ - "The goal of default volume is to be able to configure mostly performance parameters like
+ caches if those are exposed by the underlying volume implementation."
+    - The C(default_volume) parameter defines a configuration overlay that is applied on top of volumes
+      during virtual machine creation to provide global compatibility and/or performance defaults.
+    - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: dict
+ default_nic:
+ description:
+ - "The goal of default network is similar to I(default_disk) and should be used as a template
+ to ensure OS compatibility and performance."
+ - The C(default_nic) parameter define configuration overlay for nic that will be applied on top of nics
+ during virtual machine creation to define global compatibility and/or performance defaults defined here.
+ - This is parameter can be used only when kubevirt addon is installed on your openshift cluster.
+ type: dict
+ default_network:
+ description:
+ - "The goal of default network is similar to I(default_volume) and should be used as a template
+ that specifies performance and connection parameters (L2 bridge for example)"
+ - The C(default_network) parameter define configuration overlay for networks that will be applied on top of networks
+ during virtual machine creation to define global compatibility and/or performance defaults defined here.
+ - This is parameter can be used only when kubevirt addon is installed on your openshift cluster.
+ type: dict
+ icon_class:
+ description:
+ - "An icon to be displayed with your template in the web console. Choose from our existing logo
+ icons when possible. You can also use icons from FontAwesome. Alternatively, provide icons through
+ CSS customizations that can be added to an OpenShift Container Platform cluster that uses your template.
+ You must specify an icon class that exists, or it will prevent falling back to the generic icon."
+ type: str
+ parameters:
+ description:
+ - "Parameters allow a value to be supplied by the user or generated when the template is instantiated.
+ Then, that value is substituted wherever the parameter is referenced. References can be defined in any
+ field in the objects list field. This is useful for generating random passwords or allowing the user to
+ supply a host name or other user-specific value that is required to customize the template."
+ - "More information can be found at: U(https://docs.openshift.com/container-platform/3.6/dev_guide/templates.html#writing-parameters)"
+ type: list
+ version:
+ description:
+ - Template structure version.
+    - This parameter can be used only when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: str
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.kubernetes.k8s_state_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create template 'mytemplate'
+ community.general.kubevirt_template:
+ state: present
+ name: myvmtemplate
+ namespace: templates
+ display_name: Generic cirros template
+ description: Basic cirros template
+ long_description: Verbose description of cirros template
+ provider_display_name: Just Be Cool, Inc.
+ documentation_url: http://theverycoolcompany.com
+ support_url: http://support.theverycoolcompany.com
+ icon_class: icon-linux
+ default_disk:
+ disk:
+ bus: virtio
+ default_nic:
+ model: virtio
+ default_network:
+ resource:
+ resourceName: bridge.network.kubevirt.io/cnvmgmt
+ default_volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ objects:
+ - name: ${NAME}
+ kind: VirtualMachine
+ memory: ${MEMORY_SIZE}
+ state: present
+ namespace: vms
+ parameters:
+ - name: NAME
+ description: VM name
+ generate: expression
+ from: 'vm-[A-Za-z0-9]{8}'
+ - name: MEMORY_SIZE
+ description: Memory size
+ value: 1Gi
+
+- name: Remove template 'myvmtemplate'
+ community.general.kubevirt_template:
+ state: absent
+ name: myvmtemplate
+ namespace: templates
+'''
+
+RETURN = '''
+kubevirt_template:
+ description:
+ - The template dictionary specification returned by the API.
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ API_GROUP,
+ MAX_SUPPORTED_API_VERSION
+)
+
+
+TEMPLATE_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'objects': {
+ 'type': 'list',
+ },
+ 'display_name': {
+ 'type': 'str',
+ },
+ 'description': {
+ 'type': 'str',
+ },
+ 'long_description': {
+ 'type': 'str',
+ },
+ 'provider_display_name': {
+ 'type': 'str',
+ },
+ 'documentation_url': {
+ 'type': 'str',
+ },
+ 'support_url': {
+ 'type': 'str',
+ },
+ 'icon_class': {
+ 'type': 'str',
+ },
+ 'version': {
+ 'type': 'str',
+ },
+ 'editable': {
+ 'type': 'list',
+ },
+ 'default_disk': {
+ 'type': 'dict',
+ },
+ 'default_volume': {
+ 'type': 'dict',
+ },
+ 'default_network': {
+ 'type': 'dict',
+ },
+ 'default_nic': {
+ 'type': 'dict',
+ },
+ 'parameters': {
+ 'type': 'list',
+ },
+}
+
+
+class KubeVirtVMTemplate(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(TEMPLATE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+
+ # Execute the CRUD of VM template:
+ kind = 'Template'
+ template_api_version = 'template.openshift.io/v1'
+
+ # Fill in template parameters:
+ definition['parameters'] = self.params.get('parameters')
+
+ # Fill in the default Label
+ labels = definition['metadata']['labels']
+ labels['template.cnv.io/type'] = 'vm'
+
+ # Fill in Openshift/Kubevirt template annotations:
+ annotations = definition['metadata']['annotations']
+ if self.params.get('display_name'):
+ annotations['openshift.io/display-name'] = self.params.get('display_name')
+ if self.params.get('description'):
+ annotations['description'] = self.params.get('description')
+ if self.params.get('long_description'):
+ annotations['openshift.io/long-description'] = self.params.get('long_description')
+ if self.params.get('provider_display_name'):
+ annotations['openshift.io/provider-display-name'] = self.params.get('provider_display_name')
+ if self.params.get('documentation_url'):
+ annotations['openshift.io/documentation-url'] = self.params.get('documentation_url')
+ if self.params.get('support_url'):
+ annotations['openshift.io/support-url'] = self.params.get('support_url')
+ if self.params.get('icon_class'):
+ annotations['iconClass'] = self.params.get('icon_class')
+ if self.params.get('version'):
+ annotations['template.cnv.io/version'] = self.params.get('version')
+
+        # TODO: Make it more Ansiblish, so users don't have to specify an API JSON path, but rather Ansible params:
+ if self.params.get('editable'):
+ annotations['template.cnv.io/editable'] = self.params.get('editable')
+
+ # Set defaults annotations:
+ if self.params.get('default_disk'):
+ annotations['defaults.template.cnv.io/disk'] = self.params.get('default_disk').get('name')
+ if self.params.get('default_volume'):
+ annotations['defaults.template.cnv.io/volume'] = self.params.get('default_volume').get('name')
+ if self.params.get('default_nic'):
+ annotations['defaults.template.cnv.io/nic'] = self.params.get('default_nic').get('name')
+ if self.params.get('default_network'):
+ annotations['defaults.template.cnv.io/network'] = self.params.get('default_network').get('name')
+
+ # Process objects:
+ self.client = self.get_api_client()
+ definition['objects'] = []
+ objects = self.params.get('objects') or []
+ for obj in objects:
+ if obj['kind'] != 'VirtualMachine':
+ definition['objects'].append(obj)
+ else:
+ vm_definition = virtdict()
+
+ # Set VM defaults:
+ if self.params.get('default_disk'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['disks'] = [self.params.get('default_disk')]
+ if self.params.get('default_volume'):
+ vm_definition['spec']['template']['spec']['volumes'] = [self.params.get('default_volume')]
+ if self.params.get('default_nic'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['interfaces'] = [self.params.get('default_nic')]
+ if self.params.get('default_network'):
+ vm_definition['spec']['template']['spec']['networks'] = [self.params.get('default_network')]
+
+ # Set kubevirt API version:
+ vm_definition['apiVersion'] = '%s/%s' % (API_GROUP, MAX_SUPPORTED_API_VERSION)
+
+ # Construct k8s vm API object:
+ vm_template = vm_definition['spec']['template']
+ dummy, vm_def = self.construct_vm_template_definition('VirtualMachine', vm_definition, vm_template, obj)
+
+ definition['objects'].append(vm_def)
+
+ # Create template:
+ resource = self.client.resources.get(api_version=template_api_version, kind=kind, name='templates')
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': result['changed'],
+ 'kubevirt_template': result.pop('result'),
+ 'result': result,
+ })
+
+
+def main():
+ module = KubeVirtVMTemplate()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py
new file mode 100644
index 00000000..4466bee2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_vm
+
+short_description: Manage KubeVirt virtual machine
+
+description:
+    - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machines.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
+ - "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
+ - "I(absent) - Remove a virtual machine."
+ - "I(running) - Create or update a virtual machine and run it."
+ - "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
+ default: "present"
+ choices:
+ - present
+ - absent
+ - running
+ - stopped
+ type: str
+ name:
+ description:
+ - Name of the virtual machine.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine exists.
+ required: true
+ type: str
+ ephemeral:
+ description:
+            - If C(true), an ephemeral virtual machine will be created. When destroyed, it won't be accessible again.
+ - Works only with C(state) I(present) and I(absent).
+ type: bool
+ default: false
+ datavolumes:
+ description:
+ - "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
+ launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
+ it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user."
+ type: list
+ template:
+ description:
+ - "Name of Template to be used in creation of a virtual machine."
+ type: str
+ template_parameters:
+ description:
+ - "New values of parameters from Template."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Start virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+
+- name: Create virtual machine 'myvm' and start it
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64Mi
+ cpu_cores: 1
+ bootloader: efi
+ smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
+ cpu_model: Conroe
+ headless: true
+ hugepage_size: 2Mi
+ tablets:
+ - bus: virtio
+ name: tablet1
+ cpu_limit: 3
+ cpu_shares: 2
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Create virtual machine 'myvm' with multus network interface
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: vms
+ memory: 512M
+ interfaces:
+ - name: default
+ bridge: {}
+ network:
+ pod: {}
+ - name: mynet
+ bridge: {}
+ network:
+ multus:
+ networkName: mynetconf
+
+- name: Combine inline definition with Ansible parameters
+ community.general.kubevirt_vm:
+ # Kubernetes specification:
+ definition:
+ metadata:
+ labels:
+ app: galaxy
+ service: web
+ origin: vmware
+
+ # Ansible parameters:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64M
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start ephemeral virtual machine 'myvm' and wait to be running
+ community.general.kubevirt_vm:
+ ephemeral: true
+ state: running
+ wait: true
+ wait_timeout: 180
+ name: myvm
+ namespace: vms
+ memory: 64M
+ labels:
+ kubevirt.io/vm: myvm
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start fedora vm with cloud init
+ community.general.kubevirt_vm:
+ state: running
+ wait: true
+ name: myvm
+ namespace: vms
+ memory: 1024M
+ cloud_init_nocloud:
+ userData: |-
+ #cloud-config
+ password: fedora
+ chpasswd: { expire: False }
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/fedora-cloud-container-disk-demo:latest
+ path: /disk/fedora.qcow2
+ disk:
+ bus: virtio
+ node_affinity:
+ soft:
+ - weight: 1
+ term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S2
+
+- name: Create virtual machine with datavolume and specify node affinity
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: default
+ memory: 1024Mi
+ datavolumes:
+ - name: mydv
+ source:
+ http:
+ url: https://url/disk.qcow2
+ pvc:
+ accessModes:
+ - ReadWriteOnce
+ storage: 5Gi
+ node_affinity:
+ hard:
+ - term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S1
+
+- name: Remove virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: absent
+ name: myvm
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_vm:
+ description:
+ - The virtual machine dictionary specification returned by the API.
+ - "This dictionary contains all values returned by the KubeVirt API all options
+ are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+ VM_SPEC_DEF_ARG_SPEC
+)
+
+VM_ARG_SPEC = {
+ 'ephemeral': {'type': 'bool', 'default': False},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent', 'running', 'stopped'
+ ],
+ 'default': 'present'
+ },
+ 'datavolumes': {'type': 'list'},
+ 'template': {'type': 'str'},
+ 'template_parameters': {'type': 'dict'},
+}
+
+# Which params (can) modify 'spec:' contents of a VM:
+VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
+
+
+class KubeVirtVM(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VM_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _wait_for_vmi_running(self):
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ phase = status.get('phase', None)
+ if phase == 'Running':
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
+
+ def _wait_for_vm_state(self, new_state):
+ if new_state == 'running':
+ want_created = want_ready = True
+ else:
+ want_created = want_ready = False
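+        # For 'running' we wait until the VM reports a created and ready VMI;
+        # for any other target state we wait for both flags to become False.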
+
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ created = status.get('created', False)
+ ready = status.get('ready', False)
+ if (created, ready) == (want_created, want_ready):
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
+ "Maybe try a higher wait_timeout value?".format(new_state))
+
+ def manage_vm_state(self, new_state, already_changed):
+        new_running = (new_state == 'running')
+ changed = False
+ k8s_obj = {}
+
+ if not already_changed:
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not k8s_obj:
+ self.fail("VirtualMachine object disappeared during module operation, aborting.")
+ if k8s_obj.spec.get('running', False) == new_running:
+ return False, k8s_obj
+
+ newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
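+            # The resulting merge patch is minimal; illustratively, for a VM
+            # named 'myvm' in namespace 'vms' being started it is roughly:
+            #   {"metadata": {"name": "myvm", "namespace": "vms"},
+            #    "spec": {"running": true}}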
+ k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
+ self.name, self.namespace, merge_type='merge')
+ if err:
+ self.fail_json(**err)
+ else:
+ changed = True
+
+ if self.params.get('wait'):
+ k8s_obj = self._wait_for_vm_state(new_state)
+
+ return changed, k8s_obj
+
+ def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
+ def set_template_default(default_name, default_name_index, definition_spec):
+ default_value = proccess_template['metadata']['annotations'][default_name]
+ if default_value:
+ values = definition_spec[default_name_index]
+ default_values = [d for d in values if d.get('name') == default_value]
+ defaults[default_name_index] = default_values
+ if definition_spec[default_name_index] is None:
+ definition_spec[default_name_index] = []
+ definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
+
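+        # Intent of the helper above: each 'defaults.template.cnv.io/*'
+        # annotation on the template names the entry (disk, volume, nic or
+        # network) to treat as the default; the matching entry is collected
+        # into 'defaults' while the remaining entries stay in the definition.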
+ devices = processedtemplate['spec']['template']['spec']['domain']['devices']
+ spec = processedtemplate['spec']['template']['spec']
+
+ set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
+ set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
+ set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
+ set_template_default('defaults.template.cnv.io/network', 'networks', spec)
+
+ def construct_definition(self, kind, our_state, ephemeral):
+ definition = virtdict()
+ processedtemplate = {}
+
+ # Construct the API object definition:
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+ vm_template = self.params.get('template')
+ if vm_template:
+ # Find the template the VM should be created from:
+ template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
+ proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
+
+ # Set proper template values taken from module option 'template_parameters':
+ for k, v in self.params.get('template_parameters', {}).items():
+ for parameter in proccess_template.parameters:
+ if parameter.name == k:
+ parameter.value = v
+
+            # Process the template:
+ processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
+ processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
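+            # Creating against the 'processedtemplates' endpoint renders the
+            # template server-side (substituting the parameter values set above)
+            # and returns the rendered objects; the first object is assumed to
+            # be the VirtualMachine definition.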
+
+ # Process defaults of the template:
+ self._process_template_defaults(proccess_template, processedtemplate, defaults)
+
+ if not ephemeral:
+ definition['spec']['running'] = our_state == 'running'
+ template = definition if ephemeral else definition['spec']['template']
+ template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
+ dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
+
+ return self.merge_dicts(definition, processedtemplate)
+
+ def execute_module(self):
+ # Parse parameters specific to this module:
+ ephemeral = self.params.get('ephemeral')
+ k8s_state = our_state = self.params.get('state')
+ kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
+ _used_params = [name for name in self.params if self.params[name] is not None]
+ # Is 'spec:' getting changed?
+        vm_spec_change = bool(set(VM_SPEC_PARAMS).intersection(_used_params))
+ changed = False
+ crud_executed = False
+ method = ''
+
+ # Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
+ if ephemeral:
+ # Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
+ if our_state == 'running':
+ self.params['state'] = k8s_state = 'present'
+ elif our_state == 'stopped':
+ self.params['state'] = k8s_state = 'absent'
+ else:
+ if our_state != 'absent':
+ self.params['state'] = k8s_state = 'present'
+
+ # Start with fetching the current object to make sure it exists
+ # If it does, but we end up not performing any operations on it, at least we'll be able to return
+ # its current contents as part of the final json
+ self.client = self.get_api_client()
+ self._kind_resource = self.find_supported_resource(kind)
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
+ self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
+
+ # If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
+ # Also check_mode always warrants a CRUD, as that'll produce a sane result
+ if vm_spec_change or k8s_state == 'absent' or self.check_mode:
+ definition = self.construct_definition(kind, our_state, ephemeral)
+ result = self.execute_crud(kind, definition)
+ changed = result['changed']
+ k8s_obj = result['result']
+ method = result['method']
+ crud_executed = True
+
+ if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
+ # Waiting for k8s_state==absent is handled inside execute_crud()
+ k8s_obj = self._wait_for_vmi_running()
+
+ if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
+ # State==present/absent doesn't involve any additional VMI state management and is fully
+ # handled inside execute_crud() (including wait logic)
+ patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
+ changed = changed or patched
+ if changed:
+ method = method or 'patch'
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_vm': self.fix_serialization(k8s_obj),
+ 'method': method
+ })
+
+
+def main():
+ module = KubeVirtVM()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py
new file mode 100644
index 00000000..919d8d7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py
@@ -0,0 +1,514 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Martin Migasiewicz <migasiew.nk@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: launchd
+author:
+- Martin Migasiewicz (@martinm82)
+short_description: Manage macOS services
+version_added: 1.0.0
+description:
+- Manage launchd services on target macOS hosts.
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - Launchd does not support C(restarted) nor C(reloaded) natively.
+ These will trigger a stop/start (restarted) or an unload/load
+ (reloaded).
+ - C(restarted) unloads and loads the service before start to ensure
+ that the latest job definition (plist) is used.
+ - C(reloaded) unloads and loads the service to ensure that the latest
+ job definition (plist) is used. Whether a service is started or
+ stopped depends on the content of the definition file.
+ type: str
+ choices: [ reloaded, restarted, started, stopped, unloaded ]
+ enabled:
+ description:
+ - Whether the service should start on boot.
+ - B(At least one of state and enabled are required.)
+ type: bool
+ force_stop:
+ description:
+ - Whether the service should not be restarted automatically by launchd.
+ - Services might have the 'KeepAlive' attribute set to true in a launchd configuration.
+      If it is set to true, stopping the service will cause launchd to start the service again.
+ - Set this option to C(yes) to let this module change the 'KeepAlive' attribute to false.
+ type: bool
+ default: no
+notes:
+- A user must be privileged to manage services using this module.
+requirements:
+- A system managed by launchd
+- The plistlib python library
+'''
+
+EXAMPLES = r'''
+- name: Make sure spotify webhelper is started
+ community.general.launchd:
+ name: com.spotify.webhelper
+ state: started
+
+- name: Deploy custom memcached job definition
+ template:
+ src: org.memcached.plist.j2
+ dest: /Library/LaunchDaemons/org.memcached.plist
+
+- name: Run memcached
+ community.general.launchd:
+ name: org.memcached
+ state: started
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+
+- name: Force stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+ force_stop: yes
+
+- name: Restart memcached
+ community.general.launchd:
+ name: org.memcached
+ state: restarted
+
+- name: Unload memcached
+ community.general.launchd:
+ name: org.memcached
+ state: unloaded
+'''
+
+RETURN = r'''
+status:
+ description: Metadata about service status
+ returned: always
+ type: dict
+ sample:
+ {
+ "current_pid": "-",
+ "current_state": "stopped",
+ "previous_pid": "82636",
+ "previous_state": "running"
+ }
+'''
+
+import os
+import plistlib
+from abc import ABCMeta, abstractmethod
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class ServiceState:
+ UNKNOWN = 0
+ LOADED = 1
+ STOPPED = 2
+ STARTED = 3
+ UNLOADED = 4
+
+ @staticmethod
+ def to_string(state):
+ strings = {
+ ServiceState.UNKNOWN: 'unknown',
+ ServiceState.LOADED: 'loaded',
+ ServiceState.STOPPED: 'stopped',
+ ServiceState.STARTED: 'started',
+ ServiceState.UNLOADED: 'unloaded'
+ }
+ return strings[state]
+
+
+class Plist:
+ def __init__(self, module, service):
+ self.__changed = False
+ self.__service = service
+
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+
+ # Check if readPlist is available or not
+ self.old_plistlib = hasattr(plistlib, 'readPlist')
+
+ self.__file = self.__find_service_plist(service)
+ if self.__file is None:
+ msg = 'Unable to infer the path of %s service plist file' % service
+ if pid is None and state == ServiceState.UNLOADED:
+ msg += ' and it was not found among active services'
+ module.fail_json(msg=msg)
+ self.__update(module)
+
+ @staticmethod
+ def __find_service_plist(service_name):
+ """Finds the plist file associated with a service"""
+
+ launchd_paths = [
+ os.path.expanduser('~/Library/LaunchAgents'),
+ '/Library/LaunchAgents',
+ '/Library/LaunchDaemons',
+ '/System/Library/LaunchAgents',
+ '/System/Library/LaunchDaemons'
+ ]
+
+ for path in launchd_paths:
+ try:
+ files = os.listdir(path)
+ except OSError:
+ continue
+
+ filename = '%s.plist' % service_name
+ if filename in files:
+ return os.path.join(path, filename)
+ return None
+
+ def __update(self, module):
+ self.__handle_param_enabled(module)
+ self.__handle_param_force_stop(module)
+
+ def __read_plist_file(self, module):
+ service_plist = {}
+ if self.old_plistlib:
+ return plistlib.readPlist(self.__file)
+
+ # readPlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'rb') as plist_fp:
+ service_plist = plistlib.load(plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to read plist file "
+ "%s due to %s" % (self.__file, to_native(e)))
+ return service_plist
+
+ def __write_plist_file(self, module, service_plist=None):
+ if not service_plist:
+ service_plist = {}
+
+ if self.old_plistlib:
+ plistlib.writePlist(service_plist, self.__file)
+ return
+ # writePlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'wb') as plist_fp:
+ plistlib.dump(service_plist, plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to write to plist file "
+ " %s due to %s" % (self.__file, to_native(e)))
+
+ def __handle_param_enabled(self, module):
+ if module.params['enabled'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Enable/disable service startup at boot if requested
+ # Launchctl does not expose functionality to set the RunAtLoad
+ # attribute of a job definition. So we parse and modify the job
+ # definition plist file directly for this purpose.
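+            # Illustrative plist fragment being toggled here:
+            #   <key>RunAtLoad</key>
+            #   <true/>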
+ if module.params['enabled'] is not None:
+ enabled = service_plist.get('RunAtLoad', False)
+ if module.params['enabled'] != enabled:
+ service_plist['RunAtLoad'] = module.params['enabled']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def __handle_param_force_stop(self, module):
+ if module.params['force_stop'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Set KeepAlive to false in case force_stop is defined to avoid
+ # that the service gets restarted when stopping was requested.
+ if module.params['force_stop'] is not None:
+ keep_alive = service_plist.get('KeepAlive', False)
+ if module.params['force_stop'] and keep_alive:
+ service_plist['KeepAlive'] = not module.params['force_stop']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def is_changed(self):
+ return self.__changed
+
+ def get_file(self):
+ return self.__file
+
+
+class LaunchCtlTask(object):
+ __metaclass__ = ABCMeta
+ WAITING_TIME = 5 # seconds
+
+ def __init__(self, module, service, plist):
+ self._module = module
+ self._service = service
+ self._plist = plist
+ self._launch = self._module.get_bin_path('launchctl', True)
+
+ def run(self):
+ """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc.
+ and returns the new state and pid.
+ """
+ self.runCommand()
+ return self.get_state()
+
+ @abstractmethod
+ def runCommand(self):
+ pass
+
+ def get_state(self):
+ rc, out, err = self._launchctl("list")
+ if rc != 0:
+            self._module.fail_json(
+                msg='Failed to get status of %s' % (self._service))
+
+ state = ServiceState.UNLOADED
+ service_pid = "-"
+ status_code = None
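+        # 'launchctl list' prints one job per line with three tab-separated
+        # columns, illustratively: "82636\t0\torg.memcached" -> PID, last exit
+        # status, label. A PID of '-' means the job is loaded but not running.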
+ for line in out.splitlines():
+ if line.strip():
+ pid, last_exit_code, label = line.split('\t')
+ if label.strip() == self._service:
+ service_pid = pid
+ status_code = last_exit_code
+
+ # From launchctl man page:
+ # If the number [...] is negative, it represents the
+ # negative of the signal which killed the job. Thus,
+ # "-15" would indicate that the job was terminated with
+ # SIGTERM.
+ if last_exit_code not in ['0', '-2', '-3', '-9', '-15']:
+ # Something strange happened and we have no clue in
+ # which state the service is now. Therefore we mark
+ # the service state as UNKNOWN.
+ state = ServiceState.UNKNOWN
+ elif pid != '-':
+ # PID seems to be an integer so we assume the service
+ # is started.
+ state = ServiceState.STARTED
+ else:
+ # Exit code is 0 and PID is not available so we assume
+ # the service is stopped.
+ state = ServiceState.STOPPED
+ break
+ return (state, service_pid, status_code, err)
+
+ def start(self):
+ rc, out, err = self._launchctl("start")
+ # Unfortunately launchd does not wait until the process really started.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def stop(self):
+ rc, out, err = self._launchctl("stop")
+ # Unfortunately launchd does not wait until the process really stopped.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def restart(self):
+ # TODO: check for rc, out, err
+ self.stop()
+ return self.start()
+
+ def reload(self):
+ # TODO: check for rc, out, err
+ self.unload()
+ return self.load()
+
+ def load(self):
+ return self._launchctl("load")
+
+ def unload(self):
+ return self._launchctl("unload")
+
+ def _launchctl(self, command):
+        if command in ['load', 'unload']:
+            service_or_plist = self._plist.get_file()
+        elif command in ['start', 'stop']:
+            service_or_plist = self._service
+        else:
+            service_or_plist = ""
+
+ rc, out, err = self._module.run_command(
+ '%s %s %s' % (self._launch, command, service_or_plist))
+
+ if rc != 0:
+ msg = "Unable to %s '%s' (%s): '%s'" % (
+ command, self._service, self._plist.get_file(), err)
+ self._module.fail_json(msg=msg)
+
+ return (rc, out, err)
+
+
+class LaunchCtlStart(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state in (ServiceState.STOPPED, ServiceState.LOADED):
+ self.reload()
+ self.start()
+ elif state == ServiceState.STARTED:
+ # In case the service is already in started state but the
+ # job definition was changed we need to unload/load the
+ # service and start the service again.
+ if self._plist.is_changed():
+ self.reload()
+ self.start()
+ elif state == ServiceState.UNLOADED:
+ self.load()
+ self.start()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and start the service again.
+ self.reload()
+ self.start()
+
+
+class LaunchCtlStop(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStop, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.STOPPED:
+ # In case the service is stopped and we might later decide
+ # to start it, we need to reload the job definition by
+ # forcing an unload and load first.
+ # Afterwards we need to stop it as it might have been
+ # started again (KeepAlive or RunAtLoad).
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state in (ServiceState.STARTED, ServiceState.LOADED):
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and stop the service gracefully.
+ self.reload()
+ self.stop()
+
+
+class LaunchCtlReload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlReload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.UNLOADED:
+ # launchd throws an error if we do an unload on an already
+ # unloaded service.
+ self.load()
+ else:
+ self.reload()
+
+
+class LaunchCtlUnload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlUnload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+ self.unload()
+
+
+class LaunchCtlRestart(LaunchCtlReload):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlRestart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ super(LaunchCtlRestart, self).runCommand()
+ self.start()
+
+
+class LaunchCtlList(LaunchCtlTask):
+ def __init__(self, module, service):
+ super(LaunchCtlList, self).__init__(module, service, None)
+
+ def runCommand(self):
+ # Do nothing, the list functionality is done by the
+ # base class run method.
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']),
+ enabled=dict(type='bool'),
+ force_stop=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[
+ ['state', 'enabled'],
+ ],
+ )
+
+ service = module.params['name']
+ action = module.params['state']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': {},
+ }
+
+ # We will tailor the plist file in case one of the options
+ # (enabled, force_stop) was specified.
+ plist = Plist(module, service)
+ result['changed'] = plist.is_changed()
+
+ # Gather information about the service to be controlled.
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+ result['status']['previous_state'] = ServiceState.to_string(state)
+ result['status']['previous_pid'] = pid
+
+ # Map the actions to specific tasks
+ tasks = {
+ 'started': LaunchCtlStart(module, service, plist),
+ 'stopped': LaunchCtlStop(module, service, plist),
+ 'restarted': LaunchCtlRestart(module, service, plist),
+ 'reloaded': LaunchCtlReload(module, service, plist),
+ 'unloaded': LaunchCtlUnload(module, service, plist)
+ }
+
+ status_code = '0'
+ # Run the requested task
+ if not module.check_mode:
+ state, pid, status_code, err = tasks[action].run()
+
+ result['status']['current_state'] = ServiceState.to_string(state)
+ result['status']['current_pid'] = pid
+ result['status']['status_code'] = status_code
+ result['status']['error'] = err
+
+ if (result['status']['current_state'] != result['status']['previous_state'] or
+ result['status']['current_pid'] != result['status']['previous_pid']):
+ result['changed'] = True
+ if module.check_mode:
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py
new file mode 100644
index 00000000..3c990205
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: layman
+author: "Jakub Jirutka (@jirutka)"
+short_description: Manage Gentoo overlays
+description:
+  - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+    Please note that Layman must be installed on a managed node prior to using this module.
+requirements:
+ - "python >= 2.6"
+ - layman python module
+options:
+ name:
+ description:
+ - The overlay id to install, synchronize, or uninstall.
+ Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
+ required: true
+ type: str
+ list_url:
+ description:
+      - A URL of an alternative overlays list that defines the overlay to install.
+        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+        C(overlay_defs) is read from Layman's configuration.
+ aliases: [url]
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ default: present
+ choices: [present, absent, updated]
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ set to C(no) when no other option exists. Prior to 1.9.3 the code
+ defaulted to C(no).
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+- name: Install the overlay mozilla which is on the central overlays list
+ community.general.layman:
+ name: mozilla
+
+- name: Install the overlay cvut from the specified alternative list
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+
+- name: Update (sync) the overlay cvut or install if not installed yet
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+ state: updated
+
+- name: Update (sync) all of the installed overlays
+ community.general.layman:
+ name: ALL
+ state: updated
+
+- name: Uninstall the overlay cvut
+ community.general.layman:
+ name: cvut
+ state: absent
+'''
+
+import shutil
+import traceback
+
+from os import path
+
+LAYMAN_IMP_ERR = None
+try:
+ from layman.api import LaymanAPI
+ from layman.config import BareConfig
+ HAS_LAYMAN_API = True
+except ImportError:
+ LAYMAN_IMP_ERR = traceback.format_exc()
+ HAS_LAYMAN_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+
+USERAGENT = 'ansible-httpget'
+
+
+class ModuleError(Exception):
+ pass
+
+
+def init_layman(config=None):
+ '''Returns the initialized ``LaymanAPI``.
+
+ :param config: the layman's configuration to use (optional)
+ '''
+ if config is None:
+ config = BareConfig(read_configfile=True, quietness=1)
+ return LaymanAPI(config)
+
+
+def download_url(module, url, dest):
+ '''
+ :param url: the URL to download
+ :param dest: the absolute path of where to save the downloaded content to;
+ it must be writable and not a directory
+
+ :raises ModuleError
+ '''
+
+ # Hack to add params in the form that fetch_url expects
+ module.params['http_agent'] = USERAGENT
+ response, info = fetch_url(module, url)
+ if info['status'] != 200:
+ raise ModuleError("Failed to get %s: %s" % (url, info['msg']))
+
+ try:
+ with open(dest, 'w') as f:
+ shutil.copyfileobj(response, f)
+ except IOError as e:
+ raise ModuleError("Failed to write: %s" % str(e))
+
+
+def install_overlay(module, name, list_url=None):
+    '''Installs the overlay repository. If the overlay is not on the central
+    overlays list, then the ``list_url`` of an alternative list must be provided.
+    The list will be fetched and saved under ``%(overlay_defs)/%(name).xml``
+    (the location of ``overlay_defs`` is read from Layman's configuration).
+
+ :param name: the overlay id
+ :param list_url: the URL of the remote repositories list to look for the overlay
+ definition (optional, default: None)
+
+    :returns: True if the overlay was installed, or False if it already exists
+              (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ # read Layman configuration
+ layman_conf = BareConfig(read_configfile=True)
+ layman = init_layman(layman_conf)
+
+ if layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would add layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ if not layman.is_repo(name):
+ if not list_url:
+ raise ModuleError("Overlay '%s' is not on the list of known "
+ "overlays and URL of the remote list was not provided." % name)
+
+ overlay_defs = layman_conf.get_option('overlay_defs')
+ dest = path.join(overlay_defs, name + '.xml')
+
+ download_url(module, list_url, dest)
+
+ # reload config
+ layman = init_layman()
+
+ if not layman.add_repos(name):
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def uninstall_overlay(module, name):
+ '''Uninstalls the given overlay repository from the system.
+
+ :param name: the overlay id to uninstall
+
+    :returns: True if the overlay was uninstalled, or False if it doesn't exist
+              (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would remove layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ layman.delete_repos(name)
+ if layman.get_errors():
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def sync_overlay(name):
+ '''Synchronizes the specified overlay repository.
+
+ :param name: the overlay repository id to sync
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.sync(name):
+ messages = [str(item[1]) for item in layman.sync_results[2]]
+ raise ModuleError(messages)
+
+
+def sync_overlays():
+ '''Synchronize all of the installed overlays.
+
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ for name in layman.get_installed():
+ sync_overlay(name)
+
+
+def main():
+ # define module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ list_url=dict(aliases=['url']),
+ state=dict(default="present", choices=['present', 'absent', 'updated']),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_LAYMAN_API:
+ module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR)
+
+ state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+ changed = False
+ try:
+ if state == 'present':
+ changed = install_overlay(module, name, url)
+
+ elif state == 'updated':
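+            # 'updated' first tries to install the overlay; if it was already
+            # installed, fall through to a sync instead.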
+ if name == 'ALL':
+ sync_overlays()
+ elif install_overlay(module, name, url):
+ changed = True
+ else:
+ sync_overlay(name)
+ else:
+ changed = uninstall_overlay(module, name)
+
+ except ModuleError as e:
+        module.fail_json(msg=str(e))
+ else:
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py
new file mode 100644
index 00000000..6f850791
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019, Kaarle Ritvanen <kaarle.ritvanen@datakunkku.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: lbu
+
+short_description: Local Backup Utility for Alpine Linux
+
+version_added: '0.2.0'
+
+description:
+- Manage Local Backup Utility of Alpine Linux in run-from-RAM mode
+
+options:
+ commit:
+ description:
+ - Control whether to commit changed files.
+ type: bool
+ exclude:
+ description:
+ - List of paths to exclude.
+ type: list
+ elements: str
+ include:
+ description:
+ - List of paths to include.
+ type: list
+ elements: str
+
+author:
+- Kaarle Ritvanen (@kunkku)
+'''
+
+EXAMPLES = '''
+# Commit changed files (if any)
+- name: Commit
+ community.general.lbu:
+ commit: true
+
+# Exclude path and commit
+- name: Exclude directory
+ community.general.lbu:
+ commit: true
+ exclude:
+ - /etc/opt
+
+# Include paths without committing
+- name: Include file and directory
+ community.general.lbu:
+ include:
+ - /root/.ssh/authorized_keys
+ - /var/lib/misc
+'''
+
+RETURN = '''
+msg:
+ description: Error message
+ type: str
+ returned: on failure
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import os.path
+
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec={
+ 'commit': {'type': 'bool'},
+ 'exclude': {'type': 'list', 'elements': 'str'},
+ 'include': {'type': 'list', 'elements': 'str'}
+ },
+ supports_check_mode=True
+ )
+
+ changed = False
+
+ def run_lbu(*args):
+ code, stdout, stderr = module.run_command(
+ [module.get_bin_path('lbu', required=True)] + list(args)
+ )
+ if code:
+ module.fail_json(changed=changed, msg=stderr)
+ return stdout
+
+ update = False
+ commit = False
+
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ paths = run_lbu(param, '-l').split('\n')
+ for path in module.params[param]:
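+                # normpath('/' + path)[1:] collapses duplicate separators and
+                # strips the leading '/', so the path matches the relative
+                # entries reported by 'lbu <include|exclude> -l'.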
+ if os.path.normpath('/' + path)[1:] not in paths:
+ update = True
+
+ if module.params['commit']:
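+        # 'lbu status' lists pending changes one per line; any output at all
+        # means there is something to commit.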
+ commit = update or run_lbu('status') > ''
+
+ if module.check_mode:
+ module.exit_json(changed=update or commit)
+
+ if update:
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ run_lbu(param, *module.params[param])
+ changed = True
+
+ if commit:
+ run_lbu('commit')
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py
new file mode 100644
index 00000000..f983b857
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ldap_attr
+short_description: Add or remove LDAP attribute values
+description:
+ - Add or remove LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in
+ Ansible-2.7 as it was insecure to set the parameter that way."
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: 'The current "ldap_attr" module does not support LDAP attribute insertions or deletions with objectClass dependencies.'
+ alternative: 'Use M(community.general.ldap_attrs) instead. Deprecated in community.general 0.2.0.'
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ name:
+ description:
+ - The name of the attribute to modify.
+ type: str
+ required: true
+ state:
+ description:
+ - The state of the attribute values.
+ - If C(present), all given values will be added if they're missing.
+ - If C(absent), all given values will be removed if present.
+ - If C(exact), the set of values will be forced to exactly those provided and no others.
+ - If I(state=exact) and I(value) is an empty list, all values for this attribute will be removed.
+ type: str
+ choices: [ absent, exact, present ]
+ default: present
+ values:
+ description:
+ - The value(s) to add or remove. This can be a string or a list of
+ strings. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ type: raw
+ required: true
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcSuffix
+ values: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcAccess
+ values:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+        by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcDbIndex
+ values: "{{ item }}"
+ with_items:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: "{{ item.key }}"
+ values: "{{ item.value }}"
+ state: exact
+ with_dict:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ args: "{{ ldap_auth }}"
+'''
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttr(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Normalize values
+ if isinstance(self.module.params['values'], list):
+ self.values = list(map(to_bytes, self.module.params['values']))
+ else:
+ self.values = [to_bytes(self.module.params['values'])]
+
+ def add(self):
+ values_to_add = list(filter(self._is_value_absent, self.values))
+
+ if len(values_to_add) > 0:
+ modlist = [(ldap.MOD_ADD, self.name, values_to_add)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def delete(self):
+ values_to_delete = list(filter(self._is_value_present, self.values))
+
+ if len(values_to_delete) > 0:
+ modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def exact(self):
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[self.name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % self.name, e)
+
+ current = results[0][1].get(self.name, [])
+ modlist = []
+
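+        # frozenset comparison ignores ordering and duplicates, so only a
+        # genuine difference in the sets of values triggers a modification.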
+ if frozenset(self.values) != frozenset(current):
+ if len(current) == 0:
+ modlist = [(ldap.MOD_ADD, self.name, self.values)]
+ elif len(self.values) == 0:
+ modlist = [(ldap.MOD_DELETE, self.name, None)]
+ else:
+ modlist = [(ldap.MOD_REPLACE, self.name, self.values)]
+
+ return modlist
+
+ def _is_value_present(self, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, self.name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ name=dict(type='str', required=True),
+ params=dict(type='dict'),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ values=dict(type='raw', required=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+ module.fail_json(msg="The `params` option to ldap_attr was removed in since it circumvents Ansible's option handling")
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttr(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py
new file mode 100644
index 00000000..ae5cb7fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Maciej Delmanowski <drybjed@gmail.com>
+# Copyright: (c) 2017, Alexander Korinek <noles@a3k.net>
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ldap_attrs
+short_description: Add or remove multiple LDAP attribute values
+description:
+ - Add or remove multiple LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+version_added: '0.2.0'
+author:
+ - Jiri Tyr (@jtyr)
+ - Alexander Korinek (@noles)
+ - Maciej Delmanowski (@drybjed)
+requirements:
+ - python-ldap
+options:
+ state:
+ required: false
+ type: str
+ choices: [present, absent, exact]
+ default: present
+ description:
+ - The state of the attribute values. If C(present), all given attribute
+ values will be added if they're missing. If C(absent), all given
+ attribute values will be removed if present. If C(exact), the set of
+ attribute values will be forced to exactly those provided and no others.
+ If I(state=exact) and the attribute I(value) is empty, all values for
+ this attribute will be removed.
+ attributes:
+ required: true
+ type: dict
+ description:
+ - The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ ordered:
+ required: false
+ type: bool
+ default: 'no'
+ description:
+ - If C(yes), prepend list values with X-ORDERED index numbers in all
+ attributes specified in the current task. This is useful mostly with
+ I(olcAccess) attribute to easily manage LDAP Access Control Lists.
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcSuffix: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+          by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+# An alternative approach with automatic X-ORDERED numbering
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+          by * none
+ - >-
+ to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ ordered: yes
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcDbIndex:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+ state: exact
+
+- name: Remove an attribute with a specific value
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: "An example user account"
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+- name: Remove specified attribute(s) from an entry
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+'''
+
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+import re
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttrs(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.attrs = self.module.params['attributes']
+ self.state = self.module.params['state']
+ self.ordered = self.module.params['ordered']
+
+ def _order_values(self, values):
+ """ Preprend X-ORDERED index numbers to attribute's values. """
+ ordered_values = []
+
+ if isinstance(values, list):
+ for index, value in enumerate(values):
+ cleaned_value = re.sub(r'^\{\d+\}', '', value)
+ ordered_values.append('{' + str(index) + '}' + cleaned_value)
+
+ return ordered_values
+
+ def _normalize_values(self, values):
+ """ Normalize attribute's values. """
+ norm_values = []
+
+ if isinstance(values, list):
+ if self.ordered:
+ norm_values = list(map(to_bytes,
+ self._order_values(list(map(str,
+ values)))))
+ else:
+ norm_values = list(map(to_bytes, values))
+ else:
+ norm_values = [to_bytes(str(values))]
+
+ return norm_values
+
+ def add(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_absent(name, value):
+ modlist.append((ldap.MOD_ADD, name, value))
+
+ return modlist
+
+ def delete(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_present(name, value):
+ modlist.append((ldap.MOD_DELETE, name, value))
+
+ return modlist
+
+ def exact(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % name, e)
+
+ current = results[0][1].get(name, [])
+
+ if frozenset(norm_values) != frozenset(current):
+ if len(current) == 0:
+ modlist.append((ldap.MOD_ADD, name, norm_values))
+ elif len(norm_values) == 0:
+ modlist.append((ldap.MOD_DELETE, name, None))
+ else:
+ modlist.append((ldap.MOD_REPLACE, name, norm_values))
+
+ return modlist
+
+ def _is_value_present(self, name, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, name, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(name, value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(type='dict', required=True),
+ ordered=dict(type='bool', default=False, required=False),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttrs(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py
new file mode 100644
index 00000000..7ee0c3dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_entry
+short_description: Add or remove LDAP entries.
+description:
+ - Add or remove LDAP entries. This module only asserts the existence or
+ non-existence of an LDAP entry, not its attributes. To assert the
+ attribute values of an entry, see M(community.general.ldap_attr).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in
+ Ansible-2.7 as it was insecure to set the parameter that way."
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ attributes:
+ description:
+ - If I(state=present), attributes necessary to create an entry. Existing
+ entries are never modified. To assert specific attribute values on an
+ existing entry, use M(community.general.ldap_attr) module instead.
+ type: dict
+ objectClass:
+ description:
+ - If I(state=present), value or list of values to use when creating
+ the entry. It can either be a string or an actual list of
+ strings.
+ type: list
+ elements: str
+ state:
+ description:
+ - The target state of the entry.
+ choices: [present, absent]
+ default: present
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = """
+- name: Make sure we have a parent entry for users
+ community.general.ldap_entry:
+ dn: ou=users,dc=example,dc=com
+ objectClass: organizationalUnit
+
+- name: Make sure we have an admin user
+ community.general.ldap_entry:
+ dn: cn=admin,dc=example,dc=com
+ objectClass:
+ - simpleSecurityObject
+ - organizationalRole
+ attributes:
+ description: An LDAP administrator
+ userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ args: "{{ ldap_auth }}"
+"""
+
+
+RETURN = """
+# Default return values
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap.modlist
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapEntry(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.state = self.module.params['state']
+
+ # Add the objectClass into the list of attributes
+ self.module.params['attributes']['objectClass'] = (
+ self.module.params['objectClass'])
+
+ # Load attributes
+ if self.state == 'present':
+ self.attrs = self._load_attrs()
+
+ def _load_attrs(self):
+ """ Turn attribute's value to array. """
+ attrs = {}
+
+ for name, value in self.module.params['attributes'].items():
+ if isinstance(value, list):
+ attrs[name] = list(map(to_bytes, value))
+ else:
+ attrs[name] = [to_bytes(value)]
+
+ return attrs
+
+ def add(self):
+ """ If self.dn does not exist, returns a callable that will add it. """
+ def _add():
+ self.connection.add_s(self.dn, modlist)
+
+ if not self._is_entry_present():
+ modlist = ldap.modlist.addModlist(self.attrs)
+ action = _add
+ else:
+ action = None
+
+ return action
+
+ def delete(self):
+ """ If self.dn exists, returns a callable that will delete it. """
+ def _delete():
+ self.connection.delete_s(self.dn)
+
+ if self._is_entry_present():
+ action = _delete
+ else:
+ action = None
+
+ return action
+
+ def _is_entry_present(self):
+ try:
+ self.connection.search_s(self.dn, ldap.SCOPE_BASE)
+ except ldap.NO_SUCH_OBJECT:
+ is_present = False
+ else:
+ is_present = True
+
+ return is_present
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(default={}, type='dict'),
+ objectClass=dict(type='list', elements='str'),
+ params=dict(type='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ required_if=[('state', 'present', ['objectClass'])],
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+ module.fail_json(msg="The `params` option to ldap_attr was removed since it circumvents Ansible's option handling")
+
+ state = module.params['state']
+
+ # Instantiate the LdapEntry object
+ ldap = LdapEntry(module)
+
+ # Get the action function
+ if state == 'present':
+ action = ldap.add()
+ elif state == 'absent':
+ action = ldap.delete()
+
+ # Perform the action
+ if action is not None and not module.check_mode:
+ try:
+ action()
+ except Exception as e:
+ module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=(action is not None))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py
new file mode 100644
index 00000000..8d86ee93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2018, Keller Fuchs <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_passwd
+short_description: Set passwords in LDAP.
+description:
+ - Set a password for an LDAP entry. This module only asserts that
+ a given password is valid for a given entry. To assert the
+ existence of an entry, see M(community.general.ldap_entry).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Keller Fuchs (@KellerFuchs)
+requirements:
+ - python-ldap
+options:
+ passwd:
+ description:
+ - The (plaintext) password to be set for I(dn).
+ type: str
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = """
+- name: Set a password for the admin user
+ community.general.ldap_passwd:
+ dn: cn=admin,dc=example,dc=com
+ passwd: "{{ vault_secret }}"
+
+- name: Setting passwords in bulk
+ community.general.ldap_passwd:
+ dn: "{{ item.key }}"
+ passwd: "{{ item.value }}"
+ with_dict:
+ alice: alice123123
+ bob: "|30b!"
+ admin: "{{ vault_secret }}"
+"""
+
+RETURN = """
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapPasswd(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.passwd = self.module.params['passwd']
+
+ def passwd_check(self):
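+        # Returns True when a simple bind with self.passwd fails with
+        # INVALID_CREDENTIALS, i.e. the stored password differs and a change
+        # is needed; a successful bind returns False (nothing to do).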
+ try:
+ tmp_con = ldap.initialize(self.server_uri)
+ except ldap.LDAPError as e:
+ self.fail("Cannot initialize LDAP connection", e)
+
+ if self.start_tls:
+ try:
+ tmp_con.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ tmp_con.simple_bind_s(self.dn, self.passwd)
+ except ldap.INVALID_CREDENTIALS:
+ return True
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+ else:
+ return False
+ finally:
+ tmp_con.unbind()
+
+ def passwd_set(self):
+ # Exit early if the password is already valid
+ if not self.passwd_check():
+ return False
+
+ # Change the password (or throw an exception)
+ try:
+ self.connection.passwd_s(self.dn, None, self.passwd)
+ except ldap.LDAPError as e:
+ self.fail("Unable to set password", e)
+
+ # Password successfully changed
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(passwd=dict(no_log=True)),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ ldap = LdapPasswd(module)
+
+ if module.check_mode:
+ module.exit_json(changed=ldap.passwd_check())
+
+ module.exit_json(changed=ldap.passwd_set())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py
new file mode 100644
index 00000000..3b1a2833
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2020, Sebastian Pfahl <eryx@gmx.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ldap_search
+version_added: '0.2.0'
+short_description: Search for entries in a LDAP server
+description:
+ - Return the results of an LDAP search.
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Sebastian Pfahl (@eryx12o45)
+requirements:
+ - python-ldap
+options:
+ dn:
+ required: true
+ type: str
+ description:
+ - The LDAP DN to search in.
+ scope:
+ choices: [base, onelevel, subordinate, children]
+ default: base
+ type: str
+ description:
+ - The LDAP scope to use.
+ filter:
+ default: '(objectClass=*)'
+ type: str
+ description:
+ - Used for filtering the LDAP search result.
+ attrs:
+ type: list
+ elements: str
+ description:
+ - A list of attributes for limiting the result. Use an
+ actual list or a comma-separated string.
+ schema:
+ default: false
+ type: bool
+ description:
+ - Set to C(true) to return the full attribute schema of entries, not
+ their attribute values. Overrides I(attrs) when provided.
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+"""
+
+EXAMPLES = r"""
+- name: Return all entries within the 'groups' organizational unit.
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ register: ldap_groups
+
+- name: Return GIDs for all groups
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ scope: "onelevel"
+ attrs:
+ - "gidNumber"
+ register: ldap_group_gids
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ dn=dict(type='str', required=True),
+ scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']),
+ filter=dict(type='str', default='(objectClass=*)'),
+ attrs=dict(type='list', elements='str'),
+ schema=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if not module.check_mode:
+ try:
+ LdapSearch(module).main()
+ except Exception as exception:
+ module.fail_json(msg="Attribute action failed.", details=to_native(exception))
+
+ module.exit_json(changed=False)
+
+
+def _extract_entry(dn, attrs):
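+    # Flatten single-valued attributes to a scalar; keep multi-valued
+    # attributes as lists.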
+ extracted = {'dn': dn}
+ for attr, val in list(attrs.items()):
+ if len(val) == 1:
+ extracted[attr] = val[0]
+ else:
+ extracted[attr] = val
+ return extracted
+
+
+class LdapSearch(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ self.dn = self.module.params['dn']
+ self.filterstr = self.module.params['filter']
+ self.attrlist = []
+ self._load_scope()
+ self._load_attrs()
+ self._load_schema()
+
+ def _load_schema(self):
+ self.schema = self.module.boolean(self.module.params['schema'])
+ if self.schema:
+ self.attrsonly = 1
+ else:
+ self.attrsonly = 0
+
+ def _load_scope(self):
+ spec = dict(
+ base=ldap.SCOPE_BASE,
+ onelevel=ldap.SCOPE_ONELEVEL,
+ subordinate=ldap.SCOPE_SUBORDINATE,
+ children=ldap.SCOPE_SUBTREE,
+ )
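+        # Note: 'subordinate' (SCOPE_SUBORDINATE) covers the subtree while
+        # excluding the base entry itself, whereas 'children' maps to
+        # SCOPE_SUBTREE here, which does include the base entry.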
+ self.scope = spec[self.module.params['scope']]
+
+ def _load_attrs(self):
+ self.attrlist = self.module.params['attrs'] or None
+
+ def main(self):
+ results = self.perform_search()
+ self.module.exit_json(changed=False, results=results)
+
+ def perform_search(self):
+ try:
+ results = self.connection.search_s(
+ self.dn,
+ self.scope,
+ filterstr=self.filterstr,
+ attrlist=self.attrlist,
+ attrsonly=self.attrsonly
+ )
+ if self.schema:
+ return [dict(dn=result[0], attrs=list(result[1].keys())) for result in results]
+ else:
+ return [_extract_entry(result[0], result[1]) for result in results]
+ except ldap.NO_SUCH_OBJECT:
+ self.module.fail_json(msg="Base not found: {0}".format(self.dn))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py
new file mode 100644
index 00000000..d0fd406d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) Seth Edwards, 2014
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: Create an annotation in Librato
+description:
+  - Create an annotation event on the given annotation stream I(name). If the annotation stream does not exist, it will be created automatically.
+author: "Seth Edwards (@Sedward)"
+requirements: []
+options:
+ user:
+ type: str
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ type: str
+ description:
+ - Librato account api key
+ required: true
+ name:
+ type: str
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ type: str
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ type: str
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ type: str
+ description:
+ - The description contains extra metadata about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ type: list
+ description:
+      - A list of links to attach to the annotation (see examples for the format).
+'''
+
+EXAMPLES = '''
+- name: Create a simple annotation event with a source
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ source: foo.bar
+ description: This is a detailed description of the config change
+
+- name: Create an annotation that includes a link
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: code.deploy
+ title: app code deploy
+ description: this is a detailed description of a deployment
+ links:
+ - rel: example
+ href: http://www.example.com/deploy
+
+- name: Create an annotation with a start_time and end_time
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: maintenance
+ title: Maintenance window
+ description: This is a detailed description of maintenance
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def post_annotation(module):
+ user = module.params['user']
+ api_key = module.params['api_key']
+ name = module.params['name']
+ title = module.params['title']
+
+ url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
+ params = {}
+ params['title'] = title
+
+ if module.params['source'] is not None:
+ params['source'] = module.params['source']
+ if module.params['description'] is not None:
+ params['description'] = module.params['description']
+ if module.params['start_time'] is not None:
+ params['start_time'] = module.params['start_time']
+ if module.params['end_time'] is not None:
+ params['end_time'] = module.params['end_time']
+ if module.params['links'] is not None:
+ params['links'] = module.params['links']
+
+ json_body = module.jsonify(params)
+
+ headers = {}
+ headers['Content-Type'] = 'application/json'
+
+ # Hack send parameters the way fetch_url wants them
+ module.params['url_username'] = user
+ module.params['url_password'] = api_key
+ response, info = fetch_url(module, url, data=json_body, headers=headers)
+ response_code = str(info['status'])
+ response_body = info['body']
+ if info['status'] != 201:
+ if info['status'] >= 400:
+ module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
+ else:
+ module.fail_json(msg="Request Failed. Response code: " + response_code)
+ response = response.read()
+ module.exit_json(changed=True, annotation=response)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ name=dict(required=False),
+ title=dict(required=True),
+ source=dict(required=False),
+ description=dict(required=False),
+ start_time=dict(required=False, default=None, type='int'),
+ end_time=dict(required=False, default=None, type='int'),
+ links=dict(type='list')
+ )
+ )
+
+ post_annotation(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py
new file mode 100644
index 00000000..a35b25b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py
@@ -0,0 +1,690 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode
+short_description: Manage instances on the Linode Public Cloud
+description:
+  - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: [ absent, active, deleted, present, restarted, started, stopped ]
+ default: present
+ type: str
+ api_key:
+ description:
+ - Linode API key
+ type: str
+ name:
+ description:
+ - Name to give the instance (alphanumeric, dashes, underscore).
+ - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+ required: true
+ type: str
+ displaygroup:
+ description:
+ - Add the instance to a Display Group in Linode Manager.
+ type: str
+ linode_id:
+ description:
+      - Unique ID of a Linode server. This value is read-only in the sense that
+        if you specify it on creation of a Linode it will not be used. The
+        Linode API generates these IDs, and we can use those generated values here
+        to reference a Linode more specifically. This is useful for idempotence.
+ aliases: [ lid ]
+ type: int
+ additional_disks:
+ description:
+ - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+ - Dictionary takes Size, Label, Type. Size is in MB.
+ type: list
+ alert_bwin_enabled:
+ description:
+ - Set status of bandwidth in alerts.
+ type: bool
+ alert_bwin_threshold:
+ description:
+ - Set threshold in MB of bandwidth in alerts.
+ type: int
+ alert_bwout_enabled:
+ description:
+ - Set status of bandwidth out alerts.
+ type: bool
+ alert_bwout_threshold:
+ description:
+ - Set threshold in MB of bandwidth out alerts.
+ type: int
+ alert_bwquota_enabled:
+ description:
+ - Set status of bandwidth quota alerts as percentage of network transfer quota.
+ type: bool
+ alert_bwquota_threshold:
+ description:
+ - Set threshold in MB of bandwidth quota alerts.
+ type: int
+ alert_cpu_enabled:
+ description:
+ - Set status of receiving CPU usage alerts.
+ type: bool
+ alert_cpu_threshold:
+ description:
+ - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
+ type: int
+ alert_diskio_enabled:
+ description:
+ - Set status of receiving disk IO alerts.
+ type: bool
+ alert_diskio_threshold:
+ description:
+ - Set threshold for average IO ops/sec over 2 hour period.
+ type: int
+ backupweeklyday:
+ description:
+ - Integer value for what day of the week to store weekly backups.
+ type: int
+ plan:
+ description:
+ - plan to use for the instance (Linode plan)
+ type: int
+ payment_term:
+ description:
+ - payment term to use for the instance (payment term in months)
+ default: 1
+ choices: [ 1, 12, 24 ]
+ type: int
+ password:
+ description:
+ - root password to apply to a new server (auto generated if missing)
+ type: str
+ private_ip:
+ description:
+ - Add private IPv4 address when Linode is created.
+ - Default is C(false).
+ type: bool
+ ssh_pub_key:
+ description:
+ - SSH public key applied to root user
+ type: str
+ swap:
+ description:
+ - swap size in MB
+ default: 512
+ type: int
+ distribution:
+ description:
+ - distribution to use for the instance (Linode Distribution)
+ type: int
+ datacenter:
+ description:
+ - datacenter to create an instance in (Linode Datacenter)
+ type: int
+ kernel_id:
+ description:
+ - kernel to use for the instance (Linode Kernel)
+ type: int
+ wait:
+ description:
+ - wait for the instance to be in state C(running) before returning
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+ type: int
+ watchdog:
+ description:
+ - Set status of Lassie watchdog.
+ type: bool
+    default: true
+requirements:
+ - python >= 2.6
+ - linode-python
+author:
+- Vincent Viallet (@zbal)
+notes:
+ - Please note, linode-python does not have python 3 support.
+ - This module uses the now deprecated v3 of the Linode API.
+  - C(LINODE_API_KEY) environment variable can be used instead.
+ - Please review U(https://www.linode.com/api/linode) for determining the required parameters.
+'''
+
+EXAMPLES = '''
+
+- name: Create a new Linode
+ community.general.linode:
+ name: linode-test1
+ plan: 1
+ datacenter: 7
+ distribution: 129
+ state: present
+ register: linode_creation
+
+- name: Create a server with a private IP Address
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Fully configure new server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 4
+ datacenter: 2
+ distribution: 99
+ kernel_id: 138
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ alert_bwquota_enabled: True
+ alert_bwquota_threshold: 80
+ alert_bwin_enabled: True
+ alert_bwin_threshold: 10
+ alert_cpu_enabled: True
+ alert_cpu_threshold: 210
+ alert_bwout_enabled: True
+ alert_bwout_threshold: 10
+ alert_diskio_enabled: True
+ alert_diskio_threshold: 10000
+ backupweeklyday: 1
+ backupwindow: 2
+ displaygroup: 'test'
+ additional_disks:
+ - {Label: 'disk1', Size: 2500, Type: 'raw'}
+ - {Label: 'newdisk', Size: 2000}
+ watchdog: True
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Ensure a running server (create if missing)
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Delete a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Stop a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: stopped
+ delegate_to: localhost
+
+- name: Reboot a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: restarted
+ delegate_to: localhost
+'''
+
+import os
+import time
+import traceback
+
+LINODE_IMP_ERR = None
+try:
+ from linode import api as linode_api
+ HAS_LINODE = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def randompass():
+ '''
+    Generate a long random password that complies with Linode requirements
+ '''
+ # Linode API currently requires the following:
+ # It must contain at least two of these four character classes:
+ # lower case letters - upper case letters - numbers - punctuation
+ # we play it safe :)
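+    # The result is a 24-character shuffle drawn from six characters of each
+    # of the four classes, comfortably satisfying the two-class rule above.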
+ import random
+ import string
+ # as of python 2.4, this reseeds the PRNG from urandom
+ random.seed()
+ lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
+ upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
+ number = ''.join(random.choice(string.digits) for x in range(6))
+ punct = ''.join(random.choice(string.punctuation) for x in range(6))
+ p = lower + upper + number + punct
+ return ''.join(random.sample(p, len(p)))
+
+
+def getInstanceDetails(api, server):
+ '''
+ Return the details of an instance, populating IPs, etc.
+ '''
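+    # The first public IP found is also exposed as the top-level 'ipv4'/'fqdn'
+    # keys for convenience; every address is kept in the 'public'/'private' lists.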
+ instance = {'id': server['LINODEID'],
+ 'name': server['LABEL'],
+ 'public': [],
+ 'private': []}
+
+ # Populate with ips
+ for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
+ if ip['ISPUBLIC'] and 'ipv4' not in instance:
+ instance['ipv4'] = ip['IPADDRESS']
+ instance['fqdn'] = ip['RDNS_NAME']
+ if ip['ISPUBLIC']:
+ instance['public'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ else:
+ instance['private'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ return instance
+
+
+def linodeServers(module, api, state, name,
+ displaygroup, plan, additional_disks, distribution,
+ datacenter, kernel_id, linode_id, payment_term, password,
+ private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
+ instances = []
+ changed = False
+ new_server = False
+ servers = []
+ disks = []
+ configs = []
+ jobs = []
+
+ # See if we can match an existing server details with the provided linode_id
+ if linode_id:
+ # For the moment we only consider linode_id as criteria for match
+ # Later we can use more (size, name, etc.) and update existing
+ servers = api.linode_list(LinodeId=linode_id)
+ # Attempt to fetch details about disks and configs only if servers are
+ # found with linode_id
+ if servers:
+ disks = api.linode_disk_list(LinodeId=linode_id)
+ configs = api.linode_config_list(LinodeId=linode_id)
+
+ # Act on the state
+ if state in ('active', 'present', 'started'):
+ # TODO: validate all the plan / distribution / datacenter are valid
+
+ # Multi step process/validation:
+ # - need linode_id (entity)
+ # - need disk_id for linode_id - create disk from distrib
+ # - need config_id for linode_id - create config (need kernel)
+
+        # Any create step triggers a job that needs to be waited for.
+ if not servers:
+            # Report the name of the missing option rather than its (empty) value
+            for arg_name, arg in (('name', name), ('plan', plan),
+                                  ('distribution', distribution), ('datacenter', datacenter)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))  # @TODO use required_if instead
+ # Create linode entity
+ new_server = True
+
+ # Get size of all individually listed disks to subtract from Distribution disk
+ used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
+
+ try:
+ res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
+ PaymentTerm=payment_term)
+ linode_id = res['LinodeID']
+ # Update linode Label to match name
+ api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name))
+ # Update Linode with Ansible configuration options
+ api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
+ # Save server
+ servers = api.linode_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Add private IP to Linode
+ if private_ip:
+ try:
+ res = api.linode_ip_addprivate(LinodeID=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not disks:
+            for arg_name, arg in (('name', name), ('linode_id', linode_id), ('distribution', distribution)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+ # Create disks (1 from distrib, 1 for SWAP)
+ new_server = True
+ try:
+ if not password:
+ # Password is required on creation, if not provided generate one
+ password = randompass()
+ if not swap:
+ swap = 512
+ # Create data disk
+ size = servers[0]['TOTALHD'] - used_disk_space - swap
+
+ if ssh_pub_key:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password, rootSSHKey=ssh_pub_key,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ else:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ jobs.append(res['JobID'])
+ # Create SWAP disk
+ res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
+ Label='%s swap disk (lid: %s)' % (name, linode_id),
+ Size=swap)
+ # Create individually listed disks at specified size
+ if additional_disks:
+ for disk in additional_disks:
+ # If a disk Type is not passed in, default to ext4
+ if disk.get('Type') is None:
+ disk['Type'] = 'ext4'
+ res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
+
+ jobs.append(res['JobID'])
+ except Exception as e:
+ # TODO: destroy linode ?
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not configs:
+            for arg_name, arg in (('name', name), ('linode_id', linode_id), ('distribution', distribution)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+
+            # Check architecture (default to 32-bit if the distribution is not found)
+            arch = '32'
+            for distrib in api.avail_distributions():
+                if distrib['DISTRIBUTIONID'] != distribution:
+                    continue
+                if distrib['IS64BIT']:
+                    arch = '64'
+                break
+
+ # Get latest kernel matching arch if kernel_id is not specified
+ if not kernel_id:
+ for kernel in api.avail_kernels():
+ if not kernel['LABEL'].startswith('Latest %s' % arch):
+ continue
+ kernel_id = kernel['KERNELID']
+ break
+
+ # Get disk list
+ disks_id = []
+ for disk in api.linode_disk_list(LinodeId=linode_id):
+ if disk['TYPE'] == 'ext3':
+ disks_id.insert(0, str(disk['DISKID']))
+ continue
+ disks_id.append(str(disk['DISKID']))
+            # Pad to nine entries - the API's DiskList field expects nine comma-separated slots
+ while len(disks_id) < 9:
+ disks_id.append('')
+ disks_list = ','.join(disks_id)
+
+ # Create config
+ new_server = True
+ try:
+ api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
+ Disklist=disks_list, Label='%s config' % name)
+ configs = api.linode_config_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Start / Ensure servers are running
+ for server in servers:
+ # Refresh server state
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # Ensure existing servers are up and running, boot if necessary
+ if server['STATUS'] != 1:
+ res = api.linode_boot(LinodeId=linode_id)
+ jobs.append(res['JobID'])
+ changed = True
+
+ # wait here until the instances are up
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ # refresh the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # status:
+ # -2: Boot failed
+ # 1: Running
+ if server['STATUS'] in (-2, 1):
+ break
+ time.sleep(5)
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
+ # Get a fresh copy of the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ if server['STATUS'] == -2:
+ module.fail_json(msg='%s (lid: %s) failed to boot' %
+ (server['LABEL'], server['LINODEID']))
+ # From now on we know the task is a success
+ # Build instance report
+ instance = getInstanceDetails(api, server)
+ # depending on wait flag select the status
+ if wait:
+ instance['status'] = 'Running'
+ else:
+ instance['status'] = 'Starting'
+
+ # Return the root password if this is a new box and no SSH key
+ # has been provided
+ if new_server and not ssh_pub_key:
+ instance['password'] = password
+ instances.append(instance)
+
+    elif state in ('stopped',):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for stopped state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ if server['STATUS'] != 2:
+ try:
+ res = api.linode_shutdown(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Stopping'
+ changed = True
+ else:
+ instance['status'] = 'Stopped'
+ instances.append(instance)
+
+    elif state in ('restarted',):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for restarted state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ res = api.linode_reboot(LinodeId=server['LINODEID'])
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Restarting'
+ changed = True
+ instances.append(instance)
+
+ elif state in ('absent', 'deleted'):
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Deleting'
+ changed = True
+ instances.append(instance)
+
+ # Ease parsing if only 1 instance
+ if len(instances) == 1:
+ module.exit_json(changed=changed, instance=instances[0])
+
+ module.exit_json(changed=changed, instances=instances)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']),
+ api_key=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ alert_bwin_enabled=dict(type='bool'),
+ alert_bwin_threshold=dict(type='int'),
+ alert_bwout_enabled=dict(type='bool'),
+ alert_bwout_threshold=dict(type='int'),
+ alert_bwquota_enabled=dict(type='bool'),
+ alert_bwquota_threshold=dict(type='int'),
+ alert_cpu_enabled=dict(type='bool'),
+ alert_cpu_threshold=dict(type='int'),
+ alert_diskio_enabled=dict(type='bool'),
+ alert_diskio_threshold=dict(type='int'),
+ backupsenabled=dict(type='int'),
+ backupweeklyday=dict(type='int'),
+ backupwindow=dict(type='int'),
+ displaygroup=dict(type='str', default=''),
+ plan=dict(type='int'),
+ additional_disks=dict(type='list'),
+ distribution=dict(type='int'),
+ datacenter=dict(type='int'),
+ kernel_id=dict(type='int'),
+ linode_id=dict(type='int', aliases=['lid']),
+ payment_term=dict(type='int', default=1, choices=[1, 12, 24]),
+ password=dict(type='str', no_log=True),
+ private_ip=dict(type='bool'),
+ ssh_pub_key=dict(type='str'),
+ swap=dict(type='int', default=512),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300),
+ watchdog=dict(type='bool', default=True),
+ ),
+ )
+
+ if not HAS_LINODE:
+ module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR)
+
+ state = module.params.get('state')
+ api_key = module.params.get('api_key')
+ name = module.params.get('name')
+ alert_bwin_enabled = module.params.get('alert_bwin_enabled')
+ alert_bwin_threshold = module.params.get('alert_bwin_threshold')
+ alert_bwout_enabled = module.params.get('alert_bwout_enabled')
+ alert_bwout_threshold = module.params.get('alert_bwout_threshold')
+ alert_bwquota_enabled = module.params.get('alert_bwquota_enabled')
+ alert_bwquota_threshold = module.params.get('alert_bwquota_threshold')
+ alert_cpu_enabled = module.params.get('alert_cpu_enabled')
+ alert_cpu_threshold = module.params.get('alert_cpu_threshold')
+ alert_diskio_enabled = module.params.get('alert_diskio_enabled')
+ alert_diskio_threshold = module.params.get('alert_diskio_threshold')
+ backupsenabled = module.params.get('backupsenabled')
+ backupweeklyday = module.params.get('backupweeklyday')
+ backupwindow = module.params.get('backupwindow')
+ displaygroup = module.params.get('displaygroup')
+ plan = module.params.get('plan')
+ additional_disks = module.params.get('additional_disks')
+ distribution = module.params.get('distribution')
+ datacenter = module.params.get('datacenter')
+ kernel_id = module.params.get('kernel_id')
+ linode_id = module.params.get('linode_id')
+ payment_term = module.params.get('payment_term')
+ password = module.params.get('password')
+ private_ip = module.params.get('private_ip')
+ ssh_pub_key = module.params.get('ssh_pub_key')
+ swap = module.params.get('swap')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ watchdog = int(module.params.get('watchdog'))
+
+ kwargs = dict()
+ check_items = dict(
+ alert_bwin_enabled=alert_bwin_enabled,
+ alert_bwin_threshold=alert_bwin_threshold,
+ alert_bwout_enabled=alert_bwout_enabled,
+ alert_bwout_threshold=alert_bwout_threshold,
+ alert_bwquota_enabled=alert_bwquota_enabled,
+ alert_bwquota_threshold=alert_bwquota_threshold,
+ alert_cpu_enabled=alert_cpu_enabled,
+ alert_cpu_threshold=alert_cpu_threshold,
+ alert_diskio_enabled=alert_diskio_enabled,
+ alert_diskio_threshold=alert_diskio_threshold,
+ backupweeklyday=backupweeklyday,
+ backupwindow=backupwindow,
+ )
+
+ for key, value in check_items.items():
+ if value is not None:
+ kwargs[key] = value
+
+ # Setup the api_key
+ if not api_key:
+ try:
+ api_key = os.environ['LINODE_API_KEY']
+ except KeyError as e:
+            module.fail_json(msg='Unable to load %s' % e.args[0])
+
+ # setup the auth
+ try:
+ api = linode_api.Api(api_key)
+ api.test_echo()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ linodeServers(module, api, state, name,
+ displaygroup, plan,
+ additional_disks, distribution, datacenter, kernel_id, linode_id,
+ payment_term, password, private_ip, ssh_pub_key, swap, wait,
+ wait_timeout, watchdog, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py
new file mode 100644
index 00000000..17a697b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode_v4
+short_description: Manage instances on the Linode cloud.
+description: Manage instances on the Linode cloud.
+requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+author:
+ - Luke Murphy (@decentral1se)
+notes:
+ - No Linode resizing is currently implemented. This module will, in time,
+ replace the current Linode module which uses deprecated API bindings on the
+ Linode side.
+options:
+ region:
+ description:
+ - The region of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/regions/).
+ required: false
+ type: str
+ image:
+ description:
+ - The image of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/images/).
+ type: str
+ required: false
+ type:
+ description:
+ - The type of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/linode-types/).
+ type: str
+ required: false
+ label:
+ description:
+ - The instance label. This label is used as the main determiner for
+ idempotence for the module and is therefore mandatory.
+ type: str
+ required: true
+ group:
+ description:
+      - The group that the instance should be marked under. Please note that
+        group labelling is deprecated but still supported. The encouraged
+        method for marking instances is to use tags.
+ type: str
+ required: false
+ tags:
+ description:
+ - The tags that the instance should be marked under. See
+ U(https://www.linode.com/docs/api/tags/).
+ required: false
+ type: list
+ root_pass:
+ description:
+ - The password for the root user. If not specified, one will be
+ generated. This generated password will be available in the task
+ success JSON.
+ required: false
+ type: str
+ authorized_keys:
+ description:
+ - A list of SSH public key parts to deploy for the root user.
+ required: false
+ type: list
+ state:
+ description:
+ - The desired instance state.
+ type: str
+ choices:
+ - present
+ - absent
+ required: true
+ access_token:
+ description:
+ - The Linode API v4 access token. It may also be specified by exposing
+ the C(LINODE_ACCESS_TOKEN) environment variable. See
+ U(https://www.linode.com/docs/api#access-and-authentication).
+ required: true
+ type: str
+ stackscript_id:
+ description:
+ - The numeric ID of the StackScript to use when creating the instance.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: int
+ version_added: 1.3.0
+ stackscript_data:
+ description:
+ - An object containing arguments to any User Defined Fields present in
+ the StackScript used when creating the instance.
+ Only valid when a stackscript_id is provided.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: dict
+ version_added: 1.3.0
+'''
+
+EXAMPLES = """
+- name: Create a new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ type: g6-nanode-1
+ region: eu-west
+ image: linode/debian9
+ root_pass: passw0rd
+ authorized_keys:
+ - "ssh-rsa ..."
+ stackscript_id: 1337
+ stackscript_data:
+ variable: value
+ state: present
+
+- name: Delete that new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ state: absent
+"""
+
+RETURN = """
+instance:
+ description: The instance description in JSON serialized form.
+ returned: Always.
+ type: dict
+ sample: {
+ "root_pass": "foobar", # if auto-generated
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": false,
+ "schedule": {
+ "day": null,
+ "window": null
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+      "203.0.113.233"
+ ],
+    "ipv6": "2a82:7e00::f03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": true
+ }
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
+
+LINODE_IMP_ERR = None
+try:
+ from linode_api4 import Instance, LinodeClient
+ HAS_LINODE_DEPENDENCY = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE_DEPENDENCY = False
+
+
+def create_linode(module, client, **kwargs):
+ """Creates a Linode instance and handles return format."""
+ if kwargs['root_pass'] is None:
+ kwargs.pop('root_pass')
+
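+    # linode_api4's instance_create() returns a bare Instance when a root
+    # password was supplied, or an (Instance, generated_password) tuple when
+    # the library generated one; both shapes are handled below.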
+ try:
+ response = client.linode.instance_create(**kwargs)
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
+ try:
+ if isinstance(response, tuple):
+ instance, root_pass = response
+ instance_json = instance._raw_json
+ instance_json.update({'root_pass': root_pass})
+ return instance_json
+ else:
+ return response._raw_json
+ except TypeError:
+ module.fail_json(msg='Unable to parse Linode instance creation'
+ ' response. Please raise a bug against this'
+ ' module on https://github.com/ansible/ansible/issues'
+ )
+
+
+def maybe_instance_from_label(module, client):
+ """Try to retrieve an instance based on a label."""
+ try:
+ label = module.params['label']
+ result = client.linode.instances(Instance.label == label)
+ return result[0]
+ except IndexError:
+ return None
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
+
+def initialise_module():
+ """Initialise the module parameter specification."""
+ return AnsibleModule(
+ argument_spec=dict(
+ label=dict(type='str', required=True),
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ access_token=dict(
+ type='str',
+ required=True,
+ no_log=True,
+ fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
+ ),
+ authorized_keys=dict(type='list', required=False),
+ group=dict(type='str', required=False),
+ image=dict(type='str', required=False),
+ region=dict(type='str', required=False),
+ root_pass=dict(type='str', required=False, no_log=True),
+ tags=dict(type='list', required=False),
+ type=dict(type='str', required=False),
+ stackscript_id=dict(type='int', required=False),
+ stackscript_data=dict(type='dict', required=False),
+ ),
+ supports_check_mode=False,
+ required_one_of=(
+ ['state', 'label'],
+ ),
+ required_together=(
+ ['region', 'image', 'type'],
+ )
+ )
+
+
+def build_client(module):
+ """Build a LinodeClient."""
+ return LinodeClient(
+ module.params['access_token'],
+ user_agent=get_user_agent('linode_v4_module')
+ )
+
+
+def main():
+ """Module entrypoint."""
+ module = initialise_module()
+
+ if not HAS_LINODE_DEPENDENCY:
+ module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
+
+ client = build_client(module)
+ instance = maybe_instance_from_label(module, client)
+
+ if module.params['state'] == 'present' and instance is not None:
+ module.exit_json(changed=False, instance=instance._raw_json)
+
+ elif module.params['state'] == 'present' and instance is None:
+ instance_json = create_linode(
+ module, client,
+ authorized_keys=module.params['authorized_keys'],
+ group=module.params['group'],
+ image=module.params['image'],
+ label=module.params['label'],
+ region=module.params['region'],
+ root_pass=module.params['root_pass'],
+ tags=module.params['tags'],
+ ltype=module.params['type'],
+ stackscript=module.params['stackscript_id'],
+ stackscript_data=module.params['stackscript_data'],
+ )
+ module.exit_json(changed=True, instance=instance_json)
+
+ elif module.params['state'] == 'absent' and instance is not None:
+ instance.delete()
+ module.exit_json(changed=True, instance=instance._raw_json)
+
+ elif module.params['state'] == 'absent' and instance is None:
+ module.exit_json(changed=False, instance={})
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
new file mode 100644
index 00000000..27ecca8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, Nathan Davison <ndavison85@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: listen_ports_facts
+author:
+ - Nathan Davison (@ndavison)
+description:
+  - Gather facts on processes listening on TCP and UDP ports using the C(netstat) command.
+ - This module currently supports Linux only.
+requirements:
+ - netstat
+short_description: Gather facts on processes listening on TCP and UDP ports.
+'''
+
+EXAMPLES = r'''
+- name: Gather facts on listening ports
+ community.general.listen_ports_facts:
+
+- name: TCP whitelist violation
+ ansible.builtin.debug:
+ msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
+ vars:
+    tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
+ tcp_whitelist:
+ - 22
+ - 25
+ loop: "{{ tcp_listen_violations }}"
+
+- name: List TCP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
+
+- name: List UDP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
+
+- name: List all ports
+ ansible.builtin.debug:
+ msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Dictionary containing details of TCP and UDP ports with listening servers
+ returned: always
+ type: complex
+ contains:
+ tcp_listen:
+ description: A list of processes that are listening on a TCP port.
+ returned: if TCP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "mysqld"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 1223
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 3306
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "tcp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "mysql"
+ udp_listen:
+ description: A list of processes that are listening on a UDP port.
+ returned: if UDP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "rsyslogd"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 609
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 514
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "udp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "root"
+'''
+
+import re
+import platform
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def netStatParse(raw):
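+    # A listening line of `netstat -plunt` output looks roughly like
+    # (illustrative): tcp  0  0 0.0.0.0:3306  0.0.0.0:*  LISTEN  1223/mysqld
+    # After split(), [0] is the protocol, [3] the local address:port, and
+    # [6] (tcp) or [5] (udp, which has no state column) is the pid/program field.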
+ results = list()
+ for line in raw.splitlines():
+ listening_search = re.search('[^ ]+:[0-9]+', line)
+ if listening_search:
+ splitted = line.split()
+ conns = re.search('([^ ]+):([0-9]+)', splitted[3])
+ pidstr = ''
+ if 'tcp' in splitted[0]:
+ protocol = 'tcp'
+ pidstr = splitted[6]
+ elif 'udp' in splitted[0]:
+ protocol = 'udp'
+ pidstr = splitted[5]
+ pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr)
+ if conns and pids:
+ address = conns.group(1)
+ port = conns.group(2)
+                # group(2)/group(3) are None when netstat printed '-' (no pid info)
+                pid = pids.group(2) or 0
+                name = pids.group(3) or ''
+ result = {
+ 'pid': int(pid),
+ 'address': address,
+ 'port': int(port),
+ 'protocol': protocol,
+ 'name': name,
+ }
+ if result not in results:
+ results.append(result)
+ else:
+ raise EnvironmentError('Could not get process information for the listening ports.')
+ return results
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ if platform.system() != 'Linux':
+ module.fail_json(msg='This module requires Linux.')
+
+ def getPidSTime(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
+ stime = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if 'started' not in line:
+ stime = line
+ return stime
+
+ def getPidUser(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
+ user = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if line != 'USER':
+ user = line
+ return user
+
+ result = {
+ 'changed': False,
+ 'ansible_facts': {
+ 'tcp_listen': [],
+ 'udp_listen': [],
+ },
+ }
+
+ try:
+ netstat_cmd = module.get_bin_path('netstat', True)
+
+ # which ports are listening for connections?
+ rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt'])
+ if rc == 0:
+ netstatOut = netStatParse(stdout)
+ for p in netstatOut:
+ p['stime'] = getPidSTime(p['pid'])
+ p['user'] = getPidUser(p['pid'])
+ if p['protocol'] == 'tcp':
+ result['ansible_facts']['tcp_listen'].append(p)
+ elif p['protocol'] == 'udp':
+ result['ansible_facts']['udp_listen'].append(p)
+ except (KeyError, EnvironmentError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py
new file mode 100644
index 00000000..ae86db40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lldp
+requirements: [ lldpctl ]
+short_description: get details reported by lldp
+description:
+ - Reads data out of lldpctl
+options: {}
+author: "Andy Hill (@andyhky)"
+notes:
+ - Requires lldpd running and lldp enabled on switches
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+ - name: Gather information from lldp
+ community.general.lldp:
+
+ - name: Print each switch/port
+ ansible.builtin.debug:
+ msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
+ with_items: "{{ lldp.keys() }}"
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def gather_lldp(module):
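+    # `lldpctl -f keyvalue` prints one dotted key=value pair per line, e.g.
+    #   lldp.eth0.chassis.name=switch1.example.com
+    #   lldp.eth0.port.ifname=Gi0/24
+    # The loop below unfolds those paths into a nested dictionary; lines not
+    # starting with 'lldp' are treated as continuations of the previous value.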
+    cmd = [module.get_bin_path('lldpctl', True), '-f', 'keyvalue']
+ rc, output, err = module.run_command(cmd)
+ if output:
+ output_dict = {}
+ current_dict = {}
+ lldp_entries = output.split("\n")
+
+ for entry in lldp_entries:
+ if entry.startswith('lldp'):
+ path, value = entry.strip().split("=", 1)
+ path = path.split(".")
+ path_components, final = path[:-1], path[-1]
+ else:
+ value = current_dict[final] + '\n' + entry
+
+ current_dict = output_dict
+ for path_component in path_components:
+ current_dict[path_component] = current_dict.get(path_component, {})
+ current_dict = current_dict[path_component]
+ current_dict[final] = value
+ return output_dict
+
+
+def main():
+ module = AnsibleModule({})
+
+ lldp_output = gather_lldp(module)
+ try:
+ data = {'lldp': lldp_output['lldp']}
+ module.exit_json(ansible_facts=data)
+ except TypeError:
+ module.fail_json(msg="lldpctl command failed. is lldpd running?")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py
new file mode 100644
index 00000000..9a5b84f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+author:
+- Augustus Kling (@AugustusKling)
+options:
+ name:
+ type: str
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the locale shall be present.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+ community.general.locale_gen:
+ name: de_CH.UTF-8
+ state: present
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale_gen module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+    checking either:
+    * if the locale is present in /etc/locale.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+    with open(__locales_available, 'r') as fd:
+        for line in fd:
+            result = re_compiled.match(line)
+            if result and result.group('locale') == name:
+                return True
+    return False
+
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ output = to_native(output)
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+    with open("/etc/locale.gen", "r") as f:
+        lines = [line.replace(existing_line, new_line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
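+    # For name='de_CH.UTF-8' this rewrites '# de_CH.UTF-8 UTF-8' to
+    # 'de_CH.UTF-8 UTF-8' when enabling, and prefixes it with '# ' again
+    # when disabling.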
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
+    with open("/etc/locale.gen", "r") as f:
+        lines = [re.sub(search_string, new_string, line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+        with open("/var/lib/locales/supported.d/local", "r") as f:
+            content = f.readlines()
+        with open("/var/lib/locales/supported.d/local", "w") as f:
+            for line in content:
+                locale, charset = line.split(' ')
+                if locale != name:
+                    f.write(line)
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locale you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state != state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if ubuntuMode is False:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError as e:
+ module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py
new file mode 100644
index 00000000..8f39fb51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl <ivan@app.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+ - Sends logs to LogEntries in realtime
+options:
+ path:
+ type: str
+ description:
+ - path to a log file
+ required: true
+ state:
+ type: str
+ description:
+ - following state of the log
+ choices: [ 'present', 'absent', 'followed', 'unfollowed' ]
+ required: false
+ default: present
+ name:
+ type: str
+ description:
+ - name of the log
+ required: false
+ logtype:
+ type: str
+ description:
+ - type of the log
+ required: false
+ aliases: [type]
+
+notes:
+ - Requires the LogEntries agent which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- name: Track nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/access.log
+ state: present
+ name: nginx-access-log
+
+- name: Stop tracking nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/error.log
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_log_status(module, le_path, path, state="present"):
+ """ Returns whether a log is followed or not. """
+
+ if state == "present":
+ rc, out, err = module.run_command("%s followed %s" % (le_path, path))
+ if rc == 0:
+ return True
+
+ return False
+
+
+def follow_log(module, le_path, logs, name=None, logtype=None):
+ """ Follows one or more logs if not already followed. """
+
+ followed_count = 0
+
+ for log in logs:
+ if query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = [le_path, 'follow', log]
+ if name:
+ cmd.extend(['--name', name])
+ if logtype:
+ cmd.extend(['--type', logtype])
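+        # e.g. ['le', 'follow', '/var/log/nginx/access.log', '--name', 'nginx-access-log']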
+        rc, out, err = module.run_command(cmd)  # pass the list directly so paths with spaces survive
+
+ if not query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
+
+ followed_count += 1
+
+ if followed_count > 0:
+ module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+    module.exit_json(changed=False, msg="log(s) already followed")
+
+
+def unfollow_log(module, le_path, logs):
+ """ Unfollows one or more logs if followed. """
+
+ removed_count = 0
+
+    # Use a for loop so that on error we can report which log failed
+ for log in logs:
+ # Query the log first, to see if we even need to remove.
+ if not query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'rm', log])
+
+ if query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
+
+ removed_count += 1
+
+ if removed_count > 0:
+        module.exit_json(changed=True, msg="removed %d log(s)" % removed_count)
+
+    module.exit_json(changed=False, msg="log(s) already unfollowed")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True),
+ state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
+ name=dict(required=False, default=None, type='str'),
+ logtype=dict(required=False, default=None, type='str', aliases=['type'])
+ ),
+ supports_check_mode=True
+ )
+
+ le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+ p = module.params
+
+ # Handle multiple log files
+ logs = p["path"].split(",")
+ logs = filter(None, logs)
+
+ if p["state"] in ["present", "followed"]:
+ follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
+
+ elif p["state"] in ["absent", "unfollowed"]:
+ unfollow_log(module, le_path, logs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py
new file mode 100644
index 00000000..59e0f325
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries_msg
+short_description: Send a message to logentries.
+description:
+ - Send a message to logentries
+requirements:
+ - "python >= 2.6"
+options:
+ token:
+ type: str
+ description:
+ - Log token.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ api:
+ type: str
+ description:
+ - API endpoint
+ default: data.logentries.com
+ port:
+ type: int
+ description:
+ - API endpoint port
+ default: 80
+author: "Jimmy Tang (@jcftang) <jimmy_tang@rapid7.com>"
+'''
+
+RETURN = '''# '''
+
+EXAMPLES = '''
+- name: Send a message to logentries
+ community.general.logentries_msg:
+    token: 00000000-0000-0000-0000-000000000000
+    msg: "{{ ansible_hostname }}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def send_msg(module, token, msg, api, port):
+
+ message = "{0} {1}\n".format(token, msg)
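+    # The Logentries plain-TCP input expects each newline-terminated record
+    # to be prefixed with the log token, e.g.
+    # "00000000-0000-0000-0000-000000000000 myhost\n".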
+
+ api_ip = socket.gethostbyname(api)
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((api_ip, port))
+ try:
+ if not module.check_mode:
+            s.send(to_bytes(message))  # sockets require bytes under Python 3
+ except Exception as e:
+ module.fail_json(msg="failed to send message, msg=%s" % e)
+ s.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=True),
+ api=dict(type='str', default="data.logentries.com"),
+ port=dict(type='int', default=80)),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ msg = module.params["msg"]
+ api = module.params["api"]
+ port = module.params["port"]
+
+ changed = False
+ try:
+ send_msg(module, token, msg, api, port)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % e)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py
new file mode 100644
index 00000000..4a45c04a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logstash_plugin
+short_description: Manage Logstash plugins
+description:
+ - Manages Logstash plugins.
+author: Loic Blot (@nerzhul)
+options:
+ name:
+ type: str
+ description:
+ - Install plugin with that name.
+ required: True
+ state:
+ type: str
+ description:
+ - Apply plugin state.
+ choices: ["present", "absent"]
+ default: present
+ plugin_bin:
+ type: path
+ description:
+ - Specify logstash-plugin to use for plugin management.
+ default: /usr/share/logstash/bin/logstash-plugin
+ proxy_host:
+ type: str
+ description:
+ - Proxy host to use during plugin installation.
+ proxy_port:
+ type: str
+ description:
+ - Proxy port to use during plugin installation.
+ version:
+ type: str
+ description:
+            - Version of the plugin to install.
+              If the plugin is already installed at a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Logstash beats input plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+
+- name: Install specific version of a plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-syslog
+ version: '3.2.0'
+
+- name: Uninstall Logstash plugin
+ community.general.logstash_plugin:
+ state: absent
+ name: logstash-filter-multiline
+
+- name: Install Logstash plugin with alternate heap size
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+ environment:
+ LS_JAVA_OPTS: "-Xms256m -Xmx256m"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+
+def is_plugin_present(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, "list", plugin_name]
+ rc, out, err = module.run_command(" ".join(cmd_args))
+ return rc == 0
+
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if version:
+ cmd_args.append("--version %s" % version)
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
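+    # The joined command looks like:
+    #   /usr/share/logstash/bin/logstash-plugin install logstash-input-syslog --version 3.2.0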
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ plugin_bin = module.params["plugin_bin"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ present = is_plugin_present(module, plugin_bin, name)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port)
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py
new file mode 100644
index 00000000..25f261ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
+ type: list
+ elements: str
+ pesize:
+ description:
+ - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ (where the sector size is the largest sector size of the PVs currently used in the VG),
+ or at least 128KiB."
+ - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ type: str
+ default: "4"
+ pv_options:
+ description:
+ - Additional options to pass to C(pvcreate) when creating the volume group.
+ type: str
+ pvresize:
+ description:
+ - If C(yes), resize the physical volume to the maximum available size.
+ type: bool
+ default: false
+ version_added: '0.2.0'
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ type: str
+ state:
+ description:
+ - Control if the volume group exists.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ force:
+ description:
+    - If C(yes), allows removal of a volume group that still contains logical volumes.
+ type: bool
+ default: no
+seealso:
+- module: community.general.filesystem
+- module: community.general.lvol
+- module: community.general.parted
+notes:
+  - This module does not modify the PE size of an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
+
+- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb
+ pesize: 128K
+
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+
+- name: Remove a volume group with name vg.services
+ community.general.lvg:
+ vg: vg.services
+ state: absent
+
+- name: Create a volume group on top of /dev/sda3 and resize the volume group /dev/sda3 to the maximum possible
+ community.general.lvg:
+ vg: resizableVG
+ pvs: /dev/sda3
+ pvresize: yes
+'''
+
+import itertools
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
+
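+# Illustrative example (hypothetical data): with the ';' separator passed to
+# the vgs call in main(), a line such as '  vg.services;2;1' is parsed by
+# parse_vgs() into {'name': 'vg.services', 'pv_count': 2, 'lv_count': 1}.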
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
+
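+# Illustrative example (hypothetical data): a pvs line such as
+# '  /dev/dm-0;vg.services' first has its /dev/dm-* name translated to the
+# matching /dev/mapper/* name via dmsetup, so parse_pvs() would yield
+# {'name': '/dev/mapper/mpatha', 'vg_name': 'vg.services'}.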
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ pvs=dict(type='list', elements='str'),
+ pesize=dict(type='str', default='4'),
+ pv_options=dict(type='str', default=''),
+ pvresize=dict(type='bool', default=False),
+ vg_options=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pvresize = module.boolean(module.params['pvresize'])
+ pesize = module.params['pesize']
+ pvoptions = module.params['pv_options'].split()
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = list(module.params['pvs'])
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths not symlinks so replace symlinks with actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state == 'present':
+ # check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found." % test_dev)
+
+ # get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ if dev_list:
+ pvs_filter_pv_name = ' || '.join(
+ 'pv_name = {0}'.format(x)
+ for x in itertools.chain(dev_list, module.params['pvs'])
+ )
+ pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
+ pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ else:
+ pvs_filter = ''
+ rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
+
+ # check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ # create VG
+ if module.check_mode:
+ changed = True
+ else:
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+                vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ # remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
+ else:
+ module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))
+
+ # resize VG
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
+
+ if current_devs:
+ if state == 'present' and pvresize:
+ for device in current_devs:
+ pvresize_cmd = module.get_bin_path('pvresize', True)
+ pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
+ pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"]
+ pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
+ rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"])
+ dev_size = int(dev_size.replace(" ", ""))
+ rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"])
+ pv_size = int(pv_size.replace(" ", ""))
+ rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"])
+ pe_start = int(pe_start.replace(" ", ""))
+ rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"])
+ vg_extent_size = int(vg_extent_size.replace(" ", ""))
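+                # The difference dev_size - (pe_start + pv_size) is the space
+                # on the device not yet covered by the PV; a pvresize is only
+                # worthwhile when that space could hold at least one more
+                # extent, which is what the check below tests.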
+ if (dev_size - (pe_start + pv_size)) > vg_extent_size:
+ if module.check_mode:
+ changed = True
+ else:
+ rc, _, err = module.run_command([pvresize_cmd, device])
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err)
+ else:
+ changed = True
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py
new file mode 100644
index 00000000..fa50007e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py
@@ -0,0 +1,566 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - Alexander Bulimov (@abulimov)
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+ - This module creates, removes or resizes logical volumes.
+options:
+ vg:
+ type: str
+ required: true
+ description:
+ - The volume group this logical volume is part of.
+ lv:
+ type: str
+ description:
+ - The name of the logical volume.
+ size:
+ type: str
+ description:
+ - The size of the logical volume, according to lvcreate(8) --size, by
+ default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
+        according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE].
+        Float values must begin with a digit.
+ Resizing using percentage values was not supported prior to 2.1.
+ state:
+ type: str
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ choices: [ absent, present ]
+ default: present
+ active:
+ description:
+ - Whether the volume is active and visible to the host.
+ type: bool
+ default: 'yes'
+ force:
+ description:
+      - Shrink or remove operations of volumes require this switch. Ensures that
+        filesystems never get corrupted or destroyed by mistake.
+ type: bool
+ default: 'no'
+ opts:
+ type: str
+ description:
+ - Free-form options to be passed to the lvcreate command.
+ snapshot:
+ type: str
+ description:
+      - The name of the snapshot volume.
+ pvs:
+ type: str
+ description:
+ - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
+ thinpool:
+ type: str
+ description:
+ - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
+ shrink:
+ description:
+ - Shrink if current size is higher than size requested.
+ type: bool
+ default: 'yes'
+ resizefs:
+ description:
+ - Resize the underlying filesystem together with the logical volume.
+ type: bool
+ default: 'no'
+notes:
+ - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
+'''
+
+EXAMPLES = '''
+- name: Create a logical volume of 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+
+- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ pvs: /dev/sda,/dev/sdb
+
+- name: Create cache pool logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: lvcache
+ size: 512m
+ opts: --type cache-pool
+
+- name: Create a logical volume of 512g.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+
+- name: Create a logical volume the size of all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%FREE
+
+- name: Create a logical volume with special options
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ opts: -r 16
+
+- name: Extend the logical volume to 1024m.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 1024
+
+- name: Extend the logical volume to consume all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: +100%FREE
+
+- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%PVS
+ resizefs: true
+
+- name: Resize the logical volume to % of VG
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 80%VG
+ force: yes
+
+- name: Reduce the logical volume to 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ force: yes
+
+- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ shrink: no
+
+- name: Remove the logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ state: absent
+ force: yes
+
+- name: Create a snapshot volume of the test logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ snapshot: snap1
+ size: 100m
+
+- name: Deactivate a logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ active: false
+
+- name: Create a deactivated logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ active: false
+
+- name: Create a thin pool of 512g
+ community.general.lvol:
+ vg: firefly
+ thinpool: testpool
+ size: 512g
+
+- name: Create a thin volume of 128g
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ thinpool: testpool
+ size: 128g
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LVOL_ENV_VARS = dict(
+ # make sure we use the C locale when running lvol-related commands
+ LANG='C',
+ LC_ALL='C',
+ LC_MESSAGES='C',
+ LC_CTYPE='C',
+)
+
+
+def mkversion(major, minor, patch):
+ return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
+
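+# Illustrative example: mkversion(2, 2, 99) returns 2002099, so plain integer
+# comparison orders LVM versions correctly as long as the minor and patch
+# numbers stay below 1000.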
+
+def parse_lvs(data):
+ lvs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ lvs.append({
+ 'name': parts[0].replace('[', '').replace(']', ''),
+ 'size': float(parts[1]),
+ 'active': (parts[2][4] == 'a'),
+ 'thinpool': (parts[2][0] == 't'),
+ 'thinvol': (parts[2][0] == 'V'),
+ })
+ return lvs
+
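+# Illustrative example (hypothetical data): an lvs line such as
+# '  [testpool];512.00;twi-aotz--' is parsed by parse_lvs() into
+# {'name': 'testpool', 'size': 512.0, 'active': True, 'thinpool': True,
+#  'thinvol': False}; the brackets are stripped, and the first and fifth
+# lv_attr characters encode the volume type and activation state.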
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'size': float(parts[1]),
+ 'free': float(parts[2]),
+ 'ext_size': float(parts[3])
+ })
+ return vgs
+
+
+def get_lvm_version(module):
+ ver_cmd = module.get_bin_path("lvm", required=True)
+ rc, out, err = module.run_command("%s version" % (ver_cmd))
+ if rc != 0:
+ return None
+ m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+ if not m:
+ return None
+ return mkversion(m.group(1), m.group(2), m.group(3))
+
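+# Illustrative example (hypothetical output): 'lvm version' prints a line like
+# '  LVM version:     2.02.176(2) (2017-11-03)', which the regex above reduces
+# to mkversion('2', '02', '176') == 2002176.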
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str'),
+ size=dict(type='str'),
+ opts=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ shrink=dict(type='bool', default=True),
+ active=dict(type='bool', default=True),
+ snapshot=dict(type='str'),
+ pvs=dict(type='str'),
+ resizefs=dict(type='bool', default=False),
+ thinpool=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=(
+ ['lv', 'thinpool'],
+ ),
+ )
+
+ module.run_command_environ_update = LVOL_ENV_VARS
+
+ # Determine if the "--yes" option should be used
+ version_found = get_lvm_version(module)
+ if version_found is None:
+ module.fail_json(msg="Failed to get LVM version number")
+ version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
+ if version_found >= version_yesopt:
+ yesopt = "--yes"
+ else:
+ yesopt = ""
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ size = module.params['size']
+ opts = module.params['opts']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ shrink = module.boolean(module.params['shrink'])
+ active = module.boolean(module.params['active'])
+ resizefs = module.boolean(module.params['resizefs'])
+ thinpool = module.params['thinpool']
+ size_opt = 'L'
+ size_unit = 'm'
+ snapshot = module.params['snapshot']
+ pvs = module.params['pvs']
+
+ if pvs is None:
+ pvs = ""
+ else:
+ pvs = pvs.replace(",", " ")
+
+ if opts is None:
+ opts = ""
+
+ # Add --test option when running in check-mode
+ if module.check_mode:
+ test_opt = ' --test'
+ else:
+ test_opt = ''
+
+ if size:
+ # LVCREATE(8) -l --extents option with percentage
+ if '%' in size:
+ size_parts = size.split('%', 1)
+ size_percent = int(size_parts[0])
+ if size_percent > 100:
+ module.fail_json(msg="Size percentage cannot be larger than 100%")
+ size_whole = size_parts[1]
+ if size_whole == 'ORIGIN':
+ module.fail_json(msg="Snapshot Volumes are not supported")
+ elif size_whole not in ['VG', 'PVS', 'FREE']:
+ module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
+ size_opt = 'l'
+ size_unit = ''
+
+ if '%' not in size:
+ # LVCREATE(8) -L --size option unit
+ if size[-1].lower() in 'bskmgtpe':
+ size_unit = size[-1].lower()
+ size = size[0:-1]
+
+ try:
+ float(size)
+ if not size[0].isdigit():
+ raise ValueError()
+ except ValueError:
+ module.fail_json(msg="Bad size specification of '%s'" % size)
+
+ # when no unit, megabytes by default
+ if size_opt == 'l':
+ unit = 'm'
+ else:
+ unit = size_unit
+
+ # Get information on volume group requested
+ vgs_cmd = module.get_bin_path("vgs", required=True)
+ rc, current_vgs, err = module.run_command(
+ "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ vgs = parse_vgs(current_vgs)
+ this_vg = vgs[0]
+
+ # Get information on logical volume requested
+ lvs_cmd = module.get_bin_path("lvs", required=True)
+ rc, current_lvs, err = module.run_command(
+ "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ changed = False
+
+ lvs = parse_lvs(current_lvs)
+
+ if snapshot:
+ # Check snapshot pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == lv or test_lv['name'] == thinpool:
+ if not test_lv['thinpool'] and not thinpool:
+ break
+ else:
+ module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
+ else:
+ module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
+ check_lv = snapshot
+
+ elif thinpool:
+ if lv:
+ # Check thin volume pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == thinpool:
+ break
+ else:
+ module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
+ check_lv = lv
+ else:
+ check_lv = thinpool
+ else:
+ check_lv = lv
+
+ for test_lv in lvs:
+ if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
+ this_lv = test_lv
+ break
+ else:
+ this_lv = None
+
+ msg = ''
+ if this_lv is None:
+ if state == 'present':
+ # Require size argument except for snapshot of thin volumes
+ if (lv or thinpool) and not size:
+ for test_lv in lvs:
+ if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
+ break
+ else:
+ module.fail_json(msg="No size given.")
+
+ # create LV
+ lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ if snapshot is not None:
+ if size:
+ cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
+ else:
+ cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
+ elif thinpool and lv:
+ if size_opt == 'l':
+ module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
+ size_opt = 'V'
+ cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
+ elif thinpool and not lv:
+ cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
+ else:
+ cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ rc, _, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ if not force:
+ module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
+ lvremove_cmd = module.get_bin_path("lvremove", required=True)
+ rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
+
+ elif not size:
+ pass
+
+ elif size_opt == 'l':
+ # Resize LV based on % value
+ tool = None
+ size_free = this_vg['free']
+ if size_whole == 'VG' or size_whole == 'PVS':
+ size_requested = size_percent * this_vg['size'] / 100
+ else: # size_whole == 'FREE':
+ size_requested = size_percent * this_vg['free'] / 100
+
+ # Round down to the next lowest whole physical extent
+ size_requested -= (size_requested % this_vg['ext_size'])
+
+ if '+' in size:
+ size_requested += this_lv['size']
+ if this_lv['size'] < size_requested:
+ if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
+ tool = module.get_bin_path("lvextend", required=True)
+ else:
+ module.fail_json(
+ msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
+ (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
+ )
+ elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
+ if size_requested == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ elif not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ else:
+ # resize LV based on absolute values
+ tool = None
+ if float(size) > this_lv['size']:
+ tool = module.get_bin_path("lvextend", required=True)
+ elif shrink and float(size) < this_lv['size']:
+ if float(size) == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ if not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ if this_lv is not None:
+ if active:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
+ else:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py
new file mode 100644
index 00000000..c1a3d1c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py
@@ -0,0 +1,1760 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lxc_container
+short_description: Manage LXC Containers
+description:
+ - Management of LXC containers.
+author: "Kevin Carter (@cloudnull)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ type: str
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ type: str
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ type: str
+ config:
+ description:
+ - Path to the LXC configuration file.
+ type: path
+ lv_name:
+ description:
+ - Name of the logical volume, defaults to the container name.
+ - If not specified, it defaults to C($CONTAINER_NAME).
+ type: str
+ vg_name:
+ description:
+ - If backend store is lvm, specify the name of the volume group.
+ type: str
+ default: lxc
+ thinpool:
+ description:
+ - Use LVM thin pool called TP.
+ type: str
+ fs_type:
+ description:
+ - Create fstype TYPE.
+ type: str
+ default: ext4
+ fs_size:
+ description:
+ - File system Size.
+ type: str
+ default: 5G
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ type: path
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ type: str
+ container_command:
+ description:
+ - Run a command within a container.
+ type: str
+ lxc_path:
+ description:
+ - Place container under PATH.
+ type: path
+ container_log:
+ description:
+ - Enable a container log for host actions to the container.
+ type: bool
+ default: 'no'
+ container_log_level:
+ choices:
+ - Info
+ - info
+ - INFO
+ - Error
+ - error
+ - ERROR
+ - Debug
+ - debug
+ - DEBUG
+ description:
+ - Set the log level for a container where *container_log* was set.
+ type: str
+ required: false
+ default: INFO
+ clone_name:
+ description:
+ - Name of the new cloned server.
+ - This is only used when state is clone.
+ type: str
+ clone_snapshot:
+ description:
+      - Create a snapshot of the container when cloning.
+ - This is not supported by all container storage backends.
+ - Enabling this may fail if the backing store does not support snapshots.
+ type: bool
+ default: 'no'
+ archive:
+ description:
+ - Create an archive of a container.
+ - This will create a tarball of the running container.
+ type: bool
+ default: 'no'
+ archive_path:
+ description:
+      - Path used to save the archived container.
+ - If the path does not exist the archive method will attempt to create it.
+ type: path
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running
+ container.
+ type: str
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ - clone
+ description:
+ - Define the state of a container.
+ - If you clone a container using I(clone_name) the newly cloned
+      container is created in a stopped state.
+ - The running container will be stopped while the clone operation is
+ happening and upon completion of the clone the original container
+ state will be restored.
+ type: str
+ default: started
+ container_config:
+ description:
+ - A list of C(key=value) options to use when configuring a container.
+ type: list
+ elements: str
+requirements:
+ - 'lxc >= 1.0 # OS package'
+ - 'python >= 2.6 # OS Package'
+ - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in the user's namespace, the module will
+ simply return as "unchanged".
+ - The "container_command" can be used with any state except "absent". If
+ used with state "stopped" the container will be "started", the command
+ executed, and then the container "stopped" again. Likewise if the state
+ is "stopped" and the container does not exist it will be first created,
+ "started", the command executed, and then "stopped". If you use a "|"
+ in the variable you can use common script formatting within the variable
+ itself The "container_command" option will always execute as BASH.
+ When using "container_command" a log file is created in the /tmp/ directory
+ which contains both stdout and stderr of any command executed.
+ - If "archive" is **true** the system will attempt to create a compressed
+ tarball of the running container. The "archive" option supports LVM backed
+ containers and will create a snapshot of the running container when
+ creating the archive.
+ - If your distro does not have a package for "python2-lxc", which is a
+ requirement for this module, it can be installed from source at
+ "https://github.com/lxc/python2-lxc" or installed via pip using the package
+ name lxc-python2.
+'''
+
+EXAMPLES = r"""
+- name: Create a started container
+ community.general.lxc_container:
+ name: test-container-started
+ container_log: true
+ template: ubuntu
+ state: started
+ template_options: --release trusty
+
+- name: Create a stopped container
+ community.general.lxc_container:
+ name: test-container-stopped
+ container_log: true
+ template: ubuntu
+ state: stopped
+ template_options: --release trusty
+
+- name: Create a frozen container
+ community.general.lxc_container:
+ name: test-container-frozen
+ container_log: true
+ template: ubuntu
+ state: frozen
+ template_options: --release trusty
+ container_command: |
+ echo 'hello world.' | tee /opt/started-frozen
+
+# Create filesystem container, configure it, and archive it, and start it.
+- name: Create filesystem container
+ community.general.lxc_container:
+ name: test-container-config
+ backing_store: dir
+ container_log: true
+ template: ubuntu
+ state: started
+ archive: true
+ archive_compression: none
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2
+- name: Create a frozen lvm container
+ community.general.lxc_container:
+ name: test-container-lvm
+ container_log: true
+ template: ubuntu
+ state: frozen
+ backing_store: lvm
+ template_options: --release trusty
+ container_command: |
+ apt-get update
+ apt-get install -y vim lxc-dev
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ archive: true
+ archive_compression: bzip2
+ register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+ ansible.builtin.debug:
+ var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+ community.general.lxc_container:
+ name: test-container-started
+ state: stopped
+ container_command: |
+ echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: frozen
+ container_command: |
+ echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: started
+
+- name: Run a command in a container and then restart it
+ community.general.lxc_container:
+ name: test-container-started
+ state: restarted
+ container_command: |
+ echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+ community.general.lxc_container:
+ name: test-container-started
+ container_command: |
+ apt-get update
+ apt-get install -y curl wget vim apache2
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+ community.general.lxc_container:
+ name: test-container-started
+ state: absent
+ archive: true
+ archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+ community.general.lxc_container:
+ name: test-container-overlayfs
+ container_log: true
+ template: ubuntu
+ state: started
+ backing_store: overlayfs
+ template_options: --release trusty
+ clone_snapshot: true
+ clone_name: test-container-overlayfs-clone-snapshot
+ archive: true
+ archive_compression: gzip
+ register: clone_container_info
+
+- name: Debug info on container "test-container"
+ ansible.builtin.debug:
+ var: clone_container_info
+
+- name: Clone a container using snapshot
+ community.general.lxc_container:
+ name: test-container-overlayfs-clone-snapshot
+ backing_store: overlayfs
+ clone_name: test-container-overlayfs-clone-snapshot2
+ clone_snapshot: true
+
+- name: Create a new container and clone it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ backing_store: dir
+ clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ state: absent
+ clone_name: test-container-new-archive-destroyed-clone
+ archive: true
+ archive_compression: gzip
+
+- name: Start a cloned container.
+ community.general.lxc_container:
+ name: test-container-new-archive-destroyed-clone
+ state: started
+
+- name: Destroy a container
+ community.general.lxc_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - test-container-stopped
+ - test-container-started
+ - test-container-frozen
+ - test-container-lvm
+ - test-container-config
+ - test-container-overlayfs
+ - test-container-overlayfs-clone
+ - test-container-overlayfs-clone-snapshot
+ - test-container-overlayfs-clone-snapshot2
+ - test-container-new-archive
+ - test-container-new-archive-clone
+ - test-container-new-archive-destroyed-clone
+"""
+
+RETURN = r"""
+lxc_container:
+ description: container information
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: name of the lxc container
+ returned: success
+ type: str
+ sample: test_host
+ init_pid:
+ description: pid of the lxc init process
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: list of the container's network interfaces
+ returned: success
+ type: list
+ sample: [ "eth0", "lo" ]
+ ips:
+ description: list of ips
+ returned: success
+ type: list
+ sample: [ "10.0.3.3" ]
+ state:
+ description: resulting state of the container
+ returned: success
+ type: str
+ sample: "running"
+ archive:
+      description: path of the created archive of the container
+ returned: success, when archive is true
+ type: str
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: if the container was cloned
+ returned: success, when clone_name is specified
+ type: bool
+ sample: True
+"""
+
+import os
+import os.path
+import re
+import shutil
+import subprocess
+import tempfile
+import time
+
+try:
+ import lxc
+except ImportError:
+ HAS_LXC = False
+else:
+ HAS_LXC = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_text, to_bytes
+
+
+# LXC_COMPRESSION_MAP is a map of available compression types when creating
+# an archive of a container.
+LXC_COMPRESSION_MAP = {
+ 'gzip': {
+ 'extension': 'tar.tgz',
+ 'argument': '-czf'
+ },
+ 'bzip2': {
+ 'extension': 'tar.bz2',
+ 'argument': '-cjf'
+ },
+ 'none': {
+ 'extension': 'tar',
+ 'argument': '-cf'
+ }
+}
+
+
+# LXC_COMMAND_MAP is a map of variables that are available to a method based
+# on the state the container is in.
+LXC_COMMAND_MAP = {
+ 'create': {
+ 'variables': {
+ 'config': '--config',
+ 'template': '--template',
+ 'backing_store': '--bdev',
+ 'lxc_path': '--lxcpath',
+ 'lv_name': '--lvname',
+ 'vg_name': '--vgname',
+ 'thinpool': '--thinpool',
+ 'fs_type': '--fstype',
+ 'fs_size': '--fssize',
+ 'directory': '--dir',
+ 'zfs_root': '--zfsroot'
+ }
+ },
+ 'clone': {
+ 'variables-lxc-copy': {
+ 'backing_store': '--backingstorage',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--name',
+ 'clone_name': '--newname'
+ },
+ # lxc-clone is deprecated in favor of lxc-copy
+ 'variables-lxc-clone': {
+ 'backing_store': '--backingstore',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--orig',
+ 'clone_name': '--new'
+ }
+ }
+}
+
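+# Illustrative example (hypothetical values): with backing_store=lvm and
+# fs_size=5G, _get_vars() maps the 'create' variables above to a dict that
+# includes {'--bdev': 'lvm', '--fssize': '5G'} (plus any other options that
+# are set), and _add_variables() appends each pair to the lxc-create command
+# line as '--bdev lvm' and '--fssize 5G'.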
+
+# LXC_BACKING_STORE is a map of available storage backends and options that
+# are incompatible with the given storage backend.
+LXC_BACKING_STORE = {
+ 'dir': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ],
+ 'lvm': [
+ 'zfs_root'
+ ],
+ 'btrfs': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
+ ],
+ 'loop': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root'
+ ],
+ 'overlayfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
+ ],
+ 'zfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ]
+}
+
+
+# LXC_LOGGING_LEVELS is a map of available log levels
+LXC_LOGGING_LEVELS = {
+ 'INFO': ['info', 'INFO', 'Info'],
+ 'ERROR': ['error', 'ERROR', 'Error'],
+ 'DEBUG': ['debug', 'DEBUG', 'Debug']
+}
+
+
+# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXC_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen',
+ 'clone': '_clone'
+}
+
+
+# This is used to attach to a running container and execute commands from
+# within the container on the host. This will provide local access to a
+# container without using SSH. The template will attempt to work within the
+# home directory of the user that was attached to the container and source
+# that users environment variables by default.
+ATTACH_TEMPLATE = """#!/usr/bin/env bash
+pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
+ if [[ -f ".bashrc" ]];then
+ source .bashrc
+ unset HOSTNAME
+ fi
+popd
+
+# User defined command
+%(container_command)s
+"""
+
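+# Illustrative example (hypothetical command): rendering the template as
+# ATTACH_TEMPLATE % {'container_command': 'uptime'} yields the script that
+# create_script() writes to a temporary file and runs; the module passes
+# create_script to Container.attach_wait() so the script executes inside the
+# container.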
+
+def create_script(command):
+ """Write out a script onto a target.
+
+ This method should be backward compatible with Python 2.4+ when executing
+ from within the container.
+
+ :param command: command to run, this can be a script and can use spacing
+ with newlines as separation.
+ :type command: ``str``
+ """
+
+ (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
+ f = os.fdopen(fd, 'wb')
+ try:
+ f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict'))
+ f.flush()
+ finally:
+ f.close()
+
+ # Ensure the script is executable.
+ os.chmod(script_file, int('0700', 8))
+
+ # Output log file.
+ stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
+
+ # Error log file.
+ stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
+
+ # Execute the script command.
+ try:
+ subprocess.Popen(
+ [script_file],
+ stdout=stdout_file,
+ stderr=stderr_file
+ ).communicate()
+ finally:
+ # Close the log files.
+ stderr_file.close()
+ stdout_file.close()
+
+ # Remove the script file upon completion of execution.
+ os.remove(script_file)
+
+
+class LxcContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.state = self.module.params.get('state', None)
+ self.state_change = False
+ self.lxc_vg = None
+ self.lxc_path = self.module.params.get('lxc_path', None)
+ self.container_name = self.module.params['name']
+ self.container = self.get_container_bind()
+ self.archive_info = None
+ self.clone_info = None
+
+ def get_container_bind(self):
+ return lxc.Container(name=self.container_name)
+
+ @staticmethod
+ def _roundup(num):
+ """Return a rounded floating point number.
+
+ :param num: Number to round up.
+        :type num: ``float``
+ :returns: Rounded up number.
+ :rtype: ``int``
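+
+        Example (illustrative): _roundup(5.2) -> 6, _roundup(5.0) -> 5.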
+ """
+ num, part = str(num).split('.')
+ num = int(num)
+ if int(part) != 0:
+ num += 1
+ return num
+
+ @staticmethod
+ def _container_exists(container_name, lxc_path=None):
+ """Check if a container exists.
+
+        :param container_name: Name of the container.
+        :type container_name: ``str``
+        :returns: True or False if the container is found.
+        :rtype: ``bool``
+ """
+ if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
+ return True
+ else:
+ return False
+
+ @staticmethod
+ def _add_variables(variables_dict, build_command):
+ """Return a command list with all found options.
+
+ :param variables_dict: Pre-parsed optional variables used from a
+ seed command.
+ :type variables_dict: ``dict``
+ :param build_command: Command to run.
+ :type build_command: ``list``
+ :returns: list of command options.
+ :rtype: ``list``
+ """
+
+ for key, value in variables_dict.items():
+ build_command.append(
+ '%s %s' % (key, value)
+ )
+ return build_command
+
+ def _get_vars(self, variables):
+ """Return a dict of all variables as found within the module.
+
+ :param variables: Hash of all variables to find.
+ :type variables: ``dict``
+ """
+
+ # Remove incompatible storage backend options.
+ variables = variables.copy()
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+ variables.pop(v, None)
+
+ return_dict = dict()
+ false_values = BOOLEANS_FALSE.union([None, ''])
+ for k, v in variables.items():
+ _var = self.module.params.get(k)
+ if _var not in false_values:
+ return_dict[v] = _var
+ return return_dict
+
+ def _run_command(self, build_command, unsafe_shell=False):
+ """Return information from running an Ansible Command.
+
+ This will squash the build command list into a string and then
+ execute the command via Ansible. The output is returned to the method.
+ This output is returned as `return_code`, `stdout`, `stderr`.
+
+ :param build_command: Used for the command and all options.
+ :type build_command: ``list``
+        :param unsafe_shell: Enable or disable unsafe shell commands.
+        :type unsafe_shell: ``bool``
+ """
+
+ return self.module.run_command(
+ ' '.join(build_command),
+ use_unsafe_shell=unsafe_shell
+ )
+
+ def _config(self):
+ """Configure an LXC container.
+
+ Write new configuration values to the lxc config file. This will
+        stop the container if it is running, write the new options, and then
+ restart the container upon completion.
+ """
+
+ _container_config = self.module.params.get('container_config')
+ if not _container_config:
+ return False
+
+ container_config_file = self.container.config_file_name
+ with open(container_config_file, 'rb') as f:
+ container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
+
+ parsed_options = [i.split('=', 1) for i in _container_config]
+ config_change = False
+ for key, value in parsed_options:
+ key = key.strip()
+ value = value.strip()
+ new_entry = '%s = %s\n' % (key, value)
+ keyre = re.compile(r'%s(\s+)?=' % key)
+ for option_line in container_config:
+ # Look for key in config
+ if keyre.match(option_line):
+ _, _value = option_line.split('=', 1)
+ config_value = ' '.join(_value.split())
+ line_index = container_config.index(option_line)
+ # If the sanitized values don't match replace them
+ if value != config_value:
+ line_index += 1
+ if new_entry not in container_config:
+ config_change = True
+ container_config.insert(line_index, new_entry)
+ # Break the flow as values are written or not at this point
+ break
+ else:
+ config_change = True
+ container_config.append(new_entry)
+
+ # If the config changed restart the container.
+ if config_change:
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.container.stop()
+
+ with open(container_config_file, 'wb') as f:
+ f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
+
+ self.state_change = True
+ if container_state == 'running':
+ self._container_startup()
+ elif container_state == 'frozen':
+ self._container_startup()
+ self.container.freeze()
+
+ def _container_create_clone(self):
+ """Clone a new LXC container from an existing container.
+
+ This method will clone an existing container to a new container using
+ the `clone_name` variable as the new container name. The method will
+ create a container if the container `name` does not exist.
+
+ Note that cloning a container will ensure that the original container
+ is "stopped" before the clone can be done. Because this operation can
+ require a state change the method will return the original container
+ to its prior state upon completion of the clone.
+
+ Once the clone is complete the new container will be left in a stopped
+ state.
+ """
+
+ # Ensure that the state of the original container is stopped
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ # lxc-clone is deprecated in favor of lxc-copy
+ clone_vars = 'variables-lxc-copy'
+ clone_cmd = self.module.get_bin_path('lxc-copy')
+ if not clone_cmd:
+ clone_vars = 'variables-lxc-clone'
+ clone_cmd = self.module.get_bin_path('lxc-clone', True)
+
+ build_command = [
+ clone_cmd,
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['clone'][clone_vars]
+ ),
+ build_command=build_command
+ )
+
+        # Use a snapshot clone when requested.
+ if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
+ build_command.append('--snapshot')
+        # If backing_store == overlayfs, force the use of snapshot:
+        # when overlayfs is used and snapshot is unset, the clone command
+        # fails with an unsupported type.
+ elif self.module.params.get('backing_store') == 'overlayfs':
+ build_command.append('--snapshot')
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing %s." % os.path.basename(clone_cmd)
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(
+ build_command
+ )
+ )
+ else:
+ self.state_change = True
+ # Restore the original state of the origin container if it was
+ # not in a stopped state.
+ if container_state == 'running':
+ self.container.start()
+ elif container_state == 'frozen':
+ self.container.start()
+ self.container.freeze()
+
+ return True
+
+ def _create(self):
+ """Create a new LXC container.
+
+ This method will build and execute a shell command to build the
+ container. It would have been nice to simply use the lxc python library
+ however at the time this was written the python library, in both py2
+ and py3 didn't support some of the more advanced container create
+ processes. These missing processes mainly revolve around backing
+ LXC containers with block devices.
+ """
+
+ build_command = [
+ self.module.get_bin_path('lxc-create', True),
+ '--name %s' % self.container_name,
+ '--quiet'
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['create']['variables']
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params.get('container_log') in BOOLEANS_TRUE:
+            # Set the logging path to /var/log/lxc if uid is root; otherwise
+            # set it to the home folder of the executing user.
+ try:
+ if os.getuid() != 0:
+ log_path = os.getenv('HOME')
+ else:
+ if not os.path.isdir('/var/log/lxc/'):
+ os.makedirs('/var/log/lxc/')
+ log_path = '/var/log/lxc/'
+ except OSError:
+ log_path = os.getenv('HOME')
+
+ build_command.extend([
+ '--logfile %s' % os.path.join(
+ log_path, 'lxc-%s.log' % self.container_name
+ ),
+ '--logpriority %s' % self.module.params.get(
+ 'container_log_level'
+ ).upper()
+ ])
+
+ # Add the template commands to the end of the command if there are any
+ template_options = self.module.params.get('template_options', None)
+ if template_options:
+ build_command.append('-- %s' % template_options)
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing lxc-create."
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(build_command)
+ )
+ else:
+ self.state_change = True
+
+ def _container_data(self):
+ """Returns a dict of container information.
+
+ :returns: container data
+ :rtype: ``dict``
+ """
+
+ return {
+ 'interfaces': self.container.get_interfaces(),
+ 'ips': self.container.get_ips(),
+ 'state': self._get_state(),
+ 'init_pid': int(self.container.init_pid),
+ 'name': self.container_name,
+ }
+
+ def _unfreeze(self):
+ """Unfreeze a container.
+
+ :returns: True or False based on if the container was unfrozen.
+        :rtype: ``bool``
+ """
+
+ unfreeze = self.container.unfreeze()
+ if unfreeze:
+ self.state_change = True
+ return unfreeze
+
+ def _get_state(self):
+ """Return the state of a container.
+
+        If the container is not found, the state returned is "absent".
+
+ :returns: state of a container as a lower case string.
+ :rtype: ``str``
+ """
+
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ return str(self.container.state).lower()
+ return str('absent')
+
+ def _execute_command(self):
+ """Execute a shell command."""
+
+ container_command = self.module.params.get('container_command')
+ if container_command:
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ self._unfreeze()
+ elif container_state == 'stopped':
+ self._container_startup()
+
+ self.container.attach_wait(create_script, container_command)
+ self.state_change = True
+
+ def _container_startup(self, timeout=60):
+ """Ensure a container is started.
+
+        :param timeout: Time before the start attempt is abandoned.
+ :type timeout: ``int``
+ """
+
+ self.container = self.get_container_bind()
+ for _ in xrange(timeout):
+ if self._get_state() != 'running':
+ self.container.start()
+ self.state_change = True
+ # post startup sleep for 1 second.
+ time.sleep(1)
+ else:
+ return True
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+            msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ def _check_archive(self):
+ """Create a compressed archive of a container.
+
+        This will store archive information as self.archive_info
+ """
+
+ if self.module.params.get('archive') in BOOLEANS_TRUE:
+ self.archive_info = {
+ 'archive': self._container_create_tar()
+ }
+
+ def _check_clone(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ clone_name = self.module.params.get('clone_name')
+ if clone_name:
+ if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+ self.clone_info = {
+ 'cloned': self._container_create_clone()
+ }
+ else:
+ self.clone_info = {
+ 'cloned': False
+ }
+
+ def _destroyed(self, timeout=60):
+ """Ensure a container is destroyed.
+
+ :param timeout: Time before the destroy operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ for _ in xrange(timeout):
+ if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ break
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+
+ if self._get_state() != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ if self.container.destroy():
+ self.state_change = True
+
+ # post destroy attempt sleep for 1 second.
+ time.sleep(1)
+ else:
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to destroy container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to be destroyed. Check'
+ ' that lxc is available and that the container is in a'
+ ' functional state.' % self.container_name
+ )
+
+ def _frozen(self, count=0):
+ """Ensure a container is frozen.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='frozen')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ pass
+ elif container_state == 'running':
+ self.container.freeze()
+ self.state_change = True
+ else:
+ self._container_startup()
+ self.container.freeze()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._frozen(count)
+
+ def _restarted(self, count=0):
+ """Ensure a container is restarted.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='restart')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Run container startup
+ self._container_startup()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._restarted(count)
+
+ def _stopped(self, count=0):
+ """Ensure a container is stopped.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='stop')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._stopped(count)
+
+ def _started(self, count=0):
+ """Ensure a container is started.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='start')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ container_state = self._get_state()
+ if container_state == 'running':
+ pass
+ elif container_state == 'frozen':
+ self._unfreeze()
+ elif not self._container_startup():
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ # Return data
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._started(count)
+
+ def _get_lxc_vg(self):
+ """Return the name of the Volume Group used in LXC."""
+
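+ # 'lxc-config lxc.bdev.lvm.vg' prints the volume group that LXC uses
+ # for LVM-backed containers, typically 'lxc'.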
+ build_command = [
+ self.module.get_bin_path('lxc-config', True),
+ "lxc.bdev.lvm.vg"
+ ]
+ rc, vg, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to read LVM VG from LXC config',
+ command=' '.join(build_command)
+ )
+ else:
+ return str(vg.strip())
+
+ def _lvm_lv_list(self):
+ """Return a list of all lv in a current vg."""
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvs', True)
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to get list of LVs',
+ command=' '.join(build_command)
+ )
+
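+ # 'lvs' prints a header row followed by one row per LV, e.g.
+ # LV VG Attr LSize ...
+ # so skip the header and keep the names of the LVs whose VG column
+ # matches the LXC volume group.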
+ all_lvms = [i.split() for i in stdout.splitlines()][1:]
+ return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
+
+ def _get_vg_free_pe(self, vg_name):
+ """Return the available size of a given VG.
+
+ :param vg_name: Name of the volume group.
+ :type vg_name: ``str``
+ :returns: free size and unit of measurement of the VG
+ :type: ``tuple``
+ """
+
+ build_command = [
+ 'vgdisplay',
+ vg_name,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read vg %s' % vg_name,
+ command=' '.join(build_command)
+ )
+
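+ # vgdisplay output includes a line such as
+ # Free PE / Size 2560 / 10.00 GiB
+ # so the last two fields of the 'Free' line are the free size and its
+ # unit of measurement.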
+ vg_info = [i.strip() for i in stdout.splitlines()][1:]
+ free_pe = [i for i in vg_info if i.startswith('Free')]
+ _free_pe = free_pe[0].split()
+ return float(_free_pe[-2]), _free_pe[-1]
+
+ def _get_lv_size(self, lv_name):
+ """Return the available size of a given LV.
+
+ :param lv_name: Name of the logical volume.
+ :type lv_name: ``str``
+ :returns: size and measurement of an LV
+ :type: ``tuple``
+ """
+
+ vg = self._get_lxc_vg()
+ lv = os.path.join(vg, lv_name)
+ build_command = [
+ 'lvdisplay',
+ lv,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read lv %s' % lv,
+ command=' '.join(build_command)
+ )
+
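+ # lvdisplay output includes a line such as
+ # LV Size 5.00 GiB
+ # so the last two fields of the 'LV Size' line are the size and its
+ # unit of measurement.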
+ lv_info = [i.strip() for i in stdout.splitlines()][1:]
+ _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+ free_pe = _free_pe[0].split()
+ return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+ def _lvm_snapshot_create(self, source_lv, snapshot_name,
+ snapshot_size_gb=5):
+ """Create an LVM snapshot.
+
+ :param source_lv: Name of lv to snapshot
+ :type source_lv: ``str``
+ :param snapshot_name: Name of lv snapshot
+ :type snapshot_name: ``str``
+ :param snapshot_size_gb: Size of snapshot to create
+ :type snapshot_size_gb: ``int``
+ """
+
+ vg = self._get_lxc_vg()
+ free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+ if free_space < float(snapshot_size_gb):
+ message = (
+ 'Snapshot size [ %s ] is > greater than [ %s ] on volume group'
+ ' [ %s ]' % (snapshot_size_gb, free_space, vg)
+ )
+ self.failure(
+ error='Not enough space to create snapshot',
+ rc=2,
+ msg=message
+ )
+
+ # Create LVM Snapshot
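+ # e.g. lvcreate -n <snapshot_name> -s <vg>/<source_lv> -L<size>g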
+ build_command = [
+ self.module.get_bin_path('lvcreate', True),
+ "-n",
+ snapshot_name,
+ "-s",
+ os.path.join(vg, source_lv),
+ "-L%sg" % snapshot_size_gb
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to create LVM snapshot %s/%s --> %s'
+ % (vg, source_lv, snapshot_name)
+ )
+
+ def _lvm_lv_mount(self, lv_name, mount_point):
+ """mount an lv.
+
+ :param lv_name: name of the logical volume to mount
+ :type lv_name: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ "/dev/%s/%s" % (vg, lv_name),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount LVM lv %s/%s to %s'
+ % (vg, lv_name, mount_point)
+ )
+
+ def _create_tar(self, source_dir):
+ """Create an archive of a given ``source_dir`` to ``output_path``.
+
+ :param source_dir: Path to the directory to be archived.
+ :type source_dir: ``str``
+ """
+
+ old_umask = os.umask(int('0077', 8))
+
+ archive_path = self.module.params.get('archive_path')
+ if not os.path.isdir(archive_path):
+ os.makedirs(archive_path)
+
+ archive_compression = self.module.params.get('archive_compression')
+ compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+ # Build the archive file name, e.g. <archive_path>/<container_name>.tar.gz
+ archive_name = '%s.%s' % (
+ os.path.join(
+ archive_path,
+ self.container_name
+ ),
+ compression_type['extension']
+ )
+
+ build_command = [
+ self.module.get_bin_path('tar', True),
+ '--directory=%s' % os.path.realpath(
+ os.path.expanduser(source_dir)
+ ),
+ compression_type['argument'],
+ archive_name,
+ '.'
+ ]
+
+ rc, stdout, err = self._run_command(
+ build_command=build_command,
+ unsafe_shell=True
+ )
+
+ os.umask(old_umask)
+
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to create tar archive',
+ command=' '.join(build_command)
+ )
+
+ return archive_name
+
+ def _lvm_lv_remove(self, lv_name):
+ """Remove an LV.
+
+ :param lv_name: The name of the logical volume
+ :type lv_name: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvremove', True),
+ "-f",
+ "%s/%s" % (vg, lv_name),
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+ command=' '.join(build_command)
+ )
+
+ def _rsync_data(self, container_path, temp_dir):
+ """Sync the container directory to the temp directory.
+
+ :param container_path: path to the container rootfs
+ :type container_path: ``str``
+ :param temp_dir: path to the temporary local working directory
+ :type temp_dir: ``str``
+ """
+ # This loop is created to support overlayfs archives. This should
+ # squash all of the layers into a single archive.
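+ # An overlayfs rootfs is formatted as 'overlayfs:<lowerdir>:<upperdir>',
+ # so splitting on ':' yields the marker plus the layer paths.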
+ fs_paths = container_path.split(':')
+ if 'overlayfs' in fs_paths:
+ fs_paths.pop(fs_paths.index('overlayfs'))
+
+ for fs_path in fs_paths:
+ # Set the path to the container data
+ fs_path = os.path.dirname(fs_path)
+
+ # Run the sync command
+ build_command = [
+ self.module.get_bin_path('rsync', True),
+ '-aHAX',
+ fs_path,
+ temp_dir
+ ]
+ rc, stdout, err = self._run_command(
+ build_command,
+ unsafe_shell=True
+ )
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to perform archive',
+ command=' '.join(build_command)
+ )
+
+ def _unmount(self, mount_point):
+ """Unmount a file system.
+
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('umount', True),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to unmount [ %s ]' % mount_point,
+ command=' '.join(build_command)
+ )
+
+ def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+ """mount an lv.
+
+ :param lowerdir: name/path of the lower directory
+ :type lowerdir: ``str``
+ :param upperdir: name/path of the upper directory
+ :type upperdir: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ '-t overlayfs',
+ '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+ 'overlayfs',
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+ % (lowerdir, upperdir, mount_point, build_command)
+ )
+
+ def _container_create_tar(self):
+ """Create a tar archive from an LXC container.
+
+ The process is as follows:
+ * Stop or Freeze the container
+ * Create temporary dir
+ * Copy container and config to temporary directory
+ * If LVM backed:
+ * Create LVM snapshot of LV backing the container
+ * Mount the snapshot to tmpdir/rootfs
+ * Restore the state of the container
+ * Create tar of tmpdir
+ * Clean up
+ """
+
+ # Create a temp dir
+ temp_dir = tempfile.mkdtemp()
+
+ # Set the name of the working dir, temp + container_name
+ work_dir = os.path.join(temp_dir, self.container_name)
+
+ # LXC container rootfs
+ lxc_rootfs = self.container.get_config_item('lxc.rootfs')
+
+ # Test if the containers rootfs is a block device
+ block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
+
+ # Test if the container is using overlayfs
+ overlayfs_backed = lxc_rootfs.startswith('overlayfs')
+
+ mount_point = os.path.join(work_dir, 'rootfs')
+
+ # Set the snapshot name if needed
+ snapshot_name = '%s_lxc_snapshot' % self.container_name
+
+ container_state = self._get_state()
+ try:
+ # Ensure the original container is stopped or frozen
+ if container_state not in ['stopped', 'frozen']:
+ if container_state == 'running':
+ self.container.freeze()
+ else:
+ self.container.stop()
+
+ # Sync the container data from the container_path to work_dir
+ self._rsync_data(lxc_rootfs, temp_dir)
+
+ if block_backed:
+ if snapshot_name not in self._lvm_lv_list():
+ if not os.path.exists(mount_point):
+ os.makedirs(mount_point)
+
+ # Take snapshot
+ size, measurement = self._get_lv_size(
+ lv_name=self.container_name
+ )
+ self._lvm_snapshot_create(
+ source_lv=self.container_name,
+ snapshot_name=snapshot_name,
+ snapshot_size_gb=size
+ )
+
+ # Mount snapshot
+ self._lvm_lv_mount(
+ lv_name=snapshot_name,
+ mount_point=mount_point
+ )
+ else:
+ self.failure(
+ err='snapshot [ %s ] already exists' % snapshot_name,
+ rc=1,
+ msg='The snapshot [ %s ] already exists. Please clean'
+ ' up old snapshot of containers before continuing.'
+ % snapshot_name
+ )
+ elif overlayfs_backed:
+ lowerdir, upperdir = lxc_rootfs.split(':')[1:]
+ self._overlayfs_mount(
+ lowerdir=lowerdir,
+ upperdir=upperdir,
+ mount_point=mount_point
+ )
+
+ # Set the state as changed and set a new fact
+ self.state_change = True
+ return self._create_tar(source_dir=work_dir)
+ finally:
+ if block_backed or overlayfs_backed:
+ # unmount snapshot
+ self._unmount(mount_point)
+
+ if block_backed:
+ # Remove snapshot
+ self._lvm_lv_remove(snapshot_name)
+
+ # Restore original state of container
+ if container_state == 'running':
+ if self._get_state() == 'frozen':
+ self.container.unfreeze()
+ else:
+ self.container.start()
+
+ # Remove tmpdir
+ shutil.rmtree(temp_dir)
+
+ def check_count(self, count, method):
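+ """Fail if an ensure-state method has already retried once.
+
+ Each ensure-state method re-invokes itself once after creating the
+ container; if count exceeds 1 the container never reached the state.
+ """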
+ if count > 1:
+ self.failure(
+ error='Failed to %s container' % method,
+ rc=1,
+ msg='The container [ %s ] failed to %s. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % (self.container_name, method)
+ )
+
+ def failure(self, **kwargs):
+ """Return a Failure when running an Ansible command.
+
+ :param error: ``str`` Error that occurred.
+ :param rc: ``int`` Return code while executing an Ansible command.
+ :param msg: ``str`` Message to report.
+ """
+
+ self.module.fail_json(**kwargs)
+
+ def run(self):
+ """Run the main method."""
+
+ action = getattr(self, LXC_ANSIBLE_STATES[self.state])
+ action()
+
+ outcome = self._container_data()
+ if self.archive_info:
+ outcome.update(self.archive_info)
+
+ if self.clone_info:
+ outcome.update(self.clone_info)
+
+ self.module.exit_json(
+ changed=self.state_change,
+ lxc_container=outcome
+ )
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ template=dict(
+ type='str',
+ default='ubuntu'
+ ),
+ backing_store=dict(
+ type='str',
+ choices=LXC_BACKING_STORE.keys(),
+ default='dir'
+ ),
+ template_options=dict(
+ type='str'
+ ),
+ config=dict(
+ type='path',
+ ),
+ vg_name=dict(
+ type='str',
+ default='lxc'
+ ),
+ thinpool=dict(
+ type='str'
+ ),
+ fs_type=dict(
+ type='str',
+ default='ext4'
+ ),
+ fs_size=dict(
+ type='str',
+ default='5G'
+ ),
+ directory=dict(
+ type='path'
+ ),
+ zfs_root=dict(
+ type='str'
+ ),
+ lv_name=dict(
+ type='str'
+ ),
+ lxc_path=dict(
+ type='path'
+ ),
+ state=dict(
+ choices=LXC_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ container_command=dict(
+ type='str'
+ ),
+ container_config=dict(
+ type='list',
+ elements='str'
+ ),
+ container_log=dict(
+ type='bool',
+ default=False
+ ),
+ container_log_level=dict(
+ choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
+ default='INFO'
+ ),
+ clone_name=dict(
+ type='str',
+ required=False
+ ),
+ clone_snapshot=dict(
+ type='bool',
+ default=False
+ ),
+ archive=dict(
+ type='bool',
+ default=False
+ ),
+ archive_path=dict(
+ type='path',
+ ),
+ archive_compression=dict(
+ choices=LXC_COMPRESSION_MAP.keys(),
+ default='gzip'
+ )
+ ),
+ supports_check_mode=False,
+ required_if=([
+ ('archive', True, ['archive_path'])
+ ]),
+ )
+
+ if not HAS_LXC:
+ module.fail_json(
+ msg='The `lxc` module is not importable. Check the requirements.'
+ )
+
+ lv_name = module.params.get('lv_name')
+ if not lv_name:
+ module.params['lv_name'] = module.params.get('name')
+
+ lxc_manage = LxcContainerManagement(module=module)
+ lxc_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py
new file mode 100644
index 00000000..7bd7b9ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_cmms
+short_description: Custom module for lxca cmms inventory utility
+description:
+ - This module returns/displays inventory details of cmms
+
+options:
+ uuid:
+ description:
+ UUID of the device; this is a string with length greater than 16.
+
+ command_options:
+ description:
+ options to filter cmms information
+ default: cmms
+ choices:
+ - cmms
+ - cmms_by_uuid
+ - cmms_by_chassis_uuid
+
+ chassis:
+ description:
+ UUID of the chassis; this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all cmms info
+- name: Get cmms data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+
+# get specific cmms info by uuid
+- name: Get cmms data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_uuid
+
+# get specific cmms info by chassis uuid
+- name: Get cmms data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_chassis_uuid
+
+'''
+
+RETURN = r'''
+result:
+ description: cmms details from lxca
+ returned: success
+ type: dict
+ sample:
+ cmmList:
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple cmms details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import cmms
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _cmms(module, lxca_con):
+ return cmms(lxca_con)
+
+
+def _cmms_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return cmms(lxca_con, module.params['uuid'])
+
+
+def _cmms_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return cmms(lxca_con, chassis=module.params['chassis'])
+
+
+def setup_module_object():
+ """
+ This function merges the argument spec and creates the Ansible module object.
+ :return: AnsibleModule object
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
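+# FUNC_DICT maps each command_options value to its handler function.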
+FUNC_DICT = {
+ 'cmms': _cmms,
+ 'cmms_by_uuid': _cmms_by_uuid,
+ 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+ 'cmms_by_chassis_uuid']),
+ uuid=dict(default=None),
+ chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+ This function invokes the selected command
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+ error_msg = '; '.join(str(e) for e in exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py
new file mode 100644
index 00000000..febe2fd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_nodes
+short_description: Custom module for lxca nodes inventory utility
+description:
+ - This module returns/displays inventory details of nodes
+
+options:
+ uuid:
+ description:
+ UUID of the device; this is a string with length greater than 16.
+
+ command_options:
+ description:
+ options to filter nodes information
+ default: nodes
+ choices:
+ - nodes
+ - nodes_by_uuid
+ - nodes_by_chassis_uuid
+ - nodes_status_managed
+ - nodes_status_unmanaged
+
+ chassis:
+ description:
+ UUID of the chassis; this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all nodes info
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes
+
+# get specific nodes info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_uuid
+
+# get specific nodes info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_chassis_uuid
+
+# get managed nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_managed
+
+# get unmanaged nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_unmanaged
+
+'''
+
+RETURN = r'''
+result:
+ description: nodes details from lxca
+ returned: always
+ type: dict
+ sample:
+ nodeList:
+ - machineType: '6241'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: '8871'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple nodes details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import nodes
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _nodes(module, lxca_con):
+ return nodes(lxca_con)
+
+
+def _nodes_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return nodes(lxca_con, module.params['uuid'])
+
+
+def _nodes_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return nodes(lxca_con, chassis=module.params['chassis'])
+
+
+def _nodes_status_managed(module, lxca_con):
+ return nodes(lxca_con, status='managed')
+
+
+def _nodes_status_unmanaged(module, lxca_con):
+ return nodes(lxca_con, status='unmanaged')
+
+
+def setup_module_object():
+ """
+ This function merges the argument spec and creates the Ansible module object.
+ :return: AnsibleModule object
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
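+# FUNC_DICT maps each command_options value to its handler function.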
+FUNC_DICT = {
+ 'nodes': _nodes,
+ 'nodes_by_uuid': _nodes_by_uuid,
+ 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid,
+ 'nodes_status_managed': _nodes_status_managed,
+ 'nodes_status_unmanaged': _nodes_status_unmanaged,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
+ 'nodes_by_chassis_uuid',
+ 'nodes_status_managed',
+ 'nodes_status_unmanaged']),
+ uuid=dict(default=None), chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+ This function invokes the selected command
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+ error_msg = '; '.join(str(e) for e in exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py
new file mode 100644
index 00000000..119387f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py
@@ -0,0 +1,710 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_container
+short_description: Manage LXD Containers
+description:
+ - Management of LXD containers
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ architecture:
+ description:
+ - The architecture for the container (e.g. "x86_64" or "i686").
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ type: str
+ required: false
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.cpu": "2"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ - If the container already exists and its "config" value in metadata
+ obtained from
+ GET /1.0/containers/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
+ is different, then this module tries to apply the configurations.
+ - Keys starting with 'volatile.' are ignored for this comparison.
+ - Not all config values can be applied to an existing container;
+ you may need to delete and recreate the container.
+ type: dict
+ required: false
+ profiles:
+ description:
+ - Profile to be used by the container
+ type: list
+ devices:
+ description:
+ - 'The devices for the container
+ (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ type: dict
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the container is ephemeral (e.g. true or false).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ type: bool
+ source:
+ description:
+ - 'The source for the container
+ (e.g. { "type": "image",
+ "mode": "pull",
+ "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd",
+ "alias": "ubuntu/xenial/amd64" }).'
+ - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
+ - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
+ required: false
+ type: dict
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container.
+ required: false
+ default: started
+ type: str
+ target:
+ description:
+ - For cluster deployments. Will attempt to create a container on a target node.
+ If the container exists elsewhere in the cluster, it will not be replaced or moved.
+ The name should correspond to the node name shown in C(lxc cluster list).
+ type: str
+ required: false
+ version_added: 1.0.0
+ timeout:
+ description:
+ - A timeout for changing the state of the container.
+ - This is also used as a timeout for waiting until IPv4 addresses
+ are assigned to all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: 30
+ type: int
+ wait_for_ipv4_addresses:
+ description:
+ - If this is true, the C(lxd_container) waits until IPv4 addresses
+ are assigned to all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: false
+ type: bool
+ force_stop:
+ description:
+ - If this is true, the C(lxd_container) forces the container to stop
+ when it stops or restarts the container.
+ required: false
+ default: false
+ type: bool
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module using the following command.
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the user's namespace, the module
+ will simply return as "unchanged".
+ - There are two ways to run commands in containers, using the command
+ module or using the ansible lxd connection plugin bundled in Ansible >=
+ 2.1; the latter requires Python to be installed in the container, which can
+ be done with the command module.
+ - You can copy a file from the host to the container
+ with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) modules and the `lxd` connection plugin.
+ See the example below.
+ - You can copy a file in the created container to the localhost
+ with `command=lxc file pull container_name/dir/filename filename`.
+ See the first example below.
+'''
+
+EXAMPLES = '''
+# An example for creating a Ubuntu container and install python
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ protocol: lxd # if you get a 404, try setting protocol: simplestreams
+ alias: ubuntu/xenial/amd64
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+ - name: Check python is installed in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: dpkg -s python
+ register: python_install_check
+ failed_when: python_install_check.rc not in [0, 1]
+ changed_when: false
+
+ - name: Install python in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: apt-get install -y python
+ when: python_install_check.rc == 1
+
+# An example for creating an Ubuntu 14.04 container using an image fingerprint.
+# This requires changing 'server' and 'protocol' key values, replacing the
+# 'alias' key with 'fingerprint' and supplying an appropriate value that
+# matches the container image you wish to use.
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ # Provides current (and older) Ubuntu images with listed fingerprints
+ server: https://cloud-images.ubuntu.com/releases
+ # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
+ protocol: simplestreams
+ # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
+ fingerprint: e9a8bdfab6dc
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+# An example for deleting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: absent
+
+# An example for restarting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: restarted
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: mycontainer
+ state: restarted
+
+# Note your container must be in the inventory for the below example.
+#
+# [containers]
+# mycontainer ansible_connection=lxd
+#
+- hosts:
+ - mycontainer
+ tasks:
+ - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
+ ansible.builtin.fetch:
+ src: /etc/hosts
+ dest: /tmp/mycontainer-hosts
+ flat: true
+
+# An example for LXD cluster deployments. This example will create two new containers on specific
+# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster
+# members that the LXD cluster recognizes, not Ansible inventory names; see 'lxc cluster list'.
+# LXD API calls can be made to any LXD member; in this example, we send API requests to
+# 'node01.example.com', which matches the Ansible inventory name.
+- hosts: node01.example.com
+ tasks:
+ - name: Create LXD container
+ community.general.lxd_container:
+ name: new-container-1
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node01
+
+ - name: Create container on another node
+ community.general.lxd_container:
+ name: new-container-2
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node02
+'''
+
+RETURN = '''
+addresses:
+ description: Mapping from the network device name to a list of IPv4 addresses in the container
+ returned: when state is started or restarted
+ type: dict
+ sample: {"eth0": ["10.155.92.191"]}
+old_state:
+ description: The old state of the container
+ returned: when state is started or restarted
+ type: str
+ sample: "stopped"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the container.
+ returned: success
+ type: list
+ sample: '["create", "start"]'
+'''
+import datetime
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# LXD_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXD_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen'
+}
+
+# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
+# lxc_container module state parameter value.
+ANSIBLE_LXD_STATES = {
+ 'Running': 'started',
+ 'Stopped': 'stopped',
+ 'Frozen': 'frozen',
+}
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+]
+
+
+class LXDContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+
+ self.state = self.module.params['state']
+
+ self.timeout = self.module.params['timeout']
+ self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+ self.force_stop = self.module.params['force_stop']
+ self.addresses = None
+ self.target = self.module.params['target']
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
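+ # Prefer an explicitly set url; otherwise fall back to the snap socket
+ # if it exists, and finally to the default unix socket.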
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_container_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ def _get_container_state_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}/state'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _container_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+ def _change_state(self, action, force_stop=False):
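+ # PUT a state-change request, e.g. {'action': 'start', 'timeout': 30},
+ # to the container's /1.0/containers/<name>/state endpoint.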
+ body_json = {'action': action, 'timeout': self.timeout}
+ if force_stop:
+ body_json['force'] = True
+ return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
+
+ def _create_container(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ if self.target:
+ self.client.do('POST', '/1.0/containers?' + urlencode(dict(target=self.target)), config)
+ else:
+ self.client.do('POST', '/1.0/containers', config)
+ self.actions.append('create')
+
+ def _start_container(self):
+ self._change_state('start')
+ self.actions.append('start')
+
+ def _stop_container(self):
+ self._change_state('stop', self.force_stop)
+ self.actions.append('stop')
+
+ def _restart_container(self):
+ self._change_state('restart', self.force_stop)
+ self.actions.append('restart')
+
+ def _delete_container(self):
+ self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def _freeze_container(self):
+ self._change_state('freeze')
+ self.actions.append('freeze')
+
+ def _unfreeze_container(self):
+ self._change_state('unfreeze')
+ self.actions.append('unfreeze')
+
+ def _container_ipv4_addresses(self, ignore_devices=None):
+ ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
+
+ resp_json = self._get_container_state_json()
+ network = resp_json['metadata']['network'] or {}
+ network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
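+ # Map each remaining interface to its IPv4 ('inet') addresses,
+ # e.g. {'eth0': ['10.155.92.191']}.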
+ addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
+ return addresses
+
+ @staticmethod
+ def _has_all_ipv4_addresses(addresses):
+ return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values())
+
+ def _get_addresses(self):
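+ """Poll once per second, for up to self.timeout seconds, until every
+ network interface in the container reports an IPv4 address; the result
+ is stored in self.addresses."""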
+ try:
+ due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
+ while datetime.datetime.now() < due:
+ time.sleep(1)
+ addresses = self._container_ipv4_addresses()
+ if self._has_all_ipv4_addresses(addresses):
+ self.addresses = addresses
+ return
+ except LXDClientException as e:
+ e.msg = 'timeout for getting IPv4 addresses'
+ raise
+
+ def _started(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ elif self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _stopped(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ else:
+ if self.old_state == 'stopped':
+ if self._needs_to_apply_container_configs():
+ self._start_container()
+ self._apply_container_configs()
+ self._stop_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._stop_container()
+
+ def _restarted(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._restart_container()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _destroyed(self):
+ if self.old_state != 'absent':
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self.old_state != 'stopped':
+ self._stop_container()
+ self._delete_container()
+
+ def _frozen(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ self._freeze_container()
+ else:
+ if self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._freeze_container()
+
+ def _needs_to_change_container_config(self, key):
+ if key not in self.config:
+ return False
+ if key == 'config':
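+ # Keys such as 'volatile.eth0.hwaddr' are generated by LXD at
+ # runtime, so they are excluded from the comparison (see the
+ # DOCUMENTATION for the 'config' option above).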
+ old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
+ for k, v in self.config['config'].items():
+ if k not in old_configs:
+ return True
+ if old_configs[k] != v:
+ return True
+ return False
+ else:
+ old_configs = self.old_container_json['metadata'][key]
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_container_configs(self):
+ return (
+ self._needs_to_change_container_config('architecture') or
+ self._needs_to_change_container_config('config') or
+ self._needs_to_change_container_config('ephemeral') or
+ self._needs_to_change_container_config('devices') or
+ self._needs_to_change_container_config('profiles')
+ )
+
+ def _apply_container_configs(self):
+ old_metadata = self.old_container_json['metadata']
+ body_json = {
+ 'architecture': old_metadata['architecture'],
+ 'config': old_metadata['config'],
+ 'devices': old_metadata['devices'],
+ 'profiles': old_metadata['profiles']
+ }
+ if self._needs_to_change_container_config('architecture'):
+ body_json['architecture'] = self.config['architecture']
+ if self._needs_to_change_container_config('config'):
+ for k, v in self.config['config'].items():
+ body_json['config'][k] = v
+ if self._needs_to_change_container_config('ephemeral'):
+ body_json['ephemeral'] = self.config['ephemeral']
+ if self._needs_to_change_container_config('devices'):
+ body_json['devices'] = self.config['devices']
+ if self._needs_to_change_container_config('profiles'):
+ body_json['profiles'] = self.config['profiles']
+ self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
+ self.actions.append('apply_container_configs')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_container_json = self._get_container_json()
+ self.old_state = self._container_json_to_module_state(self.old_container_json)
+ action = getattr(self, LXD_ANSIBLE_STATES[self.state])
+ action()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'log_verbosity': self.module._verbosity,
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ if self.addresses is not None:
+ result_json['addresses'] = self.addresses
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ architecture=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ ephemeral=dict(
+ type='bool',
+ ),
+ profiles=dict(
+ type='list',
+ ),
+ source=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=LXD_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ target=dict(
+ type='str',
+ ),
+ timeout=dict(
+ type='int',
+ default=30
+ ),
+ wait_for_ipv4_addresses=dict(
+ type='bool',
+ default=False
+ ),
+ force_stop=dict(
+ type='bool',
+ default=False
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDContainerManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py
new file mode 100644
index 00000000..ccd74d42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+description:
+ - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a profile.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the profile.
+ type: str
+ config:
+ description:
+ - 'The config for the profile (e.g. {"limits.memory": "4GB"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ - If the profile already exists and its "config" value in metadata
+ obtained from
+ GET /1.0/profiles/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+ is different, then this module tries to apply the configurations.
+ - Not all config values can be applied to an existing profile;
+ you may need to delete and recreate the profile.
+ required: false
+ type: dict
+ devices:
+ description:
+ - 'The devices for the profile
+ (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ required: false
+ type: dict
+ new_name:
+ description:
+ - A new name of a profile.
+ - If this parameter is specified, the profile will be renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ required: false
+ type: str
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module using the following command.
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Profiles must have a unique name. If you attempt to create a profile
+ with a name that already exists in the user's namespace, the module
+ will simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for creating a profile via http connection
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create macvlan profile
+ community.general.lxd_profile:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename a profile
+ community.general.lxd_profile:
+ name: macvlan
+ new_name: macvlan2
+ state: present
+'''
+
+RETURN = '''
+old_state:
+ description: The old state of the profile
+ returned: success
+ type: str
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the profile.
+ returned: success
+ type: list
+ sample: '["create"]'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# PROFILES_STATES is a list of supported states
+PROFILES_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description', 'devices'
+]
+
+
+class LXDProfileManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+ self.state = self.module.params['state']
+ self.new_name = self.module.params.get('new_name', None)
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
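+ # Prefer an explicitly set url; otherwise fall back to the snap socket
+ # if it exists, and finally to the default unix socket.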
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_profile_json(self):
+ return self.client.do(
+ 'GET', '/1.0/profiles/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _profile_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return 'present'
+
+ def _update_profile(self):
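+ """Create, rename, reconfigure, or delete the profile so that it
+ matches the requested state."""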
+ if self.state == 'present':
+ if self.old_state == 'absent':
+ if self.new_name is None:
+ self._create_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile does not exist and the specified state is present',
+ changed=False)
+ else:
+ if self.new_name is not None and self.new_name != self.name:
+ self._rename_profile()
+ if self._needs_to_apply_profile_configs():
+ self._apply_profile_configs()
+ elif self.state == 'absent':
+ if self.old_state == 'present':
+ if self.new_name is None:
+ self._delete_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile exists and the specified state is absent',
+ changed=False)
+
+ def _create_profile(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', '/1.0/profiles', config)
+ self.actions.append('create')
+
+ def _rename_profile(self):
+ config = {'name': self.new_name}
+ self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('rename')
+ self.name = self.new_name
+
+ def _needs_to_change_profile_config(self, key):
+ if key not in self.config:
+ return False
+ old_configs = self.old_profile_json['metadata'].get(key, None)
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_profile_configs(self):
+ return (
+ self._needs_to_change_profile_config('config') or
+ self._needs_to_change_profile_config('description') or
+ self._needs_to_change_profile_config('devices')
+ )
+
+ def _apply_profile_configs(self):
+ config = self.old_profile_json.copy()
+ for k, v in self.config.items():
+ config[k] = v
+ self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('apply_profile_configs')
+
+ def _delete_profile(self):
+ self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_profile_json = self._get_profile_json()
+ self.old_state = self._profile_json_to_module_state(self.old_profile_json)
+ self._update_profile()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ new_name=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=PROFILES_STATES,
+ default='present'
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDProfileManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py
new file mode 100644
index 00000000..a865a8f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jimmy Tang <jcftang@gmail.com>
+# Based on opkg (Patrick Pelletier <pp.pelletier@gmail.com>), pacman
+# (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: macports
+author: "Jimmy Tang (@jcftang)"
+short_description: Package manager for MacPorts
+description:
+ - Manages MacPorts packages (ports).
+options:
+ name:
+ description:
+ - A list of port names.
+ aliases: ['port']
+ type: list
+ elements: str
+ selfupdate:
+ description:
+ - Update MacPorts and the ports tree, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port selfupdate).
+ aliases: ['update_cache', 'update_ports']
+ default: "no"
+ type: bool
+ state:
+ description:
+ - Indicates the desired state of the port.
+ choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed']
+ default: present
+ type: str
+ upgrade:
+ description:
+ - Upgrade all outdated ports, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port upgrade outdated).
+ default: "no"
+ type: bool
+ variant:
+ description:
+ - A port variant specification.
+ - 'C(variant) is only supported with state: I(installed)/I(present).'
+ aliases: ['variants']
+ type: str
+'''
+EXAMPLES = '''
+- name: Install the foo port
+ community.general.macports:
+ name: foo
+
+- name: Install the universal, x11 variant of the foo port
+ community.general.macports:
+ name: foo
+ variant: +universal+x11
+
+- name: Install a list of ports
+ community.general.macports:
+ name: "{{ ports }}"
+ vars:
+ ports:
+ - foo
+ - foo-tools
+
+- name: Update MacPorts and the ports tree, then upgrade all outdated ports
+ community.general.macports:
+ selfupdate: yes
+ upgrade: yes
+
+- name: Update MacPorts and the ports tree, then install the foo port
+ community.general.macports:
+ name: foo
+ selfupdate: yes
+
+- name: Remove the foo port
+ community.general.macports:
+ name: foo
+ state: absent
+
+- name: Activate the foo port
+ community.general.macports:
+ name: foo
+ state: active
+
+- name: Deactivate the foo port
+ community.general.macports:
+ name: foo
+ state: inactive
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def selfupdate(module, port_path):
+ """ Update Macports and the ports tree. """
+
+ rc, out, err = module.run_command("%s -v selfupdate" % port_path)
+
+ if rc == 0:
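+ # Heuristic: verbose selfupdate output mentions the number of ports
+ # parsed when the ports tree changed, or announces a new MacPorts
+ # release; either one counts as a change.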
+ updated = any(
+ re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or
+ re.search(r'Installing new Macports release', s.strip())
+ for s in out.split('\n')
+ if s
+ )
+ if updated:
+ changed = True
+ msg = "Macports updated successfully"
+ else:
+ changed = False
+ msg = "Macports already up-to-date"
+
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)
+
+
+def upgrade(module, port_path):
+ """ Upgrade outdated ports. """
+
+ rc, out, err = module.run_command("%s upgrade outdated" % port_path)
+
+ # rc is 1 when nothing to upgrade so check stdout first.
+ if out.strip() == "Nothing to upgrade.":
+ changed = False
+ msg = "Ports already upgraded"
+ return (changed, msg)
+ elif rc == 0:
+ changed = True
+ msg = "Outdated ports upgraded successfully"
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
+
+
+def query_port(module, port_path, name, state="present"):
+ """ Returns whether a port is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
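+ # With -q the header line is suppressed and each installed port is
+ # listed as e.g. "foo @1.2.3_0 (active)", so match on the leading name.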
+ if rc == 0 and out.strip().startswith(name + " "):
+ return True
+
+ return False
+
+ elif state == "active":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and "(active)" in out:
+ return True
+
+ return False
+
+
+def remove_ports(module, port_path, ports):
+ """ Uninstalls one or more ports if installed. """
+
+ remove_c = 0
+ # Loop over the ports individually so that on error we can report which port failed
+ for port in ports:
+ # Query the port first, to see if we even need to remove
+ if not query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
+
+ if query_port(module, port_path, port):
+ module.fail_json(msg="Failed to remove %s: %s" % (port, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="Port(s) already absent")
+
+
+def install_ports(module, port_path, ports, variant):
+ """ Installs one or more ports if not already installed. """
+
+ install_c = 0
+
+ for port in ports:
+ if query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
+
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to install %s: %s" % (port, err))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="Port(s) already present")
+
+
+def activate_ports(module, port_path, ports):
+ """ Activate a port if it's inactive. """
+
+ activate_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
+
+ if query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s activate %s" % (port_path, port))
+
+ if not query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to activate %s: %s" % (port, err))
+
+ activate_c += 1
+
+ if activate_c > 0:
+ module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
+
+ module.exit_json(changed=False, msg="Port(s) already active")
+
+
+def deactivate_ports(module, port_path, ports):
+ """ Deactivate a port if it's active. """
+
+ deactivated_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
+
+ if not query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
+
+ if query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
+
+ deactivated_c += 1
+
+ if deactivated_c > 0:
+ module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
+
+ module.exit_json(changed=False, msg="Port(s) already inactive")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=["port"]),
+ selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
+ upgrade=dict(default=False, type='bool'),
+ variant=dict(aliases=["variants"], default=None, type='str')
+ )
+ )
+
+ port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
+
+ p = module.params
+
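+ # selfupdate and upgrade may be used standalone; exit early when no
+ # port names were given and there is nothing left to do.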
+ if p["selfupdate"]:
+ (changed, msg) = selfupdate(module, port_path)
+ if not (p["name"] or p["upgrade"]):
+ module.exit_json(changed=changed, msg=msg)
+
+ if p["upgrade"]:
+ (changed, msg) = upgrade(module, port_path)
+ if not p["name"]:
+ module.exit_json(changed=changed, msg=msg)
+
+ pkgs = p["name"]
+
+ variant = p["variant"]
+
+ if p["state"] in ["present", "installed"]:
+ install_ports(module, port_path, pkgs, variant)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_ports(module, port_path, pkgs)
+
+ elif p["state"] == "active":
+ activate_ports(module, port_path, pkgs)
+
+ elif p["state"] == "inactive":
+ deactivate_ports(module, port_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py
new file mode 100644
index 00000000..574f8478
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+- Dag Wieers (@dagwieers)
+module: mail
+short_description: Send an email
+description:
+- This module is useful for sending emails from playbooks.
+- One may wonder why automate sending emails? In complex environments,
+ there are, from time to time, processes that cannot be automated, either
+ because you lack the authority to make it so, or because not everyone
+ agrees to a common approach.
+- If you cannot automate a specific step, but the step is non-blocking,
+ sending out an email to the responsible party to make them perform their
+ part of the bargain is an elegant way to put the responsibility in
+ someone else's lap.
+- Of course sending out a mail can be equally useful as a way to notify
+ one or more people in a team that a specific action has been
+ (successfully) taken.
+options:
+ sender:
+ description:
+ - The email-address the mail is sent from. May contain address and phrase.
+ type: str
+ default: root
+ aliases: [ from ]
+ to:
+ description:
+ - The email-address(es) the mail is being sent to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ default: root
+ aliases: [ recipients ]
+ cc:
+ description:
+ - The email-address(es) the mail is being copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ bcc:
+ description:
+ - The email-address(es) the mail is being 'blind' copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ subject:
+ description:
+ - The subject of the email being sent.
+ required: yes
+ type: str
+ aliases: [ msg ]
+ body:
+ description:
+ - The body of the email being sent.
+ type: str
+ username:
+ description:
+ - If SMTP requires username.
+ type: str
+ password:
+ description:
+ - If SMTP requires password.
+ type: str
+ host:
+ description:
+ - The mail server.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The mail server port.
+ - This must be a valid integer between 1 and 65534.
+ type: int
+ default: 25
+ attach:
+ description:
+ - A list of pathnames of files to attach to the message.
+ - Attached files will have their content-type set to C(application/octet-stream).
+ type: list
+ default: []
+ headers:
+ description:
+ - A list of headers which should be added to the message.
+ - Each individual header is specified as C(header=value) (see example below).
+ type: list
+ default: []
+ charset:
+ description:
+ - The character set of email being sent.
+ type: str
+ default: utf-8
+ subtype:
+ description:
+ - The minor mime type, can be either C(plain) or C(html).
+ - The major type is always C(text).
+ type: str
+ choices: [ html, plain ]
+ default: plain
+ secure:
+ description:
+ - If C(always), the connection will only send email if the connection is encrypted.
+ If the server doesn't accept the encrypted connection, it will fail.
+ - If C(try), the connection will attempt to set up a secure SSL/TLS session before trying to send.
+ - If C(never), the connection will not attempt to set up a secure SSL/TLS session before sending.
+ - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection before sending.
+ If it is unable to do so, it will fail.
+ type: str
+ choices: [ always, never, starttls, try ]
+ default: try
+ timeout:
+ description:
+ - Sets the timeout in seconds for connection attempts.
+ type: int
+ default: 20
+'''
+
+EXAMPLES = r'''
+- name: Example playbook sending mail to root
+ community.general.mail:
+ subject: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Sending an e-mail using Gmail SMTP servers
+ community.general.mail:
+ host: smtp.gmail.com
+ port: 587
+ username: username@gmail.com
+ password: mysecret
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Send e-mail to a bunch of users, attaching files
+ community.general.mail:
+ host: 127.0.0.1
+ port: 2025
+ subject: Ansible-report
+ body: Hello, this is an e-mail. I hope you like it ;-)
+ from: jane@example.net (Jane Jolie)
+ to:
+ - John Doe <j.d@example.org>
+ - Suzie Something <sue@example.com>
+ cc: Charlie Root <root@localhost>
+ attach:
+ - /etc/group
+ - /tmp/avatar2.png
+ headers:
+ - Reply-To=john@example.com
+ - X-Special="Something or other"
+ charset: us-ascii
+ delegate_to: localhost
+
+- name: Sending an e-mail using the remote machine, not the Ansible controller node
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+
+- name: Sending an e-mail using Legacy SSL to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: always
+
+- name: Sending an e-mail using StartTLS to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: starttls
+'''
+
+import os
+import smtplib
+import ssl
+import traceback
+from email import encoders
+from email.utils import parseaddr, formataddr, formatdate
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.header import Header
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=25),
+ sender=dict(type='str', default='root', aliases=['from']),
+ to=dict(type='list', default=['root'], aliases=['recipients']),
+ cc=dict(type='list', default=[]),
+ bcc=dict(type='list', default=[]),
+ subject=dict(type='str', required=True, aliases=['msg']),
+ body=dict(type='str'),
+ attach=dict(type='list', default=[]),
+ headers=dict(type='list', default=[]),
+ charset=dict(type='str', default='utf-8'),
+ subtype=dict(type='str', default='plain', choices=['html', 'plain']),
+ secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
+ timeout=dict(type='int', default=20),
+ ),
+ required_together=[['password', 'username']],
+ )
+
+ username = module.params.get('username')
+ password = module.params.get('password')
+ host = module.params.get('host')
+ port = module.params.get('port')
+ sender = module.params.get('sender')
+ recipients = module.params.get('to')
+ copies = module.params.get('cc')
+ blindcopies = module.params.get('bcc')
+ subject = module.params.get('subject')
+ body = module.params.get('body')
+ attach_files = module.params.get('attach')
+ headers = module.params.get('headers')
+ charset = module.params.get('charset')
+ subtype = module.params.get('subtype')
+ secure = module.params.get('secure')
+ timeout = module.params.get('timeout')
+
+ code = 0
+ secure_state = False
+ sender_phrase, sender_addr = parseaddr(sender)
+
+ if not body:
+ body = subject
+
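+ # Connection strategy: unless secure=never, optimistically try an
+ # implicit SSL/TLS (SMTPS) session first; if that fails and the policy
+ # allows it, fall back to plaintext and upgrade via STARTTLS below.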
+ try:
+ if secure != 'never':
+ try:
+ if PY3:
+ smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP_SSL(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+ secure_state = True
+ except ssl.SSLError as e:
+ if secure == 'always':
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ except Exception:
+ pass
+
+ if not secure_state:
+ if PY3:
+ smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to connect to %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='EHLO failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
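+ # Upgrade a plaintext session via STARTTLS where possible; with
+ # secure=starttls a server that lacks the extension is a hard failure.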
+ if int(code) > 0:
+ if not secure_state and secure in ('starttls', 'try'):
+ if smtp.has_extn('STARTTLS'):
+ try:
+ smtp.starttls()
+ secure_state = True
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+ else:
+ if secure == 'starttls':
+ module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
+
+ if username and password:
+ if smtp.has_extn('AUTH'):
+ try:
+ smtp.login(username, password)
+ except smtplib.SMTPAuthenticationError:
+ module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
+ except smtplib.SMTPException:
+ module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
+ else:
+ module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port))
+
+ if not secure_state and (username and password):
+ module.warn('Username and password were sent without encryption')
+
+ msg = MIMEMultipart(_charset=charset)
+ msg['From'] = formataddr((sender_phrase, sender_addr))
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = Header(subject, charset)
+ msg.preamble = "Multipart message"
+
+ for header in headers:
+ # NOTE: Backward compatible with old syntax using '|' as delimiter
+ for hdr in [x.strip() for x in header.split('|')]:
+ try:
+ h_key, h_val = hdr.split('=')
+ h_val = to_native(Header(h_val, charset))
+ msg.add_header(h_key, h_val)
+ except Exception:
+ module.warn("Skipping header '%s', unable to parse" % hdr)
+
+ if 'X-Mailer' not in msg:
+ msg.add_header('X-Mailer', 'Ansible mail module')
+
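+ # addr_list collects bare addresses for the SMTP envelope (To, Cc and
+ # Bcc); only To and Cc are also rendered as message headers.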
+ addr_list = []
+ for addr in [x.strip() for x in blindcopies]:
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+
+ to_list = []
+ for addr in [x.strip() for x in recipients]:
+ to_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['To'] = ", ".join(to_list)
+
+ cc_list = []
+ for addr in [x.strip() for x in copies]:
+ cc_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['Cc'] = ", ".join(cc_list)
+
+ part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+ msg.attach(part)
+
+ # NOTE: Backward compatibility with the old syntax using space as the delimiter
+ # is not retained, as it breaks filenames containing spaces :-(
+ for filename in attach_files:
+ try:
+ part = MIMEBase('application', 'octet-stream')
+ with open(filename, 'rb') as fp:
+ part.set_payload(fp.read())
+ encoders.encode_base64(part)
+ part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename))
+ msg.attach(part)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" %
+ (filename, to_native(e)), exception=traceback.format_exc())
+
+ composed = msg.as_string()
+
+ try:
+ result = smtp.sendmail(sender_addr, set(addr_list), composed)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
+ (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc())
+
+ smtp.quit()
+
+ if result:
+ for key in result:
+ module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
+ module.exit_json(msg='Failed to send mail to at least one recipient', result=result)
+
+ module.exit_json(msg='Mail sent successfully', result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py
new file mode 100644
index 00000000..7314af28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: make
+short_description: Run targets in a Makefile
+requirements:
+- make
+author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+description:
+ - Run targets in a Makefile.
+options:
+ target:
+ description:
+ - The target to run.
+ - Typically this would be something like C(install), C(test) or C(all).
+ type: str
+ params:
+ description:
+ - Any extra parameters to pass to make.
+ type: dict
+ chdir:
+ description:
+ - Change to this directory before running make.
+ type: path
+ required: true
+ file:
+ description:
+ - Use a custom Makefile.
+ type: path
+ make:
+ description:
+ - Use a specific make binary.
+ type: path
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = r'''
+- name: Build the default target
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+
+- name: Run 'install' target as root
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: install
+ become: yes
+
+- name: Build 'all' target with extra arguments
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+
+- name: Build 'all' target with a custom Makefile
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ file: /some-project/Makefile
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_command(command, module, check_rc=True):
+ """
+ Run a command using the module, return
+ the result code and std{err,out} content.
+
+ :param command: list of command arguments
+ :param module: Ansible make module instance
+ :param check_rc: if True, fail the module when the command exits non-zero
+ :return: return code, stdout content, stderr content
+ """
+ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
+ return rc, sanitize_output(out), sanitize_output(err)
+
+
+def sanitize_output(output):
+ """
+ Sanitize the output string before we
+ pass it to module.fail_json. Defaults
+ the string to empty if it is None, else
+ strips trailing newlines.
+
+ :param output: output to sanitize
+ :return: sanitized output
+ """
+ if output is None:
+ return ''
+ else:
+ return output.rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(type='str'),
+ params=dict(type='dict'),
+ chdir=dict(type='path', required=True),
+ file=dict(type='path'),
+ make=dict(type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ make_path = module.params['make']
+ if make_path is None:
+ # Build up the invocation of `make` we are going to use
+ # For non-Linux OSes, prefer gmake (GNU make) over make
+ make_path = module.get_bin_path('gmake', required=False)
+ if not make_path:
+ # Fall back to system make
+ make_path = module.get_bin_path('make', required=True)
+ make_target = module.params['target']
+ if module.params['params'] is not None:
+ make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ else:
+ make_parameters = []
+
+ if module.params['file'] is not None:
+ base_command = [make_path, "-f", module.params['file'], make_target]
+ else:
+ base_command = [make_path, make_target]
+ base_command.extend(make_parameters)
+
+ # Check if the target is already up to date
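+ # `make -q` (question mode) builds nothing: it exits 0 when the target
+ # is up to date and non-zero when something would have to be rebuilt.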
+ rc, out, err = run_command(base_command + ['-q'], module, check_rc=False)
+ if module.check_mode:
+ # If we've been asked to do a dry run, we only need
+ # to report whether or not the target is up to date
+ changed = (rc != 0)
+ else:
+ if rc == 0:
+ # The target is up to date, so we don't have to
+ # do anything
+ changed = False
+ else:
+ # The target isn't up to date, so we need to run it
+ rc, out, err = run_command(base_command, module,
+ check_rc=True)
+ changed = True
+
+ # We don't report the return code, as if this module failed
+ # we would be calling fail_json from run_command, so even if
+ # we had a non-zero return code, we did not fail. However, if
+ # we report a non-zero return code here, we will be marked as
+ # failed regardless of what we signal using the failed= kwarg.
+ module.exit_json(
+ changed=changed,
+ failed=False,
+ stdout=out,
+ stderr=err,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir'],
+ file=module.params['file']
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
new file mode 100644
index 00000000..d40a8ca0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alert_profiles
+
+short_description: Configuration of alert profiles for ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert profile should not exist.
+ - present - alert profile should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The unique alert profile name in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The resource type for the alert profile in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ alerts:
+ type: list
+ description:
+ - List of alert descriptions to assign to this profile.
+ - Required if state is "present"
+ notes:
+ type: str
+ description:
+ - Optional notes for this profile.
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert profile to ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: present
+ name: Test profile
+ resource_type: ContainerNode
+ alerts:
+ - Test Alert 01
+ - Test Alert 02
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert profile from ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: absent
+ name: Test profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlertProfiles(object):
+ """ Object to execute alert profile management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url)
+
+ def get_profiles(self):
+ """ Get all alert profiles from ManageIQ
+ """
+ try:
+ response = self.client.get(self.url + '?expand=alert_definitions,resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e))
+ return response.get('resources') or []
+
+ def get_alerts(self, alert_descriptions):
+ """ Get a list of alert hrefs from a list of alert descriptions
+ """
+ alerts = []
+ for alert_description in alert_descriptions:
+ alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
+ description=alert_description)
+ alerts.append(alert['href'])
+
+ return alerts
+
+ def add_profile(self, profile):
+ """ Add a new alert profile to ManageIQ
+ """
+ # find all alerts to add to the profile
+ # we do this first to fail early if one is missing.
+ alerts = self.get_alerts(profile['alerts'])
+
+ # build the profile dict to send to the server
+
+ profile_dict = dict(name=profile['name'],
+ description=profile['name'],
+ mode=profile['resource_type'])
+ if profile['notes']:
+ profile_dict['set_data'] = dict(notes=profile['notes'])
+
+ # send it to the server
+ try:
+ result = self.client.post(self.url, resource=profile_dict, action="create")
+ except Exception as e:
+ self.module.fail_json(msg="Creating profile failed {error}".format(error=e))
+
+ # now that it has been created, we can assign the alerts
+ self.assign_or_unassign(result['results'][0], alerts, "assign")
+
+ msg = "Profile {name} created successfully"
+ msg = msg.format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def delete_profile(self, profile):
+ """ Delete an alert profile from ManageIQ
+ """
+ try:
+ self.client.post(profile['href'], action="delete")
+ except Exception as e:
+ self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))
+
+ msg = "Successfully deleted profile {name}".format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def get_alert_href(self, alert):
+ """ Get an absolute href for an alert
+ """
+ return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])
+
+ def assign_or_unassign(self, profile, resources, action):
+ """ Assign or unassign alerts to profile, and validate the result.
+ """
+ alerts = [dict(href=href) for href in resources]
+
+ subcollection_url = profile['href'] + '/alert_definitions'
+ try:
+ result = self.client.post(subcollection_url, resources=alerts, action=action)
+ if len(result['results']) != len(alerts):
+ msg = "Failed to {action} alerts to profile '{name}'," +\
+ "expected {expected} alerts to be {action}ed," +\
+ "but only {changed} were {action}ed"
+ msg = msg.format(action=action,
+ name=profile['name'],
+ expected=len(alerts),
+ changed=result['results'])
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to {action} alerts to profile '{name}': {error}"
+ msg = msg.format(action=action, name=profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ return result['results']
+
+ def update_profile(self, old_profile, desired_profile):
+ """ Update alert profile in ManageIQ
+ """
+ changed = False
+ # we need to use client.get to query the alert definitions
+ old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')
+
+ # figure out which alerts we need to assign / unassign
+ # alerts listed by the user:
+ desired_alerts = set(self.get_alerts(desired_profile['alerts']))
+
+ # alerts which currently exist in the profile
+ if 'alert_definitions' in old_profile:
+ # we use get_alert_href to have a direct href to the alert
+ existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']])
+ else:
+ # no alerts in this profile
+ existing_alerts = set()
+
+ to_add = list(desired_alerts - existing_alerts)
+ to_remove = list(existing_alerts - desired_alerts)
+
+ # assign / unassign the alerts, if needed
+
+ if to_remove:
+ self.assign_or_unassign(old_profile, to_remove, "unassign")
+ changed = True
+ if to_add:
+ self.assign_or_unassign(old_profile, to_add, "assign")
+ changed = True
+
+ # update other properties
+ profile_dict = dict()
+
+ if old_profile['mode'] != desired_profile['resource_type']:
+ # mode needs to be updated
+ profile_dict['mode'] = desired_profile['resource_type']
+
+ # check if notes need to be updated
+ old_notes = old_profile.get('set_data', {}).get('notes')
+
+ if desired_profile['notes'] != old_notes:
+ profile_dict['set_data'] = dict(notes=desired_profile['notes'])
+
+ if profile_dict:
+ # if we have any updated values
+ changed = True
+ try:
+ result = self.client.post(old_profile['href'],
+ resource=profile_dict,
+ action="edit")
+ except Exception as e:
+ msg = "Updating profile '{name}' failed: {error}"
+ msg = msg.format(name=old_profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ if changed:
+ msg = "Profile {name} updated successfully".format(name=desired_profile['name'])
+ else:
+ msg = "No update needed for profile {name}".format(name=desired_profile['name'])
+ return dict(changed=changed, msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ alerts=dict(type='list'),
+ notes=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['name', 'resource_type']),
+ ('state', 'absent', ['name'])])
+
+ state = module.params['state']
+ name = module.params['name']
+
+ manageiq = ManageIQ(module)
+ manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
+
+ existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
+ name=name)
+
+ # we need to add or update the alert profile
+ if state == "present":
+ if not existing_profile:
+ # a profile with this name doesn't exist yet, let's create it
+ res_args = manageiq_alert_profiles.add_profile(module.params)
+ else:
+ # a profile with this name exists, we might need to update it
+ res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params)
+
+ # this alert profile should not exist
+ if state == "absent":
+ # if we have an alert profile with this name, delete it
+ if existing_profile:
+ res_args = manageiq_alert_profiles.delete_profile(existing_profile)
+ else:
+ # This alert profile does not exist in ManageIQ, and that's okay
+ msg = "Alert profile '{name}' does not exist in ManageIQ"
+ msg = msg.format(name=name)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
new file mode 100644
index 00000000..4f818a3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alerts
+
+short_description: Configuration of alerts in ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert should not exist.
+ - present - alert should exist.
+ required: False
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The unique alert description in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The entity type for the alert in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ expression_type:
+ type: str
+ description:
+ - Expression type.
+ default: hash
+ choices: ["hash", "miq"]
+ expression:
+ type: dict
+ description:
+ - The alert expression for ManageIQ.
+ - Can either be in the "Miq Expression" format or the "Hash Expression" format.
+ - Required if state is "present".
+ enabled:
+ description:
+ - Enable or disable the alert. Required if state is "present".
+ type: bool
+ options:
+ type: dict
+ description:
+ - Additional alert options, such as notification type and frequency.
+
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert with a "hash expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 01
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: ContainerNode
+ expression:
+ eval_method: hostd_log_threshold
+ mode: internal
+ options: {}
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Add an alert with a "miq expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 02
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: Vm
+ expression_type: miq
+ expression:
+ and:
+ - CONTAINS:
+ tag: Vm.managed-environment
+ value: prod
+ - not:
+ CONTAINS:
+ tag: Vm.host.managed-environment
+ value: prod
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert from ManageIQ
+ community.general.manageiq_alerts:
+ state: absent
+ description: Test Alert 01
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlert(object):
+ """ Represent a ManageIQ alert. Can be initialized with both the format
+ we receive from the server and the format we get from the user.
+ """
+ def __init__(self, alert):
+ self.description = alert['description']
+ self.db = alert['db']
+ self.enabled = alert['enabled']
+ self.options = alert['options']
+ self.hash_expression = None
+ self.miq_expression = None
+
+ if 'hash_expression' in alert:
+ self.hash_expression = alert['hash_expression']
+ if 'miq_expression' in alert:
+ self.miq_expression = alert['miq_expression']
+ if 'exp' in self.miq_expression:
+ # miq_expression is a field that needs a special case, because
+ # it's returned surrounded by a dict named exp even though we don't
+ # send it with that dict.
+ self.miq_expression = self.miq_expression['exp']
+
+ def __eq__(self, other):
+ """ Compare two ManageIQAlert objects
+ """
+ return self.__dict__ == other.__dict__
+
+
+class ManageIQAlerts(object):
+ """ Object to execute alert management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
+
+ def get_alerts(self):
+ """ Get all alerts from ManageIQ
+ """
+ try:
+ response = self.client.get(self.alerts_url + '?expand=resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
+ return response.get('resources', [])
+
+ def validate_hash_expression(self, expression):
+ """ Validate a 'hash expression' alert definition
+ """
+ # hash expressions must have the following fields
+ for key in ['options', 'eval_method', 'mode']:
+ if key not in expression:
+ msg = "Hash expression is missing required field {key}".format(key=key)
+ self.module.fail_json(msg=msg)
+
+ def create_alert_dict(self, params):
+ """ Create a dict representing an alert
+ """
+ if params['expression_type'] == 'hash':
+ # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
+ self.validate_hash_expression(params['expression'])
+ expression_type = 'hash_expression'
+ else:
+ # actually miq_expression, but we call it "expression" for backwards-compatibility
+ expression_type = 'expression'
+
+ # build the alert
+ alert = dict(description=params['description'],
+ db=params['resource_type'],
+ options=params['options'],
+ enabled=params['enabled'])
+
+ # add the actual expression.
+ alert.update({expression_type: params['expression']})
+
+ return alert
+
+ def add_alert(self, alert):
+ """ Add a new alert to ManageIQ
+ """
+ try:
+ result = self.client.post(self.alerts_url, action='create', resource=alert)
+
+ msg = "Alert {description} created successfully: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Creating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to create a hash expression
+ msg = msg.format(description=alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def delete_alert(self, alert):
+ """ Delete an alert
+ """
+ try:
+ result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
+ id=alert['id']),
+ action="delete")
+ msg = "Alert {description} deleted: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Deleting alert {description} failed: {error}"
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def update_alert(self, existing_alert, new_alert):
+ """ Update an existing alert with the values from `new_alert`
+ """
+ new_alert_obj = ManageIQAlert(new_alert)
+ if new_alert_obj == ManageIQAlert(existing_alert):
+ # no change needed - alerts are identical
+ return dict(changed=False, msg="No update needed")
+ else:
+ try:
+ url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
+ result = self.client.post(url, action="edit", resource=new_alert)
+
+ # make sure that the update was indeed successful by comparing
+ # the result to the expected result.
+ if new_alert_obj == ManageIQAlert(result):
+ # success!
+ msg = "Alert {description} updated successfully: {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ return dict(changed=True, msg=msg)
+ else:
+ # unexpected result
+ msg = "Updating alert {description} failed, unexpected result {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = "Updating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to update a hash expression
+ msg = msg.format(description=existing_alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=existing_alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ description=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
+ expression=dict(type='dict'),
+ options=dict(type='dict'),
+ enabled=dict(type='bool'),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['description',
+ 'resource_type',
+ 'expression',
+ 'enabled',
+ 'options']),
+ ('state', 'absent', ['description'])])
+
+ state = module.params['state']
+ description = module.params['description']
+
+ manageiq = ManageIQ(module)
+ manageiq_alerts = ManageIQAlerts(manageiq)
+
+ existing_alert = manageiq.find_collection_resource_by("alert_definitions",
+ description=description)
+
+ # we need to add or update the alert
+ if state == "present":
+ alert = manageiq_alerts.create_alert_dict(module.params)
+
+ if not existing_alert:
+ # an alert with this description doesn't exist yet, let's create it
+ res_args = manageiq_alerts.add_alert(alert)
+ else:
+ # an alert with this description exists, we might need to update it
+ res_args = manageiq_alerts.update_alert(existing_alert, alert)
+
+ # this alert should not exist
+ elif state == "absent":
+ # if we have an alert with this description, delete it
+ if existing_alert:
+ res_args = manageiq_alerts.delete_alert(existing_alert)
+ else:
+ # it doesn't exist, and that's okay
+ msg = "Alert '{description}' does not exist in ManageIQ"
+ msg = msg.format(description=description)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py
new file mode 100644
index 00000000..2050eb63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py
@@ -0,0 +1,648 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder <evertmulder@gmail.com> (based on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_group
+
+short_description: Management of groups in ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
+requirements:
+- manageiq-client
+
+options:
+ state:
+ type: str
+ description:
+ - absent - group should not exist, present - group should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The group description.
+ required: true
+ default: null
+ role_id:
+ type: int
+ description:
+ - The group role id.
+ required: false
+ default: null
+ role:
+ type: str
+ description:
+ - The group role name.
+ - The C(role_id) has precedence over the C(role) when supplied.
+ required: false
+ default: null
+ tenant_id:
+ type: int
+ description:
+ - The tenant for the group identified by the tenant id.
+ required: false
+ default: null
+ tenant:
+ type: str
+ description:
+ - The tenant for the group identified by the tenant name.
+ - The C(tenant_id) has precedence over the C(tenant) when supplied.
+ - Tenant names are case sensitive.
+ required: false
+ default: null
+ managed_filters:
+ description: The tag values per category
+ type: dict
+ required: false
+ default: null
+ managed_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode, existing categories are kept or updated and new categories are added.
+ - In replace mode, all categories will be replaced with the supplied C(managed_filters).
+ choices: [ merge, replace ]
+ default: replace
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ type: list
+ elements: str
+ required: false
+ default: null
+ belongsto_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode, existing settings are merged with the supplied C(belongsto_filters).
+ - In replace mode, current values are replaced with the supplied C(belongsto_filters).
+ choices: [ merge, replace ]
+ default: replace
+'''
+
+EXAMPLES = '''
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: 'my_tenant'
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant_id: 4
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create or update a group with role EvmRole-user and tenant my_tenant, apply 3 prov_max_cpu and 2 department tags, and limit access to a cluster
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: my_tenant
+ managed_filters:
+ prov_max_cpu:
+ - '1'
+ - '2'
+ - '4'
+ department:
+ - defense
+ - engineering
+ managed_filters_merge_mode: replace
+ belongsto_filters:
+ - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
+ belongsto_filters_merge_mode: merge
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a group in ManageIQ
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+
+- name: Delete a group in ManageIQ using a token
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+'''
+
+RETURN = '''
+group:
+ description: The group.
+ returned: success
+ type: complex
+ contains:
+ description:
+ description: The group description
+ returned: success
+ type: str
+ id:
+ description: The group id
+ returned: success
+ type: int
+ group_type:
+ description: The group type, system or user
+ returned: success
+ type: str
+ role:
+ description: The group role name
+ returned: success
+ type: str
+ tenant:
+ description: The group tenant name
+ returned: success
+ type: str
+ managed_filters:
+ description: The tag values per category
+ returned: success
+ type: dict
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ returned: success
+ type: list
+ created_on:
+ description: Group creation date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+ updated_on:
+ description: Group update date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQgroup(object):
+ """
+ Object to execute group management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group(self, description):
+ """ Search for group object by description.
+ Returns:
+ the group, or None if group was not found.
+ """
+ groups = self.client.collections.groups.find_by(description=description)
+ if len(groups) == 0:
+ return None
+ else:
+ return groups[0]
+
+ def tenant(self, tenant_id, tenant_name):
+ """ Search for tenant entity by name or id
+ Returns:
+ the tenant entity, None if no id or name was supplied
+ """
+
+ if tenant_id:
+ tenant = self.client.get_entity('tenants', tenant_id)
+ if not tenant:
+ self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
+ return tenant
+ else:
+ if tenant_name:
+ tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
+ if not tenant_res:
+ self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
+ if len(tenant_res) > 1:
+ self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name)
+ tenant = tenant_res[0]
+ return tenant
+ else:
+ # No tenant name or tenant id supplied
+ return None
+
+ def role(self, role_id, role_name):
+ """ Search for a role object by name or id.
+ Returns:
+ the role entity, or None if neither id nor name was supplied.
+ Fails the module if the requested role was not found.
+ """
+ if role_id:
+ role = self.client.get_entity('roles', role_id)
+ if not role:
+ self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
+ return role
+ else:
+ if role_name:
+ role_res = self.client.collections.roles.find_by(name=role_name)
+ if not role_res:
+ self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
+ if len(role_res) > 1:
+ self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name)
+ return role_res[0]
+ else:
+ # No role name or role id supplied
+ return None
+
+ @staticmethod
+ def merge_dict_values(norm_current_values, norm_updated_values):
+ """ Create an merged update object for manageiq group filters.
+
+ The input dicts contain the tag values per category.
+ If the new values contain a category, all tags for that category are replaced.
+ If the new values do not contain a category, the existing tags are kept.
+
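+ For example (hypothetical values), merging
+ {'department': ['defense'], 'environment': ['prod']} with
+ {'department': ['engineering']} yields
+ {'department': ['engineering'], 'environment': ['prod']}.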
+ Returns:
+ the merged dict of tag values per category, used in the update post body
+ """
+
+ # If no updated values are supplied in merge mode, the original values must be
+ # returned; otherwise the existing tag filters would be removed.
+ if norm_current_values and (not norm_updated_values):
+ return norm_current_values
+
+ # If no existing tag filters exist, use the user supplied values
+ if (not norm_current_values) and norm_updated_values:
+ return norm_updated_values
+
+ # start with norm_current_values's keys and values
+ res = norm_current_values.copy()
+ # replace res with norm_updated_values's keys and values
+ res.update(norm_updated_values)
+ return res
+
+ def delete_group(self, group):
+ """ Deletes a group from manageiq.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+ try:
+ url = '%s/groups/%s' % (self.api_url, group['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(
+ changed=True,
+ msg="deleted group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+
+ if role or norm_managed_filters or belongsto_filters:
+ group.reload(attributes=['miq_user_role_name', 'entitlement'])
+
+ try:
+ current_role = group['miq_user_role_name']
+ except AttributeError:
+ current_role = None
+
+ changed = False
+ resource = {}
+
+ if description and group['description'] != description:
+ resource['description'] = description
+ changed = True
+
+ if tenant and group['tenant_id'] != tenant['id']:
+ resource['tenant'] = dict(id=tenant['id'])
+ changed = True
+
+ if role and current_role != role['name']:
+ resource['role'] = dict(id=role['id'])
+ changed = True
+
+ if norm_managed_filters or belongsto_filters:
+
+ # Only compare if filters are supplied
+ entitlement = group['entitlement']
+
+ if 'filters' not in entitlement:
+ # No existing filters exist, use supplied filters
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+ changed = True
+ else:
+ current_filters = entitlement['filters']
+ new_filters = self.edit_group_edit_filters(current_filters,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+ if new_filters:
+ resource['filters'] = new_filters
+ changed = True
+
+ if not changed:
+ return dict(
+ changed=False,
+ msg="group %s is not changed." % group['description'])
+
+ # try to update group
+ try:
+ self.client.post(group['href'], action='edit', resource=resource)
+ changed = True
+ except Exception as e:
+ self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e)))
+
+ return dict(
+ changed=changed,
+ msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group filters.
+
+ Returns:
+ None if no the group was not updated
+ If the group was updated the post body part for updating the group
+ """
+ filters_updated = False
+ new_filters_resource = {}
+
+ # the API returns 'belongsto' as a list (if present); normalize to a set for comparison
+ current_belongsto_set = set(current_filters.get('belongsto', []))
+
+ if belongsto_filters:
+ new_belongsto_set = set(belongsto_filters)
+ else:
+ new_belongsto_set = set()
+
+ if current_belongsto_set == new_belongsto_set:
+ new_filters_resource['belongsto'] = current_filters.get('belongsto', [])
+ else:
+ if belongsto_filters_merge_mode == 'merge':
+ current_belongsto_set.update(new_belongsto_set)
+ new_filters_resource['belongsto'] = list(current_belongsto_set)
+ else:
+ new_filters_resource['belongsto'] = list(new_belongsto_set)
+ filters_updated = True
+
+ # Process the managed filter tags
+ # The user input is a dict whose keys are categories and whose values are arrays of tag strings.
+ # ManageIQ returns the current managed filters as an array of arrays, one array per category.
+ # Both sides are normalized to dicts of sorted arrays so they can be compared.
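+ # e.g. [['/managed/department/dev', '/managed/department/qa']]
+ # normalizes to {'department': ['/managed/department/dev', '/managed/department/qa']}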
+ norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
+
+ if norm_current_filters == norm_managed_filters:
+ if 'managed' in current_filters:
+ new_filters_resource['managed'] = current_filters['managed']
+ else:
+ if managed_filters_merge_mode == 'merge':
+ merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
+ else:
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ filters_updated = True
+
+ if not filters_updated:
+ return None
+
+ return new_filters_resource
+
+ def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
+ """ Creates the group in manageiq.
+
+ Returns:
+ a dict with the changed flag, a message and the id of
+ the created group.
+ """
+ # check for required arguments
+ for key, value in dict(description=description).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/groups' % self.api_url
+
+ resource = {'description': description}
+
+ if role is not None:
+ resource['role'] = dict(id=role['id'])
+
+ if tenant is not None:
+ resource['tenant'] = dict(id=tenant['id'])
+
+ if norm_managed_filters or belongsto_filters:
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created group %s" % description,
+ group_id=result['results'][0]['id']
+ )
+
+ @staticmethod
+ def normalized_managed_tag_filters_to_miq(norm_managed_filters):
+ if not norm_managed_filters:
+ return None
+
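+ # ManageIQ expects an array of arrays, one inner array per category,
+ # e.g. {'department': [...], 'location': [...]} => [[...], [...]]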
+ return list(norm_managed_filters.values())
+
+ @staticmethod
+ def manageiq_filters_to_sorted_dict(current_filters):
+ current_managed_filters = current_filters.get('managed')
+ if not current_managed_filters:
+ return None
+
+ res = {}
+ for tag_list in current_managed_filters:
+ tag_list.sort()
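+ # tags look like '/managed/<category>/<tag>'; index 2 is the category name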
+ key = tag_list[0].split('/')[2]
+ res[key] = tag_list
+
+ return res
+
+ @staticmethod
+ def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
+ if not managed_filters:
+ return None
+
+ res = {}
+ for cat_key in managed_filters:
+ cat_array = []
+ if not isinstance(managed_filters[cat_key], list):
+ module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
+ for tags in managed_filters[cat_key]:
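+ # e.g. category 'environment' and tag 'prod' => '/managed/environment/prod'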
+ miq_managed_tag = "/managed/" + cat_key + "/" + tags
+ cat_array.append(miq_managed_tag)
+ # Do not add empty categories. ManageIQ will remove all categories that are not supplied
+ if cat_array:
+ cat_array.sort()
+ res[cat_key] = cat_array
+ return res
+
+ @staticmethod
+ def create_result_group(group):
+ """ Creates the ansible result object from a manageiq group entity
+
+ Returns:
+ a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
+ """
+ try:
+ role_name = group['miq_user_role_name']
+ except AttributeError:
+ role_name = None
+
+ managed_filters = None
+ belongsto_filters = None
+ if 'filters' in group['entitlement']:
+ filters = group['entitlement']['filters']
+ belongsto_filters = filters.get('belongsto')
+ group_managed_filters = filters.get('managed')
+ if group_managed_filters:
+ managed_filters = {}
+ for tag_list in group_managed_filters:
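+ # each tag is '/managed/<category>/<tag>'; split out the category (index 2) and tag name (index 3)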
+ key = tag_list[0].split('/')[2]
+ tags = []
+ for t in tag_list:
+ tags.append(t.split('/')[3])
+ managed_filters[key] = tags
+
+ return dict(
+ id=group['id'],
+ description=group['description'],
+ role=role_name,
+ tenant=group['tenant']['name'],
+ managed_filters=managed_filters,
+ belongsto_filters=belongsto_filters,
+ group_type=group['group_type'],
+ created_on=group['created_on'],
+ updated_on=group['updated_on'],
+ )
+
+
+def main():
+ argument_spec = dict(
+ description=dict(required=True, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ role_id=dict(required=False, type='int'),
+ role=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='int'),
+ tenant=dict(required=False, type='str'),
+ managed_filters=dict(required=False, type='dict'),
+ managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ belongsto_filters=dict(required=False, type='list', elements='str'),
+ belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ description = module.params['description']
+ state = module.params['state']
+ role_id = module.params['role_id']
+ role_name = module.params['role']
+ tenant_id = module.params['tenant_id']
+ tenant_name = module.params['tenant']
+ managed_filters = module.params['managed_filters']
+ managed_filters_merge_mode = module.params['managed_filters_merge_mode']
+ belongsto_filters = module.params['belongsto_filters']
+ belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode']
+
+ manageiq = ManageIQ(module)
+ manageiq_group = ManageIQgroup(manageiq)
+
+ group = manageiq_group.group(description)
+
+ # group should not exist
+ if state == "absent":
+ # if we have a group, delete it
+ if group:
+ res_args = manageiq_group.delete_group(group)
+ # if we do not have a group, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="group '%s' does not exist in manageiq" % description)
+
+ # group should exist
+ if state == "present":
+
+ tenant = manageiq_group.tenant(tenant_id, tenant_name)
+ role = manageiq_group.role(role_id, role_name)
+ norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module)
+ # if we have a group, edit it
+ if group:
+ res_args = manageiq_group.edit_group(group, description, role, tenant,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+
+ # if we do not have a group, create it
+ else:
+ res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters)
+ group = manageiq.client.get_entity('groups', res_args['group_id'])
+
+ group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
+ res_args['group'] = manageiq_group.create_result_group(group)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py
new file mode 100644
index 00000000..600c0bff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_policies
+
+short_description: Management of resource policy_profiles in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - policy_profiles should not exist,
+ - present - policy_profiles should exist,
+ - list - list current policy_profiles and policies.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ policy_profiles:
+ type: list
+ description:
+ - list of dictionaries, each includes the policy_profile 'name' key.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the type of the resource to which the profile should be [un]assigned
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the name of the resource to which the profile should be [un]assigned
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Assign new policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Unassign a policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current policy_profile and policies for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+manageiq_policies:
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ
+ returned: always
+ type: dict
+ sample: '{
+ "changed": false,
+ "profiles": [
+ {
+ "policies": [
+ {
+ "active": true,
+ "description": "OpenSCAP",
+ "name": "openscap policy"
+ },
+ {
+ "active": true,
+ "description": "Analyse incoming container images",
+ "name": "analyse incoming container images"
+ },
+ {
+ "active": true,
+ "description": "Schedule compliance after smart state analysis",
+ "name": "schedule compliance after smart state analysis"
+ }
+ ],
+ "profile_description": "OpenSCAP profile",
+ "profile_name": "openscap profile"
+ }
+ ]
+ }'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+class ManageIQPolicies(object):
+ """
+ Object to execute policies management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def query_profile_href(self, profile):
+ """ Add or Update the policy_profile href field
+
+ Example:
+ {name: STR, ...} => {name: STR, href: STR}
+ """
+ resource = self.manageiq.find_collection_resource_or_fail(
+ "policy_profiles", **profile)
+ return dict(name=profile['name'], href=resource['href'])
+
+ def query_resource_profiles(self):
+ """ Returns a set of the profile objects objects assigned to the resource
+ """
+ url = '{resource_url}/policy_profiles?expand=resources'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api profile object to look like:
+ # {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
+ profiles = [self.clean_profile_object(profile) for profile in resources]
+
+ return profiles
+
+ def query_profile_policies(self, profile_id):
+ """ Returns a set of the policy objects assigned to the resource
+ """
+ url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
+ try:
+ response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('policies', [])
+
+ # clean the returned rest api policy object to look like:
+ # {name: STR, description: STR, active: BOOL}
+ policies = [self.clean_policy_object(policy) for policy in resources]
+
+ return policies
+
+ def clean_policy_object(self, policy):
+ """ Clean a policy object to have human readable form of:
+ {
+ name: STR,
+ description: STR,
+ active: BOOL
+ }
+ """
+ name = policy.get('name')
+ description = policy.get('description')
+ active = policy.get('active')
+
+ return dict(
+ name=name,
+ description=description,
+ active=active)
+
+ def clean_profile_object(self, profile):
+ """ Clean a profile object to have human readable form of:
+ {
+ profile_name: STR,
+ profile_description: STR,
+ policies: ARR<POLICIES>
+ }
+ """
+ profile_id = profile['id']
+ name = profile.get('name')
+ description = profile.get('description')
+ policies = self.query_profile_policies(profile_id)
+
+ return dict(
+ profile_name=name,
+ profile_description=description,
+ policies=policies)
+
+ def profiles_to_update(self, profiles, action):
+ """ Create a list of policies we need to update in ManageIQ.
+
+ Returns:
+ Whether or not a change took place and a message describing the
+ operation executed.
+ """
+ profiles_to_post = []
+ assigned_profiles = self.query_resource_profiles()
+
+ # make a list of assigned full profile names strings
+ # e.g. ['openscap profile', ...]
+ assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
+
+ for profile in profiles:
+ assigned = profile.get('name') in assigned_profiles_set
+
+ if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
+ # add/update the policy profile href field
+ # {name: STR, ...} => {name: STR, href: STR}
+ profile = self.query_profile_href(profile)
+ profiles_to_post.append(profile)
+
+ return profiles_to_post
+
+ def assign_or_unassign_profiles(self, profiles, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of profiles needed to be changed
+ profiles_to_post = self.profiles_to_update(profiles, action)
+ if not profiles_to_post:
+ return dict(
+ changed=False,
+ msg="Profiles {profiles} already {action}ed, nothing to do".format(
+ action=action,
+ profiles=profiles))
+
+ # try to assign or unassign profiles to resource
+ url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=profiles_to_post)
+ except Exception as e:
+ msg = "Failed to {action} profile: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed profiles
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed profiles: {profiles}".format(
+ action=action,
+ profiles=profiles))
+
+
+def main():
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ policy_profiles=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['policy_profiles']),
+ ('state', 'absent', ['policy_profiles'])
+ ],
+ )
+
+ policy_profiles = module.params['policy_profiles']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
+
+ manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current profiles for this object
+ current_profiles = manageiq_policies.query_resource_profiles()
+ res_args = dict(changed=False, profiles=current_profiles)
+ else:
+ # assign or unassign the profiles
+ res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py
new file mode 100644
index 00000000..7f55b55b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py
@@ -0,0 +1,928 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: manageiq_provider
+short_description: Management of providers in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - provider should not exist, present - provider should exist, refresh - provider will be refreshed
+ choices: ['absent', 'present', 'refresh']
+ default: 'present'
+ name:
+ type: str
+ description: The provider's name.
+ required: true
+ type:
+ type: str
+ description: The provider's type.
+ choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
+ zone:
+ type: str
+ description: The ManageIQ zone name that will manage the provider.
+ default: 'default'
+ provider_region:
+ type: str
+ description: The provider region name to connect to (e.g. AWS region for Amazon).
+ host_default_vnc_port_start:
+ type: str
+ description: The first port in the host VNC range. defaults to None.
+ host_default_vnc_port_end:
+ type: str
+ description: The last port in the host VNC range. defaults to None.
+ subscription:
+ type: str
+ description: Microsoft Azure subscription ID. defaults to None.
+ project:
+ type: str
+ description: Google Compute Engine Project ID. defaults to None.
+ azure_tenant_id:
+ type: str
+ description: Tenant ID. defaults to None.
+ aliases: [ keystone_v3_domain_id ]
+ tenant_mapping_enabled:
+ type: bool
+ default: 'no'
+ description: Whether to enable mapping of existing tenants. defaults to False.
+ api_version:
+ type: str
+ description: The OpenStack Keystone API version. defaults to None.
+ choices: ['v2', 'v3']
+
+ provider:
+ description: Default endpoint connection information, required if state is present.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+
+ metrics:
+ description: Metrics endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+ path:
+ type: str
+ description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history).
+
+ alerts:
+ description: Alerts endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ type: bool
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ default: true
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+
+ ssh_keypair:
+ description: SSH key pair used for SSH connections to all hosts in this provider.
+ suboptions:
+ hostname:
+ type: str
+ description: Director hostname.
+ required: true
+ userid:
+ type: str
+ description: SSH username.
+ auth_key:
+ type: str
+ description: SSH private key.
+ validate_certs:
+ description:
+ - Whether certificates should be verified for connections.
+ type: bool
+ default: yes
+ aliases: [ verify_ssl ]
+'''
+
+EXAMPLES = '''
+- name: Create a new provider in ManageIQ ('Hawkular' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ role: 'hawkular'
+ hostname: 'example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1:80'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Delete a provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'absent'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Create a new Amazon provider in ManageIQ using token authentication
+ community.general.manageiq_provider:
+ name: 'EngAmazon'
+ type: 'Amazon'
+ state: 'present'
+ provider:
+ hostname: 'amazon.example.com'
+ userid: 'hello'
+ password: 'world'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+
+- name: Create a new oVirt provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'RHEV'
+ type: 'oVirt'
+ state: 'present'
+ provider:
+ hostname: 'rhev01.example.com'
+ userid: 'admin@internal'
+ password: 'password'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ hostname: 'metrics.example.com'
+ path: 'ovirt_engine_history'
+ userid: 'user_id_metrics'
+ password: 'password_metrics'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+- name: Create a new VMware provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngVMware'
+ type: 'VMware'
+ state: 'present'
+ provider:
+ hostname: 'vcenter.example.com'
+ host_default_vnc_port_start: 5800
+ host_default_vnc_port_end: 5801
+ userid: 'root'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+- name: Create a new Azure provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngAzure'
+ type: 'Azure'
+ provider_region: 'northeurope'
+ subscription: 'e272bd74-f661-484f-b223-88dd128a4049'
+ azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048'
+ state: 'present'
+ provider:
+ hostname: 'azure.example.com'
+ userid: 'e272bd74-f661-484f-b223-88dd128a4049'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://cf-6af0.rhpds.opentlc.com'
+ username: 'admin'
+ password: 'password'
+ validate_certs: false
+
+- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
+ community.general.manageiq_provider:
+ name: 'EngDirector'
+ type: 'Director'
+ api_version: 'v3'
+ state: 'present'
+ provider:
+ hostname: 'director.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ ssh_keypair:
+ hostname: director.example.com
+ userid: heat-admin
+ auth_key: 'SecretSSHPrivateKey'
+
+- name: Create a new OpenStack provider in ManageIQ with amqp metrics
+ community.general.manageiq_provider:
+ name: 'EngOpenStack'
+ type: 'OpenStack'
+ api_version: 'v3'
+ state: 'present'
+ provider_region: 'europe'
+ tenant_mapping_enabled: 'False'
+ keystone_v3_domain_id: 'mydomain'
+ provider:
+ hostname: 'openstack.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ role: amqp
+ hostname: 'amqp.example.com'
+ security_protocol: 'non-ssl'
+ port: 5666
+ userid: admin
+ password: password
+
+
+- name: Create a new GCE provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngGoogle'
+ type: 'GCE'
+ provider_region: 'europe-west1'
+ project: 'project1'
+ state: 'present'
+ provider:
+ hostname: 'gce.example.com'
+ auth_key: 'google_json_key'
+ validate_certs: 'false'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+def supported_providers():
+ return dict(
+ Openshift=dict(
+ class_name='ManageIQ::Providers::Openshift::ContainerManager',
+ authtype='bearer',
+ default_role='default',
+ metrics_role='prometheus',
+ alerts_role='prometheus_alerts',
+ ),
+ Amazon=dict(
+ class_name='ManageIQ::Providers::Amazon::CloudManager',
+ ),
+ oVirt=dict(
+ class_name='ManageIQ::Providers::Redhat::InfraManager',
+ default_role='default',
+ metrics_role='metrics',
+ ),
+ VMware=dict(
+ class_name='ManageIQ::Providers::Vmware::InfraManager',
+ ),
+ Azure=dict(
+ class_name='ManageIQ::Providers::Azure::CloudManager',
+ ),
+ Director=dict(
+ class_name='ManageIQ::Providers::Openstack::InfraManager',
+ ssh_keypair_role="ssh_keypair"
+ ),
+ OpenStack=dict(
+ class_name='ManageIQ::Providers::Openstack::CloudManager',
+ ),
+ GCE=dict(
+ class_name='ManageIQ::Providers::Google::CloudManager',
+ ),
+ )
+
+
+def endpoint_list_spec():
+ return dict(
+ provider=dict(type='dict', options=endpoint_argument_spec()),
+ metrics=dict(type='dict', options=endpoint_argument_spec()),
+ alerts=dict(type='dict', options=endpoint_argument_spec()),
+ ssh_keypair=dict(type='dict', options=endpoint_argument_spec()),
+ )
+
+
+def endpoint_argument_spec():
+ return dict(
+ role=dict(),
+ hostname=dict(required=True),
+ port=dict(type='int'),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ certificate_authority=dict(),
+ security_protocol=dict(
+ choices=[
+ 'ssl-with-validation',
+ 'ssl-with-validation-custom-ca',
+ 'ssl-without-validation',
+ 'non-ssl',
+ ],
+ ),
+ userid=dict(),
+ password=dict(no_log=True),
+ auth_key=dict(no_log=True),
+ subscription=dict(no_log=True),
+ project=dict(),
+ uid_ems=dict(),
+ path=dict(),
+ )
+
+
+def delete_nulls(h):
+ """ Remove null entries from a hash
+
+ Returns:
+ a hash without nulls
+ """
+ if isinstance(h, list):
+ return [delete_nulls(i) for i in h]
+ if isinstance(h, dict):
+ return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None)
+
+ return h
+
+
+class ManageIQProvider(object):
+ """
+ Object to execute provider management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def class_name_to_type(self, class_name):
+ """ Convert class_name to type
+
+ Returns:
+ the type
+ """
+ out = [k for k, v in supported_providers().items() if v['class_name'] == class_name]
+ if len(out) == 1:
+ return out[0]
+
+ return None
+
+ def zone_id(self, name):
+ """ Search for zone id by zone name.
+
+ Returns:
+ the zone id, or send a module Fail signal if zone not found.
+ """
+ zone = self.manageiq.find_collection_resource_by('zones', name=name)
+ if not zone: # zone doesn't exist
+ self.module.fail_json(
+ msg="zone %s does not exist in manageiq" % (name))
+
+ return zone['id']
+
+ def provider(self, name):
+ """ Search for provider object by name.
+
+ Returns:
+ the provider, or None if provider not found.
+ """
+ return self.manageiq.find_collection_resource_by('providers', name=name)
+
+ def build_connection_configurations(self, provider_type, endpoints):
+ """ Build "connection_configurations" objects from
+ requested endpoints provided by user
+
+ Returns:
+ the list of connection_configuration objects built from the user supplied endpoints
+ """
+ connection_configurations = []
+ endpoint_keys = endpoint_list_spec().keys()
+ provider_defaults = supported_providers().get(provider_type, {})
+
+ # get endpoint defaults
+ endpoint = endpoints.get('provider')
+ default_auth_key = endpoint.get('auth_key')
+
+ # build a connection_configuration object for each endpoint
+ for endpoint_key in endpoint_keys:
+ endpoint = endpoints.get(endpoint_key)
+ if endpoint:
+ # get role and authtype
+ role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default')
+ if role == 'default':
+ authtype = provider_defaults.get('authtype') or role
+ else:
+ authtype = role
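+ # e.g. for Openshift the default endpoint maps to authtype 'bearer',
+ # while a 'prometheus' metrics role doubles as its authtype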
+
+ # set a connection_configuration
+ connection_configurations.append({
+ 'endpoint': {
+ 'role': role,
+ 'hostname': endpoint.get('hostname'),
+ 'port': endpoint.get('port'),
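+ # the endpoint payload expects verify_ssl as a 0/1 flag, not a boolean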
+ 'verify_ssl': 1 if endpoint.get('validate_certs', True) else 0,
+ 'security_protocol': endpoint.get('security_protocol'),
+ 'certificate_authority': endpoint.get('certificate_authority'),
+ 'path': endpoint.get('path'),
+ },
+ 'authentication': {
+ 'authtype': authtype,
+ 'userid': endpoint.get('userid'),
+ 'password': endpoint.get('password'),
+ 'auth_key': endpoint.get('auth_key') or default_auth_key,
+ }
+ })
+
+ return connection_configurations
+
+ def delete_provider(self, provider):
+ """ Deletes a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Edit a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ connection_configurations=endpoints,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ )
+
+ # NOTE: we do not check for diff's between requested and current
+ # provider, we always submit endpoints with password or auth_keys,
+ # since we can not compare with current password or auth_key,
+ # every edit request is sent to ManageIQ API without comparing
+ # it to current state.
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to update provider
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the provider %s: %s" % (provider['name'], result))
+
+ def create_provider(self, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Creates the provider in manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ connection_configurations=endpoints,
+ )
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to create a new provider
+ try:
+ url = '%s/providers' % (self.api_url)
+ result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the provider %s: %s" % (name, result['results']))
+
+ def refresh(self, provider, name):
+ """ Trigger provider refresh.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='refresh')
+ except Exception as e:
+ self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="refreshing provider %s" % name)
+
+
+def main():
+ zone_id = None
+ endpoints = []
+ argument_spec = dict(
+ state=dict(choices=['absent', 'present', 'refresh'], default='present'),
+ name=dict(required=True),
+ zone=dict(default='default'),
+ provider_region=dict(),
+ host_default_vnc_port_start=dict(),
+ host_default_vnc_port_end=dict(),
+ subscription=dict(),
+ project=dict(),
+ azure_tenant_id=dict(aliases=['keystone_v3_domain_id']),
+ tenant_mapping_enabled=dict(default=False, type='bool'),
+ api_version=dict(choices=['v2', 'v3']),
+ type=dict(choices=list(supported_providers().keys())),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+ # add the endpoint arguments to the arguments
+ argument_spec.update(endpoint_list_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['provider']),
+ ('state', 'refresh', ['name'])],
+ required_together=[
+ ['host_default_vnc_port_start', 'host_default_vnc_port_end']
+ ],
+ )
+
+ name = module.params['name']
+ zone_name = module.params['zone']
+ provider_type = module.params['type']
+ raw_endpoints = module.params
+ provider_region = module.params['provider_region']
+ host_default_vnc_port_start = module.params['host_default_vnc_port_start']
+ host_default_vnc_port_end = module.params['host_default_vnc_port_end']
+ subscription = module.params['subscription']
+ uid_ems = module.params['azure_tenant_id']
+ project = module.params['project']
+ tenant_mapping_enabled = module.params['tenant_mapping_enabled']
+ api_version = module.params['api_version']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_provider = ManageIQProvider(manageiq)
+
+ provider = manageiq_provider.provider(name)
+
+ # provider should not exist
+ if state == "absent":
+ # if we have a provider, delete it
+ if provider:
+ res_args = manageiq_provider.delete_provider(provider)
+ # if we do not have a provider, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ # provider should exist
+ if state == "present":
+ # get data user did not explicitly give
+ if zone_name:
+ zone_id = manageiq_provider.zone_id(zone_name)
+
+ # if we do not have a provider_type, use the current provider_type
+ if provider and not provider_type:
+ provider_type = manageiq_provider.class_name_to_type(provider['type'])
+
+ # fail if we do not have a provider type
+ if not provider_type:
+ manageiq_provider.module.fail_json(
+ msg="missing required argument: provider_type")
+
+ # check supported_providers types
+ if provider_type not in supported_providers().keys():
+ manageiq_provider.module.fail_json(
+ msg="provider_type %s is not supported" % (provider_type))
+
+ # build "connection_configurations" objects from user requested endpoints
+ # "provider" is a required endpoint, if we have it, we have endpoints
+ if raw_endpoints.get("provider"):
+ endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints)
+
+ # if we have a provider, edit it
+ if provider:
+ res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+ # if we do not have a provider, create it
+ else:
+ res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+
+ # refresh provider (trigger sync)
+ if state == "refresh":
+ if provider:
+ res_args = manageiq_provider.refresh(provider, name)
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py
new file mode 100644
index 00000000..68de2324
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_tags
+
+short_description: Management of resource tags in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - tags should not exist,
+ - present - tags should exist,
+ - list - list current tags.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ tags:
+ type: list
+ description:
+ - tags - list of dictionaries, each includes 'name' and 'category' keys.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the relevant resource type in manageiq
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the relevant resource name in manageiq
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create new tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Remove tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def query_resource_id(manageiq, resource_type, resource_name):
+ """ Query the resource name in ManageIQ.
+
+ Returns:
+ the resource id if it exists in manageiq, fails the module otherwise.
+ """
+ resource = manageiq.find_collection_resource_by(resource_type, name=resource_name)
+ if resource:
+ return resource["id"]
+ else:
+ msg = "{resource_name} {resource_type} does not exist in manageiq".format(
+ resource_name=resource_name, resource_type=resource_type)
+ manageiq.module.fail_json(msg=msg)
+
+
+class ManageIQTags(object):
+ """
+ Object to execute tags management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def full_tag_name(self, tag):
+ """ Returns the full tag name in manageiq
+ """
+ return '/managed/{tag_category}/{tag_name}'.format(
+ tag_category=tag['category'],
+ tag_name=tag['name'])
+
+ def clean_tag_object(self, tag):
+ """ Clean a tag object to have human readable form of:
+ {
+ full_name: STR,
+ name: STR,
+ display_name: STR,
+ category: STR
+ }
+ """
+ full_name = tag.get('name')
+ categorization = tag.get('categorization', {})
+
+ return dict(
+ full_name=full_name,
+ name=categorization.get('name'),
+ display_name=categorization.get('display_name'),
+ category=categorization.get('category', {}).get('name'))
+
+ def query_resource_tags(self):
+ """ Returns a set of the tag objects assigned to the resource
+ """
+ url = '{resource_url}/tags?expand=resources&attributes=categorization'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} tags: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api tag object to look like:
+ # {full_name: STR, name: STR, display_name: STR, category: STR}
+ tags = [self.clean_tag_object(tag) for tag in resources]
+
+ return tags
+
+ def tags_to_update(self, tags, action):
+ """ Create a list of tags we need to update in ManageIQ.
+
+ Returns:
+ the list of tags that need to be posted to ManageIQ to apply
+ the requested action.
+ """
+ tags_to_post = []
+ assigned_tags = self.query_resource_tags()
+
+ # make a list of assigned full tag names strings
+ # e.g. ['/managed/environment/prod', ...]
+ assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
+
+ for tag in tags:
+ assigned = self.full_tag_name(tag) in assigned_tags_set
+
+ if assigned and action == 'unassign':
+ tags_to_post.append(tag)
+ elif (not assigned) and action == 'assign':
+ tags_to_post.append(tag)
+
+ return tags_to_post
+
+ def assign_or_unassign_tags(self, tags, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of tags needed to be changed
+ tags_to_post = self.tags_to_update(tags, action)
+ if not tags_to_post:
+ return dict(
+ changed=False,
+ msg="Tags already {action}ed, nothing to do".format(action=action))
+
+ # try to assign or unassign tags to resource
+ url = '{resource_url}/tags'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=tags_to_post)
+ except Exception as e:
+ msg = "Failed to {action} tag: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed tags
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed tags".format(action=action))
+
+
+def main():
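+ # map the module 'state' choices to the corresponding ManageIQ tag actions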
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ tags=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['tags']),
+ ('state', 'absent', ['tags'])
+ ],
+ )
+
+ tags = module.params['tags']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = query_resource_id(manageiq, resource_type, resource_name)
+
+ manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current tags for this object
+ current_tags = manageiq_tags.query_resource_tags()
+ res_args = dict(changed=False, tags=current_tags)
+ else:
+ # assign or unassign the tags
+ res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
new file mode 100644
index 00000000..3ec174cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
@@ -0,0 +1,557 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_tenant
+
+short_description: Management of tenants in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
+requirements:
+- manageiq-client
+options:
+ state:
+ type: str
+ description:
+ - absent - tenant should not exist, present - tenant should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The tenant name.
+ required: true
+ default: null
+ description:
+ type: str
+ description:
+ - The tenant description.
+ required: true
+ default: null
+ parent_id:
+ type: int
+ description:
+ - The id of the parent tenant. If not supplied, the root tenant is used.
+ - The C(parent_id) takes precedence over C(parent) when supplied.
+ required: false
+ default: null
+ parent:
+ type: str
+ description:
+ - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
+ required: false
+ default: null
+ quotas:
+ type: dict
+ description:
+ - The tenant quotas.
+ - All parameters are case sensitive.
+ - 'Valid attributes are:'
+ - ' - C(cpu_allocated) (int): use null to remove the quota.'
+ - ' - C(mem_allocated) (GB): use null to remove the quota.'
+ - ' - C(storage_allocated) (GB): use null to remove the quota.'
+ - ' - C(vms_allocated) (int): use null to remove the quota.'
+ - ' - C(templates_allocated) (int): use null to remove the quota.'
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- name: Update the root tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'My Company'
+ description: 'My company name'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ description: 'Manufacturing department'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ parent_id: 1
+ quotas:
+ - cpu_allocated: 100
+ - mem_allocated: 50
+ - vms_allocated: null
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+
+- name: Delete a tenant in ManageIQ using a token
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+'''
+
+RETURN = '''
+tenant:
+ description: The tenant.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The tenant id
+ returned: success
+ type: int
+ name:
+ description: The tenant name
+ returned: success
+ type: str
+ description:
+ description: The tenant description
+ returned: success
+ type: str
+ parent_id:
+ description: The id of the parent tenant
+ returned: success
+ type: int
+ quotas:
+ description: List of tenant quotas
+ returned: success
+ type: list
+ sample:
+ cpu_allocated: 100
+ mem_allocated: 50
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQTenant(object):
+ """
+ Object to execute tenant management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def tenant(self, name, parent_id, parent):
+ """ Search for tenant object by name and parent_id or parent
+ or the root tenant if no parent or parent_id is supplied.
+ Returns:
+ the parent tenant, None for the root tenant
+ the tenant or None if tenant was not found.
+ """
+
+ if parent_id:
+ parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
+ parent_tenant = parent_tenant_res[0]
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
+ if ancestry:
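+ # ancestry is a slash-separated path of ancestor ids, e.g. '1/2/3';
+ # the last element is the direct parent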
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if tenant_parent_id == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ if parent:
+ parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
+
+ if len(parent_tenant_res) > 1:
+ self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)
+
+ parent_tenant = parent_tenant_res[0]
+ parent_id = int(parent_tenant['id'])
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if tenant_parent_id == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ # No parent or parent_id supplied; we select the root tenant
+ return None, self.client.collections.tenants.find_by(ancestry=None)[0]
+
+ def compare_tenant(self, tenant, name, description):
+ """ Compare tenant fields with new field values.
+
+ Returns:
+ false if the tenant fields differ from the new field values, true otherwise.
+ """
+ found_difference = (
+ (name and tenant['name'] != name) or
+ (description and tenant['description'] != description)
+ )
+
+ return not found_difference
+
+ def delete_tenant(self, tenant):
+ """ Deletes a tenant from manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ try:
+ url = '%s/tenants/%s' % (self.api_url, tenant['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_tenant(self, tenant, name, description):
+ """ Edit a manageiq tenant.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ resource = dict(name=name, description=description, use_config_for_attributes=False)
+
+ # check if we need to update (compare_tenant returns true if no difference is found)
+ if self.compare_tenant(tenant, name, description):
+ return dict(
+ changed=False,
+ msg="tenant %s is not changed." % tenant['name'],
+ tenant=tenant['_data'])
+
+ # try to update tenant
+ try:
+ result = self.client.post(tenant['href'], action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the tenant with id %s" % (tenant['id']))
+
+ def create_tenant(self, name, description, parent_tenant):
+ """ Creates the tenant in manageiq.
+
+ Returns:
+ dict with `msg`, `changed` and `tenant_id`
+ """
+ parent_id = parent_tenant['id']
+ # check for required arguments
+ for key, value in dict(name=name, description=description, parent_id=parent_id).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/tenants' % self.api_url
+
+ resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ tenant_id = result['results'][0]['id']
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
+ tenant_id=tenant_id)
+
+ def tenant_quota(self, tenant, quota_key):
+ """ Search for tenant quota object by tenant and quota_key.
+ Returns:
+ the matching quotas for the tenant; an empty list if none were found.
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
+
+ return tenant_quotas['resources']
+
+ def tenant_quotas(self, tenant):
+ """ Search for tenant quotas object by tenant.
+ Returns:
+ the quotas for the tenant; an empty list if none were found.
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
+
+ return tenant_quotas['resources']
+
+ def update_tenant_quotas(self, tenant, quotas):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+
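+ # e.g. quotas={'cpu_allocated': 100, 'vms_allocated': None} updates the
+ # cpu quota and removes the vms quota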
+ changed = False
+ messages = []
+ for quota_key, quota_value in quotas.items():
+ current_quota_filtered = self.tenant_quota(tenant, quota_key)
+ if current_quota_filtered:
+ current_quota = current_quota_filtered[0]
+ else:
+ current_quota = None
+
+ if quota_value:
+ # Convert the GB values to bytes
+ if quota_key in ['storage_allocated', 'mem_allocated']:
+ quota_value_int = int(quota_value) * 1024 * 1024 * 1024
+ else:
+ quota_value_int = int(quota_value)
+ if current_quota:
+ res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
+ else:
+ res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
+ else:
+ if current_quota:
+ res = self.delete_tenant_quota(tenant, current_quota)
+ else:
+ res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
+
+ if res['changed']:
+ changed = True
+
+ messages.append(res['msg'])
+
+ return dict(
+ changed=changed,
+ msg=', '.join(messages))
+
+ def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
+ """ Update the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+
+ if current_quota['value'] == quota_value:
+ return dict(
+ changed=False,
+ msg="tenant quota %s already has value %s" % (quota_key, quota_value))
+ else:
+
+ url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
+ resource = {'value': quota_value}
+ try:
+ self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated tenant quota %s" % quota_key)
+
+ def create_tenant_quota(self, tenant, quota_key, quota_value):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ url = '%s/quotas' % (tenant['href'])
+ resource = {'name': quota_key, 'value': quota_value}
+ try:
+ self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant quota %s" % quota_key)
+
+ def delete_tenant_quota(self, tenant, quota):
+ """ deletes the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ try:
+ result = self.client.post(quota['href'], action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def create_tenant_response(self, tenant, parent_tenant):
+ """ Creates the ansible result object from a manageiq tenant entity
+
+ Returns:
+ a dict with the tenant id, name, description, parent id,
+ quotas
+ """
+ tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
+
+ try:
+ ancestry = tenant['ancestry']
+ tenant_parent_id = ancestry.split("/")[-1]
+ except AttributeError:
+ # The root tenant does not return the ancestry attribute
+ tenant_parent_id = None
+
+ return dict(
+ id=tenant['id'],
+ name=tenant['name'],
+ description=tenant['description'],
+ parent_id=tenant_parent_id,
+ quotas=tenant_quotas
+ )
+
+ @staticmethod
+ def create_tenant_quotas_response(tenant_quotas):
+ """ Creates the ansible result object from a manageiq tenant_quotas entity
+
+ Returns:
+ a dict with the applied quotas, name and value
+ """
+
+ if not tenant_quotas:
+ return {}
+
+ result = {}
+ for quota in tenant_quotas:
+ if quota['unit'] == 'bytes':
+ value = float(quota['value']) / (1024 * 1024 * 1024)
+ else:
+ value = quota['value']
+ result[quota['name']] = value
+ return result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ description=dict(required=True, type='str'),
+ parent_id=dict(required=False, type='int'),
+ parent=dict(required=False, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ quotas=dict(type='dict', default={})
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ name = module.params['name']
+ description = module.params['description']
+ parent_id = module.params['parent_id']
+ parent = module.params['parent']
+ state = module.params['state']
+ quotas = module.params['quotas']
+
+ manageiq = ManageIQ(module)
+ manageiq_tenant = ManageIQTenant(manageiq)
+
+ parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
+
+ # tenant should not exist
+ if state == "absent":
+ # if we have a tenant, delete it
+ if tenant:
+ res_args = manageiq_tenant.delete_tenant(tenant)
+ # if we do not have a tenant, nothing to do
+ else:
+ if parent_id:
+ msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
+ else:
+ msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
+
+ res_args = dict(
+ changed=False,
+ msg=msg)
+
+ # tenant should exist
+ if state == "present":
+ # if we have a tenant, edit it
+ if tenant:
+ res_args = manageiq_tenant.edit_tenant(tenant, name, description)
+
+ # if we do not have a tenant, create it
+ else:
+ res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
+ tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
+
+ # quotas are supplied and we have a tenant
+ if quotas:
+ tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+ if tenant_quotas_res['changed']:
+ res_args['changed'] = True
+ res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
+ tenant.reload(expand='resources', attributes=['tenant_quotas'])
+ res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py
new file mode 100644
index 00000000..8905dde2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+#
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_user
+
+short_description: Management of users in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - user should not exist, present - user should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ userid:
+ type: str
+ description:
+ - The unique userid in manageiq, often referred to as the username.
+ required: true
+ name:
+ type: str
+ description:
+ - The user's full name.
+ password:
+ type: str
+ description:
+ - The user's password.
+ group:
+ type: str
+ description:
+ - The name of the group to which the user belongs.
+ email:
+ type: str
+ description:
+ - The user's e-mail address.
+ update_password:
+ type: str
+ default: always
+ choices: ['always', 'on_create']
+ description:
+ - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
+'''
+
+EXAMPLES = '''
+- name: Create a new user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a new user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ using a token
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQUser(object):
+ """
+ Object to execute user management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group_id(self, description):
+ """ Search for group id by group description.
+
+ Returns:
+ the group id, or fails the module if the group is not found.
+ """
+ group = self.manageiq.find_collection_resource_by('groups', description=description)
+ if not group: # group doesn't exist
+ self.module.fail_json(
+ msg="group %s does not exist in manageiq" % (description))
+
+ return group['id']
+
+ def user(self, userid):
+ """ Search for user object by userid.
+
+ Returns:
+ the user, or None if user not found.
+ """
+ return self.manageiq.find_collection_resource_by('users', userid=userid)
+
+ def compare_user(self, user, name, group_id, password, email):
+ """ Compare user fields with new field values.
+
+ Returns:
+ false if the user fields differ from the new field values, true otherwise.
+ """
+ found_difference = (
+ (name and user['name'] != name) or
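+ # a supplied password is always treated as a change, since the
+ # ManageIQ API does not return passwords for comparison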
+ (password is not None) or
+ (email and user['email'] != email) or
+ (group_id and user['current_group_id'] != group_id)
+ )
+
+ return not found_difference
+
+ def delete_user(self, user):
+ """ Deletes a user from manageiq.
+
+ Returns:
+ a dict with `changed` and a message describing the operation executed.
+ """
+ try:
+ url = '%s/users/%s' % (self.api_url, user['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_user(self, user, name, group, password, email):
+ """ Edit a user from manageiq.
+
+ Returns:
+ a dict with `changed` and a message describing the operation executed.
+ """
+ group_id = None
+ url = '%s/users/%s' % (self.api_url, user['id'])
+
+ resource = dict(userid=user['userid'])
+ if group is not None:
+ group_id = self.group_id(group)
+ resource['group'] = dict(id=group_id)
+ if name is not None:
+ resource['name'] = name
+ if email is not None:
+ resource['email'] = email
+
+ # if there is a password param, but 'update_password' is 'on_create'
+ # then discard the password (since we're editing an existing user)
+ if self.module.params['update_password'] == 'on_create':
+ password = None
+ if password is not None:
+ resource['password'] = password
+
+ # check if we need to update (compare_user returns true if no difference is found)
+ if self.compare_user(user, name, group_id, password, email):
+ return dict(
+ changed=False,
+ msg="user %s is not changed." % (user['userid']))
+
+ # try to update user
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the user %s: %s" % (user['userid'], result))
+
+ def create_user(self, userid, name, group, password, email):
+ """ Creates the user in manageiq.
+
+ Returns:
+ a dict with `changed` and a message containing the created user's
+ attributes (id, name, created_on, updated_on, userid and current_group_id).
+ """
+ # check for required arguments
+ for key, value in dict(name=name, group=group, password=password).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % (key))
+
+ group_id = self.group_id(group)
+ url = '%s/users' % (self.api_url)
+
+ resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
+ if email is not None:
+ resource['email'] = email
+
+ # try to create a new user
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the user %s: %s" % (userid, result['results']))
+
+
+def main():
+ argument_spec = dict(
+ userid=dict(required=True, type='str'),
+ name=dict(),
+ password=dict(no_log=True),
+ group=dict(),
+ email=dict(),
+ state=dict(choices=['absent', 'present'], default='present'),
+ update_password=dict(choices=['always', 'on_create'],
+ default='always'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ userid = module.params['userid']
+ name = module.params['name']
+ password = module.params['password']
+ group = module.params['group']
+ email = module.params['email']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_user = ManageIQUser(manageiq)
+
+ user = manageiq_user.user(userid)
+
+ # user should not exist
+ if state == "absent":
+ # if we have a user, delete it
+ if user:
+ res_args = manageiq_user.delete_user(user)
+ # if we do not have a user, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="user %s: does not exist in manageiq" % (userid))
+
+ # user should exist
+ if state == "present":
+ # if we have a user, edit it
+ if user:
+ res_args = manageiq_user.edit_user(user, name, group, password, email)
+ # if we do not have a user, create it
+ else:
+ res_args = manageiq_user.create_user(userid, name, group, password, email)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py
new file mode 100644
index 00000000..bc3e6dfd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
+# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mas
+short_description: Manage Mac App Store applications with mas-cli
+description:
+ - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
+version_added: '0.2.0'
+author:
+ - Michael Heap (@mheap)
+ - Lukas Bestle (@lukasbestle)
+options:
+ id:
+ description:
+ - The Mac App Store identifier of the app(s) you want to manage.
+ - This can be found by running C(mas search APP_NAME) on your machine.
+ type: list
+ elements: int
+ state:
+ description:
+ - Desired state of the app installation.
+ - The C(absent) value requires root permissions, also see the examples.
+ type: str
+ choices:
+ - absent
+ - latest
+ - present
+ default: present
+ upgrade_all:
+ description:
+ - Upgrade all installed Mac App Store apps.
+ type: bool
+ default: "no"
+ aliases: ["upgrade"]
+requirements:
+ - macOS 10.11+
+ - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
+ - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Install Keynote
+ community.general.mas:
+ id: 409183694
+ state: present
+
+- name: Install Divvy with the mas command installed in /usr/local/bin
+ community.general.mas:
+ id: 413857545
+ state: present
+ environment:
+ PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
+
+- name: Install a list of apps
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+
+- name: Ensure the latest Keynote version is installed
+ community.general.mas:
+ id: 409183694
+ state: latest
+
+- name: Upgrade all installed Mac App Store apps
+ community.general.mas:
+ upgrade_all: yes
+
+- name: Install specific apps and also upgrade all others
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+ upgrade_all: yes
+
+- name: Uninstall Divvy
+ community.general.mas:
+ id: 413857545
+ state: absent
+ become: yes # Uninstallation requires root permissions
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import StrictVersion
+import os
+
+
+class Mas(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ # Initialize data properties
+ self.mas_path = self.module.get_bin_path('mas')
+ self._checked_signin = False
+ self._installed = None # Populated only if needed
+ self._outdated = None # Populated only if needed
+ self.count_install = 0
+ self.count_upgrade = 0
+ self.count_uninstall = 0
+ self.result = {
+ 'changed': False
+ }
+
+ self.check_mas_tool()
+
+ def app_command(self, command, id):
+ ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
+
+ if not self.module.check_mode:
+ if command != 'uninstall':
+ self.check_signin()
+
+ rc, out, err = self.run([command, str(id)])
+ if rc != 0:
+ self.module.fail_json(
+ msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
+ )
+
+ # No error or dry run
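+ # bump the matching counter attribute dynamically,
+ # e.g. count_install for the 'install' command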
+ self.__dict__['count_' + command] += 1
+
+ def check_mas_tool(self):
+ ''' Verifies that the `mas` tool is available in a recent version '''
+
+ # Is the `mas` tool available at all?
+ if not self.mas_path:
+ self.module.fail_json(msg='Required `mas` tool is not installed')
+
+ # Is the version recent enough?
+ rc, out, err = self.run(['version'])
+ if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
+ self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
+
+ def check_signin(self):
+ ''' Verifies that the user is signed in to the Mac App Store '''
+
+ # Only check this once per execution
+ if self._checked_signin:
+ return
+
+ rc, out, err = self.run(['account'])
+ if out.split("\n", 1)[0].rstrip() == 'Not signed in':
+ self.module.fail_json(msg='You must be signed in to the Mac App Store')
+
+ self._checked_signin = True
+
+ def exit(self):
+ ''' Exit with the data we have collected over time '''
+
+ msgs = []
+ if self.count_install > 0:
+ msgs.append('Installed {0} app(s)'.format(self.count_install))
+ if self.count_upgrade > 0:
+ msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
+ if self.count_uninstall > 0:
+ msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
+
+ if msgs:
+ self.result['changed'] = True
+ self.result['msg'] = ', '.join(msgs)
+
+ self.module.exit_json(**self.result)
+
+ def get_current_state(self, command):
+ ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
+
+ rc, raw_apps, err = self.run([command])
+ rows = raw_apps.split("\n")
+ if rows[0] == "No installed apps found":
+ rows = []
+ apps = []
+ for r in rows:
+ # Format: "123456789 App Name"
+ r = r.split(' ', 1)
+ if len(r) == 2:
+ apps.append(int(r[0]))
+
+ return apps
+
+ def installed(self):
+ ''' Returns the list of installed apps '''
+
+ # Populate cache if not already done
+ if self._installed is None:
+ self._installed = self.get_current_state('list')
+
+ return self._installed
+
+ def is_installed(self, id):
+ ''' Checks whether the given app is installed '''
+
+ return int(id) in self.installed()
+
+ def is_outdated(self, id):
+ ''' Checks whether the given app is installed, but outdated '''
+
+ return int(id) in self.outdated()
+
+ def outdated(self):
+ ''' Returns the list of installed, but outdated apps '''
+
+ # Populate cache if not already done
+ if self._outdated is None:
+ self._outdated = self.get_current_state('outdated')
+
+ return self._outdated
+
+ def run(self, cmd):
+ ''' Runs a command of the `mas` tool '''
+
+ cmd.insert(0, self.mas_path)
+ return self.module.run_command(cmd, False)
+
+ def upgrade_all(self):
+ ''' Upgrades all installed apps and sets the correct result data '''
+
+ outdated = self.outdated()
+
+ if not self.module.check_mode:
+ self.check_signin()
+
+ rc, out, err = self.run(['upgrade'])
+ if rc != 0:
+ self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
+
+ self.count_upgrade += len(outdated)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='list', elements='int'),
+ state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
+ upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
+ ),
+ supports_check_mode=True
+ )
+ mas = Mas(module)
+
+ if module.params['id']:
+ apps = module.params['id']
+ else:
+ apps = []
+
+ state = module.params['state']
+ upgrade = module.params['upgrade_all']
+
+ # Run operations on the given app IDs
+ for app in sorted(set(apps)):
+ if state == 'present':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+
+ elif state == 'absent':
+ if mas.is_installed(app):
+ # Ensure we are root
+ if os.getuid() != 0:
+ module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
+
+ mas.app_command('uninstall', app)
+
+ elif state == 'latest':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+ elif mas.is_outdated(app):
+ mas.app_command('upgrade', app)
+
+ # Upgrade all apps if requested
+ mas._outdated = None # Clear cache
+ if upgrade and mas.outdated():
+ mas.upgrade_all()
+
+ # Exit with the collected data
+ mas.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py
new file mode 100644
index 00000000..d94ed2b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: "Jan Christian Grünhage (@jcgruenhage)"
+module: matrix
+short_description: Send notifications to matrix
+description:
+ - This module sends html formatted notifications to matrix rooms.
+options:
+ msg_plain:
+ type: str
+ description:
+ - Plain text form of the message to send to matrix, usually markdown
+ required: true
+ msg_html:
+ type: str
+ description:
+ - HTML form of the message to send to matrix
+ required: true
+ room_id:
+ type: str
+ description:
+ - ID of the room to send the notification to
+ required: true
+ hs_url:
+ type: str
+ description:
+ - URL of the homeserver, where the CS-API is reachable
+ required: true
+ token:
+ type: str
+ description:
+ - Authentication token for the API call. If provided, user_id and password are not required
+ user_id:
+ type: str
+ description:
+ - The user id of the user
+ password:
+ type: str
+ description:
+ - The password to log in with
+requirements:
+ - matrix-client (Python library)
+'''
+
+EXAMPLES = '''
+- name: Send matrix notification with token
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ user_id: "ansible_notification_bot"
+ password: "{{ matrix_auth_password }}"
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+ from matrix_client.client import MatrixClient
+except ImportError:
+ MATRIX_IMP_ERR = traceback.format_exc()
+ matrix_found = False
+else:
+ matrix_found = True
+
+
+def run_module():
+ module_args = dict(
+ msg_plain=dict(type='str', required=True),
+ msg_html=dict(type='str', required=True),
+ room_id=dict(type='str', required=True),
+ hs_url=dict(type='str', required=True),
+ token=dict(type='str', required=False, no_log=True),
+ user_id=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ )
+
+ result = dict(
+ changed=False,
+ message=''
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ mutually_exclusive=[['password', 'token']],
+ required_one_of=[['password', 'token']],
+ required_together=[['user_id', 'password']],
+ supports_check_mode=True
+ )
+
+ if not matrix_found:
+ module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+ if module.check_mode:
+ return result
+
+ # create a client object
+ client = MatrixClient(module.params['hs_url'])
+ if module.params['token'] is not None:
+ client.api.token = module.params['token']
+ else:
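+ # login() stores the returned access token on the client for
+ # subsequent API calls; sync=False skips the initial state sync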
+ client.login(module.params['user_id'], module.params['password'], sync=False)
+
+ # make sure we are in a given room and return a room object for it
+ room = client.join_room(module.params['room_id'])
+ # send an html formatted message
+ room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py
new file mode 100644
index 00000000..579cfa5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Benjamin Jolivot <bjolivot@gmail.com>
+# Inspired by slack module :
+# # (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# # (c) 2016, René Moser <mail@renemoser.net>
+# # (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# # (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+ - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)"
+options:
+ url:
+ type: str
+ description:
+ - Mattermost URL (for example, http://mattermost.yourcompany.com).
+ required: true
+ api_key:
+ type: str
+ description:
+ - Mattermost webhook api key. Log into your mattermost site, go to
+ Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
+ This will give you the full URL. The api_key is the last part:
+ http://mattermost.example.com/hooks/C(API_KEY)
+ required: true
+ text:
+ type: str
+ description:
+ - Text to send. Note that the module does not handle escaping characters.
+ required: true
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+ username:
+ type: str
+ description:
+ - This is the sender of the message (Username Override needs to be enabled by the Mattermost admin, see the Mattermost documentation).
+ default: Ansible
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: https://www.ansible.com/favicon.ico
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: yes
+ type: bool
+'''
+
+EXAMPLES = """
+- name: Send notification message via Mattermost
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+
+- name: Send notification message via Mattermost all options
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+ channel: notifications
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+"""
+
+RETURN = '''
+payload:
+ description: Mattermost payload
+ returned: success
+ type: str
+webhook_url:
+ description: URL the webhook is sent to
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ api_key=dict(type='str', required=True, no_log=True),
+ text=dict(type='str', required=True),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+ # init return dict
+ result = dict(changed=False, msg="OK")
+
+ # define webhook
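+ # e.g. url=http://mattermost.example.com and api_key=my_api_key give
+ # http://mattermost.example.com/hooks/my_api_key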
+ webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+ result['webhook_url'] = webhook_url
+
+ # define payload
+ payload = {}
+ for param in ['text', 'channel', 'username', 'icon_url']:
+ if module.params[param] is not None:
+ payload[param] = module.params[param]
+
+ payload = module.jsonify(payload)
+ result['payload'] = payload
+
+ # http headers
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+
+ # notes:
+ # Nothing is done in check mode: the task passes even if the server is
+ # down and/or the token is invalid, since there is no reliable way to
+ # validate the webhook without actually posting to it.
+
+ # send request if not in test mode
+ if module.check_mode is False:
+ response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+ # something's wrong
+ if info['status'] != 200:
+ result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+ module.fail_json(**result)
+
+ # Looks good
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py
new file mode 100644
index 00000000..03c3d4d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py
@@ -0,0 +1,712 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+ - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+ - Can retrieve snapshots or release versions of the artifact and will resolve the latest available
+ version if one is not specified.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+ - lxml
+ - boto if using a S3 repository (s3://...)
+options:
+ group_id:
+ type: str
+ description:
+ - The Maven groupId coordinate
+ required: true
+ artifact_id:
+ type: str
+ description:
+ - The maven artifactId coordinate
+ required: true
+ version:
+ type: str
+ description:
+ - The maven version coordinate
+ - Mutually exclusive with I(version_by_spec).
+ version_by_spec:
+ type: str
+ description:
+ - The maven dependency version ranges.
+ - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
+ - The range type "(,1.0],[1.2,)" and "(,1.1),(1.1,)" is not supported.
+ - Mutually exclusive with I(version).
+ version_added: '0.2.0'
+ classifier:
+ type: str
+ description:
+ - The maven classifier coordinate
+ extension:
+ type: str
+ description:
+ - The maven type/extension coordinate
+ default: jar
+ repository_url:
+ type: str
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+ - Use file://... if the repository is local, added in version 2.6
+ default: https://repo1.maven.org/maven2
+ username:
+ type: str
+ description:
+ - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3.
+ aliases: [ "aws_secret_key" ]
+ password:
+ type: str
+ description:
+ - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
+ aliases: [ "aws_secret_access_key" ]
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ type: dict
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the uri module, only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail. This option forces the sending of the Basic authentication header
+ upon initial request.
+ default: 'no'
+ type: bool
+ version_added: '0.2.0'
+ dest:
+ type: path
+ description:
+ - The path where the artifact should be written to
+ - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the artifact
+ default: present
+ choices: [present,absent]
+ timeout:
+ type: int
+ description:
+ - Specifies a timeout in seconds for the connection attempt
+ default: 10
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
+ type: bool
+ default: 'yes'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, I(client_key) is not required.
+ type: path
+ version_added: '1.3.0'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '1.3.0'
+ keep_name:
+ description:
+ - If C(yes), the downloaded artifact's name is preserved, i.e. the version number remains part of it.
+ - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+ is defined.
+ type: bool
+ default: 'no'
+ verify_checksum:
+ type: str
+ description:
+ - If C(never), the md5 checksum will never be downloaded and verified.
+ - If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default.
+ - If C(change), the md5 checksum will be downloaded and verified if the destination already exists,
+ to verify if they are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe)
+ downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error
+ if the artifact has not been cached yet, it may fail unexpectedly.
+ If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+ use it to verify integrity after download.
+ - C(always) combines C(download) and C(change).
+ required: false
+ default: 'download'
+ choices: ['never', 'download', 'change', 'always']
+extends_documentation_fragment:
+ - files
+'''
+
+EXAMPLES = '''
+- name: Download the latest version of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+
+- name: Download JUnit 4.11 from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version: 4.11
+ dest: /tmp/junit-4.11.jar
+
+- name: Download an artifact from a private repository requiring authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ username: user
+ password: pass
+ dest: /tmp/library-name-latest.jar
+
+- name: Download an artifact from a private repository requiring certificate authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ client_cert: /path/to/cert.pem
+ client_key: /path/to/key.pem
+ dest: /tmp/library-name-latest.jar
+
+- name: Download a WAR File to the Tomcat webapps directory to be deployed
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: web-app
+ extension: war
+ repository_url: 'https://repo.company.com/maven'
+ dest: /var/lib/tomcat7/webapps/web-app.war
+
+- name: Keep a downloaded artifact's name, i.e. retain the version
+ community.general.maven_artifact:
+ version: latest
+ artifact_id: spring-core
+ group_id: org.springframework
+ dest: /tmp/
+ keep_name: yes
+
+- name: Download the latest version of the JUnit framework artifact from Maven local
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+ repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository"
+
+- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version_by_spec: "[3.8,4.0)"
+ dest: /tmp/
+'''
+
+import hashlib
+import os
+import posixpath
+import shutil
+import io
+import tempfile
+import traceback
+import re
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from re import match
+
+LXML_ETREE_IMP_ERR = None
+try:
+ from lxml import etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+BOTO_IMP_ERR = None
+try:
+ import boto3
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+SEMANTIC_VERSION_IMP_ERR = None
+try:
+ from semantic_version import Version, Spec
+ HAS_SEMANTIC_VERSION = True
+except ImportError:
+ SEMANTIC_VERSION_IMP_ERR = traceback.format_exc()
+ HAS_SEMANTIC_VERSION = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
+ '''
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if not os.path.exists(b_head):
+ if head == dirname:
+ return None, [head]
+ else:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return head, [tail]
+ new_directory_list.append(tail)
+ return pre_existing_dir, new_directory_list
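+
+# Illustrative sketch (assuming only /tmp exists): split_pre_existing_dir('/tmp/a/b')
+# returns ('/tmp', ['a', 'b']) - the deepest existing ancestor plus the
+# directories that still need to be created.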
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+ if new_directory_list:
+ first_sub_dir = new_directory_list.pop(0)
+ if not pre_existing_dir:
+ working_dir = first_sub_dir
+ else:
+ working_dir = os.path.join(pre_existing_dir, first_sub_dir)
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
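+
+# Illustrative sketch: with pre_existing_dir='/tmp' and new_directory_list=['a', 'b'],
+# the requested fs attributes are applied to '/tmp/a' and then '/tmp/a/b', and
+# the function reports whether anything changed.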
+
+
+class Artifact(object):
+ def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'):
+ if not group_id:
+ raise ValueError("group_id must be set")
+ if not artifact_id:
+ raise ValueError("artifact_id must be set")
+
+ self.group_id = group_id
+ self.artifact_id = artifact_id
+ self.version = version
+ self.version_by_spec = version_by_spec
+ self.classifier = classifier
+
+ if not extension:
+ self.extension = "jar"
+ else:
+ self.extension = extension
+
+ def is_snapshot(self):
+ return self.version and self.version.endswith("SNAPSHOT")
+
+ def path(self, with_version=True):
+ base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
+ if with_version and self.version:
+ timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version)
+ if timestamp_version_match:
+ base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT")
+ else:
+ base = posixpath.join(base, self.version)
+ return base
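+
+    # e.g. a timestamped snapshot version like '1.0-20180101.120000-1' maps to
+    # the '<group>/<artifact>/1.0-SNAPSHOT' directory on the repository.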
+
+ def _generate_filename(self):
+ filename = self.artifact_id + "-" + self.classifier + "." + self.extension
+ if not self.classifier:
+ filename = self.artifact_id + "." + self.extension
+ return filename
+
+ def get_filename(self, filename=None):
+ if not filename:
+ filename = self._generate_filename()
+ elif os.path.isdir(filename):
+ filename = os.path.join(filename, self._generate_filename())
+ return filename
+
+ def __str__(self):
+ result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+ if self.classifier:
+ result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+ elif self.extension != "jar":
+ result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+ return result
+
+ @staticmethod
+ def parse(input):
+ parts = input.split(":")
+ if len(parts) >= 3:
+ g = parts[0]
+ a = parts[1]
+            v = parts[-1]
+ t = None
+ c = None
+ if len(parts) == 4:
+ t = parts[2]
+ if len(parts) == 5:
+ t = parts[2]
+ c = parts[3]
+            # c is the classifier and t the extension; version_by_spec is not
+            # part of the coordinate syntax, so pass None for it.
+            return Artifact(g, a, v, None, c, t)
+ else:
+ return None
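+
+    # Illustrative sketch of the coordinate formats parse() accepts
+    # (hypothetical calls, not part of the module):
+    #   Artifact.parse("junit:junit:4.11")              -> group:artifact:version
+    #   Artifact.parse("com.acme:app:war:1.0")          -> group:artifact:extension:version
+    #   Artifact.parse("com.acme:app:jar:sources:1.0")  -> group:artifact:extension:classifier:version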
+
+
+class MavenDownloader:
+ def __init__(self, module, base, local=False, headers=None):
+ self.module = module
+        base = base.rstrip("/")
+ self.base = base
+ self.local = local
+ self.headers = headers
+ self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+ self.latest_version_found = None
+ self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+ def find_version_by_spec(self, artifact):
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+ versions = []
+ for version in original_versions:
+ try:
+ versions.append(Version.coerce(version))
+ except ValueError:
+                # the version string is not valid semantic versioning; skip it
+ pass
+
+ parse_versions_syntax = {
+ # example -> (,1.0]
+ r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+ # example -> 1.0
+ r"^(?P<version>[0-9.]*)$": "~={version}",
+ # example -> [1.0]
+ r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+ # example -> [1.2, 1.3]
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+ # example -> [1.2, 1.3)
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+ # example -> [1.5,)
+ r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+ }
+
+ for regex, spec_format in parse_versions_syntax.items():
+ regex_result = match(regex, artifact.version_by_spec)
+ if regex_result:
+ spec = Spec(spec_format.format(**regex_result.groupdict()))
+ selected_version = spec.select(versions)
+
+ if not selected_version:
+ raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+                # Handle repositories whose first build lacks a patch number (e.g. 3.8 instead of 3.8.0)
+ if str(selected_version) not in original_versions:
+ selected_version.patch = None
+
+ return str(selected_version)
+
+ raise ValueError("The spec version {0} is not supported! ".format(artifact.version_by_spec))
+
+ def find_latest_version_available(self, artifact):
+ if self.latest_version_found:
+ return self.latest_version_found
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+ if v:
+ self.latest_version_found = v[0]
+ return v[0]
+
+ def find_uri_for_artifact(self, artifact):
+ if artifact.version_by_spec:
+ artifact.version = self.find_version_by_spec(artifact)
+
+ if artifact.version == "latest":
+ artifact.version = self.find_latest_version_available(artifact)
+
+ if artifact.is_snapshot():
+ if self.local:
+ return self._uri_for_artifact(artifact, artifact.version)
+ path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+
+ for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+ classifier = snapshotArtifact.xpath("classifier/text()")
+ artifact_classifier = classifier[0] if classifier else ''
+ extension = snapshotArtifact.xpath("extension/text()")
+ artifact_extension = extension[0] if extension else ''
+ if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+ return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+ timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+ if timestamp_xmlpath:
+ timestamp = timestamp_xmlpath[0]
+ build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+ return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+ return self._uri_for_artifact(artifact, artifact.version)
+
+ def _uri_for_artifact(self, artifact, version=None):
+ if artifact.is_snapshot() and not version:
+ raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
+ elif not artifact.is_snapshot():
+ version = artifact.version
+ if artifact.classifier:
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+ # for small files, directly get the full content
+ def _getContent(self, url, failmsg, force=True):
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ with io.open(parsed_url.path, 'rb') as f:
+ return f.read()
+ if force:
+                raise ValueError(failmsg + " because the file can not be found: " + url)
+ return None
+ response = self._request(url, failmsg, force)
+ if response:
+ return response.read()
+ return None
+
+ # only for HTTP request
+ def _request(self, url, failmsg, force=True):
+ url_to_use = url
+ parsed_url = urlparse(url)
+
+ if parsed_url.scheme == 's3':
+ parsed_url = urlparse(url)
+ bucket_name = parsed_url.netloc
+ key_name = parsed_url.path[1:]
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
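+            # the presigned URL only needs to survive the immediate fetch_url()
+            # call below, hence the short 10-second expiry.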
+
+ req_timeout = self.module.params.get('timeout')
+
+ # Hack to add parameters in the way that fetch_url expects
+ self.module.params['url_username'] = self.module.params.get('username', '')
+ self.module.params['url_password'] = self.module.params.get('password', '')
+ self.module.params['http_agent'] = self.user_agent
+
+ response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers)
+ if info['status'] == 200:
+ return response
+ if force:
+            raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
+ return None
+
+ def download(self, tmpdir, artifact, verify_download, filename=None):
+ if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+ artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+ artifact.classifier, artifact.extension)
+ url = self.find_uri_for_artifact(artifact)
+ tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+ try:
+ # copy to temp file
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ shutil.copy2(parsed_url.path, tempname)
+ else:
+ return "Can not find local file: " + parsed_url.path
+ else:
+ response = self._request(url, "Failed to download artifact " + str(artifact))
+ with os.fdopen(tempfd, 'wb') as f:
+ shutil.copyfileobj(response, f)
+
+ if verify_download:
+ invalid_md5 = self.is_invalid_md5(tempname, url)
+ if invalid_md5:
+                    # the download failed verification; discard the temp file and report the error
+ os.remove(tempname)
+ return invalid_md5
+ except Exception as e:
+ os.remove(tempname)
+ raise e
+
+ # all good, now copy temp file to target
+ shutil.move(tempname, artifact.get_filename(filename))
+ return None
+
+ def is_invalid_md5(self, file, remote_url):
+ if os.path.exists(file):
+ local_md5 = self._local_md5(file)
+ if self.local:
+ parsed_url = urlparse(remote_url)
+ remote_md5 = self._local_md5(parsed_url.path)
+ else:
+ try:
+ remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict')
+ except UnicodeError as e:
+ return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e))
+            if not remote_md5:
+                return "Cannot find md5 from " + remote_url
+            try:
+                # the remote md5 file may contain just the checksum, or the
+                # checksum followed by a filename; keep only the checksum.
+                remote_md5 = remote_md5.split(None)[0]
+            except IndexError:
+                # remote_md5 was only whitespace, so keep the original string;
+                # this should not happen since we checked remote_md5 above.
+                pass
+ if local_md5.lower() == remote_md5.lower():
+ return None
+ else:
+ return "Checksum does not match: we computed " + local_md5 + " but the repository states " + remote_md5
+
+ return "Path does not exist: " + file
+
+ def _local_md5(self, file):
+ md5 = hashlib.md5()
+ with io.open(file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ md5.update(chunk)
+ return md5.hexdigest()
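+
+    # Equivalent one-shot form (sketch): hashlib.md5(open(file, 'rb').read()).hexdigest();
+    # the chunked loop above avoids loading large artifacts into memory.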
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ group_id=dict(required=True),
+ artifact_id=dict(required=True),
+ version=dict(default=None),
+ version_by_spec=dict(default=None),
+ classifier=dict(default=''),
+ extension=dict(default='jar'),
+ repository_url=dict(default='https://repo1.maven.org/maven2'),
+ username=dict(default=None, aliases=['aws_secret_key']),
+ password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ headers=dict(type='dict'),
+ force_basic_auth=dict(default=False, type='bool'),
+ state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
+ timeout=dict(default=10, type='int'),
+ dest=dict(type="path", required=True),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ client_cert=dict(type="path", required=False),
+ client_key=dict(type="path", required=False),
+ keep_name=dict(required=False, default=False, type='bool'),
+ verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
+ directory_mode=dict(type='str'), # Used since https://github.com/ansible/ansible/pull/24965, not sure
+ # if this should really be here.
+ ),
+ add_file_common_args=True,
+ mutually_exclusive=([('version', 'version_by_spec')])
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION:
+ module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR)
+
+ repository_url = module.params["repository_url"]
+ if not repository_url:
+ repository_url = "https://repo1.maven.org/maven2"
+ try:
+ parsed_url = urlparse(repository_url)
+ except AttributeError as e:
+ module.fail_json(msg='url parsing went wrong %s' % e)
+
+ local = parsed_url.scheme == "file"
+
+ if parsed_url.scheme == 's3' and not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'),
+ exception=BOTO_IMP_ERR)
+
+ group_id = module.params["group_id"]
+ artifact_id = module.params["artifact_id"]
+ version = module.params["version"]
+ version_by_spec = module.params["version_by_spec"]
+ classifier = module.params["classifier"]
+ extension = module.params["extension"]
+ headers = module.params['headers']
+ state = module.params["state"]
+ dest = module.params["dest"]
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ keep_name = module.params["keep_name"]
+ verify_checksum = module.params["verify_checksum"]
+ verify_download = verify_checksum in ['download', 'always']
+ verify_change = verify_checksum in ['change', 'always']
+
+ downloader = MavenDownloader(module, repository_url, local, headers)
+
+ if not version_by_spec and not version:
+ version = "latest"
+
+ try:
+ artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ changed = False
+ prev_state = "absent"
+
+ if dest.endswith(os.sep):
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
+ os.makedirs(b_dest)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ version_part = version
+ if version == 'latest':
+ version_part = downloader.find_latest_version_available(artifact)
+ elif version_by_spec:
+ version_part = downloader.find_version_by_spec(artifact)
+
+ filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
+ artifact_id=artifact_id,
+ version_part="-{0}".format(version_part) if keep_name else "",
+ classifier="-{0}".format(classifier) if classifier else "",
+ extension=extension
+ )
+ dest = posixpath.join(dest, filename)
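+        # e.g. keep_name=true with classifier 'sources' yields 'junit-4.11-sources.jar';
+        # with keep_name=false and no classifier, just 'junit.jar'.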
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))):
+ prev_state = "present"
+
+ if prev_state == "absent":
+ try:
+ download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest)
+ if download_error is None:
+ changed = True
+ else:
+ module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = dest
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+ if changed:
+ module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
+ extension=extension, repository_url=repository_url, changed=changed)
+ else:
+ module.exit_json(state=state, dest=dest, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
new file mode 100644
index 00000000..6eefe133
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_dns_reload
+author: "Simon Weald (@glitchcrab)"
+short_description: Request reload of Memset's DNS infrastructure.
+notes:
+ - DNS reload requests are a best-effort service provided by Memset; these generally
+ happen every 15 minutes by default, however you can request an immediate reload if
+ later tasks rely on the records being created. An API key generated via the
+ Memset customer control panel is required with the following minimum scope -
+ I(dns.reload). If you wish to poll the job status to wait until the reload has
+ completed, then I(job.status) is also required.
+description:
+ - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ poll:
+ default: false
+ type: bool
+ description:
+ - Boolean value, if set will poll the reload job's status and return
+ when the job has completed (unless the 30 second timeout is reached first).
+ If the timeout is reached then the task will not be marked as failed, but
+ stderr will indicate that the polling failed.
+'''
+
+EXAMPLES = '''
+- name: Submit DNS reload and poll
+ community.general.memset_dns_reload:
+ api_key: 5eb86c9196ab03919abcf03857163741
+ poll: True
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Raw response from the Memset API.
+ returned: always
+ type: complex
+ contains:
+ error:
+ description: Whether the job ended in error state.
+ returned: always
+ type: bool
+ sample: true
+ finished:
+ description: Whether the job completed before the result was returned.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description: Job ID.
+ returned: always
+ type: str
+ sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
+ status:
+ description: Job status.
+ returned: always
+ type: str
+ sample: "DONE"
+ type:
+ description: Job type.
+ returned: always
+ type: str
+ sample: "dns"
+'''
+
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def poll_reload_status(api_key=None, job_id=None, payload=None):
+ '''
+ We poll the `job.status` endpoint every 5 seconds up to a
+ maximum of 6 times. This is a relatively arbitrary choice of
+ timeout, however requests rarely take longer than 15 seconds
+ to complete.
+ '''
+ memset_api, stderr, msg = None, None, None
+ payload['id'] = job_id
+
+ api_method = 'job.status'
+ _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+
+    # poll the job status up to 6 times, five seconds apart, so we give up
+    # after roughly 30 seconds rather than polling forever.
+    counter = 0
+    while not response.json()['finished'] and counter < 6:
+        sleep(5)
+        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+        counter += 1
+ if response.json()['error']:
+ # the reload job was submitted but polling failed. Don't return this as an overall task failure.
+ stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
+ else:
+ memset_api = response.json()
+ msg = None
+
+ return(memset_api, msg, stderr)
+
+
+def reload_dns(args=None):
+ '''
+    DNS reloads are a single API call, so outside of auth errors
+    there is little that can go wrong.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ memset_api, msg, stderr = None, None, None
+
+ api_method = 'dns.reload'
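+    # memset_api_call() returns a (has_failed, msg, response) tuple; as used
+    # in this module, response exposes .json() and .status_code.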
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['memset_api'] = response.json()
+ retvals['msg'] = msg
+ return(retvals)
+
+ # set changed to true if the reload request was accepted.
+ has_changed = True
+ memset_api = msg
+ # empty msg var as we don't want to return the API's json response twice.
+ msg = None
+
+ if args['poll']:
+ # hand off to the poll function.
+ job_id = response.json()['id']
+ memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload)
+
+ # assemble return variables.
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+ for val in ['msg', 'stderr', 'memset_api']:
+        # only return values which have actually been set
+        if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ poll=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = reload_dns(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The Memstore product name (e.g. C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+      description: Dictionary of bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val in ['msg', 'memset_api']:
+        if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The Memstore product name (e.g. C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+      description: Dictionary of bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val in ['msg', 'memset_api']:
+        if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The server product name (e.g. C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+      description: Whether the server has a control panel (e.g. cPanel).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+      description: The type of firewall the server has (e.g. self-managed, managed).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+      description: The server's monitoring level (e.g. basic).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+ description: Whether or not to reboot the server if monitoring detects it down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+ description: The server's Operating System.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+ description: VAT rate for renewal payments
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+      description: Current status of the server (e.g. live, onhold).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+      description: What this server is (e.g. dedicated).
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val in ['msg', 'memset_api']:
+        if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The server product name (e.g. C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+      description: Whether the server has a control panel (e.g. cPanel).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+      description: The type of firewall the server has (e.g. self-managed, managed).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+      description: The server's monitoring level (e.g. basic).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+ description: Whether or not to reboot the server if monitoring detects it down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+ description: The server's Operating System.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+ description: VAT rate for renewal payments
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+      description: Current status of the server (e.g. live, onhold).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+      description: What this server is (e.g. dedicated).
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val in ['msg', 'memset_api']:
+        if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py
new file mode 100644
index 00000000..9ef798bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone
+author: "Simon Weald (@glitchcrab)"
+short_description: Creates and deletes Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+description:
+ - Manage DNS zones in a Memset account.
+options:
+ state:
+ required: true
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ name:
+ required: true
+ description:
+ - The zone nickname; usually the same as the main domain. Ensure this
+ value has at most 250 characters.
+ type: str
+ aliases: [ nickname ]
+ ttl:
+ description:
+ - The default TTL for all records created in the zone. This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
+ type: int
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ force:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Forces deletion of a zone and all zone domains/zone records it contains.
+'''
+
+EXAMPLES = '''
+# Create the zone 'test'
+- name: Create zone
+ community.general.memset_zone:
+ name: test
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ ttl: 300
+ delegate_to: localhost
+
+# Force zone deletion
+- name: Force delete zone
+ community.general.memset_zone:
+ name: test
+ state: absent
+ api_key: 5eb86c9196ab03919abcf03857163741
+ force: true
+ delegate_to: localhost
+'''
+
+RETURN = '''
+memset_api:
+ description: Zone info from the Memset API
+ returned: when state == present
+ type: complex
+ contains:
+ domains:
+ description: List of domains in this zone
+ returned: always
+ type: list
+ sample: []
+ id:
+ description: Zone id
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ nickname:
+ description: Zone name
+ returned: always
+ type: str
+ sample: "example.com"
+ records:
+ description: List of DNS records for domains in this zone
+ returned: always
+ type: list
+ sample: []
+ ttl:
+ description: Default TTL for domains in this zone
+ returned: always
+ type: int
+ sample: 300
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+ # zone domain length must be less than 250 chars.
+ if len(args['name']) > 250:
+ stderr = 'Zone name must be less than 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr, stderr=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+
+ api_method = 'dns.zone_list'
+ has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, counter = check_zone(data=response, name=args['name'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone(args=None, zone_exists=None, payload=None):
+ '''
+ At this point we already know whether the zone exists, so we
+ just need to make the API reflect the desired state.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if not zone_exists:
+ payload['ttl'] = args['ttl']
+ payload['nickname'] = args['name']
+ api_method = 'dns.zone_create'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ else:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ break
+ if zone['ttl'] != args['ttl']:
+ # update the zone if the desired TTL is different.
+ payload['id'] = zone['id']
+ payload['ttl'] = args['ttl']
+ api_method = 'dns.zone_update'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ # populate return var with zone info.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if zone_exists:
+ payload = dict()
+ payload['id'] = zone_id
+ api_method = 'dns.zone_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ memset_api = response.json()
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def delete_zone(args=None, zone_exists=None, payload=None):
+ '''
+ Deletion requires extra sanity checking as the zone cannot be
+ deleted if it contains domains or records. Setting force=true
+ will override this behaviour.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if zone_exists:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ counter = 0
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ counter += 1
+ if counter == 1:
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ zone_id = zone['id']
+ domain_count = len(zone['domains'])
+ record_count = len(zone['records'])
+ if (domain_count > 0 or record_count > 0) and args['force'] is False:
+ # we need to fail out if force was not explicitly set.
+ stderr = 'Zone contains domains or records and force was not used.'
+ has_failed = True
+ has_changed = False
+ module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1)
+ api_method = 'dns.zone_delete'
+ payload['id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice.
+ memset_api = msg
+ msg = None
+ else:
+ # zone names are not unique, so we cannot safely delete the requested
+ # zone at this time.
+ has_failed = True
+ has_changed = False
+ msg = 'Unable to delete zone as multiple zones with the same name exist.'
+ else:
+ has_failed, has_changed = False, False
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = _msg
+
+ return(retvals)
+
+ zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if args['state'] == 'present':
+ has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ elif args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+    # only return the optional values which are actually populated.
+    for name, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[name] = value
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, aliases=['nickname'], type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ force=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
new file mode 100644
index 00000000..4aa0eada
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_domain
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete domains in Memset DNS zones.
+notes:
+ - Zone domains can be thought of as a collection of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
+ - Currently this module can only create one domain at a time. Multiple domains should
+ be created using C(with_items).
+description:
+ - Manage DNS zone domains in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ domain:
+ required: true
+ description:
+ - The zone domain name. Ensure this value has at most 250 characters.
+ type: str
+ aliases: ['name']
+ zone:
+ required: true
+ description:
+ - The zone to add the domain to (this must already exist).
+ type: str
+'''
+
+EXAMPLES = '''
+# Create the zone domain 'test.com'
+- name: Create zone domain
+ community.general.memset_zone_domain:
+ domain: test.com
+ zone: testzone
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+memset_api:
+ description: Domain info from the Memset API
+ returned: when changed or state == present
+ type: complex
+ contains:
+ domain:
+ description: Domain name
+ returned: always
+ type: str
+ sample: "example.com"
+ id:
+ description: Domain ID
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
+ '''
+    # zone domain length must be at most 250 chars
+    if len(args['domain']) > 250:
+        stderr = 'Zone domain must be no more than 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+ has_changed = False
+
+ api_method = 'dns.zone_domain_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
+ '''
+ At this point we already know whether the containing zone exists,
+ so we just need to create the domain (or exit if it already exists).
+ '''
+ has_changed, has_failed = False, False
+ msg = None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ for zone_domain in response.json():
+ if zone_domain['domain'] == args['domain']:
+ # zone domain already exists, nothing to change.
+ has_changed = False
+ break
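+    # note: this 'else' belongs to the 'for' loop (for/else) - it only runs
+    # when the loop finished without hitting 'break', i.e. no existing domain matched.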
+ else:
+ # we need to create the domain
+ api_method = 'dns.zone_domain_create'
+ payload['domain'] = args['domain']
+ payload['zone_id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ return(has_failed, has_changed, msg)
+
+
+def delete_zone_domain(args=None, payload=None):
+ '''
+ Deletion is pretty simple, domains are always unique so we
+ we don't need to do any sanity checking to avoid deleting the
+ wrong thing.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ if domain_exists:
+ api_method = 'dns.zone_domain_delete'
+ payload['domain'] = args['domain']
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = response.json()
+ # unset msg as we don't want to return unnecessary info to the user.
+ msg = None
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete_domain(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ # the zone needs to be unique - this isn't a requirement of Memset's API but it
+ # makes sense in the context of this module.
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone'])
+
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ return(retvals)
+
+ if args['state'] == 'present':
+ has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload)
+
+ if args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    # only return the optional values which are actually populated.
+    for name, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[name] = value
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ domain=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(required=True, type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete_domain(args)
+
+ # we would need to populate the return values with the API's response
+ # in several places so it's easier to do it at the end instead.
+ if not retvals['failed']:
+ if args['state'] == 'present' and not module.check_mode:
+ payload = dict()
+ payload['domain'] = args['domain']
+ api_method = 'dns.zone_domain_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ retvals['memset_api'] = response.json()
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py
new file mode 100644
index 00000000..981d2ac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_record
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete records in Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+    I(dns.zone_record_create), I(dns.zone_record_delete), I(dns.zone_record_list), I(dns.zone_list).
+ - Currently this module can only create one DNS record at a time. Multiple records
+ should be created using C(with_items).
+description:
+ - Manage DNS records in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ address:
+ required: true
+ description:
+ - The address for this record (can be IP or text string depending on record type).
+ type: str
+ aliases: [ ip, data ]
+ priority:
+ description:
+      - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
+ type: int
+ record:
+ required: false
+ description:
+ - The subdomain to create.
+ type: str
+ type:
+ required: true
+ description:
+ - The type of DNS record to create.
+ choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
+ type: str
+ relative:
+ type: bool
+ default: false
+ description:
+      - If set, the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
+        and C(SRV) record types.
+ ttl:
+ description:
+ - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ type: int
+ zone:
+ required: true
+ description:
+      - The name of the zone to add the record to.
+ type: str
+'''
+
+EXAMPLES = '''
+# Create DNS record for www.domain.com
+- name: Create DNS record
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: A
+ record: www
+ address: 1.2.3.4
+ ttl: 300
+ relative: false
+ delegate_to: localhost
+
+# create an SPF record for domain.com
+- name: Create SPF record for domain.com
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all"
+ delegate_to: localhost
+
+# create multiple DNS records
+- name: Create multiple DNS records
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ zone: "{{ item.zone }}"
+ type: "{{ item.type }}"
+ record: "{{ item.record }}"
+ address: "{{ item.address }}"
+ delegate_to: localhost
+ with_items:
+ - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
+ - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
+'''
+
+RETURN = '''
+memset_api:
+ description: Record info from the Memset API.
+ returned: when state == present
+ type: complex
+ contains:
+ address:
+ description: Record content (may be an IP, string or blank depending on record type).
+ returned: always
+ type: str
+ sample: 1.1.1.1
+ id:
+ description: Record ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ priority:
+ description: Priority for C(MX) and C(SRV) records.
+ returned: always
+ type: int
+ sample: 10
+ record:
+ description: Name of record.
+ returned: always
+ type: str
+ sample: "www"
+ relative:
+ description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
+ returned: always
+ type: bool
+ sample: False
+ ttl:
+ description: Record TTL.
+ returned: always
+ type: int
+ sample: 10
+ type:
+ description: Record type.
+ returned: always
+ type: str
+ sample: AAAA
+ zone_id:
+ description: Zone ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+ failed_validation = False
+
+    # priority must be an integer in the range 0 to 999
+    if not 0 <= args['priority'] <= 999:
+        failed_validation = True
+        error = 'Priority must be in the range 0 to 999 (inclusive).'
+    # address value must be at most 250 chars
+    if len(args['address']) > 250:
+        failed_validation = True
+        error = "Address must be no more than 250 characters in length."
+    # record value must be at most 63 chars
+    if args['record']:
+        if len(args['record']) > 63:
+            failed_validation = True
+            error = "Record must be no more than 63 characters in length."
+ # relative isn't used for all record types
+ if args['relative']:
+ if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
+ failed_validation = True
+ error = "Relative is only valid for CNAME, MX, NS and SRV record types."
+ # if any of the above failed then fail early
+ if failed_validation:
+ module.fail_json(failed=True, msg=error)
+
+
+def create_zone_record(args=None, zone_id=None, records=None, payload=None):
+ '''
+ Sanity checking has already occurred prior to this function being
+ called, so we can go ahead and either create or update the record.
+ As defaults are defined for all values in the argument_spec, this
+ may cause some changes to occur as the defaults are enforced (if
+ the user has only configured required variables).
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # assemble the new record.
+ new_record = dict()
+ new_record['zone_id'] = zone_id
+ for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']:
+ new_record[arg] = args[arg]
+
+ # if we have any matches, update them.
+ if records:
+ for zone_record in records:
+ # record exists, add ID to payload.
+ new_record['id'] = zone_record['id']
+ if zone_record == new_record:
+ # nothing to do; record is already correct so we populate
+ # the return var with the existing record's details.
+ memset_api = zone_record
+ return(has_changed, has_failed, memset_api, msg)
+ else:
+ # merge dicts ensuring we change any updated values
+ payload = zone_record.copy()
+ payload.update(new_record)
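+                    # e.g. an existing {'id': 123, 'ttl': 0, ...} merged with a desired
+                    # {'ttl': 300, ...} yields a payload whose 'ttl' is 300 (illustrative values).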
+ api_method = 'dns.zone_record_update'
+ if args['check_mode']:
+ has_changed = True
+ # return the new record to the user in the returned var.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+ else:
+ # no record found, so we need to create it
+ api_method = 'dns.zone_record_create'
+ payload = new_record
+ if args['check_mode']:
+ has_changed = True
+ # populate the return var with the new record's details.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def delete_zone_record(args=None, records=None, payload=None):
+ '''
+ Matching records can be cleanly deleted without affecting other
+ resource types, so this is pretty simple to achieve.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # if we have any matches, delete them.
+ if records:
+ for zone_record in records:
+ if args['check_mode']:
+ has_changed = True
+ return(has_changed, has_failed, memset_api, msg)
+ payload['id'] = zone_record['id']
+ api_method = 'dns.zone_record_delete'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = zone_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete functions.
+ Check mode is integrated into the create or delete functions.
+ '''
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+ retvals, payload = dict(), dict()
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone {0} does not exist." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones." . format(args['zone'])
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ retvals['stderr'] = stderr
+ return(retvals)
+
+    # get a list of all records (as we can't limit records by zone)
+ api_method = 'dns.zone_record_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ # find any matching records
+ records = [record for record in response.json() if record['zone_id'] == zone_id
+ and record['record'] == args['record'] and record['type'] == args['type']]
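+    # a record is treated as the same record when zone, subdomain and type all
+    # match; create_zone_record() then updates address/ttl/priority in place.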
+
+ if args['state'] == 'present':
+ has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
+
+ if args['state'] == 'absent':
+ has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    # only return the optional values which are actually populated.
+    for name, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[name] = value
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ zone=dict(required=True, type='str'),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
+ address=dict(required=True, aliases=['ip', 'data'], type='str'),
+ record=dict(required=False, default='', type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ priority=dict(required=False, default=0, type='int'),
+ relative=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # perform some Memset API-specific validation
+ api_validation(args=args)
+
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py
new file mode 100644
index 00000000..1be917dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Kairo Araujo (@kairoaraujo)
+module: mksysb
+short_description: Generates AIX mksysb rootvg backups.
+description:
+ - This module manages a basic AIX mksysb (image) of rootvg.
+options:
+ backup_crypt_files:
+ description:
+ - Backup encrypted files.
+ type: bool
+ default: "yes"
+ backup_dmapi_fs:
+ description:
+ - Back up DMAPI filesystem files.
+ type: bool
+ default: "yes"
+ create_map_files:
+ description:
+      - Creates new MAP files.
+ type: bool
+ default: "no"
+ exclude_files:
+ description:
+ - Excludes files using C(/etc/rootvg.exclude).
+ type: bool
+ default: "no"
+ exclude_wpar_files:
+ description:
+ - Excludes WPAR files.
+ type: bool
+ default: "no"
+ extended_attrs:
+ description:
+ - Backup extended attributes.
+ type: bool
+ default: "yes"
+ name:
+ type: str
+ description:
+      - Backup name.
+ required: true
+ new_image_data:
+ description:
+      - Creates a new image.data file.
+ type: bool
+ default: "yes"
+ software_packing:
+ description:
+      - Excludes files listed in C(/etc/exclude_packing.rootvg)
+        from software packing.
+ type: bool
+ default: "no"
+ storage_path:
+ type: str
+ description:
+      - Storage path where the mksysb backup will be stored.
+ required: true
+ use_snapshot:
+ description:
+ - Creates backup using snapshots.
+ type: bool
+ default: "no"
+'''
+
+EXAMPLES = '''
+- name: Running a backup image mksysb
+ community.general.mksysb:
+ name: myserver
+ storage_path: /repository/images
+ exclude_files: yes
+ exclude_wpar_files: yes
+'''
+
+RETURN = '''
+changed:
+ description: Return changed for mksysb actions as true or false.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backup_crypt_files=dict(type='bool', default=True),
+ backup_dmapi_fs=dict(type='bool', default=True),
+ create_map_files=dict(type='bool', default=False),
+ exclude_files=dict(type='bool', default=False),
+ exclude_wpar_files=dict(type='bool', default=False),
+ extended_attrs=dict(type='bool', default=True),
+ name=dict(type='str', required=True),
+ new_image_data=dict(type='bool', default=True),
+ software_packing=dict(type='bool', default=False),
+ storage_path=dict(type='str', required=True),
+ use_snapshot=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ # Command options.
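+    # Each dict maps a boolean module parameter onto the mksysb flag it toggles.
+    # Note that some flags are inverted: e.g. '-p' disables software packing, so
+    # it is emitted when software_packing=no.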
+ map_file_opt = {
+ True: '-m',
+ False: ''
+ }
+
+ use_snapshot_opt = {
+ True: '-T',
+ False: ''
+ }
+
+ exclude_files_opt = {
+ True: '-e',
+ False: ''
+ }
+
+ exclude_wpar_opt = {
+ True: '-G',
+ False: ''
+ }
+
+ new_image_data_opt = {
+ True: '-i',
+ False: ''
+ }
+
+ soft_packing_opt = {
+ True: '',
+ False: '-p'
+ }
+
+ extend_attr_opt = {
+ True: '',
+ False: '-a'
+ }
+
+ crypt_files_opt = {
+ True: '',
+ False: '-Z'
+ }
+
+ dmapi_fs_opt = {
+ True: '-a',
+ False: ''
+ }
+
+ backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
+ backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
+ create_map_files = map_file_opt[module.params['create_map_files']]
+ exclude_files = exclude_files_opt[module.params['exclude_files']]
+ exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
+ extended_attrs = extend_attr_opt[module.params['extended_attrs']]
+ name = module.params['name']
+ new_image_data = new_image_data_opt[module.params['new_image_data']]
+ software_packing = soft_packing_opt[module.params['software_packing']]
+ storage_path = module.params['storage_path']
+ use_snapshot = use_snapshot_opt[module.params['use_snapshot']]
+
+ # Validate if storage_path is a valid directory.
+ if os.path.isdir(storage_path):
+ if not module.check_mode:
+ # Generates the mksysb image backup.
+ mksysb_cmd = module.get_bin_path('mksysb', True)
+ rc, mksysb_output, err = module.run_command(
+ "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
+ mksysb_cmd, create_map_files, use_snapshot, exclude_files,
+ exclude_wpar_files, software_packing, extended_attrs,
+ backup_crypt_files, backup_dmapi_fs, new_image_data,
+ storage_path, name))
+ if rc == 0:
+ module.exit_json(changed=True, msg=mksysb_output)
+ else:
+ module.fail_json(msg="mksysb failed.", rc=rc, err=err)
+
+ module.exit_json(changed=True)
+
+ else:
+ module.fail_json(msg="Storage path %s is not valid." % storage_path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py
new file mode 100644
index 00000000..0ab75235
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, David Stygstra <david.stygstra@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: modprobe
+short_description: Load or unload kernel modules
+author:
+ - David Stygstra (@stygstra)
+ - Julien Dauphant (@jdauphant)
+ - Matt Jeffery (@mattjeffery)
+description:
+ - Load or unload kernel modules.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of kernel module to manage.
+ state:
+ type: str
+ description:
+ - Whether the module should be present or absent.
+ choices: [ absent, present ]
+ default: present
+ params:
+ type: str
+ description:
+      - Module parameters.
+ default: ''
+'''
+
+EXAMPLES = '''
+- name: Add the 802.1q module
+ community.general.modprobe:
+ name: 8021q
+ state: present
+
+- name: Add the dummy module
+ community.general.modprobe:
+ name: dummy
+ state: present
+ params: 'numdummies=2'
+'''
+
+import os.path
+import shlex
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ params=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ params = module.params['params']
+ state = module.params['state']
+
+ # FIXME: Adding all parameters as result values is useless
+ result = dict(
+ changed=False,
+ name=name,
+ params=params,
+ state=state,
+ )
+
+ # Check if module is present
+ try:
+ present = False
+ with open('/proc/modules') as modules:
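+            # /proc/modules lists names with underscores, so normalise dashes; the
+            # trailing space avoids prefix matches (e.g. 'dummy' vs 'dummy_irq').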
+ module_name = name.replace('-', '_') + ' '
+ for line in modules:
+ if line.startswith(module_name):
+ present = True
+ break
+ if not present:
+ command = [module.get_bin_path('uname', True), '-r']
+ rc, uname_kernel_release, err = module.run_command(command)
+ module_file = '/' + name + '.ko'
+ builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(),
+ 'modules.builtin')
+ with open(builtin_path) as builtins:
+ for line in builtins:
+                    # strip the trailing newline, otherwise endswith() can never match.
+                    if line.rstrip().endswith(module_file):
+ present = True
+ break
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)
+
+ # Add/remove module as needed
+ if state == 'present':
+ if not present:
+ if not module.check_mode:
+ command = [module.get_bin_path('modprobe', True), name]
+ command.extend(shlex.split(params))
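+                # shlex.split turns a string like 'numdummies=2 debug=1' into
+                # separate arguments (illustrative values).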
+ rc, out, err = module.run_command(command)
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+ elif state == 'absent':
+ if present:
+ if not module.check_mode:
+ rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py
new file mode 100644
index 00000000..1dfe76d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: monit
+short_description: Manage the state of a program monitored via Monit
+description:
+ - Manage the state of a program monitored via I(Monit).
+options:
+ name:
+ description:
+ - The name of the I(monit) program/process to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of service.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ type: str
+ timeout:
+ description:
+ - If there are pending actions for the service monitored by monit, then Ansible will check
+ for up to this many seconds to verify the requested action has been performed.
+ Ansible will sleep for five seconds between each check.
+ default: 300
+ type: int
+author:
+ - Darryl Stoflet (@dstoflet)
+ - Simon Kelly (@snopoke)
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program httpd to be in started state
+ community.general.monit:
+ name: httpd
+ state: started
+'''
+
+import time
+import re
+
+from collections import namedtuple
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import python_2_unicode_compatible
+
+
+STATE_COMMAND_MAP = {
+ 'stopped': 'stop',
+ 'started': 'start',
+ 'monitored': 'monitor',
+ 'unmonitored': 'unmonitor',
+ 'restarted': 'restart'
+}
+
+MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program',
+ 'Network']
+
+
+@python_2_unicode_compatible
+class StatusValue(namedtuple("Status", "value, is_pending")):
+ MISSING = 'missing'
+ OK = 'ok'
+ NOT_MONITORED = 'not_monitored'
+ INITIALIZING = 'initializing'
+ DOES_NOT_EXIST = 'does_not_exist'
+ EXECUTION_FAILED = 'execution_failed'
+ ALL_STATUS = [
+ MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED
+ ]
+
+ def __new__(cls, value, is_pending=False):
+ return super(StatusValue, cls).__new__(cls, value, is_pending)
+
+ def pending(self):
+ return StatusValue(self.value, True)
+
+ def __getattr__(self, item):
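+        # synthesise boolean attributes such as .is_ok or .is_missing by
+        # comparing this status' value against the matching class constant.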
+ if item in ('is_%s' % status for status in self.ALL_STATUS):
+ return self.value == getattr(self, item[3:].upper())
+ raise AttributeError(item)
+
+ def __str__(self):
+ return "%s%s" % (self.value, " (pending)" if self.is_pending else "")
+
+
+class Status(object):
+ MISSING = StatusValue(StatusValue.MISSING)
+ OK = StatusValue(StatusValue.OK)
+ RUNNING = StatusValue(StatusValue.OK)
+ NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED)
+ INITIALIZING = StatusValue(StatusValue.INITIALIZING)
+ DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST)
+ EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED)
+
+
+class Monit(object):
+ def __init__(self, module, monit_bin_path, service_name, timeout):
+ self.module = module
+ self.monit_bin_path = monit_bin_path
+ self.process_name = service_name
+ self.timeout = timeout
+
+ self._monit_version = None
+ self._raw_version = None
+ self._status_change_retry_count = 6
+
+ def monit_version(self):
+ if self._monit_version is None:
+ self._raw_version, version = self._get_monit_version()
+            # use only major and minor versions; even if there are more parts, these should be enough
+ self._monit_version = version[0], version[1]
+ return self._monit_version
+
+ def _get_monit_version(self):
+ rc, out, err = self.module.run_command('%s -V' % self.monit_bin_path, check_rc=True)
+ version_line = out.split('\n')[0]
+ raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group()
+ return raw_version, tuple(map(int, raw_version.split('.')))
+
+ def exit_fail(self, msg, status=None, **kwargs):
+ kwargs.update({
+ 'msg': msg,
+ 'monit_version': self._raw_version,
+ 'process_status': str(status) if status else None,
+ })
+ self.module.fail_json(**kwargs)
+
+ def exit_success(self, state):
+ self.module.exit_json(changed=True, name=self.process_name, state=state)
+
+ @property
+ def command_args(self):
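+        # newer monit releases (the code checks for > 5.18) accept -B (batch
+        # mode), which produces plain, parse-friendly output.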
+ return "-B" if self.monit_version() > (5, 18) else ""
+
+ def get_status(self, validate=False):
+ """Return the status of the process in monit.
+
+ :@param validate: Force monit to re-check the status of the process
+ """
+ monit_command = "validate" if validate else "status"
+        check_rc = not validate  # 'validate' always has rc = 1
+ command = ' '.join([self.monit_bin_path, monit_command, self.command_args, self.process_name])
+ rc, out, err = self.module.run_command(command, check_rc=check_rc)
+ return self._parse_status(out, err)
+
+ def _parse_status(self, output, err):
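+        # abridged example of the 'monit status' output being parsed here
+        # (exact fields vary by monit version):
+        #   Process 'httpd'
+        #     status       OK
+        #     monitoring status  Monitored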
+ escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES])
+ pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name))
+ if not re.search(pattern, output, re.IGNORECASE):
+ return Status.MISSING
+
+ status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE)
+ if not status_val:
+ self.exit_fail("Unable to find process status", stdout=output, stderr=err)
+
+ status_val = status_val[0].strip().upper()
+ if ' | ' in status_val:
+ status_val = status_val.split(' | ')[0]
+ if ' - ' not in status_val:
+ status_val = status_val.replace(' ', '_')
+ return getattr(Status, status_val)
+ else:
+ status_val, substatus = status_val.split(' - ')
+ action, state = substatus.split()
+ if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']:
+ status = Status.OK
+ else:
+ status = Status.NOT_MONITORED
+
+ if state == 'pending':
+ status = status.pending()
+ return status
+
+ def is_process_present(self):
+ rc, out, err = self.module.run_command('%s summary %s' % (self.monit_bin_path, self.command_args), check_rc=True)
+ return bool(re.findall(r'\b%s\b' % self.process_name, out))
+
+ def is_process_running(self):
+ return self.get_status().is_ok
+
+ def run_command(self, command):
+ """Runs a monit command, and returns the new status."""
+ return self.module.run_command('%s %s %s' % (self.monit_bin_path, command, self.process_name), check_rc=True)
+
+ def wait_for_status_change(self, current_status):
+ running_status = self.get_status()
+ if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED:
+ return running_status
+
+ loop_count = 0
+ while running_status.value == current_status.value:
+ if loop_count >= self._status_change_retry_count:
+ self.exit_fail('waited too long for monit to change state', running_status)
+
+ loop_count += 1
+ time.sleep(0.5)
+ validate = loop_count % 2 == 0 # force recheck of status every second try
+ running_status = self.get_status(validate)
+ return running_status
+
+ def wait_for_monit_to_stop_pending(self, current_status=None):
+ """Fails this run if there is no status or it's pending/initializing for timeout"""
+ timeout_time = time.time() + self.timeout
+
+ if not current_status:
+ current_status = self.get_status()
+ waiting_status = [
+ StatusValue.MISSING,
+ StatusValue.INITIALIZING,
+ StatusValue.DOES_NOT_EXIST,
+ ]
+ while current_status.is_pending or (current_status.value in waiting_status):
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for "pending", or "initiating" status to go away', current_status)
+
+ time.sleep(5)
+ current_status = self.get_status(validate=True)
+ return current_status
+
+ def reload(self):
+ rc, out, err = self.module.run_command('%s reload' % self.monit_bin_path)
+ if rc != 0:
+ self.exit_fail('monit reload failed', stdout=out, stderr=err)
+ self.exit_success(state='reloaded')
+
+ def present(self):
+ self.run_command('reload')
+
+ timeout_time = time.time() + self.timeout
+ while not self.is_process_present():
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for process to become "present"')
+
+ time.sleep(5)
+
+ self.exit_success(state='present')
+
+ def change_state(self, state, expected_status, invert_expected=None):
+ current_status = self.get_status()
+ self.run_command(STATE_COMMAND_MAP[state])
+ status = self.wait_for_status_change(current_status)
+ status = self.wait_for_monit_to_stop_pending(status)
+ status_match = status.value == expected_status.value
+ if invert_expected:
+ status_match = not status_match
+ if status_match:
+ self.exit_success(state=state)
+ self.exit_fail('%s process not %s' % (self.process_name, state), status)
+
+ def stop(self):
+ self.change_state('stopped', Status.NOT_MONITORED)
+
+ def unmonitor(self):
+ self.change_state('unmonitored', Status.NOT_MONITORED)
+
+ def restart(self):
+ self.change_state('restarted', Status.OK)
+
+ def start(self):
+ self.change_state('started', Status.OK)
+
+ def monitor(self):
+ self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True)
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ timeout=dict(default=300, type='int'),
+ state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ monit = Monit(module, module.get_bin_path('monit', True), name, timeout)
+
+ def exit_if_check_mode():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if state == 'reloaded':
+ exit_if_check_mode()
+ monit.reload()
+
+ present = monit.is_process_present()
+
+    if not present and state != 'present':
+ module.fail_json(msg='%s process not presently configured with monit' % name, name=name)
+
+ if state == 'present':
+ if present:
+ module.exit_json(changed=False, name=name, state=state)
+ exit_if_check_mode()
+ monit.present()
+
+ monit.wait_for_monit_to_stop_pending()
+ running = monit.is_process_running()
+
+ if running and state in ['started', 'monitored']:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if running and state == 'stopped':
+ exit_if_check_mode()
+ monit.stop()
+
+ if running and state == 'unmonitored':
+ exit_if_check_mode()
+ monit.unmonitor()
+
+ elif state == 'restarted':
+ exit_if_check_mode()
+ monit.restart()
+
+ elif not running and state == 'started':
+ exit_if_check_mode()
+ monit.start()
+
+ elif not running and state == 'monitored':
+ exit_if_check_mode()
+ monit.monitor()
+
+ module.exit_json(changed=False, name=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py
new file mode 100644
index 00000000..3e7938bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+author:
+- "Bruce Pennypacker (@bpennypacker)"
+- "Patrick Humpal (@phumpal)"
+short_description: Notify airbrake about app deployments
+description:
+ - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
+  - Parameter I(token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
+options:
+ project_id:
+ description:
+      - Airbrake PROJECT_ID.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ project_key:
+ description:
+ - Airbrake PROJECT_KEY.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ environment:
+ description:
+ - The airbrake environment name, typically 'production', 'staging', etc.
+ required: true
+ type: str
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ type: str
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ type: str
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision from version control was deployed
+ required: false
+ type: str
+ version:
+ description:
+ - A string identifying what version was deployed
+ required: false
+ type: str
+ version_added: '1.0.0'
+ url:
+ description:
+ - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
+ required: false
+ default: "https://api.airbrake.io/api/v4/projects/"
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ token:
+ description:
+ - This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
+ required: false
+ type: str
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify airbrake about an app deployment
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: '4.2'
+
+- name: Notify airbrake about an app deployment, using git hash as revision
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
+ version: '0.2.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=False, no_log=True, type='str'),
+ project_id=dict(required=False, no_log=True, type='str'),
+ project_key=dict(required=False, no_log=True, type='str'),
+ environment=dict(required=True, type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ version=dict(required=False, type='str'),
+ url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ required_together=[('project_id', 'project_key')],
+ mutually_exclusive=[('project_id', 'token')],
+ )
+
+    # Build the request parameters
+ params = {}
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if module.params["token"]:
+ # v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys
+ if module.params["environment"]:
+ params["deploy[rails_env]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[scm_repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[scm_revision]"] = module.params["revision"]
+
+ # version not supported in v2 API; omit
+
+ module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. Please remove "
+ "it and use 'project_id' and 'project_key' instead",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+
+ params["api_key"] = module.params["token"]
+
+ # Allow sending to Airbrake compliant v2 APIs
+ if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/':
+ url = 'https://api.airbrake.io/deploys.txt'
+ else:
+ url = module.params["url"]
+
+ # Send the data to airbrake
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+
+ if module.params["project_id"] and module.params["project_key"]:
+ # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
+ if module.params["environment"]:
+ params["environment"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["username"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["repository"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["revision"] = module.params["revision"]
+
+ if module.params["version"]:
+ params["version"] = module.params["version"]
+
+ # Build deploy url
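+        # e.g. https://api.airbrake.io/api/v4/projects/12345/deploys?key=AAAAAA
+        # (IDs here are illustrative).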
+ url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
+ json_body = module.jsonify(params)
+
+ # Build header
+ headers = {'Content-Type': 'application/json'}
+
+ # Notify Airbrake of deploy
+ response, info = fetch_url(module, url, data=json_body,
+ headers=headers, method='POST')
+
+ if info['status'] == 200 or info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py
new file mode 100644
index 00000000..ea693eb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+options:
+ component:
+ type: str
+ description:
+ - "The name of the component being deployed. Ex: billing"
+ required: true
+ aliases: ['name']
+ version:
+ type: str
+ description:
+ - The deployment version.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ state:
+ type: str
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ type: str
+ description:
+      - Name of the affected host. Can be a list.
+ - If not specified, it defaults to the remote system's hostname.
+ required: false
+ aliases: ['host']
+ env:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: false
+ owner:
+ type: str
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ type: str
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ type: str
+ description:
+ - Base URL of the API server.
+    required: false
+ default: https://api.bigpanda.io
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ deployment_message:
+ type: str
+ description:
+ - Message about the deployment.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ aliases: ['message']
+ version_added: '0.2.0'
+ source_system:
+ type: str
+ description:
+ - Source system used in the requests to the API
+ default: ansible
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: started
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: finished
+
+# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ hosts: '{{ ansible_hostname }}'
+ state: started
+ delegate_to: localhost
+ register: deployment
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: '{{ deployment.component }}'
+ version: '{{ deployment.version }}'
+ token: '{{ deployment.token }}'
+ state: finished
+ delegate_to: localhost
+'''
+
+# ===========================================
+# Module execution.
+#
+import json
+import socket
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ component=dict(required=True, aliases=['name']),
+ version=dict(required=True),
+ token=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['started', 'finished', 'failed']),
+ hosts=dict(required=False, aliases=['host']),
+ env=dict(required=False),
+ owner=dict(required=False),
+ description=dict(required=False),
+ deployment_message=dict(required=False, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ source_system=dict(required=False, default='ansible'),
+ validate_certs=dict(default=True, type='bool'),
+ url=dict(required=False, default='https://api.bigpanda.io'),
+ ),
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ state = module.params['state']
+ url = module.params['url']
+
+ # Build the common request body
+ body = dict()
+ for k in ('component', 'version', 'hosts'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+ if body.get('hosts') is None:
+ body['hosts'] = [socket.gethostname()]
+
+ if not isinstance(body['hosts'], list):
+ body['hosts'] = [body['hosts']]
+
+ # Insert state-specific attributes to body
+ if state == 'started':
+ for k in ('source_system', 'env', 'owner', 'description'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+
+ request_url = url + '/data/events/deployments/start'
+ else:
+        # 'deployment_message' is the canonical option name; the deprecated
+        # 'message' alias is mapped onto it by Ansible.
+        message = module.params['deployment_message']
+ if message is not None:
+ body['errorMessage'] = message
+
+ if state == 'finished':
+ body['status'] = 'success'
+ else:
+ body['status'] = 'failure'
+
+ request_url = url + '/data/events/deployments/end'
+
+ # Build the deployment object we return
+ deployment = dict(token=token, url=url)
+ deployment.update(body)
+ if 'errorMessage' in deployment:
+ message = deployment.pop('errorMessage')
+ deployment['message'] = message
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True, **deployment)
+
+ # Send the data to bigpanda
+ data = json.dumps(body)
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+ try:
+ response, info = fetch_url(module, request_url, data=data, headers=headers)
+ if info['status'] == 200:
+ module.exit_json(changed=True, **deployment)
+ else:
+ module.fail_json(msg=json.dumps(info))
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py
new file mode 100644
index 00000000..27d23168
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014-2015, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: Create an annotation in Circonus
+description:
+ - Create an annotation event with a given category, title and description. Optionally, a start time, stop time or duration can be provided.
+author: "Nick Harring (@NickatEpic)"
+requirements:
+ - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
+notes:
+ - Check mode isn't supported.
+options:
+ api_key:
+ type: str
+ description:
+ - Circonus API key
+ required: true
+ category:
+ type: str
+ description:
+ - Annotation Category
+ required: true
+ description:
+ type: str
+ description:
+ - Description of annotation
+ required: true
+ title:
+ type: str
+ description:
+ - Title of annotation
+ required: true
+ start:
+ type: int
+ description:
+ - Unix timestamp of event start
+ - If not specified, it defaults to I(now).
+ stop:
+ type: int
+ description:
+ - Unix timestamp of event end
+ - If not specified, it defaults to I(now) + I(duration).
+ duration:
+ type: int
+ description:
+ - Duration in seconds of annotation
+ default: 0
+'''
+EXAMPLES = '''
+- name: Create a simple annotation event, with start and stop times defaulting to now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+
+- name: Create an annotation with a duration of 5 minutes and a default start time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ duration: 300
+
+- name: Create an annotation with explicit start and stop timestamps
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ start: 1395940006
+ stop: 1395954407
+'''
+
+RETURN = '''
+annotation:
+ description: details about the created annotation
+ returned: success
+ type: complex
+ contains:
+ _cid:
+ description: annotation identifier
+ returned: success
+ type: str
+ sample: /annotation/100000
+ _created:
+ description: creation timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified:
+ description: last modification timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified_by:
+ description: last modified by
+ returned: success
+ type: str
+ sample: /user/1000
+ category:
+ description: category of the created annotation
+ returned: success
+ type: str
+ sample: alerts
+ title:
+ description: title of the created annotation
+ returned: success
+ type: str
+ sample: WARNING
+ description:
+ description: description of the created annotation
+ returned: success
+ type: str
+ sample: Host is down.
+ start:
+ description: timestamp when the annotation starts
+ returned: success
+ type: int
+ sample: 1395940006
+ stop:
+ description: timestamp when the annotation ends
+ returned: success
+ type: int
+ sample: 1395954407
+ rel_metrics:
+ description: Array of metrics related to this annotation; each metric is a string.
+ returned: success
+ type: list
+ sample:
+ - 54321_kbps
+'''
+import json
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def check_requests_dep(module):
+ """Check if an adequate requests version is available"""
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ else:
+ required_version = '2.0.0' if PY3 else '1.0.0'
+ if LooseVersion(requests.__version__) < LooseVersion(required_version):
+ module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
+
+
+def post_annotation(annotation, api_key):
+ ''' Takes annotation dict and api_key string'''
+ base_url = 'https://api.circonus.com/v2'
+ annotate_post_endpoint = '/annotation'
+ resp = requests.post(base_url + annotate_post_endpoint,
+ headers=build_headers(api_key), data=json.dumps(annotation))
+ resp.raise_for_status()
+ return resp
+
+
+def create_annotation(module):
+ ''' Takes ansible module object '''
+ annotation = {}
+ duration = module.params['duration']
+ if module.params['start'] is not None:
+ start = module.params['start']
+ else:
+ start = int(time.time())
+ if module.params['stop'] is not None:
+ stop = module.params['stop']
+ else:
+ stop = int(time.time()) + duration
+ annotation['start'] = start
+ annotation['stop'] = stop
+ annotation['category'] = module.params['category']
+ annotation['description'] = module.params['description']
+ annotation['title'] = module.params['title']
+ return annotation
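+
+# Illustrative sketch, not part of the module: with duration=300 and neither
+# start nor stop supplied, and time.time() returning 1395940006, the function
+# above would build:
+#
+# {'start': 1395940006, 'stop': 1395940306, 'category': 'deploys',
+# 'title': 'App Config Change', 'description': '...'}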
+
+
+def build_headers(api_token):
+ '''Takes api token, returns headers with it included.'''
+ headers = {'X-Circonus-App-Name': 'ansible',
+ 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+ 'Accept': 'application/json'}
+ return headers
+
+
+def main():
+ '''Main function, dispatches logic'''
+ module = AnsibleModule(
+ argument_spec=dict(
+ start=dict(type='int'),
+ stop=dict(type='int'),
+ category=dict(required=True),
+ title=dict(required=True),
+ description=dict(required=True),
+ duration=dict(default=0, type='int'),
+ api_key=dict(required=True, no_log=True)
+ )
+ )
+
+ check_requests_dep(module)
+
+ annotation = create_annotation(module)
+ try:
+ resp = post_annotation(annotation, module.params['api_key'])
+ except requests.exceptions.RequestException as e:
+ module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=True, annotation=resp.json())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py
new file mode 100644
index 00000000..a6327dde
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_event
+short_description: Posts events to Datadog service
+description:
+- "Allows to post events to Datadog (www.datadoghq.com) service."
+- "Uses http://docs.datadoghq.com/api/#events API."
+author:
+- "Artūras `arturaz` Šlajus (@arturaz)"
+- "Naoya Nakazawa (@n0ts)"
+options:
+ api_key:
+ type: str
+ description: ["Your DataDog API key."]
+ required: true
+ app_key:
+ type: str
+ description: ["Your DataDog app key."]
+ required: true
+ title:
+ type: str
+ description: ["The event title."]
+ required: true
+ text:
+ type: str
+ description: ["The body of the event."]
+ required: true
+ date_happened:
+ type: int
+ description:
+ - POSIX timestamp of the event.
+ - Default value is now.
+ priority:
+ type: str
+ description: ["The priority of the event."]
+ default: normal
+ choices: [normal, low]
+ host:
+ type: str
+ description:
+ - Host name to associate with the event.
+ - If not specified, it defaults to the remote system's hostname.
+ tags:
+ type: list
+ description: ["Comma separated list of tags to apply to the event."]
+ alert_type:
+ type: str
+ description: ["Type of alert."]
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ type: str
+ description: ["An arbitrary string to use for aggregation."]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Post an event with low priority
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ priority: low
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+
+- name: Post an event with several tags
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+ tags: 'aa,bb,#host:{{ inventory_hostname }}'
+'''
+
+import platform
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
+ title=dict(required=True),
+ text=dict(required=True),
+ date_happened=dict(required=False, default=None, type='int'),
+ priority=dict(
+ required=False, default='normal', choices=['normal', 'low']
+ ),
+ host=dict(required=False, default=None),
+ tags=dict(required=False, default=None, type='list'),
+ alert_type=dict(
+ required=False, default='info',
+ choices=['error', 'warning', 'info', 'success']
+ ),
+ aggregation_key=dict(required=False, default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ _post_event(module)
+
+
+def _post_event(module):
+ try:
+ if module.params['host'] is None:
+ module.params['host'] = platform.node().split('.')[0]
+ msg = api.Event.create(title=module.params['title'],
+ text=module.params['text'],
+ host=module.params['host'],
+ tags=module.params['tags'],
+ priority=module.params['priority'],
+ alert_type=module.params['alert_type'],
+ aggregation_key=module.params['aggregation_key'],
+ source_type_name='ansible')
+ if msg['status'] != 'ok':
+ module.fail_json(msg=msg)
+
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
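+
+# Illustrative sketch, not part of the module: when no host parameter is
+# given, _post_event() falls back to the unqualified node name, e.g.
+# (hypothetical value):
+#
+# >>> platform.node()
+# 'web01.example.com'
+# >>> platform.node().split('.')[0]
+# 'web01'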
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py
new file mode 100644
index 00000000..f6020c2b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_monitor
+short_description: Manages Datadog monitors
+description:
+ - Manages monitors within Datadog.
+ - Options as described on https://docs.datadoghq.com/api/.
+author: Sebastian Kornehl (@skornehl)
+requirements: [datadog]
+options:
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API. Default value is C(https://api.datadoghq.com).
+ - This value can also be set with the C(DATADOG_HOST) environment variable.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the monitor.
+ required: true
+ choices: ['present', 'absent', 'mute', 'unmute']
+ type: str
+ tags:
+ description:
+ - A list of tags to associate with your monitor when creating or updating.
+ - This can help you categorize and filter monitors.
+ type: list
+ type:
+ description:
+ - The type of the monitor.
+ choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert']
+ type: str
+ query:
+ description:
+ - The monitor query to notify on.
+ - Syntax varies depending on what type of monitor you are creating.
+ type: str
+ name:
+ description:
+ - The name of the alert.
+ required: true
+ type: str
+ notification_message:
+ description:
+ - A message to include with notifications for this monitor.
+ - Email notifications can be sent to specific users by using the same '@username' notation as events.
+ - Monitor message template variables can be accessed by using double square brackets, i.e. '[[' and ']]'.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ type: str
+ aliases: [ 'message' ]
+ silenced:
+ type: dict
+ description:
+ - Dictionary of scopes to silence, with timestamps or None.
+ - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
+ default: ""
+ notify_no_data:
+ description:
+ - Whether this monitor will notify when data stops reporting.
+ type: bool
+ default: 'no'
+ no_data_timeframe:
+ description:
+ - The number of minutes before a monitor will notify when data stops reporting.
+ - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
+ - If not specified, it defaults to 2x the timeframe for metric alerts and 2 minutes for service checks.
+ type: str
+ timeout_h:
+ description:
+ - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+ type: str
+ renotify_interval:
+ description:
+ - The number of minutes after the last notification before a monitor will re-notify on the current status.
+ - It will only re-notify if it is not resolved.
+ type: str
+ escalation_message:
+ description:
+ - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere.
+ - Not applicable if I(renotify_interval=None).
+ type: str
+ notify_audit:
+ description:
+ - Whether tagged users will be notified on changes to this monitor.
+ type: bool
+ default: 'no'
+ thresholds:
+ type: dict
+ description:
+ - A dictionary of thresholds by status.
+ - Only available for service checks and metric alerts.
+ - Because each of them can have multiple thresholds, we do not define them directly in the query.
+ - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
+ locked:
+ description:
+ - Whether changes to this monitor should be restricted to the creator or admins.
+ type: bool
+ default: 'no'
+ require_full_window:
+ description:
+ - Whether this monitor needs a full window of data before it gets evaluated.
+ - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
+ type: bool
+ new_host_delay:
+ description:
+ - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+ - This gives the host time to fully initialize.
+ type: str
+ evaluation_delay:
+ description:
+ - Time to delay evaluation (in seconds).
+ - Effective for sparse values.
+ type: str
+ id:
+ description:
+ - The ID of the alert.
+ - If set, will be used instead of the name to locate the alert.
+ type: str
+ include_tags:
+ description:
+ - Whether notifications from this monitor automatically insert its triggering tags into the title.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+'''
+
+EXAMPLES = '''
+- name: Create a metric monitor
+ community.general.datadog_monitor:
+ type: "metric alert"
+ name: "Test monitor"
+ state: "present"
+ query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
+ notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Deletes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Mutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "mute"
+ silenced: '{"*":None}'
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Unmutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "unmute"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Use datadoghq.eu platform instead of datadoghq.com
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_host: https://api.datadoghq.eu
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+'''
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_host=dict(required=False),
+ app_key=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
+ type=dict(required=False, choices=['metric alert', 'service check', 'event alert', 'process alert', 'log alert']),
+ name=dict(required=True),
+ query=dict(required=False),
+ notification_message=dict(required=False, no_log=True, default=None, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ silenced=dict(required=False, default=None, type='dict'),
+ notify_no_data=dict(required=False, default=False, type='bool'),
+ no_data_timeframe=dict(required=False, default=None),
+ timeout_h=dict(required=False, default=None),
+ renotify_interval=dict(required=False, default=None),
+ escalation_message=dict(required=False, default=None),
+ notify_audit=dict(required=False, default=False, type='bool'),
+ thresholds=dict(required=False, type='dict', default=None),
+ tags=dict(required=False, type='list', default=None),
+ locked=dict(required=False, default=False, type='bool'),
+ require_full_window=dict(required=False, default=None, type='bool'),
+ new_host_delay=dict(required=False, default=None),
+ evaluation_delay=dict(required=False, default=None),
+ id=dict(required=False),
+ include_tags=dict(required=False, default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ if 'message' in module.params:
+ module.fail_json(msg="'message' is reserved keyword, please change this parameter to 'notification_message'")
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'api_host': module.params['api_host'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ # Check if api_key and app_key is correct or not
+ # if not, then fail here.
+ response = api.Monitor.get_all()
+ if isinstance(response, dict):
+ msg = response.get('errors', None)
+ if msg:
+ module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
+
+ if module.params['state'] == 'present':
+ install_monitor(module)
+ elif module.params['state'] == 'absent':
+ delete_monitor(module)
+ elif module.params['state'] == 'mute':
+ mute_monitor(module)
+ elif module.params['state'] == 'unmute':
+ unmute_monitor(module)
+
+
+def _fix_template_vars(message):
+ if message:
+ return message.replace('[[', '{{').replace(']]', '}}')
+ return message
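+
+# Illustrative sketch, not part of the module: the double-square-bracket
+# notation documented above is translated back into Datadog template
+# variables, e.g.:
+#
+# >>> _fix_template_vars('Host [[host.name]] with IP [[host.ip]] is down')
+# 'Host {{host.name}} with IP {{host.ip}} is down'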
+
+
+def _get_monitor(module):
+ if module.params['id'] is not None:
+ monitor = api.Monitor.get(module.params['id'])
+ if 'errors' in monitor:
+ module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+ return monitor
+ else:
+ monitors = api.Monitor.get_all()
+ for monitor in monitors:
+ if monitor['name'] == _fix_template_vars(module.params['name']):
+ return monitor
+ return {}
+
+
+def _post_monitor(module, options):
+ try:
+ kwargs = dict(type=module.params['type'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.create(**kwargs)
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def _equal_dicts(a, b, ignore_keys):
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
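+
+# Illustrative sketch, not part of the module: keys listed in ignore_keys are
+# dropped from both sides before comparing, e.g.:
+#
+# >>> _equal_dicts({'name': 'm1', 'modified': 1},
+# ... {'name': 'm1', 'modified': 2}, ['modified'])
+# True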
+
+
+def _update_monitor(module, monitor, options):
+ try:
+ kwargs = dict(id=monitor['id'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.update(**kwargs)
+
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
+ module.exit_json(changed=False, msg=msg)
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def install_monitor(module):
+ options = {
+ "silenced": module.params['silenced'],
+ "notify_no_data": module.boolean(module.params['notify_no_data']),
+ "no_data_timeframe": module.params['no_data_timeframe'],
+ "timeout_h": module.params['timeout_h'],
+ "renotify_interval": module.params['renotify_interval'],
+ "escalation_message": module.params['escalation_message'],
+ "notify_audit": module.boolean(module.params['notify_audit']),
+ "locked": module.boolean(module.params['locked']),
+ "require_full_window": module.params['require_full_window'],
+ "new_host_delay": module.params['new_host_delay'],
+ "evaluation_delay": module.params['evaluation_delay'],
+ "include_tags": module.params['include_tags'],
+ }
+
+ if module.params['type'] == "service check":
+ options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+ if module.params['type'] in ["metric alert", "log alert"] and module.params['thresholds'] is not None:
+ options["thresholds"] = module.params['thresholds']
+
+ monitor = _get_monitor(module)
+ if not monitor:
+ _post_monitor(module, options)
+ else:
+ _update_monitor(module, monitor, options)
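+
+# Illustrative sketch, not part of the module: for a service check created
+# without explicit thresholds, install_monitor() sends the default
+#
+# options['thresholds'] == {'ok': 1, 'critical': 1, 'warning': 1}
+#
+# while metric and log alerts only carry thresholds when the user sets them.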
+
+
+def delete_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.delete(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def mute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif monitor['options']['silenced']:
+ module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
+ elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+ module.exit_json(changed=False)
+ try:
+ if module.params['silenced'] is None or module.params['silenced'] == "":
+ msg = api.Monitor.mute(id=monitor['id'])
+ else:
+ msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def unmute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif not monitor['options']['silenced']:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.unmute(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py
new file mode 100644
index 00000000..0b96af04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ environment:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ type: str
+ description:
+ - The username of the person doing the deployment
+ repo:
+ type: str
+ description:
+ - URL of the project repository
+ revision:
+ type: str
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+'''
+
+EXAMPLES = '''
+- name: Notify Honeybadger.io about an app deployment
+ community.general.honeybadger_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: b6826b8
+ repo: 'git@github.com:user/repo.git'
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[environment]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
+
+ url = module.params.get('url')
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
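+
+# Illustrative sketch, not part of the module: the form-encoded payload built
+# above for the staging example would look roughly like this (key order may
+# differ):
+#
+# >>> urlencode({'deploy[environment]': 'staging', 'api_key': 'AAAAAA'})
+# 'deploy%5Benvironment%5D=staging&api_key=AAAAAA'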
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py
new file mode 100644
index 00000000..b59c0e11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Copyright (c) 2018, Ansible Project
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_feature
+
+short_description: Manage Icinga2 feature
+description:
+ - This module can be used to enable or disable an Icinga2 feature.
+author: "Loic Blot (@nerzhul)"
+options:
+ name:
+ type: str
+ description:
+ - This is the feature name to enable or disable.
+ required: True
+ state:
+ type: str
+ description:
+ - If set to C(present) and feature is disabled, then feature is enabled.
+ - If set to C(present) and feature is already enabled, then nothing is changed.
+ - If set to C(absent) and feature is enabled, then feature is disabled.
+ - If set to C(absent) and feature is already disabled, then nothing is changed.
+ choices: [ "present", "absent" ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Enable ido-pgsql feature
+ community.general.icinga2_feature:
+ name: ido-pgsql
+ state: present
+
+- name: Disable api feature
+ community.general.icinga2_feature:
+ name: api
+ state: absent
+'''
+
+RETURN = '''
+#
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Icinga2FeatureHelper:
+ def __init__(self, module):
+ self.module = module
+ self._icinga2 = module.get_bin_path('icinga2', True)
+ self.feature_name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ def _exec(self, args):
+ cmd = [self._icinga2, 'feature']
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return rc, out
+
+ def manage(self):
+ rc, out = self._exec(["list"])
+ if rc != 0:
+ self.module.fail_json(msg="Unable to list icinga2 features. "
+ "Ensure icinga2 is installed and present in binary path.")
+
+ # If feature is already in good state, just exit
+ if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \
+ (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"):
+ self.module.exit_json(changed=False)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ feature_enable_str = "enable" if self.state == "present" else "disable"
+
+ rc, out = self._exec([feature_enable_str, self.feature_name])
+
+ change_applied = False
+ if self.state == "present":
+ if rc != 0:
+ self.module.fail_json(msg="Failed to %s feature %s."
+ " icinga2 command returned %s" % (feature_enable_str,
+ self.feature_name,
+ out))
+
+ if re.search("already enabled", out) is None:
+ change_applied = True
+ else:
+ if rc == 0:
+ change_applied = True
+ # RC is not 0 for this already disabled feature, handle it as no change applied
+ elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out):
+ change_applied = False
+ else:
+ self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out)
+
+ self.module.exit_json(changed=change_applied)
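+
+ # Illustrative sketch, not part of the class: the regular expressions above
+ # match `icinga2 feature list` output of roughly this (assumed) shape:
+ #
+ # Disabled features: compatlog debuglog gelf
+ # Enabled features: api checker mainlog notification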
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=["present", "absent"], default="present")
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+ Icinga2FeatureHelper(module).manage()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py
new file mode 100644
index 00000000..65c95812
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This module is proudly sponsored by CGI (www.cgi.com) and
+# KPN (www.kpn.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_host
+short_description: Manage a host in Icinga2
+description:
+ - "Add or remove a host to Icinga2 through the API."
+ - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
+author: "Jurgen Brand (@t794104)"
+options:
+ url:
+ type: str
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ url_username:
+ type: str
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ url_password:
+ type: str
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the uri module, only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail. This option forces the sending of the Basic authentication header
+ upon the initial request.
+ type: bool
+ default: 'no'
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client
+ authentication. This file can also include the key as well, and if
+ the key is included, C(client_key) is not required.
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL
+ client authentication. If C(client_cert) contains both the certificate
+ and key, this option is not required.
+ state:
+ type: str
+ description:
+ - Apply feature state.
+ choices: [ "present", "absent" ]
+ default: present
+ name:
+ type: str
+ description:
+ - Name used to create / delete the host. This does not need to be the FQDN, but it does need to be unique.
+ required: true
+ zone:
+ type: str
+ description:
+ - The zone from where this host should be polled.
+ template:
+ type: str
+ description:
+ - The template used to define the host.
+ - Template cannot be modified after object creation.
+ check_command:
+ type: str
+ description:
+ - The command used to check if the host is alive.
+ default: "hostalive"
+ display_name:
+ type: str
+ description:
+ - The name used to display the host.
+ - If not specified, it defaults to the value of the I(name) parameter.
+ ip:
+ type: str
+ description:
+ - The IP address of the host.
+ required: true
+ variables:
+ type: dict
+ description:
+ - Dictionary of variables.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Add host to icinga
+ community.general.icinga2_host:
+ url: "https://icinga2.example.com"
+ url_username: "ansible"
+ url_password: "a_secret"
+ state: present
+ name: "{{ ansible_fqdn }}"
+ ip: "{{ ansible_default_ipv4.address }}"
+ variables:
+ foo: "bar"
+ delegate_to: 127.0.0.1
+'''
+
+RETURN = '''
+name:
+ description: The name used to create, modify or delete the host
+ type: str
+ returned: always
+data:
+ description: The data structure used for create, modify or delete of the host
+ type: dict
+ returned: always
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+# ===========================================
+# Icinga2 API class
+#
+class icinga2_api:
+ module = None
+
+ def __init__(self, module):
+ self.module = module
+
+ def call_url(self, path, data='', method='GET'):
+ headers = {
+ 'Accept': 'application/json',
+ 'X-HTTP-Method-Override': method,
+ }
+ url = self.module.params.get("url") + "/" + path
+ rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy'])
+ body = ''
+ if rsp:
+ body = json.loads(rsp.read())
+ if info['status'] >= 400:
+ body = info['body']
+ return {'code': info['status'], 'data': body}
+
+ def check_connection(self):
+ ret = self.call_url('v1/status')
+ if ret['code'] == 200:
+ return True
+ return False
+
+ def exists(self, hostname):
+ data = {
+ "filter": "match(\"" + hostname + "\", host.name)",
+ }
+ ret = self.call_url(
+ path="v1/objects/hosts",
+ data=self.module.jsonify(data)
+ )
+ if ret['code'] == 200:
+ if len(ret['data']['results']) == 1:
+ return True
+ return False
+
+ def create(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="PUT"
+ )
+ return ret
+
+ def delete(self, hostname):
+ data = {"cascade": 1}
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="DELETE"
+ )
+ return ret
+
+ def modify(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="POST"
+ )
+ return ret
+
+ def diff(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ method="GET"
+ )
+ changed = False
+ ic_data = ret['data']['results'][0]
+ for key in data['attrs']:
+ if key not in ic_data['attrs'].keys():
+ changed = True
+ elif data['attrs'][key] != ic_data['attrs'][key]:
+ changed = True
+ return changed
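+
+ # Illustrative sketch, not part of the class: diff() only compares the
+ # attrs present in the desired data, e.g. with
+ #
+ # data = {'attrs': {'address': '10.0.0.5', 'check_command': 'hostalive'}}
+ #
+ # a host whose current address differs yields True, while attributes that
+ # exist only on the server side are ignored.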
+
+
+# ===========================================
+# Module execution.
+#
+def main():
+ # use the predefined argument spec for url
+ argument_spec = url_argument_spec()
+ # add our own arguments
+ argument_spec.update(
+ state=dict(default="present", choices=["absent", "present"]),
+ name=dict(required=True, aliases=['host']),
+ zone=dict(),
+ template=dict(default=None),
+ check_command=dict(default="hostalive"),
+ display_name=dict(default=None),
+ ip=dict(required=True),
+ variables=dict(type='dict', default=None),
+ )
+
+ # Define the main module
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ name = module.params["name"]
+ zone = module.params["zone"]
+ template = [name]
+ if module.params["template"]:
+ template.append(module.params["template"])
+ check_command = module.params["check_command"]
+ ip = module.params["ip"]
+ display_name = module.params["display_name"]
+ if not display_name:
+ display_name = name
+ variables = module.params["variables"]
+
+ try:
+ icinga = icinga2_api(module=module)
+ icinga.check_connection()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
+
+ data = {
+ 'attrs': {
+ 'address': ip,
+ 'display_name': display_name,
+ 'check_command': check_command,
+ 'zone': zone,
+ 'vars': {
+ 'made_by': "ansible",
+ },
+ 'templates': template,
+ }
+ }
+
+ if variables:
+ data['attrs']['vars'].update(variables)
+
+ changed = False
+ if icinga.exists(name):
+ if state == "absent":
+ if module.check_mode:
+ module.exit_json(changed=True, name=name, data=data)
+ else:
+ try:
+ ret = icinga.delete(name)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception deleting host: " + str(e))
+
+ elif icinga.diff(name, data):
+ if module.check_mode:
+ module.exit_json(changed=True, name=name, data=data)
+
+ # Template attribute is not allowed in modification
+ del data['attrs']['templates']
+
+ ret = icinga.modify(name, data)
+
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data']))
+
+ else:
+ if state == "present":
+ if module.check_mode:
+ changed = True
+ else:
+ try:
+ ret = icinga.create(name, data)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception creating host: " + str(e))
+
+ module.exit_json(changed=changed, name=name, data=data)
+
+
+# import module snippets
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py
new file mode 100644
index 00000000..d0fd406d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) Seth Edwards, 2014
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: Create an annotation in Librato
+description:
+ - Create an annotation event on the given annotation stream I(name). If the annotation stream does not exist, it will be created automatically.
+author: "Seth Edwards (@Sedward)"
+requirements: []
+options:
+ user:
+ type: str
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ type: str
+ description:
+ - Librato account api key
+ required: true
+ name:
+ type: str
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ type: str
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ type: str
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ type: str
+ description:
+ - The description contains extra metadata about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ type: list
+ description:
+ - See examples
+'''
+
+EXAMPLES = '''
+- name: Create a simple annotation event with a source
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ source: foo.bar
+ description: This is a detailed description of the config change
+
+- name: Create an annotation that includes a link
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: code.deploy
+ title: app code deploy
+ description: this is a detailed description of a deployment
+ links:
+ - rel: example
+ href: http://www.example.com/deploy
+
+- name: Create an annotation with a start_time and end_time
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: maintenance
+ title: Maintenance window
+ description: This is a detailed description of maintenance
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def post_annotation(module):
+ user = module.params['user']
+ api_key = module.params['api_key']
+ name = module.params['name']
+ title = module.params['title']
+
+ url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
+ params = {}
+ params['title'] = title
+
+ if module.params['source'] is not None:
+ params['source'] = module.params['source']
+ if module.params['description'] is not None:
+ params['description'] = module.params['description']
+ if module.params['start_time'] is not None:
+ params['start_time'] = module.params['start_time']
+ if module.params['end_time'] is not None:
+ params['end_time'] = module.params['end_time']
+ if module.params['links'] is not None:
+ params['links'] = module.params['links']
+
+ json_body = module.jsonify(params)
+
+ headers = {}
+ headers['Content-Type'] = 'application/json'
+
+ # Hack send parameters the way fetch_url wants them
+ module.params['url_username'] = user
+ module.params['url_password'] = api_key
+ response, info = fetch_url(module, url, data=json_body, headers=headers)
+ response_code = str(info['status'])
+ response_body = info['body']
+ if info['status'] != 201:
+ if info['status'] >= 400:
+ module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
+ else:
+ module.fail_json(msg="Request Failed. Response code: " + response_code)
+ response = response.read()
+ module.exit_json(changed=True, annotation=response)
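+
+# Illustrative sketch, not part of the module: setting url_username and
+# url_password above makes fetch_url authenticate with HTTP basic auth, so
+# the request is roughly (hypothetical values):
+#
+# POST https://metrics-api.librato.com/v1/annotations/code.deploy
+# Authorization: Basic <base64 of 'user@example.com:XXXXXXXXXXXXXXXXX'>
+# Content-Type: application/json
+#
+# {"title": "app code deploy", "description": "..."}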
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ name=dict(required=False),
+ title=dict(required=True),
+ source=dict(required=False),
+ description=dict(required=False),
+ start_time=dict(required=False, default=None, type='int'),
+ end_time=dict(required=False, default=None, type='int'),
+ links=dict(type='list')
+ )
+ )
+
+ post_annotation(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py
new file mode 100644
index 00000000..8f39fb51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl <ivan@app.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+ - Sends logs to LogEntries in realtime
+options:
+ path:
+ type: str
+ description:
+ - path to a log file
+ required: true
+ state:
+ type: str
+ description:
+ - following state of the log
+ choices: [ 'present', 'absent', 'followed', 'unfollowed' ]
+ required: false
+ default: present
+ name:
+ type: str
+ description:
+ - name of the log
+ required: false
+ logtype:
+ type: str
+ description:
+ - type of the log
+ required: false
+ aliases: [type]
+
+notes:
+ - Requires the LogEntries agent which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- name: Track nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/access.log
+ state: present
+ name: nginx-access-log
+
+- name: Stop tracking nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/error.log
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_log_status(module, le_path, path, state="present"):
+ """ Returns whether a log is followed or not. """
+
+ if state == "present":
+ rc, out, err = module.run_command("%s followed %s" % (le_path, path))
+ if rc == 0:
+ return True
+
+ return False
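+
+# Illustrative sketch, not part of the module: the agent's exit code is the
+# whole contract here (assumed CLI behaviour), e.g.:
+#
+# rc, out, err = module.run_command('/usr/local/bin/le followed /var/log/nginx/access.log')
+# # rc == 0 -> the log is already followed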
+
+
+def follow_log(module, le_path, logs, name=None, logtype=None):
+ """ Follows one or more logs if not already followed. """
+
+ followed_count = 0
+
+ for log in logs:
+ if query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = [le_path, 'follow', log]
+ if name:
+ cmd.extend(['--name', name])
+ if logtype:
+ cmd.extend(['--type', logtype])
+ rc, out, err = module.run_command(' '.join(cmd))
+
+ if not query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
+
+ followed_count += 1
+
+ if followed_count > 0:
+ module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+ module.exit_json(changed=False, msg="logs(s) already followed")
+
+
+def unfollow_log(module, le_path, logs):
+ """ Unfollows one or more logs if followed. """
+
+ removed_count = 0
+
+ # Using a for loop in case of error, we can report the log that failed
+ for log in logs:
+ # Query the log first, to see if we even need to remove.
+ if not query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'rm', log])
+
+ if query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
+
+ removed_count += 1
+
+ if removed_count > 0:
+ module.exit_json(changed=True, msg="removed %d package(s)" % removed_count)
+
+ module.exit_json(changed=False, msg="logs(s) already unfollowed")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True),
+ state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
+ name=dict(required=False, default=None, type='str'),
+ logtype=dict(required=False, default=None, type='str', aliases=['type'])
+ ),
+ supports_check_mode=True
+ )
+
+ le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+ p = module.params
+
+ # Handle multiple log files
+ logs = p["path"].split(",")
+ logs = filter(None, logs)
+
+ if p["state"] in ["present", "followed"]:
+ follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
+
+ elif p["state"] in ["absent", "unfollowed"]:
+ unfollow_log(module, le_path, logs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py
new file mode 100644
index 00000000..4a45c04a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logstash_plugin
+short_description: Manage Logstash plugins
+description:
+ - Manages Logstash plugins.
+author: Loic Blot (@nerzhul)
+options:
+ name:
+ type: str
+ description:
+ - Install plugin with that name.
+ required: True
+ state:
+ type: str
+ description:
+ - Apply plugin state.
+ choices: ["present", "absent"]
+ default: present
+ plugin_bin:
+ type: path
+ description:
+ - Specify logstash-plugin to use for plugin management.
+ default: /usr/share/logstash/bin/logstash-plugin
+ proxy_host:
+ type: str
+ description:
+ - Proxy host to use during plugin installation.
+ proxy_port:
+ type: str
+ description:
+ - Proxy port to use during plugin installation.
+ version:
+ type: str
+ description:
+ - Version of the plugin to install.
+ If the plugin already exists at a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Logstash beats input plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+
+- name: Install specific version of a plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-syslog
+ version: '3.2.0'
+
+- name: Uninstall Logstash plugin
+ community.general.logstash_plugin:
+ state: absent
+ name: logstash-filter-multiline
+
+- name: Install Logstash plugin with alternate heap size
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+ environment:
+ LS_JAVA_OPTS: "-Xms256m -Xmx256m"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+
+def is_plugin_present(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, "list", plugin_name]
+ rc, out, err = module.run_command(" ".join(cmd_args))
+ return rc == 0
+
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
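+
+# Illustrative sketch, not part of the module: only the text after the first
+# "reason: " marker is kept; anything without that marker falls through
+# unchanged, e.g.:
+#
+# >>> parse_error('ERROR: installation aborted, reason: plugin not found')
+# 'plugin not found'
+# >>> parse_error('some unstructured failure output')
+# 'some unstructured failure output'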
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if version:
+ cmd_args.append("--version %s" % version)
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ plugin_bin = module.params["plugin_bin"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ present = is_plugin_present(module, plugin_bin, name)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port)
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py
new file mode 100644
index 00000000..1dfe76d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: monit
+short_description: Manage the state of a program monitored via Monit
+description:
+ - Manage the state of a program monitored via I(Monit).
+options:
+ name:
+ description:
+ - The name of the I(monit) program/process to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of service.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ type: str
+ timeout:
+ description:
+ - If there are pending actions for the service monitored by monit, then Ansible will check
+ for up to this many seconds to verify the requested action has been performed.
+ Ansible will sleep for five seconds between each check.
+ default: 300
+ type: int
+author:
+ - Darryl Stoflet (@dstoflet)
+ - Simon Kelly (@snopoke)
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program httpd to be in started state
+ community.general.monit:
+ name: httpd
+ state: started
+'''
+
+import time
+import re
+
+from collections import namedtuple
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import python_2_unicode_compatible
+
+
+STATE_COMMAND_MAP = {
+ 'stopped': 'stop',
+ 'started': 'start',
+ 'monitored': 'monitor',
+ 'unmonitored': 'unmonitor',
+ 'restarted': 'restart'
+}
+
+MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program',
+ 'Network']
+
+
+@python_2_unicode_compatible
+class StatusValue(namedtuple("Status", "value, is_pending")):
+ MISSING = 'missing'
+ OK = 'ok'
+ NOT_MONITORED = 'not_monitored'
+ INITIALIZING = 'initializing'
+ DOES_NOT_EXIST = 'does_not_exist'
+ EXECUTION_FAILED = 'execution_failed'
+ ALL_STATUS = [
+ MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED
+ ]
+
+ def __new__(cls, value, is_pending=False):
+ return super(StatusValue, cls).__new__(cls, value, is_pending)
+
+ def pending(self):
+ return StatusValue(self.value, True)
+
+ def __getattr__(self, item):
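+ # Resolve dynamic boolean attributes such as `status.is_ok` or
+ # `status.is_missing` against the matching class constant above.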
+ if item in ('is_%s' % status for status in self.ALL_STATUS):
+ return self.value == getattr(self, item[3:].upper())
+ raise AttributeError(item)
+
+ def __str__(self):
+ return "%s%s" % (self.value, " (pending)" if self.is_pending else "")
+
+
+class Status(object):
+ MISSING = StatusValue(StatusValue.MISSING)
+ OK = StatusValue(StatusValue.OK)
+ RUNNING = StatusValue(StatusValue.OK)
+ NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED)
+ INITIALIZING = StatusValue(StatusValue.INITIALIZING)
+ DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST)
+ EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED)
+
+
+class Monit(object):
+ def __init__(self, module, monit_bin_path, service_name, timeout):
+ self.module = module
+ self.monit_bin_path = monit_bin_path
+ self.process_name = service_name
+ self.timeout = timeout
+
+ self._monit_version = None
+ self._raw_version = None
+ self._status_change_retry_count = 6
+
+ def monit_version(self):
+ if self._monit_version is None:
+ self._raw_version, version = self._get_monit_version()
+ # Use only major and minor version numbers; even if there are more, these should be enough
+ self._monit_version = version[0], version[1]
+ return self._monit_version
+
+ def _get_monit_version(self):
+ rc, out, err = self.module.run_command('%s -V' % self.monit_bin_path, check_rc=True)
+ version_line = out.split('\n')[0]
+ raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group()
+ return raw_version, tuple(map(int, raw_version.split('.')))
+
+ def exit_fail(self, msg, status=None, **kwargs):
+ kwargs.update({
+ 'msg': msg,
+ 'monit_version': self._raw_version,
+ 'process_status': str(status) if status else None,
+ })
+ self.module.fail_json(**kwargs)
+
+ def exit_success(self, state):
+ self.module.exit_json(changed=True, name=self.process_name, state=state)
+
+ @property
+ def command_args(self):
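+ # Pass batch mode ("-B") to monit versions newer than 5.18 for plain, non-interactive output.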
+ return "-B" if self.monit_version() > (5, 18) else ""
+
+ def get_status(self, validate=False):
+ """Return the status of the process in monit.
+
+ :param validate: Force monit to re-check the status of the process
+ """
+ monit_command = "validate" if validate else "status"
+ check_rc = not validate # 'validate' always has rc = 1
+ command = ' '.join([self.monit_bin_path, monit_command, self.command_args, self.process_name])
+ rc, out, err = self.module.run_command(command, check_rc=check_rc)
+ return self._parse_status(out, err)
+
+ def _parse_status(self, output, err):
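+ # Illustrative monit output lines and their mapping (shapes assumed from
+ # monit's "status"/"validate" output):
+ #   "status    OK"                              -> Status.OK
+ #   "status    Not monitored - start pending"   -> Status.OK (pending)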
+ escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES])
+ pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name))
+ if not re.search(pattern, output, re.IGNORECASE):
+ return Status.MISSING
+
+ status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE)
+ if not status_val:
+ self.exit_fail("Unable to find process status", stdout=output, stderr=err)
+
+ status_val = status_val[0].strip().upper()
+ if ' | ' in status_val:
+ status_val = status_val.split(' | ')[0]
+ if ' - ' not in status_val:
+ status_val = status_val.replace(' ', '_')
+ return getattr(Status, status_val)
+ else:
+ status_val, substatus = status_val.split(' - ')
+ action, state = substatus.split()
+ if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']:
+ status = Status.OK
+ else:
+ status = Status.NOT_MONITORED
+
+ if state == 'PENDING':  # status_val was upper-cased above
+ status = status.pending()
+ return status
+
+ def is_process_present(self):
+ rc, out, err = self.module.run_command('%s summary %s' % (self.monit_bin_path, self.command_args), check_rc=True)
+ return bool(re.findall(r'\b%s\b' % self.process_name, out))
+
+ def is_process_running(self):
+ return self.get_status().is_ok
+
+ def run_command(self, command):
+ """Runs a monit command, and returns the new status."""
+ return self.module.run_command('%s %s %s' % (self.monit_bin_path, command, self.process_name), check_rc=True)
+
+ def wait_for_status_change(self, current_status):
+ running_status = self.get_status()
+ if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED:
+ return running_status
+
+ loop_count = 0
+ while running_status.value == current_status.value:
+ if loop_count >= self._status_change_retry_count:
+ self.exit_fail('waited too long for monit to change state', running_status)
+
+ loop_count += 1
+ time.sleep(0.5)
+ validate = loop_count % 2 == 0 # force recheck of status every second try
+ running_status = self.get_status(validate)
+ return running_status
+
+ def wait_for_monit_to_stop_pending(self, current_status=None):
+ """Fails this run if there is no status or it's pending/initializing for timeout"""
+ timeout_time = time.time() + self.timeout
+
+ if not current_status:
+ current_status = self.get_status()
+ waiting_status = [
+ StatusValue.MISSING,
+ StatusValue.INITIALIZING,
+ StatusValue.DOES_NOT_EXIST,
+ ]
+ while current_status.is_pending or (current_status.value in waiting_status):
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for "pending" or "initializing" status to go away', current_status)
+
+ time.sleep(5)
+ current_status = self.get_status(validate=True)
+ return current_status
+
+ def reload(self):
+ rc, out, err = self.module.run_command('%s reload' % self.monit_bin_path)
+ if rc != 0:
+ self.exit_fail('monit reload failed', stdout=out, stderr=err)
+ self.exit_success(state='reloaded')
+
+ def present(self):
+ self.run_command('reload')
+
+ timeout_time = time.time() + self.timeout
+ while not self.is_process_present():
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for process to become "present"')
+
+ time.sleep(5)
+
+ self.exit_success(state='present')
+
+ def change_state(self, state, expected_status, invert_expected=None):
+ current_status = self.get_status()
+ self.run_command(STATE_COMMAND_MAP[state])
+ status = self.wait_for_status_change(current_status)
+ status = self.wait_for_monit_to_stop_pending(status)
+ status_match = status.value == expected_status.value
+ if invert_expected:
+ status_match = not status_match
+ if status_match:
+ self.exit_success(state=state)
+ self.exit_fail('%s process not %s' % (self.process_name, state), status)
+
+ def stop(self):
+ self.change_state('stopped', Status.NOT_MONITORED)
+
+ def unmonitor(self):
+ self.change_state('unmonitored', Status.NOT_MONITORED)
+
+ def restart(self):
+ self.change_state('restarted', Status.OK)
+
+ def start(self):
+ self.change_state('started', Status.OK)
+
+ def monitor(self):
+ self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True)
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ timeout=dict(default=300, type='int'),
+ state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ monit = Monit(module, module.get_bin_path('monit', True), name, timeout)
+
+ def exit_if_check_mode():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if state == 'reloaded':
+ exit_if_check_mode()
+ monit.reload()
+
+ present = monit.is_process_present()
+
+ if not present and state != 'present':
+ module.fail_json(msg='%s process not presently configured with monit' % name, name=name)
+
+ if state == 'present':
+ if present:
+ module.exit_json(changed=False, name=name, state=state)
+ exit_if_check_mode()
+ monit.present()
+
+ monit.wait_for_monit_to_stop_pending()
+ running = monit.is_process_running()
+
+ if running and state in ['started', 'monitored']:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if running and state == 'stopped':
+ exit_if_check_mode()
+ monit.stop()
+
+ if running and state == 'unmonitored':
+ exit_if_check_mode()
+ monit.unmonitor()
+
+ elif state == 'restarted':
+ exit_if_check_mode()
+ monit.restart()
+
+ elif not running and state == 'started':
+ exit_if_check_mode()
+ monit.start()
+
+ elif not running and state == 'monitored':
+ exit_if_check_mode()
+ monit.monitor()
+
+ module.exit_json(changed=False, name=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py
new file mode 100644
index 00000000..248fd105
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py
@@ -0,0 +1,1304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is largely copied from the Nagios module included in the
+# Func project. Original copyright follows:
+#
+# func-nagios - Schedule downtime and enable/disable notifications
+# Copyright 2011, Red Hat, Inc.
+# Tim Bielawa <tbielawa@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications
+description:
+ - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - The C(nagios) module is not idempotent.
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+ to the host the playbook is currently running on.
+ - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself),
+ e.g., C(service=host). This keyword may not be given with other services at the same time.
+ I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
+ To schedule downtime for all services on a particular host, use the keyword C(all), e.g., C(service=all).
+options:
+ action:
+ description:
+ - Action to take.
+ - servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
+ - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0.
+ required: true
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+ "servicegroup_host_downtime", "acknowledge", "forced_check" ]
+ type: str
+ host:
+ description:
+ - Host to operate on in Nagios.
+ type: str
+ cmdfile:
+ description:
+ - Path to the nagios I(command file) (FIFO pipe).
+ Only required if auto-detection fails.
+ type: str
+ author:
+ description:
+ - Author to leave downtime comments as.
+ Only usable with the C(downtime) and C(acknowledge) actions.
+ type: str
+ default: Ansible
+ comment:
+ description:
+ - Comment for the C(downtime) and C(acknowledge) actions.
+ type: str
+ default: Scheduling downtime
+ start:
+ description:
+ - When downtime should start, in time_t format (epoch seconds).
+ version_added: '0.2.0'
+ type: str
+ minutes:
+ description:
+ - Minutes to schedule downtime for.
+ - Only usable with the C(downtime) action.
+ type: int
+ default: 30
+ services:
+ description:
+ - What to manage downtime/alerts for. Separate multiple services with commas.
+ C(service) is an alias for C(services).
+ B(Required) option when using the C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), and C(disable_alerts) actions.
+ aliases: [ "service" ]
+ type: str
+ servicegroup:
+ description:
+ - The Servicegroup we want to set downtimes/alerts for.
+ B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
+ type: str
+ command:
+ description:
+ - The raw command to send to nagios, which
+ should not include the submitted time header or the line-feed.
+ B(Required) option when using the C(command) action.
+ type: str
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+- name: Set 30 minutes of apache downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 30
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00
+ community.general.nagios:
+ action: downtime
+ start: 1555984800
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime, with a comment describing the reason
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: Rebuilding machine
+
+- name: Schedule downtime for ALL services on HOST
+ community.general.nagios:
+ action: downtime
+ minutes: 45
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule downtime for a few services
+ community.general.nagios:
+ action: downtime
+ services: frob,foobar,qeuz
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all services in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_service_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all hosts in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_host_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Delete all downtime for a given host
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: all
+
+- name: Delete all downtime for HOST with a particular comment
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: host
+ comment: Planned maintenance
+
+- name: Acknowledge a HOST problem with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: 'power outage - see casenr 12345'
+
+- name: Acknowledge an active service problem for the httpd service with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: httpd
+ host: '{{ inventory_hostname }}'
+ comment: 'service crashed - see casenr 12345'
+
+- name: Reset a passive service check for snmp trap
+ community.general.nagios:
+ action: forced_check
+ service: snmp
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for the httpd service
+ community.general.nagios:
+ action: forced_check
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for all services of a particular host
+ community.general.nagios:
+ action: forced_check
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for a particular host
+ community.general.nagios:
+ action: forced_check
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Enable SMART disk alerts
+ community.general.nagios:
+ action: enable_alerts
+ service: smart
+ host: '{{ inventory_hostname }}'
+
+- name: Disable httpd and nfs alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: httpd,nfs
+ host: '{{ inventory_hostname }}'
+
+- name: Disable HOST alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Silence ALL alerts
+ community.general.nagios:
+ action: silence
+ host: '{{ inventory_hostname }}'
+
+- name: Unsilence all alerts
+ community.general.nagios:
+ action: unsilence
+ host: '{{ inventory_hostname }}'
+
+- name: Shut up nagios
+ community.general.nagios:
+ action: silence_nagios
+
+- name: Annoy me, nagios
+ community.general.nagios:
+ action: unsilence_nagios
+
+- name: Command something
+ community.general.nagios:
+ action: command
+ command: DISABLE_FAILURE_PREDICTION
+'''
+
+import time
+import os.path
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+######################################################################
+
+def which_cmdfile():
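+ # Scan well-known Nagios/Icinga configuration paths for the command_file setting.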
+ locations = [
+ # rhel
+ '/etc/nagios/nagios.cfg',
+ # debian
+ '/etc/nagios3/nagios.cfg',
+ # older debian
+ '/etc/nagios2/nagios.cfg',
+ # bsd, solaris
+ '/usr/local/etc/nagios/nagios.cfg',
+ # groundwork it monitoring
+ '/usr/local/groundwork/nagios/etc/nagios.cfg',
+ # open monitoring distribution
+ '/omd/sites/oppy/tmp/nagios/nagios.cfg',
+ # ???
+ '/usr/local/nagios/etc/nagios.cfg',
+ '/usr/local/nagios/nagios.cfg',
+ '/opt/nagios/etc/nagios.cfg',
+ '/opt/nagios/nagios.cfg',
+ # icinga on debian/ubuntu
+ '/etc/icinga/icinga.cfg',
+ # icinga installed from source (default location)
+ '/usr/local/icinga/etc/icinga.cfg',
+ ]
+
+ for path in locations:
+ if os.path.exists(path):
+ for line in open(path):
+ if line.startswith('command_file'):
+ return line.split('=')[1].strip()
+
+ return None
+
+######################################################################
+
+
+def main():
+ ACTION_CHOICES = [
+ 'downtime',
+ 'delete_downtime',
+ 'silence',
+ 'unsilence',
+ 'enable_alerts',
+ 'disable_alerts',
+ 'silence_nagios',
+ 'unsilence_nagios',
+ 'command',
+ 'servicegroup_host_downtime',
+ 'servicegroup_service_downtime',
+ 'acknowledge',
+ 'forced_check',
+ ]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=ACTION_CHOICES),
+ author=dict(default='Ansible'),
+ comment=dict(default='Scheduling downtime'),
+ host=dict(required=False, default=None),
+ servicegroup=dict(required=False, default=None),
+ start=dict(required=False, default=None),
+ minutes=dict(default=30, type='int'),
+ cmdfile=dict(default=which_cmdfile()),
+ services=dict(default=None, aliases=['service']),
+ command=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+
+ action = module.params['action']
+ host = module.params['host']
+ servicegroup = module.params['servicegroup']
+ start = module.params['start']
+ services = module.params['services']
+ cmdfile = module.params['cmdfile']
+ command = module.params['command']
+
+ ##################################################################
+ # Required args per action:
+ # downtime = (minutes, service, host)
+ # acknowledge = (service, host)
+ # (un)silence = (host)
+ # (enable/disable)_alerts = (service, host)
+ # command = command
+ #
+ # AnsibleModule will verify most stuff; we need to verify
+ # 'service' manually.
+
+ ##################################################################
+ if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
+ if not host:
+ module.fail_json(msg='no host specified for action requiring one')
+ ######################################################################
+ if action == 'downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+
+ ######################################################################
+ if action == 'delete_downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to delete downtime for')
+
+ ######################################################################
+
+ if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
+ # Make sure there's an actual servicegroup selected
+ if not servicegroup:
+ module.fail_json(msg='no servicegroup selected to set downtime for')
+
+ ##################################################################
+ if action in ['enable_alerts', 'disable_alerts']:
+ if not services:
+ module.fail_json(msg='a service is required when setting alerts')
+
+ if action in ['command']:
+ if not command:
+ module.fail_json(msg='no command passed for command action')
+ ######################################################################
+ if action == 'acknowledge':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to acknowledge')
+
+ ##################################################################
+ if action == 'forced_check':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to check')
+
+ ##################################################################
+ if not cmdfile:
+ module.fail_json(msg='unable to locate nagios.cfg')
+
+ ##################################################################
+ ansible_nagios = Nagios(module, **module.params)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ ansible_nagios.act()
+ ##################################################################
+
+
+######################################################################
+class Nagios(object):
+ """
+ Perform common tasks in Nagios related to downtime and
+ notifications.
+
+ The complete set of external commands Nagios handles is documented
+ on their website:
+
+ http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+
+ Note that in the case of `schedule_svc_downtime`,
+ `enable_svc_notifications`, and `disable_svc_notifications`, the
+ service argument should be passed as a list.
+ """
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.action = kwargs['action']
+ self.author = kwargs['author']
+ self.comment = kwargs['comment']
+ self.host = kwargs['host']
+ self.servicegroup = kwargs['servicegroup']
+ if kwargs['start'] is not None:
+ self.start = int(kwargs['start'])
+ else:
+ self.start = None
+ self.minutes = kwargs['minutes']
+ self.cmdfile = kwargs['cmdfile']
+ self.command = kwargs['command']
+
+ if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
+ self.services = kwargs['services']
+ else:
+ self.services = kwargs['services'].split(',')
+
+ self.command_results = []
+
+ def _now(self):
+ """
+ The time in seconds since 12:00:00AM Jan 1, 1970
+ """
+
+ return int(time.time())
+
+ def _write_command(self, cmd):
+ """
+ Write the given command to the Nagios command file
+ """
+
+ if not os.path.exists(self.cmdfile):
+ self.module.fail_json(msg='nagios command file does not exist',
+ cmdfile=self.cmdfile)
+ if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode):
+ self.module.fail_json(msg='nagios command file is not a fifo file',
+ cmdfile=self.cmdfile)
+ try:
+ fp = open(self.cmdfile, 'w')
+ fp.write(cmd)
+ fp.flush()
+ fp.close()
+ self.command_results.append(cmd.strip())
+ return True  # callers treat a truthy return as a successful write
+ except IOError:
+ self.module.fail_json(msg='unable to write to nagios command file',
+ cmdfile=self.cmdfile)
+
+ def _fmt_dt_str(self, cmd, host, duration, author=None,
+ comment=None, start=None,
+ svc=None, fixed=1, trigger=0):
+ """
+ Format an external-command downtime string.
+
+ cmd - Nagios command ID
+ host - Host to schedule downtime on
+ duration - Minutes to schedule downtime for
+ author - Name to file the downtime as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ Default is to use the entry time (now)
+ svc - Service to schedule downtime for, omit when for host downtime
+ fixed - Start now if 1, start when a problem is detected if 0
+ trigger - Optional ID of event to start downtime from. Leave as 0 for
+ fixed downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ entry_time = self._now()
+ if start is None:
+ start = entry_time
+
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+ duration_s = (duration * 60)
+ end = start + duration_s
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ dt_args = [str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+
+ dt_arg_str = ";".join(dt_args)
+ dt_str = hdr + dt_arg_str + "\n"
+
+ return dt_str
+
+ def _fmt_ack_str(self, cmd, host, author=None,
+ comment=None, svc=None, sticky=0, notify=1, persistent=0):
+ """
+ Format an external-command acknowledge string.
+
+ cmd - Nagios command ID
+ host - Host to set the acknowledgement on
+ author - Name to file the acknowledgement as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ svc - Service to acknowledge, omit when acknowledging a host problem
+ sticky - if set to 1, the acknowledgement remains until the host returns to an UP state
+ notify - if set to 1, a notification is sent out to contacts
+ persistent - if set to 1, the comment survives restarts of the Nagios process
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ ack_args = [str(sticky), str(notify), str(persistent), author, comment]
+
+ ack_arg_str = ";".join(ack_args)
+ ack_str = hdr + ack_arg_str + "\n"
+
+ return ack_str
+
+ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+ """
+ Format an external-command downtime deletion string.
+
+ cmd - Nagios command ID
+ host - Host to remove scheduled downtime from
+ comment - Reason downtime was added (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ svc - Service to remove downtime for, omit to remove all downtime for the host
+
+ Syntax: [submitted] COMMAND;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if comment is None:
+ comment = self.comment
+
+ dt_del_args = []
+ if svc is not None:
+ dt_del_args.append(svc)
+ else:
+ dt_del_args.append('')
+
+ if start is not None:
+ dt_del_args.append(str(start))
+ else:
+ dt_del_args.append('')
+
+ if comment is not None:
+ dt_del_args.append(comment)
+ else:
+ dt_del_args.append('')
+
+ dt_del_arg_str = ";".join(dt_del_args)
+ dt_del_str = hdr + dt_del_arg_str + "\n"
+
+ return dt_del_str
+
+ def _fmt_chk_str(self, cmd, host, svc=None, start=None):
+ """
+ Format an external-command forced host or service check string.
+
+ cmd - Nagios command ID
+ host - Host to check service from
+ svc - Service to check
+ start - check time
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>];<check_time>
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if start is None:
+ start = entry_time + 3
+
+ if svc is None:
+ chk_args = [str(start)]
+ else:
+ chk_args = [svc, str(start)]
+
+ chk_arg_str = ";".join(chk_args)
+ chk_str = hdr + chk_arg_str + "\n"
+
+ return chk_str
+
+ def _fmt_notif_str(self, cmd, host=None, svc=None):
+ """
+ Format an external-command notification string.
+
+ cmd - Nagios command ID.
+ host - Host to en/disable notifications on. A value is not required
+ for global downtime
+ svc - Service to schedule downtime for. A value is not required
+ for host downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
+ """
+
+ entry_time = self._now()
+ notif_str = "[%s] %s" % (entry_time, cmd)
+ if host is not None:
+ notif_str += ";%s" % host
+
+ if svc is not None:
+ notif_str += ";%s" % svc
+
+ notif_str += "\n"
+
+ return notif_str
+
+ def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ service.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the service.
+
+ Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SVC_DOWNTIME"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+ self._write_command(dt_cmd_str)
+
+ def schedule_host_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def acknowledge_svc_problem(self, host, services=None):
+ """
+ This command is used to acknowledge a particular
+ service problem.
+
+ By acknowledging the current problem, future notifications
+ for the same servicestate are disabled
+
+ Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_SVC_PROBLEM"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
+ self._write_command(ack_cmd_str)
+
+ def acknowledge_host_problem(self, host):
+ """
+ This command is used to acknowledge a particular
+ host problem.
+
+ By acknowledging the current problem, future notifications
+ for the same host state are disabled.
+
+ Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
+ <persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_HOST_PROBLEM"
+ ack_cmd_str = self._fmt_ack_str(cmd, host)
+ self._write_command(ack_cmd_str)
+
+ def schedule_forced_host_check(self, host):
+ """
+ This command schedules a forced active check for a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_CHECK"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_host_svc_check(self, host):
+ """
+ This command schedules a forced active check for all services
+ associated with a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_svc_check(self, host, services=None):
+ """
+ This command schedules a forced active check for a particular
+ service.
+
+ Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_SVC_CHECK"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
+ self._write_command(chk_cmd_str)
+
+ def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for
+ all services associated with a particular host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def delete_host_downtime(self, host, services=None, comment=None):
+ """
+ This command is used to remove scheduled downtime for a particular
+ host.
+
+ Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
+ """
+
+ cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+ if services is None:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+ self._write_command(dt_del_cmd_str)
+ else:
+ for service in services:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+ self._write_command(dt_del_cmd_str)
+
+ def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def disable_host_svc_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services on the specified host.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_host_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified host.
+
+ Note that this command does not disable notifications for
+ services associated with this host.
+
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_svc_notifications(self, host, services=None):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified service.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_host_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all hosts in the specified servicegroup.
+
+ Note that this command does not disable notifications for
+ services associated with hosts in this service group.
+
+ Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services in the specified servicegroup.
+
+ Note that this does not prevent notifications from being sent
+ out about the hosts in this servicegroup.
+
+ Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Disables notifications for all hosts in a particular
+ hostgroup.
+
+ Note that this does not disable notifications for the services
+ associated with the hosts in the hostgroup - see the
+ DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+ Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Disables notifications for all services associated with hosts
+ in a particular hostgroup.
+
+ Note that this does not disable notifications for the hosts in
+ the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+ command for that.
+
+ Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def enable_host_notifications(self, host):
+ """
+ Enables notifications for a particular host.
+
+ Note that this command does not enable notifications for
+ services associated with this host.
+
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def enable_host_svc_notifications(self, host):
+ """
+ Enables notifications for all services on the specified host.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_svc_notifications(self, host, services=None):
+ """
+ Enables notifications for a particular service.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ nagios_return = True
+ return_str_list = []
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Enables notifications for all hosts in a particular hostgroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Enables notifications for all services that are associated
+ with hosts in a particular hostgroup.
+
+ Note that this does not enable notifications for the hosts in
+ this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_host_notifications(self, servicegroup):
+ """
+ Enables notifications for all hosts that have services that
+ are members of a particular servicegroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ Enables notifications for all services that are members of a
+ particular servicegroup.
+
+ Note that this does not enable notifications for the hosts in
+ this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_host(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the host and all services on the specified host.
+
+ This is equivalent to calling disable_host_svc_notifications
+ and disable_host_notifications.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "DISABLE_HOST_SVC_NOTIFICATIONS",
+ "DISABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def unsilence_host(self, host):
+ """
+ This command is used to enable notifications for the host and
+ all services on the specified host.
+
+ This is equivalent to calling enable_host_svc_notifications
+ and enable_host_notifications.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "ENABLE_HOST_SVC_NOTIFICATIONS",
+ "ENABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_nagios(self):
+ """
+ This command is used to disable notifications for all hosts and services
+ in nagios.
+
+ This is a 'SHUT UP, NAGIOS' command
+ """
+ cmd = 'DISABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def unsilence_nagios(self):
+ """
+ This command is used to enable notifications for all hosts and services
+ in nagios.
+
+ This is an 'OK, NAGIOS, GO' command
+ """
+ cmd = 'ENABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def nagios_cmd(self, cmd):
+ """
+ This sends an arbitrary command to nagios
+
+ It prepends the submitted time and appends a \n
+
+ You just have to provide the properly formatted command
+ """
+
+ pre = '[%s]' % int(time.time())
+
+ post = '\n'
+ cmdstr = '%s %s%s' % (pre, cmd, post)
+ self._write_command(cmdstr)
+
+ def act(self):
+ """
+ Figure out what action was requested from Ansible, then carry
+ it out.
+ """
+ # host or service downtime?
+ if self.action == 'downtime':
+ if self.services == 'host':
+ self.schedule_host_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ elif self.services == 'all':
+ self.schedule_host_svc_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ else:
+ self.schedule_svc_downtime(self.host,
+ services=self.services,
+ minutes=self.minutes,
+ start=self.start)
+
+ elif self.action == 'acknowledge':
+ if self.services == 'host':
+ self.acknowledge_host_problem(self.host)
+ else:
+ self.acknowledge_svc_problem(self.host, services=self.services)
+
+ elif self.action == 'delete_downtime':
+ if self.services == 'host':
+ self.delete_host_downtime(self.host)
+ elif self.services == 'all':
+ self.delete_host_downtime(self.host, comment='')
+ else:
+ self.delete_host_downtime(self.host, services=self.services)
+
+ elif self.action == 'forced_check':
+ if self.services == 'host':
+ self.schedule_forced_host_check(self.host)
+ elif self.services == 'all':
+ self.schedule_forced_host_svc_check(self.host)
+ else:
+ self.schedule_forced_svc_check(self.host, services=self.services)
+
+ elif self.action == "servicegroup_host_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+ elif self.action == "servicegroup_service_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+
+ # toggle the host AND service alerts
+ elif self.action == 'silence':
+ self.silence_host(self.host)
+
+ elif self.action == 'unsilence':
+ self.unsilence_host(self.host)
+
+ # toggle host/svc alerts
+ elif self.action == 'enable_alerts':
+ if self.services == 'host':
+ self.enable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.enable_host_svc_notifications(self.host)
+ else:
+ self.enable_svc_notifications(self.host,
+ services=self.services)
+
+ elif self.action == 'disable_alerts':
+ if self.services == 'host':
+ self.disable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.disable_host_svc_notifications(self.host)
+ else:
+ self.disable_svc_notifications(self.host,
+ services=self.services)
+ elif self.action == 'silence_nagios':
+ self.silence_nagios()
+
+ elif self.action == 'unsilence_nagios':
+ self.unsilence_nagios()
+
+ elif self.action == 'command':
+ self.nagios_cmd(self.command)
+
+ # unknown action
+ else:
+ self.module.fail_json(msg="unknown action specified: '%s'" %
+ self.action)
+
+ self.module.exit_json(nagios_commands=self.command_results,
+ changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py
new file mode 100644
index 00000000..af953e0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: newrelic_deployment
+author: "Matt Coddington (@mcodd)"
+short_description: Notify newrelic about app deployments
+description:
+ - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
+options:
+ token:
+ type: str
+ description:
+ - API token, to place in the x-api-key header.
+ required: true
+ app_name:
+ type: str
+ description:
+ - One of I(app_name) or I(application_id) is required. The value of app_name in the newrelic.yml file used by the application.
+ required: false
+ application_id:
+ type: str
+ description:
+ - One of I(app_name) or I(application_id) is required. The application ID, found in the URL when viewing the application in RPM.
+ required: false
+ changelog:
+ type: str
+ description:
+ - A list of changes for this deployment
+ required: false
+ description:
+ type: str
+ description:
+ - Text annotation for the deployment - notes for you
+ required: false
+ revision:
+ type: str
+ description:
+ - A revision number (e.g., git commit SHA)
+ required: false
+ user:
+ type: str
+ description:
+ - The name of the user/process that triggered this deployment
+ required: false
+ appname:
+ type: str
+ description:
+ - Name of the application
+ required: false
+ environment:
+ type: str
+ description:
+ - The environment for this deployment
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify newrelic about an app deployment
+ community.general.newrelic_deployment:
+ token: AAAAAA
+ app_name: myapp
+ user: ansible deployment
+ revision: '1.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ app_name=dict(required=False),
+ application_id=dict(required=False),
+ changelog=dict(required=False),
+ description=dict(required=False),
+ revision=dict(required=False),
+ user=dict(required=False),
+ appname=dict(required=False),
+ environment=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_one_of=[['app_name', 'application_id']],
+ supports_check_mode=True
+ )
+
+ # build list of params
+ params = {}
+ if module.params["app_name"] and module.params["application_id"]:
+ module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+ if module.params["app_name"]:
+ params["app_name"] = module.params["app_name"]
+ elif module.params["application_id"]:
+ params["application_id"] = module.params["application_id"]
+ else:
+ module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+ for item in ["changelog", "description", "revision", "user", "appname", "environment"]:
+ if module.params[item]:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Send the data to NewRelic
+ url = "https://rpm.newrelic.com/deployments.xml"
+ data = urlencode(params)
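+ # Illustrative form body: app_name=myapp&revision=1.0&user=ansible+deployment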
+ headers = {
+ 'x-api-key': module.params["token"],
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] in (200, 201):
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py
new file mode 100644
index 00000000..306b596b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+ - This module will let you create PagerDuty maintenance windows.
+author:
+ - "Andrew Newdigate (@suprememoocow)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+ - "Bruce Pennypacker (@bpennypacker)"
+requirements:
+ - PagerDuty API access
+options:
+ state:
+ type: str
+ description:
+ - Create a maintenance window or get a list of ongoing windows.
+ required: true
+ choices: [ "running", "started", "ongoing", "absent" ]
+ name:
+ type: str
+ description:
+ - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ user:
+ type: str
+ description:
+ - PagerDuty user ID. Obsolete. Please use I(token) for authorization.
+ token:
+ type: str
+ description:
+ - A pagerduty token, generated on the pagerduty site. It is used for authorization.
+ required: true
+ requester_id:
+ type: str
+ description:
+ - ID of user making the request. Only needed when creating a maintenance window.
+ service:
+ type: list
+ description:
+ - A comma separated list of PagerDuty service IDs.
+ aliases: [ services ]
+ window_id:
+ type: str
+ description:
+ - ID of the maintenance window. Only needed when deleting (I(state=absent)) a maintenance window.
+ hours:
+ type: str
+ description:
+ - Length of maintenance window in hours.
+ default: '1'
+ minutes:
+ type: str
+ description:
+ - Maintenance window in minutes (this is added to the hours).
+ default: '0'
+ desc:
+ type: str
+ description:
+ - Short description of maintenance window.
+ default: Created by Ansible
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: List ongoing maintenance windows using a token
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ state: ongoing
+
+- name: Create a 1 hour maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ token: yourtoken
+ state: running
+ service: FOO123
+
+- name: Create a 5 minute maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ hours: 0
+ minutes: 5
+ state: running
+ service: FOO123
+
+
+- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment"
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: running
+ service: FOO123
+ hours: 4
+ desc: deployment
+ register: pd_window
+
+- name: Delete the previous maintenance window
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: absent
+ window_id: '{{ pd_window.result.maintenance_window.id }}'
+
+# Delete a maintenance window from a separate playbook than its creation,
+# and if it is the only existing maintenance window
+- name: Check
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: ongoing
+ register: pd_window
+
+- name: Delete
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: absent
+ window_id: "{{ pd_window.result.maintenance_windows[0].id }}"
+'''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class PagerDutyRequest(object):
+ def __init__(self, module, name, user, token):
+ self.module = module
+ self.name = name
+ self.user = user
+ self.token = token
+ self.headers = {
+ 'Content-Type': 'application/json',
+ "Authorization": self._auth_header(),
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ def ongoing(self, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing"
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, False
+
+ def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url):
+ if not requester_id:
+ self.module.fail_json(msg="requester_id is required when maintenance window should be created")
+
+ url = 'https://api.pagerduty.com/maintenance_windows'
+
+ headers = dict(self.headers)
+ headers.update({'From': requester_id})
+
+ start, end = self._compute_start_end_time(hours, minutes)
+ services = self._create_services_payload(service)
+
+ request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}}
+
+ data = json.dumps(request_data)
+ response, info = http_call(self.module, url, data=data, headers=headers, method='POST')
+ if info['status'] != 201:
+ self.module.fail_json(msg="failed to create the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _create_services_payload(self, service):
+        if isinstance(service, list):
+ return [{'id': s, 'type': 'service_reference'} for s in service]
+ else:
+ return [{'id': service, 'type': 'service_reference'}]
+
+ def _compute_start_end_time(self, hours, minutes):
+ now = datetime.datetime.utcnow()
+ later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
+ start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+ end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
+ return start, end
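+        # Illustrative example: with hours='1', minutes='30' and a current UTC
+        # time of 2021-01-01 12:00:00, this returns
+        # ('2021-01-01T12:00:00Z', '2021-01-01T13:30:00Z').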
+
+ def absent(self, window_id, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows/" + window_id
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers, method='DELETE')
+ if info['status'] != 204:
+ self.module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _auth_header(self):
+ return "Token token=%s" % self.token
+
+ def _read_response(self, response):
+ try:
+ return json.loads(response.read())
+ except Exception:
+ return ""
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
+ name=dict(required=False),
+ user=dict(required=False),
+ token=dict(required=True, no_log=True),
+ service=dict(required=False, type='list', aliases=["services"]),
+ window_id=dict(required=False),
+ requester_id=dict(required=False),
+ hours=dict(default='1', required=False), # @TODO change to int?
+ minutes=dict(default='0', required=False), # @TODO change to int?
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ user = module.params['user']
+ service = module.params['service']
+ window_id = module.params['window_id']
+ hours = module.params['hours']
+ minutes = module.params['minutes']
+ token = module.params['token']
+ desc = module.params['desc']
+ requester_id = module.params['requester_id']
+
+ pd = PagerDutyRequest(module, name, user, token)
+
+ if state == "running" or state == "started":
+ if not service:
+ module.fail_json(msg="service not specified")
+ (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed = True
+
+ if state == "ongoing":
+ (rc, out, changed) = pd.ongoing()
+
+ if state == "absent":
+ (rc, out, changed) = pd.absent(window_id)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
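+# Hedged sketch, never invoked: driving PagerDutyRequest directly. All IDs
+# and the token are placeholders.
+def _example_create_window(module):
+    pd = PagerDutyRequest(module, name=None, user=None, token='placeholder-token')
+    # requester_id becomes the 'From' header; each service ID becomes a
+    # service_reference object in the JSON payload.
+    return pd.create('PUSER01', ['PSVC001'], hours='1', minutes='0', desc='maintenance')
+
+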
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py
new file mode 100644
index 00000000..736ada5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+    - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events.
+author:
+ - "Amanpreet Singh (@ApsOps)"
+requirements:
+ - PagerDuty API access
+options:
+ name:
+ type: str
+ description:
+ - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ service_id:
+ type: str
+ description:
+      - ID of the PagerDuty service where incidents will be triggered, acknowledged or resolved.
+ required: true
+ service_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key).
+ integration_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services.
+ - This is the "integration key" listed on a "Integrations" tab of PagerDuty service.
+ state:
+ type: str
+ description:
+ - Type of event to be sent.
+ required: true
+ choices:
+ - 'triggered'
+ - 'acknowledged'
+ - 'resolved'
+ api_key:
+ type: str
+ description:
+      - The PagerDuty API key (read-only access), generated on the PagerDuty site.
+ required: true
+ desc:
+ type: str
+ description:
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+ will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
+ The maximum length is 1024 characters.
+ - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ required: false
+ default: Created via Ansible
+ incident_key:
+ type: str
+ description:
+ - Identifies the incident to which this I(state) should be applied.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
+ open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
+ problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a
+ trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ required: false
+ client:
+ type: str
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ type: str
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Trigger an incident with just the basic options
+ community.general.pagerduty_alert:
+ name: companyabc
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+
+- name: Trigger an incident with more options
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+ incident_key: somekey
+ client: Sample Monitoring Service
+ client_url: http://service.example.com
+
+- name: Acknowledge an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: acknowledged
+ incident_key: somekey
+ desc: "some text for incident's log"
+
+- name: Resolve an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: resolved
+ incident_key: somekey
+ desc: "some text for incident's log"
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse
+
+
+def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url):
+ url = 'https://api.pagerduty.com/incidents'
+ headers = {
+ "Content-type": "application/json",
+ "Authorization": "Token token=%s" % api_key,
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ params = {
+ 'service_ids[]': service_id,
+ 'sort_by': 'incident_number:desc',
+ 'time_zone': 'UTC'
+ }
+ if incident_key:
+ params['incident_key'] = incident_key
+
+ url_parts = list(urlparse(url))
+ url_parts[4] = urlencode(params, True)
+
+ url = urlunparse(url_parts)
+
+ response, info = http_call(module, url, method='get', headers=headers)
+
+ if info['status'] != 200:
+ module.fail_json(msg="failed to check current incident status."
+ "Reason: %s" % info['msg'])
+
+ incidents = json.loads(response.read())["incidents"]
+ msg = "No corresponding incident"
+
+ if len(incidents) == 0:
+ if state in ('acknowledged', 'resolved'):
+ return msg, False
+ return msg, True
+ elif state != incidents[0]["status"]:
+ return incidents[0], True
+
+ return incidents[0], False
+
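+# Illustrative note on the URL check() builds above: with service_id='PSVC001'
+# and incident_key='db-down', the query string encodes to
+#   service_ids%5B%5D=PSVC001&sort_by=incident_number%3Adesc&time_zone=UTC&incident_key=db-down
+# (parameter order may vary).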
+
+def send_event(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
+ url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ headers = {
+ "Content-type": "application/json"
+ }
+
+ data = {
+ "service_key": service_key,
+ "event_type": event_type,
+ "incident_key": incident_key,
+ "description": desc,
+ "client": client,
+ "client_url": client_url
+ }
+
+ response, info = fetch_url(module, url, method='post',
+ headers=headers, data=json.dumps(data))
+ if info['status'] != 200:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ service_id=dict(required=True),
+ service_key=dict(required=False, no_log=True),
+ integration_key=dict(required=False, no_log=True),
+ api_key=dict(required=True, no_log=True),
+ state=dict(required=True,
+ choices=['triggered', 'acknowledged', 'resolved']),
+ client=dict(required=False, default=None),
+ client_url=dict(required=False, default=None),
+ desc=dict(required=False, default='Created via Ansible'),
+ incident_key=dict(required=False, default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ service_id = module.params['service_id']
+ integration_key = module.params['integration_key']
+ service_key = module.params['service_key']
+ api_key = module.params['api_key']
+ state = module.params['state']
+ client = module.params['client']
+ client_url = module.params['client_url']
+ desc = module.params['desc']
+ incident_key = module.params['incident_key']
+
+ if integration_key is None:
+ if service_key is not None:
+ integration_key = service_key
+ module.warn('"service_key" is obsolete parameter and will be removed.'
+ ' Please, use "integration_key" instead')
+ else:
+ module.fail_json(msg="'integration_key' is required parameter")
+
+ state_event_dict = {
+ 'triggered': 'trigger',
+ 'acknowledged': 'acknowledge',
+ 'resolved': 'resolve'
+ }
+
+ event_type = state_event_dict[state]
+
+ if event_type != 'trigger' and incident_key is None:
+ module.fail_json(msg="incident_key is required for "
+ "acknowledge or resolve events")
+
+ out, changed = check(module, name, state, service_id,
+ integration_key, api_key, incident_key)
+
+ if not module.check_mode and changed is True:
+ out = send_event(module, integration_key, event_type, desc,
+ incident_key, client, client_url)
+
+ module.exit_json(result=out, changed=changed)
+
+
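+# Hedged sketch, never called: what a trigger through send_event() above
+# looks like. The integration key and incident key are placeholders.
+def _example_trigger(module):
+    return send_event(module, service_key='placeholder-key', event_type='trigger',
+                      desc='disk full on db01', incident_key='db01-disk',
+                      client='Sample Monitoring Service',
+                      client_url='http://service.example.com')
+
+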
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py
new file mode 100644
index 00000000..358a6961
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: pagerduty_change
+short_description: Track a code or infrastructure change as a PagerDuty change event
+version_added: 1.3.0
+description:
+ - This module will let you create a PagerDuty change event each time the module is run.
+ - This is not an idempotent action and a new change event will be created each time it is run.
+author:
+ - Adam Vaughan (@adamvaughan)
+requirements:
+ - PagerDuty integration key
+options:
+ integration_key:
+ description:
+ - The integration key that identifies the service the change was made to.
+ This can be found by adding an integration to a service in PagerDuty.
+ required: true
+ type: str
+ summary:
+ description:
+ - A short description of the change that occurred.
+ required: true
+ type: str
+ source:
+ description:
+ - The source of the change event.
+ default: Ansible
+ type: str
+ user:
+ description:
+ - The name of the user or process that triggered this deployment.
+ type: str
+ repo:
+ description:
+ - The URL of the project repository.
+ required: false
+ type: str
+ revision:
+ description:
+ - An identifier of the revision being deployed, typically a number or SHA from a version control system.
+ required: false
+ type: str
+ environment:
+ description:
+ - The environment name, typically C(production), C(staging), etc.
+ required: false
+ type: str
+ link_url:
+ description:
+ - A URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ link_text:
+ description:
+ - Descriptive text for a URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ url:
+ description:
+ - URL to submit the change event to.
+ required: false
+ default: https://events.pagerduty.com/v2/change/enqueue
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target URL will not be validated.
+ This should only be used on personally controlled sites using self-signed certificates.
+ required: false
+ default: yes
+ type: bool
+notes:
+  - Supports C(check_mode). Note that check mode does nothing except return C(changed=true) when the I(url) appears to be correct.
+'''
+
+EXAMPLES = '''
+- name: Track the deployment as a PagerDuty change event
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+
+- name: Track the deployment as a PagerDuty change event with more details
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+ source: Ansible Deploy
+ user: ansible
+ repo: github.com/ansible/ansible
+ revision: '4.2'
+ environment: production
+ link_url: https://github.com/ansible-collections/community.general/pull/1269
+ link_text: View changes on GitHub
+'''
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.basic import AnsibleModule
+from datetime import datetime
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ integration_key=dict(required=True, type='str', no_log=True),
+ summary=dict(required=True, type='str'),
+ source=dict(required=False, default='Ansible', type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ environment=dict(required=False, type='str'),
+ link_url=dict(required=False, type='str'),
+ link_text=dict(required=False, type='str'),
+ url=dict(required=False,
+ default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/
+
+ url = module.params['url']
+ headers = {'Content-Type': 'application/json'}
+
+ if module.check_mode:
+ _response, info = fetch_url(
+ module, url, headers=headers, method='POST')
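+        # An empty POST is expected to be rejected with HTTP 400; that status
+        # confirms the endpoint is reachable without enqueueing a real event.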
+
+ if info['status'] == 400:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status']))
+
+ custom_details = {}
+
+ if module.params['user']:
+ custom_details['user'] = module.params['user']
+
+ if module.params['repo']:
+ custom_details['repo'] = module.params['repo']
+
+ if module.params['revision']:
+ custom_details['revision'] = module.params['revision']
+
+ if module.params['environment']:
+ custom_details['environment'] = module.params['environment']
+
+ now = datetime.utcnow()
+ timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
+ payload = {
+ 'summary': module.params['summary'],
+ 'source': module.params['source'],
+ 'timestamp': timestamp,
+ 'custom_details': custom_details
+ }
+
+ event = {
+ 'routing_key': module.params['integration_key'],
+ 'payload': payload
+ }
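+    # Illustrative shape of the enqueued event (placeholder values):
+    # {'routing_key': 'abc123abc123abc123abc123abc123ab',
+    #  'payload': {'summary': 'The application was deployed',
+    #              'source': 'Ansible',
+    #              'timestamp': '2021-01-01T12:00:00.000000Z',
+    #              'custom_details': {'user': 'ansible'}}}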
+
+ if module.params['link_url']:
+ link = {
+ 'href': module.params['link_url']
+ }
+
+ if module.params['link_text']:
+ link['text'] = module.params['link_text']
+
+ event['links'] = [link]
+
+ _response, info = fetch_url(
+ module, url, data=module.jsonify(event), headers=headers, method='POST')
+
+ if info['status'] == 202:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Creating PagerDuty change event failed with %d' % (info['status']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py
new file mode 100644
index 00000000..4b20a321
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: pagerduty_user
+short_description: Manage a user account on PagerDuty
+description:
+ - This module manages the creation/removal of a user account on PagerDuty.
+version_added: '1.3.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+    - pdpyras python module (v4.1.1)
+ - PagerDuty API Access
+options:
+ access_token:
+ description:
+ - An API access token to authenticate with the PagerDuty REST API.
+ required: true
+ type: str
+ pd_user:
+ description:
+ - Name of the user in PagerDuty.
+ required: true
+ type: str
+ pd_email:
+ description:
+ - The user's email address.
+ - I(pd_email) is the unique identifier used and cannot be updated using this module.
+ required: true
+ type: str
+ pd_role:
+ description:
+ - The user's role.
+ choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']
+ default: 'responder'
+ type: str
+ state:
+ description:
+ - State of the user.
+ - On C(present), it creates a user if the user doesn't exist.
+ - On C(absent), it removes a user if the account exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ pd_teams:
+ description:
+ - The teams to which the user belongs.
+ - Required if I(state=present).
+ type: list
+ elements: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Create a user account on PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ pd_role: user_pd_role
+ pd_teams: user_pd_teams
+ state: "present"
+
+- name: Remove a user account from PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ state: "absent"
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import traceback
+from os import path
+
+try:
+ from pdpyras import APISession
+ HAS_PD_PY = True
+except ImportError:
+ HAS_PD_PY = False
+ PD_IMPORT_ERR = traceback.format_exc()
+
+try:
+ from pdpyras import PDClientError
+ HAS_PD_CLIENT_ERR = True
+except ImportError:
+ HAS_PD_CLIENT_ERR = False
+ PD_CLIENT_ERR_IMPORT_ERR = traceback.format_exc()
+
+
+class PagerDutyUser(object):
+ def __init__(self, module, session):
+ self._module = module
+ self._apisession = session
+
+ # check if the user exists
+ def does_user_exist(self, pd_email):
+ for user in self._apisession.iter_all('users'):
+ if user['email'] == pd_email:
+ return user['id']
+
+ # create a user account on PD
+ def add_pd_user(self, pd_name, pd_email, pd_role):
+ try:
+ user = self._apisession.persist('users', 'email', {
+ "name": pd_name,
+ "email": pd_email,
+ "type": "user",
+ "role": pd_role,
+ })
+ return user
+
+ except PDClientError as e:
+ if e.response.status_code == 400:
+ self._module.fail_json(
+ msg="Failed to add %s due to invalid argument" % (pd_name))
+ if e.response.status_code == 401:
+ self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name))
+            if e.response.status_code == 402:
+                self._module.fail_json(
+                    msg="Failed to add %s because the API token does not permit this action" % (pd_name))
+            if e.response.status_code == 403:
+                self._module.fail_json(
+                    msg="Failed to add %s because the API token cannot access the requested resource" % (pd_name))
+            if e.response.status_code == 429:
+                self._module.fail_json(
+                    msg="Failed to add %s because the API rate limit was reached" % (pd_name))
+
+ # delete a user account from PD
+ def delete_user(self, pd_user_id, pd_name):
+ try:
+ user_path = path.join('/users/', pd_user_id)
+ self._apisession.rdelete(user_path)
+
+ except PDClientError as e:
+ if e.response.status_code == 404:
+ self._module.fail_json(
+ msg="Failed to remove %s as user was not found" % (pd_name))
+            if e.response.status_code == 403:
+                self._module.fail_json(
+                    msg="Failed to remove %s because the API token cannot access the requested resource" % (pd_name))
+ if e.response.status_code == 401:
+ # print out the list of incidents
+ pd_incidents = self.get_incidents_assigned_to_user(pd_user_id)
+ self._module.fail_json(msg="Failed to remove %s as user has assigned incidents %s" % (pd_name, pd_incidents))
+            if e.response.status_code == 429:
+                self._module.fail_json(
+                    msg="Failed to remove %s because the API rate limit was reached" % (pd_name))
+
+ # get incidents assigned to a user
+ def get_incidents_assigned_to_user(self, pd_user_id):
+ incident_info = {}
+ incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]})
+
+ for incident in incidents:
+ incident_info = {
+ 'title': incident['title'],
+ 'key': incident['incident_key'],
+ 'status': incident['status']
+ }
+ return incident_info
+
+ # add a user to a team/teams
+ def add_user_to_teams(self, pd_user_id, pd_teams, pd_role):
+ updated_team = None
+ for team in pd_teams:
+ team_info = self._apisession.find('teams', team, attribute='name')
+ if team_info is not None:
+ try:
+ updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={
+ 'role': pd_role
+ })
+ except PDClientError:
+ updated_team = None
+ return updated_team
+
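+
+# Hedged sketch, not part of the module: looking up a user directly with
+# pdpyras, mirroring PagerDutyUser.does_user_exist() above. The token is a
+# placeholder.
+def _example_find_user(email):
+    session = APISession('placeholder-token')
+    # find() searches the index endpoint, matching on the given attribute
+    return session.find('users', email, attribute='email')
+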
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ access_token=dict(type='str', required=True, no_log=True),
+ pd_user=dict(type='str', required=True),
+ pd_email=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ pd_role=dict(type='str', default='responder',
+ choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']),
+ pd_teams=dict(type='list', elements='str', required=False)),
+ required_if=[['state', 'present', ['pd_teams']], ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PD_PY:
+ module.fail_json(msg=missing_required_lib('pdpyras', url='https://github.com/PagerDuty/pdpyras'), exception=PD_IMPORT_ERR)
+
+ if not HAS_PD_CLIENT_ERR:
+ module.fail_json(msg=missing_required_lib('PDClientError', url='https://github.com/PagerDuty/pdpyras'), exception=PD_CLIENT_ERR_IMPORT_ERR)
+
+ access_token = module.params['access_token']
+ pd_user = module.params['pd_user']
+ pd_email = module.params['pd_email']
+ state = module.params['state']
+ pd_role = module.params['pd_role']
+ pd_teams = module.params['pd_teams']
+
+ if pd_role:
+ pd_role_gui_value = {
+ 'global_admin': 'admin',
+ 'manager': 'user',
+ 'responder': 'limited_user',
+ 'observer': 'observer',
+ 'stakeholder': 'read_only_user',
+ 'limited_stakeholder': 'read_only_limited_user',
+ 'restricted_access': 'restricted_access'
+ }
+ pd_role = pd_role_gui_value[pd_role]
+
+ # authenticate with PD API
+ try:
+ session = APISession(access_token)
+ except PDClientError as e:
+ module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e)
+
+ user = PagerDutyUser(module, session)
+
+ user_exists = user.does_user_exist(pd_email)
+
+ if user_exists:
+ if state == "absent":
+ # remove user
+ if not module.check_mode:
+ user.delete_user(user_exists, pd_user)
+ module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user)
+ else:
+ module.exit_json(changed=False, result="User %s already exists." % pd_user)
+
+ # in case that the user does not exist
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="User %s was not found." % pd_user)
+
+ else:
+            # Add the user with the default notification rule and contact info (email)
+ if not module.check_mode:
+ user.add_pd_user(pd_user, pd_email, pd_role)
+ # get user's id
+ pd_user_id = user.does_user_exist(pd_email)
+ # add a user to the team/s
+ user.add_user_to_teams(pd_user_id, pd_teams, pd_role)
+ module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py
new file mode 100644
index 00000000..23ed2545
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+    - This module will let you pause/unpause Pingdom alerts.
+author:
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+requirements:
+ - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+options:
+ state:
+ type: str
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ choices: [ "running", "paused", "started", "stopped" ]
+ checkid:
+ type: str
+ description:
+ - Pingdom ID of the check.
+ required: true
+ uid:
+ type: str
+ description:
+ - Pingdom user ID.
+ required: true
+ passwd:
+ type: str
+ description:
+ - Pingdom user password.
+ required: true
+ key:
+ type: str
+ description:
+ - Pingdom API key.
+ required: true
+notes:
+  - This module does not yet support adding or removing checks.
+'''
+
+EXAMPLES = '''
+- name: Pause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: paused
+
+- name: Unpause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: running
+'''
+
+import traceback
+
+PINGDOM_IMP_ERR = None
+try:
+ import pingdom
+ HAS_PINGDOM = True
+except Exception:
+ PINGDOM_IMP_ERR = traceback.format_exc()
+ HAS_PINGDOM = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def pause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=True)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "paused": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def unpause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=False)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "up": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
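+
+# Hedged sketch, never called: the pingdom-python calls that pause()/unpause()
+# above wrap.
+def _example_pause(checkid, uid, passwd, key):
+    c = pingdom.PingdomConnection(uid, passwd, key)
+    c.modify_check(checkid, paused=True)  # pause the check
+    return c.get_check(checkid).status    # read back the reported status
+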
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
+ checkid=dict(required=True),
+ uid=dict(required=True),
+ passwd=dict(required=True, no_log=True),
+ key=dict(required=True, no_log=True),
+ )
+ )
+
+ if not HAS_PINGDOM:
+ module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR)
+
+ checkid = module.params['checkid']
+ state = module.params['state']
+ uid = module.params['uid']
+ passwd = module.params['passwd']
+ key = module.params['key']
+
+ if (state == "paused" or state == "stopped"):
+ (rc, name, result) = pause(checkid, uid, passwd, key)
+
+ if (state == "running" or state == "started"):
+ (rc, name, result) = unpause(checkid, uid, passwd, key)
+
+ if rc != 0:
+ module.fail_json(checkid=checkid, name=name, status=result)
+
+ module.exit_json(checkid=checkid, name=name, status=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py
new file mode 100644
index 00000000..161361b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+author: "Max Riveiro (@kavu)"
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+options:
+ token:
+ type: str
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ type: str
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ type: str
+ description:
+ - Revision number/sha being deployed.
+ required: true
+ user:
+ type: str
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ type: str
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ type: str
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+'''
+
+EXAMPLES = '''
+ - name: Rollbar deployment notification
+ community.general.rollbar_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: '4.2'
+ rollbar_user: admin
+ comment: Test Deploy
+
+ - name: Notify rollbar about current git revision deployment by current user
+ community.general.rollbar_deployment:
+ token: "{{ rollbar_access_token }}"
+ environment: production
+ revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}"
+ user: "{{ lookup('env', 'USER') }}"
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ revision=dict(required=True),
+ user=dict(required=False),
+ rollbar_user=dict(required=False),
+ comment=dict(required=False),
+ url=dict(
+ required=False,
+ default='https://api.rollbar.com/api/1/deploy/'
+ ),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ params = dict(
+ access_token=module.params['token'],
+ environment=module.params['environment'],
+ revision=module.params['revision']
+ )
+
+ if module.params['user']:
+ params['local_username'] = module.params['user']
+
+ if module.params['rollbar_user']:
+ params['rollbar_username'] = module.params['rollbar_user']
+
+ if module.params['comment']:
+ params['comment'] = module.params['comment']
+
+ url = module.params.get('url')
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data, method='POST')
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+
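+# Hedged sketch, never invoked: the form-encoded POST main() performs above,
+# with placeholder values.
+def _example_deploy_notification(module):
+    data = urlencode({'access_token': 'AAAAAA', 'environment': 'staging',
+                      'revision': 'abc123', 'local_username': 'ansible'})
+    return fetch_url(module, 'https://api.rollbar.com/api/1/deploy/',
+                     data=data, method='POST')
+
+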
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py
new file mode 100644
index 00000000..9ebe2765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+  - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module; they are simply specified for your convenience.
+options:
+ name:
+ type: str
+ description:
+ - The name of the check
+ - This is the key that is used to determine whether a check exists
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+      - Create a backup file (if C(yes)), including the timestamp information, so you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ handlers:
+ type: list
+ description:
+ - List of handlers to notify when the check fails
+ default: []
+ subscribers:
+ type: list
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ default: []
+ interval:
+ type: int
+ description:
+ - Check interval in seconds
+ timeout:
+ type: int
+ description:
+ - Timeout for the check
+ - If not specified, it defaults to 10.
+ ttl:
+ type: int
+ description:
+ - Time to live in seconds until the check is considered stale
+ handle:
+ description:
+ - Whether the check should be handled or not
+ - Default is C(false).
+ type: bool
+ subdue_begin:
+ type: str
+ description:
+ - When to disable handling of check failures
+ subdue_end:
+ type: str
+ description:
+ - When to enable handling of check failures
+ dependencies:
+ type: list
+ description:
+      - Other checks this check depends on. If dependencies fail, handling of this check will be disabled.
+ default: []
+ metric:
+ description:
+ - Whether the check is a metric
+ type: bool
+ default: 'no'
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ - Default is C(false).
+ type: bool
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the sensu api
+ - Default is C(false).
+ type: bool
+ occurrences:
+ type: int
+ description:
+ - Number of event occurrences before the handler should take action
+ - If not specified, defaults to 1.
+ refresh:
+ type: int
+ description:
+ - Number of seconds handlers should wait before taking second action
+ aggregate:
+ description:
+      - Classifies the check as an aggregate check, making it available via the aggregate API.
+ - Default is C(false).
+ type: bool
+ low_flap_threshold:
+ type: int
+ description:
+ - The low threshold for flap detection
+ high_flap_threshold:
+ type: int
+ description:
+ - The high threshold for flap detection
+ custom:
+ type: dict
+ description:
+      - A hash/dictionary of custom parameters to merge into the configuration.
+      - You cannot override other module parameters using this.
+ default: {}
+ source:
+ type: str
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: Get cpu metrics
+ community.general.sensu_check:
+ name: cpu_load
+ command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric: yes
+ handlers: relay
+ subscribers: common
+ interval: 60
+
+# Check whether nginx is running
+- name: Check nginx process
+ community.general.sensu_check:
+ name: nginx_running
+ command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
+ handlers: default
+ subscribers: nginx
+ interval: 60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: Check disk
+ community.general.sensu_check:
+ name: check_disk_capacity
+ state: absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ stream = None
+ try:
+ try:
+ stream = open(path, 'r')
+ config = json.load(stream)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+ finally:
+ if stream:
+ stream.close()
+
+ if 'checks' not in config:
+ if state == 'absent':
+ reasons.append('`checks\' section did not exist and state is `absent\'')
+ return changed, reasons
+ config['checks'] = {}
+ changed = True
+ reasons.append('`checks\' section did not exist')
+
+ if state == 'absent':
+ if name in config['checks']:
+ del config['checks'][name]
+ changed = True
+ reasons.append('check was present and state is `absent\'')
+
+ if state == 'present':
+ if name not in config['checks']:
+ check = {}
+ config['checks'][name] = check
+ changed = True
+ reasons.append('check was absent and state is `present\'')
+ else:
+ check = config['checks'][name]
+ simple_opts = ['command',
+ 'handlers',
+ 'subscribers',
+ 'interval',
+ 'timeout',
+ 'ttl',
+ 'handle',
+ 'dependencies',
+ 'standalone',
+ 'publish',
+ 'occurrences',
+ 'refresh',
+ 'aggregate',
+ 'low_flap_threshold',
+ 'high_flap_threshold',
+ 'source',
+ ]
+ for opt in simple_opts:
+ if module.params[opt] is not None:
+ if opt not in check or check[opt] != module.params[opt]:
+ check[opt] = module.params[opt]
+ changed = True
+ reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+ else:
+ if opt in check:
+ del check[opt]
+ changed = True
+ reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+ if module.params['custom']:
+            # Merge custom parameters, refusing to overwrite standard options
+            custom_params = module.params['custom']
+            overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+            if overwritten_fields:
+                msg = 'You cannot overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+                module.fail_json(msg=msg)
+
+ for k, v in custom_params.items():
+ if k in config['checks'][name]:
+ if not config['checks'][name][k] == v:
+ changed = True
+ reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+ else:
+ changed = True
+ reasons.append('`custom param {opt}\' was added'.format(opt=k))
+ check[k] = v
+ simple_opts += custom_params.keys()
+
+ # Remove obsolete custom params
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
+ changed = True
+ reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+ del check[opt]
+
+ if module.params['metric']:
+ if 'type' not in check or check['type'] != 'metric':
+ check['type'] = 'metric'
+ changed = True
+ reasons.append('`type\' was not defined or not `metric\'')
+ if not module.params['metric'] and 'type' in check:
+ del check['type']
+ changed = True
+ reasons.append('`type\' was defined')
+
+ if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+ subdue = {'begin': module.params['subdue_begin'],
+ 'end': module.params['subdue_end'],
+ }
+ if 'subdue' not in check or check['subdue'] != subdue:
+ check['subdue'] = subdue
+ changed = True
+ reasons.append('`subdue\' did not exist or was different')
+ else:
+ if 'subdue' in check:
+ del check['subdue']
+ changed = True
+ reasons.append('`subdue\' was removed')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ try:
+ stream = open(path, 'w')
+ stream.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ finally:
+ if stream:
+ stream.close()
+
+ return changed, reasons
+
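+# Illustrative layout of the JSON file sensu_check() manages (placeholder
+# values, mirroring the EXAMPLES above):
+#
+#   {
+#     "checks": {
+#       "cpu_load": {
+#         "command": "/etc/sensu/plugins/system/cpu-mpstat-metrics.rb",
+#         "handlers": ["relay"],
+#         "subscribers": ["common"],
+#         "interval": 60,
+#         "type": "metric"
+#       }
+#     }
+#   }
+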
+
+def main():
+
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list'},
+ 'subscribers': {'type': 'list'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'ttl': {'type': 'int'},
+ 'handle': {'type': 'bool'},
+ 'subdue_begin': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
+ 'dependencies': {'type': 'list'},
+ 'metric': {'type': 'bool', 'default': 'no'},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
+ 'high_flap_threshold': {'type': 'int'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
+ }
+
+ required_together = [['subdue_begin', 'subdue_end']]
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+ if module.params['state'] != 'absent' and module.params['command'] is None:
+ module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_check(module, path, name, state, backup)
+
+ module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py
new file mode 100644
index 00000000..35444f60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_client
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu client configuration
+description:
+ - Manages Sensu client configuration.
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the client should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the client. The name cannot contain special characters or spaces.
+ - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu).
+ address:
+ type: str
+ description:
+ - An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
+      - If not specified, it defaults to a non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu).
+ subscriptions:
+ type: list
+ description:
+ - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver).
+ - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
+ - The subscriptions array items must be strings.
+ safe_mode:
+ description:
+ - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check.
+ type: bool
+ default: 'no'
+ redact:
+ type: list
+ description:
+ - Client definition attributes to redact (values) when logging and sending client keepalives.
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the Sensu client socket.
+ keepalives:
+ description:
+ - If Sensu should monitor keepalives for this client.
+ type: bool
+ default: 'yes'
+ keepalive:
+ type: dict
+ description:
+ - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc).
+ registration:
+ type: dict
+ description:
+ - The registration definition scope, used to configure Sensu registration event handlers.
+ deregister:
+ description:
+ - If a deregistration event should be created upon Sensu client process stop.
+ - Default is C(false).
+ type: bool
+ deregistration:
+ type: dict
+ description:
+ - The deregistration definition scope, used to configure automated Sensu client de-registration.
+ ec2:
+ type: dict
+ description:
+ - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only).
+ chef:
+ type: dict
+ description:
+ - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only).
+ puppet:
+ type: dict
+ description:
+ - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only).
+ servicenow:
+ type: dict
+ description:
+ - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only).
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Minimum possible configuration
+- name: Configure Sensu client
+ community.general.sensu_client:
+ subscriptions:
+ - default
+
+# With customization
+- name: Configure Sensu client
+ community.general.sensu_client:
+ name: "{{ ansible_fqdn }}"
+ address: "{{ ansible_default_ipv4['address'] }}"
+ subscriptions:
+ - default
+ - webserver
+ redact:
+ - password
+ socket:
+ bind: 127.0.0.1
+ port: 3030
+ keepalive:
+ thresholds:
+ warning: 180
+ critical: 300
+ handlers:
+ - email
+ custom:
+ - broadcast: irc
+ occurrences: 3
+ register: client
+ notify:
+ - Restart sensu-client
+
+- name: Secure Sensu client configuration file
+ ansible.builtin.file:
+ path: "{{ client['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+
+- name: Delete the Sensu client configuration
+ community.general.sensu_client:
+ state: "absent"
+'''
+
+RETURN = '''
+config:
+ description: Effective client configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'client', 'subscriptions': ['default']}
+file:
+ description: Path to the client configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/client.json"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=False),
+ address=dict(type='str', required=False),
+ subscriptions=dict(type='list', required=False),
+ safe_mode=dict(type='bool', required=False, default=False),
+ redact=dict(type='list', required=False),
+ socket=dict(type='dict', required=False),
+ keepalives=dict(type='bool', required=False, default=True),
+ keepalive=dict(type='dict', required=False),
+ registration=dict(type='dict', required=False),
+ deregister=dict(type='bool', required=False),
+ deregistration=dict(type='dict', required=False),
+ ec2=dict(type='dict', required=False),
+ chef=dict(type='dict', required=False),
+ puppet=dict(type='dict', required=False),
+ servicenow=dict(type='dict', required=False)
+ ),
+ required_if=[
+ ['state', 'present', ['subscriptions']]
+ ]
+ )
+
+ state = module.params['state']
+ path = "/etc/sensu/conf.d/client.json"
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build client configuration from module arguments
+ config = {'client': {}}
+ args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact',
+ 'socket', 'keepalives', 'keepalive', 'registration', 'deregister',
+ 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['client'][arg] = module.params[arg]
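+    # Illustrative result (placeholder values): with only name and
+    # subscriptions set, config == {'client': {'name': 'client',
+    # 'subscriptions': ['default']}}, matching the RETURN sample above.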
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+        with open(path, 'r') as f:
+            current_config = json.load(f)
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Client configuration is already up to date',
+ config=config['client'],
+ file=path)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Client configuration would have been updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+
+ try:
+ with open(path, 'w') as client:
+ client.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Client configuration updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py
new file mode 100644
index 00000000..53152edc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_handler
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu handler configuration
+description:
+ - Manages Sensu handler configuration
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the handler should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the handler. The name cannot contain special characters or spaces.
+ required: True
+ type:
+ type: str
+ description:
+ - The handler type
+ choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ]
+ filter:
+ type: str
+ description:
+ - The Sensu event filter (name) to use when filtering events for the handler.
+ filters:
+ type: list
+ description:
+ - An array of Sensu event filters (names) to use when filtering events for the handler.
+ - Each array item must be a string.
+ severities:
+ type: list
+ description:
+ - An array of check result severities the handler will handle.
+ - 'NOTE: event resolution bypasses this filtering.'
+ - "Example: [ 'warning', 'critical', 'unknown' ]."
+ mutator:
+ type: str
+ description:
+ - The Sensu event mutator (name) to use to mutate event data for the handler.
+ timeout:
+ type: int
+ description:
+ - The handler execution duration timeout in seconds (hard stop).
+ - Only used by pipe and tcp handler types.
+ default: 10
+ handle_silenced:
+ description:
+ - If events matching one or more silence entries should be handled.
+ type: bool
+ default: 'no'
+ handle_flapping:
+ description:
+ - If events in the flapping state should be handled.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - The handler command to be executed.
+ - The event data is passed to the process via STDIN.
+ - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").'
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the TCP/UDP handler socket.
+ - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").'
+ pipe:
+ type: dict
+ description:
+ - The pipe definition scope, used to configure the Sensu transport pipe.
+ - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
+ handlers:
+ type: list
+ description:
+ - An array of Sensu event handlers (names) to use for events using the handler set.
+ - Each array item must be a string.
+ - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Configure a handler that sends event data as STDIN (pipe)
+- name: Configure IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ type: "pipe"
+ command: "/usr/local/bin/notify-irc.sh"
+ severities:
+ - "ok"
+ - "critical"
+ - "warning"
+ - "unknown"
+ timeout: 15
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+# Delete a handler
+- name: Delete IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ state: "absent"
+
+# Example of a TCP handler
+- name: Configure TCP Sensu handler
+ community.general.sensu_handler:
+ name: "tcp_handler"
+ type: "tcp"
+ timeout: 30
+ socket:
+ host: "10.0.1.99"
+ port: 4444
+ register: handler
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+- name: Secure Sensu handler configuration file
+ ansible.builtin.file:
+ path: "{{ handler['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+'''
+
+RETURN = '''
+config:
+ description: Effective handler configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
+file:
+ description: Path to the handler configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/handlers/irc.json"
+name:
+ description: Name of the handler
+ returned: success
+ type: str
+ sample: "irc"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
+ filter=dict(type='str', required=False),
+ filters=dict(type='list', required=False),
+ severities=dict(type='list', required=False),
+ mutator=dict(type='str', required=False),
+ timeout=dict(type='int', required=False, default=10),
+ handle_silenced=dict(type='bool', required=False, default=False),
+ handle_flapping=dict(type='bool', required=False, default=False),
+ command=dict(type='str', required=False),
+ socket=dict(type='dict', required=False),
+ pipe=dict(type='dict', required=False),
+ handlers=dict(type='list', required=False),
+ ),
+ required_if=[
+ ['state', 'present', ['type']],
+ ['type', 'pipe', ['command']],
+ ['type', 'tcp', ['socket']],
+ ['type', 'udp', ['socket']],
+ ['type', 'transport', ['pipe']],
+ ['type', 'set', ['handlers']]
+ ]
+ )
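+ # required_if above encodes the per-type constraints from DOCUMENTATION:
+ # e.g. a (hypothetical) task with type='tcp' but no 'socket' fails argument
+ # validation before any file is read or written.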
+
+ state = module.params['state']
+ name = module.params['name']
+ path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name)
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build handler configuration from module arguments
+ config = {'handlers': {name: {}}}
+ args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
+ 'handle_silenced', 'handle_flapping', 'command', 'socket',
+ 'pipe', 'handlers']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['handlers'][name][arg] = module.params[arg]
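+ # For illustration (hypothetical values): name='irc', type='pipe' and
+ # command='/usr/local/bin/notify-irc.sh' yield, together with the always-set
+ # defaults (timeout, handle_silenced and handle_flapping are never None):
+ # {'handlers': {'irc': {'type': 'pipe', 'timeout': 10,
+ # 'handle_silenced': False, 'handle_flapping': False,
+ # 'command': '/usr/local/bin/notify-irc.sh'}}}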
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+ with open(path, 'r') as f:
+ current_config = json.load(f)
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Handler configuration is already up to date',
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Handler configuration would have been updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ try:
+ with open(path, 'w') as handler:
+ handler.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Handler configuration updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py
new file mode 100644
index 00000000..12dc5d20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Steven Bambling <smbambling@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_silence
+author: Steven Bambling (@smbambling)
+short_description: Manage Sensu silence entries
+description:
+ - Create and clear (delete) silence entries via the Sensu API
+ for subscriptions and checks.
+options:
+ check:
+ type: str
+ description:
+ - Specifies the check which the silence entry applies to.
+ creator:
+ type: str
+ description:
+ - Specifies the entity responsible for this entry.
+ expire:
+ type: int
+ description:
+ - If specified, the silence entry will be automatically cleared
+ after this number of seconds.
+ expire_on_resolve:
+ description:
+ - If specified as true, the silence entry will be automatically
+ cleared once the condition it is silencing is resolved.
+ type: bool
+ reason:
+ type: str
+ description:
+ - If specified, this free-form string is used to provide context or
+ rationale for the reason this silence entry was created.
+ state:
+ type: str
+ description:
+ - Whether to create or clear (delete) a silence entry via the Sensu API.
+ default: present
+ choices: ['present', 'absent']
+ subscription:
+ type: str
+ description:
+ - Specifies the subscription which the silence entry applies to.
+ - To create a silence entry for a client prepend C(client:) to client name.
+ Example - C(client:server1.example.dev)
+ required: true
+ url:
+ type: str
+ description:
+ - Specifies the URL of the Sensu monitoring host server.
+ required: false
+ default: http://127.0.0.1:4567
+'''
+
+EXAMPLES = '''
+# Silence ALL checks for a given client
+- name: Silence server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ creator: "{{ ansible_user_id }}"
+ reason: Performing maintenance
+
+# Silence specific check for a client
+- name: Silence CPU_Usage check for server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ check: CPU_Usage
+ creator: "{{ ansible_user_id }}"
+ reason: Investigating an alert issue
+
+# Silence multiple clients from a dict, e.g. defined in vars as:
+#   silence:
+#     server1.example.dev:
+#       reason: 'Deployment in progress'
+#     server2.example.dev:
+#       reason: 'Deployment in progress'
+
+- name: Silence several clients from a dict
+ community.general.sensu_silence:
+ subscription: "client:{{ item.key }}"
+ reason: "{{ item.value.reason }}"
+ creator: "{{ ansible_user_id }}"
+ with_dict: "{{ silence }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def query(module, url, check, subscription):
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
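+ # (Equivalent, more idiomatic filtering would be: request_data =
+ # {k: v for k, v in request_data.items() if v is not None})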
+
+ response, info = fetch_url(
+ module, url, method='GET',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] == 500:
+ module.fail_json(
+ msg="Failed to query silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, False
+
+
+def clear(module, url, check, subscription):
+ # Test if silence exists before clearing
+ (rc, out, changed) = query(module, url, check, subscription)
+
+ d = dict((i['subscription'], i['check']) for i in out)
+ subscription_exists = subscription in d
+ if check and subscription_exists:
+ exists = (check == d[subscription])
+ else:
+ exists = subscription_exists
+
+ # If the check/subscription doesn't exist,
+ # return with a changed state of False
+ if not exists:
+ return False, out, changed
+
+ # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced/clear'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 204:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def create(
+ module, url, check, creator, expire,
+ expire_on_resolve, reason, subscription):
+ (rc, out, changed) = query(module, url, check, subscription)
+ for i in out:
+ if (i['subscription'] == subscription):
+ # A parameter left as None (not supplied) matches any existing value;
+ # comparing creator/reason against '' here would never match, since
+ # their defaults are None, defeating idempotency.
+ if (
+ (check is None or check == i['check']) and
+ (creator is None or creator == i['creator']) and
+ (reason is None or reason == i['reason']) and
+ (expire is None or expire == i['expire']) and
+ (expire_on_resolve is None or
+ expire_on_resolve == i['expire_on_resolve'])
+ ):
+ return False, out, False
+
+ # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'creator': creator,
+ 'expire': expire,
+ 'expire_on_resolve': expire_on_resolve,
+ 'reason': reason,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 201:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" %
+ (subscription, info['msg'])
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ check=dict(required=False),
+ creator=dict(required=False),
+ expire=dict(type='int', required=False),
+ expire_on_resolve=dict(type='bool', required=False),
+ reason=dict(required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ subscription=dict(required=True),
+ url=dict(required=False, default='http://127.0.0.1:4567'),
+ ),
+ supports_check_mode=True
+ )
+
+ url = module.params['url']
+ check = module.params['check']
+ creator = module.params['creator']
+ expire = module.params['expire']
+ expire_on_resolve = module.params['expire_on_resolve']
+ reason = module.params['reason']
+ subscription = module.params['subscription']
+ state = module.params['state']
+
+ if state == 'present':
+ (rc, out, changed) = create(
+ module, url, check, creator,
+ expire, expire_on_resolve, reason, subscription
+ )
+
+ if state == 'absent':
+ (rc, out, changed) = clear(module, url, check, subscription)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py
new file mode 100644
index 00000000..6316254d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+description:
+ - Manage which I(sensu channels) a machine should subscribe to
+options:
+ name:
+ type: str
+ description:
+ - The name of the channel
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the machine should subscribe or unsubscribe from the channel
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the subscriptions JSON file
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so you
+ can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ required: false
+ default: no
+requirements: [ ]
+author: Anders Ingemann (@andsens)
+'''
+
+RETURN = '''
+reasons:
+ description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: Subscribe to nginx checks
+ community.general.sensu_subscription: name=nginx
+
+# Unsubscribe from the common checks channel
+- name: Unsubscribe from common checks
+ community.general.sensu_subscription: name=common state=absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+ with open(path) as f:
+ config = json.load(f)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+
+ if 'client' not in config:
+ if state == 'absent':
+ reasons.append('`client\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client'] = {}
+ changed = True
+ reasons.append('`client\' did not exist')
+
+ if 'subscriptions' not in config['client']:
+ if state == 'absent':
+ reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client']['subscriptions'] = []
+ changed = True
+ reasons.append('`client.subscriptions\' did not exist')
+
+ if name not in config['client']['subscriptions']:
+ if state == 'absent':
+ reasons.append('channel subscription was absent')
+ return changed, reasons
+ config['client']['subscriptions'].append(name)
+ changed = True
+ reasons.append('channel subscription was absent and state is `present\'')
+ else:
+ if state == 'absent':
+ config['client']['subscriptions'].remove(name)
+ changed = True
+ reasons.append('channel subscription was present and state is `absent\'')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ with open(path, 'w') as f:
+ f.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, reasons
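+# For illustration (hypothetical): subscribing to 'nginx' on a host with no
+# prior config writes {"client": {"subscriptions": ["nginx"]}} to the default
+# path, with one reason recorded for each key that had to be created.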
+
+
+def main():
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ }
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_subscription(module, path, name, state, backup)
+
+ module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py
new file mode 100644
index 00000000..77e3b153
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Renato Orgito <orgito@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: spectrum_device
+short_description: Creates/deletes devices in CA Spectrum.
+description:
+ - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html).
+ - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1
+author: "Renato Orgito (@orgito)"
+options:
+ device:
+ type: str
+ aliases: [ host, name ]
+ required: true
+ description:
+ - IP address of the device.
+ - If a hostname is given, it will be resolved to the IP address.
+ community:
+ type: str
+ description:
+ - SNMP community used for device discovery.
+ - Required when C(state=present).
+ required: true
+ landscape:
+ type: str
+ required: true
+ description:
+ - Landscape handle of the SpectroServer to which add or remove the device.
+ state:
+ type: str
+ required: false
+ description:
+ - On C(present) creates the device when it does not exist.
+ - On C(absent) removes the device when it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ url:
+ type: str
+ aliases: [ oneclick_url ]
+ required: true
+ description:
+ - HTTP, HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port]
+ url_username:
+ type: str
+ aliases: [ oneclick_user ]
+ required: true
+ description:
+ - Oneclick user name.
+ url_password:
+ type: str
+ aliases: [ oneclick_password ]
+ required: true
+ description:
+ - Oneclick user password.
+ use_proxy:
+ required: false
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in an environment
+ variable on the target hosts.
+ default: 'yes'
+ type: bool
+ validate_certs:
+ required: false
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+ agentport:
+ type: int
+ required: false
+ description:
+ - UDP port used for SNMP discovery.
+ default: 161
+notes:
+ - The devices will be created inside the I(Universe) container of the specified landscape.
+ - All the operations will be performed only on the specified landscape.
+'''
+
+EXAMPLES = '''
+- name: Add device to CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ community: secret
+ landscape: '0x100000'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ state: present
+
+
+- name: Remove device from CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ landscape: '{{ landscape_handle }}'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ use_proxy: no
+ state: absent
+'''
+
+RETURN = '''
+device:
+ description: device data when state = present
+ returned: success
+ type: dict
+ sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+'''
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
+ headers = {
+ "Content-Type": "application/xml",
+ "Accept": "application/xml"
+ }
+
+ url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+ response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+ if info['status'] == 401:
+ module.fail_json(msg="failed to authenticate to Oneclick server")
+
+ if info['status'] not in (200, 201, 204):
+ module.fail_json(msg=info['msg'])
+
+ return response.read()
+
+
+def post(resource, xml=None):
+ return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+ return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+ try:
+ device_ip = gethostbyname(module.params.get('device'))
+ except gaierror:
+ module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+ return device_ip
+
+
+def get_device(device_ip):
+ """Query OneClick for the device using the IP Address"""
+ resource = '/models'
+ landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+ landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
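+ # e.g. (hypothetical) landscape='0x100000' gives mh_min='0x100000' and
+ # mh_max='0x200000'; model handles of devices in that landscape fall in
+ # between, which is what the range filter in the XML below relies on.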
+
+ xml = """<?xml version="1.0" encoding="UTF-8"?>
+ <rs:model-request throttlesize="5"
+ xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+ <rs:target-models>
+ <rs:models-search>
+ <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+ <action-models>
+ <filtered-models>
+ <and>
+ <equals>
+ <model-type>SearchManager</model-type>
+ </equals>
+ <greater-than>
+ <attribute id="0x129fa">
+ <value>{mh_min}</value>
+ </attribute>
+ </greater-than>
+ <less-than>
+ <attribute id="0x129fa">
+ <value>{mh_max}</value>
+ </attribute>
+ </less-than>
+ </and>
+ </filtered-models>
+ <action>FIND_DEV_MODELS_BY_IP</action>
+ <attribute id="AttributeID.NETWORK_ADDRESS">
+ <value>{search_ip}</value>
+ </attribute>
+ </action-models>
+ </rs:search-criteria>
+ </rs:models-search>
+ </rs:target-models>
+ <rs:requested-attribute id="0x12d7f" /> <!--Network Address-->
+ </rs:model-request>
+ """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+ result = post(resource, xml=xml)
+
+ root = ET.fromstring(result)
+
+ if root.get('total-models') == '0':
+ return None
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+ # get the first device
+ model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+ if model.get('error'):
+ module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+ # get the attributes
+ model_handle = model.get('mh')
+
+ model_address = model.find('./*[@id="0x12d7f"]').text
+
+ # derive the landscape handle from the model handle of the device
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
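+ # e.g. model_handle='0x1007ab': 0x1007ab // 0x100000 * 0x100000 -> '0x100000'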
+
+ device = dict(
+ model_handle=model_handle,
+ address=model_address,
+ landscape=model_landscape)
+
+ return device
+
+
+def add_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device:
+ module.exit_json(changed=False, device=device)
+
+ if module.check_mode:
+ device = dict(
+ model_handle=None,
+ address=device_ip,
+ landscape="0x%x" % int(module.params.get('landscape'), 16))
+ module.exit_json(changed=True, device=device)
+
+ resource = 'model?ipaddress=' + device_ip + '&commstring=' + module.params.get('community')
+ resource += '&landscapeid=' + module.params.get('landscape')
+
+ if module.params.get('agentport', None):
+ resource += '&agentport=' + str(module.params.get('agentport', 161))
+
+ result = post(resource)
+ root = ET.fromstring(result)
+
+ if root.get('error') != 'Success':
+ module.fail_json(msg=root.get('error-message'))
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ model = root.find('ca:model', namespace)
+
+ model_handle = model.get('mh')
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=device_ip,
+ landscape=model_landscape,
+ )
+
+ module.exit_json(changed=True, device=device)
+
+
+def remove_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ resource = '/model/' + device['model_handle']
+ result = delete(resource)
+
+ root = ET.fromstring(result)
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ error = root.find('ca:error', namespace).text
+
+ if error != 'Success':
+ error_message = root.find('ca:error-message', namespace).text
+ module.fail_json(msg="%s %s" % (error, error_message))
+
+ module.exit_json(changed=True)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(required=True, aliases=['host', 'name']),
+ landscape=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ?
+ agentport=dict(type='int', default=161),
+ url=dict(required=True, aliases=['oneclick_url']),
+ url_username=dict(required=True, aliases=['oneclick_user']),
+ url_password=dict(required=True, no_log=True, aliases=['oneclick_password']),
+ use_proxy=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ required_if=[('state', 'present', ['community'])],
+ supports_check_mode=True
+ )
+
+ if module.params.get('state') == 'present':
+ add_device()
+ else:
+ remove_device()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py
new file mode 100644
index 00000000..8e2d19a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: stackdriver
+short_description: Send code deploy and annotation events to stackdriver
+description:
+ - Send code deploy and annotation events to Stackdriver
+author: "Ben Whaley (@bwhaley)"
+options:
+ key:
+ type: str
+ description:
+ - API key.
+ required: true
+ event:
+ type: str
+ description:
+ - The type of event to send, either annotation or deploy
+ choices: ['annotation', 'deploy']
+ required: true
+ revision_id:
+ type: str
+ description:
+ - The revision of the code that was deployed. Required for deploy events
+ deployed_by:
+ type: str
+ description:
+ - The person or robot responsible for deploying the code
+ default: "Ansible"
+ deployed_to:
+ type: str
+ description:
+ - "The environment code was deployed to. (ie: development, staging, production)"
+ repository:
+ type: str
+ description:
+ - The repository (or project) deployed
+ msg:
+ type: str
+ description:
+ - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation events.
+ annotated_by:
+ type: str
+ description:
+ - The person or robot who the annotation should be attributed to.
+ default: "Ansible"
+ level:
+ type: str
+ description:
+ - One of INFO/WARN/ERROR. Defaults to INFO if not supplied. May affect display.
+ choices: ['INFO', 'WARN', 'ERROR']
+ default: 'INFO'
+ instance_id:
+ type: str
+ description:
+ - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown.
+ event_epoch:
+ type: str
+ description:
+ - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
+'''
+
+EXAMPLES = '''
+- name: Send a code deploy event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: deploy
+ deployed_to: production
+ deployed_by: leeroyjenkins
+ repository: MyWebApp
+ revision_id: abcd123
+
+- name: Send an annotation event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: annotation
+ msg: Greetings from Ansible
+ annotated_by: leeroyjenkins
+ level: WARN
+ instance_id: i-abcd1234
+'''
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
+ """Send a deploy event to Stackdriver"""
+ deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
+
+ params = {}
+ params['revision_id'] = revision_id
+ params['deployed_by'] = deployed_by
+ if deployed_to:
+ params['deployed_to'] = deployed_to
+ if repository:
+ params['repository'] = repository
+
+ return do_send_request(module, deploy_api, params, key)
+
+
+def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
+ """Send an annotation event to Stackdriver"""
+ annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
+
+ params = {}
+ params['message'] = msg
+ if annotated_by:
+ params['annotated_by'] = annotated_by
+ if level:
+ params['level'] = level
+ if instance_id:
+ params['instance_id'] = instance_id
+ if event_epoch:
+ params['event_epoch'] = event_epoch
+
+ return do_send_request(module, annotation_api, params, key)
+
+
+def do_send_request(module, url, params, key):
+ data = json.dumps(params)
+ headers = {
+ 'Content-Type': 'application/json',
+ 'x-stackdriver-apikey': key
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict( # @TODO add types
+ key=dict(required=True, no_log=True),
+ event=dict(required=True, choices=['deploy', 'annotation']),
+ msg=dict(),
+ revision_id=dict(),
+ annotated_by=dict(default='Ansible'),
+ level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
+ instance_id=dict(),
+ event_epoch=dict(), # @TODO int?
+ deployed_by=dict(default='Ansible'),
+ deployed_to=dict(),
+ repository=dict(),
+ ),
+ supports_check_mode=True
+ )
+
+ key = module.params["key"]
+ event = module.params["event"]
+
+ # Annotation params
+ msg = module.params["msg"]
+ annotated_by = module.params["annotated_by"]
+ level = module.params["level"]
+ instance_id = module.params["instance_id"]
+ event_epoch = module.params["event_epoch"]
+
+ # Deploy params
+ revision_id = module.params["revision_id"]
+ deployed_by = module.params["deployed_by"]
+ deployed_to = module.params["deployed_to"]
+ repository = module.params["repository"]
+
+ ##################################################################
+ # deploy requires revision_id
+ # annotation requires msg
+ # We verify these manually
+ ##################################################################
+
+ if event == 'deploy':
+ if not revision_id:
+ module.fail_json(msg="revision_id required for deploy events")
+ try:
+ send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+ except Exception as e:
+ module.fail_json(msg="unable to sent deploy event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ if event == 'annotation':
+ if not msg:
+ module.fail_json(msg="msg required for annotation events")
+ try:
+ send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+ except Exception as e:
+ module.fail_json(msg="unable to sent annotation event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, deployed_by=deployed_by)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py
new file mode 100644
index 00000000..0414f6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py
@@ -0,0 +1,465 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+ - Creates a maintenance window for status.io
+ - Deletes a maintenance window for status.io
+notes:
+ - You can use the apiary API URL (http://docs.statusio.apiary.io/) to
+ capture API traffic
+ - Use start_date and start_time with minutes to set a future maintenance window
+author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+options:
+ title:
+ type: str
+ description:
+ - A descriptive title for the maintenance window
+ default: "A new maintenance window"
+ desc:
+ type: str
+ description:
+ - Message describing the maintenance window
+ default: "Created by Ansible"
+ state:
+ type: str
+ description:
+ - Desired state of the maintenance window.
+ default: "present"
+ choices: ["present", "absent"]
+ api_id:
+ type: str
+ description:
+ - Your unique API ID from status.io
+ required: true
+ api_key:
+ type: str
+ description:
+ - Your unique API Key from status.io
+ required: true
+ statuspage:
+ type: str
+ description:
+ - Your unique StatusPage ID from status.io
+ required: true
+ url:
+ type: str
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ default: "https://api.status.io"
+ components:
+ type: list
+ description:
+ - The given name of your component (server name)
+ aliases: ['component']
+ containers:
+ type: list
+ description:
+ - The given name of your container (data center)
+ aliases: ['container']
+ all_infrastructure_affected:
+ description:
+ - If it affects all components and containers
+ type: bool
+ default: 'no'
+ automation:
+ description:
+ - Automatically start and end the maintenance window
+ type: bool
+ default: 'no'
+ maintenance_notify_now:
+ description:
+ - Notify subscribers now
+ type: bool
+ default: 'no'
+ maintenance_notify_72_hr:
+ description:
+ - Notify subscribers 72 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_24_hr:
+ description:
+ - Notify subscribers 24 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_1_hr:
+ description:
+ - Notify subscribers 1 hour before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_id:
+ type: str
+ description:
+ - The maintenance id number when deleting a maintenance window
+ minutes:
+ type: int
+ description:
+ - The length of time in UTC that the maintenance will run
+ (starting from playbook runtime)
+ default: 10
+ start_date:
+ type: str
+ description:
+ - Date maintenance is expected to start (Month/Day/Year) (UTC)
+ - End Date is worked out from start_date + minutes
+ start_time:
+ type: str
+ description:
+ - Time maintenance is expected to start (Hour:Minutes) (UTC)
+ - End Time is worked out from start_time + minutes
+'''
+
+EXAMPLES = '''
+- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
+ community.general.statusio_maintenance:
+ title: Router Upgrade from ansible
+ desc: Performing a Router Upgrade
+ components: server1.example.com
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+
+- name: Create a maintenance window for 60 minutes on server1 and server2
+ community.general.statusio_maintenance:
+ title: Routine maintenance
+ desc: Some security updates
+ components:
+ - server1.example.com
+ - server2.example.com
+ minutes: 60
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+ delegate_to: localhost
+
+- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
+ community.general.statusio_maintenance:
+ title: Data center downtime
+ desc: Performing an upgrade to our data center
+ components: Primary Data Center
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ start_date: 01/01/2016
+ start_time: 12:00
+ minutes: 1440
+
+- name: Delete a maintenance window
+ community.general.statusio_maintenance:
+ title: Remove a maintenance window
+ maintenance_id: 561f90faf74bc94a4700087b
+ statuspage: statuspage_id
+ api_id: api_id
+ api_key: api_key
+ state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+ headers = {
+ "x-api-id": api_id,
+ "x-api-key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = open_url(
+ url + "/v2/component/list/" + statuspage, headers=headers)
+ data = json.loads(response.read())
+ if data['status']['message'] == 'Authentication failed':
+ return 1, None, None, "Authentication failed: " \
+ "Check api_id/api_key and statuspage id."
+ else:
+ auth_headers = headers
+ auth_content = data
+ except Exception as e:
+ return 1, None, None, to_native(e)
+ return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+ host_ids = []
+ lower_components = [x.lower() for x in components]
+ for result in auth_content["result"]:
+ if result['name'].lower() in lower_components:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_components.remove(result['name'].lower())
+ if len(lower_components):
+ # items not found in the api
+ return 1, None, lower_components
+ return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+ host_ids = []
+ lower_containers = [x.lower() for x in containers]
+ for result in auth_content["result"]:
+ if result["containers"][0]["name"].lower() in lower_containers:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_containers.remove(result["containers"][0]["name"].lower())
+
+ if len(lower_containers):
+ # items not found in the api
+ return 1, None, lower_containers
+ return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
+ returned_date = []
+ if start_date and start_time:
+ try:
+ datetime.datetime.strptime(start_date, '%m/%d/%Y')
+ returned_date.append(start_date)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_date format."
+ try:
+ datetime.datetime.strptime(start_time, '%H:%M')
+ returned_date.append(start_time)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_time format."
+ try:
+ # Work out end date/time based on minutes
+ date_time_start = datetime.datetime.strptime(
+ start_time + start_date, '%H:%M%m/%d/%Y')
+ delta = date_time_start + datetime.timedelta(minutes=minutes)
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ except (NameError, ValueError):
+ return 1, None, "Couldn't work out a valid date"
+ else:
+ now = datetime.datetime.utcnow()
+ delta = now + datetime.timedelta(minutes=minutes)
+ # start_date
+ returned_date.append(now.strftime("%m/%d/%Y"))
+ returned_date.append(now.strftime("%H:%M"))
+ # end_date
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ return 0, returned_date, None
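+# For illustration (hypothetical): start_date='01/01/2016', start_time='12:00'
+# and minutes=60 return (0, ['01/01/2016', '12:00', '01/01/2016', '13:00'],
+# None), i.e. start date/time followed by the computed end date/time.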
+
+
+def create_maintenance(auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation, title, desc,
+ returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr):
+ returned_dates = [[x] for x in returned_date]
+ component_id = []
+ container_id = []
+ for val in host_ids:
+ component_id.append(val['component_id'])
+ container_id.append(val['container_id'])
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "components": component_id,
+ "containers": container_id,
+ "all_infrastructure_affected": str(int(all_infrastructure_affected)),
+ "automation": str(int(automation)),
+ "maintenance_name": title,
+ "maintenance_details": desc,
+ "date_planned_start": returned_dates[0],
+ "time_planned_start": returned_dates[1],
+ "date_planned_end": returned_dates[2],
+ "time_planned_end": returned_dates[3],
+ "maintenance_notify_now": str(int(maintenance_notify_now)),
+ "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
+ "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
+ "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
+ })
+ response = open_url(
+ url + "/v2/maintenance/schedule", data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+
+ if data["status"]["error"] == "yes":
+ return 1, None, data["status"]["message"]
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "maintenance_id": maintenance_id,
+ })
+ response = open_url(
+ url=url + "/v2/maintenance/delete",
+ data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+ if data["status"]["error"] == "yes":
+ return 1, None, "Invalid maintenance_id"
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_id=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ statuspage=dict(required=True),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ url=dict(default='https://api.status.io', required=False),
+ components=dict(type='list', required=False, default=None,
+ aliases=['component']),
+ containers=dict(type='list', required=False, default=None,
+ aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False,
+ required=False),
+ automation=dict(type='bool', default=False, required=False),
+ title=dict(required=False, default='A new maintenance window'),
+ desc=dict(required=False, default='Created by Ansible'),
+ minutes=dict(type='int', required=False, default=10),
+ maintenance_notify_now=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_id=dict(required=False, default=None),
+ start_date=dict(default=None, required=False),
+ start_time=dict(default=None, required=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ api_id = module.params['api_id']
+ api_key = module.params['api_key']
+ statuspage = module.params['statuspage']
+ state = module.params['state']
+ url = module.params['url']
+ components = module.params['components']
+ containers = module.params['containers']
+ all_infrastructure_affected = module.params['all_infrastructure_affected']
+ automation = module.params['automation']
+ title = module.params['title']
+ desc = module.params['desc']
+ minutes = module.params['minutes']
+ maintenance_notify_now = module.params['maintenance_notify_now']
+ maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
+ maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
+ maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
+ maintenance_id = module.params['maintenance_id']
+ start_date = module.params['start_date']
+ start_time = module.params['start_time']
+
+ if state == "present":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+ auth_content = {}
+
+ if minutes or (start_time and start_date):
+ (rc, returned_date, error) = get_date_time(
+ start_date, start_time, minutes)
+ if rc != 0:
+ module.fail_json(msg="Failed to set date/time: %s" % error)
+
+ if not components and not containers:
+ return module.fail_json(msg="A Component or Container must be "
+ "defined")
+ elif components and containers:
+ return module.fail_json(msg="Components and containers cannot "
+ "be used together")
+ else:
+ if components:
+ (rc, host_ids, error) = get_component_ids(auth_content,
+ components)
+ if rc != 0:
+ module.fail_json(msg="Failed to find component %s" % error)
+
+ if containers:
+ (rc, host_ids, error) = get_container_ids(auth_content,
+ containers)
+ if rc != 0:
+ module.fail_json(msg="Failed to find container %s" % error)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = create_maintenance(
+ auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation,
+ title, desc, returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr)
+ if rc == 0:
+ module.exit_json(changed=True, result="Successfully created "
+ "maintenance")
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s"
+ % error)
+
+ if state == "absent":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = delete_maintenance(
+ auth_headers, url, statuspage, maintenance_id)
+ if rc == 0:
+ module.exit_json(
+ changed=True,
+ result="Successfully deleted maintenance"
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete maintenance: %s" % error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py
new file mode 100644
index 00000000..bb4e60fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: uptimerobot
+short_description: Pause and start Uptime Robot monitoring
+description:
+ - This module will let you start and pause Uptime Robot Monitoring
+author: "Nate Kingsley (@nate-kingsley)"
+requirements:
+ - Valid Uptime Robot API Key
+options:
+ state:
+ type: str
+ description:
+ - Define whether the monitor should be running or paused.
+ required: true
+ choices: [ "started", "paused" ]
+ monitorid:
+ type: str
+ description:
+ - ID of the monitor to check.
+ required: true
+ apikey:
+ type: str
+ description:
+ - Uptime Robot API key.
+ required: true
+notes:
+ - Support for adding and removing monitors and alert contacts has not yet been implemented.
+'''
+
+EXAMPLES = '''
+- name: Pause the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: paused
+
+- name: Start the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: started
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+API_BASE = "https://api.uptimerobot.com/"
+
+API_ACTIONS = dict(
+ status='getMonitors?',
+ editMonitor='editMonitor?'
+)
+
+API_FORMAT = 'json'
+API_NOJSONCALLBACK = 1
+CHANGED_STATE = False
+SUPPORTS_CHECK_MODE = False
+
+
+def checkID(module, params):
+
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['status'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult
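+# For illustration (hypothetical values): with apikey='KEY' and
+# monitorid='12345', checkID() requests a URL of the form
+# https://api.uptimerobot.com/getMonitors?apiKey=KEY&monitors=12345&monitorID=12345&format=json&noJsonCallback=1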
+
+
+def startMonitor(module, params):
+
+ params['monitorStatus'] = 1
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def pauseMonitor(module, params):
+
+ params['monitorStatus'] = 0
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['started', 'paused']),
+ apikey=dict(required=True, no_log=True),
+ monitorid=dict(required=True)
+ ),
+ supports_check_mode=SUPPORTS_CHECK_MODE
+ )
+
+ params = dict(
+ apiKey=module.params['apikey'],
+ monitors=module.params['monitorid'],
+ monitorID=module.params['monitorid'],
+ format=API_FORMAT,
+ noJsonCallback=API_NOJSONCALLBACK
+ )
+
+ check_result = checkID(module, params)
+
+ if check_result['stat'] != "ok":
+ module.fail_json(
+ msg="failed",
+ result=check_result['message']
+ )
+
+ if module.params['state'] == 'started':
+ monitor_result = startMonitor(module, params)
+ else:
+ monitor_result = pauseMonitor(module, params)
+
+ module.exit_json(
+ msg="success",
+ result=monitor_result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py
new file mode 100644
index 00000000..0551ab20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mqtt
+short_description: Publish a message on an MQTT topic for the IoT
+description:
+ - Publish a message on an MQTT topic.
+options:
+ server:
+ type: str
+ description:
+ - MQTT broker address/name
+ default: localhost
+ port:
+ type: int
+ description:
+ - MQTT broker port number
+ default: 1883
+ username:
+ type: str
+ description:
+ - Username to authenticate against the broker.
+ password:
+ type: str
+ description:
+ - Password for C(username) to authenticate against the broker.
+ client_id:
+ type: str
+ description:
+ - MQTT client identifier
+ - If not specified, a value C(hostname + pid) will be used.
+ topic:
+ type: str
+ description:
+ - MQTT topic name
+ required: true
+ payload:
+ type: str
+ description:
+ - Payload. The special string C("None") may be used to send a NULL
+ (i.e. empty) payload which is useful to simply notify with the I(topic)
+ or to clear previously retained messages.
+ required: true
+ qos:
+ type: str
+ description:
+ - QoS (Quality of Service)
+ default: "0"
+ choices: [ "0", "1", "2" ]
+ retain:
+ description:
+ - Setting this flag causes the broker to retain (i.e. keep) the message, so that
+ applications that subsequently subscribe to the topic can receive the last
+ retained message immediately.
+ type: bool
+ default: 'no'
+ ca_cert:
+ type: path
+ description:
+ - The path to the Certificate Authority certificate files that are to be
+ treated as trusted by this client. If this is the only option given
+ then the client will operate in a similar manner to a web browser. That
+ is to say it will require the broker to have a certificate signed by the
+ Certificate Authorities in ca_certs and will communicate using TLS v1,
+ but will not attempt any form of authentication. This provides basic
+ network encryption but may not be sufficient depending on how the broker
+ is configured.
+ aliases: [ ca_certs ]
+ client_cert:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client certificate. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ certfile ]
+ client_key:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client private key. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ keyfile ]
+ tls_version:
+ description:
+ - Specifies the version of the SSL/TLS protocol to be used.
+ - By default (if the Python version supports it) the highest TLS version is
+ detected. If unavailable, TLS v1 is used.
+ type: str
+ choices:
+ - tlsv1.1
+ - tlsv1.2
+requirements: [ paho-mqtt ]
+notes:
+ - This module requires a connection to an MQTT broker such as Mosquitto
+ U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)).
+author: "Jan-Piet Mens (@jpmens)"
+'''
+
+EXAMPLES = '''
+- name: Publish a message on an MQTT topic
+ community.general.mqtt:
+ topic: 'service/ansible/{{ ansible_hostname }}'
+ payload: 'Hello at {{ ansible_date_time.iso8601 }}'
+ qos: 0
+ retain: False
+ client_id: ans001
+ delegate_to: localhost
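+
+# An illustrative TLS-enabled publish; the broker name and certificate paths
+# below are placeholders, not values shipped with this collection.
+- name: Publish a message over TLS
+ community.general.mqtt:
+ topic: 'service/ansible/{{ ansible_hostname }}'
+ payload: 'Hello over TLS'
+ server: broker.example.com
+ port: 8883
+ ca_cert: /etc/ssl/certs/ca.pem
+ client_cert: /etc/ssl/certs/client.pem
+ client_key: /etc/ssl/private/client.key
+ tls_version: tlsv1.2
+ delegate_to: localhost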
+'''
+
+# ===========================================
+# MQTT module support methods.
+#
+
+import os
+import ssl
+import traceback
+import platform
+from distutils.version import LooseVersion
+
+HAS_PAHOMQTT = True
+PAHOMQTT_IMP_ERR = None
+try:
+ import socket
+ import paho.mqtt.publish as mqtt
+except ImportError:
+ PAHOMQTT_IMP_ERR = traceback.format_exc()
+ HAS_PAHOMQTT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ tls_map = {}
+
+ try:
+ tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2
+ except AttributeError:
+ pass
+
+ try:
+ tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1
+ except AttributeError:
+ pass
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(default=1883, type='int'),
+ topic=dict(required=True),
+ payload=dict(required=True),
+ client_id=dict(default=None),
+ qos=dict(default="0", choices=["0", "1", "2"]),
+ retain=dict(default=False, type='bool'),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
+ client_cert=dict(default=None, type='path', aliases=['certfile']),
+ client_key=dict(default=None, type='path', aliases=['keyfile']),
+ tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_PAHOMQTT:
+ module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR)
+
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
+ ca_certs = module.params.get("ca_cert", None)
+ certfile = module.params.get("client_cert", None)
+ keyfile = module.params.get("client_key", None)
+ tls_version = module.params.get("tls_version", None)
+
+ if client_id is None:
+ client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
+
+ # The literal string 'None' is a sentinel for an empty (NULL) payload.
+ if payload == 'None':
+ payload = None
+
+ auth = None
+ if username is not None:
+ auth = {'username': username, 'password': password}
+
+ tls = None
+ if ca_certs is not None:
+ if tls_version:
+ tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23)
+ else:
+ if LooseVersion(platform.python_version()) <= "3.5.2":
+ # Specifying `None` on later versions of python seems sufficient to
+ # instruct python to autonegotiate the SSL/TLS connection. On versions
+ # 3.5.2 and lower though we need to specify the version.
+ #
+ # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was
+ # not available until 3.5.3.
+ tls_version = ssl.PROTOCOL_SSLv23
+
+ tls = {
+ 'ca_certs': ca_certs,
+ 'certfile': certfile,
+ 'keyfile': keyfile,
+ 'tls_version': tls_version,
+ }
+
+ try:
+ mqtt.single(
+ topic,
+ payload,
+ qos=qos,
+ retain=retain,
+ client_id=client_id,
+ hostname=server,
+ port=port,
+ auth=auth,
+ tls=tls
+ )
+ except Exception as e:
+ module.fail_json(
+ msg="unable to publish to MQTT broker %s" % to_native(e),
+ exception=traceback.format_exc()
+ )
+
+ module.exit_json(changed=False, topic=topic)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py
new file mode 100644
index 00000000..e6c5f183
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
+# Outline and parts are reused from Mark Theunissen's mysql_db module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mssql_db
+short_description: Add or remove MSSQL databases from a remote host.
+description:
+ - Add or remove MSSQL databases from a remote host.
+options:
+ name:
+ description:
+ - name of the database to add or remove
+ required: true
+ aliases: [ db ]
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with
+ type: str
+ login_host:
+ description:
+ - Host running the database
+ type: str
+ required: true
+ login_port:
+ description:
+ - Port of the MSSQL server. Requires login_host to be set to something other than localhost when login_port is used.
+ default: '1433'
+ type: str
+ state:
+ description:
+ - The database state
+ default: present
+ choices: [ "present", "absent", "import" ]
+ type: str
+ target:
+ description:
+ - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
+ files (C(.sql)) are supported.
+ type: str
+ autocommit:
+ description:
+ - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
+ within a transaction.
+ type: bool
+ default: 'no'
+notes:
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this
+ is as easy as C(pip install pymssql) (see M(ansible.builtin.pip)).
+requirements:
+ - python >= 2.7
+ - pymssql
+author: Vedit Firat Arig (@vedit)
+'''
+
+EXAMPLES = '''
+- name: Create a new database with name 'jackdata'
+ community.general.mssql_db:
+ name: jackdata
+ state: present
+
+# Copy database dump file to remote host and restore it to database 'my_db'
+- name: Copy database dump file to remote host
+ ansible.builtin.copy:
+ src: dump.sql
+ dest: /tmp
+
+- name: Restore the dump file to database 'my_db'
+ community.general.mssql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql
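+
+# An illustrative removal; 'my_db' matches the database restored above.
+- name: Drop the database 'my_db'
+ community.general.mssql_db:
+ name: my_db
+ state: absent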
+'''
+
+RETURN = '''
+#
+'''
+
+import os
+import traceback
+
+PYMSSQL_IMP_ERR = None
+try:
+ import pymssql
+except ImportError:
+ PYMSSQL_IMP_ERR = traceback.format_exc()
+ mssql_found = False
+else:
+ mssql_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
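+# Note: database names are interpolated directly into the statements below,
+# since pymssql parameter binding applies to values only, not identifiers.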
+def db_exists(conn, cursor, db):
+ cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
+ conn.commit()
+ return bool(cursor.rowcount)
+
+
+def db_create(conn, cursor, db):
+ cursor.execute("CREATE DATABASE [%s]" % db)
+ return db_exists(conn, cursor, db)
+
+
+def db_delete(conn, cursor, db):
+ try:
+ cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
+ except Exception:
+ pass
+ cursor.execute("DROP DATABASE [%s]" % db)
+ return not db_exists(conn, cursor, db)
+
+
+def db_import(conn, cursor, module, db, target):
+ if os.path.isfile(target):
+ with open(target, 'r') as backup:
+ sqlQuery = "USE [%s]\n" % db
+ for line in backup:
+ # 'GO' separates batches: execute the accumulated batch, then start a new one.
+ if line.startswith('GO'):
+ cursor.execute(sqlQuery)
+ sqlQuery = "USE [%s]\n" % db
+ else:
+ sqlQuery += line
+ cursor.execute(sqlQuery)
+ conn.commit()
+ return 0, "import successful", ""
+ else:
+ return 1, "cannot find target file", "cannot find target file"
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['db']),
+ login_user=dict(default=''),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(required=True),
+ login_port=dict(default='1433'),
+ target=dict(default=None),
+ autocommit=dict(type='bool', default=False),
+ state=dict(
+ default='present', choices=['present', 'absent', 'import'])
+ )
+ )
+
+ if not mssql_found:
+ module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR)
+
+ db = module.params['name']
+ state = module.params['state']
+ autocommit = module.params['autocommit']
+ target = module.params["target"]
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+
+ login_querystring = login_host
+ if login_port != "1433":
+ login_querystring = "%s:%s" % (login_host, login_port)
+
+ if login_user != "" and login_password == "":
+ module.fail_json(msg="when supplying login_user arguments login_password must be provided")
+
+ try:
+ conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
+ cursor = conn.cursor()
+ except Exception as e:
+ if "Unknown database" in str(e):
+ errno, errstr = e.args
+ module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
+ else:
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+ "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+
+ conn.autocommit(True)
+ changed = False
+
+ if db_exists(conn, cursor, db):
+ if state == "absent":
+ try:
+ changed = db_delete(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error deleting database: " + str(e))
+ elif state == "import":
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+ else:
+ if state == "present":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+ elif state == "import":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+
+ module.exit_json(changed=changed, db=db)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py
new file mode 100644
index 00000000..f82bd7ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_aggregate
+
+short_description: Manage NetApp cDOT aggregates.
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_aggregate) instead.
+
+description:
+- Create or destroy aggregates on NetApp cDOT.
+
+options:
+
+ state:
+ required: true
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+
+ name:
+ required: true
+ description:
+ - The name of the aggregate to manage.
+
+ disk_count:
+ description:
+ - Number of disks to place into the aggregate, including parity disks.
+ - The disks in this newly-created aggregate come from the spare disk pool.
+ - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
+ - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
+ - Required when C(state=present).
+
+'''
+
+EXAMPLES = """
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTAggregate(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ disk_count=dict(required=False, type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['disk_count'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.disk_count = p['disk_count']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_aggr(self):
+ """
+ Checks if aggregate exists.
+
+ :return:
+ True if aggregate found
+ False if aggregate is not found
+ :rtype: bool
+ """
+
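+ # Typical ZAPI lookup: wrap the match criteria ('aggr-attributes') in a
+ # <query> element and attach it to the aggr-get-iter request.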
+ aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-attributes', **{'aggregate-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ aggr_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(aggr_get_iter,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 13040 denotes an aggregate not being found.
+ if to_native(e.code) == "13040":
+ return False
+ else:
+ self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_aggr(self):
+ aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-create', **{'aggregate': self.name,
+ 'disk-count': str(self.disk_count)})
+
+ try:
+ self.server.invoke_successfully(aggr_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_aggr(self):
+ aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-destroy', **{'aggregate': self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_aggregate(self):
+ aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-rename', **{'aggregate': self.name,
+ 'new-aggregate-name':
+ self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ aggregate_exists = self.get_aggr()
+ rename_aggregate = False
+
+ # check if anything needs to be changed (add/delete/update)
+
+ if aggregate_exists:
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
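+ # NOTE: this comparison is always false, since the module exposes no
+ # new-name option; rename_aggregate() is effectively unreachable.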
+ if self.name is not None and not self.name == self.name:
+ rename_aggregate = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ # Aggregate does not exist, but requested state is present.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not aggregate_exists:
+ self.create_aggr()
+
+ else:
+ if rename_aggregate:
+ self.rename_aggregate()
+
+ elif self.state == 'absent':
+ self.delete_aggr()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTAggregate()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py
new file mode 100644
index 00000000..36c5416a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_license
+
+short_description: Manage NetApp cDOT protocol and feature licenses
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_license) instead.
+
+description:
+- Add or remove licenses on NetApp ONTAP.
+
+options:
+
+ remove_unused:
+ description:
+ - Remove licenses that have no controller affiliation in the cluster.
+ type: bool
+ default: false
+
+ remove_expired:
+ description:
+ - Remove licenses that have expired in the cluster.
+ type: bool
+ default: false
+
+ serial_number:
+ description:
+ - Serial number of the node associated with the license.
+ - This parameter is used primarily when removing a license for a specific service.
+ - If this parameter is not provided, the cluster serial number is used by default.
+
+ licenses:
+ description:
+ - List of licenses to add or remove.
+ - Please note that trying to remove a non-existent license will throw an error.
+ suboptions:
+ base:
+ description:
+ - Cluster Base License
+ nfs:
+ description:
+ - NFS License
+ cifs:
+ description:
+ - CIFS License
+ iscsi:
+ description:
+ - iSCSI License
+ fcp:
+ description:
+ - FCP License
+ cdmi:
+ description:
+ - CDMI License
+ snaprestore:
+ description:
+ - SnapRestore License
+ snapmirror:
+ description:
+ - SnapMirror License
+ flexclone:
+ description:
+ - FlexClone License
+ snapvault:
+ description:
+ - SnapVault License
+ snaplock:
+ description:
+ - SnapLock License
+ snapmanagersuite:
+ description:
+ - SnapManagerSuite License
+ snapprotectapps:
+ description:
+ - SnapProtectApp License
+ v_storageattach:
+ description:
+ - Virtual Attached Storage License
+
+'''
+
+
+EXAMPLES = """
+- name: Add licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ serial_number: #################
+ licenses:
+ nfs: #################
+ cifs: #################
+ iscsi: #################
+ fcp: #################
+ snaprestore: #################
+ flexclone: #################
+
+- name: Remove licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ licenses:
+ nfs: remove
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLicense(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ serial_number=dict(required=False, type='str', default=None),
+ remove_unused=dict(default=False, type='bool'),
+ remove_expired=dict(default=False, type='bool'),
+ licenses=dict(default=None, type='dict'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.serial_number = p['serial_number']
+ self.remove_unused = p['remove_unused']
+ self.remove_expired = p['remove_expired']
+ self.licenses = p['licenses']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_licensing_status(self):
+ """
+ Check licensing status
+
+ :return: package (key) and licensing status (value)
+ :rtype: dict
+ """
+ license_status = netapp_utils.zapi.NaElement('license-v2-status-list-info')
+ result = None
+ try:
+ result = self.server.invoke_successfully(license_status,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error checking license status: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ return_dictionary = {}
+ license_v2_status = result.get_child_by_name('license-v2-status')
+ if license_v2_status:
+ for license_v2_status_info in license_v2_status.get_children():
+ package = license_v2_status_info.get_child_content('package')
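+ # The 'method' child reports how the package is licensed and is
+ # used as its status value here.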
+ status = license_v2_status_info.get_child_content('method')
+ return_dictionary[package] = status
+
+ return return_dictionary
+
+ def remove_licenses(self, remove_list):
+ """
+ Remove requested licenses
+ :param:
+ remove_list : List of packages to remove
+
+ """
+ license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
+ for package in remove_list:
+ license_delete.add_new_child('package', package)
+
+ if self.serial_number is not None:
+ license_delete.add_new_child('serial-number', self.serial_number)
+
+ try:
+ self.server.invoke_successfully(license_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing license %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_unused_licenses(self):
+ """
+ Remove unused licenses
+ """
+ remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
+ try:
+ self.server.invoke_successfully(remove_unused,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing unused licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_expired_licenses(self):
+ """
+ Remove expired licenses
+ """
+ remove_expired = netapp_utils.zapi.NaElement('license-v2-delete-expired')
+ try:
+ self.server.invoke_successfully(remove_expired,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing expired licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def update_licenses(self):
+ """
+ Update licenses
+ """
+ # Remove unused and expired licenses, if requested.
+ if self.remove_unused:
+ self.remove_unused_licenses()
+
+ if self.remove_expired:
+ self.remove_expired_licenses()
+
+ # Next, add/remove specific requested licenses.
+ license_add = netapp_utils.zapi.NaElement('license-v2-add')
+ codes = netapp_utils.zapi.NaElement('codes')
+ remove_list = []
+ # 'licenses' may be omitted; fall back to an empty dict so iteration is safe.
+ for key, value in (self.licenses or {}).items():
+ str_value = str(value)
+ # Make sure license is not an empty string.
+ if str_value and str_value.strip():
+ if str_value.lower() == 'remove':
+ remove_list.append(str(key).lower())
+ else:
+ codes.add_new_child('license-code-v2', str_value)
+
+ # Remove requested licenses.
+ if len(remove_list) != 0:
+ self.remove_licenses(remove_list)
+
+ # Add requested licenses
+ if len(codes.get_children()) != 0:
+ license_add.add_child_elem(codes)
+ try:
+ self.server.invoke_successfully(license_add,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error adding licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ # Add / Update licenses.
+ license_status = self.get_licensing_status()
+ self.update_licenses()
+ new_license_status = self.get_licensing_status()
+
+ if license_status != new_license_status:
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLicense()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py
new file mode 100644
index 00000000..3236dbee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_lun
+
+short_description: Manage NetApp cDOT luns
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_lun) instead.
+
+description:
+- Create, destroy, resize luns on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified lun should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the lun to manage.
+ required: true
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the lun should exist on.
+ - Required when C(state=present).
+
+ size:
+ description:
+ - The size of the lun in C(size_unit).
+ - Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ force_resize:
+ description:
+ - Forcibly reduce the size. This flag is required when reducing the size of the LUN, as a guard against accidental shrinking.
+ default: false
+
+ force_remove:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
+ - If "false", destroying an online and mapped LUN will fail.
+ default: false
+
+ force_remove_fenced:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed while it is fenced.
+ - If "false", attempting to destroy a fenced LUN will fail.
+ - This field is available in Data ONTAP 8.2 and later.
+ default: false
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+
+'''
+
+EXAMPLES = """
+- name: Create LUN
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: mb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Resize Lun
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ force_resize: True
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: gb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLUN(object):
+
+ def __init__(self):
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ force_resize=dict(default=False, type='bool'),
+ force_remove=dict(default=False, type='bool'),
+ force_remove_fenced=dict(default=False, type='bool'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.size_unit = p['size_unit']
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.force_resize = p['force_resize']
+ self.force_remove = p['force_remove']
+ self.force_remove_fenced = p['force_remove_fenced']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_lun(self):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+
+ luns = []
+ tag = None
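+ # lun-get-iter pages its results; 'next-tag' carries the cursor for
+ # the next page until no tag is returned.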
+ while True:
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ if tag:
+ lun_info.add_new_child('tag', tag, True)
+
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+ query_details.add_new_child('vserver', self.vserver)
+ query_details.add_new_child('volume', self.flexvol_name)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ lun_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(lun_info, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attr_list = result.get_child_by_name('attributes-list')
+ luns.extend(attr_list.get_children())
+
+ tag = result.get_child_content('next-tag')
+
+ if tag is None:
+ break
+
+ # The LUNs have been extracted.
+ # Find the specified lun and extract details.
+ return_value = None
+ for lun in luns:
+ path = lun.get_child_content('path')
+ _rest, _splitter, found_name = path.rpartition('/')
+
+ if found_name == self.name:
+ size = lun.get_child_content('size')
+
+ # Find out if the lun is attached
+ attached_to = None
+ lun_id = None
+ if lun.get_child_content('mapped') == 'true':
+ lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-map-list-info', **{'path': path})
+
+ result = self.server.invoke_successfully(
+ lun_map_list, enable_tunneling=True)
+
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ for igroup_info in igroups.get_children():
+ igroup = igroup_info.get_child_content(
+ 'initiator-group-name')
+ attached_to = igroup
+ lun_id = igroup_info.get_child_content('lun-id')
+
+ return_value = {
+ 'name': found_name,
+ 'size': size,
+ 'attached_to': attached_to,
+ 'lun_id': lun_id
+ }
+ else:
+ continue
+
+ return return_value
+
+ def create_lun(self):
+ """
+ Create LUN with requested name and size
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
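+ # NOTE: the ostype is fixed to 'linux'; the module exposes no option to change it.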
+ lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-create-by-size', **{'path': path,
+ 'size': str(self.size),
+ 'ostype': 'linux'})
+
+ try:
+ self.server.invoke_successfully(lun_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_lun(self):
+ """
+ Delete requested LUN
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-destroy', **{'path': path,
+ 'force': str(self.force_remove),
+ 'destroy-fenced-lun':
+ str(self.force_remove_fenced)})
+
+ try:
+ self.server.invoke_successfully(lun_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_lun(self):
+ """
+ Resize requested LUN.
+
+ :return: True if LUN was actually re-sized, false otherwise.
+ :rtype: bool
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-resize', **{'path': path,
+ 'size': str(self.size),
+ 'force': str(self.force_resize)})
+ try:
+ self.server.invoke_successfully(lun_resize, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == "9042":
+ # Error 9042 denotes the new LUN size being the same as the
+ # old LUN size. This happens when there's barely any difference
+ # in the two sizes. For example, from 8388608 bytes to
+ # 8194304 bytes. This should go away if/when the default size
+ # requested/reported to/from the controller is changed to a
+ # larger unit (MB/GB/TB).
+ return False
+ else:
+ self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return True
+
+ def apply(self):
+ property_changed = False
+ multiple_properties_changed = False
+ size_changed = False
+ lun_exists = False
+ lun_detail = self.get_lun()
+
+ if lun_detail:
+ lun_exists = True
+ current_size = lun_detail['size']
+
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if not int(current_size) == self.size:
+ size_changed = True
+ property_changed = True
+
+ else:
+ if self.state == 'present':
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not lun_exists:
+ self.create_lun()
+
+ else:
+ if size_changed:
+ # Ensure that size was actually changed. Please
+ # read notes in 'resize_lun' function for details.
+ size_changed = self.resize_lun()
+ if not size_changed and not \
+ multiple_properties_changed:
+ property_changed = False
+
+ elif self.state == 'absent':
+ self.delete_lun()
+
+ changed = property_changed or size_changed
+ # TODO: include other details about the lun (size, etc.)
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLUN()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py
new file mode 100644
index 00000000..9f7ce60d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_qtree
+
+short_description: Manage qtrees
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_qtree) instead.
+
+description:
+- Create or destroy Qtrees.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Qtree should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the Qtree to manage.
+ required: true
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the Qtree should exist on. Required when C(state=present).
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+- name: Create QTree
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename QTree
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTQTree(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_qtree(self):
+ """
+ Checks if the qtree exists.
+
+ :return:
+ True if qtree found
+ False if qtree is not found
+ :rtype: bool
+ """
+
+ qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-info', **{'vserver': self.vserver,
+ 'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ qtree_list_iter.add_child_elem(query)
+
+ result = self.server.invoke_successfully(qtree_list_iter,
+ enable_tunneling=True)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_qtree(self):
+ qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-create', **{'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ try:
+ self.server.invoke_successfully(qtree_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_qtree(self):
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-delete', **{'qtree': path})
+
+ try:
+ self.server.invoke_successfully(qtree_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_qtree(self):
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ new_path = '/vol/%s/%s' % (self.flexvol_name, self.name)
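+ # NOTE: new_path equals path, since both are built from self.name; the
+ # module exposes no option for a new qtree name, so this is a no-op rename.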
+ qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-rename', **{'qtree': path,
+ 'new-qtree-name': new_path})
+
+ try:
+ self.server.invoke_successfully(qtree_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ qtree_exists = False
+ rename_qtree = False
+ qtree_detail = self.get_qtree()
+
+ if qtree_detail:
+ qtree_exists = True
+
+ if self.state == 'absent':
+ # Qtree exists, but requested state is 'absent'.
+ changed = True
+
+ elif self.state == 'present':
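+ # NOTE: this comparison is always false, since the module exposes no
+ # new-name option; rename_qtree() is effectively unreachable.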
+ if self.name is not None and not self.name == \
+ self.name:
+ changed = True
+ rename_qtree = True
+
+ else:
+ if self.state == 'present':
+ # Qtree does not exist, but requested state is 'present'.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not qtree_exists:
+ self.create_qtree()
+
+ else:
+ if rename_qtree:
+ self.rename_qtree()
+
+ elif self.state == 'absent':
+ self.delete_qtree()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTQTree()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py
new file mode 100644
index 00000000..0227a014
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_svm
+
+short_description: Manage NetApp cDOT svm
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_svm) instead.
+
+description:
+- Create or destroy SVM on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified SVM should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the SVM to manage.
+ required: true
+
+ root_volume:
+ description:
+ - Root volume of the SVM. Required when C(state=present).
+
+ root_volume_aggregate:
+ description:
+ - The aggregate on which the root volume will be created.
+ - Required when C(state=present).
+
+ root_volume_security_style:
+ description:
+ - Security Style of the root volume.
+ - When specified as part of the vserver-create, this field represents the security style for the Vserver root volume.
+ - When specified as part of vserver-get-iter call, this will return the list of matching Vservers.
+ - Possible values are 'unix', 'ntfs', 'mixed'.
+ - The 'unified' security style, which applies only to Infinite Volumes, cannot be applied to a Vserver's root volume.
+ - Valid options are "unix" for NFS, "ntfs" for CIFS, "mixed" for Mixed, "unified" for Unified.
+ - Required when C(state=present)
+ choices: ['unix', 'ntfs', 'mixed', 'unified']
+
+'''
+
+EXAMPLES = """
+
+ - name: Create SVM
+ community.general.na_cdot_svm:
+ state: present
+ name: ansibleVServer
+ root_volume: vol1
+ root_volume_aggregate: aggr1
+ root_volume_security_style: mixed
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTSVM(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ root_volume=dict(type='str'),
+ root_volume_aggregate=dict(type='str'),
+ root_volume_security_style=dict(type='str', choices=['unix',
+ 'ntfs',
+ 'mixed',
+ 'unified'
+ ]),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['root_volume',
+ 'root_volume_aggregate',
+ 'root_volume_security_style'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.root_volume = p['root_volume']
+ self.root_volume_aggregate = p['root_volume_aggregate']
+ self.root_volume_security_style = p['root_volume_security_style']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_vserver(self):
+ """
+ Checks if vserver exists.
+
+ :return:
+ True if vserver found
+ False if vserver is not found
+ :rtype: bool
+ """
+
+ vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-info', **{'vserver-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+
+ """
+ TODO:
+ Return more relevant parameters about vserver that can
+ be updated by the playbook.
+ """
+ return True
+ else:
+ return False
+
+ def create_vserver(self):
+ vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-create', **{'vserver-name': self.name,
+ 'root-volume': self.root_volume,
+ 'root-volume-aggregate':
+ self.root_volume_aggregate,
+ 'root-volume-security-style':
+ self.root_volume_security_style
+ })
+
+ try:
+ self.server.invoke_successfully(vserver_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_vserver(self):
+ vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-destroy', **{'vserver-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_vserver(self):
+ vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-rename', **{'vserver-name': self.name,
+ 'new-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming SVM %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ vserver_exists = self.get_vserver()
+ rename_vserver = False
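+ # NOTE: rename_vserver stays False throughout, so rename_vserver()
+ # below is effectively unreachable.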
+ if vserver_exists:
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Update properties
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not vserver_exists:
+ self.create_vserver()
+
+ else:
+ if rename_vserver:
+ self.rename_vserver()
+
+ elif self.state == 'absent':
+ self.delete_vserver()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTSVM()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py
new file mode 100644
index 00000000..626e0aa0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user
+
+short_description: useradmin configuration and management
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user) instead.
+
+description:
+- Create or destroy users.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the user to manage.
+ required: true
+
+ application:
+ description:
+ - Applications to grant access to.
+ required: true
+ choices: ['console', 'http', 'ontapi', 'rsh', 'snmp', 'sp', 'ssh', 'telnet']
+
+ authentication_method:
+ description:
+ - Authentication method for the application.
+ - Not all authentication methods are valid for an application.
+ - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
+ - password for console application.
+ - password, domain, nsswitch, cert for http application.
+ - password, domain, nsswitch, cert for ontapi application.
+ - community for snmp application (when creating SNMPv1 and SNMPv2 users).
+ - usm and community for snmp application (when creating SNMPv3 users).
+ - password for sp application.
+ - password for rsh application.
+ - password for telnet application.
+ - password, publickey, domain, nsswitch for ssh application.
+ required: true
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
+
+ set_password:
+ description:
+ - Password for the user account.
+ - It is ignored for creating snmp users, but is required for creating non-snmp users.
+ - For an existing user, this value will be used as the new password.
+
+ role_name:
+ description:
+ - The name of the role. Required when C(state=present).
+
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User
+ community.general.na_cdot_user:
+ state: present
+ name: SampleUser
+ application: ssh
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUser(object):
+ """
+ Common operations to manage users and roles.
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ application=dict(required=True, type='str', choices=[
+ 'console', 'http', 'ontapi', 'rsh',
+ 'snmp', 'sp', 'ssh', 'telnet']),
+ authentication_method=dict(required=True, type='str',
+ choices=['community', 'password',
+ 'publickey', 'domain',
+ 'nsswitch', 'usm']),
+ set_password=dict(required=False, type='str', default=None),
+ role_name=dict(required=False, type='str'),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['role_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.application = p['application']
+ self.authentication_method = p['authentication_method']
+ self.set_password = p['set_password']
+ self.role_name = p['role_name']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_user(self):
+ """
+ Checks if the user exists.
+
+ :return:
+ True if user found
+ False if user is not found
+ :rtype: bool
+ """
+
+ security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-account-info', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(security_login_get_iter,
+ enable_tunneling=False)
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return True
+ else:
+ return False
+
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16034 denotes a user not being found.
+ if to_native(e.code) == "16034":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def create_user(self):
+ user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-create', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method,
+ 'role-name': self.role_name})
+ if self.set_password is not None:
+ user_create.add_new_child('password', self.set_password)
+
+ try:
+ self.server.invoke_successfully(user_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_user(self):
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-delete', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ try:
+ self.server.invoke_successfully(user_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_password(self):
+ """
+ Changes the password
+
+ :return:
+ True if password updated
+ False if password is not updated
+ :rtype: bool
+ """
+ self.server.set_vserver(self.vserver)
+ modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-modify-password', **{
+ 'new-password': str(self.set_password),
+ 'user-name': self.name})
+ try:
+ self.server.invoke_successfully(modify_password,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == '13114':
+ return False
+ else:
+ self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ self.server.set_vserver(None)
+ return True
+
+ def apply(self):
+ property_changed = False
+ password_changed = False
+ user_exists = self.get_user()
+
+ if user_exists:
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if self.set_password is not None:
+ password_changed = self.change_password()
+ else:
+ if self.state == 'present':
+ # Check if anything needs to be updated
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not user_exists:
+ self.create_user()
+
+ # Add ability to update parameters.
+
+ elif self.state == 'absent':
+ self.delete_user()
+
+ changed = property_changed or password_changed
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUser()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
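+
+
+# Editor's note: the helper below is an illustrative sketch, not part of the
+# original module. get_user() above follows the generic ZAPI "get-iter"
+# pattern used throughout these modules: describe the object in an info
+# element, wrap it in a <query> element, attach it to the *-get-iter request,
+# and test 'num-records' in the reply. Restated in isolation (the helper name
+# is hypothetical; 'server' is a connection such as setup_ontap_zapi() returns):
+def _example_login_exists(server, vserver, name, application, auth_method):
+    query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+        'security-login-account-info',
+        **{'vserver': vserver, 'user-name': name,
+           'application': application,
+           'authentication-method': auth_method})
+    query = netapp_utils.zapi.NaElement('query')
+    query.add_child_elem(query_details)
+    request = netapp_utils.zapi.NaElement('security-login-get-iter')
+    request.add_child_elem(query)
+    result = server.invoke_successfully(request, enable_tunneling=False)
+    num = result.get_child_content('num-records')
+    return num is not None and int(num) >= 1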
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py
new file mode 100644
index 00000000..88133200
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user_role
+
+short_description: useradmin configuration and management
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user_role) instead.
+
+description:
+- Create or destroy user roles.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the role to manage.
+ required: true
+
+ command_directory_name:
+ description:
+    - The command or command directory to which the role has access.
+ required: true
+
+ access_level:
+ description:
+    - The access level of the role for the command or command directory.
+ choices: ['none', 'readonly', 'all']
+ default: 'all'
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User Role
+ community.general.na_cdot_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: DEFAULT
+ access_level: none
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUserRole(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ command_directory_name=dict(required=True, type='str'),
+ access_level=dict(required=False, type='str', default='all',
+ choices=['none', 'readonly', 'all']),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.command_directory_name = p['command_directory_name']
+ self.access_level = p['access_level']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_role(self):
+ """
+ Checks if the role exists for specific command-directory-name.
+
+ :return:
+ True if role found
+ False if role is not found
+ :rtype: bool
+ """
+
+ security_login_role_get_iter = netapp_utils.zapi.NaElement(
+ 'security-login-role-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-info', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_role_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(
+ security_login_role_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16031 denotes a role not being found.
+ if to_native(e.code) == "16031":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_role(self):
+ role_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-create', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name,
+ 'access-level':
+ self.access_level})
+
+ try:
+ self.server.invoke_successfully(role_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_role(self):
+ role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-delete', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ try:
+ self.server.invoke_successfully(role_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ role_exists = self.get_role()
+
+ if role_exists:
+ if self.state == 'absent':
+ changed = True
+
+ # Check if properties need to be updated
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not role_exists:
+ self.create_role()
+
+ # Update properties
+
+ elif self.state == 'absent':
+ self.delete_role()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUserRole()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
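+
+
+# Editor's note: illustrative sketch, not part of the original module. The
+# apply() method above is the present/absent idempotency skeleton shared by
+# the na_cdot_* modules: probe current state, decide whether a change is
+# needed, skip the write when running in check mode, and report 'changed'.
+# Distilled to its core (shown as a comment so module behaviour is unchanged):
+#
+#     def apply(self):
+#         exists = self.get_role()
+#         changed = (self.state == 'present') != exists
+#         if changed and not self.module.check_mode:
+#             if self.state == 'present':
+#                 self.create_role()
+#             else:
+#                 self.delete_role()
+#         self.module.exit_json(changed=changed)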
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py
new file mode 100644
index 00000000..c10911d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_volume
+
+short_description: Manage NetApp cDOT volumes
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_volume) instead.
+
+description:
+- Create or destroy volumes on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ infinite:
+ description:
+ - Set True if the volume is an Infinite Volume.
+ type: bool
+ default: 'no'
+
+ online:
+ description:
+    - Whether the specified volume is online or not.
+ type: bool
+ default: 'yes'
+
+ aggregate_name:
+ description:
+ - The name of the aggregate the flexvol should exist on. Required when C(state=present).
+
+ size:
+ description:
+ - The size of the volume in (size_unit). Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+
+ junction_path:
+ description:
+    - Junction path at which to mount the volume.
+ required: false
+
+ export_policy:
+ description:
+ - Export policy to set for the specified junction path.
+ required: false
+ default: default
+
+ snapshot_policy:
+ description:
+ - Snapshot policy to set for the specified volume.
+ required: false
+ default: default
+
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexVol
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ aggregate_name: aggr1
+ size: 20
+ size_unit: mb
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ junction_path: /ansibleVolume
+ export_policy: all_nfs_networks
+ snapshot_policy: daily
+
+ - name: Make FlexVol offline
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ online: False
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTVolume(object):
+
+ def __init__(self):
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
+ is_online=dict(required=False, type='bool', default=True, aliases=['online']),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ aggregate_name=dict(type='str'),
+            vserver=dict(required=True, type='str'),
+ junction_path=dict(required=False, type='str', default=None),
+ export_policy=dict(required=False, type='str', default='default'),
+ snapshot_policy=dict(required=False, type='str', default='default'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['aggregate_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.is_infinite = p['is_infinite']
+ self.is_online = p['is_online']
+ self.size_unit = p['size_unit']
+ self.vserver = p['vserver']
+ self.junction_path = p['junction_path']
+ self.export_policy = p['export_policy']
+ self.snapshot_policy = p['snapshot_policy']
+
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.aggregate_name = p['aggregate_name']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_volume(self):
+ """
+ Return details about the volume
+ :param:
+ name : Name of the volume
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', self.name)
+ volume_attributes.add_child_elem(volume_id_attributes)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+
+ volume_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(volume_info, True)
+
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ volume_attributes = result.get_child_by_name(
+ 'attributes-list').get_child_by_name(
+ 'volume-attributes')
+ # Get volume's current size
+ volume_space_attributes = volume_attributes.get_child_by_name(
+ 'volume-space-attributes')
+ current_size = volume_space_attributes.get_child_content('size')
+
+ # Get volume's state (online/offline)
+ volume_state_attributes = volume_attributes.get_child_by_name(
+ 'volume-state-attributes')
+ current_state = volume_state_attributes.get_child_content('state')
+ is_online = None
+ if current_state == "online":
+ is_online = True
+ elif current_state == "offline":
+ is_online = False
+ return_value = {
+ 'name': self.name,
+ 'size': current_size,
+ 'is_online': is_online,
+ }
+
+ return return_value
+
+ def create_volume(self):
+ create_parameters = {'volume': self.name,
+ 'containing-aggr-name': self.aggregate_name,
+ 'size': str(self.size),
+ }
+ if self.junction_path:
+ create_parameters['junction-path'] = str(self.junction_path)
+ if self.export_policy != 'default':
+ create_parameters['export-policy'] = str(self.export_policy)
+ if self.snapshot_policy != 'default':
+ create_parameters['snapshot-policy'] = str(self.snapshot_policy)
+
+ volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-create', **create_parameters)
+
+ try:
+ self.server.invoke_successfully(volume_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_volume(self):
+ if self.is_infinite:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy-async', **{'volume-name': self.name})
+ else:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy', **{'name': self.name, 'unmount-and-offline':
+ 'true'})
+
+ try:
+ self.server.invoke_successfully(volume_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_volume(self):
+ """
+ Rename the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to rename an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename-async',
+ **{'volume-name': self.name, 'new-volume-name': str(
+ self.name)})
+ else:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename', **{'volume': self.name, 'new-volume-name': str(
+ self.name)})
+ try:
+ self.server.invoke_successfully(volume_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_volume(self):
+ """
+ Re-size the volume.
+
+        Note: 'is_infinite' needs to be set to True in order to resize an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size-async',
+ **{'volume-name': self.name, 'new-size': str(
+ self.size)})
+ else:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size', **{'volume': self.name, 'new-size': str(
+ self.size)})
+ try:
+ self.server.invoke_successfully(volume_resize,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_volume_state(self):
+ """
+ Change volume's state (offline/online).
+
+ Note: 'is_infinite' needs to be set to True in order to change the
+ state of an Infinite Volume.
+ """
+ state_requested = None
+ if self.is_online:
+ # Requested state is 'online'.
+ state_requested = "online"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online',
+ **{'name': self.name})
+ else:
+ # Requested state is 'offline'.
+ state_requested = "offline"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline',
+ **{'name': self.name})
+ try:
+ self.server.invoke_successfully(volume_change_state,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
+ (self.name, state_requested, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ rename_volume = False
+ resize_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ if str(volume_detail['size']) != str(self.size):
+ resize_volume = True
+ changed = True
+ if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
+ changed = True
+ if self.is_online is False:
+ # Volume is online, but requested state is offline
+ pass
+ else:
+ # Volume is offline but requested state is online
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+
+ else:
+ if resize_volume:
+ self.resize_volume()
+                        if (volume_detail['is_online'] is not None and
+                                volume_detail['is_online'] != self.is_online):
+ self.change_volume_state()
+ # Ensure re-naming is the last change made.
+ if rename_volume:
+ self.rename_volume()
+
+ elif self.state == 'absent':
+ self.delete_volume()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
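+
+
+# Editor's note: illustrative, not part of the original module. The size
+# handling in __init__ above multiplies 'size' by the power of 1024 looked up
+# in _size_unit_map, so volume-create and volume-size always receive bytes.
+# For the first EXAMPLES entry (size: 20, size_unit: mb):
+#
+#     size_bytes = 20 * (1024 ** 2)   # == 20971520, the value sent to ZAPI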
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py
new file mode 100644
index 00000000..0fc61afb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py
@@ -0,0 +1,610 @@
+#!/usr/bin/python
+
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_gather_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(netapp.ontap.na_ontap_info) instead.
+author: Piotr Olczak (@dprts) <polczak@redhat.com>
+extends_documentation_fragment:
+- community.general._netapp.na_ontap
+
+short_description: NetApp information gatherer
+description:
+    - This module allows you to gather various information about ONTAP configuration.
+requirements:
+ - netapp_lib
+options:
+ state:
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ "aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info",
+ "net_ifgrp_info",
+ "net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info",
+ "nvme_namespace_info", "nvme_subsystem_info", "ontap_version",
+ "qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info",
+ "security_login_account_info", "storage_failover_info", "volume_info",
+ "vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info"
+ Can specify a list of values to include a larger subset. Values can also be used
+ with an initial C(M(!)) to specify that a specific subset should
+ not be collected.
+ - nvme is supported with ONTAP 9.4 onwards.
+ - use "help" to get a list of supported facts for your system.
+ default: "all"
+'''
+
+EXAMPLES = '''
+- name: Get NetApp info (Password Authentication)
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+- ansible.builtin.debug:
+ var: ontap_facts
+- name: Limit Fact Gathering to Aggregate Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: "aggregate_info"
+- name: Limit Fact Gathering to Volume and Lun Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - volume_info
+ - lun_info
+- name: Gather all facts except for volume and lun information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - "!volume_info"
+ - "!lun_info"
+'''
+
+RETURN = '''
+ontap_facts:
+ description: Returns various information about NetApp cluster configuration
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_facts": {
+ "aggregate_info": {...},
+ "cluster_node_info": {...},
+ "net_dns_info": {...},
+ "net_ifgrp_info": {...},
+ "net_interface_info": {...},
+ "net_port_info": {...},
+ "security_key_manager_key_info": {...},
+ "security_login_account_info": {...},
+ "volume_info": {...},
+ "lun_info": {...},
+ "storage_failover_info": {...},
+ "vserver_login_banner_info": {...},
+ "vserver_motd_info": {...},
+ "vserver_info": {...},
+ "vserver_nfs_info": {...},
+ "ontap_version": {...},
+ "igroup_info": {...},
+ "qos_policy_info": {...},
+ "qos_adaptive_policy_info": {...}
+        }
+    }'
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPGatherFacts(object):
+ '''Class with gather facts methods'''
+
+ def __init__(self, module):
+ self.module = module
+ self.netapp_info = dict()
+
+ # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
+ # for starting this
+ # min_version identifies the ontapi version which supports this ZAPI
+ # use 0 if it is supported since 9.1
+ self.fact_subsets = {
+ 'net_dns_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-dns-get-iter',
+ 'attribute': 'net-dns-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-get-iter',
+ 'attribute': 'net-interface-info',
+ 'field': 'interface-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_port_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-get-iter',
+ 'attribute': 'net-port-info',
+ 'field': ('node', 'port'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-node-get-iter',
+ 'attribute': 'cluster-node-info',
+ 'field': 'node-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-get-iter',
+ 'attribute': 'security-login-account-info',
+ 'field': ('vserver', 'user-name', 'application', 'authentication-method'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'aggregate_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-get-iter',
+ 'attribute': 'aggr-attributes',
+ 'field': 'aggregate-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'volume_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-get-iter',
+ 'attribute': 'volume-attributes',
+ 'field': ('name', 'owning-vserver-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'lun_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-get-iter',
+ 'attribute': 'lun-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'storage_failover_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cf-get-iter',
+ 'attribute': 'storage-failover-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_motd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-motd-get-iter',
+ 'attribute': 'vserver-motd-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_login_banner_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-login-banner-get-iter',
+ 'attribute': 'vserver-login-banner-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_key_manager_key_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-key-manager-key-get-iter',
+ 'attribute': 'security-key-manager-key-info',
+ 'field': ('node', 'key-id'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-get-iter',
+ 'attribute': 'vserver-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_ifgrp_info': {
+ 'method': self.get_ifgrp_info,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontap_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'system_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-node-get-iter',
+ 'attribute': 'node-details-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'igroup_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'igroup-get-iter',
+ 'attribute': 'initiator-group-info',
+ 'field': ('vserver', 'initiator-group-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'qos_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-policy-group-get-iter',
+ 'attribute': 'qos-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ # supported in ONTAP 9.3 and onwards
+ 'qos_adaptive_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-adaptive-policy-group-get-iter',
+ 'attribute': 'qos-adaptive-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '130',
+ },
+ # supported in ONTAP 9.4 and onwards
+ 'nvme_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-get-iter',
+ 'attribute': 'nvme-target-service-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-interface-get-iter',
+ 'attribute': 'nvme-interface-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_subsystem_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-subsystem-get-iter',
+ 'attribute': 'nvme-subsystem-info',
+ 'field': 'subsystem',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_namespace_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-namespace-get-iter',
+ 'attribute': 'nvme-namespace-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ }
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def ontapi(self):
+ '''Method to get ontapi version'''
+
+ api = 'system-get-ontapi-version'
+ api_call = netapp_utils.zapi.NaElement(api)
+ try:
+ results = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ ontapi_version = results.get_child_content('minor-version')
+ return ontapi_version if ontapi_version is not None else '0'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api, to_native(error)), exception=traceback.format_exc())
+
+ def call_api(self, call, query=None):
+ '''Main method to run an API call'''
+
+ api_call = netapp_utils.zapi.NaElement(call)
+ result = None
+
+ if query:
+ for key, val in query.items():
+ # Can val be nested?
+ api_call.add_new_child(key, val)
+ try:
+ result = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ return result
+ except netapp_utils.zapi.NaApiError as error:
+ if call in ['security-key-manager-key-get-iter']:
+ return result
+ else:
+ self.module.fail_json(msg="Error calling API %s: %s"
+ % (call, to_native(error)), exception=traceback.format_exc())
+
+ def get_ifgrp_info(self):
+ '''Method to get network port ifgroups info'''
+
+ try:
+ net_port_info = self.netapp_info['net_port_info']
+ except KeyError:
+ net_port_info_calls = self.fact_subsets['net_port_info']
+ net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
+ interfaces = net_port_info.keys()
+
+ ifgrps = []
+ for ifn in interfaces:
+ if net_port_info[ifn]['port_type'] == 'if_group':
+ ifgrps.append(ifn)
+
+ net_ifgrp_info = dict()
+ for ifgrp in ifgrps:
+ query = dict()
+ query['node'], query['ifgrp-name'] = ifgrp.split(':')
+
+ tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
+ attribute='net-ifgrp-info', query=query)
+ net_ifgrp_info = net_ifgrp_info.copy()
+ net_ifgrp_info.update(tmp)
+ return net_ifgrp_info
+
+ def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
+ '''Method to run a generic get-iter call'''
+
+ generic_call = self.call_api(call, query)
+
+ if call == 'net-port-ifgrp-get':
+ children = 'attributes'
+ else:
+ children = 'attributes-list'
+
+ if generic_call is None:
+ return None
+
+ if field is None:
+ out = []
+ else:
+ out = {}
+
+ attributes_list = generic_call.get_child_by_name(children)
+
+ if attributes_list is None:
+ return None
+
+ for child in attributes_list.get_children():
+ dic = xmltodict.parse(child.to_string(), xml_attribs=False)
+
+ if attribute is not None:
+ dic = dic[attribute]
+
+ if isinstance(field, str):
+ unique_key = _finditem(dic, field)
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ elif isinstance(field, tuple):
+ unique_key = ':'.join([_finditem(dic, el) for el in field])
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ else:
+ out.append(convert_keys(json.loads(json.dumps(dic))))
+
+ return out
+
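+    # Editor's note (illustrative, not part of the original module): when
+    # 'field' is a tuple, the per-record key joins the named fields with ':',
+    # e.g. field=('node', 'port') yields keys such as 'node1:e0a'; this is the
+    # same 'node:ifgrp-name' format that get_ifgrp_info() above splits apart.
+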
+ def get_all(self, gather_subset):
+ '''Method to get all subsets'''
+
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_gather_facts", cserver)
+
+ self.netapp_info['ontap_version'] = self.ontapi()
+
+ run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
+ if 'help' in gather_subset:
+ self.netapp_info['help'] = sorted(run_subset)
+ else:
+ for subset in run_subset:
+ call = self.fact_subsets[subset]
+ self.netapp_info[subset] = call['method'](**call['kwargs'])
+
+ return self.netapp_info
+
+ def get_subset(self, gather_subset, version):
+ '''Method to get a single subset'''
+
+ runable_subsets = set()
+ exclude_subsets = set()
+ usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']]
+ if 'help' in gather_subset:
+ return usable_subsets
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(usable_subsets)
+ return runable_subsets
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ return set()
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in usable_subsets:
+ if subset not in self.fact_subsets.keys():
+ self.module.fail_json(msg='Bad subset: %s' % subset)
+ self.module.fail_json(msg='Remote system at version %s does not support %s' %
+ (version, subset))
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(usable_subsets)
+
+ runable_subsets.difference_update(exclude_subsets)
+
+ return runable_subsets
+
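+    # Editor's note (illustrative, not part of the original module): with
+    # gather_subset=['!volume_info', '!lun_info'] the loop only fills
+    # exclude_subsets, so runable_subsets is topped up with every usable
+    # subset and the two exclusions are subtracted at the end;
+    # gather_subset=['all'] short-circuits to all usable subsets.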
+
+# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
+def __finditem(obj, key):
+
+ if key in obj:
+ return obj[key]
+ for dummy, val in obj.items():
+ if isinstance(val, dict):
+ item = __finditem(val, key)
+ if item is not None:
+ return item
+ return None
+
+
+def _finditem(obj, key):
+
+ value = __finditem(obj, key)
+ if value is not None:
+ return value
+ raise KeyError(key)
+
+
+def convert_keys(d_param):
+ '''Method to convert hyphen to underscore'''
+
+ out = {}
+ if isinstance(d_param, dict):
+ for key, val in d_param.items():
+ val = convert_keys(val)
+ out[key.replace('-', '_')] = val
+ else:
+ return d_param
+ return out
+
+
+def main():
+ '''Execute action'''
+
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='info', choices=['info']),
+ gather_subset=dict(default=['all'], type='list'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_XMLTODICT:
+ module.fail_json(msg="xmltodict missing")
+
+ if not HAS_JSON:
+ module.fail_json(msg="json missing")
+
+ state = module.params['state']
+ gather_subset = module.params['gather_subset']
+ if gather_subset is None:
+ gather_subset = ['all']
+ gf_obj = NetAppONTAPGatherFacts(module)
+ gf_all = gf_obj.get_all(gather_subset)
+ result = {'state': state, 'changed': False}
+ module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result)
+
+
+if __name__ == '__main__':
+ main()
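+
+
+# Editor's note: illustrative sketch, not part of the original module. How the
+# helpers above cooperate on one parsed ZAPI record (hypothetical data):
+#
+#     record = {'net-dns-info': {'vserver-name': 'vs0', 'dns-state': 'enabled'}}
+#     _finditem(record, 'vserver-name')   # -> 'vs0' (recursive key lookup)
+#     convert_keys(record)                # -> {'net_dns_info':
+#                                         #      {'vserver_name': 'vs0',
+#                                         #       'dns_state': 'enabled'}}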
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py
new file mode 100644
index 00000000..248fd105
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py
@@ -0,0 +1,1304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is largely copied from the Nagios module included in the
+# Func project. Original copyright follows:
+#
+# func-nagios - Schedule downtime and enables/disable notifications
+# Copyright 2011, Red Hat, Inc.
+# Tim Bielawa <tbielawa@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications.
+description:
+ - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - The C(nagios) module is not idempotent.
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+ to the host the playbook is currently running on.
+  - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself),
+ e.g., C(service=host). This keyword may not be given with other services at the same time.
+ I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
+    To schedule downtime for all services on a particular host, use the keyword "all", e.g., C(service=all).
+options:
+ action:
+ description:
+ - Action to take.
+ - servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
+ - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0.
+ required: true
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+ "servicegroup_host_downtime", "acknowledge", "forced_check" ]
+ type: str
+ host:
+ description:
+ - Host to operate on in Nagios.
+ type: str
+ cmdfile:
+ description:
+ - Path to the nagios I(command file) (FIFO pipe).
+ Only required if auto-detection fails.
+ type: str
+ author:
+ description:
+ - Author to leave downtime comments as.
+        Only usable with the C(downtime) and C(acknowledge) actions.
+ type: str
+ default: Ansible
+ comment:
+ description:
+      - Comment for the C(downtime) and C(acknowledge) actions.
+ type: str
+ default: Scheduling downtime
+ start:
+ description:
+ - When downtime should start, in time_t format (epoch seconds).
+ version_added: '0.2.0'
+ type: str
+ minutes:
+ description:
+ - Minutes to schedule downtime for.
+ - Only usable with the C(downtime) action.
+ type: int
+ default: 30
+ services:
+ description:
+ - What to manage downtime/alerts for. Separate multiple services with commas.
+ C(service) is an alias for C(services).
+ B(Required) option when using the C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), and C(disable_alerts) actions.
+ aliases: [ "service" ]
+ type: str
+ servicegroup:
+ description:
+ - The Servicegroup we want to set downtimes/alerts for.
+        B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
+ type: str
+ command:
+ description:
+ - The raw command to send to nagios, which
+        should not include the submitted time header or the line-feed.
+ B(Required) option when using the C(command) action.
+ type: str
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+- name: Set 30 minutes of apache downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 30
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00
+ community.general.nagios:
+ action: downtime
+ start: 1555984800
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime, with a comment describing the reason
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: Rebuilding machine
+
+- name: Schedule downtime for ALL services on HOST
+ community.general.nagios:
+ action: downtime
+ minutes: 45
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule downtime for a few services
+ community.general.nagios:
+ action: downtime
+ services: frob,foobar,qeuz
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all services in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_service_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all host in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_host_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Delete all downtime for a given host
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: all
+
+- name: Delete all downtime for HOST with a particular comment
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: host
+ comment: Planned maintenance
+
+- name: Acknowledge a HOST problem with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: 'power outage - see casenr 12345'
+
+- name: Acknowledge an active service problem for the httpd service with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: httpd
+ host: '{{ inventory_hostname }}'
+ comment: 'service crashed - see casenr 12345'
+
+- name: Reset a passive service check for snmp trap
+ community.general.nagios:
+ action: forced_check
+ service: snmp
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for the httpd service
+ community.general.nagios:
+ action: forced_check
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for all services of a particular host
+ community.general.nagios:
+ action: forced_check
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for a particular host
+ community.general.nagios:
+ action: forced_check
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Enable SMART disk alerts
+ community.general.nagios:
+ action: enable_alerts
+ service: smart
+ host: '{{ inventory_hostname }}'
+
+- name: Disable httpd and nfs alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: httpd,nfs
+ host: '{{ inventory_hostname }}'
+
+- name: Disable HOST alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Silence ALL alerts
+ community.general.nagios:
+ action: silence
+ host: '{{ inventory_hostname }}'
+
+- name: Unsilence all alerts
+ community.general.nagios:
+ action: unsilence
+ host: '{{ inventory_hostname }}'
+
+- name: Shut up nagios
+ community.general.nagios:
+ action: silence_nagios
+
+- name: Annoy me nagios
+ community.general.nagios:
+ action: unsilence_nagios
+
+- name: Command something
+ community.general.nagios:
+ action: command
+ command: DISABLE_FAILURE_PREDICTION
+'''
+
+import time
+import os.path
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+######################################################################
+
+def which_cmdfile():
+ locations = [
+ # rhel
+ '/etc/nagios/nagios.cfg',
+ # debian
+ '/etc/nagios3/nagios.cfg',
+ # older debian
+ '/etc/nagios2/nagios.cfg',
+ # bsd, solaris
+ '/usr/local/etc/nagios/nagios.cfg',
+ # groundwork it monitoring
+ '/usr/local/groundwork/nagios/etc/nagios.cfg',
+ # open monitoring distribution
+ '/omd/sites/oppy/tmp/nagios/nagios.cfg',
+ # ???
+ '/usr/local/nagios/etc/nagios.cfg',
+ '/usr/local/nagios/nagios.cfg',
+ '/opt/nagios/etc/nagios.cfg',
+ '/opt/nagios/nagios.cfg',
+ # icinga on debian/ubuntu
+ '/etc/icinga/icinga.cfg',
+ # icinga installed from source (default location)
+ '/usr/local/icinga/etc/icinga.cfg',
+ ]
+
+    for path in locations:
+        if os.path.exists(path):
+            # Use a context manager so the config file handle is closed.
+            with open(path) as cfg:
+                for line in cfg:
+                    if line.startswith('command_file'):
+                        return line.split('=')[1].strip()
+
+ return None
+
+######################################################################
+
+
+def main():
+ ACTION_CHOICES = [
+ 'downtime',
+ 'delete_downtime',
+ 'silence',
+ 'unsilence',
+ 'enable_alerts',
+ 'disable_alerts',
+ 'silence_nagios',
+ 'unsilence_nagios',
+ 'command',
+ 'servicegroup_host_downtime',
+ 'servicegroup_service_downtime',
+ 'acknowledge',
+ 'forced_check',
+ ]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=ACTION_CHOICES),
+ author=dict(default='Ansible'),
+ comment=dict(default='Scheduling downtime'),
+ host=dict(required=False, default=None),
+ servicegroup=dict(required=False, default=None),
+ start=dict(required=False, default=None),
+ minutes=dict(default=30, type='int'),
+ cmdfile=dict(default=which_cmdfile()),
+ services=dict(default=None, aliases=['service']),
+ command=dict(required=False, default=None),
+ )
+ )
+
+ action = module.params['action']
+ host = module.params['host']
+ servicegroup = module.params['servicegroup']
+ start = module.params['start']
+ services = module.params['services']
+ cmdfile = module.params['cmdfile']
+ command = module.params['command']
+
+ ##################################################################
+ # Required args per action:
+ # downtime = (minutes, service, host)
+ # acknowledge = (service, host)
+ # (un)silence = (host)
+ # (enable/disable)_alerts = (service, host)
+ # command = command
+ #
+ # AnsibleModule will verify most stuff, we need to verify
+ # 'service' manually.
+
+ ##################################################################
+ if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
+ if not host:
+ module.fail_json(msg='no host specified for action requiring one')
+ ######################################################################
+ if action == 'downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+
+ ######################################################################
+ if action == 'delete_downtime':
+ # Make sure there's an actual service selected
+ if not services:
+            module.fail_json(msg='no service selected to delete downtime for')
+
+ ######################################################################
+
+ if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
+ # Make sure there's an actual servicegroup selected
+ if not servicegroup:
+ module.fail_json(msg='no servicegroup selected to set downtime for')
+
+ ##################################################################
+ if action in ['enable_alerts', 'disable_alerts']:
+ if not services:
+ module.fail_json(msg='a service is required when setting alerts')
+
+ if action in ['command']:
+ if not command:
+ module.fail_json(msg='no command passed for command action')
+ ######################################################################
+ if action == 'acknowledge':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to acknowledge')
+
+ ##################################################################
+ if action == 'forced_check':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to check')
+
+ ##################################################################
+ if not cmdfile:
+ module.fail_json(msg='unable to locate nagios.cfg')
+
+ ##################################################################
+ ansible_nagios = Nagios(module, **module.params)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ ansible_nagios.act()
+ ##################################################################
+
+
+######################################################################
+class Nagios(object):
+ """
+ Perform common tasks in Nagios related to downtime and
+ notifications.
+
+ The complete set of external commands Nagios handles is documented
+ on their website:
+
+ http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+
+ Note that in the case of `schedule_svc_downtime`,
+ `enable_svc_notifications`, and `disable_svc_notifications`, the
+ service argument should be passed as a list.
+ """
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.action = kwargs['action']
+ self.author = kwargs['author']
+ self.comment = kwargs['comment']
+ self.host = kwargs['host']
+ self.servicegroup = kwargs['servicegroup']
+ if kwargs['start'] is not None:
+ self.start = int(kwargs['start'])
+ else:
+ self.start = None
+ self.minutes = kwargs['minutes']
+ self.cmdfile = kwargs['cmdfile']
+ self.command = kwargs['command']
+
+ if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
+ self.services = kwargs['services']
+ else:
+ self.services = kwargs['services'].split(',')
+
+ self.command_results = []
+
+ def _now(self):
+ """
+ The time in seconds since 12:00:00AM Jan 1, 1970
+ """
+
+ return int(time.time())
+
+ def _write_command(self, cmd):
+ """
+ Write the given command to the Nagios command file
+ """
+
+ if not os.path.exists(self.cmdfile):
+ self.module.fail_json(msg='nagios command file does not exist',
+ cmdfile=self.cmdfile)
+ if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode):
+ self.module.fail_json(msg='nagios command file is not a fifo file',
+ cmdfile=self.cmdfile)
+ try:
+            # Use a context manager so the FIFO is flushed and closed reliably.
+            with open(self.cmdfile, 'w') as fp:
+                fp.write(cmd)
+                fp.flush()
+ self.command_results.append(cmd.strip())
+ except IOError:
+ self.module.fail_json(msg='unable to write to nagios command file',
+ cmdfile=self.cmdfile)
+
+ def _fmt_dt_str(self, cmd, host, duration, author=None,
+ comment=None, start=None,
+ svc=None, fixed=1, trigger=0):
+ """
+ Format an external-command downtime string.
+
+ cmd - Nagios command ID
+        host - Host to schedule downtime on
+ duration - Minutes to schedule downtime for
+ author - Name to file the downtime as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ Default is to use the entry time (now)
+ svc - Service to schedule downtime for, omit when for host downtime
+ fixed - Start now if 1, start when a problem is detected if 0
+ trigger - Optional ID of event to start downtime from. Leave as 0 for
+ fixed downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ entry_time = self._now()
+ if start is None:
+ start = entry_time
+
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+ duration_s = (duration * 60)
+ end = start + duration_s
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ dt_args = [str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+
+ dt_arg_str = ";".join(dt_args)
+ dt_str = hdr + dt_arg_str + "\n"
+
+ return dt_str
+
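+    # Editor's note (illustrative, not part of the original module): assuming
+    # the command is submitted at exactly the start time and the module's
+    # default author/comment are in effect,
+    # self._fmt_dt_str('SCHEDULE_SVC_DOWNTIME', 'web01', 30,
+    #                  svc='httpd', start=1555984800) produces:
+    #
+    #   [1555984800] SCHEDULE_SVC_DOWNTIME;web01;httpd;1555984800;1555986600;1;0;1800;Ansible;Scheduling downtime
+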
+ def _fmt_ack_str(self, cmd, host, author=None,
+ comment=None, svc=None, sticky=0, notify=1, persistent=0):
+ """
+ Format an external-command acknowledge string.
+
+ cmd - Nagios command ID
+        host - Host to acknowledge the problem on
+        author - Name to file the acknowledgement as
+        comment - Reason for running this command (upgrade, reboot, etc)
+        svc - Service to acknowledge, omit when acknowledging a host problem
+ sticky - the acknowledgement will remain until the host returns to an UP state if set to 1
+ notify - a notification will be sent out to contacts
+ persistent - survive across restarts of the Nagios process
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ ack_args = [str(sticky), str(notify), str(persistent), author, comment]
+
+ ack_arg_str = ";".join(ack_args)
+ ack_str = hdr + ack_arg_str + "\n"
+
+ return ack_str
+
+ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+ """
+ Format an external-command downtime deletion string.
+
+ cmd - Nagios command ID
+ host - Host to remove scheduled downtime from
+ comment - Reason downtime was added (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ svc - Service to remove downtime for, omit to remove all downtime for the host
+
+ Syntax: [submitted] COMMAND;<host_name>;
+            [<service_description>];[<start_time>];[<comment>]
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if comment is None:
+ comment = self.comment
+
+ dt_del_args = []
+ if svc is not None:
+ dt_del_args.append(svc)
+ else:
+ dt_del_args.append('')
+
+ if start is not None:
+ dt_del_args.append(str(start))
+ else:
+ dt_del_args.append('')
+
+ if comment is not None:
+ dt_del_args.append(comment)
+ else:
+ dt_del_args.append('')
+
+ dt_del_arg_str = ";".join(dt_del_args)
+ dt_del_str = hdr + dt_del_arg_str + "\n"
+
+ return dt_del_str
+
+ def _fmt_chk_str(self, cmd, host, svc=None, start=None):
+ """
+ Format an external-command forced host or service check string.
+
+ cmd - Nagios command ID
+ host - Host to check service from
+ svc - Service to check
+ start - check time
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>];<check_time>
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if start is None:
+ start = entry_time + 3
+
+ if svc is None:
+ chk_args = [str(start)]
+ else:
+ chk_args = [svc, str(start)]
+
+ chk_arg_str = ";".join(chk_args)
+ chk_str = hdr + chk_arg_str + "\n"
+
+ return chk_str
+
+ def _fmt_notif_str(self, cmd, host=None, svc=None):
+ """
+ Format an external-command notification string.
+
+ cmd - Nagios command ID.
+        host - Host to en/disable notifications on. A value is not required
+ for global downtime
+ svc - Service to schedule downtime for. A value is not required
+ for host downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
+ """
+
+ entry_time = self._now()
+ notif_str = "[%s] %s" % (entry_time, cmd)
+ if host is not None:
+ notif_str += ";%s" % host
+
+ if svc is not None:
+ notif_str += ";%s" % svc
+
+ notif_str += "\n"
+
+ return notif_str
+
+ def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ service.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the service.
+
+        Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>;
+                <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;
+                <author>;<comment>
+ """
+
+ cmd = "SCHEDULE_SVC_DOWNTIME"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+ self._write_command(dt_cmd_str)
+
+ def schedule_host_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
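+
+        For illustration, with hypothetical values the resulting line
+        could look like:
+            [1617000000] SCHEDULE_HOST_DOWNTIME;web01;1617000000;1617001800;1;0;1800;Ansible;Scheduling downtime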
+ """
+
+ cmd = "SCHEDULE_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def acknowledge_svc_problem(self, host, services=None):
+ """
+ This command is used to acknowledge a particular
+ service problem.
+
+ By acknowledging the current problem, future notifications
+        for the same service state are disabled.
+
+ Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_SVC_PROBLEM"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
+ self._write_command(ack_cmd_str)
+
+ def acknowledge_host_problem(self, host):
+ """
+ This command is used to acknowledge a particular
+ host problem.
+
+ By acknowledging the current problem, future notifications
+        for the same host state are disabled.
+
+ Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
+ <persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_HOST_PROBLEM"
+ ack_cmd_str = self._fmt_ack_str(cmd, host)
+ self._write_command(ack_cmd_str)
+
+ def schedule_forced_host_check(self, host):
+ """
+ This command schedules a forced active check for a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_CHECK"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_host_svc_check(self, host):
+ """
+ This command schedules a forced active check for all services
+ associated with a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_svc_check(self, host, services=None):
+ """
+ This command schedules a forced active check for a particular
+ service.
+
+ Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_SVC_CHECK"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
+ self._write_command(chk_cmd_str)
+
+ def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for
+ all services associated with a particular host.
+
+ During the specified downtime, Nagios will not send
+        notifications out about the services on that host.
+
+ SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def delete_host_downtime(self, host, services=None, comment=None):
+ """
+ This command is used to remove scheduled downtime for a particular
+ host.
+
+ Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+ [<service_desription>];[<start_time>];[<comment>]
+ """
+
+ cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+ if services is None:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+ self._write_command(dt_del_cmd_str)
+ else:
+ for service in services:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+ self._write_command(dt_del_cmd_str)
+
+ def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+ """
+        This command is used to schedule downtime for all hosts that
+        have services in a particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def disable_host_svc_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services on the specified host.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_host_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified host.
+
+ Note that this command does not disable notifications for
+ services associated with this host.
+
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_svc_notifications(self, host, services=None):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified service.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_host_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all hosts in the specified servicegroup.
+
+ Note that this command does not disable notifications for
+ services associated with hosts in this service group.
+
+ Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services in the specified servicegroup.
+
+ Note that this does not prevent notifications from being sent
+ out about the hosts in this servicegroup.
+
+ Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Disables notifications for all hosts in a particular
+ hostgroup.
+
+ Note that this does not disable notifications for the services
+ associated with the hosts in the hostgroup - see the
+ DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+ Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Disables notifications for all services associated with hosts
+ in a particular hostgroup.
+
+ Note that this does not disable notifications for the hosts in
+ the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+ command for that.
+
+ Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def enable_host_notifications(self, host):
+ """
+ Enables notifications for a particular host.
+
+ Note that this command does not enable notifications for
+ services associated with this host.
+
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def enable_host_svc_notifications(self, host):
+ """
+ Enables notifications for all services on the specified host.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_svc_notifications(self, host, services=None):
+ """
+ Enables notifications for a particular service.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ nagios_return = True
+ return_str_list = []
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Enables notifications for all hosts in a particular hostgroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Enables notifications for all services that are associated
+ with hosts in a particular hostgroup.
+
+ Note that this does not enable notifications for the hosts in
+ this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_host_notifications(self, servicegroup):
+ """
+ Enables notifications for all hosts that have services that
+ are members of a particular servicegroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ Enables notifications for all services that are members of a
+ particular servicegroup.
+
+ Note that this does not enable notifications for the hosts in
+ this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_host(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the host and all services on the specified host.
+
+ This is equivalent to calling disable_host_svc_notifications
+ and disable_host_notifications.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "DISABLE_HOST_SVC_NOTIFICATIONS",
+ "DISABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def unsilence_host(self, host):
+ """
+ This command is used to enable notifications for the host and
+ all services on the specified host.
+
+ This is equivalent to calling enable_host_svc_notifications
+ and enable_host_notifications.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "ENABLE_HOST_SVC_NOTIFICATIONS",
+ "ENABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_nagios(self):
+ """
+ This command is used to disable notifications for all hosts and services
+        in Nagios.
+
+ This is a 'SHUT UP, NAGIOS' command
+ """
+ cmd = 'DISABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def unsilence_nagios(self):
+ """
+ This command is used to enable notifications for all hosts and services
+        in Nagios.
+
+        This is an 'OK, NAGIOS, GO' command
+ """
+ cmd = 'ENABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def nagios_cmd(self, cmd):
+ """
+        Send an arbitrary external command to Nagios.
+
+        The submission time is prepended and a newline is appended;
+        the command itself must already be properly formatted.
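+
+        For illustration, a hypothetical call nagios_cmd('DISABLE_NOTIFICATIONS')
+        writes '[1617000000] DISABLE_NOTIFICATIONS' plus a trailing newline
+        to the command file.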
+ """
+
+ pre = '[%s]' % int(time.time())
+
+ post = '\n'
+ cmdstr = '%s %s%s' % (pre, cmd, post)
+ self._write_command(cmdstr)
+
+ def act(self):
+ """
+        Determine the requested action from the Ansible module
+        parameters and carry it out.
+ """
+ # host or service downtime?
+ if self.action == 'downtime':
+ if self.services == 'host':
+ self.schedule_host_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ elif self.services == 'all':
+ self.schedule_host_svc_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ else:
+ self.schedule_svc_downtime(self.host,
+ services=self.services,
+ minutes=self.minutes,
+ start=self.start)
+
+ elif self.action == 'acknowledge':
+ if self.services == 'host':
+ self.acknowledge_host_problem(self.host)
+ else:
+ self.acknowledge_svc_problem(self.host, services=self.services)
+
+ elif self.action == 'delete_downtime':
+ if self.services == 'host':
+ self.delete_host_downtime(self.host)
+ elif self.services == 'all':
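+                # passing comment='' matches downtimes regardless of the
+                # comment they were filed under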
+ self.delete_host_downtime(self.host, comment='')
+ else:
+ self.delete_host_downtime(self.host, services=self.services)
+
+ elif self.action == 'forced_check':
+ if self.services == 'host':
+ self.schedule_forced_host_check(self.host)
+ elif self.services == 'all':
+ self.schedule_forced_host_svc_check(self.host)
+ else:
+ self.schedule_forced_svc_check(self.host, services=self.services)
+
+ elif self.action == "servicegroup_host_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+ elif self.action == "servicegroup_service_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+
+ # toggle the host AND service alerts
+ elif self.action == 'silence':
+ self.silence_host(self.host)
+
+ elif self.action == 'unsilence':
+ self.unsilence_host(self.host)
+
+ # toggle host/svc alerts
+ elif self.action == 'enable_alerts':
+ if self.services == 'host':
+ self.enable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.enable_host_svc_notifications(self.host)
+ else:
+ self.enable_svc_notifications(self.host,
+ services=self.services)
+
+ elif self.action == 'disable_alerts':
+ if self.services == 'host':
+ self.disable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.disable_host_svc_notifications(self.host)
+ else:
+ self.disable_svc_notifications(self.host,
+ services=self.services)
+ elif self.action == 'silence_nagios':
+ self.silence_nagios()
+
+ elif self.action == 'unsilence_nagios':
+ self.unsilence_nagios()
+
+ elif self.action == 'command':
+ self.nagios_cmd(self.command)
+
+        # unknown action
+ else:
+ self.module.fail_json(msg="unknown action specified: '%s'" %
+ self.action)
+
+ self.module.exit_json(nagios_commands=self.command_results,
+ changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py
new file mode 100644
index 00000000..fc62aa70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py
@@ -0,0 +1,878 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cloudflare_dns
+author:
+- Michael Gruener (@mgruener)
+requirements:
+ - python >= 2.6
+short_description: Manage Cloudflare DNS records
+description:
+  - "Manages DNS records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
+options:
+ api_token:
+ description:
+ - API token.
+    - Required for API token authentication.
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ version_added: '0.2.0'
+ account_api_key:
+ description:
+ - Account API key.
+    - Required for API key authentication.
+ - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ aliases: [ account_api_token ]
+ account_email:
+ description:
+    - Account email. Required for API key authentication.
+ type: str
+ required: false
+ algorithm:
+ description:
+ - Algorithm number.
+ - Required for C(type=DS) and C(type=SSHFP) when C(state=present).
+ type: int
+ cert_usage:
+ description:
+ - Certificate usage number.
+ - Required for C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ hash_type:
+ description:
+ - Hash type number.
+ - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 1, 2 ]
+ key_tag:
+ description:
+ - DNSSEC key tag.
+    - Required for C(type=DS) when C(state=present).
+ type: int
+ port:
+ description:
+ - Service port.
+ - Required for C(type=SRV) and C(type=TLSA).
+ type: int
+ priority:
+ description:
+ - Record priority.
+    - Required for C(type=MX) and C(type=SRV).
+ default: 1
+ type: int
+ proto:
+ description:
+ - Service protocol. Required for C(type=SRV) and C(type=TLSA).
+ - Common values are TCP and UDP.
+ - Before Ansible 2.6 only TCP and UDP were available.
+ type: str
+ proxied:
+ description:
+ - Proxy through Cloudflare network or just use DNS.
+ type: bool
+ default: no
+ record:
+ description:
+ - Record to add.
+ - Required if C(state=present).
+    - Default is C(@) (i.e. the zone name).
+ type: str
+ default: '@'
+ aliases: [ name ]
+ selector:
+ description:
+ - Selector number.
+ - Required for C(type=TLSA) when C(state=present).
+ choices: [ 0, 1 ]
+ type: int
+ service:
+ description:
+ - Record service.
+    - Required for C(type=SRV).
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+ - Only use with C(state=present).
+ - This will delete all other records with the same record name and type.
+ type: bool
+ state:
+ description:
+ - Whether the record(s) should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ timeout:
+ description:
+ - Timeout for Cloudflare API calls.
+ type: int
+ default: 30
+ ttl:
+ description:
+ - The TTL to give the new record.
+ - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ type: int
+ default: 1
+ type:
+ description:
+ - The type of DNS record to create. Required if C(state=present).
+ - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7.
+ type: str
+ choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ]
+ value:
+ description:
+ - The record value.
+ - Required for C(state=present).
+ type: str
+ aliases: [ content ]
+ weight:
+ description:
+ - Service weight.
+ - Required for C(type=SRV).
+ type: int
+ default: 1
+ zone:
+ description:
+ - The name of the Zone to work with (e.g. "example.com").
+ - The Zone must already exist.
+ type: str
+ required: true
+ aliases: [ domain ]
+'''
+
+EXAMPLES = r'''
+- name: Create a test.example.net A record to point to 127.0.0.1
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ register: record
+
+- name: Create a record using api token
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ api_token: dummyapitoken
+
+- name: Create an example.net CNAME record to example.com
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Change its TTL
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ ttl: 600
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Delete the record
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: absent
+
+- name: Create an example.net CNAME record to example.com and proxy through Cloudflare's network
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ proxied: yes
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+# This deletes all other TXT records named "test.example.net"
+- name: Create TXT record "test.example.net" with value "unique value"
+ community.general.cloudflare_dns:
+ domain: example.net
+ record: test
+ type: TXT
+ value: unique value
+ solo: true
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Create an SRV record _foo._tcp.example.net
+ community.general.cloudflare_dns:
+ domain: example.net
+ service: foo
+ proto: tcp
+ port: 3500
+ priority: 10
+ weight: 20
+ type: SRV
+ value: fooserver.example.net
+
+- name: Create an SSHFP record login.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: login
+ type: SSHFP
+ algorithm: 4
+ hash_type: 2
+ value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1
+
+- name: Create a TLSA record _25._tcp.mail.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: mail
+ port: 25
+ proto: tcp
+ type: TLSA
+ cert_usage: 3
+ selector: 1
+ hash_type: 1
+ value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
+
+- name: Create a DS record for subdomain.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: subdomain
+ type: DS
+ key_tag: 5464
+ algorithm: 8
+ hash_type: 2
+ value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
+'''
+
+RETURN = r'''
+record:
+ description: A dictionary containing the record data.
+ returned: success, except on record deletion
+ type: complex
+ contains:
+ content:
+ description: The record content (details depend on record type).
+ returned: success
+ type: str
+ sample: 192.0.2.91
+ created_on:
+ description: The record creation date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ data:
+ description: Additional record data.
+ returned: success, if type is SRV, DS, SSHFP or TLSA
+ type: dict
+ sample: {
+ name: "jabber",
+ port: 8080,
+ priority: 10,
+ proto: "_tcp",
+ service: "_xmpp",
+ target: "jabberhost.sample.com",
+ weight: 5,
+ }
+ id:
+ description: The record ID.
+ returned: success
+ type: str
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available.
+ returned: success
+ type: bool
+ sample: False
+ meta:
+ description: No documentation available.
+ returned: success
+ type: dict
+ sample: { auto_added: false }
+ modified_on:
+ description: Record modification date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ name:
+ description: The record name as FQDN (including _service and _proto for SRV).
+ returned: success
+ type: str
+ sample: www.sample.com
+ priority:
+ description: Priority of the MX record.
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: Whether this record can be proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ proxied:
+ description: Whether the record is proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ ttl:
+ description: The time-to-live for the record.
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ zone_id:
+ description: The ID of the zone containing the record.
+ returned: success
+ type: str
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: The name of the zone containing the record.
+ returned: success
+ type: str
+ sample: sample.com
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+
+
+def lowercase_string(param):
+ if not isinstance(param, str):
+ return param
+ return param.lower()
+
+
+class CloudflareAPI(object):
+
+ cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
+ changed = False
+
+ def __init__(self, module):
+ self.module = module
+ self.api_token = module.params['api_token']
+ self.account_api_key = module.params['account_api_key']
+ self.account_email = module.params['account_email']
+ self.algorithm = module.params['algorithm']
+ self.cert_usage = module.params['cert_usage']
+ self.hash_type = module.params['hash_type']
+ self.key_tag = module.params['key_tag']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = lowercase_string(module.params['proto'])
+ self.proxied = module.params['proxied']
+ self.selector = module.params['selector']
+ self.record = lowercase_string(module.params['record'])
+ self.service = lowercase_string(module.params['service'])
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = lowercase_string(module.params['zone'])
+
+ if self.record == '@':
+ self.record = self.zone
+
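+        # normalize values that Cloudflare stores in canonical form, so
+        # comparisons with existing records do not produce false diffs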
+ if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
+ self.value = self.value.rstrip('.').lower()
+
+ if (self.type == 'AAAA') and (self.value is not None):
+ self.value = self.value.lower()
+
+ if (self.type == 'SRV'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.service is not None) and (not self.service.startswith('_')):
+ self.service = '_' + self.service
+
+ if (self.type == 'TLSA'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.port is not None):
+ self.port = '_' + str(self.port)
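+        # hypothetical illustration: proto='tcp' and service='xmpp' become
+        # '_tcp' and '_xmpp'; for TLSA, port=25 becomes '_25', yielding
+        # record names like '_25._tcp.mail.example.com'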
+
+ if not self.record.endswith(self.zone):
+ self.record = self.record + '.' + self.zone
+
+ if (self.type == 'DS'):
+ if self.record == self.zone:
+ self.module.fail_json(msg="DS records only apply to subdomains.")
+
+ def _cf_simple_api_call(self, api_call, method='GET', payload=None):
+ if self.api_token:
+ headers = {
+ 'Authorization': 'Bearer ' + self.api_token,
+ 'Content-Type': 'application/json',
+ }
+ else:
+ headers = {
+ 'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_key,
+ 'Content-Type': 'application/json',
+ }
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
+
+ resp, info = fetch_url(self.module,
+ self.cf_api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method,
+ timeout=self.timeout)
+
+ if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))
+
+ error_msg = ''
+ if info['status'] == 401:
+ # Unauthorized
+ error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 403:
+ # Forbidden
+ error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 429:
+ # Too many requests
+ error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 405:
+ # Method not allowed
+ error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 415:
+ # Unsupported Media Type
+ error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 400:
+ # Bad Request
+ error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+
+ result = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+ else:
+ error_msg += "; The API response was empty"
+
+ if content:
+ try:
+ result = json.loads(to_text(content, errors='surrogate_or_strict'))
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
+
+ # Without a valid/parsed JSON response no more error processing can be done
+ if result is None:
+ self.module.fail_json(msg=error_msg)
+
+ if 'success' not in result:
+ error_msg += "; Unexpected error details: {0}".format(result.get('error'))
+ self.module.fail_json(msg=error_msg)
+
+ if not result['success']:
+ error_msg += "; Error details: "
+ for error in result['errors']:
+ error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
+ if 'error_chain' in error:
+ for chain_error in error['error_chain']:
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
+ def _cf_api_call(self, api_call, method='GET', payload=None):
+ result, status = self._cf_simple_api_call(api_call, method, payload)
+
+ data = result['result']
+
+ if 'result_info' in result:
+ pagination = result['result_info']
+ if pagination['total_pages'] > 1:
+ next_page = int(pagination['page']) + 1
+                # strip the "page" parameter from the call parameters (if there are any)
+                if '?' in api_call:
+                    raw_api_call, query = api_call.split('?', 1)
+                    extra_parameters = [param for param in query.split('&') if not param.startswith('page')]
+                else:
+                    raw_api_call = api_call
+                    extra_parameters = []
+                while next_page <= pagination['total_pages']:
+                    # rebuild the query string for every page; appending to the
+                    # same URL would stack '?page=...' fragments and never
+                    # advance the requested page
+                    parameters = ['page={0}'.format(next_page)] + extra_parameters
+                    result, status = self._cf_simple_api_call(raw_api_call + '?' + '&'.join(parameters), method, payload)
+                    data += result['result']
+                    next_page += 1
+
+ return data, status
+
+ def _get_zone_id(self, zone=None):
+ if not zone:
+ zone = self.zone
+
+ zones = self.get_zones(zone)
+ if len(zones) > 1:
+ self.module.fail_json(msg="More than one zone matches {0}".format(zone))
+
+ if len(zones) < 1:
+ self.module.fail_json(msg="No zone found with name {0}".format(zone))
+
+ return zones[0]['id']
+
+ def get_zones(self, name=None):
+ if not name:
+ name = self.zone
+ param = ''
+ if name:
+ param = '?' + urlencode({'name': name})
+ zones, status = self._cf_api_call('/zones' + param)
+ return zones
+
+ def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
+ if not zone_name:
+ zone_name = self.zone
+ if not type:
+ type = self.type
+ if not record:
+ record = self.record
+        # the default value '' means "fall back to the module-level value";
+        # passing None explicitly disables filtering on record content
+ if (not value) and (value is not None):
+ value = self.value
+
+ zone_id = self._get_zone_id()
+ api_call = '/zones/{0}/dns_records'.format(zone_id)
+ query = {}
+ if type:
+ query['type'] = type
+ if record:
+ query['name'] = record
+ if value:
+ query['content'] = value
+ if query:
+ api_call += '?' + urlencode(query)
+
+ records, status = self._cf_api_call(api_call)
+ return records
+
+ def delete_dns_records(self, **kwargs):
+ params = {}
+ for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ records = []
+ content = params['value']
+ search_record = params['record']
+ if params['type'] == 'SRV':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ elif params['type'] == 'DS':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'SSHFP':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'TLSA':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
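+        # compound record types (SRV, DS, SSHFP, TLSA) are matched against
+        # the API's 'content' field by building the same tab-joined
+        # representation of their data fields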
+ if params['solo']:
+ search_value = None
+ else:
+ search_value = content
+
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+
+        for rr in records:
+            if params['solo'] and (rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content):
+                # in solo mode the exactly matching record is kept
+                continue
+            self.changed = True
+            if not self.module.check_mode:
+                result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ return self.changed
+
+ def ensure_dns_record(self, **kwargs):
+ params = {}
+ for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ search_value = params['value']
+ search_record = params['record']
+ new_record = None
+ if (params['type'] is None) or (params['record'] is None):
+ self.module.fail_json(msg="You must provide a type and a record to create a new record")
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
+ if not params['value']:
+ self.module.fail_json(msg="You must provide a non-empty value to create this record type")
+
+            # there can only be one CNAME per record name
+ # ignoring the value when searching for existing
+ # CNAME records allows us to update the value if it
+ # changes
+ if params['type'] == 'CNAME':
+ search_value = None
+
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "ttl": params['ttl']
+ }
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME']):
+ new_record["proxied"] = params["proxied"]
+
+ if params['type'] == 'MX':
+ for attr in [params['priority'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide priority and a value to create this record type")
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "priority": params['priority'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'SRV':
+ for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
+ srv_data = {
+ "target": params['value'],
+ "port": params['port'],
+ "weight": params['weight'],
+ "priority": params['priority'],
+ "name": params['record'][:-len('.' + params['zone'])],
+ "proto": params['proto'],
+ "service": params['service']
+ }
+ new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
+ search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+
+ if params['type'] == 'DS':
+ for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
+ ds_data = {
+ "key_tag": params['key_tag'],
+ "algorithm": params['algorithm'],
+ "digest_type": params['hash_type'],
+ "digest": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': ds_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'SSHFP':
+ for attr in [params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
+ sshfp_data = {
+ "fingerprint": params['value'],
+ "type": params['hash_type'],
+ "algorithm": params['algorithm'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': sshfp_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'TLSA':
+ for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ tlsa_data = {
+ "usage": params['cert_usage'],
+ "selector": params['selector'],
+ "matching_type": params['hash_type'],
+ "certificate": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": search_record,
+ 'data': tlsa_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ zone_id = self._get_zone_id(params['zone'])
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+        # in theory this should be impossible, as Cloudflare does not allow
+        # the creation of duplicate records, but let's cover it anyway
+ if len(records) > 1:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # record already exists, check if it must be updated
+ if len(records) == 1:
+ cur_record = records[0]
+ do_update = False
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
+ do_update = True
+ if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ do_update = True
+ if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+ do_update = True
+ if ('data' in new_record) and ('data' in cur_record):
+ if (cur_record['data'] != new_record['data']):
+ do_update = True
+ if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ do_update = True
+ if do_update:
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
+ self.changed = True
+ return result, self.changed
+ else:
+ return records, self.changed
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
+ self.changed = True
+ return result, self.changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_token=dict(type='str', required=False, no_log=True),
+ account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
+ account_email=dict(type='str', required=False),
+ algorithm=dict(type='int'),
+ cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
+ hash_type=dict(type='int', choices=[1, 2]),
+ key_tag=dict(type='int'),
+ port=dict(type='int'),
+ priority=dict(type='int', default=1),
+ proto=dict(type='str'),
+ proxied=dict(type='bool', default=False),
+ record=dict(type='str', default='@', aliases=['name']),
+ selector=dict(type='int', choices=[0, 1]),
+ service=dict(type='str'),
+ solo=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ timeout=dict(type='int', default=30),
+ ttl=dict(type='int', default=1),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
+ value=dict(type='str', aliases=['content']),
+ weight=dict(type='int', default=1),
+ zone=dict(type='str', required=True, aliases=['domain']),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['record', 'type', 'value']),
+ ('state', 'absent', ['record']),
+ ('type', 'SRV', ['proto', 'service']),
+ ('type', 'TLSA', ['proto', 'port']),
+ ],
+ )
+
+ if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
+ module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
+ if module.params['type'] == 'SRV':
+ if not ((module.params['weight'] is not None and module.params['port'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['weight'] is None and module.params['port'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'SSHFP':
+ if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'TLSA':
+ if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'DS':
+ if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")
+
+ changed = False
+ cf_api = CloudflareAPI(module)
+
+ # sanity checks
+ if cf_api.is_solo and cf_api.state == 'absent':
+ module.fail_json(msg="solo=true can only be used with state=present")
+
+ # perform add, delete or update (only the TTL can be updated) of one or
+ # more records
+ if cf_api.state == 'present':
+ # delete all records matching record name + type
+ if cf_api.is_solo:
+ changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
+ result, changed = cf_api.ensure_dns_record()
+ if isinstance(result, list):
+ module.exit_json(changed=changed, result={'record': result[0]})
+
+ module.exit_json(changed=changed, result={'record': result})
+ else:
+ # force solo to False, just to be sure
+ changed = cf_api.delete_dns_records(solo=False)
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py
new file mode 100644
index 00000000..1c814a9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsimple
+short_description: Interface with dnsimple.com (a DNS hosting service)
+description:
+ - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
+notes:
+ - DNSimple API v1 is deprecated. Please install dnsimple-python>=1.0.0 which uses v2 API.
+options:
+ account_email:
+ description:
+ - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
+ - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
+ type: str
+ account_api_token:
+ description:
+ - Account API token. See I(account_email) for more information.
+ type: str
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
+ - If omitted, a list of domains will be returned.
+ - If domain is present but the domain doesn't exist, it will be created.
+ type: str
+ record:
+ description:
+      - Record to add. If blank, a record for the domain itself will be created. Supports the wildcard (*).
+ type: str
+ record_ids:
+ description:
+      - List of record IDs to check for existence or absence, depending on C(state).
+ type: list
+ type:
+ description:
+ - The type of DNS record to create.
+ choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
+ type: str
+ ttl:
+ description:
+ - The TTL to give the new record in seconds.
+ default: 3600
+ type: int
+ value:
+ description:
+ - Record value.
+ - Must be specified when trying to ensure a record exists.
+ type: str
+ priority:
+ description:
+ - Record priority.
+ type: int
+ state:
+ description:
+      - Whether the record should exist or not.
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+      - Only use when C(state) is set to C(present) on a record.
+    type: bool
+ default: no
+requirements:
+ - "dnsimple >= 1.0.0"
+author: "Alex Coomans (@drcapulet)"
+'''
+
+EXAMPLES = '''
+- name: Authenticate using email and API token and fetch all domains
+ community.general.dnsimple:
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ delegate_to: localhost
+
+- name: Fetch my.com domain records
+ community.general.dnsimple:
+ domain: my.com
+ state: present
+ delegate_to: localhost
+ register: records
+
+- name: Delete a domain
+ community.general.dnsimple:
+ domain: my.com
+ state: absent
+ delegate_to: localhost
+
+- name: Create a test.my.com A record to point to 127.0.0.1
+ community.general.dnsimple:
+ domain: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ delegate_to: localhost
+ register: record
+
+- name: Delete record using record_ids
+ community.general.dnsimple:
+ domain: my.com
+ record_ids: '{{ record["id"] }}'
+ state: absent
+ delegate_to: localhost
+
+- name: Create a my.com CNAME record to example.com
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: present
+ delegate_to: localhost
+
+- name: Change TTL value for a record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ delegate_to: localhost
+
+- name: Delete the record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r"""# """
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+DNSIMPLE_IMP_ERR = None
+try:
+ from dnsimple import DNSimple
+ from dnsimple.dnsimple import __version__ as dnsimple_version
+ from dnsimple.dnsimple import DNSimpleException
+ HAS_DNSIMPLE = True
+except ImportError:
+ DNSIMPLE_IMP_ERR = traceback.format_exc()
+ HAS_DNSIMPLE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_email=dict(type='str'),
+ account_api_token=dict(type='str', no_log=True),
+ domain=dict(type='str'),
+ record=dict(type='str'),
+ record_ids=dict(type='list'),
+ type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO',
+ 'POOL']),
+ ttl=dict(type='int', default=3600),
+ value=dict(type='str'),
+ priority=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ solo=dict(type='bool', default=False),
+ ),
+ required_together=[
+ ['record', 'value']
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_DNSIMPLE:
+ module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR)
+
+ if LooseVersion(dnsimple_version) < LooseVersion('1.0.0'):
+ module.fail_json(msg="Current version of dnsimple Python module [%s] uses 'v1' API which is deprecated."
+ " Please upgrade to version 1.0.0 and above to use dnsimple 'v2' API." % dnsimple_version)
+
+ account_email = module.params.get('account_email')
+ account_api_token = module.params.get('account_api_token')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
+
+ if account_email and account_api_token:
+ client = DNSimple(email=account_email, api_token=account_api_token)
+ elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
+ client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
+ else:
+ client = DNSimple()
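+        # with no explicit credentials, the dnsimple library falls back to a
+        # ~/.dnsimple credentials file (see the dnsimple-python docs)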
+
+ try:
+ # Let's figure out what operation we want to do
+
+ # No domain, return a list
+ if not domain:
+ domains = client.domains()
+ module.exit_json(changed=False, result=[d['domain'] for d in domains])
+
+ # Domain & No record
+ if domain and record is None and not record_ids:
+ domains = [d['domain'] for d in client.domains()]
+ if domain.isdigit():
+ dr = next((d for d in domains if d['id'] == int(domain)), None)
+ else:
+ dr = next((d for d in domains if d['name'] == domain), None)
+ if state == 'present':
+ if dr:
+ module.exit_json(changed=False, result=dr)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
+
+ # state is absent
+ else:
+ if dr:
+ if not module.check_mode:
+ client.delete(domain)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # need the not none check since record could be an empty string
+ if domain and record is not None:
+ records = [r['record'] for r in client.records(str(domain), params={'name': record})]
+
+ if not record_type:
+ module.fail_json(msg="Missing the record type")
+
+ if not value:
+ module.fail_json(msg="Missing the record value")
+
+ rr = next((r for r in records if r['name'] == record and r['type'] == record_type and r['content'] == value), None)
+
+ if state == 'present':
+ changed = False
+ if is_solo:
+ # delete any records that have the same name and record type
+ same_type = [r['id'] for r in records if r['name'] == record and r['type'] == record_type]
+ if rr:
+ same_type = [rid for rid in same_type if rid != rr['id']]
+ if same_type:
+ if not module.check_mode:
+ for rid in same_type:
+ client.delete_record(str(domain), rid)
+ changed = True
+ if rr:
+ # check if we need to update
+ if rr['ttl'] != ttl or rr['priority'] != priority:
+ data = {}
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
+ else:
+ module.exit_json(changed=changed, result=rr)
+ else:
+ # create it
+ data = {
+ 'name': record,
+ 'type': record_type,
+ 'content': value,
+ }
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
+
+ # state is absent
+ else:
+ if rr:
+ if not module.check_mode:
+ client.delete_record(str(domain), rr['id'])
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # Make sure these record_ids either all exist or none
+ if domain and record_ids:
+ current_records = [str(r['record']['id']) for r in client.records(str(domain))]
+ wanted_records = [str(r) for r in record_ids]
+ if state == 'present':
+ difference = list(set(wanted_records) - set(current_records))
+ if difference:
+ module.fail_json(msg="Missing the following records: %s" % difference)
+ else:
+ module.exit_json(changed=False)
+
+ # state is absent
+ else:
+ difference = list(set(wanted_records) & set(current_records))
+ if difference:
+ if not module.check_mode:
+ for rid in difference:
+ client.delete_record(str(domain), rid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ except DNSimpleException as e:
+ module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
+
+ module.fail_json(msg="Unknown what you wanted me to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py
new file mode 100644
index 00000000..75135c82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsmadeeasy
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
+description:
+ - >
+    Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records and their monitors/failover only; there is no
+    manipulation of domains or account settings yet. See U(https://www.dnsmadeeasy.com/integration/restapi/)
+options:
+ account_key:
+ description:
+ - Account API Key.
+ required: true
+ type: str
+
+ account_secret:
+ description:
+ - Account Secret Key.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
+        resolution.
+ required: true
+ type: str
+
+ sandbox:
+ description:
+ - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
+ type: bool
+ default: 'no'
+
+ record_name:
+ description:
+      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
+ of the state argument.
+ type: str
+
+ record_type:
+ description:
+ - Record type.
+ choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+ type: str
+
+ record_value:
+ description:
+ - >
+ Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
+ SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
+ - >
+        If record_value is not specified, no changes will be made and the record will be returned in 'result'
+ (in other words, this module can be used to fetch a record's current id, type, and ttl)
+ type: str
+
+ record_ttl:
+ description:
+ - record's "Time to live". Number of seconds the record remains cached in DNS servers.
+ default: 1800
+ type: int
+
+ state:
+ description:
+      - Whether the record should exist or not.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+ monitor:
+ description:
+ - If C(yes), add or change the monitor. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ systemDescription:
+ description:
+ - Description used by the monitor.
+ default: ''
+ type: str
+
+ maxEmails:
+ description:
+ - Number of emails sent to the contact list by the monitor.
+ default: 1
+ type: int
+
+ protocol:
+ description:
+ - Protocol used by the monitor.
+ default: 'HTTP'
+ choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
+ type: str
+
+ port:
+ description:
+ - Port used by the monitor.
+ default: 80
+ type: int
+
+ sensitivity:
+ description:
+      - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3.
+ default: 'Medium'
+ choices: ['Low', 'Medium', 'High']
+ type: str
+
+ contactList:
+ description:
+      - Name or ID of the contact list that the monitor will notify.
+ - The default C('') means the Account Owner.
+ default: ''
+ type: str
+
+ httpFqdn:
+ description:
+ - The fully qualified domain name used by the monitor.
+ type: str
+
+ httpFile:
+ description:
+      - The file at the FQDN that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ httpQueryString:
+ description:
+ - The string in the httpFile that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ failover:
+ description:
+ - If C(yes), add or change the failover. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ autoFailover:
+ description:
+ - If true, fallback to the primary IP address is manual after a failover.
+ - If false, fallback to the primary IP address is automatic after a failover.
+ type: bool
+ default: 'no'
+
+ ip1:
+ description:
+ - Primary IP address for the failover.
+ - Required if adding or changing the monitor or failover.
+ type: str
+
+ ip2:
+ description:
+ - Secondary IP address for the failover.
+ - Required if adding or changing the failover.
+ type: str
+
+ ip3:
+ description:
+ - Tertiary IP address for the failover.
+ type: str
+
+ ip4:
+ description:
+ - Quaternary IP address for the failover.
+ type: str
+
+ ip5:
+ description:
+ - Quinary IP address for the failover.
+ type: str
+
+notes:
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
+ seconds of actual time by using NTP.
+ - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
+    These values can be registered and used in your playbooks.
+ - Only A records can have a monitor or failover.
+ - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
+ - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
+ - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
+
+requirements: [ hashlib, hmac ]
+author: "Brice Burgess (@briceburg)"
+'''
+
+EXAMPLES = '''
+- name: Fetch my.com domain records
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ register: response
+
+- name: Create a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+
+- name: Update the previously created record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_value: 192.0.2.23
+
+- name: Fetch a specific record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ register: response
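+
+# A sketch of consuming the registered result: when fetching, the module's
+# "result" element contains the record (and monitor, if any), so fields such
+# as the TTL can be read from it (field name assumed from the DNS Made Easy
+# record format).
+- name: Show the fetched record's TTL
+  ansible.builtin.debug:
+    msg: "TTL is {{ response.result.record.ttl }}"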
+
+- name: Delete a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ record_type: A
+ state: absent
+ record_name: test
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ ip3: 127.0.0.4
+ ip4: 127.0.0.5
+ ip5: 127.0.0.6
+
+- name: Add a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: my contact list
+
+- name: Add a monitor with http options
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: 1174 # contact list id
+ httpFqdn: http://my.com
+ httpFile: example
+ httpQueryString: some string
+
+- name: Add a monitor and a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ monitor: yes
+ protocol: HTTPS
+ port: 443
+ maxEmails: 1
+ systemDescription: monitoring my.com status
+ contactList: emergencycontacts
+
+- name: Remove a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: no
+
+- name: Remove a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: no
+'''
+
+# ============================================
+# DNSMadeEasy module specific support methods.
+#
+
+import json
+import hashlib
+import hmac
+import locale
+from time import strftime, gmtime
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six import string_types
+
+
+class DME2(object):
+
+ def __init__(self, apikey, secret, domain, sandbox, module):
+ self.module = module
+
+ self.api = apikey
+ self.secret = secret
+
+ if sandbox:
+ self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
+ self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
+ else:
+ self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
+
+ self.domain = str(domain)
+ self.domain_map = None # ["domain_name"] => ID
+ self.record_map = None # ["record_name"] => ID
+ self.records = None # ["record_ID"] => <record>
+ self.all_records = None
+ self.contactList_map = None # ["contactList_name"] => ID
+
+ # Lookup the domain ID if passed as a domain name vs. ID
+ if not self.domain.isdigit():
+ self.domain = self.getDomainByName(self.domain)['id']
+
+ self.record_url = 'dns/managed/' + str(self.domain) + '/records'
+ self.monitor_url = 'monitor'
+ self.contactList_url = 'contactList'
+
+ def _headers(self):
+ currTime = self._get_date()
+ hashstring = self._create_hash(currTime)
+ headers = {'x-dnsme-apiKey': self.api,
+ 'x-dnsme-hmac': hashstring,
+ 'x-dnsme-requestDate': currTime,
+ 'content-type': 'application/json'}
+ return headers
+
+ def _get_date(self):
+ locale.setlocale(locale.LC_TIME, 'C')
+ return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
+
+ def _create_hash(self, rightnow):
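+        # Per the API's authentication scheme: an HMAC-SHA1 of the request
+        # date string (the same value sent as x-dnsme-requestDate), keyed
+        # with the account secret key.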
+ return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
+
+ def query(self, resource, method, data=None):
+ url = self.baseurl + resource
+ if data and not isinstance(data, string_types):
+ data = urlencode(data)
+
+ response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
+ if info['status'] not in (200, 201, 204):
+ self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
+
+ try:
+ return json.load(response)
+ except Exception:
+ return {}
+
+ def getDomain(self, domain_id):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.domains.get(domain_id, False)
+
+ def getDomainByName(self, domain_name):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.getDomain(self.domain_map.get(domain_name, 0))
+
+ def getDomains(self):
+ return self.query('dns/managed', 'GET')['data']
+
+ def getRecord(self, record_id):
+ if not self.record_map:
+ self._instMap('record')
+
+ return self.records.get(record_id, False)
+
+ # Try to find a single record matching this one.
+ # How we do this depends on the type of record. For instance, there
+ # can be several MX records for a single record_name while there can
+ # only be a single CNAME for a particular record_name. Note also that
+ # there can be several records with different types for a single name.
+ def getMatchingRecord(self, record_name, record_type, record_value):
+ # Get all the records if not already cached
+ if not self.all_records:
+ self.all_records = self.getRecords()
+
+ if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
+ for result in self.all_records:
+ if result['name'] == record_name and result['type'] == record_type:
+ return result
+ return False
+ elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
+ for result in self.all_records:
+ if record_type == "MX":
+ value = record_value.split(" ")[1]
+ # Note that TXT records are surrounded by quotes in the API response.
+ elif record_type == "TXT":
+ value = '"{0}"'.format(record_value)
+ elif record_type == "SRV":
+ value = record_value.split(" ")[3]
+ else:
+ value = record_value
+ if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
+ return result
+ return False
+ else:
+ raise Exception('record_type not yet supported')
+
+ def getRecords(self):
+ return self.query(self.record_url, 'GET')['data']
+
+ def _instMap(self, type):
+ # @TODO cache this call so it's executed only once per ansible execution
+ map = {}
+ results = {}
+
+ # iterate over e.g. self.getDomains() || self.getRecords()
+ for result in getattr(self, 'get' + type.title() + 's')():
+
+ map[result['name']] = result['id']
+ results[result['id']] = result
+
+ # e.g. self.domain_map || self.record_map
+ setattr(self, type + '_map', map)
+ setattr(self, type + 's', results) # e.g. self.domains || self.records
+
+ def prepareRecord(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def createRecord(self, data):
+        # @TODO update the cache with the resulting record + id when implemented
+ return self.query(self.record_url, 'POST', data)
+
+ def updateRecord(self, record_id, data):
+        # @TODO update the cache with the resulting record + id when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
+
+ def deleteRecord(self, record_id):
+        # @TODO remove the record from the cache when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'DELETE')
+
+ def getMonitor(self, record_id):
+ return self.query(self.monitor_url + '/' + str(record_id), 'GET')
+
+ def updateMonitor(self, record_id, data):
+ return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
+
+ def prepareMonitor(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def getContactList(self, contact_list_id):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.contactLists.get(contact_list_id, False)
+
+ def getContactlists(self):
+ return self.query(self.contactList_url, 'GET')['data']
+
+ def getContactListByName(self, name):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.getContactList(self.contactList_map.get(name, 0))
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_key=dict(required=True, no_log=True),
+ account_secret=dict(required=True, no_log=True),
+ domain=dict(required=True),
+ sandbox=dict(default=False, type='bool'),
+ state=dict(required=True, choices=['present', 'absent']),
+ record_name=dict(required=False),
+ record_type=dict(required=False, choices=[
+ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
+ record_value=dict(required=False),
+ record_ttl=dict(required=False, default=1800, type='int'),
+ monitor=dict(default=False, type='bool'),
+ systemDescription=dict(default=''),
+ maxEmails=dict(default=1, type='int'),
+ protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
+ port=dict(default=80, type='int'),
+ sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
+ contactList=dict(default=None),
+ httpFqdn=dict(required=False),
+ httpFile=dict(required=False),
+ httpQueryString=dict(required=False),
+ failover=dict(default=False, type='bool'),
+ autoFailover=dict(default=False, type='bool'),
+ ip1=dict(required=False),
+ ip2=dict(required=False),
+ ip3=dict(required=False),
+ ip4=dict(required=False),
+ ip5=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_together=[
+ ['record_value', 'record_ttl', 'record_type']
+ ],
+ required_if=[
+ ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
+ ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
+ ]
+ )
+
+ protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
+ sensitivities = dict(Low=8, Medium=5, High=3)
+
+ DME = DME2(module.params["account_key"], module.params[
+ "account_secret"], module.params["domain"], module.params["sandbox"], module)
+ state = module.params["state"]
+ record_name = module.params["record_name"]
+ record_type = module.params["record_type"]
+ record_value = module.params["record_value"]
+
+ # Follow Keyword Controlled Behavior
+ if record_name is None:
+ domain_records = DME.getRecords()
+ if not domain_records:
+ module.fail_json(
+ msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
+ module.exit_json(changed=False, result=domain_records)
+
+ # Fetch existing record + Build new one
+ current_record = DME.getMatchingRecord(record_name, record_type, record_value)
+ new_record = {'name': record_name}
+ for i in ["record_value", "record_type", "record_ttl"]:
+ if not module.params[i] is None:
+ new_record[i[len("record_"):]] = module.params[i]
+ # Special handling for mx record
+ if new_record["type"] == "MX":
+ new_record["mxLevel"] = new_record["value"].split(" ")[0]
+ new_record["value"] = new_record["value"].split(" ")[1]
+
+ # Special handling for SRV records
+ if new_record["type"] == "SRV":
+ new_record["priority"] = new_record["value"].split(" ")[0]
+ new_record["weight"] = new_record["value"].split(" ")[1]
+ new_record["port"] = new_record["value"].split(" ")[2]
+ new_record["value"] = new_record["value"].split(" ")[3]
+
+ # Fetch existing monitor if the A record indicates it should exist and build the new monitor
+ current_monitor = dict()
+ new_monitor = dict()
+ if current_record and current_record['type'] == 'A':
+ current_monitor = DME.getMonitor(current_record['id'])
+
+ # Build the new monitor
+ for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
+ 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
+ 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
+ if module.params[i] is not None:
+ if i == 'protocol':
+                # The API requires protocol to be a number in the range 1-6
+ new_monitor['protocolId'] = protocols[module.params[i]]
+ elif i == 'sensitivity':
+                # The API requires sensitivity to be one of the numbers 8, 5, or 3
+ new_monitor[i] = sensitivities[module.params[i]]
+ elif i == 'contactList':
+ # The module accepts either the name or the id of the contact list
+ contact_list_id = module.params[i]
+ if not contact_list_id.isdigit() and contact_list_id != '':
+ contact_list = DME.getContactListByName(contact_list_id)
+ if not contact_list:
+ module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
+ contact_list_id = contact_list.get('id', '')
+ new_monitor['contactListId'] = contact_list_id
+ else:
+ # The module option names match the API field names
+ new_monitor[i] = module.params[i]
+
+ # Compare new record against existing one
+ record_changed = False
+ if current_record:
+ for i in new_record:
+ # Remove leading and trailing quote character from values because TXT records
+ # are surrounded by quotes.
+ if str(current_record[i]).strip('"') != str(new_record[i]):
+ record_changed = True
+ new_record['id'] = str(current_record['id'])
+
+ monitor_changed = False
+ if current_monitor:
+ for i in new_monitor:
+ if str(current_monitor.get(i)) != str(new_monitor[i]):
+ monitor_changed = True
+
+ # Follow Keyword Controlled Behavior
+ if state == 'present':
+ # return the record if no value is specified
+ if "value" not in new_record:
+ if not current_record:
+ module.fail_json(
+ msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ # create record and monitor as the record does not exist
+ if not current_record:
+ record = DME.createRecord(DME.prepareRecord(new_record))
+ if new_monitor.get('monitor') and record_type == "A":
+ monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
+ module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
+ else:
+ module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor))
+
+ # update the record
+ updated = False
+ if record_changed:
+ DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
+ updated = True
+ if monitor_changed:
+ DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
+ updated = True
+ if updated:
+ module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
+
+ # return the record (no changes)
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ elif state == 'absent':
+ changed = False
+ # delete the record (and the monitor/failover) if it exists
+ if current_record:
+ DME.deleteRecord(current_record['id'])
+ module.exit_json(changed=True)
+
+ # record does not exist, return w/o change.
+ module.exit_json(changed=changed)
+
+ else:
+ module.fail_json(
+ msg="'%s' is an unknown value for the state argument" % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py
new file mode 100644
index 00000000..848cc1fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py
@@ -0,0 +1,479 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ravi Bhure <ravibhure@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: haproxy
+short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands
+author:
+- Ravi Bhure (@ravibhure)
+description:
+ - Enable, disable, drain and set weights for HAProxy backend servers using socket commands.
+notes:
+ - Enable, disable and drain commands are restricted and can only be issued on
+ sockets configured for level 'admin'. For example, you can add the line
+ 'stats socket /var/run/haproxy.sock level admin' to the general section of
+ haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
+ - Depends on netcat (nc) being available; you need to install the appropriate
+ package for your operating system before this module can be used.
+options:
+ backend:
+ description:
+ - Name of the HAProxy backend pool.
+ - If this parameter is unset, it will be auto-detected.
+ type: str
+ drain:
+ description:
+ - Wait until the server has no active connections or until the timeout
+ determined by wait_interval and wait_retries is reached.
+ - Continue only after the status changes to 'MAINT'.
+ - This overrides the shutdown_sessions option.
+ type: bool
+ default: false
+ host:
+ description:
+ - Name of the backend host to change.
+ type: str
+ required: true
+ shutdown_sessions:
+ description:
+ - When disabling a server, immediately terminate all the sessions attached
+ to the specified server.
+ - This can be used to terminate long-running sessions after a server is put
+ into maintenance mode. Overridden by the drain option.
+ type: bool
+ default: no
+ socket:
+ description:
+ - Path to the HAProxy socket file.
+ type: path
+ default: /var/run/haproxy.sock
+ state:
+ description:
+ - Desired state of the provided backend host.
+ - Note that C(drain) state was added in version 2.4.
+      - It is supported only by HAProxy version 1.5 or later.
+ - When used on versions < 1.5, it will be ignored.
+ type: str
+ required: true
+ choices: [ disabled, drain, enabled ]
+ agent:
+ description:
+ - Disable/enable agent checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: 1.0.0
+ health:
+ description:
+ - Disable/enable health checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: "1.0.0"
+ fail_on_not_found:
+ description:
+      - Fail whenever trying to enable/disable a backend host that does not exist.
+ type: bool
+ default: no
+ wait:
+ description:
+ - Wait until the server reports a status of 'UP' when C(state=enabled),
+        status of 'MAINT' when C(state=disabled), or status of 'DRAIN' when C(state=drain).
+ type: bool
+ default: no
+ wait_interval:
+ description:
+ - Number of seconds to wait between retries.
+ type: int
+ default: 5
+ wait_retries:
+ description:
+ - Number of times to check for status after changing the state.
+ type: int
+ default: 25
+ weight:
+ description:
+      - The weight value to set for the server.
+      - If the value ends with the C(%) sign, then the new weight will be
+ relative to the initially configured weight.
+ - Relative weights are only permitted between 0 and 100% and absolute
+ weights are permitted between 0 and 256.
+ type: str
+'''
+
+EXAMPLES = r'''
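+# The examples below assume a stats socket configured at admin level, e.g. via
+# this line (path assumed) in the global section of haproxy.cfg:
+#
+#   stats socket /var/run/haproxy.sock level admin
+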
+- name: Disable server in 'www' backend pool
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Disable server in 'www' backend pool, also stop health/agent checks
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ health: yes
+ agent: yes
+
+- name: Disable server without backend pool name (applies to all available backend pools)
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+
+- name: Disable server, provide socket file
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+
+- name: Disable server, provide socket file, wait until status reports in maintenance
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+
+# Place server in drain mode, providing a socket file. Then check the server's
+# status every second to see if it changes to maintenance mode, continuing if
+# it does within a minute and failing otherwise.
+- community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+ drain: yes
+ wait_interval: 1
+ wait_retries: 60
+
+- name: Disable backend server in 'www' backend pool and drop open sessions to it
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ socket: /var/run/haproxy.sock
+ shutdown_sessions: yes
+
+- name: Disable server without backend pool name (applies to all available backend pools) but fail when the backend host is not found
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ fail_on_not_found: yes
+
+- name: Enable server in 'www' backend pool
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Enable server in 'www' backend pool and wait until healthy
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+
+- name: Enable server in 'www' backend pool and wait until healthy. Retry 10 times with intervals of 5 seconds to check the health
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+ wait_retries: 10
+ wait_interval: 5
+
+- name: Enable server in 'www' backend pool and change the server's weight
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: 10
+ backend: www
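+
+# A sketch of a relative weight: per the weight option, a value ending in '%'
+# is applied relative to the initially configured weight.
+- name: Enable server in 'www' backend pool and halve its configured weight
+  community.general.haproxy:
+    state: enabled
+    host: '{{ inventory_hostname }}'
+    socket: /var/run/haproxy.sock
+    weight: '50%'
+    backend: www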
+
+- name: Set the server in 'www' backend pool to drain mode
+ community.general.haproxy:
+ state: drain
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+'''
+
+import csv
+import socket
+import time
+from string import Template
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_text
+
+
+DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
+RECV_SIZE = 1024
+ACTION_CHOICES = ['enabled', 'disabled', 'drain']
+WAIT_RETRIES = 25
+WAIT_INTERVAL = 5
+
+
+######################################################################
+class TimeoutException(Exception):
+ pass
+
+
+class HAProxy(object):
+ """
+ Used for communicating with HAProxy through its local UNIX socket interface.
+    Performs common HAProxy tasks such as enabling and disabling backend servers.
+
+    The complete set of external commands HAProxy handles is documented
+ on their website:
+
+ http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
+ """
+
+ def __init__(self, module):
+ self.module = module
+
+ self.state = self.module.params['state']
+ self.host = self.module.params['host']
+ self.backend = self.module.params['backend']
+ self.weight = self.module.params['weight']
+ self.socket = self.module.params['socket']
+ self.shutdown_sessions = self.module.params['shutdown_sessions']
+ self.fail_on_not_found = self.module.params['fail_on_not_found']
+ self.agent = self.module.params['agent']
+ self.health = self.module.params['health']
+ self.wait = self.module.params['wait']
+ self.wait_retries = self.module.params['wait_retries']
+ self.wait_interval = self.module.params['wait_interval']
+ self._drain = self.module.params['drain']
+ self.command_results = {}
+
+ def execute(self, cmd, timeout=200, capture_output=True):
+ """
+        Executes a HAProxy command by sending a message to HAProxy's local
+        UNIX socket and reading the response. (The 'timeout' parameter is
+        accepted for compatibility but is currently unused.)
+ """
+ self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.client.connect(self.socket)
+ self.client.sendall(to_bytes('%s\n' % cmd))
+
+ result = b''
+        buf = self.client.recv(RECV_SIZE)
+ while buf:
+ result += buf
+ buf = self.client.recv(RECV_SIZE)
+ result = to_text(result, errors='surrogate_or_strict')
+
+ if capture_output:
+ self.capture_command_output(cmd, result.strip())
+ self.client.close()
+ return result
+
+ def capture_command_output(self, cmd, output):
+ """
+ Capture the output for a command
+ """
+ if 'command' not in self.command_results:
+ self.command_results['command'] = []
+ self.command_results['command'].append(cmd)
+ if 'output' not in self.command_results:
+ self.command_results['output'] = []
+ self.command_results['output'].append(output)
+
+ def discover_all_backends(self):
+ """
+ Discover all entries with svname = 'BACKEND' and return a list of their corresponding
+ pxnames
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))
+
+ def discover_version(self):
+ """
+ Attempt to extract the haproxy version.
+ Return a tuple containing major and minor version.
+ """
+ data = self.execute('show info', 200, False)
+ lines = data.splitlines()
+ line = [x for x in lines if 'Version:' in x]
+ try:
+ version_values = line[0].partition(':')[2].strip().split('.', 3)
+ version = (int(version_values[0]), int(version_values[1]))
+ except (ValueError, TypeError, IndexError):
+ version = None
+
+ return version
+
+ def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
+ """
+ Run some command on the specified backends. If no backends are provided they will
+        be discovered automatically (all backends).
+ """
+ # Discover backends if none are given
+ if pxname is None:
+ backends = self.discover_all_backends()
+ else:
+ backends = [pxname]
+
+ # Run the command for each requested backend
+ for backend in backends:
+ # Fail when backends were not found
+ state = self.get_state_for(backend, svname)
+ if (self.fail_on_not_found) and state is None:
+ self.module.fail_json(
+ msg="The specified backend '%s/%s' was not found!" % (backend, svname))
+
+ if state is not None:
+ self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
+ if self.wait:
+ self.wait_until_status(backend, svname, wait_for_status)
+
+ def get_state_for(self, pxname, svname):
+ """
+ Find the state of specific services. When pxname is not set, get all backends for a specific host.
+        Returns a tuple of dictionaries containing the status, weight, and current session count for those services, or None if nothing matched.
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ state = tuple(
+ map(
+ lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
+ filter(lambda d: (pxname is None or d['pxname']
+ == pxname) and d['svname'] == svname, r)
+ )
+ )
+ return state or None
+
+ def wait_until_status(self, pxname, svname, status):
+ """
+        Wait for a service to reach the specified status. Try wait_retries times
+        with wait_interval seconds of sleep in between. If the service has not reached
+ the expected status in that time, the module will fail. If the service was
+ not found, the module will fail.
+ """
+        for i in range(self.wait_retries):
+ state = self.get_state_for(pxname, svname)
+
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
+ # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching
+ if status in state[0]['status']:
+                if not self._drain or (state[0]['scur'] == '0' and 'MAINT' in state[0]['status']):
+ return True
+ else:
+ time.sleep(self.wait_interval)
+
+ self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
+ (pxname, svname, status, self.wait_retries))
+
+ def enabled(self, host, backend, weight):
+ """
+        Enabled action: marks the server UP and re-enables checks. Also
+        retrieves the server's current weight and, when a weight is
+        provided, sets it on the HAProxy backend server.
+ """
+ cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
+ if self.agent:
+ cmd += "; enable agent $pxname/$svname"
+ if self.health:
+ cmd += "; enable health $pxname/$svname"
+ if weight:
+ cmd += "; set weight $pxname/$svname %s" % weight
+ self.execute_for_backends(cmd, backend, host, 'UP')
+
+ def disabled(self, host, backend, shutdown_sessions):
+ """
+        Disabled action: marks the server DOWN for maintenance. In this mode, no more
+        checks will be performed on the server until it leaves maintenance.
+        Optionally shuts down active sessions while disabling the backend host.
+ """
+ cmd = "get weight $pxname/$svname"
+ if self.agent:
+ cmd += "; disable agent $pxname/$svname"
+ if self.health:
+ cmd += "; disable health $pxname/$svname"
+ cmd += "; disable server $pxname/$svname"
+ if shutdown_sessions:
+ cmd += "; shutdown sessions server $pxname/$svname"
+ self.execute_for_backends(cmd, backend, host, 'MAINT')
+
+ def drain(self, host, backend, status='DRAIN'):
+ """
+        Drain action: sets the server to DRAIN mode.
+        In this mode, the server will not accept any new connections
+ other than those that are accepted via persistence.
+ """
+ haproxy_version = self.discover_version()
+
+        # check if the haproxy version supports the DRAIN state (starting with 1.5)
+ if haproxy_version and (1, 5) <= haproxy_version:
+ cmd = "set server $pxname/$svname state drain"
+ self.execute_for_backends(cmd, backend, host, status)
+
+ def act(self):
+ """
+        Determine the requested action from the module parameters, then perform it.
+ """
+ # Get the state before the run
+ self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
+
+        # toggle enable/disable server
+ if self.state == 'enabled':
+ self.enabled(self.host, self.backend, self.weight)
+ elif self.state == 'disabled' and self._drain:
+ self.drain(self.host, self.backend, status='MAINT')
+ elif self.state == 'disabled':
+ self.disabled(self.host, self.backend, self.shutdown_sessions)
+ elif self.state == 'drain':
+ self.drain(self.host, self.backend)
+ else:
+ self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
+
+ # Get the state after the run
+ self.command_results['state_after'] = self.get_state_for(self.backend, self.host)
+
+ # Report change status
+ self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after'])
+
+ self.module.exit_json(**self.command_results)
+
+
+def main():
+
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=ACTION_CHOICES),
+ host=dict(type='str', required=True),
+ backend=dict(type='str'),
+ weight=dict(type='str'),
+ socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION),
+ shutdown_sessions=dict(type='bool', default=False),
+ fail_on_not_found=dict(type='bool', default=False),
+ health=dict(type='bool', default=False),
+ agent=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_retries=dict(type='int', default=WAIT_RETRIES),
+ wait_interval=dict(type='int', default=WAIT_INTERVAL),
+ drain=dict(type='bool', default=False),
+ ),
+ )
+
+    if not module.params['socket']:
+ module.fail_json(msg="unable to locate haproxy socket")
+
+ ansible_haproxy = HAProxy(module)
+ ansible_haproxy.act()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py
new file mode 100644
index 00000000..a57e0ab8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip
+short_description: Manage Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip_info
+ description: Retrieve information on failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+ state:
+ description:
+ - Defines whether the IP will be routed or not.
+ - If set to C(routed), I(value) must be specified.
+ type: str
+ choices:
+ - routed
+ - unrouted
+ default: routed
+ value:
+ description:
+ - The new value for the failover IP address.
+ - Required when setting I(state) to C(routed).
+ type: str
+ timeout:
+ description:
+ - Timeout to use when routing or unrouting the failover IP.
+ - Note that the API call returns when the failover IP has been
+        successfully routed to the new address, or successfully unrouted,
+        respectively.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Set value of failover IP 1.2.3.4 to 5.6.7.8
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ value: 5.6.7.8
+
+- name: Set value of failover IP 1.2.3.4 to unrouted
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ state: unrouted
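+
+# A sketch using the documented return values (value and state):
+- name: Route failover IP 1.2.3.4 to 5.6.7.8 and record the result
+  community.general.hetzner_failover_ip:
+    hetzner_user: foo
+    hetzner_password: bar
+    failover_ip: 1.2.3.4
+    value: 5.6.7.8
+  register: failover
+
+- name: Show the resulting routing state
+  ansible.builtin.debug:
+    msg: "1.2.3.4 is {{ failover.state }} (routed to {{ failover.value }})"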
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover,
+ set_failover,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ state=dict(type='str', default='routed', choices=['routed', 'unrouted']),
+ value=dict(type='str'),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'routed', ['value']),
+ ),
+ )
+
+ failover_ip = module.params['failover_ip']
+ value = get_failover(module, failover_ip)
+ changed = False
+ before = get_failover_state(value)
+
+ if module.params['state'] == 'routed':
+ new_value = module.params['value']
+ else:
+ new_value = None
+
+ if value != new_value:
+ if module.check_mode:
+ value = new_value
+ changed = True
+ else:
+ value, changed = set_failover(module, failover_ip, new_value, timeout=module.params['timeout'])
+
+ after = get_failover_state(value)
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ **after
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py
new file mode 100644
index 00000000..4d6f9f37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip_info
+short_description: Retrieve information on Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Retrieve information on Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip
+ description: Manage failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+'''
+
+EXAMPLES = r'''
+- name: Get value of failover IP 1.2.3.4
+ community.general.hetzner_failover_ip_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ register: result
+
+- name: Print value of failover IP 1.2.3.4 in case it is routed
+ ansible.builtin.debug:
+ msg: "1.2.3.4 routes to {{ result.value }}"
+ when: result.state == 'routed'
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+failover_ip:
+ description:
+ - The failover IP.
+ returned: success
+ type: str
+ sample: '1.2.3.4'
+failover_netmask:
+ description:
+ - The netmask for the failover IP.
+ returned: success
+ type: str
+ sample: '255.255.255.255'
+server_ip:
+ description:
+ - The main IP of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: str
+server_number:
+ description:
+ - The number of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: int
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover_record,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ failover = get_failover_record(module, module.params['failover_ip'])
+ result = get_failover_state(failover['active_server_ip'])
+ result['failover_ip'] = failover['ip']
+ result['failover_netmask'] = failover['netmask']
+ result['server_ip'] = failover['server_ip']
+ result['server_number'] = failover['server_number']
+ result['changed'] = False
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py
new file mode 100644
index 00000000..ade9bd95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py
@@ -0,0 +1,509 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall
+version_added: '0.2.0'
+short_description: Manage Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's dedicated server firewall.
+  - Note that the idempotency check for TCP flags simply compares strings and doesn't
+ try to interpret the rules. This might change in the future.
+seealso:
+ - name: Firewall documentation
+    description: Hetzner's documentation on the stateless firewall for dedicated servers.
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall_info
+ description: Retrieve information on firewall configuration.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ required: yes
+ type: str
+ port:
+ description:
+ - Switch port of firewall.
+ type: str
+ choices: [ main, kvm ]
+ default: main
+ state:
+ description:
+ - Status of the firewall.
+ - Firewall is active if state is C(present), and disabled if state is C(absent).
+ type: str
+ default: present
+ choices: [ present, absent ]
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ suboptions:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ ip_version:
+ description:
+ - Internet protocol version.
+ - Note that currently, only IPv4 is supported by Hetzner.
+ required: yes
+ type: str
+ choices: [ ipv4, ipv6 ]
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ protocol:
+ description:
+          - Protocol above the IP layer.
+ type: str
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ - Flags supported by Hetzner are C(syn), C(fin), C(rst), C(psh) and C(urg).
+ - They can be combined with C(|) (logical or) and C(&) (logical and).
+ - See L(the documentation,https://wiki.hetzner.de/index.php/Robot_Firewall/en#Parameter)
+ for more information.
+ type: str
+ action:
+ description:
+ - Action if rule matches.
+ required: yes
+ type: str
+ choices: [ accept, discard ]
+ update_timeout:
+ description:
+ - Timeout to use when configuring the firewall.
+ - Note that the API call returns before the firewall has been
+ successfully set up.
+ type: int
+ default: 30
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+      - The API returns status C(in process) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Configure firewall for server with main IP 1.2.3.4
+ community.general.hetzner_firewall:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ state: present
+ whitelist_hos: yes
+ rules:
+ input:
+ - name: Allow everything to ports 20-23 from 4.3.2.1/24
+ ip_version: ipv4
+ src_ip: 4.3.2.1/24
+ dst_port: '20-23'
+ action: accept
+ - name: Allow everything to port 443
+ ip_version: ipv4
+ dst_port: '443'
+ action: accept
+ - name: Drop everything else
+ ip_version: ipv4
+ action: discard
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result }}"
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+        - Will be C(in process) if the firewall is currently being updated and
+          I(wait_for_configured) is set to C(no) or I(timeout) is set too low.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+              - Protocol above the IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.compat import ipaddress as compat_ipaddress
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+
+
+RULE_OPTION_NAMES = [
+ 'name', 'ip_version', 'dst_ip', 'dst_port', 'src_ip', 'src_port',
+ 'protocol', 'tcp_flags', 'action',
+]
+
+RULES = ['input']
+
+
+def restrict_dict(dictionary, fields):
+ result = dict()
+ for k, v in dictionary.items():
+ if k in fields:
+ result[k] = v
+ return result
+
+
+def restrict_firewall_config(config):
+ result = restrict_dict(config, ['port', 'status', 'whitelist_hos'])
+ result['rules'] = dict()
+ for ruleset in RULES:
+ result['rules'][ruleset] = [
+ restrict_dict(rule, RULE_OPTION_NAMES)
+ for rule in config['rules'].get(ruleset) or []
+ ]
+ return result
+
+
+def update(before, after, params, name):
+ bv = before.get(name)
+ after[name] = bv
+ changed = False
+ pv = params[name]
+ if pv is not None:
+ changed = pv != bv
+ if changed:
+ after[name] = pv
+ return changed
+
+
+def normalize_ip(ip, ip_version):
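+    # Normalize an address to CIDR notation; a bare address gets the
+    # full-length prefix, e.g. normalize_ip('1.2.3.4', 'ipv4') -> '1.2.3.4/32'.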
+ if ip is None:
+ return ip
+    if '/' in ip:
+        ip, prefix = ip.split('/')
+    else:
+        ip, prefix = ip, ''
+    ip_addr = to_native(compat_ipaddress.ip_address(to_text(ip)).compressed)
+    if prefix == '':
+        prefix = '32' if ip_version.lower() == 'ipv4' else '128'
+    return ip_addr + '/' + prefix
+
+
+def update_rules(before, after, params, ruleset):
+ before_rules = before['rules'][ruleset]
+ after_rules = after['rules'][ruleset]
+ params_rules = params['rules'][ruleset]
+ changed = len(before_rules) != len(params_rules)
+ for no, rule in enumerate(params_rules):
+ rule['src_ip'] = normalize_ip(rule['src_ip'], rule['ip_version'])
+ rule['dst_ip'] = normalize_ip(rule['dst_ip'], rule['ip_version'])
+ if no < len(before_rules):
+ before_rule = before_rules[no]
+ before_rule['src_ip'] = normalize_ip(before_rule['src_ip'], before_rule['ip_version'])
+ before_rule['dst_ip'] = normalize_ip(before_rule['dst_ip'], before_rule['ip_version'])
+ if before_rule != rule:
+ changed = True
+ after_rules.append(rule)
+ return changed
+
+
+def encode_rule(output, rulename, input):
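+    # Flatten a ruleset into the API's form-encoded array syntax, producing
+    # keys such as rules[input][0][action].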
+ for i, rule in enumerate(input['rules'][rulename]):
+ for k, v in rule.items():
+ if v is not None:
+ output['rules[{0}][{1}][{2}]'.format(rulename, i, k)] = v
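+# Example (illustrative): a single input rule with dst_port='443' yields the
+# form field output['rules[input][0][dst_port]'] = '443' for the POST payload.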
+
+
+def create_default_rules_object():
+ rules = dict()
+ for ruleset in RULES:
+ rules[ruleset] = []
+ return rules
+
+
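+# The Robot API reports the status 'in process' while a firewall update is
+# still running; any other status (such as 'active' or 'disabled') means the
+# configuration is done.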
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ port=dict(type='str', default='main', choices=['main', 'kvm']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ whitelist_hos=dict(type='bool'),
+ rules=dict(type='dict', options=dict(
+ input=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ ip_version=dict(type='str', required=True, choices=['ipv4', 'ipv6']),
+ dst_ip=dict(type='str'),
+ dst_port=dict(type='str'),
+ src_ip=dict(type='str'),
+ src_port=dict(type='str'),
+ protocol=dict(type='str'),
+ tcp_flags=dict(type='str'),
+ action=dict(type='str', required=True, choices=['accept', 'discard']),
+ )),
+ )),
+ update_timeout=dict(type='int', default=30),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Sanitize input
+ module.params['status'] = 'active' if (module.params['state'] == 'present') else 'disabled'
+ if module.params['rules'] is None:
+ module.params['rules'] = {}
+ if module.params['rules'].get('input') is None:
+ module.params['rules']['input'] = []
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+ if not firewall_configured(result, error):
+ module.fail_json(msg='Firewall configuration cannot be read as it is not configured.')
+
+ full_before = result['firewall']
+ if not full_before.get('rules'):
+ full_before['rules'] = create_default_rules_object()
+ before = restrict_firewall_config(full_before)
+
+ # Build wanted (after) state and compare
+ after = dict(before)
+ changed = False
+ changed |= update(before, after, module.params, 'port')
+ changed |= update(before, after, module.params, 'status')
+ changed |= update(before, after, module.params, 'whitelist_hos')
+ after['rules'] = create_default_rules_object()
+ if module.params['status'] == 'active':
+ for ruleset in RULES:
+ changed |= update_rules(before, after, module.params, ruleset)
+
+ # Update if different
+ construct_result = True
+ construct_status = None
+ if changed and not module.check_mode:
+ # https://robot.your-server.de/doc/webservice/en.html#post-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ data = dict(after)
+ data['whitelist_hos'] = str(data['whitelist_hos']).lower()
+ del data['rules']
+ for ruleset in RULES:
+ encode_rule(data, ruleset, after)
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='POST',
+ timeout=module.params['update_timeout'],
+ data=urlencode(data),
+ headers=headers,
+ )
+ if module.params['wait_for_configured'] and not firewall_configured(result, error):
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ skip_first=True,
+ )
+ except CheckDoneTimeoutException as e:
+ result, error = e.result, e.error
+ module.warn('Timeout while waiting for firewall to be configured.')
+
+ full_after = result['firewall']
+ if not full_after.get('rules'):
+ full_after['rules'] = create_default_rules_object()
+ construct_status = full_after['status']
+ if construct_status != 'in process':
+ # Only use result if configuration is done, so that diff will be ok
+ after = restrict_firewall_config(full_after)
+ construct_result = False
+
+ if construct_result:
+ # Construct result (used for check mode, and configuration still in process)
+ full_after = dict(full_before)
+ for k, v in after.items():
+ if k != 'rules':
+ full_after[k] = after[k]
+ if construct_status is not None:
+ # We want 'in process' here
+ full_after['status'] = construct_status
+ full_after['rules'] = dict()
+ for ruleset in RULES:
+ full_after['rules'][ruleset] = after['rules'][ruleset]
+
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ firewall=full_after,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py
new file mode 100644
index 00000000..fde06a5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall_info
+version_added: '0.2.0'
+short_description: Query Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Query Hetzner's dedicated server firewall.
+seealso:
+ - name: Firewall documentation
+ description: Hetzner's documentation on the stateless firewall for dedicated servers
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall
+ description: Configure firewall.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ type: str
+ required: yes
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+ - The API returns status C(in process) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Get firewall configuration for server with main IP 1.2.3.4
+ community.general.hetzner_firewall_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.firewall }}"
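+
+# An illustrative sketch: instead of blocking inside the module, disable
+# waiting and poll the status yourself (see the I(wait_for_configured) note):
+- name: Query firewall status without waiting
+ community.general.hetzner_firewall_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ wait_for_configured: no
+ register: result
+ until: result.firewall.status != 'in process'
+ retries: 10
+ delay: 30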
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+ - Will be C(in process) if the firewall is currently being updated, and
+ I(wait_for_configured) is set to C(no) or I(timeout) is set to a value that is too small.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+ - Protocol above IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+
+
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+
+ firewall = result['firewall']
+ if not firewall.get('rules'):
+ firewall['rules'] = dict()
+ for ruleset in ['input']:
+ firewall['rules'][ruleset] = []
+
+ module.exit_json(
+ changed=False,
+ firewall=firewall,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py
new file mode 100644
index 00000000..ab41f680
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py
@@ -0,0 +1,565 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, <meiliu@fusionlayer.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: infinity
+short_description: Manage Infinity IPAM using REST API
+description:
+ - Manage Infinity IPAM using REST API.
+author:
+ - Meirong Liu (@MeganLiu)
+options:
+ server_ip:
+ description:
+ - IP address of the Infinity server.
+ type: str
+ required: true
+ username:
+ description:
+ - Username to access Infinity.
+ - The user must have REST API privileges.
+ type: str
+ required: true
+ password:
+ description:
+ - Infinity password.
+ type: str
+ required: true
+ action:
+ description:
+ - Action to perform.
+ type: str
+ required: true
+ choices: [ add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ]
+ network_id:
+ description:
+ - Network ID.
+ type: str
+ default: ''
+ ip_address:
+ description:
+ - IP Address for a reservation or a release.
+ type: str
+ default: ''
+ network_address:
+ description:
+ - Network address in CIDR format (e.g., 192.168.31.0).
+ type: str
+ default: ''
+ network_size:
+ description:
+ - Network bitmask (e.g., 255.255.255.192) or CIDR format (e.g., /26).
+ type: str
+ default: ''
+ network_name:
+ description:
+ - The name of a network.
+ type: str
+ default: ''
+ network_location:
+ description:
+ - The parent network id for a given network.
+ type: int
+ default: -1
+ network_type:
+ description:
+ - Network type defined by Infinity.
+ type: str
+ choices: [ lan, shared_lan, supernet ]
+ default: lan
+ network_family:
+ description:
+ - Network family defined by Infinity, e.g. IPv4, IPv6, or dual stack.
+ type: str
+ choices: [ 4, 6, dual ]
+ default: 4
+'''
+
+EXAMPLES = r'''
+---
+- hosts: localhost
+ connection: local
+ strategy: debug
+ tasks:
+ - name: Reserve network into Infinity IPAM
+ community.general.infinity:
+ server_ip: 80.75.107.12
+ username: username
+ password: password
+ action: reserve_network
+ network_name: reserve_new_ansible_network
+ network_family: 4
+ network_type: lan
+ network_id: 1201
+ network_size: /28
+ register: infinity
+'''
+
+RETURN = r'''
+network_id:
+ description: ID for a given network.
+ returned: success
+ type: str
+ sample: '1501'
+ip_info:
+ description: when reserving the next available IP address from a network, the IP address info is returned.
+ returned: success
+ type: str
+ sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}'
+network_info:
+ description: when reserving a LAN network from an Infinity supernet by providing network_size, the information about the reserved network is returned.
+ returned: success
+ type: str
+ sample: {"network_address": "192.168.10.32/28","network_family": "4", "network_id": 3102,
+ "network_size": null,"description": null,"network_location": "3085",
+ "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null},
+ "network_type": "lan","network_name": "'reserve_new_ansible_network'"}
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, json
+from ansible.module_utils.urls import open_url
+
+
+class Infinity(object):
+ """
+ Class for managing REST API calls to the Infinity server.
+ """
+
+ def __init__(self, module, server_ip, username, password):
+ self.module = module
+ self.auth_user = username
+ self.auth_pass = password
+ self.base_url = "https://%s/rest/v1/" % (str(server_ip))
+
+ def _get_api_call_ansible_handler(
+ self,
+ method='get',
+ resource_url='',
+ stat_codes=None,
+ params=None,
+ payload_data=None):
+ """
+ Perform the HTTPS request using Ansible's open_url helper.
+ """
+ stat_codes = [200] if stat_codes is None else stat_codes
+ request_url = str(self.base_url) + str(resource_url)
+ response = None
+ headers = {'Content-Type': 'application/json'}
+ if not request_url:
+ self.module.exit_json(
+ msg="When sending Rest api call , the resource URL is empty, please check.")
+ if payload_data and not isinstance(payload_data, str):
+ payload_data = json.dumps(payload_data)
+ response_raw = open_url(
+ str(request_url),
+ method=method,
+ timeout=20,
+ headers=headers,
+ url_username=self.auth_user,
+ url_password=self.auth_pass,
+ validate_certs=False,
+ force_basic_auth=True,
+ data=payload_data)
+
+ response = response_raw.read()
+ payload = ''
+ if response_raw.code not in stat_codes:
+ self.module.exit_json(
+ changed=False,
+ meta=" openurl response_raw.code show error and error code is %r" %
+ (response_raw.code))
+ else:
+ if isinstance(response, str) and len(response) > 0:
+ payload = response
+ elif method.lower() == 'delete' and response_raw.code == 204:
+ payload = 'Delete is done.'
+ if isinstance(payload, dict) and "text" in payload:
+ self.module.exit_json(
+ changed=False,
+ meta="The data returned by the REST API call is not JSON: %s" % payload["text"])
+ return payload
+
+ # ---------------------------------------------------------------------------
+ # get_network()
+ # ---------------------------------------------------------------------------
+ def get_network(self, network_id, network_name, limit=-1):
+ """
+ Search for a network inside Infinity via the REST API.
+ Either network_id or network_name needs to be provided.
+ Returns the details of the network with the given network_id or name.
+ """
+ if network_name is None and network_id is None:
+ self.module.exit_json(
+ msg="You must specify one of the options 'network_name' or 'network_id'.")
+ method = "get"
+ resource_url = ''
+ params = {}
+ response = None
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if network_id is None and network_name:
+ method = "get"
+ resource_url = "search"
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list) and len(
+ response) > 1 and limit == 1:
+ response = response[0]
+ response = json.dumps(response)
+ return response
+
+ # ---------------------------------------------------------------------------
+ # get_network_id()
+ # ---------------------------------------------------------------------------
+ def get_network_id(self, network_name="", network_type='lan'):
+ """
+ Query the network_id from Infinity via the REST API based on the given network_name.
+ """
+ method = 'get'
+ resource_url = 'search'
+ response = None
+ if network_name is None:
+ self.module.exit_json(
+ msg="You must specify the option 'network_name'")
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ network_id = ""
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list):
+ response = response[0]
+ network_id = response['id']
+ return network_id
+
+ # ---------------------------------------------------------------------------
+ # reserve_next_available_ip()
+ # ---------------------------------------------------------------------------
+ def reserve_next_available_ip(self, network_id=""):
+ """
+ Reserve an IP address via Infinity using the REST API.
+ network_id: the ID of the network to reserve the next available IP address from.
+ Returns the next available IP address from the given network.
+ """
+ method = "post"
+ resource_url = ''
+ response = None
+ ip_info = ''
+ if not network_id:
+ self.module.exit_json(
+ msg="You must specify the option 'network_id'.")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_ip"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if response and response.find(
+ "[") >= 0 and response.find("]") >= 0:
+ start_pos = response.find("{")
+ end_pos = response.find("}")
+ ip_info = response[start_pos: (end_pos + 1)]
+ return ip_info
+
+ # -------------------------
+ # release_ip()
+ # -------------------------
+ def release_ip(self, network_id="", ip_address=""):
+ """
+ Release an IP address via Infinity using the REST API.
+ """
+ method = "get"
+ resource_url = ''
+ response = None
+ if ip_address is None or network_id is None:
+ self.module.exit_json(
+ msg="You must specify those two options: 'network_id' and 'ip_address'.")
+
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg="There is an error in release ip %s from network %s." %
+ (ip_address, network_id))
+
+ ip_list = json.loads(response)
+ ip_idlist = []
+ for ip_item in ip_list:
+ ip_id = ip_item['id']
+ ip_idlist.append(ip_id)
+ deleted_ip_id = ''
+ for ip_id in ip_idlist:
+ ip_response = ''
+ resource_url = "ip_addresses/" + str(ip_id)
+ ip_response = self._get_api_call_ansible_handler(
+ method,
+ resource_url,
+ stat_codes=[200])
+ if ip_response and json.loads(
+ ip_response)['address'] == str(ip_address):
+ deleted_ip_id = ip_id
+ break
+ if deleted_ip_id:
+ method = 'delete'
+ resource_url = "ip_addresses/" + str(deleted_ip_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release ip, could not find the ip address %r from the given network %r' ." %
+ (ip_address, network_id))
+
+ return response
+
+ # -------------------
+ # delete_network()
+ # -------------------
+ def delete_network(self, network_id="", network_name=""):
+ """
+ Delete a network from Infinity using the REST API.
+ """
+ method = 'delete'
+ resource_url = ''
+ response = None
+ if network_id is None and network_name is None:
+ self.module.exit_json(
+ msg="You must specify one of those options: 'network_id','network_name' .")
+ if network_id is None and network_name:
+ network_id = self.get_network_id(network_name=network_name)
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ return response
+
+ # ---------------------------------------------------------------------------
+ # reserve_network()
+ # ---------------------------------------------------------------------------
+ def reserve_network(self, network_id="",
+ reserved_network_name="", reserved_network_description="",
+ reserved_network_size="", reserved_network_family='4',
+ reserved_network_type='lan', reserved_network_address="",):
+ """
+ Reserves the first available network of the specified size from a given supernet.
+ network_name (required): name of the network
+ description (optional): free-form description
+ network_family (required): address family of the network; one of '4', '6', 'IPv4', 'IPv6', 'dual'
+ network_address (optional): address of the new network; if not given, the first available network will be created
+ network_size (required): size of the new network in /<prefix> notation
+ network_type (required): type of network; one of 'supernet', 'lan', 'shared_lan'
+
+ """
+ method = 'post'
+ resource_url = ''
+ network_info = None
+ if network_id is None or reserved_network_name is None or reserved_network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_network"
+ if not reserved_network_family:
+ reserved_network_family = '4'
+ if not reserved_network_type:
+ reserved_network_type = 'lan'
+ payload_data = {
+ "network_name": reserved_network_name,
+ 'description': reserved_network_description,
+ 'network_size': reserved_network_size,
+ 'network_family': reserved_network_family,
+ 'network_type': reserved_network_type,
+ 'network_location': int(network_id)}
+ if reserved_network_address:
+ payload_data.update({'network_address': reserved_network_address})
+
+ network_info = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[200, 201], payload_data=payload_data)
+
+ return network_info
+
+ # ---------------------------------------------------------------------------
+ # release_network()
+ # ---------------------------------------------------------------------------
+ def release_network(
+ self,
+ network_id="",
+ released_network_name="",
+ released_network_type='lan'):
+ """
+ Release the network with name 'released_network_name' from the given supernet network_id
+ """
+ method = 'get'
+ response = None
+ if network_id is None or released_network_name is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ matched_network_id = ""
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg=" there is an error in releasing network %r from network %s." %
+ (network_id, released_network_name))
+ if response:
+ response = json.loads(response)
+ for child_net in response:
+ if child_net['network'] and child_net['network']['network_name'] == released_network_name:
+ matched_network_id = child_net['network']['network_id']
+ break
+ response = None
+ if matched_network_id:
+ method = 'delete'
+ resource_url = "networks/" + str(matched_network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release network , could not find the network %r from the given superent %r' " %
+ (released_network_name, network_id))
+
+ return response
+
+ # ---------------------------------------------------------------------------
+ # add_network()
+ # ---------------------------------------------------------------------------
+ def add_network(
+ self, network_name="", network_address="",
+ network_size="", network_family='4',
+ network_type='lan', network_location=-1):
+ """
+ Add a new LAN network into a given supernet (or the default supernet) on Fusionlayer Infinity via the REST API.
+ Required fields: 'network_name', 'network_family', 'network_type', 'network_address', 'network_size'.
+ """
+ method = 'post'
+ resource_url = 'networks'
+ response = None
+ if network_name is None or network_address is None or network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_name', 'network_address' and 'network_size'")
+
+ if not network_family:
+ network_family = '4'
+ if not network_type:
+ network_type = 'lan'
+ if not network_location:
+ network_location = -1
+ payload_data = {
+ "network_name": network_name,
+ 'network_address': network_address,
+ 'network_size': network_size,
+ 'network_family': network_family,
+ 'network_type': network_type,
+ 'network_location': network_location}
+ response = self._get_api_call_ansible_handler(
+ method='post', resource_url=resource_url,
+ stat_codes=[200], payload_data=payload_data)
+ return response
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_ip=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ network_id=dict(type='str'),
+ ip_address=dict(type='str'),
+ network_name=dict(type='str'),
+ network_location=dict(type='int', default=-1),
+ network_family=dict(type='str', default='4', choices=['4', '6', 'dual']),
+ network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']),
+ network_address=dict(type='str'),
+ network_size=dict(type='str'),
+ action=dict(type='str', required=True, choices=[
+ 'add_network',
+ 'delete_network',
+ 'get_network',
+ 'get_network_id',
+ 'release_ip',
+ 'release_network',
+ 'reserve_network',
+ 'reserve_next_available_ip',
+ ],),
+ ),
+ required_together=(
+ ['username', 'password'],
+ ),
+ )
+ server_ip = module.params["server_ip"]
+ username = module.params["username"]
+ password = module.params["password"]
+ action = module.params["action"]
+ network_id = module.params["network_id"]
+ released_ip = module.params["ip_address"]
+ network_name = module.params["network_name"]
+ network_family = module.params["network_family"]
+ network_type = module.params["network_type"]
+ network_address = module.params["network_address"]
+ network_size = module.params["network_size"]
+ network_location = module.params["network_location"]
+ my_infinity = Infinity(module, server_ip, username, password)
+ result = ''
+ if action == "reserve_next_available_ip":
+ if network_id:
+ result = my_infinity.reserve_next_available_ip(network_id)
+ if not result:
+ result = 'There was an error calling reserve_next_available_ip'
+ module.exit_json(changed=False, meta=result)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_ip":
+ if network_id and released_ip:
+ result = my_infinity.release_ip(
+ network_id=network_id, ip_address=released_ip)
+ module.exit_json(changed=True, meta=result)
+ elif action == "delete_network":
+ result = my_infinity.delete_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "get_network_id":
+ result = my_infinity.get_network_id(
+ network_name=network_name, network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+ elif action == "get_network":
+ result = my_infinity.get_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+ elif action == "reserve_network":
+ result = my_infinity.reserve_network(
+ network_id=network_id,
+ reserved_network_name=network_name,
+ reserved_network_size=network_size,
+ reserved_network_family=network_family,
+ reserved_network_type=network_type,
+ reserved_network_address=network_address)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_network":
+ result = my_infinity.release_network(
+ network_id=network_id,
+ released_network_name=network_name,
+ released_network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "add_network":
+ result = my_infinity.add_network(
+ network_name=network_name,
+ network_location=network_location,
+ network_address=network_address,
+ network_size=network_size,
+ network_family=network_family,
+ network_type=network_type)
+
+ module.exit_json(changed=True, meta=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py
new file mode 100644
index 00000000..50aec392
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# (c) 2017, Arie Bregman <abregman@redhat.com>
+#
+# This file is a module for Ansible that manages network namespaces
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ip_netns
+author: "Arie Bregman (@bregman-arie)"
+short_description: Manage network namespaces
+requirements: [ ip ]
+description:
+ - Create or delete network namespaces using the ip command.
+options:
+ name:
+ required: false
+ description:
+ - Name of the namespace.
+ type: str
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the namespace should exist.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create a namespace named mario
+ community.general.ip_netns:
+ name: mario
+ state: present
+
+- name: Delete a namespace named luigi
+ community.general.ip_netns:
+ name: luigi
+ state: absent
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+class Namespace(object):
+ """Interface to network namespaces. """
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+
+ def _netns(self, command):
+ '''Run an ip netns command.'''
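+ # Example (illustrative): self._netns(['add', 'mario']) runs
+ # the command 'ip netns add mario'.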
+ return self.module.run_command(['ip', 'netns'] + command)
+
+ def exists(self):
+ '''Check if the namespace already exists'''
+ rc, out, err = self.module.run_command('ip netns list')
+ if rc != 0:
+ self.module.fail_json(msg=to_text(err))
+ # Match whole namespace names only, so 'mario' does not match 'mario2'
+ return self.name in (line.split()[0] for line in out.splitlines() if line.strip())
+
+ def add(self):
+ '''Create network namespace'''
+ rtc, out, err = self._netns(['add', self.name])
+
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def delete(self):
+ '''Delete network namespace'''
+ rtc, out, err = self._netns(['del', self.name])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ changed = False
+
+ if self.state == 'present' and not self.exists():
+ changed = True
+ elif self.state == 'absent' and self.exists():
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+ if not self.exists():
+ self.add()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """Entry point."""
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': None},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ },
+ supports_check_mode=True,
+ )
+
+ network_namespace = Namespace(module)
+ if module.check_mode:
+ network_namespace.check()
+ else:
+ network_namespace.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py
new file mode 100644
index 00000000..dcdc5ef8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ipify_facts
+short_description: Retrieve the public IP of your internet gateway
+description:
+ - Useful if you are behind NAT and need to know the public IP of your internet gateway.
+author:
+- René Moser (@resmo)
+options:
+ api_url:
+ description:
+ - URL of the ipify.org API service.
+ - C(?format=json) will be appended by default.
+ type: str
+ default: https://api.ipify.org/
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ type: int
+ default: 10
+ validate_certs:
+ description:
+ - When set to C(no), SSL certificates will not be validated.
+ type: bool
+ default: yes
+notes:
+ - Visit https://www.ipify.org to get more information.
+'''
+
+EXAMPLES = r'''
+# Gather IP facts from ipify.org
+- name: Get my public IP
+ community.general.ipify_facts:
+
+# Gather IP facts from your own ipify service endpoint with a custom timeout
+- name: Get my public IP
+ community.general.ipify_facts:
+ api_url: http://api.example.com/ipify
+ timeout: 20
+'''
+
+RETURN = r'''
+---
+ipify_public_ip:
+ description: Public IP of the internet gateway.
+ returned: success
+ type: str
+ sample: 1.2.3.4
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class IpifyFacts(object):
+
+ def __init__(self):
+ self.api_url = module.params.get('api_url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'ipify_public_ip': None
+ }
+ (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
+
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
+
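+ # The ipify JSON endpoint returns a body like {"ip": "1.2.3.4"}
+ # (illustrative), from which the 'ip' field is extracted below.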
+ data = json.loads(to_text(response.read()))
+ result['ipify_public_ip'] = data.get('ip')
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_url=dict(type='str', default='https://api.ipify.org/'),
+ timeout=dict(type='int', default=10),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipify_facts = IpifyFacts().run()
+ ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
+ module.exit_json(**ipify_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py
new file mode 100644
index 00000000..f4186cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Aleksei Kostiuk <unitoff@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipinfoio_facts
+short_description: "Retrieve IP geolocation facts of a host's IP address"
+description:
+ - "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
+author: "Aleksei Kostiuk (@akostyuk)"
+options:
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+ type: int
+ http_agent:
+ description:
+ - Set the HTTP user agent.
+ required: false
+ default: "ansible-ipinfoio-module/0.0.1"
+ type: str
+notes:
+ - "Check http://ipinfo.io/ for more information"
+'''
+
+EXAMPLES = '''
+# Retrieve geolocation data of a host's IP address
+- name: Get IP geolocation data
+ community.general.ipinfoio_facts:
+'''
+
+RETURN = '''
+ansible_facts:
+ description: "Dictionary of ip geolocation facts for a host's IP address"
+ returned: changed
+ type: complex
+ contains:
+ ip:
+ description: "Public IP address of a host"
+ type: str
+ sample: "8.8.8.8"
+ hostname:
+ description: Domain name
+ type: str
+ sample: "google-public-dns-a.google.com"
+ country:
+ description: ISO 3166-1 alpha-2 country code
+ type: str
+ sample: "US"
+ region:
+ description: State or province name
+ type: str
+ sample: "California"
+ city:
+ description: City name
+ type: str
+ sample: "Mountain View"
+ loc:
+ description: Latitude and Longitude of the location
+ type: str
+ sample: "37.3860,-122.0838"
+ org:
+ description: "organization's name"
+ type: str
+ sample: "AS3356 Level 3 Communications, Inc."
+ postal:
+ description: Postal code
+ type: str
+ sample: "94035"
+'''
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import fetch_url
+
+
+USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
+
+
+class IpinfoioFacts(object):
+
+ def __init__(self, module):
+ self.url = 'https://ipinfo.io/json'
+ self.timeout = module.params.get('timeout')
+ self.module = module
+
+ def get_geo_data(self):
+ response, info = fetch_url(self.module, self.url, force=True, # NOQA
+ timeout=self.timeout)
+ try:
+ assert info['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg='Could not get {0} page, '
+ 'check for connectivity!'.format(self.url))
+ else:
+ try:
+ content = response.read()
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(
+ msg='Failed to parse the ipinfo.io response: '
+ '{0} {1}'.format(self.url, content))
+ else:
+ return result
+
+
+def main():
+ module = AnsibleModule( # NOQA
+ argument_spec=dict(
+ http_agent=dict(default=USER_AGENT),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipinfoio = IpinfoioFacts(module)
+ ipinfoio_result = dict(
+ changed=False, ansible_facts=ipinfoio.get_geo_data())
+ module.exit_json(**ipinfoio_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py
new file mode 100644
index 00000000..355c7034
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Christian Wollinger <cwollinger@web.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ipwcli_dns
+
+short_description: Manage DNS Records for Ericsson IPWorks via ipwcli
+
+version_added: '0.2.0'
+
+description:
+ - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records."
+
+requirements:
+ - ipwcli (installed on Ericsson IPWorks)
+
+notes:
+ - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli.
+
+options:
+ dnsname:
+ description:
+ - Name of the record.
+ required: true
+ type: str
+ type:
+ description:
+ - Type of the record.
+ required: true
+ type: str
+ choices: [ NAPTR, SRV, A, AAAA ]
+ container:
+ description:
+ - Sets the container zone for the record.
+ required: true
+ type: str
+ address:
+ description:
+ - The IP address for the A or AAAA record.
+ - Required for C(type=A) or C(type=AAAA).
+ type: str
+ ttl:
+ description:
+ - Sets the TTL of the record.
+ type: int
+ default: 3600
+ state:
+ description:
+ - Whether the record should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ priority:
+ description:
+ - Sets the priority of the SRV record.
+ type: int
+ default: 10
+ weight:
+ description:
+ - Sets the weight of the SRV record.
+ type: int
+ default: 10
+ port:
+ description:
+ - Sets the port of the SRV record.
+ - Required for C(type=SRV).
+ type: int
+ target:
+ description:
+ - Sets the target of the SRV record.
+ - Required for C(type=SRV).
+ type: str
+ order:
+ description:
+ - Sets the order of the NAPTR record.
+ - Required for C(type=NAPTR).
+ type: int
+ preference:
+ description:
+ - Sets the preference of the NAPTR record.
+ - Required for C(type=NAPTR).
+ type: int
+ flags:
+ description:
+ - Sets one of the possible flags of the NAPTR record.
+ - Required for C(type=NAPTR).
+ type: str
+ choices: ['S', 'A', 'U', 'P']
+ service:
+ description:
+ - Sets the service of the NAPTR record.
+ - Required for C(type=NAPTR).
+ type: str
+ replacement:
+ description:
+ - Sets the replacement of the NAPTR record.
+ - Required for C(type=NAPTR).
+ type: str
+ username:
+ description:
+ - Username to login on ipwcli.
+ type: str
+ required: true
+ password:
+ description:
+ - Password to login on ipwcli.
+ type: str
+ required: true
+
+author:
+ - Christian Wollinger (@cwollinger)
+'''
+
+EXAMPLES = '''
+- name: Create A record
+ community.general.ipwcli_dns:
+ dnsname: example.com
+ type: A
+ container: ZoneOne
+ address: 127.0.0.1
+
+- name: Remove SRV record if exists
+ community.general.ipwcli_dns:
+ dnsname: _sip._tcp.test.example.com
+ type: SRV
+ container: ZoneOne
+ ttl: 100
+ state: absent
+ target: example.com
+ port: 5060
+
+- name: Create NAPTR record
+ community.general.ipwcli_dns:
+ dnsname: test.example.com
+ type: NAPTR
+ preference: 10
+ container: ZoneOne
+ ttl: 100
+ order: 10
+ service: 'SIP+D2T'
+ replacement: '_sip._tcp.test.example.com.'
+ flags: S
+'''
+
+RETURN = '''
+record:
+ description: The created record from the input parameters.
+ type: str
+ returned: always
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+class ResourceRecord(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.dnsname = module.params['dnsname']
+ self.dnstype = module.params['type']
+ self.container = module.params['container']
+ self.address = module.params['address']
+ self.ttl = module.params['ttl']
+ self.state = module.params['state']
+ self.priority = module.params['priority']
+ self.weight = module.params['weight']
+ self.port = module.params['port']
+ self.target = module.params['target']
+ self.order = module.params['order']
+ self.preference = module.params['preference']
+ self.flags = module.params['flags']
+ self.service = module.params['service']
+ self.replacement = module.params['replacement']
+ self.user = module.params['username']
+ self.password = module.params['password']
+
+ def create_naptrrecord(self):
+ # create NAPTR record with the given params
+ record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"'
+ % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement))
+ return record
+
+ def create_srvrecord(self):
+ # create SRV record with the given params
+ record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s'
+ % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target))
+ return record
+
+ def create_arecord(self):
+ # create A record with the given params
+ if self.dnstype == 'AAAA':
+ record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+ else:
+ record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+
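+ # Example (illustrative): dnsname=example.com, address=127.0.0.1,
+ # ttl=3600, container=ZoneOne produces:
+ # arecord example.com 127.0.0.1 -set ttl=3600;container=ZoneOne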
+ return record
+
+ def list_record(self, record):
+ # check if the record exists via list on ipwcli
+ search = 'list %s' % (record.replace(';', '&&').replace('set', 'where'))
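+ # Example (illustrative): the record string
+ # 'arecord example.com 127.0.0.1 -set ttl=3600;container=ZoneOne'
+ # becomes the query
+ # 'list arecord example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne'.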
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=search)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or
+ ('NAPTRRecord %s' % self.dnsname in out and rc == 0)):
+ return True, rc, out, err
+
+ return False, rc, out, err
+
+ def deploy_record(self, record):
+ # create the record on IPWorks and verify that creation succeeded
+ stdin = 'create %s' % (record)
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) created.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record creation failed', stderr=out)
+
+ def delete_record(self, record):
+ # delete the record on IPWorks and verify that deletion succeeded
+ stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where'))
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) were updated.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record deletion failed', stderr=out)
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ dnsname=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']),
+ container=dict(type='str', required=True),
+ address=dict(type='str', required=False),
+ ttl=dict(type='int', required=False, default=3600),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priority=dict(type='int', required=False, default=10),
+ weight=dict(type='int', required=False, default=10),
+ port=dict(type='int', required=False),
+ target=dict(type='str', required=False),
+ order=dict(type='int', required=False),
+ preference=dict(type='int', required=False),
+ flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']),
+ service=dict(type='str', required=False),
+ replacement=dict(type='str', required=False),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True)
+ )
+
+ # define result
+ result = dict(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ record=''
+ )
+
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_if=[
+ ['type', 'A', ['address']],
+ ['type', 'AAAA', ['address']],
+ ['type', 'SRV', ['port', 'target']],
+ ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']],
+ ],
+ supports_check_mode=True
+ )
+
+ user = ResourceRecord(module)
+
+ if user.dnstype == 'NAPTR':
+ record = user.create_naptrrecord()
+ elif user.dnstype == 'SRV':
+ record = user.create_srvrecord()
+ elif user.dnstype == 'A' or user.dnstype == 'AAAA':
+ record = user.create_arecord()
+
+ found, rc, out, err = user.list_record(record)
+
+ if found and user.state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.delete_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ elif not found and user.state == 'present':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.deploy_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ else:
+ result['changed'] = False
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py
new file mode 100644
index 00000000..f983b857
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ldap_attr
+short_description: Add or remove LDAP attribute values
+description:
+ - Add or remove LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in
+ Ansible-2.7 as it was insecure to set the parameter that way."
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: 'The current "ldap_attr" module does not support LDAP attribute insertions or deletions with objectClass dependencies.'
+ alternative: 'Use M(community.general.ldap_attrs) instead. Deprecated in community.general 0.2.0.'
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ name:
+ description:
+ - The name of the attribute to modify.
+ type: str
+ required: true
+ state:
+ description:
+ - The state of the attribute values.
+ - If C(present), all given values will be added if they're missing.
+ - If C(absent), all given values will be removed if present.
+ - If C(exact), the set of values will be forced to exactly those provided and no others.
+ - If I(state=exact) and I(value) is an empty list, all values for this attribute will be removed.
+ type: str
+ choices: [ absent, exact, present ]
+ default: present
+ values:
+ description:
+ - The value(s) to add or remove. This can be a string or a list of
+ strings. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ type: raw
+ required: true
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcSuffix
+ values: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcAccess
+ values:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+ by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcDbIndex
+ values: "{{ item }}"
+ with_items:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: "{{ item.key }}"
+ values: "{{ item.value }}"
+ state: exact
+ with_dict:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ args: "{{ ldap_auth }}"
+'''
+
+RETURN = r'''
+modlist:
+ description: List of modified parameters.
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttr(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Normalize values
+ if isinstance(self.module.params['values'], list):
+ self.values = list(map(to_bytes, self.module.params['values']))
+ else:
+ self.values = [to_bytes(self.module.params['values'])]
+
+ def add(self):
+ values_to_add = list(filter(self._is_value_absent, self.values))
+
+ if len(values_to_add) > 0:
+ modlist = [(ldap.MOD_ADD, self.name, values_to_add)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def delete(self):
+ values_to_delete = list(filter(self._is_value_present, self.values))
+
+ if len(values_to_delete) > 0:
+ modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def exact(self):
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[self.name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % self.name, e)
+
+ current = results[0][1].get(self.name, [])
+ modlist = []
+
+ if frozenset(self.values) != frozenset(current):
+ if len(current) == 0:
+ modlist = [(ldap.MOD_ADD, self.name, self.values)]
+ elif len(self.values) == 0:
+ modlist = [(ldap.MOD_DELETE, self.name, None)]
+ else:
+ modlist = [(ldap.MOD_REPLACE, self.name, self.values)]
+
+ return modlist
+
+ def _is_value_present(self, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, self.name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ name=dict(type='str', required=True),
+ params=dict(type='dict'),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ values=dict(type='raw', required=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+ module.fail_json(msg="The `params` option to ldap_attr was removed since it circumvents Ansible's option handling")
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttr(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
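
The three state branches above all reduce to a python-ldap modlist handed to modify_s(). A minimal sketch of that mapping, assuming a local slapd reachable over ldapi:/// with a SASL EXTERNAL bind (the connection details and DN are illustrative, not part of the module):

import ldap
import ldap.sasl

conn = ldap.initialize('ldapi:///')
conn.sasl_interactive_bind_s('', ldap.sasl.external())

# state=present -> MOD_ADD for values missing on the server
# state=absent  -> MOD_DELETE for values present on the server
# state=exact   -> one MOD_REPLACE (or MOD_DELETE with None to drop the attribute)
modlist = [(ldap.MOD_REPLACE, 'olcSuffix', [b'dc=example,dc=com'])]
conn.modify_s('olcDatabase={1}hdb,cn=config', modlist)
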
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py
new file mode 100644
index 00000000..ae5cb7fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Maciej Delmanowski <drybjed@gmail.com>
+# Copyright: (c) 2017, Alexander Korinek <noles@a3k.net>
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ldap_attrs
+short_description: Add or remove multiple LDAP attribute values
+description:
+ - Add or remove multiple LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+version_added: '0.2.0'
+author:
+ - Jiri Tyr (@jtyr)
+ - Alexander Korinek (@noles)
+ - Maciej Delmanowski (@drybjed)
+requirements:
+ - python-ldap
+options:
+ state:
+ required: false
+ type: str
+ choices: [present, absent, exact]
+ default: present
+ description:
+ - The state of the attribute values. If C(present), all given attribute
+ values will be added if they're missing. If C(absent), all given
+ attribute values will be removed if present. If C(exact), the set of
+ attribute values will be forced to exactly those provided and no others.
+ If I(state=exact) and the value list for an attribute is empty, all
+ values for this attribute will be removed.
+ attributes:
+ required: true
+ type: dict
+ description:
+ - The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ ordered:
+ required: false
+ type: bool
+ default: 'no'
+ description:
+ - If C(yes), prepend list values with X-ORDERED index numbers in all
+ attributes specified in the current task. This is useful mostly with
+ I(olcAccess) attribute to easily manage LDAP Access Control Lists.
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcSuffix: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+ by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+# An alternative approach with automatic X-ORDERED numbering
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+ by * none
+ - >-
+ to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ ordered: yes
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcDbIndex:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+ state: exact
+
+- name: Remove an attribute with a specific value
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: "An example user account"
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+- name: Remove specified attribute(s) from an entry
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+'''
+
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+import re
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttrs(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.attrs = self.module.params['attributes']
+ self.state = self.module.params['state']
+ self.ordered = self.module.params['ordered']
+
+ def _order_values(self, values):
+ """ Preprend X-ORDERED index numbers to attribute's values. """
+ ordered_values = []
+
+ if isinstance(values, list):
+ for index, value in enumerate(values):
+ cleaned_value = re.sub(r'^\{\d+\}', '', value)
+ ordered_values.append('{' + str(index) + '}' + cleaned_value)
+
+ return ordered_values
+
+ def _normalize_values(self, values):
+ """ Normalize attribute's values. """
+ norm_values = []
+
+ if isinstance(values, list):
+ if self.ordered:
+ norm_values = list(map(to_bytes,
+ self._order_values(list(map(str,
+ values)))))
+ else:
+ norm_values = list(map(to_bytes, values))
+ else:
+ norm_values = [to_bytes(str(values))]
+
+ return norm_values
+
+ def add(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_absent(name, value):
+ modlist.append((ldap.MOD_ADD, name, value))
+
+ return modlist
+
+ def delete(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_present(name, value):
+ modlist.append((ldap.MOD_DELETE, name, value))
+
+ return modlist
+
+ def exact(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % name, e)
+
+ current = results[0][1].get(name, [])
+
+ if frozenset(norm_values) != frozenset(current):
+ if len(current) == 0:
+ modlist.append((ldap.MOD_ADD, name, norm_values))
+ elif len(norm_values) == 0:
+ modlist.append((ldap.MOD_DELETE, name, None))
+ else:
+ modlist.append((ldap.MOD_REPLACE, name, norm_values))
+
+ return modlist
+
+ def _is_value_present(self, name, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, name, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(name, value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(type='dict', required=True),
+ ordered=dict(type='bool', default=False, required=False),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttrs(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
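
The ordered=yes handling above rewrites each value with an X-ORDERED index before comparison. A standalone sketch of that normalization, using the same regex as _order_values():

import re

def order_values(values):
    # Strip any existing {n} prefix, then re-index by list position.
    return ['{%d}%s' % (i, re.sub(r'^\{\d+\}', '', v)) for i, v in enumerate(values)]

print(order_values(['{5}to * by * read', 'to * by * none']))
# ['{0}to * by * read', '{1}to * by * none']
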
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py
new file mode 100644
index 00000000..7ee0c3dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_entry
+short_description: Add or remove LDAP entries
+description:
+ - Add or remove LDAP entries. This module only asserts the existence or
+ non-existence of an LDAP entry, not its attributes. To assert the
+ attribute values of an entry, see M(community.general.ldap_attr).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in
+ Ansible-2.7 as it was insecure to set the parameter that way."
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ attributes:
+ description:
+ - If I(state=present), attributes necessary to create an entry. Existing
+ entries are never modified. To assert specific attribute values on an
+ existing entry, use M(community.general.ldap_attr) module instead.
+ type: dict
+ objectClass:
+ description:
+ - If I(state=present), value or list of values to use when creating
+ the entry. It can either be a string or an actual list of
+ strings.
+ type: list
+ elements: str
+ state:
+ description:
+ - The target state of the entry.
+ choices: [present, absent]
+ default: present
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = """
+- name: Make sure we have a parent entry for users
+ community.general.ldap_entry:
+ dn: ou=users,dc=example,dc=com
+ objectClass: organizationalUnit
+
+- name: Make sure we have an admin user
+ community.general.ldap_entry:
+ dn: cn=admin,dc=example,dc=com
+ objectClass:
+ - simpleSecurityObject
+ - organizationalRole
+ attributes:
+ description: An LDAP administrator
+ userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ args: "{{ ldap_auth }}"
+"""
+
+
+RETURN = """
+# Default return values
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap.modlist
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapEntry(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.state = self.module.params['state']
+
+ # Add the objectClass into the list of attributes
+ self.module.params['attributes']['objectClass'] = (
+ self.module.params['objectClass'])
+
+ # Load attributes
+ if self.state == 'present':
+ self.attrs = self._load_attrs()
+
+ def _load_attrs(self):
+ """ Turn attribute's value to array. """
+ attrs = {}
+
+ for name, value in self.module.params['attributes'].items():
+ if isinstance(value, list):
+ attrs[name] = list(map(to_bytes, value))
+ else:
+ attrs[name] = [to_bytes(value)]
+
+ return attrs
+
+ def add(self):
+ """ If self.dn does not exist, returns a callable that will add it. """
+ def _add():
+ self.connection.add_s(self.dn, modlist)
+
+ if not self._is_entry_present():
+ modlist = ldap.modlist.addModlist(self.attrs)
+ action = _add
+ else:
+ action = None
+
+ return action
+
+ def delete(self):
+ """ If self.dn exists, returns a callable that will delete it. """
+ def _delete():
+ self.connection.delete_s(self.dn)
+
+ if self._is_entry_present():
+ action = _delete
+ else:
+ action = None
+
+ return action
+
+ def _is_entry_present(self):
+ try:
+ self.connection.search_s(self.dn, ldap.SCOPE_BASE)
+ except ldap.NO_SUCH_OBJECT:
+ is_present = False
+ else:
+ is_present = True
+
+ return is_present
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(default={}, type='dict'),
+ objectClass=dict(type='list', elements='str'),
+ params=dict(type='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ required_if=[('state', 'present', ['objectClass'])],
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+ module.fail_json(msg="The `params` option to ldap_entry was removed since it circumvents Ansible's option handling")
+
+ state = module.params['state']
+
+ # Instantiate the LdapEntry object
+ ldap = LdapEntry(module)
+
+ # Get the action function
+ if state == 'present':
+ action = ldap.add()
+ elif state == 'absent':
+ action = ldap.delete()
+
+ # Perform the action
+ if action is not None and not module.check_mode:
+ try:
+ action()
+ except Exception as e:
+ module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=(action is not None))
+
+
+if __name__ == '__main__':
+ main()
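
LdapEntry.add() above defers to ldap.modlist.addModlist() to turn the attribute dict into the pair list that add_s() expects. A minimal sketch (the DN and attributes are illustrative):

import ldap.modlist

attrs = {
    'objectClass': [b'organizationalUnit'],
    'ou': [b'users'],
}
modlist = ldap.modlist.addModlist(attrs)
# With a bound connection: conn.add_s('ou=users,dc=example,dc=com', modlist)
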
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py
new file mode 100644
index 00000000..8d86ee93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2018, Keller Fuchs <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_passwd
+short_description: Set passwords in LDAP
+description:
+ - Set a password for an LDAP entry. This module only asserts that
+ a given password is valid for a given entry. To assert the
+ existence of an entry, see M(community.general.ldap_entry).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Keller Fuchs (@KellerFuchs)
+requirements:
+ - python-ldap
+options:
+ passwd:
+ description:
+ - The (plaintext) password to be set for I(dn).
+ type: str
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = """
+- name: Set a password for the admin user
+ community.general.ldap_passwd:
+ dn: cn=admin,dc=example,dc=com
+ passwd: "{{ vault_secret }}"
+
+- name: Setting passwords in bulk
+ community.general.ldap_passwd:
+ dn: "{{ item.key }}"
+ passwd: "{{ item.value }}"
+ with_dict:
+ alice: alice123123
+ bob: "|30b!"
+ admin: "{{ vault_secret }}"
+"""
+
+RETURN = """
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapPasswd(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.passwd = self.module.params['passwd']
+
+ def passwd_check(self):
+ try:
+ tmp_con = ldap.initialize(self.server_uri)
+ except ldap.LDAPError as e:
+ self.fail("Cannot initialize LDAP connection", e)
+
+ if self.start_tls:
+ try:
+ tmp_con.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ tmp_con.simple_bind_s(self.dn, self.passwd)
+ except ldap.INVALID_CREDENTIALS:
+ return True
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+ else:
+ return False
+ finally:
+ tmp_con.unbind()
+
+ def passwd_set(self):
+ # Exit early if the password is already valid
+ if not self.passwd_check():
+ return False
+
+ # Change the password (or throw an exception)
+ try:
+ self.connection.passwd_s(self.dn, None, self.passwd)
+ except ldap.LDAPError as e:
+ self.fail("Unable to set password", e)
+
+ # Password successfully changed
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(passwd=dict(no_log=True)),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ ldap = LdapPasswd(module)
+
+ if module.check_mode:
+ module.exit_json(changed=ldap.passwd_check())
+
+ module.exit_json(changed=ldap.passwd_set())
+
+
+if __name__ == '__main__':
+ main()
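
passwd_check() above makes the module idempotent by test-binding with the candidate password: a successful bind means no change is needed, while INVALID_CREDENTIALS means the password must be set. A minimal sketch of that probe, assuming a plain simple bind:

import ldap

def password_needs_change(server_uri, dn, passwd):
    conn = ldap.initialize(server_uri)
    try:
        conn.simple_bind_s(dn, passwd)
        return False      # bind succeeded: password already valid
    except ldap.INVALID_CREDENTIALS:
        return True       # bind rejected: password needs to be set
    finally:
        conn.unbind()
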
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py
new file mode 100644
index 00000000..3b1a2833
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2020, Sebastian Pfahl <eryx@gmx.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ldap_search
+version_added: '0.2.0'
+short_description: Search for entries in a LDAP server
+description:
+ - Return the results of an LDAP search.
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Sebastian Pfahl (@eryx12o45)
+requirements:
+ - python-ldap
+options:
+ dn:
+ required: true
+ type: str
+ description:
+ - The LDAP DN to search in.
+ scope:
+ choices: [base, onelevel, subordinate, children]
+ default: base
+ type: str
+ description:
+ - The LDAP scope to use.
+ filter:
+ default: '(objectClass=*)'
+ type: str
+ description:
+ - Used for filtering the LDAP search result.
+ attrs:
+ type: list
+ elements: str
+ description:
+ - A list of attributes for limiting the result. Use an
+ actual list or a comma-separated string.
+ schema:
+ default: false
+ type: bool
+ description:
+ - Set to C(true) to return the full attribute schema of entries, not
+ their attribute values. Overrides I(attrs) when provided.
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+"""
+
+EXAMPLES = r"""
+- name: Return all entries within the 'groups' organizational unit.
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ register: ldap_groups
+
+- name: Return GIDs for all groups
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ scope: "onelevel"
+ attrs:
+ - "gidNumber"
+ register: ldap_group_gids
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ dn=dict(type='str', required=True),
+ scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']),
+ filter=dict(type='str', default='(objectClass=*)'),
+ attrs=dict(type='list', elements='str'),
+ schema=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if not module.check_mode:
+ try:
+ LdapSearch(module).main()
+ except Exception as exception:
+ module.fail_json(msg="Attribute action failed.", details=to_native(exception))
+
+ module.exit_json(changed=False)
+
+
+def _extract_entry(dn, attrs):
+ extracted = {'dn': dn}
+ for attr, val in list(attrs.items()):
+ if len(val) == 1:
+ extracted[attr] = val[0]
+ else:
+ extracted[attr] = val
+ return extracted
+
+
+class LdapSearch(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ self.dn = self.module.params['dn']
+ self.filterstr = self.module.params['filter']
+ self.attrlist = []
+ self._load_scope()
+ self._load_attrs()
+ self._load_schema()
+
+ def _load_schema(self):
+ self.schema = self.module.boolean(self.module.params['schema'])
+ if self.schema:
+ self.attrsonly = 1
+ else:
+ self.attrsonly = 0
+
+ def _load_scope(self):
+ spec = dict(
+ base=ldap.SCOPE_BASE,
+ onelevel=ldap.SCOPE_ONELEVEL,
+ subordinate=ldap.SCOPE_SUBORDINATE,
+ children=ldap.SCOPE_SUBTREE,
+ )
+ self.scope = spec[self.module.params['scope']]
+
+ def _load_attrs(self):
+ self.attrlist = self.module.params['attrs'] or None
+
+ def main(self):
+ results = self.perform_search()
+ self.module.exit_json(changed=False, results=results)
+
+ def perform_search(self):
+ try:
+ results = self.connection.search_s(
+ self.dn,
+ self.scope,
+ filterstr=self.filterstr,
+ attrlist=self.attrlist,
+ attrsonly=self.attrsonly
+ )
+ if self.schema:
+ return [dict(dn=result[0], attrs=list(result[1].keys())) for result in results]
+ else:
+ return [_extract_entry(result[0], result[1]) for result in results]
+ except ldap.NO_SUCH_OBJECT:
+ self.module.fail_json(msg="Base not found: {0}".format(self.dn))
+
+
+if __name__ == '__main__':
+ main()
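
_extract_entry() above flattens python-ldap's (dn, attrs) result tuples so single-valued attributes come back as scalars and multi-valued ones as lists. A quick sketch with made-up data:

def extract_entry(dn, attrs):
    extracted = {'dn': dn}
    for attr, val in attrs.items():
        extracted[attr] = val[0] if len(val) == 1 else val
    return extracted

print(extract_entry('uid=jdoe,ou=people,dc=example,dc=com',
                    {'uid': [b'jdoe'], 'objectClass': [b'top', b'person']}))
# {'dn': 'uid=jdoe,...', 'uid': b'jdoe', 'objectClass': [b'top', b'person']}
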
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py
new file mode 100644
index 00000000..ae86db40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lldp
+requirements: [ lldpctl ]
+short_description: get details reported by lldp
+description:
+ - Reads data out of lldpctl
+options: {}
+author: "Andy Hill (@andyhky)"
+notes:
+ - Requires lldpd running and lldp enabled on switches
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+ - name: Gather information from lldp
+ community.general.lldp:
+
+ - name: Print each switch/port
+ ansible.builtin.debug:
+ msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
+ with_items: "{{ lldp.keys() }}"
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def gather_lldp(module):
+ cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue']
+ rc, output, err = module.run_command(cmd)
+ if output:
+ output_dict = {}
+ current_dict = {}
+ lldp_entries = output.split("\n")
+
+ for entry in lldp_entries:
+ if entry.startswith('lldp'):
+ # Regular "lldp.<iface>...<key>=<value>" line: split the dotted
+ # path from the value and remember the final key name.
+ path, value = entry.strip().split("=", 1)
+ path = path.split(".")
+ path_components, final = path[:-1], path[-1]
+ else:
+ # Continuation of a multi-line value: append it to the value
+ # stored for the previous key.
+ value = current_dict[final] + '\n' + entry
+
+ # Walk (creating as needed) the nested dicts along the dotted path,
+ # then store the value under the final key.
+ current_dict = output_dict
+ for path_component in path_components:
+ current_dict[path_component] = current_dict.get(path_component, {})
+ current_dict = current_dict[path_component]
+ current_dict[final] = value
+ return output_dict
+
+
+def main():
+ module = AnsibleModule({})
+
+ lldp_output = gather_lldp(module)
+ try:
+ data = {'lldp': lldp_output['lldp']}
+ module.exit_json(ansible_facts=data)
+ except TypeError:
+ module.fail_json(msg="lldpctl command failed. is lldpd running?")
+
+
+if __name__ == '__main__':
+ main()
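
gather_lldp() above folds the flat `lldpctl -f keyvalue` output into nested dicts keyed by each dotted path component. A self-contained sketch of the same nesting logic on two made-up lines:

output_dict = {}
for line in ['lldp.eth0.chassis.name=switch1.example.com',
             'lldp.eth0.port.ifname=Gi0/24']:
    path, value = line.split('=', 1)
    *path_components, final = path.split('.')
    d = output_dict
    for comp in path_components:
        d = d.setdefault(comp, {})
    d[final] = value

print(output_dict['lldp']['eth0']['chassis']['name'])  # switch1.example.com
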
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py
new file mode 100644
index 00000000..5d63a5b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018 Nicolai Buchwitz <nb@tipi-net.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: netcup_dns
+notes: []
+short_description: manage Netcup DNS records
+description:
+ - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)"
+options:
+ api_key:
+ description:
+ - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ api_password:
+ description:
+ - API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ customer_id:
+ description:
+ - Netcup customer id
+ required: True
+ type: int
+ domain:
+ description:
+ - Domain name for which the records should be added or removed
+ required: True
+ type: str
+ record:
+ description:
+ - Record to add or delete, supports wildcard (*). Default is C(@) (i.e. the zone name)
+ default: "@"
+ aliases: [ name ]
+ type: str
+ type:
+ description:
+ - Record type
+ choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
+ required: True
+ type: str
+ value:
+ description:
+ - Record value
+ required: true
+ type: str
+ solo:
+ type: bool
+ default: False
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with C(state=present)
+ - This will delete all other records with the same record name and type.
+ priority:
+ description:
+ - Record priority. Required for C(type=MX)
+ required: False
+ type: int
+ state:
+ description:
+ - Whether the record should exist or not
+ required: False
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+requirements:
+ - "nc-dnsapi >= 0.1.3"
+author: "Nicolai Buchwitz (@nbuchwitz)"
+
+'''
+
+EXAMPLES = '''
+- name: Create a record of type A
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+
+- name: Delete that record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+ state: absent
+
+- name: Create a wildcard record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "*"
+ type: "A"
+ value: "127.0.1.1"
+
+- name: Set the MX record for example.com
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ type: "MX"
+ value: "mail.example.com"
+
+- name: Set a record and ensure that this is the only one
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ name: "demo"
+ domain: "example.com"
+ type: "AAAA"
+ value: "::1"
+ solo: true
+'''
+
+RETURN = '''
+records:
+ description: list containing all records
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: the record name
+ returned: success
+ type: str
+ sample: fancy-hostname
+ type:
+ description: the record type
+ returned: success
+ type: str
+ sample: A
+ value:
+ description: the record destination
+ returned: success
+ type: str
+ sample: 127.0.0.1
+ priority:
+ description: the record priority (only relevant if type=MX)
+ returned: success
+ type: int
+ sample: 0
+ id:
+ description: internal id of the record
+ returned: success
+ type: int
+ sample: 12345
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+NCDNSAPI_IMP_ERR = None
+try:
+ import nc_dnsapi
+ from nc_dnsapi import DNSRecord
+
+ HAS_NCDNSAPI = True
+except ImportError:
+ NCDNSAPI_IMP_ERR = traceback.format_exc()
+ HAS_NCDNSAPI = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_password=dict(required=True, no_log=True),
+ customer_id=dict(required=True, type='int'),
+
+ domain=dict(required=True),
+ record=dict(required=False, default='@', aliases=['name']),
+ type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
+ value=dict(required=True),
+ priority=dict(required=False, type='int'),
+ solo=dict(required=False, type='bool', default=False),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_NCDNSAPI:
+ module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)
+
+ api_key = module.params.get('api_key')
+ api_password = module.params.get('api_password')
+ customer_id = module.params.get('customer_id')
+ domain = module.params.get('domain')
+ record_type = module.params.get('type')
+ record = module.params.get('record')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ solo = module.params.get('solo')
+ state = module.params.get('state')
+
+ if record_type == 'MX' and not priority:
+ module.fail_json(msg="record type MX required the 'priority' argument")
+
+ has_changed = False
+ all_records = []
+ try:
+ with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
+ all_records = api.dns_records(domain)
+ record = DNSRecord(record, record_type, value, priority=priority)
+
+ # try to get existing record
+ record_exists = False
+ for r in all_records:
+ if r == record:
+ record_exists = True
+ record = r
+
+ break
+
+ if state == 'present':
+ if solo:
+ obsolete_records = [r for r in all_records if
+ r.hostname == record.hostname
+ and r.type == record.type
+ and not r.destination == record.destination]
+
+ if obsolete_records:
+ if not module.check_mode:
+ all_records = api.delete_dns_records(domain, obsolete_records)
+
+ has_changed = True
+
+ if not record_exists:
+ if not module.check_mode:
+ all_records = api.add_dns_record(domain, record)
+
+ has_changed = True
+ elif state == 'absent' and record_exists:
+ if not module.check_mode:
+ all_records = api.delete_dns_record(domain, record)
+
+ has_changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))  # ex.message does not exist on Python 3 exceptions
+
+ module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
+
+
+def record_data(r):
+ return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id}
+
+
+if __name__ == '__main__':
+ main()
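
The solo=true branch above treats every record that shares the name and type of the managed record, but points at a different destination, as obsolete. A sketch of that predicate (the record objects mirror nc_dnsapi's DNSRecord attributes, used here illustratively):

def obsolete_records(all_records, record):
    # Same name and type, different destination -> slated for deletion.
    return [r for r in all_records
            if r.hostname == record.hostname
            and r.type == record.type
            and r.destination != record.destination]
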
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py
new file mode 100644
index 00000000..660c9bc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_a_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS A records
+description:
+ - Adds and/or removes instances of A record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:a) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ type: str
+ view:
+ description:
+ - Sets the DNS view to associate this A record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ type: str
+ ipv4addr:
+ description:
+ - Configures the IPv4 address for this A record. Users can dynamically
+ allocate an IPv4 address to the A record by passing a dictionary
+ containing I(nios_next_ip) and a I(CIDR network range). See the examples.
+ aliases:
+ - ipv4
+ type: str
+ ttl:
+ description:
+ - Configures the TTL to be associated with this A record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an A record from the system
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an A record name
+ community.general.nios_a_record:
+ name: {new_name: a_new.ansible.com, old_name: a.ansible.com}
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Dynamically add a record to next available ip
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: {nios_next_ip: 192.168.10.0/24}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_A_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_A_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
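
The nios_* modules below all follow the same declarative pattern as nios_a_record: a single ib_spec dict doubles as the Ansible argument spec and the WAPI object description, with the non-standard ib_req flag marking the fields WapiModule uses to locate an existing object. A sketch of how those lookup keys fall out of the spec (field names copied from the module above):

ib_spec = dict(
    name=dict(required=True, ib_req=True),         # lookup key
    view=dict(default='default', ib_req=True),     # lookup key
    ipv4addr=dict(aliases=['ipv4'], ib_req=True),  # lookup key
    ttl=dict(type='int'),                          # payload-only field
)
lookup_keys = [k for k, v in ib_spec.items() if v.get('ib_req')]
print(lookup_keys)  # ['name', 'view', 'ipv4addr']
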
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py
new file mode 100644
index 00000000..b7caecee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_aaaa_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS AAAA records
+description:
+ - Adds and/or removes instances of AAAA record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this AAAA record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for this AAAA record.
+ aliases:
+ - ipv6
+ ttl:
+ description:
+ - Configures the TTL to be associated with this AAAA record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an AAAA record from the system
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an AAAA record name
+ community.general.nios_aaaa_record:
+ name: {new_name: aaaa_new.ansible.com, old_name: aaaa.ansible.com}
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_AAAA_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_AAAA_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py
new file mode 100644
index 00000000..2863d148
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_cname_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS CNAME records
+description:
+ - Adds and/or removes instances of CNAME record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:cname) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this CNAME record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ canonical:
+ description:
+ - Configures the canonical name for this CNAME record.
+ aliases:
+ - cname
+ ttl:
+ description:
+ - Configures the TTL to be associated with this CNAME record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a CNAME record from the system
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_CNAME_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ canonical=dict(aliases=['cname'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_CNAME_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py
new file mode 100644
index 00000000..1bb8d068
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_dns_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS views
+description:
+ - Adds and/or removes instances of DNS view objects from
+ Infoblox NIOS servers. This module manages NIOS C(view) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of DNS view object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. The hostname can also be updated by passing a dict
+ containing I(new_name) and I(old_name). See the examples.
+ required: true
+ aliases:
+ - view
+ network_view:
+ description:
+ - Specifies the name of the network view to assign the configured
+ DNS view to. The network view must already be configured on the
+ target system.
+ default: default
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for dns view
+ community.general.nios_dns_view:
+ name: ansible-dns
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the dns view instance
+ community.general.nios_dns_view:
+ name: {new_name: ansible-dns-new, old_name: ansible-dns}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_DNS_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['view'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_DNS_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py
new file mode 100644
index 00000000..a46db04f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_fixed_address
+author: "Sumit Jaiswal (@sjaiswal)"
+short_description: Configure Infoblox NIOS DHCP Fixed Address
+description:
+ - A fixed address is a specific IP address that a DHCP server
+ always assigns when a lease request comes from a particular
+ MAC address of the client.
+ - Supports both the IPv4 and IPv6 internet protocols
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the hostname under which the fixed DHCP IP address is
+ stored for the respective MAC address.
+ required: true
+ ipaddr:
+ description:
+ - The IPv4 or IPv6 address of the fixed address.
+ required: true
+ mac:
+ description:
+ - The MAC address of the interface.
+ required: true
+ network:
+ description:
+ - Specifies the network range, in CIDR notation, in which C(ipaddr) exists.
+ required: true
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+ required: false
+ default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+ - The name of the space this DHCP option is associated to
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an ipv6 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv6_fixed
+ ipaddr: fe80::1/10
+ mac: 08:6d:41:e8:fd:e8
+ network: fe80::/64
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set dhcp options for an ipv4 fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count(".") == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
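+
+# Illustrative sketch, not part of the module: expected behaviour of the two
+# validators above. socket.inet_aton() accepts shorthand such as '10.1', so
+# validate_ip_address() additionally requires exactly three dots.
+#
+#   validate_ip_address('192.168.10.1')    -> True
+#   validate_ip_address('10.1')            -> False (inet_aton accepts it,
+#                                                    dot count rejects it)
+#   validate_ip_v6_address('fe80::1')      -> True
+#   validate_ip_v6_address('not-an-ip')    -> False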
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+ It will remove any options that are set to None since WAPI will error on
+ that condition. The use_option field only applies
+ to special options that are displayed separately from other options and
+ have a use flag. This function removes the use_option flag from all
+ other options. It will also verify that either `name` or `num` is
+ set in the structure but does not validate the values are equal.
+ The remainder of the value validation is performed by WAPI
+ '''
+ special_options = ['routers', 'router-templates', 'domain-name-servers',
+ 'domain-name', 'broadcast-address', 'broadcast-address-offset',
+ 'dhcp-lease-time', 'dhcp6.name-servers']
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+ # use .get(): options specified only by `num` have no `name` key
+ if opt.get('name') not in special_options:
+ del opt['use_option']
+ options.append(opt)
+ return options
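+
+# Illustrative sketch, not part of the module: given
+#   options: [{'name': 'domain-name', 'num': None, 'value': 'ansible.com',
+#              'use_option': True, 'vendor_class': 'DHCP'}]
+# options() strips the None 'num' and, since 'domain-name' is listed in
+# special_options, keeps 'use_option', producing
+#   [{'name': 'domain-name', 'value': 'ansible.com', 'use_option': True,
+#     'vendor_class': 'DHCP'}]
+# For an option outside special_options (e.g. 'time-offset') the
+# 'use_option' key would be removed instead.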
+
+
+def validate_ip_addr_type(ip, arg_spec, module):
+ '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox network type
+ '''
+ check_ip = ip.split('/')
+
+ if validate_ip_address(check_ip[0]) and 'ipaddr' in arg_spec:
+ arg_spec['ipv4addr'] = arg_spec.pop('ipaddr')
+ module.params['ipv4addr'] = module.params.pop('ipaddr')
+ return NIOS_IPV4_FIXED_ADDRESS, arg_spec, module
+ elif validate_ip_v6_address(check_ip[0]) and 'ipaddr' in arg_spec:
+ arg_spec['ipv6addr'] = arg_spec.pop('ipaddr')
+ module.params['ipv6addr'] = module.params.pop('ipaddr')
+ return NIOS_IPV6_FIXED_ADDRESS, arg_spec, module
+ # fail explicitly instead of returning None, which would raise a
+ # TypeError when unpacked in main()
+ module.fail_json(msg='ipaddr is not a valid IPv4 or IPv6 address')
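+
+# Illustrative sketch, not part of the module: for ipaddr='192.168.10.1' the
+# function above renames 'ipaddr' to 'ipv4addr' in both arg_spec and
+# module.params and returns NIOS_IPV4_FIXED_ADDRESS; for ipaddr='fe80::1/10'
+# it renames the key to 'ipv6addr' and returns NIOS_IPV6_FIXED_ADDRESS.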
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ name=dict(required=True),
+ ipaddr=dict(required=True, ib_req=True),
+ mac=dict(required=True, ib_req=True),
+ network=dict(required=True),
+ network_view=dict(default='default'),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # to get the argument ipaddr
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ # to modify argument based on ipaddr type i.e. IPV4/IPV6
+ fixed_address_ip_type, ib_spec, module = validate_ip_addr_type(obj_filter['ipaddr'], ib_spec, module)
+
+ wapi = WapiModule(module)
+
+ result = wapi.run(fixed_address_ip_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py
new file mode 100644
index 00000000..efab39de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_host_record
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS host records
+description:
+ - Adds and/or removes instances of host record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:host) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of host record object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. User can also update the hostname as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this host record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ configure_for_dns:
+ description:
+ - Sets whether the host record is configured for DNS. To bypass DNS,
+ set this value to false.
+ type: bool
+ required: false
+ default: true
+ aliases:
+ - dns
+ ipv4addrs:
+ description:
+ - Configures the IPv4 addresses for this host record. This argument
+ accepts a list of values (see suboptions)
+ aliases:
+ - ipv4
+ type: list
+ elements: dict
+ suboptions:
+ ipv4addr:
+ description:
+ - Configures the IPv4 address for the host record. Users can
+ dynamically allocate an ipv4 address to the host record by passing a
+ dictionary containing I(nios_next_ip) and a I(CIDR network range). To
+ add or remove an ipv4 address from an existing record, the
+ I(add)/I(remove) params need to be used. See examples.
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+ - Configure the host_record over DHCP instead of DNS. If set to
+ true, the MAC address must also be provided.
+ required: false
+ aliases:
+ - dhcp
+ mac:
+ description:
+ - Configures the hardware MAC address for the host record. Required
+ when I(configure_for_dhcp) is set to true.
+ required: false
+ add:
+ description:
+ - Adds the ipv4 address to an existing host record. Note that with
+ I(add) the I(state) must stay I(present), as a new IP address is
+ allocated to the existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ remove:
+ description:
+ - Removes the ipv4 address from an existing host record. Note that
+ with I(remove) the I(state) must be changed to I(absent), as the IP
+ address is de-allocated from the existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ ipv6addrs:
+ description:
+ - Configures the IPv6 addresses for the host record. This argument
+ accepts a list of values (see suboptions)
+ aliases:
+ - ipv6
+ type: list
+ elements: dict
+ suboptions:
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for the host record
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+ - Configure the host_record over DHCP instead of DNS. If set to
+ true, the MAC address must also be provided.
+ required: false
+ aliases:
+ description:
+ - Configures an optional list of additional aliases to add to the host
+ record. These are equivalent to CNAMEs but held within a host
+ record. Must be in list format.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this host record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an ipv4 host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ aliases:
+ - cname.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add a comment to an existing host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove a host record from the system
+ community.general.nios_host_record:
+ name: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an ipv4 host record
+ community.general.nios_host_record:
+ name: {new_name: host-new.ansible.com, old_name: host.ansible.com}
+ ipv4:
+ - address: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record bypassing DNS
+ community.general.nios_host_record:
+ name: new_host
+ ipv4:
+ - address: 192.168.10.1
+ dns: false
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record over DHCP
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ dhcp: true
+ mac: 00-80-C8-E3-4C-BD
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Dynamically add host record to next available ip
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: {nios_next_ip: 192.168.10.0/24}
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add ip to host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.2
+ add: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove ip from host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ remove: true
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_HOST_RECORD
+
+
+def ipaddr(module, key, filtered_keys=None):
+ ''' Transforms the input value into a struct supported by WAPI
+ This function will transform the input from the playbook into a struct
+ that is valid for WAPI in the form of:
+ {
+ ipv4addr: <value>,
+ mac: <value>
+ }
+ This function does not validate the values are properly formatted or in
+ the acceptable range, that is left to WAPI.
+ '''
+ filtered_keys = filtered_keys or list()
+ objects = list()
+ for item in module.params[key]:
+ objects.append(dict([(k, v) for k, v in iteritems(item) if v is not None and k not in filtered_keys]))
+ return objects
+
+
+def ipv4addrs(module):
+ return ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp'])
+
+
+def ipv6addrs(module):
+ return ipaddr(module, 'ipv6addrs', filtered_keys=['address', 'dhcp'])
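+
+# Illustrative sketch, not part of the module: for a task that passes
+#   ipv4: [{'address': '192.168.10.1', 'dhcp': True, 'mac': '00-80-C8-E3-4C-BD'}]
+# ipv4addrs(module) filters out the alias keys ('address', 'dhcp') and any
+# None values, leaving the WAPI-ready struct
+#   [{'ipv4addr': '192.168.10.1', 'configure_for_dhcp': True,
+#     'mac': '00-80-C8-E3-4C-BD'}]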
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4addr_spec = dict(
+ ipv4addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, aliases=['dhcp'], ib_req=True),
+ mac=dict(required=False, ib_req=True),
+ add=dict(type='bool', required=False),
+ remove=dict(type='bool', required=False)
+ )
+
+ ipv6addr_spec = dict(
+ ipv6addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, ib_req=True),
+ mac=dict(required=False, ib_req=True)
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addrs=dict(type='list', aliases=['ipv4'], elements='dict', options=ipv4addr_spec, transform=ipv4addrs),
+ ipv6addrs=dict(type='list', aliases=['ipv6'], elements='dict', options=ipv6addr_spec, transform=ipv6addrs),
+ configure_for_dns=dict(type='bool', default=True, required=False, aliases=['dns'], ib_req=True),
+ aliases=dict(type='list'),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_HOST_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py
new file mode 100644
index 00000000..aff8ca93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_member
+author: "Krishna Vasudevan (@krisvasudevan)"
+short_description: Configure Infoblox NIOS members
+description:
+ - Adds and/or removes Infoblox NIOS servers. This module manages NIOS C(member) objects using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ host_name:
+ description:
+ - Specifies the host name of the member to either add or remove from
+ the NIOS instance.
+ required: true
+ aliases:
+ - name
+ vip_setting:
+ description:
+ - Configures the network settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of the Grid Member
+ subnet_mask:
+ description:
+ - The subnet mask for the Grid Member
+ gateway:
+ description:
+ - The default gateway for the Grid Member
+ ipv6_setting:
+ description:
+ - Configures the IPv6 settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of the Grid Member
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix for the Grid Member
+ gateway:
+ description:
+ - The gateway address for the Grid Member
+ config_addr_type:
+ description:
+ - Address configuration type (IPV4/IPV6/BOTH)
+ default: IPV4
+ comment:
+ description:
+ - A descriptive comment of the Grid member.
+ extattrs:
+ description:
+ - Extensible attributes associated with the object.
+ enable_ha:
+ description:
+ - If set to True, the member has two physical nodes (HA pair).
+ type: bool
+ router_id:
+ description:
+ - Virtual router identifier. Provide this ID if I(enable_ha) is set to "true". This is a unique VRID number (from 1 to 255) for the local subnet.
+ lan2_enabled:
+ description:
+ - When set to "true", the LAN2 port is enabled as an independent port or as a port for failover purposes.
+ type: bool
+ lan2_port_setting:
+ description:
+ - Settings for the Grid member LAN2 port if 'lan2_enabled' is set to "true".
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+ - If set to True, the LAN2 port has its own IP settings.
+ type: bool
+ network_setting:
+ description:
+ - If the 'enabled' field is set to True, this defines IPv4 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of LAN2
+ subnet_mask:
+ description:
+ - The subnet mask of LAN2
+ gateway:
+ description:
+ - The default gateway of LAN2
+ v6_network_setting:
+ description:
+ - If the 'enabled' field is set to True, this defines IPv6 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of LAN2
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of LAN2
+ gateway:
+ description:
+ - The gateway address of LAN2
+ platform:
+ description:
+ - Configures the Hardware Platform.
+ default: INFOBLOX
+ node_info:
+ description:
+ - Configures the node information list with detailed status report on the operations of the Grid Member.
+ type: list
+ elements: dict
+ suboptions:
+ lan2_physical_setting:
+ description:
+ - Physical port settings for the LAN2 interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+ - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_ha_port_setting:
+ description:
+ - LAN/HA port settings for the node.
+ type: list
+ elements: dict
+ suboptions:
+ ha_ip_address:
+ description:
+ - HA IP address.
+ ha_port_setting:
+ description:
+ - Physical port settings for the HA interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+ - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_port_setting:
+ description:
+ - Physical port settings for the LAN interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+ - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ mgmt_ipv6addr:
+ description:
+ - Public IPv6 address for the LAN1 interface.
+ mgmt_lan:
+ description:
+ - Public IPv4 address for the LAN1 interface.
+ mgmt_network_setting:
+ description:
+ - Network settings for the MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of MGMT
+ subnet_mask:
+ description:
+ - The subnet mask of MGMT
+ gateway:
+ description:
+ - The default gateway of MGMT
+ v6_mgmt_network_setting:
+ description:
+ - The network settings for the IPv6 MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of MGMT
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of MGMT
+ gateway:
+ description:
+ - The gateway address of MGMT
+ mgmt_port_setting:
+ description:
+ - Settings for the member MGMT port.
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+ - Determines if MGMT port settings should be enabled.
+ type: bool
+ security_access_enabled:
+ description:
+ - Determines if security access on the MGMT port is enabled or not.
+ type: bool
+ vpn_enabled:
+ description:
+ - Determines if VPN on the MGMT port is enabled or not.
+ type: bool
+ upgrade_group:
+ description:
+ - The name of the upgrade group to which this Grid member belongs.
+ default: Default
+ use_syslog_proxy_setting:
+ description:
+ - Use flag for external_syslog_server_enable, syslog_servers, syslog_proxy_setting, syslog_size.
+ type: bool
+ external_syslog_server_enable:
+ description:
+ - Determines if external syslog servers should be enabled
+ type: bool
+ syslog_servers:
+ description:
+ - The list of external syslog servers.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The server address.
+ category_list:
+ description:
+ - The list of all syslog logging categories.
+ connection_type:
+ description:
+ - The connection type for communicating with this server (STCP/TCP/UDP).
+ default: UDP
+ local_interface:
+ description:
+ - The local interface through which the appliance sends syslog messages to the syslog server (ANY/LAN/MGMT).
+ default: ANY
+ message_node_id:
+ description:
+ - Identify the node in the syslog message. (HOSTNAME/IP_HOSTNAME/LAN/MGMT)
+ default: LAN
+ message_source:
+ description:
+ - The source of syslog messages to be sent to the external syslog server.
+ default: ANY
+ only_category_list:
+ description:
+ - The list of selected syslog logging categories. The appliance forwards syslog messages that belong to the selected categories.
+ type: bool
+ port:
+ description:
+ - The port this server listens on.
+ default: 514
+ severity:
+ description:
+ - The severity filter. The appliance sends log messages of the specified severity and above to the external syslog server.
+ default: DEBUG
+ pre_provisioning:
+ description:
+ - Pre-provisioning information.
+ type: list
+ elements: dict
+ suboptions:
+ hardware_info:
+ description:
+ - An array of structures that describe the hardware being pre-provisioned.
+ type: list
+ elements: dict
+ suboptions:
+ hwmodel:
+ description:
+ - Hardware model
+ hwtype:
+ description:
+ - Hardware type.
+ licenses:
+ description:
+ - An array of license types.
+ create_token:
+ description:
+ - Flag for initiating a create token request for pre-provisioned members.
+ type: bool
+ default: False
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Add a member to the grid with IPv4 address
+ community.general.nios_member:
+ host_name: member01.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add an HA member to the grid
+ community.general.nios_member:
+ host_name: memberha.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ enable_ha: true
+ router_id: 150
+ node_info:
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.70
+ mgmt_lan: 192.168.1.80
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.71
+ mgmt_lan: 192.168.1.81
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the member with pre-provisioning details specified
+ community.general.nios_member:
+ name: member01.localdomain
+ pre_provisioning:
+ - hardware_info:
+ - hwmodel: IB-VM-820
+ hwtype: IB-VNIOS
+ licenses:
+ - dns
+ - dhcp
+ - enterprise
+ - vnios
+ comment: "Updated by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the member
+ community.general.nios_member:
+ name: member01.localdomain
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MEMBER
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4_spec = dict(
+ address=dict(),
+ subnet_mask=dict(),
+ gateway=dict(),
+ )
+
+ ipv6_spec = dict(
+ virtual_ip=dict(),
+ cidr_prefix=dict(type='int'),
+ gateway=dict(),
+ )
+
+ port_spec = dict(
+ auto_port_setting_enabled=dict(type='bool'),
+ duplex=dict(),
+ speed=dict(),
+ )
+
+ lan2_port_spec = dict(
+ enabled=dict(type='bool'),
+ network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ ha_port_spec = dict(
+ ha_ip_address=dict(),
+ ha_port_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_port_setting=dict(type='list', elements='dict', options=port_spec),
+ mgmt_lan=dict(),
+ mgmt_ipv6addr=dict(),
+ )
+
+ node_spec = dict(
+ lan2_physical_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_ha_port_setting=dict(type='list', elements='dict', options=ha_port_spec),
+ mgmt_network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_mgmt_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ mgmt_port_spec = dict(
+ enabled=dict(type='bool'),
+ security_access_enabled=dict(type='bool'),
+ vpn_enabled=dict(type='bool'),
+ )
+
+ syslog_spec = dict(
+ address=dict(),
+ category_list=dict(type='list'),
+ connection_type=dict(default='UDP'),
+ local_interface=dict(default='ANY'),
+ message_node_id=dict(default='LAN'),
+ message_source=dict(default='ANY'),
+ only_category_list=dict(type='bool'),
+ port=dict(type='int', default=514),
+ severity=dict(default='DEBUG'),
+ )
+
+ hw_spec = dict(
+ hwmodel=dict(),
+ hwtype=dict(),
+ )
+
+ pre_prov_spec = dict(
+ hardware_info=dict(type='list', elements='dict', options=hw_spec),
+ licenses=dict(type='list'),
+ )
+
+ ib_spec = dict(
+ host_name=dict(required=True, aliases=['name'], ib_req=True),
+ vip_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ ipv6_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ config_addr_type=dict(default='IPV4'),
+ comment=dict(),
+ enable_ha=dict(type='bool', default=False),
+ router_id=dict(type='int'),
+ lan2_enabled=dict(type='bool', default=False),
+ lan2_port_setting=dict(type='list', elements='dict', options=lan2_port_spec),
+ platform=dict(default='INFOBLOX'),
+ node_info=dict(type='list', elements='dict', options=node_spec),
+ mgmt_port_setting=dict(type='list', elements='dict', options=mgmt_port_spec),
+ upgrade_group=dict(default='Default'),
+ use_syslog_proxy_setting=dict(type='bool'),
+ external_syslog_server_enable=dict(type='bool'),
+ syslog_servers=dict(type='list', elements='dict', options=syslog_spec),
+ pre_provisioning=dict(type='list', elements='dict', options=pre_prov_spec),
+ extattrs=dict(type='dict'),
+ create_token=dict(type='bool', default=False),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MEMBER, ib_spec)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py
new file mode 100644
index 00000000..ca1f1f81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_mx_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS MX records
+description:
+ - Adds and/or removes instances of MX record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:mx) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this MX record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ mail_exchanger:
+ description:
+ - Configures the mail exchanger FQDN for this MX record.
+ aliases:
+ - mx
+ preference:
+ description:
+ - Configures the preference (0-65535) for this MX record.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this MX record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an MX record from the system
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MX_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ mail_exchanger=dict(aliases=['mx'], ib_req=True),
+ preference=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MX_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py
new file mode 100644
index 00000000..de57e692
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_naptr_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS NAPTR records
+description:
+ - Adds and/or removes instances of NAPTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:naptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this NAPTR record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ order:
+ description:
+ - Configures the order (0-65535) for this NAPTR record. This parameter
+ specifies the order in which the NAPTR rules are applied when
+ multiple rules are present.
+ preference:
+ description:
+ - Configures the preference (0-65535) for this NAPTR record. The
+ preference field determines the order NAPTR records are processed
+ when multiple records with the same order parameter are present.
+ replacement:
+ description:
+ - Configures the replacement field for this NAPTR record.
+ For nonterminal NAPTR records, this field specifies the
+ next domain name to look up.
+ services:
+ description:
+ - Configures the services field (128 characters maximum) for this
+ NAPTR record. The services field contains protocol and service
+ identifiers, such as "http+E2U" or "SIPS+D2T".
+ required: false
+ flags:
+ description:
+ - Configures the flags field for this NAPTR record. These control the
+ interpretation of the fields for an NAPTR record object. Supported
+ values for the flags field are "U", "S", "P" and "A".
+ required: false
+ regexp:
+ description:
+ - Configures the regexp field for this NAPTR record. This is the
+ regular expression-based rewriting rule of the NAPTR record. This
+ should be a POSIX compliant regular expression, including the
+ substitution rule and flags. Refer to RFC 2915 for the field syntax
+ details.
+ required: false
+ ttl:
+ description:
+ - Configures the TTL to be associated with this NAPTR record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a NAPTR record from the system
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ order=dict(type='int', ib_req=True),
+ preference=dict(type='int', ib_req=True),
+ replacement=dict(ib_req=True),
+ services=dict(),
+ flags=dict(),
+ regexp=dict(),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:naptr', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py
new file mode 100644
index 00000000..54b8dfb1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network object
+description:
+ - Adds and/or removes instances of network objects from
+ Infoblox NIOS servers. This module manages NIOS C(network) objects
+ using the Infoblox WAPI interface over REST.
+ - Supports both IPV4 and IPV6 internet protocols
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ network:
+ description:
+ - Specifies the network to add or remove from the system. The value
+ should use CIDR notation.
+ required: true
+ aliases:
+ - name
+ - cidr
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+ required: false
+ default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure. The standard options are
+ C(routers), C(router-templates), C(domain-name-servers), C(domain-name),
+ C(broadcast-address), C(broadcast-address-offset), C(dhcp-lease-time),
+ and C(dhcp6.name-servers).
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+ - The name of the space this DHCP option is associated to
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ container:
+ description:
+ - If set to true, the module manages a network container object
+ that is to be added or removed from the system.
+ type: bool
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an ipv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an ipv6 network
+ community.general.nios_network:
+ network: fe80::/64
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set dhcp options for an ipv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an ipv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an ipv4 network container
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an ipv6 network container
+ community.general.nios_network:
+ network: fe80::/64
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an ipv4 network container
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK, NIOS_IPV6_NETWORK
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK_CONTAINER, NIOS_IPV6_NETWORK_CONTAINER
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+ It will remove any options that are set to None since WAPI will error on
+ that condition. It will also verify that either `name` or `num` is
+ set in the structure but does not validate the values are equal.
+ The remainder of the value validation is performed by WAPI
+ '''
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+ options.append(opt)
+ return options
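+
+# Illustrative sketch, not part of the module: each options entry must carry
+# 'name' or 'num'. For example
+#   options: [{'num': 43, 'name': None, 'value': '01:02:03',
+#              'use_option': True, 'vendor_class': 'DHCP'}]
+# passes through with the None 'name' stripped, while
+#   options: [{'value': 'ansible.com'}]
+# fails with 'one of `name` or `num` is required for option value'.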
+
+
+def check_ip_addr_type(obj_filter, ib_spec):
+ '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox
+ network/networkcontainer type
+ '''
+
+ ip = obj_filter['network']
+ if 'container' in obj_filter and obj_filter['container']:
+ check_ip = ip.split('/')
+ del ib_spec['container'] # removing the container key from post arguments
+ del ib_spec['options'] # removing option argument as for network container it's not supported
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK_CONTAINER, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK_CONTAINER, ib_spec
+ else:
+ check_ip = ip.split('/')
+ del ib_spec['container'] # removing the container key from post arguments
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK, ib_spec
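+
+# Illustrative sketch, not part of the module: with obj_filter
+#   {'network': '192.168.10.0/24', 'network_view': 'default', 'container': True}
+# the function above strips 'container' and 'options' from ib_spec and returns
+# NIOS_IPV4_NETWORK_CONTAINER; without container=True the same CIDR yields
+# NIOS_IPV4_NETWORK, and an fe80::/64 network maps to the IPv6 constants.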
+
+
+def check_vendor_specific_dhcp_option(module, ib_spec):
+ '''This function will check if the argument dhcp option belongs to vendor-specific and if yes then will remove
+ use_options flag which is not supported with vendor-specific dhcp options.
+ '''
+ for key, value in iteritems(ib_spec):
+ if isinstance(module.params[key], list):
+ temp_dict = module.params[key][0]
+ if 'num' in temp_dict:
+ if temp_dict['num'] in (43, 124, 125):
+ del module.params[key][0]['use_option']
+ return ib_spec
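+
+# Illustrative sketch, not part of the module: DHCP options 43, 124 and 125
+# are vendor-specific, so for
+#   options: [{'num': 43, 'value': '01:02:03', 'use_option': True,
+#              'vendor_class': 'DHCP'}]
+# the function above deletes 'use_option' from the first options entry before
+# the payload is sent to WAPI; named or other numbered options are untouched.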
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ network=dict(required=True, aliases=['name', 'cidr'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ container=dict(type='bool', ib_req=True)
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # to get the argument ipaddr
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ network_type, ib_spec = check_ip_addr_type(obj_filter, ib_spec)
+
+ wapi = WapiModule(module)
+ # to check for vendor specific dhcp option
+ ib_spec = check_vendor_specific_dhcp_option(module, ib_spec)
+
+ result = wapi.run(network_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py
new file mode 100644
index 00000000..d13052b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network views
+description:
+ - Adds and/or removes instances of network view objects from
+ Infoblox NIOS servers. This module manages NIOS C(networkview) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of network view object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the name of the network view to add or remove from
+ the system. User can also update the name as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ aliases:
+ - network_view
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new network view
+ community.general.nios_network_view:
+ name: ansible
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for network view
+ community.general.nios_network_view:
+ name: ansible
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the network view
+ community.general.nios_network_view:
+ name: ansible
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an existing network view
+ community.general.nios_network_view:
+ name: {new_name: ansible-new, old_name: ansible}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NETWORK_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['network_view'], ib_req=True),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NETWORK_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py
new file mode 100644
index 00000000..bf2afd3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_nsgroup
+short_description: Configure InfoBlox DNS Nameserver Groups
+extends_documentation_fragment:
+- community.general.nios
+
+author:
+ - Erich Birngruber (@ebirn)
+ - Sumit Jaiswal (@sjaiswal)
+description:
+ - Adds and/or removes nameserver groups from Infoblox NIOS servers.
+ This module manages NIOS C(nsgroup) objects using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+options:
+ name:
+ description:
+ - Specifies the name of the NIOS nameserver group to be managed.
+ required: true
+ grid_primary:
+ description:
+ - This host is to be used as the primary server in this nameserver group. It must be a grid member.
+ This option is required when setting I(use_external_primary) to C(false).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ grid_secondaries:
+ description:
+ - Configures the list of grid member hosts that act as secondary nameservers.
+ This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ is_grid_default:
+ description:
+ - If set to C(True) this nsgroup will become the default nameserver group for new zones.
+ type: bool
+ required: false
+ default: false
+ use_external_primary:
+ description:
+ - This flag controls whether the group is using an external primary nameserver.
+ Note that modification of this field requires passing values for I(grid_secondaries) and I(external_primaries).
+ type: bool
+ required: false
+ default: false
+ external_primaries:
+ description:
+ - Configures a list of external nameservers (non-members of the grid).
+ This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver.
+ required: true
+ name:
+ description:
+ - Sets a label for the external nameserver.
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value.
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+ - Set a DNS TSIG key for the nameserver to secure zone transfers (AFXRs).
+ required: false
+ external_secondaries:
+ description:
+ - Configures a list of external secondary nameservers that are not members of the grid.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver.
+ required: true
+ name:
+ description:
+ - Sets a label for the external nameserver.
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value.
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+ - Set a DNS TSIG key for the nameserver to secure zone transfers (AFXRs).
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ choices: [present, absent]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Create simple infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Create infoblox nameserver group with external primaries
+ community.general.nios_nsgroup:
+ name: my-example-group
+ use_external_primary: true
+ comment: "this is my example nameserver group"
+ external_primaries: "{{ ext_nameservers }}"
+ grid_secondaries:
+ - name: infoblox-test.example.com
+ lead: True
+ preferred_primaries: "{{ ext_nameservers }}"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Delete infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
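+
+# The task below is an illustrative sketch and not part of the original
+# examples: it spells out the inline shape of an I(external_primaries) entry
+# with the TSIG suboptions documented above; the host names and key material
+# are hypothetical placeholders.
+- name: Create infoblox nameserver group with a TSIG-secured external primary
+ community.general.nios_nsgroup:
+ name: my-tsig-group
+ use_external_primary: true
+ grid_secondaries:
+ - name: infoblox-test.example.com
+ external_primaries:
+ - address: 192.0.2.10
+ name: ns1.example.com
+ tsig_key_name: example-key
+ tsig_key_alg: HMAC-SHA256
+ tsig_key: "dummyBase64KeyData=="
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local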
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NSGROUP
+
+
+# from infoblox documentation
+# Fields List
+# Field                 Type      Req  R/O  Base  Search
+# comment               String    N    N    Y     : = ~
+# extattrs              Extattr   N    N    N     ext
+# external_primaries    [struct]  N    N    N     N/A
+# external_secondaries  [struct]  N    N    N     N/A
+# grid_primary          [struct]  N    N    N     N/A
+# grid_secondaries      [struct]  N    N    N     N/A
+# is_grid_default       Bool      N    N    N     N/A
+# is_multimaster        Bool      N    Y    N     N/A
+# name                  String    Y    N    Y     : = ~
+# use_external_primary  Bool      N    N    N     N/A
+
+
+def main():
+ '''entrypoint for module execution.'''
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ # cleanup tsig fields
+ def clean_tsig(ext):
+ if 'tsig_key' in ext and not ext['tsig_key']:
+ del ext['tsig_key']
+ if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']:
+ del ext['tsig_key_name']
+ if 'tsig_key' not in ext and 'tsig_key_alg' in ext:
+ del ext['tsig_key_alg']
+
+ def clean_grid_member(member):
+ if member['preferred_primaries']:
+ for ext in member['preferred_primaries']:
+ clean_tsig(ext)
+ if member['enable_preferred_primaries'] is False:
+ del member['enable_preferred_primaries']
+ del member['preferred_primaries']
+ if member['lead'] is False:
+ del member['lead']
+ if member['grid_replicate'] is False:
+ del member['grid_replicate']
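+
+ # Illustrative sketch of the cleanup helpers above (comments only, not
+ # executed): given a member dict populated from the argument spec defaults,
+ # member = {'name': 'ns1.example.com', 'enable_preferred_primaries': False,
+ # 'grid_replicate': False, 'lead': False, 'preferred_primaries': [],
+ # 'stealth': False}
+ # clean_grid_member(member) drops the False/empty bookkeeping fields so only
+ # {'name': 'ns1.example.com', 'stealth': False} remains, and clean_tsig()
+ # likewise strips empty or unused tsig_* fields from external server dicts.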
+
+ def ext_primaries_transform(module):
+ if module.params['external_primaries']:
+ for ext in module.params['external_primaries']:
+ clean_tsig(ext)
+ return module.params['external_primaries']
+
+ def ext_secondaries_transform(module):
+ if module.params['external_secondaries']:
+ for ext in module.params['external_secondaries']:
+ clean_tsig(ext)
+ return module.params['external_secondaries']
+
+ def grid_primary_preferred_transform(module):
+ for member in module.params['grid_primary']:
+ clean_grid_member(member)
+ return module.params['grid_primary']
+
+ def grid_secondaries_preferred_primaries_transform(module):
+ for member in module.params['grid_secondaries']:
+ clean_grid_member(member)
+ return module.params['grid_secondaries']
+
+ extserver_spec = dict(
+ address=dict(required=True, ib_req=True),
+ name=dict(required=True, ib_req=True),
+ stealth=dict(type='bool', default=False),
+ tsig_key=dict(no_log=True),
+ tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'),
+ tsig_key_name=dict(required=True)
+ )
+
+ memberserver_spec = dict(
+ name=dict(required=True, ib_req=True),
+ enable_preferred_primaries=dict(type='bool', default=False),
+ grid_replicate=dict(type='bool', default=False),
+ lead=dict(type='bool', default=False),
+ preferred_primaries=dict(type='list', elements='dict', options=extserver_spec, default=[]),
+ stealth=dict(type='bool', default=False),
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ grid_primary=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_primary_preferred_transform),
+ grid_secondaries=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_secondaries_preferred_primaries_transform),
+ external_primaries=dict(type='list', elements='dict', options=extserver_spec, transform=ext_primaries_transform),
+ external_secondaries=dict(type='list', elements='dict', options=extserver_spec,
+ transform=ext_secondaries_transform),
+ is_grid_default=dict(type='bool', default=False),
+ use_external_primary=dict(type='bool', default=False),
+ extattrs=dict(),
+ comment=dict(),
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NSGROUP, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py
new file mode 100644
index 00000000..96fb175b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_ptr_record
+author: "Trebuchet Clement (@clementtrebuchet)"
+short_description: Configure Infoblox NIOS PTR records
+description:
+ - Adds and/or removes instances of PTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:ptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - The name of the DNS PTR record in FQDN format to add or remove from
+ the system.
+ The field is required only for a PTR object in a Forward Mapping Zone.
+ required: false
+ view:
+ description:
+ - Sets the DNS view to associate this PTR record with. The DNS
+ view must already be configured on the system.
+ required: false
+ aliases:
+ - dns_view
+ ipv4addr:
+ description:
+ - The IPv4 Address of the record. Mutually exclusive with I(ipv6addr).
+ aliases:
+ - ipv4
+ ipv6addr:
+ description:
+ - The IPv6 Address of the record. Mutually exclusive with I(ipv4addr).
+ aliases:
+ - ipv6
+ ptrdname:
+ description:
+ - The domain name of the DNS PTR record in FQDN format.
+ ttl:
+ description:
+ - Time To Live (TTL) value for the record.
+ A 32-bit unsigned integer that represents the duration, in seconds, that the record is valid (cached).
+ Zero indicates that the record should not be cached.
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance. Maximum 256 characters.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Create a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Delete a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_PTR_RECORD
+
+
+def main():
+ # Module entry point
+ ib_spec = dict(
+ name=dict(required=False),
+ view=dict(aliases=['dns_view'], ib_req=True),
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+ ptrdname=dict(ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ mutually_exclusive = [('ipv4addr', 'ipv6addr')]
+ required_one_of = [
+ ['ipv4addr', 'ipv6addr']
+ ]
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ required_one_of=required_one_of)
+
+ if module.params['ipv4addr']:
+ del ib_spec['ipv6addr']
+ elif module.params['ipv6addr']:
+ del ib_spec['ipv4addr']
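+
+ # Only the address family actually supplied stays in ib_spec: the unused
+ # ipv4addr/ipv6addr key is dropped above so it is not considered when
+ # wapi.run() builds and matches the record:ptr object.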
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_PTR_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py
new file mode 100644
index 00000000..c519c191
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_srv_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS SRV records
+description:
+ - Adds and/or removes instances of SRV record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:srv) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this SRV record with. The DNS
+ view must already be configured on the system.
+ default: default
+ aliases:
+ - dns_view
+ port:
+ description:
+ - Configures the port (0-65535) of this SRV record.
+ priority:
+ description:
+ - Configures the priority (0-65535) for this SRV record.
+ target:
+ description:
+ - Configures the target FQDN for this SRV record.
+ weight:
+ description:
+ - Configures the weight (0-65535) for this SRV record.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this SRV record.
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an SRV record from the system
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_SRV_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ port=dict(type='int', ib_req=True),
+ priority=dict(type='int', ib_req=True),
+ target=dict(ib_req=True),
+ weight=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_SRV_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py
new file mode 100644
index 00000000..0dcdbadb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_txt_record
+author: "Corey Wanless (@coreywan)"
+short_description: Configure Infoblox NIOS txt records
+description:
+ - Adds and/or removes instances of txt record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:txt) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this txt record with. The DNS
+ view must already be configured on the system.
+ default: default
+ aliases:
+ - dns_view
+ text:
+ description:
+ - Text associated with the record. It can contain up to 255 bytes
+ per substring, up to a total of 512 bytes. To enter leading,
+ trailing, or embedded spaces in the text, add quotes around the
+ text to preserve the spaces.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this txt record.
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+ - name: Ensure a text Record Exists
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: present
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+
+ - name: Ensure a text Record does not exist
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: absent
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
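+
+ # The task below is an illustrative sketch and not part of the original
+ # examples: it demonstrates the quoting described for the I(text) option
+ # above, preserving leading and embedded spaces in the record data.
+ - name: Ensure a text Record with embedded spaces exists
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: '" spaced out text "'
+ state: present
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin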
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+ text=dict(ib_req=True),
+ ttl=dict(type='int'),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:txt', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py
new file mode 100644
index 00000000..8a7607fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_zone
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS zones
+description:
+ - Adds and/or removes instances of DNS zone objects from
+ Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ fqdn:
+ description:
+ - Specifies the qualified domain name to either add or remove from
+ the NIOS instance based on the configured C(state) value.
+ required: true
+ aliases:
+ - name
+ view:
+ description:
+ - Configures the DNS view name for the configured resource. The
+ specified DNS view must already exist on the running NIOS instance
+ prior to configuring zones.
+ default: default
+ aliases:
+ - dns_view
+ grid_primary:
+ description:
+ - Configures the grid primary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid primary server
+ required: true
+ grid_secondaries:
+ description:
+ - Configures the grid secondary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid secondary server
+ required: true
+ ns_group:
+ description:
+ - Configures the name server group for this zone. Name server group is
+ mutually exclusive with grid primary and grid secondaries.
+ restart_if_needed:
+ description:
+ - If set to true, causes the NIOS DNS service to restart and load the
+ new zone configuration.
+ type: bool
+ zone_format:
+ description:
+ - Create an authoritative Reverse-Mapping Zone, which is an area of network
+ space for which one or more name servers (primary and secondary) have the
+ responsibility to respond to address-to-name queries. It supports
+ reverse-mapping zones for both IPv4 and IPv6 addresses.
+ default: FORWARD
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a zone on the system using grid primary and secondaries
+ community.general.nios_zone:
+ name: ansible.com
+ grid_primary:
+ - name: gridprimary.grid.com
+ grid_secondaries:
+ - name: gridsecondary1.grid.com
+ - name: gridsecondary2.grid.com
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a zone on the system using a name server group
+ community.general.nios_zone:
+ name: ansible.com
+ ns_group: examplensg
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV6 zone format
+ community.general.nios_zone:
+ name: 100::1/128
+ zone_format: IPV6
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment and ext attributes for an existing zone
+ community.general.nios_zone:
+ name: ansible.com
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns zone
+ community.general.nios_zone:
+ name: ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the reverse mapping dns zone from the system with IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_ZONE
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ grid_spec = dict(
+ name=dict(required=True),
+ )
+
+ ib_spec = dict(
+ fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
+ zone_format=dict(default='FORWARD', ib_req=False),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ grid_primary=dict(type='list', elements='dict', options=grid_spec),
+ grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
+ ns_group=dict(),
+ restart_if_needed=dict(type='bool'),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['ns_group', 'grid_primary'],
+ ['ns_group', 'grid_secondaries']
+ ])
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_ZONE, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py
new file mode 100644
index 00000000..60626294
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py
@@ -0,0 +1,1115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nmcli
+author:
+- Chris Long (@alcamie101)
+short_description: Manage Networking
+requirements:
+- nmcli
+description:
+ - 'Manage network devices. Create, modify, and manage various connection and device types, e.g. ethernet, teams, bonds, vlans, etc.'
+ - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
+ - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
+ - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager'
+ - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
+options:
+ state:
+ description:
+ - Whether the device should exist or not, taking action if the state is different from what is stated.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ autoconnect:
+ description:
+ - Whether the connection should start on boot.
+ - Whether the connection profile can be automatically activated
+ type: bool
+ default: yes
+ conn_name:
+ description:
+ - The name used to call the connection. Pattern is <type>[-<ifname>][-<num>].
+ type: str
+ required: true
+ ifname:
+ description:
+ - The interface to bind the connection to.
+ - The connection will only be applicable to this interface name.
+ - A special value of C('*') can be used for interface-independent connections.
+ - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
+ - This parameter defaults to C(conn_name) when left unset.
+ type: str
+ type:
+ description:
+ - This is the type of device or network connection that you wish to create or modify.
+ - Type C(generic) is added in Ansible 2.5.
+ type: str
+ choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, ipip, sit, team, team-slave, vlan, vxlan ]
+ mode:
+ description:
+ - This is the type of device or network connection that you wish to create for a bond, team or bridge.
+ type: str
+ choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
+ default: balance-rr
+ master:
+ description:
+ - Master interface (given as ifname, connection UUID, or conn_name) of the bridge, team, or bond master connection profile.
+ type: str
+ ip4:
+ description:
+ - The IPv4 address to this interface.
+ - Use the format C(192.0.2.24/24).
+ type: str
+ gw4:
+ description:
+ - The IPv4 gateway for this interface.
+ - Use the format C(192.0.2.1).
+ type: str
+ dns4:
+ description:
+ - A list of up to 3 DNS servers.
+ - IPv4 format, e.g. to add two IPv4 DNS server addresses, use C(192.0.2.53 198.51.100.53).
+ elements: str
+ type: list
+ dns4_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ ip6:
+ description:
+ - The IPv6 address to this interface.
+ - Use the format C(abbe::cafe).
+ type: str
+ gw6:
+ description:
+ - The IPv6 gateway for this interface.
+ - Use the format C(2001:db8::1).
+ type: str
+ dns6:
+ description:
+ - A list of up to 3 DNS servers.
+ - IPv6 format, e.g. to add two IPv6 DNS server addresses, use C(2001:4860:4860::8888 2001:4860:4860::8844).
+ elements: str
+ type: list
+ dns6_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ mtu:
+ description:
+ - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
+ - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
+ - This parameter defaults to C(1500) when unset.
+ type: int
+ dhcp_client_id:
+ description:
+ - DHCP Client Identifier sent to the DHCP server.
+ type: str
+ primary:
+ description:
+ - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
+ type: str
+ miimon:
+ description:
+ - This is only used with bond - miimon.
+ - This parameter defaults to C(100) when unset.
+ type: int
+ downdelay:
+ description:
+ - This is only used with bond - downdelay.
+ type: int
+ updelay:
+ description:
+ - This is only used with bond - updelay.
+ type: int
+ arp_interval:
+ description:
+ - This is only used with bond - ARP interval.
+ type: int
+ arp_ip_target:
+ description:
+ - This is only used with bond - ARP IP target.
+ type: str
+ stp:
+ description:
+ - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
+ type: bool
+ default: yes
+ priority:
+ description:
+ - This is only used with 'bridge' - sets STP priority.
+ type: int
+ default: 128
+ forwarddelay:
+ description:
+ - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+ type: int
+ default: 15
+ hellotime:
+ description:
+ - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+ type: int
+ default: 2
+ maxage:
+ description:
+ - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+ type: int
+ default: 20
+ ageingtime:
+ description:
+ - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+ type: int
+ default: 300
+ mac:
+ description:
+ - This is only used with bridge - MAC address of the bridge.
+ - Note this requires a recent kernel feature, originally introduced in the 3.15 upstream kernel.
+ type: str
+ slavepriority:
+ description:
+ - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+ type: int
+ default: 32
+ path_cost:
+ description:
+ - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
+ type: int
+ default: 100
+ hairpin:
+ description:
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+ frame was received on.
+ type: bool
+ default: yes
+ vlanid:
+ description:
+ - This is only used with VLAN - VLAN ID in range <0-4095>.
+ type: int
+ vlandev:
+ description:
+ - This is only used with VLAN - parent device this VLAN is on, can use ifname.
+ type: str
+ flags:
+ description:
+ - This is only used with VLAN - flags.
+ type: str
+ ingress:
+ description:
+ - This is only used with VLAN - VLAN ingress priority mapping.
+ type: str
+ egress:
+ description:
+ - This is only used with VLAN - VLAN egress priority mapping.
+ type: str
+ vxlan_id:
+ description:
+ - This is only used with VXLAN - VXLAN ID.
+ type: int
+ vxlan_remote:
+ description:
+ - This is only used with VXLAN - VXLAN destination IP address.
+ type: str
+ vxlan_local:
+ description:
+ - This is only used with VXLAN - VXLAN local IP address.
+ type: str
+ ip_tunnel_dev:
+ description:
+ - This is used with IPIP/SIT - parent device of this IPIP/SIT tunnel, can use ifname.
+ type: str
+ ip_tunnel_remote:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT destination IP address.
+ type: str
+ ip_tunnel_local:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT local IP address.
+ type: str
+'''
+
+EXAMPLES = r'''
+# These examples are using the following inventory:
+#
+# ## Directory layout:
+#
+# ```
+# |_/inventory/cloud-hosts
+# | /group_vars/openstack-stage.yml
+# | /host_vars/controller-01.openstack.host.com
+# | /host_vars/controller-02.openstack.host.com
+# |_/playbook/library/nmcli.py
+# | /playbook-add.yml
+# | /playbook-del.yml
+# ```
+#
+# ## inventory examples
+# ### groups_vars
+# ```yml
+# ---
+# #devops_os_define_network
+# storage_gw: "192.0.2.254"
+# external_gw: "198.51.100.254"
+# tenant_gw: "203.0.113.254"
+#
+# #Team vars
+# nmcli_team:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# nmcli_team_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #bond vars
+# nmcli_bond:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# mode: balance-rr
+# nmcli_bond_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #ethernet vars
+# nmcli_ethernet:
+# - conn_name: em1
+# ifname: em1
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: em2
+# ifname: em2
+# ip4: '{{ tenant_ip1 }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: p2p1
+# ifname: p2p1
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# - conn_name: p2p2
+# ifname: p2p2
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# ```
+#
+# ### host_vars
+# ```yml
+# ---
+# storage_ip: "192.0.2.91/23"
+# external_ip: "198.51.100.23/21"
+# tenant_ip: "203.0.113.77/23"
+# ```
+
+
+
+## playbook-add.yml example
+
+---
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Install needed network manager libs
+ ansible.builtin.package:
+ name:
+ - NetworkManager-libnm
+ - nm-connection-editor
+ - libsemanage-python
+ - policycoreutils-python
+ state: present
+
+##### Working with all cloud nodes - Teaming
+ - name: Try nmcli add team - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team }}'
+
+ - name: Try nmcli add teams-slave
+ community.general.nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team_slave }}'
+
+###### Working with all cloud nodes - Bonding
+ - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+ community.general.nmcli:
+ type: bond
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ mode: '{{ item.mode }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond }}'
+
+ - name: Try nmcli add bond-slave
+ community.general.nmcli:
+ type: bond-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
+
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+
+ - name: Add a Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: yes
+
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
+
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
+
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ ifname: '*'
+ state: present
+
+ - name: Change the property of a setting e.g. MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
+
+ - name: Add VxLan
+ community.general.nmcli:
+ type: vxlan
+ conn_name: vxlan_test1
+ vxlan_id: 16
+ vxlan_local: 192.168.1.2
+ vxlan_remote: 192.168.1.5
+
+ - name: Add ipip
+ community.general.nmcli:
+ type: ipip
+ conn_name: ipip_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add sit
+ community.general.nmcli:
+ type: sit
+ conn_name: sit_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
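+
+ # The task below is an illustrative sketch and not part of the original
+ # examples: it combines the bridge-specific options documented above
+ # (here C(stp)) with a static IPv4 address.
+ - name: Add a bridge with spanning tree disabled
+ community.general.nmcli:
+ type: bridge
+ conn_name: br0
+ ifname: br0
+ ip4: 192.0.2.200/24
+ stp: no
+ state: present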
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure. The following list of status codes may be
+# returned:
+#
+# - 0 Success - indicates the operation succeeded
+# - 1 Unknown or unspecified error
+# - 2 Invalid user input, wrong nmcli invocation
+# - 3 Timeout expired (see --wait option)
+# - 4 Connection activation failed
+# - 5 Connection deactivation failed
+# - 6 Disconnecting device failed
+# - 7 Connection deletion failed
+# - 8 NetworkManager is not running
+# - 9 nmcli and NetworkManager versions mismatch
+# - 10 Connection, device, or access point does not exist.
+'''
+
+RETURN = r"""#
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+import re
+
+
+class NmcliModuleError(Exception):
+ pass
+
+
+class Nmcli(object):
+ """
+ This is the generic nmcli manipulation class that is subclassed based on platform.
+ A subclass may wish to override the following action methods:
+ - create_connection()
+ - delete_connection()
+ - modify_connection()
+ - show_connection()
+ - up_connection()
+ - down_connection()
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.autoconnect = module.params['autoconnect']
+ self.conn_name = module.params['conn_name']
+ self.master = module.params['master']
+ self.ifname = module.params['ifname']
+ self.type = module.params['type']
+ self.ip4 = module.params['ip4']
+ self.gw4 = module.params['gw4']
+ self.dns4 = module.params['dns4']
+ self.dns4_search = module.params['dns4_search']
+ self.ip6 = module.params['ip6']
+ self.gw6 = module.params['gw6']
+ self.dns6 = module.params['dns6']
+ self.dns6_search = module.params['dns6_search']
+ self.mtu = module.params['mtu']
+ self.stp = module.params['stp']
+ self.priority = module.params['priority']
+ self.mode = module.params['mode']
+ self.miimon = module.params['miimon']
+ self.primary = module.params['primary']
+ self.downdelay = module.params['downdelay']
+ self.updelay = module.params['updelay']
+ self.arp_interval = module.params['arp_interval']
+ self.arp_ip_target = module.params['arp_ip_target']
+ self.slavepriority = module.params['slavepriority']
+ self.forwarddelay = module.params['forwarddelay']
+ self.hellotime = module.params['hellotime']
+ self.maxage = module.params['maxage']
+ self.ageingtime = module.params['ageingtime']
+ self.hairpin = module.params['hairpin']
+ self.path_cost = module.params['path_cost']
+ self.mac = module.params['mac']
+ self.vlanid = module.params['vlanid']
+ self.vlandev = module.params['vlandev']
+ self.flags = module.params['flags']
+ self.ingress = module.params['ingress']
+ self.egress = module.params['egress']
+ self.vxlan_id = module.params['vxlan_id']
+ self.vxlan_local = module.params['vxlan_local']
+ self.vxlan_remote = module.params['vxlan_remote']
+ self.ip_tunnel_dev = module.params['ip_tunnel_dev']
+ self.ip_tunnel_local = module.params['ip_tunnel_local']
+ self.ip_tunnel_remote = module.params['ip_tunnel_remote']
+ self.nmcli_bin = self.module.get_bin_path('nmcli', True)
+ self.dhcp_client_id = module.params['dhcp_client_id']
+
+ if self.ip4:
+ self.ipv4_method = 'manual'
+ else:
+ # supported values for 'ipv4.method': [auto, link-local, manual, shared, disabled]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv4_method = None
+
+ if self.ip6:
+ self.ipv6_method = 'manual'
+ else:
+ # supported values for 'ipv6.method': [ignore, auto, dhcp, link-local, manual, shared]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv6_method = None
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None):
+ if isinstance(cmd, list):
+ cmd = [to_text(item) for item in cmd]
+ else:
+ cmd = to_text(cmd)
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def connection_options(self, detect_change=False):
+ # Options common to multiple connection types.
+ options = {
+ 'connection.autoconnect': self.autoconnect,
+ }
+
+ # IP address options.
+ if self.ip_conn_type:
+ options.update({
+ 'ipv4.addresses': self.ip4,
+ 'ipv4.dhcp-client-id': self.dhcp_client_id,
+ 'ipv4.dns': self.dns4,
+ 'ipv4.dns-search': self.dns4_search,
+ 'ipv4.gateway': self.gw4,
+ 'ipv4.method': self.ipv4_method,
+ 'ipv6.addresses': self.ip6,
+ 'ipv6.dns': self.dns6,
+ 'ipv6.dns-search': self.dns6_search,
+ 'ipv6.gateway': self.gw6,
+ 'ipv6.method': self.ipv6_method,
+ })
+
+ # Layer 2 options.
+ if self.mac_conn_type:
+ options.update({self.mac_setting: self.mac})
+
+ if self.mtu_conn_type:
+ options.update({self.mtu_setting: self.mtu})
+
+ # Connections that can have a master.
+ if self.slave_conn_type:
+ options.update({
+ 'connection.master': self.master,
+ })
+
+ # Options specific to a connection type.
+ if self.type == 'bond':
+ options.update({
+ 'arp-interval': self.arp_interval,
+ 'arp-ip-target': self.arp_ip_target,
+ 'downdelay': self.downdelay,
+ 'miimon': self.miimon,
+ 'mode': self.mode,
+ 'primary': self.primary,
+ 'updelay': self.updelay,
+ })
+ elif self.type == 'bridge':
+ options.update({
+ 'bridge.ageing-time': self.ageingtime,
+ 'bridge.forward-delay': self.forwarddelay,
+ 'bridge.hello-time': self.hellotime,
+ 'bridge.max-age': self.maxage,
+ 'bridge.priority': self.priority,
+ 'bridge.stp': self.stp,
+ })
+ elif self.type == 'bridge-slave':
+ options.update({
+ 'bridge-port.path-cost': self.path_cost,
+ 'bridge-port.hairpin-mode': self.hairpin,
+ 'bridge-port.priority': self.slavepriority,
+ })
+ elif self.tunnel_conn_type:
+ options.update({
+ 'ip-tunnel.local': self.ip_tunnel_local,
+ 'ip-tunnel.mode': self.type,
+ 'ip-tunnel.parent': self.ip_tunnel_dev,
+ 'ip-tunnel.remote': self.ip_tunnel_remote,
+ })
+ elif self.type == 'vlan':
+ options.update({
+ 'vlan.id': self.vlanid,
+ 'vlan.parent': self.vlandev,
+ })
+ elif self.type == 'vxlan':
+ options.update({
+ 'vxlan.id': self.vxlan_id,
+ 'vxlan.local': self.vxlan_local,
+ 'vxlan.remote': self.vxlan_remote,
+ })
+
+ # Convert settings values based on the situation.
+ for setting, value in options.items():
+ setting_type = self.settings_type(setting)
+ convert_func = None
+ if setting_type is bool:
+ # Convert all bool options to yes/no.
+ convert_func = self.bool_to_string
+ if detect_change:
+ if setting in ('vlan.id', 'vxlan.id'):
+ # Convert VLAN/VXLAN IDs to text when detecting changes.
+ convert_func = to_text
+ elif setting == self.mtu_setting:
+ # MTU is 'auto' by default when detecting changes.
+ convert_func = self.mtu_to_string
+ elif setting_type is list:
+ # Convert lists to strings for nmcli create/modify commands.
+ convert_func = self.list_to_string
+
+ if callable(convert_func):
+ options[setting] = convert_func(options[setting])
+
+ return options
+
+ @property
+ def ip_conn_type(self):
+ return self.type in (
+ 'bond',
+ 'bridge',
+ 'ethernet',
+ 'generic',
+ 'team',
+ 'vlan',
+ )
+
+ @property
+ def mac_conn_type(self):
+ return self.type == 'bridge'
+
+ @property
+ def mac_setting(self):
+ if self.type == 'bridge':
+ return 'bridge.mac-address'
+ else:
+ return '802-3-ethernet.cloned-mac-address'
+
+ @property
+ def mtu_conn_type(self):
+ return self.type in (
+ 'ethernet',
+ 'team-slave',
+ )
+
+ @property
+ def mtu_setting(self):
+ return '802-3-ethernet.mtu'
+
+ @staticmethod
+ def mtu_to_string(mtu):
+ if not mtu:
+ return 'auto'
+ else:
+ return to_text(mtu)
+
+ @property
+ def slave_conn_type(self):
+ return self.type in (
+ 'bond-slave',
+ 'bridge-slave',
+ 'team-slave',
+ )
+
+ @property
+ def tunnel_conn_type(self):
+ return self.type in (
+ 'ipip',
+ 'sit',
+ )
+
+ @staticmethod
+ def bool_to_string(boolean):
+ if boolean:
+ return "yes"
+ else:
+ return "no"
+
+ @staticmethod
+ def list_to_string(lst):
+ return ",".join(lst or [""])
+
+ @staticmethod
+ def settings_type(setting):
+ if setting in ('bridge.stp',
+ 'bridge-port.hairpin-mode',
+ 'connection.autoconnect'):
+ return bool
+ elif setting in ('ipv4.dns',
+ 'ipv4.dns-search',
+ 'ipv6.dns',
+ 'ipv6.dns-search'):
+ return list
+ return str
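+
+ # Illustrative examples of the conversion helpers above (comments only):
+ # settings_type('bridge.stp') is bool, so bool_to_string(True) -> 'yes';
+ # settings_type('ipv4.dns') is list, so
+ # list_to_string(['192.0.2.53', '198.51.100.53']) -> '192.0.2.53,198.51.100.53';
+ # mtu_to_string(None) -> 'auto' and mtu_to_string(9000) -> '9000'.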
+
+ def list_connection_info(self):
+ cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ raise NmcliModuleError(err)
+ return out.splitlines()
+
+ def connection_exists(self):
+ return self.conn_name in self.list_connection_info()
+
+ def down_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+ return self.execute_command(cmd)
+
+ def connection_update(self, nmcli_command):
+ if nmcli_command == 'create':
+ cmd = [self.nmcli_bin, 'con', 'add', 'type']
+ if self.tunnel_conn_type:
+ cmd.append('ip-tunnel')
+ else:
+ cmd.append(self.type)
+ cmd.append('con-name')
+ elif nmcli_command == 'modify':
+ cmd = [self.nmcli_bin, 'con', 'modify']
+ else:
+ self.module.fail_json(msg="Invalid nmcli command.")
+ cmd.append(self.conn_name)
+
+ # Use connection name as default for interface name on creation.
+ if nmcli_command == 'create' and self.ifname is None:
+ ifname = self.conn_name
+ else:
+ ifname = self.ifname
+
+ options = {
+ 'connection.interface-name': ifname,
+ }
+
+ options.update(self.connection_options())
+
+ # Constructing the command.
+ for key, value in options.items():
+ if value is not None:
+ cmd.extend([key, value])
+
+ return self.execute_command(cmd)
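+
+ # Illustrative command shape (not executed here): creating an ethernet
+ # profile named my-eth1 runs roughly
+ # nmcli con add type ethernet con-name my-eth1 \
+ # connection.interface-name my-eth1 connection.autoconnect yes ...
+ # with one key/value pair appended per option that is not None.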
+
+ def create_connection(self):
+ status = self.connection_update('create')
+ if self.create_connection_up:
+ status = self.up_connection()
+ return status
+
+ @property
+ def create_connection_up(self):
+ if self.type in ('bond', 'ethernet'):
+ if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ elif self.type == 'team':
+ if (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ return False
+
+ def remove_connection(self):
+ # self.down_connection()
+ cmd = [self.nmcli_bin, 'con', 'del', self.conn_name]
+ return self.execute_command(cmd)
+
+ def modify_connection(self):
+ return self.connection_update('modify')
+
+ def show_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'show', self.conn_name]
+
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ raise NmcliModuleError(err)
+
+ p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$')
+
+ conn_info = dict()
+ for line in out.splitlines():
+ pair = line.split(':', 1)
+ key = pair[0].strip()
+ key_type = self.settings_type(key)
+ if key and len(pair) > 1:
+ raw_value = pair[1].lstrip()
+ if raw_value == '--':
+ conn_info[key] = None
+ elif key == 'bond.options':
+ # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax.
+ opts = raw_value.split(',')
+ for opt in opts:
+ alias_pair = opt.split('=', 1)
+ if len(alias_pair) > 1:
+ alias_key = alias_pair[0]
+ alias_value = alias_pair[1]
+ conn_info[alias_key] = alias_value
+ elif key_type == list:
+ conn_info[key] = [s.strip() for s in raw_value.split(',')]
+ else:
+ m_enum = p_enum_value.match(raw_value)
+ if m_enum is not None:
+ value = m_enum.group(1)
+ else:
+ value = raw_value
+ conn_info[key] = value
+
+ return conn_info
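+
+ # Illustrative sketch of the parsing above (comments only): a line such as
+ # 'connection.autoconnect:yes' yields conn_info['connection.autoconnect'] = 'yes',
+ # '--' values become None, 'ipv4.dns:192.0.2.53,198.51.100.53' becomes a
+ # list of addresses, and an enum value like '1 (REORDER_HEADERS)' is reduced
+ # to '1' by p_enum_value.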
+
+ def _compare_conn_params(self, conn_info, options):
+ # See nmcli(1) for details
+ param_alias = {
+ 'type': 'connection.type',
+ 'con-name': 'connection.id',
+ 'autoconnect': 'connection.autoconnect',
+ 'ifname': 'connection.interface-name',
+ 'mac': self.mac_setting,
+ 'master': 'connection.master',
+ 'slave-type': 'connection.slave-type',
+ }
+
+ changed = False
+ diff_before = dict()
+ diff_after = dict()
+
+ for key, value in options.items():
+ if not value:
+ continue
+
+ if key in conn_info:
+ current_value = conn_info[key]
+ elif key in param_alias:
+ real_key = param_alias[key]
+ if real_key in conn_info:
+ current_value = conn_info[real_key]
+ else:
+ # alias parameter does not exist
+ current_value = None
+ else:
+ # parameter does not exist
+ current_value = None
+
+ if isinstance(current_value, list) and isinstance(value, list):
+ # compare values between two lists
+ if sorted(current_value) != sorted(value):
+ changed = True
+ else:
+ if current_value != to_text(value):
+ changed = True
+
+ diff_before[key] = current_value
+ diff_after[key] = value
+
+ diff = {
+ 'before': diff_before,
+ 'after': diff_after,
+ }
+ return (changed, diff)
+
+ def is_connection_changed(self):
+ options = {
+ 'connection.interface-name': self.ifname,
+ }
+ options.update(self.connection_options(detect_change=True))
+ return self._compare_conn_params(self.show_connection(), options)
+
+
+def main():
+ # Parse module arguments
+ module = AnsibleModule(
+ argument_spec=dict(
+ autoconnect=dict(type='bool', default=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ conn_name=dict(type='str', required=True),
+ master=dict(type='str'),
+ ifname=dict(type='str'),
+ type=dict(type='str',
+ choices=['bond', 'bond-slave', 'bridge', 'bridge-slave', 'ethernet', 'generic', 'ipip', 'sit', 'team', 'team-slave', 'vlan', 'vxlan']),
+ ip4=dict(type='str'),
+ gw4=dict(type='str'),
+ dns4=dict(type='list', elements='str'),
+ dns4_search=dict(type='list', elements='str'),
+ dhcp_client_id=dict(type='str'),
+ ip6=dict(type='str'),
+ gw6=dict(type='str'),
+ dns6=dict(type='list', elements='str'),
+ dns6_search=dict(type='list', elements='str'),
+ # Bond Specific vars
+ mode=dict(type='str', default='balance-rr',
+ choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
+ miimon=dict(type='int'),
+ downdelay=dict(type='int'),
+ updelay=dict(type='int'),
+ arp_interval=dict(type='int'),
+ arp_ip_target=dict(type='str'),
+ primary=dict(type='str'),
+ # general usage
+ mtu=dict(type='int'),
+ mac=dict(type='str'),
+ # bridge specific vars
+ stp=dict(type='bool', default=True),
+ priority=dict(type='int', default=128),
+ slavepriority=dict(type='int', default=32),
+ forwarddelay=dict(type='int', default=15),
+ hellotime=dict(type='int', default=2),
+ maxage=dict(type='int', default=20),
+ ageingtime=dict(type='int', default=300),
+ hairpin=dict(type='bool', default=True),
+ path_cost=dict(type='int', default=100),
+ # vlan specific vars
+ vlanid=dict(type='int'),
+ vlandev=dict(type='str'),
+ flags=dict(type='str'),
+ ingress=dict(type='str'),
+ egress=dict(type='str'),
+ # vxlan specific vars
+ vxlan_id=dict(type='int'),
+ vxlan_local=dict(type='str'),
+ vxlan_remote=dict(type='str'),
+ # ip-tunnel specific vars
+ ip_tunnel_dev=dict(type='str'),
+ ip_tunnel_local=dict(type='str'),
+ ip_tunnel_remote=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ nmcli = Nmcli(module)
+
+ (rc, out, err) = (None, '', '')
+ result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
+
+ # check for issues
+ if nmcli.conn_name is None:
+ nmcli.module.fail_json(msg="Please specify a name for the connection")
+ # team-slave checks
+ if nmcli.type == 'team-slave':
+ if nmcli.master is None:
+ nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
+ if nmcli.ifname is None:
+ nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
+
+ try:
+ if nmcli.state == 'absent':
+ if nmcli.connection_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nmcli.down_connection()
+ (rc, out, err) = nmcli.remove_connection()
+ if rc != 0:
+ module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+ elif nmcli.state == 'present':
+ if nmcli.connection_exists():
+ changed, diff = nmcli.is_connection_changed()
+ if module._diff:
+ result['diff'] = diff
+
+ if changed:
+ # modify connection (note: this function is check mode aware)
+ # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
+ result['Exists'] = 'Connections do exist so we are modifying them'
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.modify_connection()
+ else:
+ result['Exists'] = 'Connections already exist and no changes made'
+ if module.check_mode:
+ module.exit_json(changed=False, **result)
+ if not nmcli.connection_exists():
+ result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.create_connection()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+ except NmcliModuleError as e:
+ module.fail_json(name=nmcli.conn_name, msg=str(e))
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py
new file mode 100644
index 00000000..9d4a5186
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+
+# (c) 2016, Marcin Skarbek <github@skarbek.name>
+# (c) 2016, Andreas Olsson <andreas@arrakis.se>
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+#
+# This module was ported from https://github.com/mskarbek/ansible-nsupdate
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nsupdate
+
+short_description: Manage DNS records
+description:
+ - Create, update and remove DNS records using DDNS updates
+requirements:
+ - dnspython
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ description:
+ - Manage DNS record.
+ choices: ['present', 'absent']
+ default: 'present'
+ server:
+ description:
+ - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
+ required: true
+ port:
+ description:
+ - Use this TCP port when connecting to C(server).
+ default: 53
+ key_name:
+ description:
+ - Use TSIG key name to authenticate against DNS C(server)
+ key_secret:
+ description:
+ - Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
+ key_algorithm:
+ description:
+ - Specify key algorithm used by C(key_secret).
+ choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
+ 'hmac-sha512']
+ default: 'hmac-md5'
+ zone:
+ description:
+ - DNS record will be modified on this C(zone).
+ - When omitted, DNS will be queried to attempt to find the correct zone.
+ - Starting with Ansible 2.7 this parameter is optional.
+ record:
+ description:
+ - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
+ required: true
+ type:
+ description:
+ - Sets the record type.
+ default: 'A'
+ ttl:
+ description:
+ - Sets the record TTL.
+ default: 3600
+ value:
+ description:
+ - Sets the record value.
+ protocol:
+ description:
+ - Sets the transport protocol (TCP or UDP). TCP is the recommended and more robust option.
+ default: 'tcp'
+ choices: ['tcp', 'udp']
+'''
+
+EXAMPLES = '''
+- name: Add or modify ansible.example.org A to 192.168.1.1
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: "192.168.1.1"
+
+- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
+
+- name: Remove puppet.example.org CNAME
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "puppet"
+ type: "CNAME"
+ state: absent
+
+- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ value: "ansible.example.org."
+ state: present
+
+- name: Remove 1.1.168.192.in-addr.arpa. PTR
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ state: absent
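+
+# A hypothetical example (not from the original module documentation), showing
+# the documented ttl and protocol options on a TXT record:
+- name: Add ansible.example.org TXT record over UDP with a 300 second TTL
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ type: "TXT"
+ ttl: 300
+ protocol: "udp"
+ value: "deployed by ansible"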
+'''
+
+RETURN = '''
+changed:
+ description: Whether the module has modified the record.
+ returned: success
+ type: bool
+record:
+ description: DNS record
+ returned: success
+ type: str
+ sample: 'ansible'
+ttl:
+ description: DNS record TTL
+ returned: success
+ type: int
+ sample: 86400
+type:
+ description: DNS record type
+ returned: success
+ type: str
+ sample: 'CNAME'
+value:
+ description: DNS record value(s)
+ returned: success
+ type: list
+ sample: ['192.168.1.1']
+zone:
+ description: DNS record zone
+ returned: success
+ type: str
+ sample: 'example.org.'
+dns_rc:
+ description: dnspython return code
+ returned: always
+ type: int
+ sample: 4
+dns_rc_str:
+ description: dnspython return code (string representation)
+ returned: always
+ type: str
+ sample: 'REFUSED'
+'''
+
+import traceback
+
+from binascii import Error as binascii_error
+from socket import error as socket_error
+
+DNSPYTHON_IMP_ERR = None
+try:
+ import dns.update
+ import dns.query
+ import dns.tsigkeyring
+ import dns.message
+ import dns.resolver
+
+ HAVE_DNSPYTHON = True
+except ImportError:
+ DNSPYTHON_IMP_ERR = traceback.format_exc()
+ HAVE_DNSPYTHON = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class RecordManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ if module.params['key_name']:
+ try:
+ self.keyring = dns.tsigkeyring.from_text({
+ module.params['key_name']: module.params['key_secret']
+ })
+ except TypeError:
+ module.fail_json(msg='Missing key_secret')
+ except binascii_error as e:
+ module.fail_json(msg='TSIG key error: %s' % to_native(e))
+ else:
+ self.keyring = None
+
+ if module.params['key_algorithm'] == 'hmac-md5':
+ self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
+ else:
+ self.algorithm = module.params['key_algorithm']
+
+ if module.params['zone'] is None:
+ if module.params['record'][-1] != '.':
+ self.module.fail_json(msg='record must be absolute when omitting zone parameter')
+ self.zone = self.lookup_zone()
+ else:
+ self.zone = module.params['zone']
+
+ if self.zone[-1] != '.':
+ self.zone += '.'
+
+ if module.params['record'][-1] != '.':
+ self.fqdn = module.params['record'] + '.' + self.zone
+ else:
+ self.fqdn = module.params['record']
+
+ if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None:
+ self.value = list(map(self.txt_helper, self.module.params['value']))
+ else:
+ self.value = self.module.params['value']
+
+ self.dns_rc = 0
+
+ def txt_helper(self, entry):
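+ # Ensure each TXT value is wrapped in double quotes, e.g. 'foo' -> '"foo"';
+ # values that are already quoted pass through unchanged.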
+ if entry[0] == '"' and entry[-1] == '"':
+ return entry
+ return '"{text}"'.format(text=entry)
+
+ def lookup_zone(self):
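+ # Strip labels from the record name one at a time, querying for SOA, until
+ # the server's authority section names the candidate itself; that name is
+ # the enclosing zone.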
+ name = dns.name.from_text(self.module.params['record'])
+ while True:
+ query = dns.message.make_query(name, dns.rdatatype.SOA)
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
+ self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
+ self.module.params['server'], self.module.params['record']))
+ try:
+ zone = lookup.authority[0].name
+ if zone == name:
+ return zone.to_text()
+ except IndexError:
+ pass
+ try:
+ name = name.parent()
+ except dns.name.NoParent:
+ self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record']))
+
+ def __do_update(self, update):
+ response = None
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ return response
+
+ def create_or_update_record(self):
+ result = {'changed': False, 'failed': False}
+
+ exists = self.record_exists()
+ if exists in [0, 2]:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ if exists == 0:
+ self.dns_rc = self.create_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
+
+ elif exists == 2:
+ self.dns_rc = self.modify_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ else:
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ return result
+
+ def create_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+
+ response = self.__do_update(update)
+ return dns.message.Message.rcode(response)
+
+ def modify_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+
+ return dns.message.Message.rcode(response)
+
+ def remove_record(self):
+ result = {'changed': False, 'failed': False}
+
+ if self.record_exists() == 0:
+ return result
+
+ # Check mode and record exists, declared fake change.
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
+ else:
+ result['changed'] = True
+
+ return result
+
+ def record_exists(self):
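+ # Probe the server with prerequisite-only updates. Returns 0 when the record
+ # does not exist; 1 when it exists (and, for state=present, matches value and
+ # TTL); 2 when it exists but value or TTL differ and it needs modification.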
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ try:
+ update.present(self.module.params['record'], self.module.params['type'])
+ except dns.rdatatype.UnknownRdatatype as e:
+ self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.module.params['state'] == 'absent':
+ return 1
+ for entry in self.value:
+ try:
+ update.present(self.module.params['record'], self.module.params['type'], entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.ttl_changed():
+ return 2
+ else:
+ return 1
+ else:
+ return 2
+ else:
+ return 0
+
+ def ttl_changed(self):
+ query = dns.message.make_query(self.fqdn, self.module.params['type'])
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+
+ if lookup.rcode() != dns.rcode.NOERROR:
+ self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')
+
+ current_ttl = lookup.answer[0].ttl
+ return current_ttl != self.module.params['ttl']
+
+
+def main():
+ tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
+ 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ server=dict(required=True, type='str'),
+ port=dict(required=False, default=53, type='int'),
+ key_name=dict(required=False, type='str'),
+ key_secret=dict(required=False, type='str', no_log=True),
+ key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
+ zone=dict(required=False, default=None, type='str'),
+ record=dict(required=True, type='str'),
+ type=dict(required=False, default='A', type='str'),
+ ttl=dict(required=False, default=3600, type='int'),
+ value=dict(required=False, default=None, type='list'),
+ protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAVE_DNSPYTHON:
+ module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR)
+
+ if len(module.params["record"]) == 0:
+ module.fail_json(msg='record cannot be empty.')
+
+ record = RecordManager(module)
+ result = {}
+ if module.params["state"] == 'absent':
+ result = record.remove_record()
+ elif module.params["state"] == 'present':
+ result = record.create_or_update_record()
+
+ result['dns_rc'] = record.dns_rc
+ result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ result['record'] = dict(zone=record.zone,
+ record=module.params['record'],
+ type=module.params['type'],
+ ttl=module.params['ttl'],
+ value=record.value)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py
new file mode 100644
index 00000000..4e6738cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# copyright: (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: omapi_host
+short_description: Set up OMAPI hosts
+description: Manage OMAPI hosts in compatible DHCP servers.
+requirements:
+ - pypureomapi
+author:
+- Loic Blot (@nerzhul)
+options:
+ state:
+ description:
+ - Create or remove OMAPI host.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ hostname:
+ description:
+ - Sets the host lease hostname (mandatory if state=present).
+ type: str
+ aliases: [ name ]
+ host:
+ description:
+ - Sets OMAPI server host to interact with.
+ type: str
+ default: localhost
+ port:
+ description:
+ - Sets the OMAPI server port to interact with.
+ type: int
+ default: 7911
+ key_name:
+ description:
+ - Sets the TSIG key name for authenticating against the OMAPI server.
+ type: str
+ required: true
+ key:
+ description:
+ - Sets the TSIG key content for authenticating against the OMAPI server.
+ type: str
+ required: true
+ macaddr:
+ description:
+ - Sets the lease host MAC address.
+ type: str
+ required: true
+ ip:
+ description:
+ - Sets the lease host IP address.
+ type: str
+ statements:
+ description:
+ - Attach a list of OMAPI DHCP statements to the host lease (without trailing semicolons).
+ type: list
+ default: []
+ ddns:
+ description:
+ - Enable dynamic DNS updates for this host.
+ type: bool
+ default: no
+
+'''
+EXAMPLES = r'''
+- name: Add a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.98.4.55
+ macaddr: 44:dd:ab:dd:11:44
+ name: server01
+ ip: 192.168.88.99
+ ddns: yes
+ statements:
+ - filename "pxelinux.0"
+ - next-server 1.1.1.1
+ state: present
+
+- name: Remove a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.1.1.1
+ macaddr: 00:66:ab:dd:11:44
+ state: absent
+'''
+
+RETURN = r'''
+lease:
+ description: dictionary containing host information
+ returned: success
+ type: complex
+ contains:
+ ip-address:
+ description: IP address, if one is assigned.
+ returned: success
+ type: str
+ sample: '192.168.1.5'
+ hardware-address:
+ description: MAC address
+ returned: success
+ type: str
+ sample: '00:11:22:33:44:55'
+ hardware-type:
+ description: hardware type (generally 1, i.e. Ethernet)
+ returned: success
+ type: int
+ sample: 1
+ name:
+ description: hostname
+ returned: success
+ type: str
+ sample: 'mydesktop'
+'''
+
+import binascii
+import socket
+import struct
+import traceback
+
+PUREOMAPI_IMP_ERR = None
+try:
+ from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
+ from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
+ from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
+ pureomapi_found = True
+except ImportError:
+ PUREOMAPI_IMP_ERR = traceback.format_exc()
+ pureomapi_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+
+
+class OmapiHostManager:
+ def __init__(self, module):
+ self.module = module
+ self.omapi = None
+ self.connect()
+
+ def connect(self):
+ try:
+ self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']),
+ self.module.params['key'])
+ except binascii.Error:
+ self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
+ except OmapiError as e:
+ self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
+ "are valid. Exception was: %s" % to_native(e))
+ except socket.error as e:
+ self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
+
+ def get_host(self, macaddr):
+ msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
+ msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
+ msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ return None
+ return response
+
+ @staticmethod
+ def unpack_facts(obj):
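+ # Convert the OMAPI response object (a list of key/value tuples) into a dict
+ # and decode the packed binary fields (MAC, IP, hardware type).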
+ result = dict(obj)
+ # Keys returned by pypureomapi are bytes; test membership with to_bytes so
+ # the checks also hold on Python 3.
+ if to_bytes('hardware-address') in result:
+ result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')]))
+
+ if to_bytes('ip-address') in result:
+ result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')]))
+
+ if to_bytes('hardware-type') in result:
+ result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')])[0]
+
+ return result
+
+ def setup_host(self):
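+ # If no lease matches the MAC address, send a create message; otherwise
+ # compare the existing lease and send an update (only ip-address can be
+ # changed, a differing hostname is rejected).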
+ if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
+ self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.")
+
+ msg = None
+ host_response = self.get_host(self.module.params['macaddr'])
+ # If host was not found using macaddr, add create message
+ if host_response is None:
+ msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
+ msg.message.append((to_bytes('create'), struct.pack('!I', 1)))
+ msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr'])))
+ msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname'])))
+ if self.module.params['ip'] is not None:
+ msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
+
+ stmt_join = ""
+ if self.module.params['ddns']:
+ stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
+
+ try:
+ if len(self.module.params['statements']) > 0:
+ stmt_join += "; ".join(self.module.params['statements'])
+ stmt_join += "; "
+ except TypeError as e:
+ self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))
+
+ if len(stmt_join) > 0:
+ msg.obj.append((to_bytes('statements'), to_bytes(stmt_join)))
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+ # Forge update message
+ else:
+ response_obj = self.unpack_facts(host_response.obj)
+ fields_to_update = {}
+
+ if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
+ unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
+ fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
+
+ # Name cannot be changed
+ if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
+ self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
+ "Please delete host and add new." %
+ (response_obj['name'], self.module.params['hostname']))
+
+ """
+ # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
+ if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
+ response_obj['statements'] != self.module.params['statements']:
+ with open('/tmp/omapi', 'w') as fb:
+ for (k,v) in iteritems(response_obj):
+ fb.writelines('statements: %s %s\n' % (k, v))
+ """
+ if len(fields_to_update) == 0:
+ self.module.exit_json(changed=False, lease=response_obj)
+ else:
+ msg = OmapiMessage.update(host_response.handle)
+ msg.update_object(fields_to_update)
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_STATUS:
+ self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True)
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+ def remove_host(self):
+ try:
+ self.omapi.del_host(self.module.params['macaddr'])
+ self.module.exit_json(changed=True)
+ except OmapiErrorNotFound:
+ self.module.exit_json()
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=['absent', 'present']),
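+ # Whether a just-created connection must be explicitly activated: bond and
+ # ethernet profiles when MTU or DNS options are set, team profiles when DNS
+ # options are set; other types are left alone.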
+ host=dict(type='str', default="localhost"),
+ port=dict(type='int', default=7911),
+ key_name=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=True),
+ macaddr=dict(type='str', required=True),
+ hostname=dict(type='str', aliases=['name']),
+ ip=dict(type='str'),
+ ddns=dict(type='bool', default=False),
+ statements=dict(type='list', default=[]),
+ ),
+ supports_check_mode=False,
+ )
+
+ if not pureomapi_found:
+ module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR)
+
+ if module.params['key'] is None or len(module.params["key"]) == 0:
+ module.fail_json(msg="'key' parameter cannot be empty.")
+
+ if module.params['key_name'] is None or len(module.params["key_name"]) == 0:
+ module.fail_json(msg="'key_name' parameter cannot be empty.")
+
+ host_manager = OmapiHostManager(module)
+ try:
+ if module.params['state'] == 'present':
+ host_manager.setup_host()
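+ # nmcli renders enum-valued settings as '<int> (<label>)', e.g. '0 (unknown)';
+ # the regex below captures only the numeric part for comparison.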
+ elif module.params['state'] == 'absent':
+ host_manager.remove_host()
+ except ValueError as e:
+ module.fail_json(msg="OMAPI input value error: %s" % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py
new file mode 100644
index 00000000..661db460
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Networklore's snmp library for Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snmp_facts
+author:
+- Patrick Ogenstad (@ogenstad)
+short_description: Retrieve facts for a device using SNMP
+description:
+ - Retrieve facts for a device using SNMP; the facts will be
+ inserted into the ansible_facts key.
+requirements:
+ - pysnmp
+options:
+ host:
+ description:
+ - Set to target SNMP server (normally C({{ inventory_hostname }})).
+ type: str
+ required: true
+ version:
+ description:
+ - SNMP Version to use, C(v2), C(v2c) or C(v3).
+ type: str
+ required: true
+ choices: [ v2, v2c, v3 ]
+ community:
+ description:
+ - The SNMP community string, required if I(version) is C(v2) or C(v2c).
+ type: str
+ level:
+ description:
+ - Authentication level.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ authNoPriv, authPriv ]
+ username:
+ description:
+ - Username for SNMPv3.
+ - Required if I(version) is C(v3).
+ type: str
+ integrity:
+ description:
+ - Hashing algorithm.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ md5, sha ]
+ authkey:
+ description:
+ - Authentication key.
+ - Required if I(version) is C(v3).
+ type: str
+ privacy:
+ description:
+ - Encryption algorithm.
+ - Required if I(level) is C(authPriv).
+ type: str
+ choices: [ aes, des ]
+ privkey:
+ description:
+ - Encryption key.
+ - Required if I(level) is C(authPriv).
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Gather facts with SNMP version 2
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v2c
+ community: public
+ delegate_to: localhost
+
+- name: Gather facts using SNMP version 3
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v3
+ level: authPriv
+ integrity: sha
+ privacy: aes
+ username: snmp-user
+ authkey: abc12345
+ privkey: def6789
+ delegate_to: localhost
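+
+# A hypothetical example (not part of the original documentation): SNMPv3 with
+# authentication but without privacy, so privacy/privkey are omitted.
+- name: Gather facts using SNMP version 3 (authNoPriv)
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v3
+ level: authNoPriv
+ integrity: sha
+ username: snmp-user
+ authkey: abc12345
+ delegate_to: localhost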
+'''
+
+RETURN = r'''
+ansible_sysdescr:
+ description: A textual description of the entity.
+ returned: success
+ type: str
+ sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ansible_sysobjectid:
+ description: The vendor's authoritative identification of the network management subsystem contained in the entity.
+ returned: success
+ type: str
+ sample: 1.3.6.1.4.1.8072.3.2.10
+ansible_sysuptime:
+ description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
+ returned: success
+ type: int
+ sample: 42388
+ansible_syscontact:
+ description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
+ returned: success
+ type: str
+ sample: Me <me@example.org>
+ansible_sysname:
+ description: An administratively-assigned name for this managed node.
+ returned: success
+ type: str
+ sample: ubuntu-user
+ansible_syslocation:
+ description: The physical location of this node (e.g., 'telephone closet, 3rd floor').
+ returned: success
+ type: str
+ sample: Sitting on the Dock of the Bay
+ansible_all_ipv4_addresses:
+ description: List of all IPv4 addresses.
+ returned: success
+ type: list
+ sample: ["127.0.0.1", "172.17.0.1"]
+ansible_interfaces:
+ description: Dictionary of each network interface and its metadata.
+ returned: success
+ type: dict
+ sample: {
+ "1": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "1",
+ "ipv4": [
+ {
+ "address": "127.0.0.1",
+ "netmask": "255.0.0.0"
+ }
+ ],
+ "mac": "",
+ "mtu": "65536",
+ "name": "lo",
+ "operstatus": "up",
+ "speed": "65536"
+ },
+ "2": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "2",
+ "ipv4": [
+ {
+ "address": "192.168.213.128",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "mac": "000a305a52a1",
+ "mtu": "1500",
+ "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)",
+ "operstatus": "up",
+ "speed": "1500"
+ }
+ }
+'''
+
+import binascii
+import traceback
+from collections import defaultdict
+
+PYSNMP_IMP_ERR = None
+try:
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ from pysnmp.proto.rfc1905 import EndOfMibView
+ HAS_PYSNMP = True
+except Exception:
+ PYSNMP_IMP_ERR = traceback.format_exc()
+ HAS_PYSNMP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_text
+
+
+class DefineOid(object):
+
+ def __init__(self, dotprefix=False):
+ if dotprefix:
+ dp = "."
+ else:
+ dp = ""
+
+ # From SNMPv2-MIB
+ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
+ self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
+ self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
+ self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
+ self.sysName = dp + "1.3.6.1.2.1.1.5.0"
+ self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
+
+ # From IF-MIB
+ self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
+ self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
+ self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
+ self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
+ self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
+ self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
+ self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
+ self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
+
+ # From IP-MIB
+ self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
+ self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
+ self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
+
+
+def decode_hex(hexstring):
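+ # pysnmp pretty-prints non-ASCII OctetStrings as '0x...' hex; decode those
+ # back to text, e.g. decode_hex('0x4c696e7578') -> 'Linux'.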
+
+ if len(hexstring) < 3:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return to_text(binascii.unhexlify(hexstring[2:]))
+ return hexstring
+
+
+def decode_mac(hexstring):
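+ # Interface MAC addresses arrive as 14-character '0x'-prefixed hex strings;
+ # strip the prefix, e.g. decode_mac('0x000a305a52a1') -> '000a305a52a1'.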
+
+ if len(hexstring) != 14:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return hexstring[2:]
+ return hexstring
+
+
+def lookup_adminstatus(int_adminstatus):
+ adminstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing'
+ }
+ if int_adminstatus in adminstatus_options:
+ return adminstatus_options[int_adminstatus]
+ return ""
+
+
+def lookup_operstatus(int_operstatus):
+ operstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing',
+ 4: 'unknown',
+ 5: 'dormant',
+ 6: 'notPresent',
+ 7: 'lowerLayerDown'
+ }
+ if int_operstatus in operstatus_options:
+ return operstatus_options[int_operstatus]
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']),
+ community=dict(type='str'),
+ username=dict(type='str'),
+ level=dict(type='str', choices=['authNoPriv', 'authPriv']),
+ integrity=dict(type='str', choices=['md5', 'sha']),
+ privacy=dict(type='str', choices=['aes', 'des']),
+ authkey=dict(type='str', no_log=True),
+ privkey=dict(type='str', no_log=True),
+ ),
+ required_together=(
+ ['username', 'level', 'integrity', 'authkey'],
+ ['privacy', 'privkey'],
+ ),
+ supports_check_mode=False,
+ )
+
+ m_args = module.params
+
+ if not HAS_PYSNMP:
+ module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR)
+
+ cmdGen = cmdgen.CommandGenerator()
+
+ # Verify that we receive a community when using snmp v2
+ if m_args['version'] in ("v2", "v2c"):
+ if m_args['community'] is None:
+ module.fail_json(msg='Community not set when using snmp version 2')
+
+ if m_args['version'] == "v3":
+ if m_args['username'] is None:
+ module.fail_json(msg='Username not set when using snmp version 3')
+
+ if m_args['level'] == "authPriv" and m_args['privacy'] is None:
+ module.fail_json(msg='Privacy algorithm not set when using authPriv')
+
+ if m_args['integrity'] == "sha":
+ integrity_proto = cmdgen.usmHMACSHAAuthProtocol
+ elif m_args['integrity'] == "md5":
+ integrity_proto = cmdgen.usmHMACMD5AuthProtocol
+
+ if m_args['privacy'] == "aes":
+ privacy_proto = cmdgen.usmAesCfb128Protocol
+ elif m_args['privacy'] == "des":
+ privacy_proto = cmdgen.usmDESPrivProtocol
+
+ # Use SNMP Version 2
+ if m_args['version'] in ("v2", "v2c"):
+ snmp_auth = cmdgen.CommunityData(m_args['community'])
+
+ # Use SNMP Version 3 with authNoPriv
+ elif m_args['level'] == "authNoPriv":
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
+
+ # Use SNMP Version 3 with authPriv
+ else:
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto,
+ privProtocol=privacy_proto)
+
+ # Use p to prefix OIDs with a dot for polling
+ p = DefineOid(dotprefix=True)
+ # Use v without a prefix to use with return values
+ v = DefineOid(dotprefix=False)
+
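+ # Autovivifying nested dict: looking up a missing key creates another Tree,
+ # so deep assignments like results['ansible_interfaces'][i]['mtu'] just work.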
+ def Tree():
+ return defaultdict(Tree)
+
+ results = Tree()
+
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.sysDescr,),
+ cmdgen.MibVariable(p.sysObjectId,),
+ cmdgen.MibVariable(p.sysUpTime,),
+ cmdgen.MibVariable(p.sysContact,),
+ cmdgen.MibVariable(p.sysName,),
+ cmdgen.MibVariable(p.sysLocation,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if current_oid == v.sysDescr:
+ results['ansible_sysdescr'] = decode_hex(current_val)
+ elif current_oid == v.sysObjectId:
+ results['ansible_sysobjectid'] = current_val
+ elif current_oid == v.sysUpTime:
+ results['ansible_sysuptime'] = current_val
+ elif current_oid == v.sysContact:
+ results['ansible_syscontact'] = current_val
+ elif current_oid == v.sysName:
+ results['ansible_sysname'] = current_val
+ elif current_oid == v.sysLocation:
+ results['ansible_syslocation'] = current_val
+
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.ifIndex,),
+ cmdgen.MibVariable(p.ifDescr,),
+ cmdgen.MibVariable(p.ifMtu,),
+ cmdgen.MibVariable(p.ifSpeed,),
+ cmdgen.MibVariable(p.ifPhysAddress,),
+ cmdgen.MibVariable(p.ifAdminStatus,),
+ cmdgen.MibVariable(p.ifOperStatus,),
+ cmdgen.MibVariable(p.ipAdEntAddr,),
+ cmdgen.MibVariable(p.ipAdEntIfIndex,),
+ cmdgen.MibVariable(p.ipAdEntNetMask,),
+
+ cmdgen.MibVariable(p.ifAlias,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ interface_indexes = []
+
+ all_ipv4_addresses = []
+ ipv4_networks = Tree()
+
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ if isinstance(val, EndOfMibView):
+ continue
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if v.ifIndex in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
+ interface_indexes.append(ifIndex)
+ if v.ifDescr in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['name'] = current_val
+ if v.ifMtu in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mtu'] = current_val
+ if v.ifSpeed in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['speed'] = current_val
+ if v.ifPhysAddress in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
+ if v.ifAdminStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
+ if v.ifOperStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
+ if v.ipAdEntAddr in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['address'] = current_val
+ all_ipv4_addresses.append(current_val)
+ if v.ipAdEntIfIndex in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['interface'] = current_val
+ if v.ipAdEntNetMask in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['netmask'] = current_val
+
+ if v.ifAlias in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['description'] = current_val
+
+ interface_to_ipv4 = {}
+ for ipv4_network in ipv4_networks:
+ current_interface = ipv4_networks[ipv4_network]['interface']
+ current_network = {
+ 'address': ipv4_networks[ipv4_network]['address'],
+ 'netmask': ipv4_networks[ipv4_network]['netmask']
+ }
+ if current_interface not in interface_to_ipv4:
+ interface_to_ipv4[current_interface] = []
+ interface_to_ipv4[current_interface].append(current_network)
+
+ for interface in interface_to_ipv4:
+ results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
+
+ results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
+
+ module.exit_json(ansible_facts=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py
new file mode 100644
index 00000000..5d63a5b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018 Nicolai Buchwitz <nb@tipi-net.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: netcup_dns
+notes: []
+short_description: Manage Netcup DNS records
+description:
+ - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)"
+options:
+ api_key:
+ description:
+ - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ api_password:
+ description:
+ - API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ customer_id:
+ description:
+ - Netcup customer id
+ required: True
+ type: int
+ domain:
+ description:
+ - Domain name for which records should be added or removed
+ required: True
+ type: str
+ record:
+ description:
+ - Record to add or delete, supports wildcard (*). Default is C(@) (i.e. the zone name)
+ default: "@"
+ aliases: [ name ]
+ type: str
+ type:
+ description:
+ - Record type
+ choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
+ required: True
+ type: str
+ value:
+ description:
+ - Record value
+ required: true
+ type: str
+ solo:
+ type: bool
+ default: False
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with C(state=present)
+ - This will delete all other records with the same record name and type.
+ priority:
+ description:
+ - Record priority. Required for C(type=MX)
+ required: False
+ type: int
+ state:
+ description:
+ - Whether the record should exist or not
+ required: False
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+requirements:
+ - "nc-dnsapi >= 0.1.3"
+author: "Nicolai Buchwitz (@nbuchwitz)"
+
+'''
+
+EXAMPLES = '''
+- name: Create a record of type A
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+
+- name: Delete that record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+ state: absent
+
+- name: Create a wildcard record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "*"
+ type: "A"
+ value: "127.0.1.1"
+
+- name: Set the MX record for example.com
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ type: "MX"
+ value: "mail.example.com"
+
+- name: Set a record and ensure that this is the only one
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ name: "demo"
+ domain: "example.com"
+ type: "AAAA"
+ value: "::1"
+ solo: true
+'''
+
+RETURN = '''
+records:
+ description: list containing all records
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: the record name
+ returned: success
+ type: str
+ sample: fancy-hostname
+ type:
+ description: the record type
+ returned: success
+ type: str
+ sample: A
+ value:
+ description: the record destination
+ returned: success
+ type: str
+ sample: 127.0.0.1
+ priority:
+ description: the record priority (only relevant if type=MX)
+ returned: success
+ type: int
+ sample: 0
+ id:
+ description: internal id of the record
+ returned: success
+ type: int
+ sample: 12345
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+NCDNSAPI_IMP_ERR = None
+try:
+ import nc_dnsapi
+ from nc_dnsapi import DNSRecord
+
+ HAS_NCDNSAPI = True
+except ImportError:
+ NCDNSAPI_IMP_ERR = traceback.format_exc()
+ HAS_NCDNSAPI = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_password=dict(required=True, no_log=True),
+ customer_id=dict(required=True, type='int'),
+
+ domain=dict(required=True),
+ record=dict(required=False, default='@', aliases=['name']),
+ type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
+ value=dict(required=True),
+ priority=dict(required=False, type='int'),
+ solo=dict(required=False, type='bool', default=False),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_NCDNSAPI:
+ module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)
+
+ api_key = module.params.get('api_key')
+ api_password = module.params.get('api_password')
+ customer_id = module.params.get('customer_id')
+ domain = module.params.get('domain')
+ record_type = module.params.get('type')
+ record = module.params.get('record')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ solo = module.params.get('solo')
+ state = module.params.get('state')
+
+ if record_type == 'MX' and not priority:
+ module.fail_json(msg="record type MX required the 'priority' argument")
+
+ has_changed = False
+ all_records = []
+ try:
+ with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
+ all_records = api.dns_records(domain)
+ record = DNSRecord(record, record_type, value, priority=priority)
+
+ # try to get existing record
+ record_exists = False
+ for r in all_records:
+ if r == record:
+ record_exists = True
+ record = r
+
+ break
+
+ if state == 'present':
+ if solo:
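+ # solo: the record must be the only one with this name and type, so
+ # any record differing only in destination is considered obsolete.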
+ obsolete_records = [r for r in all_records if
+ r.hostname == record.hostname
+ and r.type == record.type
+ and not r.destination == record.destination]
+
+ if obsolete_records:
+ if not module.check_mode:
+ all_records = api.delete_dns_records(domain, obsolete_records)
+
+ has_changed = True
+
+ if not record_exists:
+ if not module.check_mode:
+ all_records = api.add_dns_record(domain, record)
+
+ has_changed = True
+ elif state == 'absent' and record_exists:
+ if not module.check_mode:
+ all_records = api.delete_dns_record(domain, record)
+
+ has_changed = True
+
+ except Exception as ex:
+ # Exception objects have no .message attribute on Python 3; use to_native.
+ module.fail_json(msg=to_native(ex))
+
+ module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
+
+
+def record_data(r):
+ return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
new file mode 100644
index 00000000..af953e0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: newrelic_deployment
+author: "Matt Coddington (@mcodd)"
+short_description: Notify newrelic about app deployments
+description:
+ - Notify newrelic about app deployments (see U(https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api))
+options:
+ token:
+ type: str
+ description:
+ - API token, to place in the x-api-key header.
+ required: true
+ app_name:
+ type: str
+ description:
+ - (one of app_name or application_id is required) The value of app_name in the newrelic.yml file used by the application
+ required: false
+ application_id:
+ type: str
+ description:
+ - (one of app_name or application_id is required) The application ID, found in the URL when viewing the application in RPM
+ required: false
+ changelog:
+ type: str
+ description:
+ - A list of changes for this deployment
+ required: false
+ description:
+ type: str
+ description:
+ - Text annotation for the deployment - notes for you
+ required: false
+ revision:
+ type: str
+ description:
+ - A revision number (e.g., git commit SHA)
+ required: false
+ user:
+ type: str
+ description:
+ - The name of the user/process that triggered this deployment
+ required: false
+ appname:
+ type: str
+ description:
+ - Name of the application
+ required: false
+ environment:
+ type: str
+ description:
+ - The environment for this deployment
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify newrelic about an app deployment
+ community.general.newrelic_deployment:
+ token: AAAAAA
+ app_name: myapp
+ user: ansible deployment
+ revision: '1.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ app_name=dict(required=False),
+ application_id=dict(required=False),
+ changelog=dict(required=False),
+ description=dict(required=False),
+ revision=dict(required=False),
+ user=dict(required=False),
+ appname=dict(required=False),
+ environment=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_one_of=[['app_name', 'application_id']],
+ supports_check_mode=True
+ )
+
+ # build list of params
+ params = {}
+ if module.params["app_name"] and module.params["application_id"]:
+ module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+ if module.params["app_name"]:
+ params["app_name"] = module.params["app_name"]
+ elif module.params["application_id"]:
+ params["application_id"] = module.params["application_id"]
+ else:
+ module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+ for item in ["changelog", "description", "revision", "user", "appname", "environment"]:
+ if module.params[item]:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Send the data to NewRelic
+ url = "https://rpm.newrelic.com/deployments.xml"
+ data = urlencode(params)
+ headers = {
+ 'x-api-key': module.params["token"],
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] in (200, 201):
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py
new file mode 100644
index 00000000..e6135cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: nexmo
+short_description: Send an SMS via Nexmo
+description:
+ - Send an SMS message via Nexmo.
+author: "Matt Martz (@sivel)"
+options:
+ api_key:
+ type: str
+ description:
+ - Nexmo API Key
+ required: true
+ api_secret:
+ type: str
+ description:
+ - Nexmo API Secret
+ required: true
+ src:
+ type: int
+ description:
+ - Nexmo Number to send from
+ required: true
+ dest:
+ type: list
+ description:
+ - Phone number(s) to send SMS message to
+ required: true
+ msg:
+ type: str
+ description:
+ - Text of the message to send. Messages longer than 160 characters will be
+ split into multiple messages.
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+ community.general.nexmo:
+ api_key: 640c8a53
+ api_secret: 0ce239a6
+ src: 12345678901
+ dest:
+ - 10987654321
+ - 16789012345
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+"""
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+ failed = list()
+ responses = dict()
+ msg = {
+ 'api_key': module.params.get('api_key'),
+ 'api_secret': module.params.get('api_secret'),
+ 'from': module.params.get('src'),
+ 'text': module.params.get('msg')
+ }
+ for number in module.params.get('dest'):
+ msg['to'] = number
+ url = "%s?%s" % (NEXMO_API, urlencode(msg))
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(module, url, headers=headers)
+        if info['status'] != 200:
+            failed.append(number)
+            responses[number] = dict(failed=True)
+            # the request itself failed; skip JSON parsing for this number
+            continue
+
+ try:
+ responses[number] = json.load(response)
+ except Exception:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+ else:
+ for message in responses[number]['messages']:
+ if int(message['status']) != 0:
+ failed.append(number)
+ responses[number] = dict(failed=True, **responses[number])
+
+ if failed:
+        msg = 'One or more messages failed to send'
+ else:
+ msg = ''
+
+ module.exit_json(failed=bool(failed), msg=msg, changed=False,
+ responses=responses)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dict(
+ api_key=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ src=dict(required=True, type='int'),
+ dest=dict(required=True, type='list'),
+ msg=dict(required=True),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ send_msg(module)
+
+
+if __name__ == '__main__':
+ main()
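
As a rough standalone equivalent, the sketch below sends a single SMS through the same JSON endpoint and applies the same per-message status check (Nexmo reports status 0 for each accepted message part). Credentials and numbers are placeholders.

# Illustrative sketch only: one SMS via the same Nexmo endpoint the module uses.
import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

NEXMO_API = 'https://rest.nexmo.com/sms/json'


def send_sms(api_key, api_secret, src, dest, text):
    qs = urlencode({'api_key': api_key, 'api_secret': api_secret,
                    'from': src, 'to': dest, 'text': text})
    req = Request('%s?%s' % (NEXMO_API, qs),
                  headers={'Accept': 'application/json'})
    with urlopen(req) as resp:
        body = json.load(resp)
    # status 0 means the message part was accepted
    return all(int(m['status']) == 0 for m in body.get('messages', []))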
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py
new file mode 100644
index 00000000..3a68f8da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nginx_status_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.nginx_status_info) instead.
+short_description: Retrieve nginx status facts.
+description:
+  - Gathers facts from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+  - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = '''
+# Gather status facts from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+
+# Gather status facts from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+ timeout: 20
+'''
+
+RETURN = '''
+---
+nginx_status_facts.active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+nginx_status_facts.accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+nginx_status_facts.reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+nginx_status_facts.writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+nginx_status_facts.waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+nginx_status_facts.data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusFacts(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'nginx_status_facts': {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['nginx_status_facts']['data'] = data
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['nginx_status_facts']['active_connections'] = int(match.group(1))
+ result['nginx_status_facts']['accepts'] = int(match.group(2))
+ result['nginx_status_facts']['handled'] = int(match.group(3))
+ result['nginx_status_facts']['requests'] = int(match.group(4))
+ result['nginx_status_facts']['reading'] = int(match.group(5))
+ result['nginx_status_facts']['writing'] = int(match.group(6))
+ result['nginx_status_facts']['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_facts = NginxStatusFacts().run()
+ result = dict(changed=False, ansible_facts=nginx_status_facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
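
The heart of this module is the stub_status parser. The snippet below runs the module's regular expression against the sample payload from the RETURN block to show which capture groups map to which facts.

# Illustrative sketch: parse a stub_status payload with the module's regex.
import re

SAMPLE = ('Active connections: 2340 \n'
          'server accepts handled requests\n'
          ' 81769947 81769947 144332345 \n'
          'Reading: 0 Writing: 241 Waiting: 2092 \n')

EXPR = (r'Active connections: ([0-9]+) \nserver accepts handled requests\n'
        r' ([0-9]+) ([0-9]+) ([0-9]+) \n'
        r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)')

keys = ('active_connections', 'accepts', 'handled', 'requests',
        'reading', 'writing', 'waiting')
match = re.match(EXPR, SAMPLE, re.S)
print(dict(zip(keys, map(int, match.groups()))))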
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py
new file mode 100644
index 00000000..a13a57a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: nginx_status_info
+short_description: Retrieve information on nginx status.
+description:
+  - Gathers information from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = r'''
+# Gather status info from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ register: result
+
+# Gather status info from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ timeout: 20
+ register: result
+'''
+
+RETURN = r'''
+---
+active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusInfo(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['data'] = data
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['active_connections'] = int(match.group(1))
+ result['accepts'] = int(match.group(2))
+ result['handled'] = int(match.group(3))
+ result['requests'] = int(match.group(4))
+ result['reading'] = int(match.group(5))
+ result['writing'] = int(match.group(6))
+ result['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_info = NginxStatusInfo().run()
+ module.exit_json(changed=False, **nginx_status_info)
+
+
+if __name__ == '__main__':
+ main()
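
Outside of Ansible, the same status fetch with the module's default 10-second timeout looks roughly like this; the URL is a placeholder and the single failure path mirrors the module's fetch_url handling.

# Illustrative sketch: fetch stub_status with a timeout, as the module does
# via fetch_url(force=True, timeout=...). The URL is a placeholder.
from urllib.error import URLError
from urllib.request import urlopen


def fetch_status(url='http://localhost/nginx_status', timeout=10):
    try:
        with urlopen(url, timeout=timeout) as resp:
            return resp.read().decode('utf-8', errors='replace')
    except URLError as exc:
        raise SystemExit('No response or invalid response from url %s '
                         'within %s seconds: %s' % (url, timeout, exc))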
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py
new file mode 100644
index 00000000..7db7c5ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Bruce Smith <Bruce.Smith.IT@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nictagadm
+short_description: Manage nic tags on SmartOS systems
+description:
+ - Create or delete nic tags on SmartOS systems.
+author:
+- Bruce Smith (@SmithX10)
+options:
+ name:
+ description:
+ - Name of the nic tag.
+ required: true
+ type: str
+ mac:
+ description:
+ - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
+ - Parameters I(mac) and I(etherstub) are mutually exclusive.
+ type: str
+ etherstub:
+ description:
+ - Specifies that the nic tag will be attached to a created I(etherstub).
+ - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
+ type: bool
+ default: no
+ mtu:
+ description:
+ - Specifies the size of the I(mtu) of the desired nic tag.
+ - Parameters I(mtu) and I(etherstub) are mutually exclusive.
+ type: int
+ force:
+ description:
+    - When I(state) is set to C(absent), this switch uses the C(-f) parameter to delete the nic tag regardless of existing VMs.
+ type: bool
+ default: no
+ state:
+ description:
+ - Create or delete a SmartOS nic tag.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
+ community.general.nictagadm:
+ name: storage0
+ mac: 00:1b:21:a3:f5:4d
+ mtu: 9000
+ state: present
+
+- name: Remove 'storage0' nic tag
+ community.general.nictagadm:
+ name: storage0
+ state: absent
+'''
+
+RETURN = r'''
+name:
+ description: nic tag name
+ returned: always
+ type: str
+ sample: storage0
+mac:
+ description: MAC Address that the nic tag was attached to.
+ returned: always
+ type: str
+ sample: 00:1b:21:a3:f5:4d
+etherstub:
+  description: Specifies whether the nic tag was created on and attached to an etherstub.
+ returned: always
+ type: bool
+ sample: False
+mtu:
+  description: Specifies the MTU size that was passed to the nictagadm add command. mtu and etherstub are mutually exclusive.
+ returned: always
+ type: int
+ sample: 1500
+force:
+  description: Shows whether C(-f) was used during the deletion of a nic tag.
+ returned: always
+ type: bool
+ sample: False
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+
+
+class NicTag(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.mac = module.params['mac']
+ self.etherstub = module.params['etherstub']
+ self.mtu = module.params['mtu']
+ self.force = module.params['force']
+ self.state = module.params['state']
+
+ self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
+
+ def is_valid_mac(self):
+ return is_mac(self.mac.lower())
+
+ def nictag_exists(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('exists')
+ cmd.append(self.name)
+
+ (rc, dummy, dummy) = self.module.run_command(cmd)
+
+ return rc == 0
+
+ def add_nictag(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('add')
+
+ if self.etherstub:
+ cmd.append('-l')
+
+ if self.mtu:
+ cmd.append('-p')
+ cmd.append('mtu=' + str(self.mtu))
+
+ if self.mac:
+ cmd.append('-p')
+ cmd.append('mac=' + str(self.mac))
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_nictag(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('delete')
+
+ if self.force:
+ cmd.append('-f')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ mac=dict(type='str'),
+ etherstub=dict(type='bool', default=False),
+ mtu=dict(type='int'),
+ force=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ mutually_exclusive=[
+ ['etherstub', 'mac'],
+ ['etherstub', 'mtu'],
+ ],
+ required_if=[
+ ['etherstub', False, ['name', 'mac']],
+ ['state', 'absent', ['name', 'force']],
+ ],
+ supports_check_mode=True
+ )
+
+ nictag = NicTag(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ changed=False,
+ etherstub=nictag.etherstub,
+ force=nictag.force,
+ name=nictag.name,
+ mac=nictag.mac,
+ mtu=nictag.mtu,
+ state=nictag.state,
+ )
+
+    # mac is optional (mutually exclusive with etherstub), so only validate it when set
+    if nictag.mac and not nictag.is_valid_mac():
+ module.fail_json(msg='Invalid MAC Address Value',
+ name=nictag.name,
+ mac=nictag.mac,
+ etherstub=nictag.etherstub)
+
+ if nictag.state == 'absent':
+ if nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.delete_nictag()
+ if rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+ elif nictag.state == 'present':
+ if not nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.add_nictag()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+
+ if rc is not None:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
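
To make the command construction above concrete, this small sketch rebuilds the exact argv the module hands to run_command for an add operation; nothing is executed.

# Illustrative sketch: the argv that nictagadm's add_nictag() assembles.
def build_add_cmd(name, mac=None, mtu=None, etherstub=False):
    cmd = ['nictagadm', '-v', 'add']
    if etherstub:
        cmd.append('-l')  # etherstub is mutually exclusive with mac and mtu
    if mtu:
        cmd += ['-p', 'mtu=%s' % mtu]
    if mac:
        cmd += ['-p', 'mac=%s' % mac]
    cmd.append(name)
    return cmd


print(build_add_cmd('storage0', mac='00:1b:21:a3:f5:4d', mtu=9000))
# ['nictagadm', '-v', 'add', '-p', 'mtu=9000', '-p', 'mac=00:1b:21:a3:f5:4d', 'storage0']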
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py
new file mode 100644
index 00000000..660c9bc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_a_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS A records
+description:
+ - Adds and/or removes instances of A record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:a) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ type: str
+ view:
+ description:
+ - Sets the DNS view to associate this A record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ type: str
+ ipv4addr:
+ description:
+      - Configures the IPv4 address for this A record. Users can dynamically
+        allocate an ipv4 address to the A record by passing a dictionary
+        containing I(nios_next_ip) and I(CIDR network range). See example.
+ aliases:
+ - ipv4
+ type: str
+ ttl:
+ description:
+ - Configures the TTL to be associated with this A record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an A record from the system
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an A record name
+ community.general.nios_a_record:
+ name: {new_name: a_new.ansible.com, old_name: a.ansible.com}
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Dynamically add a record to next available ip
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: {nios_next_ip: 192.168.10.0/24}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_A_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_A_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
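
All of the nios_* modules in this diff share one pattern: fields flagged with ib_req=True in ib_spec become the WAPI object filter. A minimal sketch of that selection with stand-in values:

# Illustrative sketch: deriving the WAPI object filter from ib_req fields,
# mirroring the comprehension used in nios_fixed_address further below.
ib_spec = {
    'name': dict(required=True, ib_req=True),
    'view': dict(default='default', ib_req=True),
    'ipv4addr': dict(ib_req=True),
    'ttl': dict(type='int'),
}
params = {'name': 'a.ansible.com', 'view': 'default',
          'ipv4addr': '192.168.10.1', 'ttl': None}
obj_filter = {k: params[k] for k, v in ib_spec.items() if v.get('ib_req')}
print(obj_filter)
# {'name': 'a.ansible.com', 'view': 'default', 'ipv4addr': '192.168.10.1'}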
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py
new file mode 100644
index 00000000..b7caecee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_aaaa_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS AAAA records
+description:
+ - Adds and/or removes instances of AAAA record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this AAAA record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for this AAAA record.
+ aliases:
+ - ipv6
+ ttl:
+ description:
+ - Configures the TTL to be associated with this AAAA record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an AAAA record from the system
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an AAAA record name
+ community.general.nios_aaaa_record:
+ name: {new_name: aaaa_new.ansible.com, old_name: aaaa.ansible.com}
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_AAAA_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_AAAA_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
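
The rename examples above rely on a convention shared by these modules: I(name) accepts either a plain string or a dict with old_name/new_name keys. A hypothetical helper (not part of the module) makes the two shapes explicit:

# Illustrative sketch: the {old_name, new_name} convention for renames.
# resolve_name is a hypothetical helper, not part of the module.
def resolve_name(name):
    if isinstance(name, dict):
        return name['old_name'], name['new_name']  # rename: match old, write new
    return name, name  # plain create/update/delete by the same name


print(resolve_name('aaaa.ansible.com'))
print(resolve_name({'old_name': 'aaaa.ansible.com',
                    'new_name': 'aaaa_new.ansible.com'}))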
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py
new file mode 100644
index 00000000..2863d148
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_cname_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS CNAME records
+description:
+ - Adds and/or removes instances of CNAME record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:cname) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this CNAME record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ canonical:
+ description:
+ - Configures the canonical name for this CNAME record.
+ aliases:
+ - cname
+ ttl:
+ description:
+ - Configures the TTL to be associated with this CNAME record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a CNAME record from the system
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_CNAME_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ canonical=dict(aliases=['cname'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_CNAME_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py
new file mode 100644
index 00000000..1bb8d068
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_dns_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS views
+description:
+ - Adds and/or removes instances of DNS view objects from
+ Infoblox NIOS servers. This module manages NIOS C(view) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of DNS view object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. User can also update the hostname as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ aliases:
+ - view
+ network_view:
+ description:
+ - Specifies the name of the network view to assign the configured
+ DNS view to. The network view must already be configured on the
+ target system.
+ default: default
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for dns view
+ community.general.nios_dns_view:
+ name: ansible-dns
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the dns view instance
+ community.general.nios_dns_view:
+ name: {new_name: ansible-dns-new, old_name: ansible-dns}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_DNS_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['view'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_DNS_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
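
Each of these modules builds its final argument_spec the same way: common provider/state keys, then the module's ib_spec, then the shared provider connection spec. The sketch below shows that layering with a stand-in provider spec (the real one comes from WapiModule.provider_spec):

# Illustrative sketch: argument_spec layering used by the nios_* modules.
# provider_spec here is a stand-in; the modules use WapiModule.provider_spec.
argument_spec = dict(
    provider=dict(required=True),
    state=dict(default='present', choices=['present', 'absent']),
)
ib_spec = dict(
    name=dict(required=True, aliases=['view'], ib_req=True),
    network_view=dict(default='default', ib_req=True),
)
provider_spec = dict(host=dict(), username=dict(), password=dict(no_log=True))

argument_spec.update(ib_spec)
argument_spec.update(provider_spec)
print(sorted(argument_spec))
# ['host', 'name', 'network_view', 'password', 'provider', 'state', 'username']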
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py
new file mode 100644
index 00000000..a46db04f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_fixed_address
+author: "Sumit Jaiswal (@sjaiswal)"
+short_description: Configure Infoblox NIOS DHCP Fixed Address
+description:
+ - A fixed address is a specific IP address that a DHCP server
+ always assigns when a lease request comes from a particular
+ MAC address of the client.
+  - Supports both IPv4 and IPv6 Internet protocols.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+      - Specifies the hostname under which the fixed DHCP IP address is
+        stored for the respective MAC address.
+ required: true
+ ipaddr:
+ description:
+ - IPV4/V6 address of the fixed address.
+ required: true
+ mac:
+ description:
+ - The MAC address of the interface.
+ required: true
+ network:
+ description:
+ - Specifies the network range in which ipaddr exists.
+ required: true
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+ required: false
+ default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+ - The name of the space this DHCP option is associated to
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a ipv6 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv6_fixed
+ ipaddr: fe80::1/10
+ mac: 08:6d:41:e8:fd:e8
+ network: fe80::/64
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set dhcp options for a ipv4 fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove a ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count(".") == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+ It will remove any options that are set to None since WAPI will error on
+ that condition. The use_option field only applies
+ to special options that are displayed separately from other options and
+ have a use flag. This function removes the use_option flag from all
+ other options. It will also verify that either `name` or `num` is
+ set in the structure but does not validate the values are equal.
+ The remainder of the value validation is performed by WAPI
+ '''
+ special_options = ['routers', 'router-templates', 'domain-name-servers',
+ 'domain-name', 'broadcast-address', 'broadcast-address-offset',
+ 'dhcp-lease-time', 'dhcp6.name-servers']
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+        # use .get() so options specified only by `num` do not raise KeyError
+        if opt.get('name') not in special_options:
+ del opt['use_option']
+ options.append(opt)
+ return options
+
+
+def validate_ip_addr_type(ip, arg_spec, module):
+ '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox network type
+ '''
+ check_ip = ip.split('/')
+
+ if validate_ip_address(check_ip[0]) and 'ipaddr' in arg_spec:
+ arg_spec['ipv4addr'] = arg_spec.pop('ipaddr')
+ module.params['ipv4addr'] = module.params.pop('ipaddr')
+ return NIOS_IPV4_FIXED_ADDRESS, arg_spec, module
+    elif validate_ip_v6_address(check_ip[0]) and 'ipaddr' in arg_spec:
+        arg_spec['ipv6addr'] = arg_spec.pop('ipaddr')
+        module.params['ipv6addr'] = module.params.pop('ipaddr')
+        return NIOS_IPV6_FIXED_ADDRESS, arg_spec, module
+    # neither a valid IPv4 nor IPv6 address: fail with a clear message instead
+    # of implicitly returning None, which would break the caller's unpacking
+    module.fail_json(msg='Invalid IP address: %s' % ip)
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ name=dict(required=True),
+ ipaddr=dict(required=True, ib_req=True),
+ mac=dict(required=True, ib_req=True),
+ network=dict(required=True),
+ network_view=dict(default='default'),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # to get the argument ipaddr
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ # to modify argument based on ipaddr type i.e. IPV4/IPV6
+ fixed_address_ip_type, ib_spec, module = validate_ip_addr_type(obj_filter['ipaddr'], ib_spec, module)
+
+ wapi = WapiModule(module)
+
+ result = wapi.run(fixed_address_ip_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
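
The IPv4 check above pairs socket.inet_aton with a dot-count guard because inet_aton also accepts shorthand notations such as '127.1'. A quick demonstration:

# Illustrative sketch: why validate_ip_address() counts the dots.
import socket


def validate_ip_address(address):
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return address.count('.') == 3


print(validate_ip_address('192.168.10.1'))  # True
print(validate_ip_address('127.1'))         # False, though inet_aton accepts it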
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py
new file mode 100644
index 00000000..efab39de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_host_record
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS host records
+description:
+ - Adds and/or removes instances of host record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:host) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of host record object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. User can also update the hostname as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this host record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ configure_for_dns:
+ description:
+    - Sets whether the host record is configured in DNS. If the user needs
+      to bypass DNS, set this value to false.
+ type: bool
+ required: false
+ default: true
+ aliases:
+ - dns
+ ipv4addrs:
+ description:
+ - Configures the IPv4 addresses for this host record. This argument
+ accepts a list of values (see suboptions)
+ aliases:
+ - ipv4
+ type: list
+ elements: dict
+ suboptions:
+ ipv4addr:
+ description:
+            - Configures the IPv4 address for the host record. Users can dynamically
+              allocate an ipv4 address to the host record by passing a dictionary
+              containing I(nios_next_ip) and I(CIDR network range). To add or
+              remove an ipv4 address from an existing record, the I(add)/I(remove)
+              params need to be used. See examples.
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+            - Configure the host record over DHCP instead of DNS. If set to
+              true, a MAC address must also be provided.
+ required: false
+ aliases:
+ - dhcp
+ mac:
+ description:
+            - Configures the hardware MAC address for the host record. Required
+              when I(configure_for_dhcp) is set to true.
+ required: false
+ add:
+ description:
+            - Adds the ipv4 address to an existing host record. Note that with
+              I(add) the I(state) must be kept as I(present), as the new IP
+              address is allocated to the existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ remove:
+ description:
+            - Removes the ipv4 address from an existing host record. Note that with
+              I(remove) the I(state) must be changed to I(absent), as the IP
+              address is de-allocated from the existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ ipv6addrs:
+ description:
+ - Configures the IPv6 addresses for the host record. This argument
+ accepts a list of values (see options)
+ aliases:
+ - ipv6
+ type: list
+ elements: dict
+ suboptions:
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for the host record
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+            - Configure the host record over DHCP instead of DNS. If set to
+              true, a MAC address must also be provided.
+ required: false
+ aliases:
+ description:
+ - Configures an optional list of additional aliases to add to the host
+ record. These are equivalent to CNAMEs but held within a host
+ record. Must be in list format.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this host record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an ipv4 host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ aliases:
+ - cname.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add a comment to an existing host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove a host record from the system
+ community.general.nios_host_record:
+ name: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an ipv4 host record
+ community.general.nios_host_record:
+ name: {new_name: host-new.ansible.com, old_name: host.ansible.com}
+ ipv4:
+ - address: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record bypassing DNS
+ community.general.nios_host_record:
+ name: new_host
+ ipv4:
+ - address: 192.168.10.1
+ dns: false
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record over DHCP
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ dhcp: true
+ mac: 00-80-C8-E3-4C-BD
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Dynamically add host record to next available ip
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: {nios_next_ip: 192.168.10.0/24}
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add ip to host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.2
+ add: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove ip to host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ remove: true
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_HOST_RECORD
+
+
+def ipaddr(module, key, filtered_keys=None):
+ ''' Transforms the input value into a struct supported by WAPI
+ This function will transform the input from the playbook into a struct
+ that is valid for WAPI in the form of:
+ {
+ ipv4addr: <value>,
+ mac: <value>
+ }
+ This function does not validate the values are properly formatted or in
+ the acceptable range, that is left to WAPI.
+ '''
+ filtered_keys = filtered_keys or list()
+ objects = list()
+ for item in module.params[key]:
+ objects.append(dict([(k, v) for k, v in iteritems(item) if v is not None and k not in filtered_keys]))
+ return objects
+
+
+def ipv4addrs(module):
+ return ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp'])
+
+
+def ipv6addrs(module):
+ return ipaddr(module, 'ipv6addrs', filtered_keys=['address', 'dhcp'])
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4addr_spec = dict(
+ ipv4addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, aliases=['dhcp'], ib_req=True),
+ mac=dict(required=False, ib_req=True),
+ add=dict(type='bool', required=False),
+ remove=dict(type='bool', required=False)
+ )
+
+ ipv6addr_spec = dict(
+ ipv6addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, ib_req=True),
+ mac=dict(required=False, ib_req=True)
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addrs=dict(type='list', aliases=['ipv4'], elements='dict', options=ipv4addr_spec, transform=ipv4addrs),
+ ipv6addrs=dict(type='list', aliases=['ipv6'], elements='dict', options=ipv6addr_spec, transform=ipv6addrs),
+ configure_for_dns=dict(type='bool', default=True, required=False, aliases=['dns'], ib_req=True),
+ aliases=dict(type='list'),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_HOST_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
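
The ipaddr() transform above is easiest to see on a concrete item: None values and the playbook-facing alias keys are stripped before the struct goes to WAPI. A standalone rendering of the same filtering:

# Illustrative sketch: the filtering ipaddr() applies to each address entry.
def to_wapi_struct(item, filtered_keys=('address', 'dhcp')):
    return {k: v for k, v in item.items()
            if v is not None and k not in filtered_keys}


print(to_wapi_struct({'ipv4addr': '192.168.10.1', 'mac': None,
                      'configure_for_dhcp': True, 'dhcp': True}))
# {'ipv4addr': '192.168.10.1', 'configure_for_dhcp': True}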
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py
new file mode 100644
index 00000000..aff8ca93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_member
+author: "Krishna Vasudevan (@krisvasudevan)"
+short_description: Configure Infoblox NIOS members
+description:
+ - Adds and/or removes Infoblox NIOS servers. This module manages NIOS C(member) objects using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ host_name:
+ description:
+ - Specifies the host name of the member to either add or remove from
+ the NIOS instance.
+ required: true
+ aliases:
+ - name
+ vip_setting:
+ description:
+ - Configures the network settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of the Grid Member
+ subnet_mask:
+ description:
+ - The subnet mask for the Grid Member
+ gateway:
+ description:
+ - The default gateway for the Grid Member
+ ipv6_setting:
+ description:
+ - Configures the IPv6 settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of the Grid Member
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix for the Grid Member
+ gateway:
+ description:
+ - The gateway address for the Grid Member
+ config_addr_type:
+ description:
+ - Address configuration type (IPV4/IPV6/BOTH)
+ default: IPV4
+ comment:
+ description:
+ - A descriptive comment of the Grid member.
+ extattrs:
+ description:
+ - Extensible attributes associated with the object.
+ enable_ha:
+ description:
+ - If set to True, the member has two physical nodes (HA pair).
+ type: bool
+ router_id:
+ description:
+      - Virtual router identifier. Provide this ID if I(enable_ha) is set to C(true). This is a unique VRID number (from 1 to 255) for the local subnet.
+ lan2_enabled:
+ description:
+ - When set to "true", the LAN2 port is enabled as an independent port or as a port for failover purposes.
+ type: bool
+ lan2_port_setting:
+ description:
+ - Settings for the Grid member LAN2 port if 'lan2_enabled' is set to "true".
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+          - If set to C(true), the LAN2 port has its own IP settings.
+ type: bool
+ network_setting:
+ description:
+          - If the I(enabled) field is set to C(true), this defines IPv4 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of LAN2
+ subnet_mask:
+ description:
+ - The subnet mask of LAN2
+ gateway:
+ description:
+ - The default gateway of LAN2
+ v6_network_setting:
+ description:
+          - If the I(enabled) field is set to C(true), this defines IPv6 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of LAN2
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of LAN2
+ gateway:
+ description:
+ - The gateway address of LAN2
+ platform:
+ description:
+ - Configures the Hardware Platform.
+ default: INFOBLOX
+ node_info:
+ description:
+ - Configures the node information list with detailed status report on the operations of the Grid Member.
+ type: list
+ elements: dict
+ suboptions:
+ lan2_physical_setting:
+ description:
+ - Physical port settings for the LAN2 interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+              - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_ha_port_setting:
+ description:
+ - LAN/HA port settings for the node.
+ type: list
+ elements: dict
+ suboptions:
+ ha_ip_address:
+ description:
+ - HA IP address.
+ ha_port_setting:
+ description:
+ - Physical port settings for the HA interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+              - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_port_setting:
+ description:
+ - Physical port settings for the LAN interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+              - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ mgmt_ipv6addr:
+ description:
+ - Public IPv6 address for the LAN1 interface.
+ mgmt_lan:
+ description:
+ - Public IPv4 address for the LAN1 interface.
+ mgmt_network_setting:
+ description:
+ - Network settings for the MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of MGMT
+ subnet_mask:
+ description:
+ - The subnet mask of MGMT
+ gateway:
+ description:
+ - The default gateway of MGMT
+ v6_mgmt_network_setting:
+ description:
+ - The network settings for the IPv6 MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of MGMT
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of MGMT
+ gateway:
+ description:
+ - The gateway address of MGMT
+ mgmt_port_setting:
+ description:
+ - Settings for the member MGMT port.
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+ - Determines if MGMT port settings should be enabled.
+ type: bool
+ security_access_enabled:
+ description:
+ - Determines if security access on the MGMT port is enabled or not.
+ type: bool
+ vpn_enabled:
+ description:
+ - Determines if VPN on the MGMT port is enabled or not.
+ type: bool
+ upgrade_group:
+ description:
+ - The name of the upgrade group to which this Grid member belongs.
+ default: Default
+ use_syslog_proxy_setting:
+ description:
+      - Use flag for external_syslog_server_enable, syslog_servers, syslog_proxy_setting, syslog_size.
+ type: bool
+ external_syslog_server_enable:
+ description:
+ - Determines if external syslog servers should be enabled
+ type: bool
+ syslog_servers:
+ description:
+ - The list of external syslog servers.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The server address.
+ category_list:
+ description:
+ - The list of all syslog logging categories.
+ connection_type:
+ description:
+          - The connection type for communicating with this server (STCP/TCP/UDP).
+ default: UDP
+ local_interface:
+ description:
+          - The local interface through which the appliance sends syslog messages to the syslog server (ANY/LAN/MGMT).
+ default: ANY
+ message_node_id:
+ description:
+          - Identify the node in the syslog message (HOSTNAME/IP_HOSTNAME/LAN/MGMT).
+ default: LAN
+ message_source:
+ description:
+ - The source of syslog messages to be sent to the external syslog server.
+ default: ANY
+ only_category_list:
+ description:
+ - The list of selected syslog logging categories. The appliance forwards syslog messages that belong to the selected categories.
+ type: bool
+ port:
+ description:
+ - The port this server listens on.
+ default: 514
+ severity:
+ description:
+ - The severity filter. The appliance sends log messages of the specified severity and above to the external syslog server.
+ default: DEBUG
+ pre_provisioning:
+ description:
+ - Pre-provisioning information.
+ type: list
+ elements: dict
+ suboptions:
+ hardware_info:
+ description:
+ - An array of structures that describe the hardware being pre-provisioned.
+ type: list
+ elements: dict
+ suboptions:
+ hwmodel:
+ description:
+ - Hardware model
+ hwtype:
+ description:
+ - Hardware type.
+ licenses:
+ description:
+ - An array of license types.
+ create_token:
+ description:
+ - Flag for initiating a create token request for pre-provisioned members.
+ type: bool
+ default: False
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Add a member to the grid with IPv4 address
+ community.general.nios_member:
+ host_name: member01.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add an HA member to the grid
+ community.general.nios_member:
+ host_name: memberha.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ enable_ha: true
+ router_id: 150
+ node_info:
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.70
+ mgmt_lan: 192.168.1.80
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.71
+ mgmt_lan: 192.168.1.81
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the member with pre-provisioning details specified
+ community.general.nios_member:
+ name: member01.localdomain
+ pre_provisioning:
+ - hardware_info:
+ - hwmodel: IB-VM-820
+ hwtype: IB-VNIOS
+ licenses:
+ - dns
+ - dhcp
+ - enterprise
+ - vnios
+ comment: "Updated by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the member
+ community.general.nios_member:
+ name: member01.localdomain
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MEMBER
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4_spec = dict(
+ address=dict(),
+ subnet_mask=dict(),
+ gateway=dict(),
+ )
+
+ ipv6_spec = dict(
+ virtual_ip=dict(),
+ cidr_prefix=dict(type='int'),
+ gateway=dict(),
+ )
+
+ port_spec = dict(
+ auto_port_setting_enabled=dict(type='bool'),
+ duplex=dict(),
+ speed=dict(),
+ )
+
+ lan2_port_spec = dict(
+ enabled=dict(type='bool'),
+ network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ ha_port_spec = dict(
+ ha_ip_address=dict(),
+ ha_port_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_port_setting=dict(type='list', elements='dict', options=port_spec),
+ mgmt_lan=dict(),
+ mgmt_ipv6addr=dict(),
+ )
+
+ node_spec = dict(
+ lan2_physical_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_ha_port_setting=dict(type='list', elements='dict', options=ha_port_spec),
+ mgmt_network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_mgmt_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ mgmt_port_spec = dict(
+ enabled=dict(type='bool'),
+ security_access_enabled=dict(type='bool'),
+ vpn_enabled=dict(type='bool'),
+ )
+
+ syslog_spec = dict(
+ address=dict(),
+ category_list=dict(type='list'),
+ connection_type=dict(default='UDP'),
+ local_interface=dict(default='ANY'),
+ message_node_id=dict(default='LAN'),
+ message_source=dict(default='ANY'),
+ only_category_list=dict(type='bool'),
+ port=dict(type='int', default=514),
+ severity=dict(default='DEBUG'),
+ )
+
+ hw_spec = dict(
+ hwmodel=dict(),
+ hwtype=dict(),
+ )
+
+ pre_prov_spec = dict(
+ hardware_info=dict(type='list', elements='dict', options=hw_spec),
+ licenses=dict(type='list'),
+ )
+
+ ib_spec = dict(
+ host_name=dict(required=True, aliases=['name'], ib_req=True),
+ vip_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ ipv6_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ config_addr_type=dict(default='IPV4'),
+ comment=dict(),
+ enable_ha=dict(type='bool', default=False),
+ router_id=dict(type='int'),
+ lan2_enabled=dict(type='bool', default=False),
+ lan2_port_setting=dict(type='list', elements='dict', options=lan2_port_spec),
+ platform=dict(default='INFOBLOX'),
+ node_info=dict(type='list', elements='dict', options=node_spec),
+ mgmt_port_setting=dict(type='list', elements='dict', options=mgmt_port_spec),
+ upgrade_group=dict(default='Default'),
+ use_syslog_proxy_setting=dict(type='bool'),
+ external_syslog_server_enable=dict(type='bool'),
+ syslog_servers=dict(type='list', elements='dict', options=syslog_spec),
+ pre_provisioning=dict(type='list', elements='dict', options=pre_prov_spec),
+ extattrs=dict(type='dict'),
+ create_token=dict(type='bool', default=False),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MEMBER, ib_spec)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py
new file mode 100644
index 00000000..ca1f1f81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_mx_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS MX records
+description:
+ - Adds and/or removes instances of MX record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:mx) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this MX record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ mail_exchanger:
+ description:
+ - Configures the mail exchanger FQDN for this MX record.
+ aliases:
+ - mx
+ preference:
+ description:
+ - Configures the preference (0-65535) for this MX record.
+ ttl:
+ description:
+      - Configures the TTL to be associated with this MX record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an MX record from the system
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MX_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ mail_exchanger=dict(aliases=['mx'], ib_req=True),
+ preference=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MX_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py
new file mode 100644
index 00000000..de57e692
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_naptr_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS NAPTR records
+description:
+ - Adds and/or removes instances of NAPTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:naptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this a record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ order:
+ description:
+ - Configures the order (0-65535) for this NAPTR record. This parameter
+ specifies the order in which the NAPTR rules are applied when
+ multiple rules are present.
+ preference:
+ description:
+ - Configures the preference (0-65535) for this NAPTR record. The
+ preference field determines the order NAPTR records are processed
+ when multiple records with the same order parameter are present.
+ replacement:
+ description:
+ - Configures the replacement field for this NAPTR record.
+ For nonterminal NAPTR records, this field specifies the
+ next domain name to look up.
+ services:
+ description:
+ - Configures the services field (128 characters maximum) for this
+ NAPTR record. The services field contains protocol and service
+ identifiers, such as "http+E2U" or "SIPS+D2T".
+ required: false
+ flags:
+ description:
+ - Configures the flags field for this NAPTR record. These control the
+ interpretation of the fields for an NAPTR record object. Supported
+ values for the flags field are "U", "S", "P" and "A".
+ required: false
+ regexp:
+ description:
+ - Configures the regexp field for this NAPTR record. This is the
+ regular expression-based rewriting rule of the NAPTR record. This
+ should be a POSIX compliant regular expression, including the
+ substitution rule and flags. Refer to RFC 2915 for the field syntax
+ details.
+ required: false
+ ttl:
+ description:
+ - Configures the TTL to be associated with this NAPTR record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a NAPTR record from the system
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ order=dict(type='int', ib_req=True),
+ preference=dict(type='int', ib_req=True),
+ replacement=dict(ib_req=True),
+ services=dict(),
+ flags=dict(),
+ regexp=dict(),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:naptr', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py
new file mode 100644
index 00000000..54b8dfb1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network object
+description:
+ - Adds and/or removes instances of network objects from
+ Infoblox NIOS servers. This module manages NIOS C(network) objects
+ using the Infoblox WAPI interface over REST.
+  - Supports both IPv4 and IPv6 internet protocols
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ network:
+ description:
+ - Specifies the network to add or remove from the system. The value
+ should use CIDR notation.
+ required: true
+ aliases:
+ - name
+ - cidr
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+    default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure. The standard options are
+ C(router), C(router-templates), C(domain-name-servers), C(domain-name),
+ C(broadcast-address), C(broadcast-address-offset), C(dhcp-lease-time),
+ and C(dhcp6.name-servers).
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+ - The name of the space this DHCP option is associated to
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ container:
+ description:
+      - If set to C(true), the network is created as a network container
+        to be added to or removed from the system.
+ type: bool
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an IPv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv6 network
+ community.general.nios_network:
+ network: fe80::/64
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set DHCP options for an IPv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an IPv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv4 network container
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv6 network container
+ community.general.nios_network:
+ network: fe80::/64
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an IPv4 network container
+ community.general.nios_network:
+    network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK, NIOS_IPV6_NETWORK
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK_CONTAINER, NIOS_IPV6_NETWORK_CONTAINER
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+    It will remove any options that are set to None since WAPI will error on
+    that condition. It will also verify that at least one of `name` or `num`
+    is set in the structure, but it does not validate the values themselves;
+    the remainder of the value validation is performed by WAPI.
+ '''
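+    # A minimal sketch of the transform: given
+    #   options: [{'name': 'domain-name', 'value': 'ansible.com'}]
+    # the unset suboption `num` arrives as None and is dropped, so WAPI
+    # receives:
+    #   [{'name': 'domain-name', 'value': 'ansible.com',
+    #     'use_option': True, 'vendor_class': 'DHCP'}]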
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+ options.append(opt)
+ return options
+
+
+def check_ip_addr_type(obj_filter, ib_spec):
+    '''Check whether the supplied network address is IPv4 or IPv6 and return
+    the appropriate Infoblox network/network container type.
+ '''
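+    # For example: '192.168.10.0/24' resolves to NIOS_IPV4_NETWORK and
+    # 'fe80::/64' to NIOS_IPV6_NETWORK; with container=true the same inputs
+    # resolve to the corresponding *_NETWORK_CONTAINER types instead.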
+
+ ip = obj_filter['network']
+ if 'container' in obj_filter and obj_filter['container']:
+ check_ip = ip.split('/')
+        del ib_spec['container']  # remove the container key from the POST arguments
+        del ib_spec['options']  # options are not supported on network containers
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK_CONTAINER, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK_CONTAINER, ib_spec
+ else:
+ check_ip = ip.split('/')
+        del ib_spec['container']  # remove the container key from the POST arguments
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK, ib_spec
+
+
+def check_vendor_specific_dhcp_option(module, ib_spec):
+    '''Check whether a supplied DHCP option is vendor-specific (num 43, 124,
+    or 125) and, if so, remove the use_option flag, which is not supported
+    for vendor-specific DHCP options.
+ '''
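+    # Note: only the first entry of each list-valued parameter is inspected.
+    # For example, an entry such as options: [{'num': 43, 'value': '...'}]
+    # has its 'use_option' key removed before the WAPI call, since options
+    # 43, 124 and 125 are vendor-specific.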
+ for key, value in iteritems(ib_spec):
+ if isinstance(module.params[key], list):
+ temp_dict = module.params[key][0]
+ if 'num' in temp_dict:
+ if temp_dict['num'] in (43, 124, 125):
+ del module.params[key][0]['use_option']
+ return ib_spec
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ network=dict(required=True, aliases=['name', 'cidr'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ container=dict(type='bool', ib_req=True)
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+    # collect the ib_req fields to determine the supplied network address
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ network_type, ib_spec = check_ip_addr_type(obj_filter, ib_spec)
+
+ wapi = WapiModule(module)
+ # to check for vendor specific dhcp option
+ ib_spec = check_vendor_specific_dhcp_option(module, ib_spec)
+
+ result = wapi.run(network_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py
new file mode 100644
index 00000000..d13052b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network views
+description:
+ - Adds and/or removes instances of network view objects from
+ Infoblox NIOS servers. This module manages NIOS C(networkview) objects
+ using the Infoblox WAPI interface over REST.
+  - Updates instances of network view objects on Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+      - Specifies the name of the network view to add or remove from
+        the system. The name can also be updated by passing a dict
+        containing I(new_name) and I(old_name). See examples.
+ required: true
+ aliases:
+ - network_view
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new network view
+ community.general.nios_network_view:
+ name: ansible
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for network view
+ community.general.nios_network_view:
+ name: ansible
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the network view
+ community.general.nios_network_view:
+ name: ansible
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an existing network view
+ community.general.nios_network_view:
+ name: {new_name: ansible-new, old_name: ansible}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NETWORK_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['network_view'], ib_req=True),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NETWORK_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py
new file mode 100644
index 00000000..bf2afd3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_nsgroup
+short_description: Configure Infoblox DNS nameserver groups
+extends_documentation_fragment:
+- community.general.nios
+
+author:
+ - Erich Birngruber (@ebirn)
+ - Sumit Jaiswal (@sjaiswal)
+description:
+  - Adds and/or removes nameserver groups from Infoblox NIOS servers.
+    This module manages NIOS C(nsgroup) objects using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox-client
+options:
+ name:
+ description:
+ - Specifies the name of the NIOS nameserver group to be managed.
+ required: true
+ grid_primary:
+ description:
+      - This host is to be used as the primary server in this nameserver group. It must be a grid member.
+        This option is required when setting I(use_external_primary) to C(false).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ grid_secondaries:
+ description:
+ - Configures the list of grid member hosts that act as secondary nameservers.
+        This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False)
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ is_grid_default:
+ description:
+ - If set to C(True) this nsgroup will become the default nameserver group for new zones.
+ type: bool
+ required: false
+ default: false
+ use_external_primary:
+ description:
+ - This flag controls whether the group is using an external primary nameserver.
+ Note that modification of this field requires passing values for I(grid_secondaries) and I(external_primaries).
+ type: bool
+ required: false
+ default: false
+ external_primaries:
+ description:
+ - Configures a list of external nameservers (non-members of the grid).
+        This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver
+ required: true
+ name:
+ description:
+ - Set a label for the external nameserver
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+      - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+ required: false
+ external_secondaries:
+ description:
+      - Allows providing a list of external secondary nameservers that are not members of the grid.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver
+ required: true
+ name:
+ description:
+ - Set a label for the external nameserver
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+          - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ choices: [present, absent]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Create simple infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Create infoblox nameserver group with external primaries
+ community.general.nios_nsgroup:
+ name: my-example-group
+ use_external_primary: true
+ comment: "this is my example nameserver group"
+ external_primaries: "{{ ext_nameservers }}"
+ grid_secondaries:
+ - name: infoblox-test.example.com
+ lead: True
+ preferred_primaries: "{{ ext_nameservers }}"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Delete infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NSGROUP
+
+
+# from infoblox documentation
+# Fields List
+# Field Type Req R/O Base Search
+# comment String N N Y : = ~
+# extattrs Extattr N N N ext
+# external_primaries [struct] N N N N/A
+# external_secondaries [struct] N N N N/A
+# grid_primary [struct] N N N N/A
+# grid_secondaries [struct] N N N N/A
+# is_grid_default Bool N N N N/A
+# is_multimaster Bool N Y N N/A
+# name String Y N Y : = ~
+# use_external_primary Bool N N N N/A
+
+
+def main():
+ '''entrypoint for module execution.'''
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ # cleanup tsig fields
+ def clean_tsig(ext):
+ if 'tsig_key' in ext and not ext['tsig_key']:
+ del ext['tsig_key']
+ if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']:
+ del ext['tsig_key_name']
+ if 'tsig_key' not in ext and 'tsig_key_alg' in ext:
+ del ext['tsig_key_alg']
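+    # Sketch of the effect: an external server entry such as
+    #   {'name': 'ns1', 'address': '10.0.0.1', 'tsig_key': None,
+    #    'tsig_key_name': None, 'tsig_key_alg': 'HMAC-MD5'}
+    # is reduced to {'name': 'ns1', 'address': '10.0.0.1'}, so unused TSIG
+    # defaults are not sent to WAPI.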
+
+ def clean_grid_member(member):
+ if member['preferred_primaries']:
+ for ext in member['preferred_primaries']:
+ clean_tsig(ext)
+ if member['enable_preferred_primaries'] is False:
+ del member['enable_preferred_primaries']
+ del member['preferred_primaries']
+ if member['lead'] is False:
+ del member['lead']
+ if member['grid_replicate'] is False:
+ del member['grid_replicate']
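+    # Sketch: flags left at their False defaults (enable_preferred_primaries,
+    # lead, grid_replicate) are stripped from each grid member entry, so only
+    # explicitly enabled behaviour is posted to WAPI.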
+
+ def ext_primaries_transform(module):
+ if module.params['external_primaries']:
+ for ext in module.params['external_primaries']:
+ clean_tsig(ext)
+ return module.params['external_primaries']
+
+ def ext_secondaries_transform(module):
+ if module.params['external_secondaries']:
+ for ext in module.params['external_secondaries']:
+ clean_tsig(ext)
+ return module.params['external_secondaries']
+
+ def grid_primary_preferred_transform(module):
+ for member in module.params['grid_primary']:
+ clean_grid_member(member)
+ return module.params['grid_primary']
+
+ def grid_secondaries_preferred_primaries_transform(module):
+ for member in module.params['grid_secondaries']:
+ clean_grid_member(member)
+ return module.params['grid_secondaries']
+
+ extserver_spec = dict(
+ address=dict(required=True, ib_req=True),
+ name=dict(required=True, ib_req=True),
+ stealth=dict(type='bool', default=False),
+ tsig_key=dict(no_log=True),
+ tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'),
+ tsig_key_name=dict(required=True)
+ )
+
+ memberserver_spec = dict(
+ name=dict(required=True, ib_req=True),
+ enable_preferred_primaries=dict(type='bool', default=False),
+ grid_replicate=dict(type='bool', default=False),
+ lead=dict(type='bool', default=False),
+ preferred_primaries=dict(type='list', elements='dict', options=extserver_spec, default=[]),
+ stealth=dict(type='bool', default=False),
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ grid_primary=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_primary_preferred_transform),
+ grid_secondaries=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_secondaries_preferred_primaries_transform),
+ external_primaries=dict(type='list', elements='dict', options=extserver_spec, transform=ext_primaries_transform),
+ external_secondaries=dict(type='list', elements='dict', options=extserver_spec,
+ transform=ext_secondaries_transform),
+ is_grid_default=dict(type='bool', default=False),
+ use_external_primary=dict(type='bool', default=False),
+ extattrs=dict(),
+ comment=dict(),
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NSGROUP, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py
new file mode 100644
index 00000000..96fb175b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_ptr_record
+author: "Trebuchet Clement (@clementtrebuchet)"
+short_description: Configure Infoblox NIOS PTR records
+description:
+ - Adds and/or removes instances of PTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:ptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - The name of the DNS PTR record in FQDN format to add or remove from
+ the system.
+        The field is required only for a PTR object in a forward-mapping zone.
+ required: false
+ view:
+ description:
+      - Sets the DNS view to associate this PTR record with. The DNS
+ view must already be configured on the system
+ required: false
+ aliases:
+ - dns_view
+ ipv4addr:
+ description:
+      - The IPv4 Address of the record. Mutually exclusive with I(ipv6addr).
+ aliases:
+ - ipv4
+ ipv6addr:
+ description:
+      - The IPv6 Address of the record. Mutually exclusive with I(ipv4addr).
+ aliases:
+ - ipv6
+ ptrdname:
+ description:
+ - The domain name of the DNS PTR record in FQDN format.
+ ttl:
+ description:
+ - Time To Live (TTL) value for the record.
+ A 32-bit unsigned integer that represents the duration, in seconds, that the record is valid (cached).
+ Zero indicates that the record should not be cached.
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance. Maximum 256 characters.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Create a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Delete a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_PTR_RECORD
+
+
+def main():
+ # Module entry point
+ ib_spec = dict(
+ name=dict(required=False),
+ view=dict(aliases=['dns_view'], ib_req=True),
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+ ptrdname=dict(ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ mutually_exclusive = [('ipv4addr', 'ipv6addr')]
+ required_one_of = [
+ ['ipv4addr', 'ipv6addr']
+ ]
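+    # together, mutually_exclusive and required_one_of enforce that exactly
+    # one of ipv4addr/ipv6addr is supplied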
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ required_one_of=required_one_of)
+
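+    # drop the unused address-family key from ib_spec so it is not sent as
+    # part of the WAPI request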
+ if module.params['ipv4addr']:
+ del ib_spec['ipv6addr']
+ elif module.params['ipv6addr']:
+ del ib_spec['ipv4addr']
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_PTR_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py
new file mode 100644
index 00000000..c519c191
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_srv_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS SRV records
+description:
+ - Adds and/or removes instances of SRV record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:srv) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this a record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ port:
+ description:
+ - Configures the port (0-65535) of this SRV record.
+ priority:
+ description:
+ - Configures the priority (0-65535) for this SRV record.
+ target:
+ description:
+ - Configures the target FQDN for this SRV record.
+ weight:
+ description:
+ - Configures the weight (0-65535) for this SRV record.
+ ttl:
+ description:
+      - Configures the TTL to be associated with this SRV record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an SRV record from the system
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_SRV_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ port=dict(type='int', ib_req=True),
+ priority=dict(type='int', ib_req=True),
+ target=dict(ib_req=True),
+ weight=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_SRV_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py
new file mode 100644
index 00000000..0dcdbadb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_txt_record
+author: "Corey Wanless (@coreywan)"
+short_description: Configure Infoblox NIOS txt records
+description:
+ - Adds and/or removes instances of txt record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:txt) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this txt record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ text:
+ description:
+ - Text associated with the record. It can contain up to 255 bytes
+ per substring, up to a total of 512 bytes. To enter leading,
+ trailing, or embedded spaces in the text, add quotes around the
+ text to preserve the spaces.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this txt record.
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+ - name: Ensure a text record exists
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: present
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+
+ - name: Ensure a text record does not exist
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: absent
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+ text=dict(ib_req=True),
+ ttl=dict(type='int'),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
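+ # The WAPI object type is passed here as the literal string 'record:txt'
+ # rather than a NIOS_* constant as in the sibling modules; this should be
+ # equivalent, since those constants are just these type strings.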
+ result = wapi.run('record:txt', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py
new file mode 100644
index 00000000..8a7607fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_zone
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS zones
+description:
+ - Adds and/or removes instances of DNS zone objects from
+ Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ fqdn:
+ description:
+ - Specifies the qualified domain name to either add or remove from
+ the NIOS instance based on the configured C(state) value.
+ required: true
+ aliases:
+ - name
+ view:
+ description:
+ - Configures the DNS view name for the configured resource. The
+ specified DNS view must already exist on the running NIOS instance
+ prior to configuring zones.
+ default: default
+ aliases:
+ - dns_view
+ grid_primary:
+ description:
+ - Configures the grid primary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid primary server
+ required: true
+ grid_secondaries:
+ description:
+ - Configures the grid secondary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid secondary server
+ required: true
+ ns_group:
+ description:
+ - Configures the name server group for this zone. Name server group is
+ mutually exclusive with grid primary and grid secondaries.
+ restart_if_needed:
+ description:
+ - If set to true, causes the NIOS DNS service to restart and load the
+ new zone configuration.
+ type: bool
+ zone_format:
+ description:
+ - Create an authoritative reverse-mapping zone, which is an area of
+ network space for which one or more name servers (primary and
+ secondary) have the responsibility to respond to address-to-name
+ queries. It supports reverse-mapping zones for both IPv4 and IPv6
+ addresses.
+ default: FORWARD
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a zone on the system using grid primary and secondaries
+ community.general.nios_zone:
+ name: ansible.com
+ grid_primary:
+ - name: gridprimary.grid.com
+ grid_secondaries:
+ - name: gridsecondary1.grid.com
+ - name: gridsecondary2.grid.com
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a zone on the system using a name server group
+ community.general.nios_zone:
+ name: ansible.com
+ ns_group: examplensg
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV6 zone format
+ community.general.nios_zone:
+ name: 100::1/128
+ zone_format: IPV6
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment and ext attributes for an existing zone
+ community.general.nios_zone:
+ name: ansible.com
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns zone
+ community.general.nios_zone:
+ name: ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the reverse mapping dns zone from the system with IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_ZONE
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ grid_spec = dict(
+ name=dict(required=True),
+ )
+
+ ib_spec = dict(
+ fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
+ zone_format=dict(default='FORWARD', ib_req=False),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ grid_primary=dict(type='list', elements='dict', options=grid_spec),
+ grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
+ ns_group=dict(),
+ restart_if_needed=dict(type='bool'),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
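+ # Note: update=False on fqdn presumably means the zone name is only used
+ # to locate the object and is never sent as a modification, so renaming a
+ # zone requires absent followed by present rather than an in-place update.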
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['ns_group', 'grid_primary'],
+ ['ns_group', 'grid_secondaries']
+ ])
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_ZONE, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py
new file mode 100644
index 00000000..60626294
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py
@@ -0,0 +1,1115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nmcli
+author:
+- Chris Long (@alcamie101)
+short_description: Manage Networking
+requirements:
+- nmcli
+description:
+ - 'Manage the network devices. Create, modify and manage various connection and device types, e.g., ethernet, team, bond, vlan, etc.'
+ - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
+ - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
+ - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager'
+ - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
+options:
+ state:
+ description:
+ - Whether the device should exist or not, taking action if the state is different from what is stated.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ autoconnect:
+ description:
+ - Whether the connection should start on boot.
+ - Whether the connection profile can be automatically activated
+ type: bool
+ default: yes
+ conn_name:
+ description:
+ - The name used to call the connection. Pattern is <type>[-<ifname>][-<num>].
+ type: str
+ required: true
+ ifname:
+ description:
+ - The interface to bind the connection to.
+ - The connection will only be applicable to this interface name.
+ - A special value of C('*') can be used for interface-independent connections.
+ - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
+ - This parameter defaults to C(conn_name) when left unset.
+ type: str
+ type:
+ description:
+ - This is the type of device or network connection that you wish to create or modify.
+ - Type C(generic) is added in Ansible 2.5.
+ type: str
+ choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, ipip, sit, team, team-slave, vlan, vxlan ]
+ mode:
+ description:
+ - This is the type of device or network connection that you wish to create for a bond, team or bridge.
+ type: str
+ choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
+ default: balance-rr
+ master:
+ description:
+ - Master interface (ifname, connection UUID or conn_name) of the bridge, team or bond master connection profile.
+ type: str
+ ip4:
+ description:
+ - The IPv4 address of this interface.
+ - Use the format C(192.0.2.24/24).
+ type: str
+ gw4:
+ description:
+ - The IPv4 gateway for this interface.
+ - Use the format C(192.0.2.1).
+ type: str
+ dns4:
+ description:
+ - A list of up to 3 DNS servers.
+ - IPv4 format, e.g. to add two IPv4 DNS server addresses, use C(192.0.2.53 198.51.100.53).
+ elements: str
+ type: list
+ dns4_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ ip6:
+ description:
+ - The IPv6 address of this interface.
+ - Use the format C(abbe::cafe).
+ type: str
+ gw6:
+ description:
+ - The IPv6 gateway for this interface.
+ - Use the format C(2001:db8::1).
+ type: str
+ dns6:
+ description:
+ - A list of up to 3 DNS servers.
+ - IPv6 format, e.g. to add two IPv6 DNS server addresses, use C(2001:4860:4860::8888 2001:4860:4860::8844).
+ elements: str
+ type: list
+ dns6_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ mtu:
+ description:
+ - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
+ - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
+ - This parameter defaults to C(1500) when unset.
+ type: int
+ dhcp_client_id:
+ description:
+ - DHCP Client Identifier sent to the DHCP server.
+ type: str
+ primary:
+ description:
+ - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
+ type: str
+ miimon:
+ description:
+ - This is only used with bond - miimon.
+ - This parameter defaults to C(100) when unset.
+ type: int
+ downdelay:
+ description:
+ - This is only used with bond - downdelay.
+ type: int
+ updelay:
+ description:
+ - This is only used with bond - updelay.
+ type: int
+ arp_interval:
+ description:
+ - This is only used with bond - ARP interval.
+ type: int
+ arp_ip_target:
+ description:
+ - This is only used with bond - ARP IP target.
+ type: str
+ stp:
+ description:
+ - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
+ type: bool
+ default: yes
+ priority:
+ description:
+ - This is only used with 'bridge' - sets STP priority.
+ type: int
+ default: 128
+ forwarddelay:
+ description:
+ - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+ type: int
+ default: 15
+ hellotime:
+ description:
+ - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+ type: int
+ default: 2
+ maxage:
+ description:
+ - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+ type: int
+ default: 20
+ ageingtime:
+ description:
+ - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+ type: int
+ default: 300
+ mac:
+ description:
+ - This is only used with bridge - MAC address of the bridge.
+ - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel.
+ type: str
+ slavepriority:
+ description:
+ - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+ type: int
+ default: 32
+ path_cost:
+ description:
+ - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
+ type: int
+ default: 100
+ hairpin:
+ description:
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+ frame was received on.
+ type: bool
+ default: yes
+ vlanid:
+ description:
+ - This is only used with VLAN - VLAN ID in range <0-4095>.
+ type: int
+ vlandev:
+ description:
+ - This is only used with VLAN - parent device this VLAN is on, can use ifname.
+ type: str
+ flags:
+ description:
+ - This is only used with VLAN - flags.
+ type: str
+ ingress:
+ description:
+ - This is only used with VLAN - VLAN ingress priority mapping.
+ type: str
+ egress:
+ description:
+ - This is only used with VLAN - VLAN egress priority mapping.
+ type: str
+ vxlan_id:
+ description:
+ - This is only used with VXLAN - VXLAN ID.
+ type: int
+ vxlan_remote:
+ description:
+ - This is only used with VXLAN - VXLAN destination IP address.
+ type: str
+ vxlan_local:
+ description:
+ - This is only used with VXLAN - VXLAN local IP address.
+ type: str
+ ip_tunnel_dev:
+ description:
+ - This is used with IPIP/SIT - parent device of this IPIP/SIT tunnel; can use ifname.
+ type: str
+ ip_tunnel_remote:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT destination IP address.
+ type: str
+ ip_tunnel_local:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT local IP address.
+ type: str
+'''
+
+EXAMPLES = r'''
+# These examples are using the following inventory:
+#
+# ## Directory layout:
+#
+# |_/inventory/cloud-hosts
+# | /group_vars/openstack-stage.yml
+# | /host_vars/controller-01.openstack.host.com
+# | /host_vars/controller-02.openstack.host.com
+# |_/playbook/library/nmcli.py
+# | /playbook-add.yml
+# | /playbook-del.yml
+#
+# ## inventory examples
+# ### groups_vars
+# ```yml
+# ---
+# #devops_os_define_network
+# storage_gw: "192.0.2.254"
+# external_gw: "198.51.100.254"
+# tenant_gw: "203.0.113.254"
+#
+# #Team vars
+# nmcli_team:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# nmcli_team_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #bond vars
+# nmcli_bond:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# mode: balance-rr
+# nmcli_bond_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #ethernet vars
+# nmcli_ethernet:
+# - conn_name: em1
+# ifname: em1
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: em2
+# ifname: em2
+# ip4: '{{ tenant_ip1 }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: p2p1
+# ifname: p2p1
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# - conn_name: p2p2
+# ifname: p2p2
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# ```
+#
+# ### host_vars
+# ```yml
+# ---
+# storage_ip: "192.0.2.91/23"
+# external_ip: "198.51.100.23/21"
+# tenant_ip: "203.0.113.77/23"
+# ```
+
+
+
+## playbook-add.yml example
+
+---
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Install needed network manager libs
+ ansible.builtin.package:
+ name:
+ - NetworkManager-libnm
+ - nm-connection-editor
+ - libsemanage-python
+ - policycoreutils-python
+ state: present
+
+##### Working with all cloud nodes - Teaming
+ - name: Try nmcli add team - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team }}'
+
+ - name: Try nmcli add teams-slave
+ community.general.nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team_slave }}'
+
+###### Working with all cloud nodes - Bonding
+ - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+ community.general.nmcli:
+ type: bond
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ mode: '{{ item.mode }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond }}'
+
+ - name: Try nmcli add bond-slave
+ community.general.nmcli:
+ type: bond-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
+
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+
+ - name: Add a Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: yes
+
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
+
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
+
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ ifname: '*'
+ state: present
+
+ - name: Change the property of a setting e.g. MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
+
+ - name: Add VxLan
+ community.general.nmcli:
+ type: vxlan
+ conn_name: vxlan_test1
+ vxlan_id: 16
+ vxlan_local: 192.168.1.2
+ vxlan_remote: 192.168.1.5
+
+ - name: Add ipip
+ community.general.nmcli:
+ type: ipip
+ conn_name: ipip_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add sit
+ community.general.nmcli:
+ type: sit
+ conn_name: sit_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure. The following list of status codes may be
+# returned:
+#
+# - 0 Success - indicates the operation succeeded
+# - 1 Unknown or unspecified error
+# - 2 Invalid user input, wrong nmcli invocation
+# - 3 Timeout expired (see --wait option)
+# - 4 Connection activation failed
+# - 5 Connection deactivation failed
+# - 6 Disconnecting device failed
+# - 7 Connection deletion failed
+# - 8 NetworkManager is not running
+# - 9 nmcli and NetworkManager versions mismatch
+# - 10 Connection, device, or access point does not exist.
+'''
+
+RETURN = r"""#
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+import re
+
+
+class NmcliModuleError(Exception):
+ pass
+
+
+class Nmcli(object):
+ """
+ This is the generic nmcli manipulation class that is subclassed based on platform.
+ A subclass may wish to override the following action methods:-
+ - create_connection()
+ - delete_connection()
+ - modify_connection()
+ - show_connection()
+ - up_connection()
+ - down_connection()
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.autoconnect = module.params['autoconnect']
+ self.conn_name = module.params['conn_name']
+ self.master = module.params['master']
+ self.ifname = module.params['ifname']
+ self.type = module.params['type']
+ self.ip4 = module.params['ip4']
+ self.gw4 = module.params['gw4']
+ self.dns4 = module.params['dns4']
+ self.dns4_search = module.params['dns4_search']
+ self.ip6 = module.params['ip6']
+ self.gw6 = module.params['gw6']
+ self.dns6 = module.params['dns6']
+ self.dns6_search = module.params['dns6_search']
+ self.mtu = module.params['mtu']
+ self.stp = module.params['stp']
+ self.priority = module.params['priority']
+ self.mode = module.params['mode']
+ self.miimon = module.params['miimon']
+ self.primary = module.params['primary']
+ self.downdelay = module.params['downdelay']
+ self.updelay = module.params['updelay']
+ self.arp_interval = module.params['arp_interval']
+ self.arp_ip_target = module.params['arp_ip_target']
+ self.slavepriority = module.params['slavepriority']
+ self.forwarddelay = module.params['forwarddelay']
+ self.hellotime = module.params['hellotime']
+ self.maxage = module.params['maxage']
+ self.ageingtime = module.params['ageingtime']
+ self.hairpin = module.params['hairpin']
+ self.path_cost = module.params['path_cost']
+ self.mac = module.params['mac']
+ self.vlanid = module.params['vlanid']
+ self.vlandev = module.params['vlandev']
+ self.flags = module.params['flags']
+ self.ingress = module.params['ingress']
+ self.egress = module.params['egress']
+ self.vxlan_id = module.params['vxlan_id']
+ self.vxlan_local = module.params['vxlan_local']
+ self.vxlan_remote = module.params['vxlan_remote']
+ self.ip_tunnel_dev = module.params['ip_tunnel_dev']
+ self.ip_tunnel_local = module.params['ip_tunnel_local']
+ self.ip_tunnel_remote = module.params['ip_tunnel_remote']
+ self.nmcli_bin = self.module.get_bin_path('nmcli', True)
+ self.dhcp_client_id = module.params['dhcp_client_id']
+
+ if self.ip4:
+ self.ipv4_method = 'manual'
+ else:
+ # supported values for 'ipv4.method': [auto, link-local, manual, shared, disabled]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv4_method = None
+
+ if self.ip6:
+ self.ipv6_method = 'manual'
+ else:
+ # supported values for 'ipv6.method': [ignore, auto, dhcp, link-local, manual, shared]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv6_method = None
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None):
+ if isinstance(cmd, list):
+ cmd = [to_text(item) for item in cmd]
+ else:
+ cmd = to_text(cmd)
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def connection_options(self, detect_change=False):
+ # Options common to multiple connection types.
+ options = {
+ 'connection.autoconnect': self.autoconnect,
+ }
+
+ # IP address options.
+ if self.ip_conn_type:
+ options.update({
+ 'ipv4.addresses': self.ip4,
+ 'ipv4.dhcp-client-id': self.dhcp_client_id,
+ 'ipv4.dns': self.dns4,
+ 'ipv4.dns-search': self.dns4_search,
+ 'ipv4.gateway': self.gw4,
+ 'ipv4.method': self.ipv4_method,
+ 'ipv6.addresses': self.ip6,
+ 'ipv6.dns': self.dns6,
+ 'ipv6.dns-search': self.dns6_search,
+ 'ipv6.gateway': self.gw6,
+ 'ipv6.method': self.ipv6_method,
+ })
+
+ # Layer 2 options.
+ if self.mac_conn_type:
+ options.update({self.mac_setting: self.mac})
+
+ if self.mtu_conn_type:
+ options.update({self.mtu_setting: self.mtu})
+
+ # Connections that can have a master.
+ if self.slave_conn_type:
+ options.update({
+ 'connection.master': self.master,
+ })
+
+ # Options specific to a connection type.
+ if self.type == 'bond':
+ options.update({
+ 'arp-interval': self.arp_interval,
+ 'arp-ip-target': self.arp_ip_target,
+ 'downdelay': self.downdelay,
+ 'miimon': self.miimon,
+ 'mode': self.mode,
+ 'primary': self.primary,
+ 'updelay': self.updelay,
+ })
+ elif self.type == 'bridge':
+ options.update({
+ 'bridge.ageing-time': self.ageingtime,
+ 'bridge.forward-delay': self.forwarddelay,
+ 'bridge.hello-time': self.hellotime,
+ 'bridge.max-age': self.maxage,
+ 'bridge.priority': self.priority,
+ 'bridge.stp': self.stp,
+ })
+ elif self.type == 'bridge-slave':
+ options.update({
+ 'bridge-port.path-cost': self.path_cost,
+ 'bridge-port.hairpin-mode': self.hairpin,
+ 'bridge-port.priority': self.slavepriority,
+ })
+ elif self.tunnel_conn_type:
+ options.update({
+ 'ip-tunnel.local': self.ip_tunnel_local,
+ 'ip-tunnel.mode': self.type,
+ 'ip-tunnel.parent': self.ip_tunnel_dev,
+ 'ip-tunnel.remote': self.ip_tunnel_remote,
+ })
+ elif self.type == 'vlan':
+ options.update({
+ 'vlan.id': self.vlanid,
+ 'vlan.parent': self.vlandev,
+ })
+ elif self.type == 'vxlan':
+ options.update({
+ 'vxlan.id': self.vxlan_id,
+ 'vxlan.local': self.vxlan_local,
+ 'vxlan.remote': self.vxlan_remote,
+ })
+
+ # Convert settings values based on the situation.
+ for setting, value in options.items():
+ setting_type = self.settings_type(setting)
+ convert_func = None
+ if setting_type is bool:
+ # Convert all bool options to yes/no.
+ convert_func = self.bool_to_string
+ if detect_change:
+ if setting in ('vlan.id', 'vxlan.id'):
+ # Convert VLAN/VXLAN IDs to text when detecting changes.
+ convert_func = to_text
+ elif setting == self.mtu_setting:
+ # MTU is 'auto' by default when detecting changes.
+ convert_func = self.mtu_to_string
+ elif setting_type is list:
+ # Convert lists to strings for nmcli create/modify commands.
+ convert_func = self.list_to_string
+
+ if callable(convert_func):
+ options[setting] = convert_func(options[setting])
+
+ return options
+
+ @property
+ def ip_conn_type(self):
+ return self.type in (
+ 'bond',
+ 'bridge',
+ 'ethernet',
+ 'generic',
+ 'team',
+ 'vlan',
+ )
+
+ @property
+ def mac_conn_type(self):
+ return self.type == 'bridge'
+
+ @property
+ def mac_setting(self):
+ if self.type == 'bridge':
+ return 'bridge.mac-address'
+ else:
+ return '802-3-ethernet.cloned-mac-address'
+
+ @property
+ def mtu_conn_type(self):
+ return self.type in (
+ 'ethernet',
+ 'team-slave',
+ )
+
+ @property
+ def mtu_setting(self):
+ return '802-3-ethernet.mtu'
+
+ @staticmethod
+ def mtu_to_string(mtu):
+ if not mtu:
+ return 'auto'
+ else:
+ return to_text(mtu)
+
+ @property
+ def slave_conn_type(self):
+ return self.type in (
+ 'bond-slave',
+ 'bridge-slave',
+ 'team-slave',
+ )
+
+ @property
+ def tunnel_conn_type(self):
+ return self.type in (
+ 'ipip',
+ 'sit',
+ )
+
+ @staticmethod
+ def bool_to_string(boolean):
+ if boolean:
+ return "yes"
+ else:
+ return "no"
+
+ @staticmethod
+ def list_to_string(lst):
+ return ",".join(lst or [""])
+
+ @staticmethod
+ def settings_type(setting):
+ if setting in ('bridge.stp',
+ 'bridge-port.hairpin-mode',
+ 'connection.autoconnect'):
+ return bool
+ elif setting in ('ipv4.dns',
+ 'ipv4.dns-search',
+ 'ipv6.dns',
+ 'ipv6.dns-search'):
+ return list
+ return str
+
+ def list_connection_info(self):
+ cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ raise NmcliModuleError(err)
+ return out.splitlines()
+
+ def connection_exists(self):
+ return self.conn_name in self.list_connection_info()
+
+ def down_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+ return self.execute_command(cmd)
+
+ def connection_update(self, nmcli_command):
+ if nmcli_command == 'create':
+ cmd = [self.nmcli_bin, 'con', 'add', 'type']
+ if self.tunnel_conn_type:
+ cmd.append('ip-tunnel')
+ else:
+ cmd.append(self.type)
+ cmd.append('con-name')
+ elif nmcli_command == 'modify':
+ cmd = [self.nmcli_bin, 'con', 'modify']
+ else:
+ self.module.fail_json(msg="Invalid nmcli command.")
+ cmd.append(self.conn_name)
+
+ # Use connection name as default for interface name on creation.
+ if nmcli_command == 'create' and self.ifname is None:
+ ifname = self.conn_name
+ else:
+ ifname = self.ifname
+
+ options = {
+ 'connection.interface-name': ifname,
+ }
+
+ options.update(self.connection_options())
+
+ # Constructing the command.
+ for key, value in options.items():
+ if value is not None:
+ cmd.extend([key, value])
+
+ return self.execute_command(cmd)
+
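+ # Illustration of the command built by connection_update() above (not
+ # executed; values are assumed): with type='ethernet', conn_name='my-eth1',
+ # ifname='eth1', ip4='192.0.2.100/24' and gw4='192.0.2.1' it runs roughly:
+ #   nmcli con add type ethernet con-name my-eth1
+ #       connection.interface-name eth1 connection.autoconnect yes
+ #       ipv4.addresses 192.0.2.100/24 ipv4.gateway 192.0.2.1
+ #       ipv4.method manual
+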
+ def create_connection(self):
+ status = self.connection_update('create')
+ if self.create_connection_up:
+ status = self.up_connection()
+ return status
+
+ @property
+ def create_connection_up(self):
+ if self.type in ('bond', 'ethernet'):
+ if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ elif self.type == 'team':
+ if (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ return False
+
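+ # MTU and DNS settings only take effect once the connection is activated
+ # (see the mtu documentation above), which appears to be why these types
+ # are brought up immediately after creation.
+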
+ def remove_connection(self):
+ # self.down_connection()
+ cmd = [self.nmcli_bin, 'con', 'del', self.conn_name]
+ return self.execute_command(cmd)
+
+ def modify_connection(self):
+ return self.connection_update('modify')
+
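+ # show_connection() below parses 'nmcli con show <name>' output of the
+ # form 'setting.key:value' (one pair per line): '--' marks an unset value,
+ # enums look like '0 (disabled)', and bond.options such as
+ # 'miimon=100,mode=active-backup' are flattened into separate keys.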
+ def show_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'show', self.conn_name]
+
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ raise NmcliModuleError(err)
+
+ p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$')
+
+ conn_info = dict()
+ for line in out.splitlines():
+ pair = line.split(':', 1)
+ key = pair[0].strip()
+ key_type = self.settings_type(key)
+ if key and len(pair) > 1:
+ raw_value = pair[1].lstrip()
+ if raw_value == '--':
+ conn_info[key] = None
+ elif key == 'bond.options':
+ # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax.
+ opts = raw_value.split(',')
+ for opt in opts:
+ alias_pair = opt.split('=', 1)
+ if len(alias_pair) > 1:
+ alias_key = alias_pair[0]
+ alias_value = alias_pair[1]
+ conn_info[alias_key] = alias_value
+ elif key_type == list:
+ conn_info[key] = [s.strip() for s in raw_value.split(',')]
+ else:
+ m_enum = p_enum_value.match(raw_value)
+ if m_enum is not None:
+ value = m_enum.group(1)
+ else:
+ value = raw_value
+ conn_info[key] = value
+
+ return conn_info
+
+ def _compare_conn_params(self, conn_info, options):
+ # See nmcli(1) for details
+ param_alias = {
+ 'type': 'connection.type',
+ 'con-name': 'connection.id',
+ 'autoconnect': 'connection.autoconnect',
+ 'ifname': 'connection.interface-name',
+ 'mac': self.mac_setting,
+ 'master': 'connection.master',
+ 'slave-type': 'connection.slave-type',
+ }
+
+ changed = False
+ diff_before = dict()
+ diff_after = dict()
+
+ for key, value in options.items():
+ if not value:
+ continue
+
+ if key in conn_info:
+ current_value = conn_info[key]
+ elif key in param_alias:
+ real_key = param_alias[key]
+ if real_key in conn_info:
+ current_value = conn_info[real_key]
+ else:
+ # alias parameter does not exist
+ current_value = None
+ else:
+ # parameter does not exist
+ current_value = None
+
+ if isinstance(current_value, list) and isinstance(value, list):
+ # compare values between two lists
+ if sorted(current_value) != sorted(value):
+ changed = True
+ else:
+ if current_value != to_text(value):
+ changed = True
+
+ diff_before[key] = current_value
+ diff_after[key] = value
+
+ diff = {
+ 'before': diff_before,
+ 'after': diff_after,
+ }
+ return (changed, diff)
+
+ def is_connection_changed(self):
+ options = {
+ 'connection.interface-name': self.ifname,
+ }
+ options.update(self.connection_options(detect_change=True))
+ return self._compare_conn_params(self.show_connection(), options)
+
+
+def main():
+ # Parsing argument file
+ module = AnsibleModule(
+ argument_spec=dict(
+ autoconnect=dict(type='bool', default=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ conn_name=dict(type='str', required=True),
+ master=dict(type='str'),
+ ifname=dict(type='str'),
+ type=dict(type='str',
+ choices=['bond', 'bond-slave', 'bridge', 'bridge-slave', 'ethernet', 'generic', 'ipip', 'sit', 'team', 'team-slave', 'vlan', 'vxlan']),
+ ip4=dict(type='str'),
+ gw4=dict(type='str'),
+ dns4=dict(type='list', elements='str'),
+ dns4_search=dict(type='list', elements='str'),
+ dhcp_client_id=dict(type='str'),
+ ip6=dict(type='str'),
+ gw6=dict(type='str'),
+ dns6=dict(type='list', elements='str'),
+ dns6_search=dict(type='list', elements='str'),
+ # Bond Specific vars
+ mode=dict(type='str', default='balance-rr',
+ choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
+ miimon=dict(type='int'),
+ downdelay=dict(type='int'),
+ updelay=dict(type='int'),
+ arp_interval=dict(type='int'),
+ arp_ip_target=dict(type='str'),
+ primary=dict(type='str'),
+ # general usage
+ mtu=dict(type='int'),
+ mac=dict(type='str'),
+ # bridge specific vars
+ stp=dict(type='bool', default=True),
+ priority=dict(type='int', default=128),
+ slavepriority=dict(type='int', default=32),
+ forwarddelay=dict(type='int', default=15),
+ hellotime=dict(type='int', default=2),
+ maxage=dict(type='int', default=20),
+ ageingtime=dict(type='int', default=300),
+ hairpin=dict(type='bool', default=True),
+ path_cost=dict(type='int', default=100),
+ # vlan specific vars
+ vlanid=dict(type='int'),
+ vlandev=dict(type='str'),
+ flags=dict(type='str'),
+ ingress=dict(type='str'),
+ egress=dict(type='str'),
+ # vxlan specific vars
+ vxlan_id=dict(type='int'),
+ vxlan_local=dict(type='str'),
+ vxlan_remote=dict(type='str'),
+ # ip-tunnel specific vars
+ ip_tunnel_dev=dict(type='str'),
+ ip_tunnel_local=dict(type='str'),
+ ip_tunnel_remote=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
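+ # Forcing the C locale keeps nmcli output untranslated and stable, which
+ # show_connection() relies on when parsing field names and values.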
+
+ nmcli = Nmcli(module)
+
+ (rc, out, err) = (None, '', '')
+ result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
+
+ # check for issues
+ if nmcli.conn_name is None:
+ nmcli.module.fail_json(msg="Please specify a name for the connection")
+ # team-slave checks
+ if nmcli.type == 'team-slave':
+ if nmcli.master is None:
+ nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
+ if nmcli.ifname is None:
+ nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
+
+ try:
+ if nmcli.state == 'absent':
+ if nmcli.connection_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nmcli.down_connection()
+ (rc, out, err) = nmcli.remove_connection()
+ if rc != 0:
+ module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+ elif nmcli.state == 'present':
+ if nmcli.connection_exists():
+ changed, diff = nmcli.is_connection_changed()
+ if module._diff:
+ result['diff'] = diff
+
+ if changed:
+ # modify connection (note: this function is check mode aware)
+ # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
+ result['Exists'] = 'Connections do exist so we are modifying them'
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.modify_connection()
+ else:
+ result['Exists'] = 'Connections already exist and no changes made'
+ if module.check_mode:
+ module.exit_json(changed=False, **result)
+ if not nmcli.connection_exists():
+ result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.create_connection()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+ except NmcliModuleError as e:
+ module.fail_json(name=nmcli.conn_name, msg=str(e))
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py
new file mode 100644
index 00000000..6c285797
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Launch a Nomad Job
+description:
+ - Launch a Nomad job.
+ - Stop a Nomad job.
+ - Force start a Nomad job.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+ - Name of the job used to delete, stop or start it when no job source is given.
+ - Either this or I(content) must be specified.
+ type: str
+ state:
+ description:
+ - Deploy or remove job.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ force_start:
+ description:
+ - Force the job to start.
+ type: bool
+ default: false
+ content:
+ description:
+ - Content of Nomad job.
+ - Either this or I(name) must be specified.
+ type: str
+ content_format:
+ description:
+ - Type of content of Nomad job.
+ choices: ["hcl", "json"]
+ default: hcl
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Create job
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+ timeout: 120
+
+- name: Stop job
+ community.general.nomad_job:
+ host: localhost
+ state: absent
+ name: api
+
+- name: Force job to start
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ name: api
+ timeout: 120
+ force_start: true
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ content_format=dict(choices=['hcl', 'json'], default='hcl'),
+ content=dict(type='str', default=None),
+ force_start=dict(type='bool', default=False),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ["name", "content"]
+ ],
+ required_one_of=[
+ ['name', 'content']
+ ]
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ if module.params.get('state') == "present":
+
+ if module.params.get('name') and not module.params.get('force_start'):
+ module.fail_json(msg='To start a job by name, force_start is needed')
+
+ changed = False
+ if module.params.get('content'):
+
+ if module.params.get('content_format') == 'json':
+
+ job_json = module.params.get('content')
+ try:
+ job_json = json.loads(job_json)
+ except ValueError as e:
+ module.fail_json(msg=to_native(e))
+ job = dict()
+ job['job'] = job_json
+ try:
+ job_id = job_json.get('ID')
+ if job_id is None:
+ module.fail_json(msg="Cannot retrieve job with ID None")
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if plan['Diff'].get('Type') != "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('content_format') == 'hcl':
+
+ try:
+ job_hcl = module.params.get('content')
+ job_json = nomad_client.jobs.parse(job_hcl)
+ job = dict()
+ job['job'] = job_json
+ except nomad.api.exceptions.BadRequestNomadException as err:
+ msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
+ module.fail_json(msg=to_native(msg))
+ try:
+ job_id = job_json.get('ID')
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if plan['Diff'].get('Type') != "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
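+ # Idempotency in both branches above comes from plan_job: Nomad's plan
+ # endpoint reports a Diff whose Type is the string "None" when the
+ # submitted job matches the cluster state, so register_job only runs
+ # (outside check mode) when a real difference is detected.
+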
+ if module.params.get('force_start'):
+
+ try:
+ job = dict()
+ if module.params.get('name'):
+ job_name = module.params.get('name')
+ else:
+ job_name = job_json['Name']
+ job_json = nomad_client.job.get_job(job_name)
+ if job_json['Status'] == 'running':
+ result = job_json
+ else:
+ job_json['Status'] = 'running'
+ job_json['Stop'] = False
+ job['job'] = job_json
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = nomad_client.validate.validate_job(job)
+ if result.status_code != 200:
+ module.fail_json(msg=to_native(result.text))
+ result = json.loads(result.text)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('state') == "absent":
+
+ try:
+ if module.params.get('name') is not None:
+ job_name = module.params.get('name')
+ else:
+ if module.params.get('content_format') == 'hcl':
+ job_json = nomad_client.jobs.parse(module.params.get('content'))
+ job_name = job_json['Name']
+ if module.params.get('content_format') == 'json':
+ job_json = module.params.get('content')
+ job_name = job_json['Name']
+ job = nomad_client.job.get_job(job_name)
+ if job['Status'] == 'dead':
+ changed = False
+ result = job
+ else:
+ if not module.check_mode:
+ result = nomad_client.job.deregister_job(job_name)
+ else:
+ result = job
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py
new file mode 100644
index 00000000..9e935328
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job_info
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Get Nomad Jobs info
+description:
+ - Get info for one Nomad job.
+ - List Nomad jobs.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+ - Name of the job to get info for.
+ - If not specified, lists all jobs.
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Get info for job awx
+ community.general.nomad_job_info:
+ host: localhost
+ name: awx
+ register: result
+
+- name: List Nomad jobs
+ community.general.nomad_job_info:
+ host: localhost
+ register: result
+
+'''
+
+RETURN = '''
+result:
+ description: List of dictionaries with job info
+ returned: success
+ type: list
+ sample: [
+ {
+ "Affinities": null,
+ "AllAtOnce": false,
+ "Constraints": null,
+ "ConsulToken": "",
+ "CreateIndex": 13,
+ "Datacenters": [
+ "dc1"
+ ],
+ "Dispatched": false,
+ "ID": "example",
+ "JobModifyIndex": 13,
+ "Meta": null,
+ "ModifyIndex": 13,
+ "Multiregion": null,
+ "Name": "example",
+ "Namespace": "default",
+ "NomadTokenID": "",
+ "ParameterizedJob": null,
+ "ParentID": "",
+ "Payload": null,
+ "Periodic": null,
+ "Priority": 50,
+ "Region": "global",
+ "Spreads": null,
+ "Stable": false,
+ "Status": "pending",
+ "StatusDescription": "",
+ "Stop": false,
+ "SubmitTime": 1602244370615307000,
+ "TaskGroups": [
+ {
+ "Affinities": null,
+ "Constraints": null,
+ "Count": 1,
+ "EphemeralDisk": {
+ "Migrate": false,
+ "SizeMB": 300,
+ "Sticky": false
+ },
+ "Meta": null,
+ "Migrate": {
+ "HealthCheck": "checks",
+ "HealthyDeadline": 300000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000
+ },
+ "Name": "cache",
+ "Networks": null,
+ "ReschedulePolicy": {
+ "Attempts": 0,
+ "Delay": 30000000000,
+ "DelayFunction": "exponential",
+ "Interval": 0,
+ "MaxDelay": 3600000000000,
+ "Unlimited": true
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Scaling": null,
+ "Services": null,
+ "ShutdownDelay": null,
+ "Spreads": null,
+ "StopAfterClientDisconnect": null,
+ "Tasks": [
+ {
+ "Affinities": null,
+ "Artifacts": null,
+ "CSIPluginConfig": null,
+ "Config": {
+ "image": "redis:3.2",
+ "port_map": [
+ {
+ "db": 6379.0
+ }
+ ]
+ },
+ "Constraints": null,
+ "DispatchPayload": null,
+ "Driver": "docker",
+ "Env": null,
+ "KillSignal": "",
+ "KillTimeout": 5000000000,
+ "Kind": "",
+ "Leader": false,
+ "Lifecycle": null,
+ "LogConfig": {
+ "MaxFileSizeMB": 10,
+ "MaxFiles": 10
+ },
+ "Meta": null,
+ "Name": "redis",
+ "Resources": {
+ "CPU": 500,
+ "Devices": null,
+ "DiskMB": 0,
+ "IOPS": 0,
+ "MemoryMB": 256,
+ "Networks": [
+ {
+ "CIDR": "",
+ "DNS": null,
+ "Device": "",
+ "DynamicPorts": [
+ {
+ "HostNetwork": "default",
+ "Label": "db",
+ "To": 0,
+ "Value": 0
+ }
+ ],
+ "IP": "",
+ "MBits": 10,
+ "Mode": "",
+ "ReservedPorts": null
+ }
+ ]
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Services": [
+ {
+ "AddressMode": "auto",
+ "CanaryMeta": null,
+ "CanaryTags": null,
+ "Checks": [
+ {
+ "AddressMode": "",
+ "Args": null,
+ "CheckRestart": null,
+ "Command": "",
+ "Expose": false,
+ "FailuresBeforeCritical": 0,
+ "GRPCService": "",
+ "GRPCUseTLS": false,
+ "Header": null,
+ "InitialStatus": "",
+ "Interval": 10000000000,
+ "Method": "",
+ "Name": "alive",
+ "Path": "",
+ "PortLabel": "",
+ "Protocol": "",
+ "SuccessBeforePassing": 0,
+ "TLSSkipVerify": false,
+ "TaskName": "",
+ "Timeout": 2000000000,
+ "Type": "tcp"
+ }
+ ],
+ "Connect": null,
+ "EnableTagOverride": false,
+ "Meta": null,
+ "Name": "redis-cache",
+ "PortLabel": "db",
+ "Tags": [
+ "global",
+ "cache"
+ ],
+ "TaskName": ""
+ }
+ ],
+ "ShutdownDelay": 0,
+ "Templates": null,
+ "User": "",
+ "Vault": null,
+ "VolumeMounts": null
+ }
+ ],
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "checks",
+ "HealthyDeadline": 180000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000,
+ "ProgressDeadline": 600000000000,
+ "Stagger": 30000000000
+ },
+ "Volumes": null
+ }
+ ],
+ "Type": "service",
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "",
+ "HealthyDeadline": 0,
+ "MaxParallel": 1,
+ "MinHealthyTime": 0,
+ "ProgressDeadline": 0,
+ "Stagger": 30000000000
+ },
+ "VaultNamespace": "",
+ "VaultToken": "",
+ "Version": 0
+ }
+ ]
+
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ changed = False
+ nomad_jobs = list()
+ try:
+ job_list = nomad_client.jobs.get_jobs()
+ for job in job_list:
+ nomad_jobs.append(nomad_client.job.get_job(job.get('ID')))
+ result = nomad_jobs
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
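+ # Note: jobs.get_jobs() returns only job stubs, so the loop above fetches
+ # each job's full definition individually; this costs one extra API call
+ # per job and may be slow on clusters with many jobs.
+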
+ if module.params.get('name'):
+ filtered = list()
+ try:
+ for job in result:
+ if job.get('ID') == module.params.get('name'):
+ filtered.append(job)
+ result = filtered
+ if not filtered:
+ module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py
new file mode 100644
index 00000000..0f7de471
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py
@@ -0,0 +1,537 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Thomas Caravia <taca@kadisius.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nosh
+author:
+ - "Thomas Caravia (@tacatac)"
+short_description: Manage services with nosh
+description:
+ - Control running and enabled state for system-wide or user services.
+ - BSD and Linux systems are supported.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the service to manage.
+ state:
+ type: str
+ required: false
+ choices: [ started, stopped, reset, restarted, reloaded ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ C(restarted) will always bounce the service.
+ C(reloaded) will send a SIGHUP or start the service.
+ C(reset) will start or stop the service according to whether it is
+ enabled or not.
+ enabled:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service, independently of C(*.preset) file
+ preference or running state. Mutually exclusive with I(preset). Will take
+ effect prior to I(state=reset).
+ preset:
+ required: false
+ type: bool
+ description:
+            - Enable or disable the service according to local preferences in C(*.preset) files.
+ Mutually exclusive with I(enabled). Only has an effect if set to true. Will take
+ effect prior to I(state=reset).
+ user:
+ required: false
+ default: 'no'
+ type: bool
+ description:
+ - Run system-control talking to the calling user's service manager, rather than
+ the system-wide service manager.
+requirements:
+    - A system with an active nosh service manager; see Notes for further information.
+notes:
+ - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
+'''
+
+EXAMPLES = '''
+- name: Start dnscache if not running
+ community.general.nosh: name=dnscache state=started
+
+- name: Stop mpd, if running
+ community.general.nosh: name=mpd state=stopped
+
+- name: Restart unbound or start it if not already running
+ community.general.nosh:
+ name: unbound
+ state: restarted
+
+- name: Reload fail2ban or start it if not already running
+ community.general.nosh:
+ name: fail2ban
+ state: reloaded
+
+- name: Disable nsd
+ community.general.nosh: name=nsd enabled=no
+
+- name: For package installers, apply the local C(*.preset) preference to nginx and reset its running state accordingly
+ community.general.nosh: name=nginx preset=True state=reset
+
+- name: Reboot the host if nosh is the system manager (needs at least a "wait_for*" task afterwards; not recommended as-is)
+ community.general.nosh: name=reboot state=started
+
+- name: Using conditionals with the module facts
+ tasks:
+ - name: Obtain information on tinydns service
+ community.general.nosh: name=tinydns
+ register: result
+
+ - name: Fail if service not loaded
+ ansible.builtin.fail: msg="The {{ result.name }} service is not loaded"
+ when: not result.status
+
+ - name: Fail if service is running
+ ansible.builtin.fail: msg="The {{ result.name }} service is running"
+ when: result.status and result.status['DaemontoolsEncoreState'] == "running"
+'''
+
+RETURN = '''
+name:
+ description: name used to find the service
+ returned: success
+ type: str
+ sample: "sshd"
+service_path:
+ description: resolved path for the service
+ returned: success
+ type: str
+ sample: "/var/sv/sshd"
+enabled:
+ description: whether the service is enabled at system bootstrap
+ returned: success
+ type: bool
+ sample: True
+preset:
+ description: whether the enabled status reflects the one set in the relevant C(*.preset) file
+ returned: success
+ type: bool
+  sample: False
+state:
+ description: service process run state, C(None) if the service is not loaded and will not be started
+ returned: if state option is used
+ type: str
+ sample: "reloaded"
+status:
+  description: a dictionary with the key=value pairs returned by C(system-control show-json) or C(None) if the service is not loaded
+ returned: success
+ type: complex
+ contains:
+ After:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"]
+ Before:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Conflicts:
+ description: [] # FIXME
+ returned: success
+ type: list
+      sample: []
+ DaemontoolsEncoreState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "running"
+ DaemontoolsState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "up"
+ Enabled:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: True
+ LogService:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "../cyclog@sshd"
+ MainPID:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 661
+ Paused:
+ description: [] # FIXME
+ returned: success
+ type: bool
+      sample: False
+ ReadyAfterRun:
+ description: [] # FIXME
+ returned: success
+ type: bool
+      sample: False
+ RemainAfterExit:
+ description: [] # FIXME
+ returned: success
+ type: bool
+      sample: False
+ Required-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+      sample: []
+ RestartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+      sample: 0
+ RestartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+      sample: 0
+ RestartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RestartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ RunExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+      sample: 0
+ RunExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+      sample: 0
+ RunTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RunUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1
+ StartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+      sample: 0
+ StartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StopExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+      sample: 0
+ StopExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+      sample: 0
+ StopTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StopUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Stopped-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Timestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ UTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Want:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "nothing"
+ Wanted-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"]
+ Wants:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"]
+user:
+ description: whether the user-level service manager is called
+ returned: success
+ type: bool
+ sample: False
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def run_sys_ctl(module, args):
+ sys_ctl = [module.get_bin_path('system-control', required=True)]
+ if module.params['user']:
+ sys_ctl = sys_ctl + ['--user']
+ return module.run_command(sys_ctl + args)
+
+
+def get_service_path(module, service):
+ (rc, out, err) = run_sys_ctl(module, ['find', service])
+ # fail if service not found
+ if rc != 0:
+ fail_if_missing(module, False, service, msg='host')
+ else:
+ return to_native(out).strip()
+
+
+def service_is_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path])
+ return rc == 0
+
+
+def service_is_preset_enabled(module, service_path):
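+    # 'system-control preset --dry-run' prints the action it would take
+    # (e.g. "enable /var/sv/foo"), so output starting with "enable" means
+    # the preset files would enable this service.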
+ (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path])
+ return to_native(out).strip().startswith("enable")
+
+
+def service_is_loaded(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path])
+ return rc == 0
+
+
+def get_service_status(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['show-json', service_path])
+    # will fail if the service is not loaded
+ if err is not None and err:
+ module.fail_json(msg=err)
+ else:
+ json_out = json.loads(to_native(out).strip())
+ status = json_out[service_path] # descend past service path header
+ return status
+
+
+def service_is_running(service_status):
+ return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running'])
+
+
+def handle_enabled(module, result, service_path):
+ """Enable or disable a service as needed.
+
+ - 'preset' will set the enabled state according to available preset file settings.
+ - 'enabled' will set the enabled state explicitly, independently of preset settings.
+
+ These options are set to "mutually exclusive" but the explicit 'enabled' option will
+ have priority if the check is bypassed.
+ """
+
+ # computed prior in control flow
+ preset = result['preset']
+ enabled = result['enabled']
+
+ # preset, effect only if option set to true (no reverse preset)
+ if module.params['preset']:
+ action = 'preset'
+
+ # run preset if needed
+ if preset != module.params['preset']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['preset'] = not preset
+ result['enabled'] = not enabled
+
+ # enabled/disabled state
+ if module.params['enabled'] is not None:
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['enabled'] = not enabled
+ result['preset'] = not preset
+
+
+def handle_state(module, result, service_path):
+ """Set service running state as needed.
+
+ Takes into account the fact that a service may not be loaded (no supervise directory) in
+ which case it is 'stopped' as far as the service manager is concerned. No status information
+ can be obtained and the service can only be 'started'.
+ """
+ # default to desired state, no action
+ result['state'] = module.params['state']
+ state = module.params['state']
+ action = None
+
+ # computed prior in control flow, possibly modified by handle_enabled()
+ enabled = result['enabled']
+
+ # service not loaded -> not started by manager, no status information
+ if not service_is_loaded(module, service_path):
+ if state in ['started', 'restarted', 'reloaded']:
+ action = 'start'
+ result['state'] = 'started'
+ elif state == 'reset':
+ if enabled:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ result['state'] = None
+ else:
+ result['state'] = None
+
+ # service is loaded
+ else:
+ # get status information
+ result['status'] = get_service_status(module, service_path)
+ running = service_is_running(result['status'])
+
+ if state == 'started':
+ if not running:
+ action = 'start'
+ elif state == 'stopped':
+ if running:
+ action = 'stop'
+ # reset = start/stop according to enabled status
+ elif state == 'reset':
+ if enabled is not running:
+ if running:
+ action = 'stop'
+ result['state'] = 'stopped'
+ else:
+ action = 'start'
+ result['state'] = 'started'
+ # start if not running, 'service' module constraint
+ elif state == 'restarted':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'condrestart'
+ # start if not running, 'service' module constraint
+ elif state == 'reloaded':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'hangup'
+
+ # change state as needed
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err))
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ preset=dict(type='bool'),
+ user=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['enabled', 'preset']],
+ )
+
+ service = module.params['name']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': None,
+ }
+
+ # check service can be found (or fail) and get path
+ service_path = get_service_path(module, service)
+
+ # get preliminary service facts
+ result['service_path'] = service_path
+ result['user'] = module.params['user']
+ result['enabled'] = service_is_enabled(module, service_path)
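+    # 'preset' reports whether the current enabled state matches the *.preset file preference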
+ result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path)
+
+ # set enabled state, service need not be loaded
+ if module.params['enabled'] is not None or module.params['preset']:
+ handle_enabled(module, result, service_path)
+
+ # set service running state
+ if module.params['state'] is not None:
+ handle_state(module, result, service_path)
+
+ # get final service status if possible
+ if service_is_loaded(module, service_path):
+ result['status'] = get_service_status(module, service_path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py
new file mode 100644
index 00000000..4c907ea6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: bearychat
+short_description: Send BearyChat notifications
+description:
+ - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
+ via the Incoming Robot integration.
+author: "Jiangge Zhang (@tonyseek)"
+options:
+ url:
+ type: str
+ description:
+ - BearyChat WebHook URL. This authenticates you to the bearychat
+ service. It looks like
+ C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
+ required: true
+ text:
+ type: str
+ description:
+ - Message to send.
+ markdown:
+ description:
+ - If C(yes), text will be parsed as markdown.
+ default: 'yes'
+ type: bool
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the
+ default channel selected by the I(url).
+ attachments:
+ type: list
+ elements: dict
+ description:
+      - Define a list of attachments. For more information, see
+        U(https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments).
+'''
+
+EXAMPLES = """
+- name: Send notification message via BearyChat
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via BearyChat all options
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+ markdown: no
+ channel: "#ansible"
+ attachments:
+ - title: "Ansible on {{ inventory_hostname }}"
+ text: "May the Force be with you."
+ color: "#ffffff"
+ images:
+ - http://example.com/index.png
+"""
+
+RETURN = """
+msg:
+ description: execution result
+ returned: success
+ type: str
+ sample: "OK"
+"""
+
+try:
+ from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+ HAS_URLPARSE = True
+except Exception:
+ HAS_URLPARSE = False
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def build_payload_for_bearychat(module, text, markdown, channel, attachments):
+ payload = {}
+ if text is not None:
+ payload['text'] = text
+ if markdown is not None:
+ payload['markdown'] = markdown
+ if channel is not None:
+ payload['channel'] = channel
+ if attachments is not None:
+ payload.setdefault('attachments', []).extend(
+ build_payload_for_bearychat_attachment(
+ module, item.get('title'), item.get('text'), item.get('color'),
+ item.get('images'))
+ for item in attachments)
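+    # The incoming webhook expects a form-encoded 'payload' field carrying the
+    # JSON document rather than a raw JSON request body.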
+ payload = 'payload=%s' % module.jsonify(payload)
+ return payload
+
+
+def build_payload_for_bearychat_attachment(module, title, text, color, images):
+ attachment = {}
+ if title is not None:
+ attachment['title'] = title
+ if text is not None:
+ attachment['text'] = text
+ if color is not None:
+ attachment['color'] = color
+ if images is not None:
+ target_images = attachment.setdefault('images', [])
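+        # accept either a bare URL string or a dict with a 'url' key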
+ if not isinstance(images, (list, tuple)):
+ images = [images]
+ for image in images:
+ if isinstance(image, dict) and 'url' in image:
+ image = {'url': image['url']}
+ elif hasattr(image, 'startswith') and image.startswith('http'):
+ image = {'url': image}
+ else:
+ module.fail_json(
+ msg="BearyChat doesn't have support for this kind of "
+ "attachment image")
+ target_images.append(image)
+ return attachment
+
+
+def do_notify_bearychat(module, url, payload):
+ response, info = fetch_url(module, url, data=payload)
+ if info['status'] != 200:
+ url_info = urlparse(url)
+ obscured_incoming_webhook = urlunparse(
+ (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
+ module.fail_json(
+ msg=" failed to send %s to %s: %s" % (
+ payload, obscured_incoming_webhook, info['msg']))
+
+
+def main():
+ module = AnsibleModule(argument_spec={
+ 'url': dict(type='str', required=True, no_log=True),
+ 'text': dict(type='str'),
+ 'markdown': dict(default=True, type='bool'),
+ 'channel': dict(type='str'),
+ 'attachments': dict(type='list', elements='dict'),
+ })
+
+ if not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ url = module.params['url']
+ text = module.params['text']
+ markdown = module.params['markdown']
+ channel = module.params['channel']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_bearychat(
+ module, text, markdown, channel, attachments)
+ do_notify_bearychat(module, url, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py
new file mode 100644
index 00000000..c6848238
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: campfire
+short_description: Send a message to Campfire
+description:
+ - Send a message to Campfire.
+ - Messages with newlines will result in a "Paste" message being sent.
+options:
+ subscription:
+ type: str
+ description:
+ - The subscription name to use.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - Room number to which the message should be sent.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ notify:
+ type: str
+ description:
+ - Send a notification sound before the message.
+ required: false
+ choices: ["56k", "bell", "bezos", "bueller", "clowntown",
+ "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama", "greatjob", "greyjoy",
+ "guarantee", "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins", "makeitso", "noooo",
+ "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret", "sexyback",
+ "story", "tada", "tmyk", "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+
+# informational: requirements for nodes
+requirements: [ ]
+author: "Adam Garside (@fabulops)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ msg: Task completed.
+
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ notify: loggins
+ msg: Task completed ... with feeling.
+'''
+
+try:
+ from html import escape as html_escape
+except ImportError:
+    # html.escape was added in Python 3.2; fall back to cgi.escape on older interpreters
+ import cgi
+
+ def html_escape(text, quote=True):
+ return cgi.escape(text, quote)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ subscription=dict(required=True),
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ notify=dict(required=False,
+ choices=["56k", "bell", "bezos", "bueller",
+ "clowntown", "cottoneyejoe",
+ "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama",
+ "greatjob", "greyjoy", "guarantee",
+ "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins",
+ "makeitso", "noooo", "nyan", "ohmy",
+ "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret",
+ "sexyback", "story", "tada", "tmyk",
+ "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah",
+ "yodel"]),
+ ),
+ supports_check_mode=False
+ )
+
+ subscription = module.params["subscription"]
+ token = module.params["token"]
+ room = module.params["room"]
+ msg = module.params["msg"]
+ notify = module.params["notify"]
+
+ URI = "https://%s.campfirenow.com" % subscription
+ NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
+ MSTR = "<message><body>%s</body></message>"
+ AGENT = "Ansible/1.2"
+
+ # Hack to add basic auth username and password the way fetch_url expects
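+    # (the Campfire API takes the token as the username and the literal 'X' as the password)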
+ module.params['url_username'] = token
+ module.params['url_password'] = 'X'
+
+ target_url = '%s/room/%s/speak.xml' % (URI, room)
+ headers = {'Content-Type': 'application/xml',
+ 'User-agent': AGENT}
+
+ # Send some audible notification if requested
+ if notify:
+ response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (notify, info['status']))
+
+ # Send the message
+ response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (msg, info['status']))
+
+ module.exit_json(changed=True, room=room, msg=msg, notify=notify)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py
new file mode 100644
index 00000000..13833620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jonathan Mainguy <jon@soh.re>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# basis of code taken from the ansible twilio and nexmo modules
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: catapult
+short_description: Send an SMS/MMS using the Catapult Bandwidth API
+description:
+    - Allows notifications to be sent as SMS/MMS via the Catapult Bandwidth API.
+options:
+ src:
+ type: str
+ description:
+ - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
+ required: true
+ dest:
+ type: list
+ elements: str
+ description:
+ - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
+ required: true
+ msg:
+ type: str
+ description:
+ - The contents of the text message (must be 2048 characters or less).
+ required: true
+ media:
+ type: str
+ description:
+ - For MMS messages, a media url to the location of the media to be sent with the message.
+ user_id:
+ type: str
+ description:
+      - User ID from the API account page.
+ required: true
+ api_token:
+ type: str
+ description:
+      - API token from the API account page.
+ required: true
+ api_secret:
+ type: str
+ description:
+      - API secret from the API account page.
+ required: true
+
+author: "Jonathan Mainguy (@Jmainguy)"
+notes:
+ - Will return changed even if the media url is wrong.
+ - Will return changed if the destination number is invalid.
+
+'''
+
+EXAMPLES = '''
+- name: Send a mms to multiple users
+ community.general.catapult:
+ src: "+15035555555"
+ dest:
+ - "+12525089000"
+ - "+12018994225"
+ media: "http://example.com/foobar.jpg"
+ msg: "Task is complete"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+- name: Send a sms to a single user
+ community.general.catapult:
+ src: "+15035555555"
+ dest: "+12018994225"
+ msg: "Consider yourself notified"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+'''
+
+RETURN = '''
+changed:
+  description: Whether the API accepted the message.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def send(module, src, dest, msg, media, user_id, api_token, api_secret):
+ """
+ Send the message
+ """
+ AGENT = "Ansible"
+ URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
+ data = {'from': src, 'to': dest, 'text': msg}
+ if media:
+ data['media'] = media
+
+ headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = api_token.replace('\n', '')
+ module.params['url_password'] = api_secret.replace('\n', '')
+
+ return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(required=True),
+ dest=dict(required=True, type='list', elements='str'),
+ msg=dict(required=True),
+ user_id=dict(required=True),
+ api_token=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ media=dict(default=None, required=False),
+ ),
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ msg = module.params['msg']
+ media = module.params['media']
+ user_id = module.params['user_id']
+ api_token = module.params['api_token']
+ api_secret = module.params['api_secret']
+
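+    # the Catapult API returns HTTP 201 (Created) for each message it accepts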
+ for number in dest:
+ rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
+ if info["status"] != 201:
+ body = json.loads(info["body"])
+ fail_msg = body["message"]
+ module.fail_json(msg=fail_msg)
+
+ changed = True
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Russell (@drew-russell)
+notes:
+  - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter you would like to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token required to validate the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id}}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
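+        # a lightweight authenticated endpoint, queried only to verify the token works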
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Russell (@drew-russell)
+notes:
+  - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter you would like to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token required to validate the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id}}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
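+        # a lightweight authenticated endpoint, queried only to verify the token works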
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py
new file mode 100644
index 00000000..a1842c5d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: flowdock
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+  - Send a message to a flowdock team inbox or chat using the push API (see U(https://www.flowdock.com/api/team-inbox) and U(https://www.flowdock.com/api/chat)).
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ type:
+ type: str
+ description:
+ - Whether to post to 'inbox' or 'chat'
+ required: true
+ choices: [ "inbox", "chat" ]
+ msg:
+ type: str
+ description:
+ - Content of the message
+ required: true
+ tags:
+ type: str
+ description:
+ - tags of the message, separated by commas
+ required: false
+ external_user_name:
+ type: str
+ description:
+ - (chat only - required) Name of the "user" sending the message
+ required: false
+ from_address:
+ type: str
+ description:
+ - (inbox only - required) Email address of the message sender
+ required: false
+ source:
+ type: str
+ description:
+ - (inbox only - required) Human readable identifier of the application that uses the Flowdock API
+ required: false
+ subject:
+ type: str
+ description:
+ - (inbox only - required) Subject line of the message
+ required: false
+ from_name:
+ type: str
+ description:
+ - (inbox only) Name of the message sender
+ required: false
+ reply_to:
+ type: str
+ description:
+ - (inbox only) Email address for replies
+ required: false
+ project:
+ type: str
+ description:
+ - (inbox only) Human readable identifier for more detailed message categorization
+ required: false
+ link:
+ type: str
+ description:
+ - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ source: my cool app
+ msg: test from ansible
+ subject: test subject
+
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: chat
+ token: AAAAAA
+ external_user_name: testuser
+ msg: test from ansible
+ tags: tag1,tag2,tag3
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ type=dict(required=True, choices=["inbox", "chat"]),
+ external_user_name=dict(required=False),
+ from_address=dict(required=False),
+ source=dict(required=False),
+ subject=dict(required=False),
+ from_name=dict(required=False),
+ reply_to=dict(required=False),
+ project=dict(required=False),
+ tags=dict(required=False),
+ link=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ type = module.params["type"]
+ token = module.params["token"]
+ if type == 'inbox':
+ url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
+ else:
+ url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
+
+ params = {}
+
+ # required params
+ params['content'] = module.params["msg"]
+
+ # required params for the 'chat' type
+ if module.params['external_user_name']:
+ if type == 'inbox':
+ module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
+ else:
+ params['external_user_name'] = module.params["external_user_name"]
+ elif type == 'chat':
+ module.fail_json(msg="external_user_name is required for the 'chat' type")
+
+ # required params for the 'inbox' type
+ for item in ['from_address', 'source', 'subject']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+ elif type == 'inbox':
+ module.fail_json(msg="%s is required for the 'inbox' type" % item)
+
+ # optional params
+ if module.params["tags"]:
+ params['tags'] = module.params["tags"]
+
+ # optional params for the 'inbox' type
+ for item in ['from_name', 'reply_to', 'project', 'link']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ # Send the data to Flowdock
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send msg: %s" % info['msg'])
+
+ module.exit_json(changed=True, msg=module.params["msg"])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py
new file mode 100644
index 00000000..c1816e63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: grove
+short_description: Sends a notification to a grove.io channel
+description:
+ - The C(grove) module sends a message for a service to a Grove.io
+ channel.
+options:
+ channel_token:
+ type: str
+ description:
+ - Token of the channel to post to.
+ required: true
+ service:
+ type: str
+ description:
+ - Name of the service (displayed as the "user" in the message)
+ required: false
+ default: ansible
+ message:
+ type: str
+ description:
+ - Message content
+ required: true
+ url:
+ type: str
+ description:
+ - Service URL for the web client
+ required: false
+ icon_url:
+ type: str
+ description:
+ - Icon for the service
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+author: "Jonas Pfenniger (@zimbatm)"
+'''
+
+EXAMPLES = '''
+- name: Sends a notification to a grove.io channel
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message: 'deployed {{ target }}'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+BASE_URL = 'https://grove.io/api/notice/%s/'
+
+# ==============================================================
+# do_notify_grove
+
+
+def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
+ my_url = BASE_URL % (channel_token,)
+
+ my_data = dict(service=service, message=message)
+ if url is not None:
+ my_data['url'] = url
+ if icon_url is not None:
+ my_data['icon_url'] = icon_url
+
+ data = urlencode(my_data)
+ response, info = fetch_url(module, my_url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send notification: %s" % info['msg'])
+
+# ==============================================================
+# main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ channel_token=dict(type='str', required=True, no_log=True),
+ message=dict(type='str', required=True),
+ service=dict(type='str', default='ansible'),
+ url=dict(type='str', default=None),
+ icon_url=dict(type='str', default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ channel_token = module.params['channel_token']
+ service = module.params['service']
+ message = module.params['message']
+ url = module.params['url']
+ icon_url = module.params['icon_url']
+
+ do_notify_grove(module, channel_token, service, message, url, icon_url)
+
+ # Mission complete
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py
new file mode 100644
index 00000000..06c9fca4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hipchat
+short_description: Send a message to Hipchat
+description:
+ - Send a message to a Hipchat room, with options to control the formatting.
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - ID or name of the room.
+ required: true
+ msg_from:
+ type: str
+ description:
+ - Name the message will appear to be sent from. Max length is 15
+ characters - above this it will be truncated.
+ default: Ansible
+ aliases: [from]
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ color:
+ type: str
+ description:
+ - Background color for the message.
+ default: yellow
+ choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
+ msg_format:
+ type: str
+ description:
+ - Message format.
+ default: text
+ choices: [ "text", "html" ]
+ notify:
+ description:
+ - If true, a notification will be triggered for users in the room.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ api:
+ type: str
+ description:
+ - API url if using a self-hosted hipchat server. For Hipchat API version
+ 2 use the default URI with C(/v2) instead of C(/v1).
+ default: 'https://api.hipchat.com/v1'
+
+author:
+- Shirou Wakayama (@shirou)
+- Paul Bourdel (@pb8226)
+'''
+
+EXAMPLES = '''
+- name: Send a message to a Hipchat room
+ community.general.hipchat:
+ room: notif
+ msg: Ansible task finished
+
+- name: Send a message to a Hipchat room using Hipchat API version 2
+ community.general.hipchat:
+ api: https://api.hipchat.com/v2/
+ token: OAUTH2_TOKEN
+ room: notify
+ msg: Ansible task finished
+'''
+
+# ===========================================
+# HipChat module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+DEFAULT_URI = "https://api.hipchat.com/v1"
+
+MSG_URI_V1 = "/rooms/message"
+
+NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
+
+
+def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=MSG_URI_V1):
+ '''sending message to hipchat v1 server'''
+
+ params = {}
+ params['room_id'] = room
+ params['from'] = msg_from[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['api'] = api
+ params['notify'] = int(notify)
+
+ url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
+ data = urlencode(params)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
+ '''sending message to hipchat v2 server'''
+
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+
+ body = dict()
+ body['message'] = msg
+ body['color'] = color
+ body['message_format'] = msg_format
+ body['notify'] = notify
+
+ POST_URL = api + NOTIFY_URI_V2
+
+ url = POST_URL.replace('{id_or_name}', pathname2url(room))
+ data = json.dumps(body)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+
+ # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
+ # 204 to be the expected result code.
+ if info['status'] in [200, 204]:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ msg_from=dict(default="Ansible", aliases=['from']),
+ color=dict(default="yellow", choices=["yellow", "red", "green",
+ "purple", "gray", "random"]),
+ msg_format=dict(default="text", choices=["text", "html"]),
+ notify=dict(default=True, type='bool'),
+ validate_certs=dict(default=True, type='bool'),
+ api=dict(default=DEFAULT_URI),
+ ),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ room = str(module.params["room"])
+ msg = module.params["msg"]
+ msg_from = module.params["msg_from"]
+ color = module.params["color"]
+ msg_format = module.params["msg_format"]
+ notify = module.params["notify"]
+ api = module.params["api"]
+
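+    # Dispatch on the configured endpoint: URIs containing '/v2' use the v2
+    # notification API; anything else falls back to the v1 message API.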
+ try:
+        if '/v2' in api:
+ send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ else:
+ send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py
new file mode 100644
index 00000000..1c050fc1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: irc
+short_description: Send a message to an IRC channel or a nick
+description:
+ - Send a message to an IRC channel or a nick. This is a very simplistic implementation.
+options:
+ server:
+ type: str
+ description:
+ - IRC server name/address
+ default: localhost
+ port:
+ type: int
+ description:
+ - IRC server port number
+ default: 6667
+ nick:
+ type: str
+ description:
+ - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
+ default: ansible
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ topic:
+ type: str
+ description:
+ - Set the channel topic
+ color:
+ type: str
+ description:
+      - Text color for the message. ("none" is a valid option in 1.6 or later; in 1.6 and prior the default color was black, not "none".)
+        Added 11 more colors in version 2.0.
+ default: "none"
+ choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray", "light_gray"]
+ aliases: [colour]
+ channel:
+ type: str
+ description:
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+ nick_to:
+ type: list
+ elements: str
+ description:
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ key:
+ type: str
+ description:
+ - Channel key
+ passwd:
+ type: str
+ description:
+ - Server password
+ timeout:
+ type: int
+ description:
+      - Timeout to use while waiting for successful registration and join
+        messages; this prevents an endless loop.
+ default: 30
+ use_ssl:
+ description:
+ - Designates whether TLS/SSL should be used when connecting to the IRC server
+ type: bool
+ default: 'no'
+ part:
+ description:
+ - Designates whether user should part from channel after sending message or not.
+ Useful for when using a faux bot and not wanting join/parts between messages.
+ type: bool
+ default: 'yes'
+ style:
+ type: str
+ description:
+      - Text style for the message. Note that italic does not work on some clients.
+ choices: [ "bold", "underline", "reverse", "italic", "none" ]
+ default: none
+
+# informational: requirements for nodes
+requirements: [ socket ]
+author:
+ - "Jan-Piet Mens (@jpmens)"
+ - "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to an IRC channel from nick ansible
+ community.general.irc:
+ server: irc.example.net
+    channel: '#t1'
+ msg: Hello world
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ nick_to:
+ - nick1
+ - nick2
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+'''
+
+# ===========================================
+# IRC module support methods.
+#
+
+import re
+import socket
+import ssl
+import time
+import traceback
+
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(msg, server='localhost', port=6667, channel=None, nick_to=None, key=None, topic=None,
+ nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+ '''send message to IRC'''
+ nick_to = [] if nick_to is None else nick_to
+
+ colornumbers = {
+ 'white': "00",
+ 'black': "01",
+ 'blue': "02",
+ 'green': "03",
+ 'red': "04",
+ 'brown': "05",
+ 'purple': "06",
+ 'orange': "07",
+ 'yellow': "08",
+ 'light_green': "09",
+ 'teal': "10",
+ 'light_cyan': "11",
+ 'light_blue': "12",
+ 'pink': "13",
+ 'gray': "14",
+ 'light_gray': "15",
+ }
+
+ stylechoices = {
+ 'bold': "\x02",
+ 'underline': "\x1F",
+ 'reverse': "\x16",
+ 'italic': "\x1D",
+ }
+
+ try:
+ styletext = stylechoices[style]
+ except Exception:
+ styletext = ""
+
+ try:
+ colornumber = colornumbers[color]
+ colortext = "\x03" + colornumber
+ except Exception:
+ colortext = ""
+
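+    # mIRC-style formatting: an optional style control byte (e.g. \x02 for
+    # bold) and/or a color prefix (\x03 followed by a two-digit color number)
+    # are prepended to the message body.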
+ message = styletext + colortext + msg
+
+ irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if use_ssl:
+ irc = ssl.wrap_socket(irc)
+ irc.connect((server, int(port)))
+
+ if passwd:
+ irc.send(to_bytes('PASS %s\r\n' % passwd))
+ irc.send(to_bytes('NICK %s\r\n' % nick))
+ irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
+ motd = ''
+ start = time.time()
+    while True:
+ motd += to_native(irc.recv(1024))
+ # The server might send back a shorter nick than we specified (due to NICKLEN),
+ # so grab that and use it from now on (assuming we find the 00[1-4] response).
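+        # e.g. ":irc.example.net 001 ansible :Welcome ..." yields nick 'ansible'.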
+ match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+ if match:
+ nick = match.group('nick')
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC server welcome response')
+ time.sleep(0.5)
+
+ if channel:
+ if key:
+ irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
+ else:
+ irc.send(to_bytes('JOIN %s\r\n' % channel))
+
+ join = ''
+ start = time.time()
+        while True:
+ join += to_native(irc.recv(1024))
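+            # 366 (RPL_ENDOFNAMES) marks the end of the JOIN/NAMES burst,
+            # e.g. ":irc.example.net 366 ansible #t1 :End of /NAMES list."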
+ if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC JOIN response')
+ time.sleep(0.5)
+
+ if topic is not None:
+ irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
+ time.sleep(1)
+
+ if nick_to:
+ for nick in nick_to:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
+ if channel:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
+ time.sleep(1)
+ if part:
+ if channel:
+ irc.send(to_bytes('PART %s\r\n' % channel))
+ irc.send(to_bytes('QUIT\r\n'))
+ time.sleep(1)
+ irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(type='int', default=6667),
+ nick=dict(default='ansible'),
+ nick_to=dict(required=False, type='list', elements='str'),
+ msg=dict(required=True),
+ color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
+ style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+ channel=dict(required=False),
+ key=dict(no_log=True),
+ topic=dict(),
+ passwd=dict(no_log=True),
+ timeout=dict(type='int', default=30),
+ part=dict(type='bool', default=True),
+ use_ssl=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ required_one_of=[['channel', 'nick_to']]
+ )
+
+ server = module.params["server"]
+ port = module.params["port"]
+ nick = module.params["nick"]
+ nick_to = module.params["nick_to"]
+ msg = module.params["msg"]
+ color = module.params["color"]
+ channel = module.params["channel"]
+ topic = module.params["topic"]
+ if topic and not channel:
+ module.fail_json(msg="When topic is specified, a channel is required.")
+ key = module.params["key"]
+ passwd = module.params["passwd"]
+ timeout = module.params["timeout"]
+ use_ssl = module.params["use_ssl"]
+ part = module.params["part"]
+ style = module.params["style"]
+
+ try:
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ except Exception as e:
+ module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, channel=channel, nick=nick,
+ msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py
new file mode 100644
index 00000000..68e2c593
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jabber
+short_description: Send a message to jabber user or chat room
+description:
+  - Send a message to a Jabber user or chat room.
+options:
+ user:
+ type: str
+ description:
+ - User as which to connect
+ required: true
+ password:
+ type: str
+ description:
+ - password for user to connect
+ required: true
+ to:
+ type: str
+ description:
+      - User ID or name of the room; when using a room, append a slash and your nick.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ host:
+ type: str
+ description:
+      - Host to connect to; overrides the host derived from the user JID.
+ port:
+ type: int
+ description:
+ - port to connect to, overrides default
+ default: 5222
+ encoding:
+ type: str
+ description:
+ - message encoding
+
+# informational: requirements for nodes
+requirements:
+ - python xmpp (xmpppy)
+author: "Brian Coca (@bcoca)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to a user
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: friend@example.net
+ msg: Ansible task finished
+
+- name: Send a message to a room
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: mychaps@conference.example.net/ansiblebot
+ msg: Ansible task finished
+
+- name: Send a message, specifying the host and port
+ community.general.jabber:
+ user: mybot@example.net
+ host: talk.example.net
+ port: 5223
+ password: secret
+ to: mychaps@example.net
+ msg: Ansible task finished
+'''
+
+import time
+import traceback
+
+HAS_XMPP = True
+XMPP_IMP_ERR = None
+try:
+ import xmpp
+except ImportError:
+ XMPP_IMP_ERR = traceback.format_exc()
+ HAS_XMPP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ password=dict(required=True, no_log=True),
+ to=dict(required=True),
+ msg=dict(required=True),
+ host=dict(required=False),
+ port=dict(required=False, default=5222, type='int'),
+ encoding=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_XMPP:
+ module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR)
+
+ jid = xmpp.JID(module.params['user'])
+ user = jid.getNode()
+ server = jid.getDomain()
+ port = module.params['port']
+ password = module.params['password']
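+    # 'to' may carry a room nick after a slash, as in the examples above,
+    # e.g. "mychaps@conference.example.net/ansiblebot" -> room + nick.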
+ try:
+ to, nick = module.params['to'].split('/', 1)
+ except ValueError:
+ to, nick = module.params['to'], None
+
+ if module.params['host']:
+ host = module.params['host']
+ else:
+ host = server
+ if module.params['encoding']:
+ xmpp.simplexml.ENCODING = module.params['encoding']
+
+ msg = xmpp.protocol.Message(body=module.params['msg'])
+
+ try:
+ conn = xmpp.Client(server, debug=[])
+ if not conn.connect(server=(host, port)):
+ module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
+ if not conn.auth(user, password, 'Ansible'):
+ module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
+ # some old servers require this, also the sleep following send
+ conn.sendInitPresence(requestRoster=0)
+
+ if nick: # sending to room instead of user, need to join
+ msg.setType('groupchat')
+ msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
+ join = xmpp.Presence(to=module.params['to'])
+ join.setTag('x', namespace='http://jabber.org/protocol/muc')
+ conn.send(join)
+ time.sleep(1)
+ else:
+ msg.setType('chat')
+
+ msg.setTo(to)
+ if not module.check_mode:
+ conn.send(msg)
+ time.sleep(1)
+ conn.disconnect()
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py
new file mode 100644
index 00000000..59e0f325
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries_msg
+short_description: Send a message to logentries
+description:
+ - Send a message to logentries
+requirements:
+ - "python >= 2.6"
+options:
+ token:
+ type: str
+ description:
+ - Log token.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ api:
+ type: str
+ description:
+ - API endpoint
+ default: data.logentries.com
+ port:
+ type: int
+ description:
+ - API endpoint port
+ default: 80
+author: "Jimmy Tang (@jcftang) <jimmy_tang@rapid7.com>"
+'''
+
+RETURN = '''# '''
+
+EXAMPLES = '''
+- name: Send a message to logentries
+ community.general.logentries_msg:
+    token: 00000000-0000-0000-0000-000000000000
+    msg: "{{ ansible_hostname }}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def send_msg(module, token, msg, api, port):
+
+ message = "{0} {1}\n".format(token, msg)
+
+ api_ip = socket.gethostbyname(api)
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((api_ip, port))
+ try:
+ if not module.check_mode:
+            s.send(to_bytes(message))  # sockets require bytes on Python 3
+ except Exception as e:
+ module.fail_json(msg="failed to send message, msg=%s" % e)
+ s.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=True),
+ api=dict(type='str', default="data.logentries.com"),
+ port=dict(type='int', default=80)),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ msg = module.params["msg"]
+ api = module.params["api"]
+ port = module.params["port"]
+
+ changed = False
+ try:
+ send_msg(module, token, msg, api, port)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % e)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py
new file mode 100644
index 00000000..574f8478
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+- Dag Wieers (@dagwieers)
+module: mail
+short_description: Send an email
+description:
+- This module is useful for sending emails from playbooks.
+- One may wonder why automate sending emails? In complex environments
+ there are from time to time processes that cannot be automated, either
+ because you lack the authority to make it so, or because not everyone
+ agrees to a common approach.
+- If you cannot automate a specific step, but the step is non-blocking,
+ sending out an email to the responsible party to make them perform their
+ part of the bargain is an elegant way to put the responsibility in
+ someone else's lap.
+- Of course sending out a mail can be equally useful as a way to notify
+ one or more people in a team that a specific action has been
+ (successfully) taken.
+options:
+ sender:
+ description:
+ - The email-address the mail is sent from. May contain address and phrase.
+ type: str
+ default: root
+ aliases: [ from ]
+ to:
+ description:
+ - The email-address(es) the mail is being sent to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ default: root
+ aliases: [ recipients ]
+ cc:
+ description:
+ - The email-address(es) the mail is being copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ bcc:
+ description:
+ - The email-address(es) the mail is being 'blind' copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ subject:
+ description:
+ - The subject of the email being sent.
+ required: yes
+ type: str
+ aliases: [ msg ]
+ body:
+ description:
+ - The body of the email being sent.
+ type: str
+ username:
+ description:
+ - If SMTP requires username.
+ type: str
+ password:
+ description:
+ - If SMTP requires password.
+ type: str
+ host:
+ description:
+ - The mail server.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The mail server port.
+    - This must be a valid integer between 1 and 65535.
+ type: int
+ default: 25
+ attach:
+ description:
+ - A list of pathnames of files to attach to the message.
+ - Attached files will have their content-type set to C(application/octet-stream).
+ type: list
+ default: []
+ headers:
+ description:
+ - A list of headers which should be added to the message.
+ - Each individual header is specified as C(header=value) (see example below).
+ type: list
+ default: []
+ charset:
+ description:
+ - The character set of email being sent.
+ type: str
+ default: utf-8
+ subtype:
+ description:
+ - The minor mime type, can be either C(plain) or C(html).
+ - The major type is always C(text).
+ type: str
+ choices: [ html, plain ]
+ default: plain
+ secure:
+ description:
+ - If C(always), the connection will only send email if the connection is Encrypted.
+ If the server doesn't accept the encrypted connection it will fail.
+ - If C(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send.
+ - If C(never), the connection will not attempt to setup a secure SSL/TLS session, before sending
+ - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending.
+ If it is unable to do so it will fail.
+ type: str
+ choices: [ always, never, starttls, try ]
+ default: try
+ timeout:
+ description:
+ - Sets the timeout in seconds for connection attempts.
+ type: int
+ default: 20
+'''
+
+EXAMPLES = r'''
+- name: Example playbook sending mail to root
+ community.general.mail:
+ subject: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Sending an e-mail using Gmail SMTP servers
+ community.general.mail:
+ host: smtp.gmail.com
+ port: 587
+ username: username@gmail.com
+ password: mysecret
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Send e-mail to a bunch of users, attaching files
+ community.general.mail:
+ host: 127.0.0.1
+ port: 2025
+ subject: Ansible-report
+ body: Hello, this is an e-mail. I hope you like it ;-)
+ from: jane@example.net (Jane Jolie)
+ to:
+ - John Doe <j.d@example.org>
+ - Suzie Something <sue@example.com>
+ cc: Charlie Root <root@localhost>
+ attach:
+ - /etc/group
+ - /tmp/avatar2.png
+ headers:
+ - Reply-To=john@example.com
+ - X-Special="Something or other"
+ charset: us-ascii
+ delegate_to: localhost
+
+- name: Sending an e-mail using the remote machine, not the Ansible controller node
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+
+- name: Sending an e-mail using Legacy SSL to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: always
+
+- name: Sending an e-mail using StartTLS to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: starttls
+'''
+
+import os
+import smtplib
+import ssl
+import traceback
+from email import encoders
+from email.utils import parseaddr, formataddr, formatdate
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.header import Header
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=25),
+ sender=dict(type='str', default='root', aliases=['from']),
+ to=dict(type='list', default=['root'], aliases=['recipients']),
+ cc=dict(type='list', default=[]),
+ bcc=dict(type='list', default=[]),
+ subject=dict(type='str', required=True, aliases=['msg']),
+ body=dict(type='str'),
+ attach=dict(type='list', default=[]),
+ headers=dict(type='list', default=[]),
+ charset=dict(type='str', default='utf-8'),
+ subtype=dict(type='str', default='plain', choices=['html', 'plain']),
+ secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
+ timeout=dict(type='int', default=20),
+ ),
+ required_together=[['password', 'username']],
+ )
+
+ username = module.params.get('username')
+ password = module.params.get('password')
+ host = module.params.get('host')
+ port = module.params.get('port')
+ sender = module.params.get('sender')
+ recipients = module.params.get('to')
+ copies = module.params.get('cc')
+ blindcopies = module.params.get('bcc')
+ subject = module.params.get('subject')
+ body = module.params.get('body')
+ attach_files = module.params.get('attach')
+ headers = module.params.get('headers')
+ charset = module.params.get('charset')
+ subtype = module.params.get('subtype')
+ secure = module.params.get('secure')
+ timeout = module.params.get('timeout')
+
+ code = 0
+ secure_state = False
+ sender_phrase, sender_addr = parseaddr(sender)
+
+ if not body:
+ body = subject
+
+ try:
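+        # Every mode except 'never' first attempts an implicit TLS (SMTPS)
+        # connection; only 'always' fails hard if that is not possible.
+        # 'starttls' and 'try' may still upgrade a plain session via STARTTLS below.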
+ if secure != 'never':
+ try:
+ if PY3:
+ smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP_SSL(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+ secure_state = True
+ except ssl.SSLError as e:
+ if secure == 'always':
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ except Exception:
+ pass
+
+ if not secure_state:
+ if PY3:
+ smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+
+ except smtplib.SMTPException as e:
+        module.fail_json(rc=1, msg='Unable to connect to %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+        module.fail_json(rc=1, msg='EHLO failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ if int(code) > 0:
+ if not secure_state and secure in ('starttls', 'try'):
+ if smtp.has_extn('STARTTLS'):
+ try:
+ smtp.starttls()
+ secure_state = True
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+                module.fail_json(rc=1, msg='EHLO failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+ else:
+ if secure == 'starttls':
+ module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
+
+ if username and password:
+ if smtp.has_extn('AUTH'):
+ try:
+ smtp.login(username, password)
+ except smtplib.SMTPAuthenticationError:
+ module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
+ except smtplib.SMTPException:
+                module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
+ else:
+ module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port))
+
+ if not secure_state and (username and password):
+        module.warn('Username and password were sent without encryption')
+
+ msg = MIMEMultipart(_charset=charset)
+ msg['From'] = formataddr((sender_phrase, sender_addr))
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = Header(subject, charset)
+ msg.preamble = "Multipart message"
+
+ for header in headers:
+ # NOTE: Backward compatible with old syntax using '|' as delimiter
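+        # e.g. "Reply-To=john@example.com|X-Special=Something" yields two headers.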
+ for hdr in [x.strip() for x in header.split('|')]:
+ try:
+ h_key, h_val = hdr.split('=')
+ h_val = to_native(Header(h_val, charset))
+ msg.add_header(h_key, h_val)
+ except Exception:
+ module.warn("Skipping header '%s', unable to parse" % hdr)
+
+ if 'X-Mailer' not in msg:
+ msg.add_header('X-Mailer', 'Ansible mail module')
+
+ addr_list = []
+ for addr in [x.strip() for x in blindcopies]:
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+
+ to_list = []
+ for addr in [x.strip() for x in recipients]:
+ to_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['To'] = ", ".join(to_list)
+
+ cc_list = []
+ for addr in [x.strip() for x in copies]:
+ cc_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['Cc'] = ", ".join(cc_list)
+
+ part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+ msg.attach(part)
+
+    # NOTE: Backward compatibility with the old syntax using space as delimiter
+    #       is not retained; that syntax broke filenames containing spaces.
+ for filename in attach_files:
+ try:
+ part = MIMEBase('application', 'octet-stream')
+ with open(filename, 'rb') as fp:
+ part.set_payload(fp.read())
+ encoders.encode_base64(part)
+ part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename))
+ msg.attach(part)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" %
+ (filename, to_native(e)), exception=traceback.format_exc())
+
+ composed = msg.as_string()
+
+ try:
+ result = smtp.sendmail(sender_addr, set(addr_list), composed)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
+ (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc())
+
+ smtp.quit()
+
+ if result:
+ for key in result:
+ module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
+ module.exit_json(msg='Failed to send mail to at least one recipient', result=result)
+
+ module.exit_json(msg='Mail sent successfully', result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py
new file mode 100644
index 00000000..d94ed2b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: "Jan Christian Grünhage (@jcgruenhage)"
+module: matrix
+short_description: Send notifications to matrix
+description:
+  - This module sends HTML-formatted notifications to Matrix rooms.
+options:
+ msg_plain:
+ type: str
+ description:
+ - Plain text form of the message to send to matrix, usually markdown
+ required: true
+ msg_html:
+ type: str
+ description:
+ - HTML form of the message to send to matrix
+ required: true
+ room_id:
+ type: str
+ description:
+ - ID of the room to send the notification to
+ required: true
+ hs_url:
+ type: str
+ description:
+ - URL of the homeserver, where the CS-API is reachable
+ required: true
+ token:
+ type: str
+ description:
+ - Authentication token for the API call. If provided, user_id and password are not required
+ user_id:
+ type: str
+ description:
+ - The user id of the user
+ password:
+ type: str
+ description:
+ - The password to log in with
+requirements:
+ - matrix-client (Python library)
+'''
+
+EXAMPLES = '''
+- name: Send matrix notification with token
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ user_id: "ansible_notification_bot"
+ password: "{{ matrix_auth_password }}"
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+ from matrix_client.client import MatrixClient
+except ImportError:
+ MATRIX_IMP_ERR = traceback.format_exc()
+ matrix_found = False
+else:
+ matrix_found = True
+
+
+def run_module():
+ module_args = dict(
+ msg_plain=dict(type='str', required=True),
+ msg_html=dict(type='str', required=True),
+ room_id=dict(type='str', required=True),
+ hs_url=dict(type='str', required=True),
+ token=dict(type='str', required=False, no_log=True),
+ user_id=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ )
+
+ result = dict(
+ changed=False,
+ message=''
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ mutually_exclusive=[['password', 'token']],
+ required_one_of=[['password', 'token']],
+ required_together=[['user_id', 'password']],
+ supports_check_mode=True
+ )
+
+ if not matrix_found:
+ module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+    if module.check_mode:
+        module.exit_json(**result)
+
+ # create a client object
+ client = MatrixClient(module.params['hs_url'])
+ if module.params['token'] is not None:
+ client.api.token = module.params['token']
+ else:
+ client.login(module.params['user_id'], module.params['password'], sync=False)
+
+ # make sure we are in a given room and return a room object for it
+ room = client.join_room(module.params['room_id'])
+ # send an html formatted messages
+ room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py
new file mode 100644
index 00000000..579cfa5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Benjamin Jolivot <bjolivot@gmail.com>
+# Inspired by slack module :
+# # (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# # (c) 2016, René Moser <mail@renemoser.net>
+# # (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# # (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+ - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)"
+options:
+ url:
+ type: str
+ description:
+      - Mattermost URL (for example, http://mattermost.yourcompany.com).
+ required: true
+ api_key:
+ type: str
+ description:
+      - Mattermost webhook API key. Log into your Mattermost site and go to
+        Menu -> Integrations -> Incoming Webhooks -> Add Incoming Webhook.
+        This gives you the full URL; I(api_key) is its last part, as in
+        http://mattermost.example.com/hooks/C(API_KEY).
+ required: true
+ text:
+ type: str
+ description:
+ - Text to send. Note that the module does not handle escaping characters.
+ required: true
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+ username:
+ type: str
+ description:
+      - This is the sender of the message (Username Override must be enabled by the Mattermost admin; see the Mattermost documentation).
+ default: Ansible
+ icon_url:
+ type: str
+ description:
+ - Url for the message sender's icon.
+ default: https://www.ansible.com/favicon.ico
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: yes
+ type: bool
+'''
+
+EXAMPLES = """
+- name: Send notification message via Mattermost
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+
+- name: Send notification message via Mattermost all options
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+ channel: notifications
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+"""
+
+RETURN = '''
+payload:
+ description: Mattermost payload
+ returned: success
+ type: str
+webhook_url:
+ description: URL the webhook is sent to
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ api_key=dict(type='str', required=True, no_log=True),
+ text=dict(type='str', required=True),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+ # init return dict
+ result = dict(changed=False, msg="OK")
+
+ # define webhook
+ webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+ result['webhook_url'] = webhook_url
+
+ # define payload
+ payload = {}
+ for param in ['text', 'channel', 'username', 'icon_url']:
+ if module.params[param] is not None:
+ payload[param] = module.params[param]
+
+ payload = module.jsonify(payload)
+ result['payload'] = payload
+
+ # http headers
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+
+    # notes:
+    # Nothing is sent in check mode; the task passes even if the server is
+    # down and/or the token is invalid, as no good way to verify the webhook
+    # without sending has been found yet.
+
+ # send request if not in test mode
+ if module.check_mode is False:
+ response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+ # something's wrong
+ if info['status'] != 200:
+ # some problem
+ result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+ module.fail_json(**result)
+
+ # Looks good
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py
new file mode 100644
index 00000000..0551ab20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mqtt
+short_description: Publish a message on an MQTT topic for the IoT
+description:
+ - Publish a message on an MQTT topic.
+options:
+ server:
+ type: str
+ description:
+ - MQTT broker address/name
+ default: localhost
+ port:
+ type: int
+ description:
+ - MQTT broker port number
+ default: 1883
+ username:
+ type: str
+ description:
+ - Username to authenticate against the broker.
+ password:
+ type: str
+ description:
+ - Password for C(username) to authenticate against the broker.
+ client_id:
+ type: str
+ description:
+ - MQTT client identifier
+ - If not specified, a value C(hostname + pid) will be used.
+ topic:
+ type: str
+ description:
+ - MQTT topic name
+ required: true
+ payload:
+ type: str
+ description:
+ - Payload. The special string C("None") may be used to send a NULL
+ (i.e. empty) payload which is useful to simply notify with the I(topic)
+ or to clear previously retained messages.
+ required: true
+ qos:
+ type: str
+ description:
+ - QoS (Quality of Service)
+ default: "0"
+ choices: [ "0", "1", "2" ]
+ retain:
+ description:
+ - Setting this flag causes the broker to retain (i.e. keep) the message so that
+        applications that subsequently subscribe to the topic can receive the last
+ retained message immediately.
+ type: bool
+ default: 'no'
+ ca_cert:
+ type: path
+ description:
+ - The path to the Certificate Authority certificate files that are to be
+ treated as trusted by this client. If this is the only option given
+ then the client will operate in a similar manner to a web browser. That
+ is to say it will require the broker to have a certificate signed by the
+ Certificate Authorities in ca_certs and will communicate using TLS v1,
+ but will not attempt any form of authentication. This provides basic
+ network encryption but may not be sufficient depending on how the broker
+ is configured.
+ aliases: [ ca_certs ]
+ client_cert:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client certificate. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ certfile ]
+ client_key:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client private key. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ keyfile ]
+ tls_version:
+ description:
+ - Specifies the version of the SSL/TLS protocol to be used.
+ - By default (if the python version supports it) the highest TLS version is
+ detected. If unavailable, TLS v1 is used.
+ type: str
+ choices:
+ - tlsv1.1
+ - tlsv1.2
+requirements: [ paho-mqtt ]
+notes:
+ - This module requires a connection to an MQTT broker such as Mosquitto
+ U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)).
+author: "Jan-Piet Mens (@jpmens)"
+'''
+
+EXAMPLES = '''
+- name: Publish a message on an MQTT topic
+ community.general.mqtt:
+ topic: 'service/ansible/{{ ansible_hostname }}'
+ payload: 'Hello at {{ ansible_date_time.iso8601 }}'
+ qos: 0
+ retain: False
+ client_id: ans001
+ delegate_to: localhost
+'''
+
+# ===========================================
+# MQTT module support methods.
+#
+
+import os
+import ssl
+import traceback
+import platform
+from distutils.version import LooseVersion
+
+HAS_PAHOMQTT = True
+PAHOMQTT_IMP_ERR = None
+try:
+ import socket
+ import paho.mqtt.publish as mqtt
+except ImportError:
+ PAHOMQTT_IMP_ERR = traceback.format_exc()
+ HAS_PAHOMQTT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ tls_map = {}
+
+ try:
+ tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2
+ except AttributeError:
+ pass
+
+ try:
+ tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1
+ except AttributeError:
+ pass
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(default=1883, type='int'),
+ topic=dict(required=True),
+ payload=dict(required=True),
+ client_id=dict(default=None),
+ qos=dict(default="0", choices=["0", "1", "2"]),
+ retain=dict(default=False, type='bool'),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
+ client_cert=dict(default=None, type='path', aliases=['certfile']),
+ client_key=dict(default=None, type='path', aliases=['keyfile']),
+ tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_PAHOMQTT:
+ module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR)
+
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
+ ca_certs = module.params.get("ca_cert", None)
+ certfile = module.params.get("client_cert", None)
+ keyfile = module.params.get("client_key", None)
+ tls_version = module.params.get("tls_version", None)
+
+ if client_id is None:
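+        # e.g. "host.example.com_12345" (FQDN plus PID, the documented default)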
+ client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
+
+ if payload and payload == 'None':
+ payload = None
+
+ auth = None
+ if username is not None:
+ auth = {'username': username, 'password': password}
+
+ tls = None
+ if ca_certs is not None:
+ if tls_version:
+ tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23)
+ else:
+ if LooseVersion(platform.python_version()) <= "3.5.2":
+ # Specifying `None` on later versions of python seems sufficient to
+ # instruct python to autonegotiate the SSL/TLS connection. On versions
+ # 3.5.2 and lower though we need to specify the version.
+ #
+ # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was
+ # not available until 3.5.3.
+ tls_version = ssl.PROTOCOL_SSLv23
+
+ tls = {
+ 'ca_certs': ca_certs,
+ 'certfile': certfile,
+ 'keyfile': keyfile,
+ 'tls_version': tls_version,
+ }
+
+ try:
+ mqtt.single(
+ topic,
+ payload,
+ qos=qos,
+ retain=retain,
+ client_id=client_id,
+ hostname=server,
+ port=port,
+ auth=auth,
+ tls=tls
+ )
+ except Exception as e:
+ module.fail_json(
+ msg="unable to publish to MQTT broker %s" % to_native(e),
+ exception=traceback.format_exc()
+ )
+
+ module.exit_json(changed=False, topic=topic)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py
new file mode 100644
index 00000000..e6135cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: nexmo
+short_description: Send an SMS via Nexmo
+description:
+  - Send an SMS message via Nexmo
+author: "Matt Martz (@sivel)"
+options:
+ api_key:
+ type: str
+ description:
+ - Nexmo API Key
+ required: true
+ api_secret:
+ type: str
+ description:
+ - Nexmo API Secret
+ required: true
+ src:
+ type: int
+ description:
+ - Nexmo Number to send from
+ required: true
+ dest:
+ type: list
+ description:
+ - Phone number(s) to send SMS message to
+ required: true
+ msg:
+ type: str
+ description:
+      - Message text to send. Messages longer than 160 characters will be
+        split into multiple messages.
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+ community.general.nexmo:
+ api_key: 640c8a53
+ api_secret: 0ce239a6
+ src: 12345678901
+ dest:
+ - 10987654321
+ - 16789012345
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+"""
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+ failed = list()
+ responses = dict()
+ msg = {
+ 'api_key': module.params.get('api_key'),
+ 'api_secret': module.params.get('api_secret'),
+ 'from': module.params.get('src'),
+ 'text': module.params.get('msg')
+ }
+ for number in module.params.get('dest'):
+ msg['to'] = number
+ url = "%s?%s" % (NEXMO_API, urlencode(msg))
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+
+ try:
+ responses[number] = json.load(response)
+ except Exception:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+ else:
+ for message in responses[number]['messages']:
+ if int(message['status']) != 0:
+ failed.append(number)
+ responses[number] = dict(failed=True, **responses[number])
+
+ if failed:
+        msg = 'One or more messages failed to send'
+ else:
+ msg = ''
+
+ module.exit_json(failed=bool(failed), msg=msg, changed=False,
+ responses=responses)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dict(
+ api_key=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ src=dict(required=True, type='int'),
+ dest=dict(required=True, type='list'),
+ msg=dict(required=True),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ send_msg(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py
new file mode 100644
index 00000000..2574a750
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Marc Sensenich <hello@marc-sensenich.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: office_365_connector_card
+short_description: Use webhooks to create Connector Card messages within an Office 365 group
+description:
+  - Creates Connector Card messages through Office 365 Connectors U(https://dev.outlook.com/Connectors).
+author: "Marc Sensenich (@marc-sensenich)"
+notes:
+ - This module is not idempotent, therefore if the same task is run twice
+ there will be two Connector Cards created
+options:
+ webhook:
+ type: str
+ description:
+ - The webhook URL is given to you when you create a new Connector.
+ required: true
+ summary:
+ type: str
+ description:
+ - A string used for summarizing card content.
+ - This will be shown as the message subject.
+ - This is required if the text parameter isn't populated.
+ color:
+ type: str
+ description:
+ - Accent color used for branding or indicating status in the card.
+ title:
+ type: str
+ description:
+ - A title for the Connector message. Shown at the top of the message.
+ text:
+ type: str
+ description:
+ - The main text of the card.
+      - This will be rendered below the sender information and optional title, and above any sections or actions present.
+ actions:
+ type: list
+ description:
+      - This array of objects will power the action links found at the bottom of the card.
+ sections:
+ type: list
+ description:
+ - Contains a list of sections to display in the card.
+      - For more information see U(https://dev.outlook.com/Connectors/reference).
+'''
+
+EXAMPLES = """
+- name: Create a simple Connector Card
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ text: 'Hello, World!'
+
+- name: Create a Connector Card with the full format
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ summary: This is the summary property
+ title: This is the **card's title** property
+ text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ color: E81123
+ sections:
+ - title: This is the **section's title** property
+ activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
+ activity_title: This is the section's **activityTitle** property
+ activity_subtitle: This is the section's **activitySubtitle** property
+ activity_text: This is the section's **activityText** property.
+ hero_image:
+ image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ facts:
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ images:
+ - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
+ title: This is the image's alternate text
+ actions:
+ - "@type": ActionCard
+ name: Comment
+ inputs:
+ - "@type": TextInput
+ id: comment
+ is_multiline: true
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": ActionCard
+ name: Due Date
+ inputs:
+ - "@type": DateInput
+ id: dueDate
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": HttpPOST
+ name: Action's name prop.
+ target: http://...
+ - "@type": OpenUri
+ name: Action's name prop
+ targets:
+ - os: default
+ uri: http://...
+ - start_group: true
+ title: This is the title of a **second section**
+ text: This second section is visually separated from the first one by setting its
+ **startGroup** property to true.
+"""
+
+RETURN = """
+"""
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions"
+OFFICE_365_CARD_TYPE = "MessageCard"
+OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required."
+OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable."
+
+
+def build_actions(actions):
+ action_items = []
+
+ for action in actions:
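+        # e.g. {"is_multiline": True} becomes {"isMultiline": True}, matching
+        # the camelCase keys of the MessageCard schema.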
+ action_item = snake_dict_to_camel_dict(action)
+ action_items.append(action_item)
+
+ return action_items
+
+
+def build_sections(sections):
+ sections_created = []
+
+ for section in sections:
+ sections_created.append(build_section(section))
+
+ return sections_created
+
+
+def build_section(section):
+ section_payload = dict()
+
+ if 'title' in section:
+ section_payload['title'] = section['title']
+
+ if 'start_group' in section:
+ section_payload['startGroup'] = section['start_group']
+
+ if 'activity_image' in section:
+ section_payload['activityImage'] = section['activity_image']
+
+ if 'activity_title' in section:
+ section_payload['activityTitle'] = section['activity_title']
+
+ if 'activity_subtitle' in section:
+ section_payload['activitySubtitle'] = section['activity_subtitle']
+
+ if 'activity_text' in section:
+ section_payload['activityText'] = section['activity_text']
+
+ if 'hero_image' in section:
+ section_payload['heroImage'] = section['hero_image']
+
+ if 'text' in section:
+ section_payload['text'] = section['text']
+
+ if 'facts' in section:
+ section_payload['facts'] = section['facts']
+
+ if 'images' in section:
+ section_payload['images'] = section['images']
+
+ if 'actions' in section:
+ section_payload['potentialAction'] = build_actions(section['actions'])
+
+ return section_payload
+
+
+def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None):
+ payload = dict()
+ payload['@context'] = OFFICE_365_CARD_CONTEXT
+ payload['@type'] = OFFICE_365_CARD_TYPE
+
+ if summary is not None:
+ payload['summary'] = summary
+
+ if color is not None:
+ payload['themeColor'] = color
+
+ if title is not None:
+ payload['title'] = title
+
+ if text is not None:
+ payload['text'] = text
+
+ if actions:
+ payload['potentialAction'] = build_actions(actions)
+
+ if sections:
+ payload['sections'] = build_sections(sections)
+
+ payload = module.jsonify(payload)
+ return payload
+
+
+def do_notify_connector_card_webhook(module, webhook, payload):
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ response, info = fetch_url(
+ module=module,
+ url=webhook,
+ headers=headers,
+ method='POST',
+ data=payload
+ )
+
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ elif info['status'] == 400 and module.check_mode:
+ if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
+ else:
+ module.fail_json(
+ msg="failed to send %s as a connector card to Incoming Webhook: %s"
+ % (payload, info['msg'])
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ webhook=dict(required=True, no_log=True),
+ summary=dict(type='str'),
+ color=dict(type='str'),
+ title=dict(type='str'),
+ text=dict(type='str'),
+ actions=dict(type='list'),
+ sections=dict(type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ webhook = module.params['webhook']
+ summary = module.params['summary']
+ color = module.params['color']
+ title = module.params['title']
+ text = module.params['text']
+ actions = module.params['actions']
+ sections = module.params['sections']
+
+ payload = build_payload_for_connector_card(
+ module,
+ summary,
+ color,
+ title,
+ text,
+ actions,
+ sections)
+
+ if module.check_mode:
+ # In check mode, send an empty payload to validate connection
+ check_mode_payload = build_payload_for_connector_card(module)
+ do_notify_connector_card_webhook(module, webhook, check_mode_payload)
+
+ do_notify_connector_card_webhook(module, webhook, payload)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py
new file mode 100644
index 00000000..ab27fd5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: "Willy Barro (@willybarro)"
+requirements: [ pushbullet.py ]
+module: pushbullet
+short_description: Sends notifications to Pushbullet
+description:
+ - This module sends push notifications via Pushbullet to channels or devices.
+options:
+ api_key:
+ type: str
+ description:
+ - Push bullet API token
+ required: true
+ channel:
+ type: str
+ description:
+      - The channel TAG to which you wish to broadcast a push notification,
+        as seen under "My Channels" > "Edit your channel" on the
+        Pushbullet page.
+ device:
+ type: str
+ description:
+      - The device NAME to which you wish to send a push notification,
+        as seen on the Pushbullet main page.
+ push_type:
+ type: str
+ description:
+ - Thing you wish to push.
+ default: note
+ choices: [ "note", "link" ]
+ title:
+ type: str
+ description:
+ - Title of the notification.
+ required: true
+  body:
+    type: str
+    description:
+      - Body of the notification, e.g. details of the fault you're alerting.
+  url:
+    type: str
+    description:
+      - URL field, used when I(push_type) is C(link).
+
+notes:
+ - Requires pushbullet.py Python package on the remote host.
+ You can install it via pip with ($ pip install pushbullet.py).
+ See U(https://github.com/randomchars/pushbullet.py)
+'''
+
+EXAMPLES = '''
+- name: Sends a push notification to a device
+ community.general.pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ device: "Chrome"
+ title: "You may see this on Google Chrome"
+
+- name: Sends a link to a device
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ device: Chrome
+ push_type: link
+ title: Ansible Documentation
+ body: https://docs.ansible.com/
+
+- name: Sends a push notification to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+    title: "Broadcasting a message to the #my-awesome-channel folks"
+
+- name: Sends a push notification with title and body to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+ title: ALERT! Signup service is down
+ body: Error rate on signup service is over 90% for more than 2 minutes
+'''
+
+import traceback
+
+PUSHBULLET_IMP_ERR = None
+try:
+ from pushbullet import PushBullet
+ from pushbullet.errors import InvalidKeyError, PushError
+except ImportError:
+ PUSHBULLET_IMP_ERR = traceback.format_exc()
+ pushbullet_found = False
+else:
+ pushbullet_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(type='str', required=True, no_log=True),
+ channel=dict(type='str', default=None),
+ device=dict(type='str', default=None),
+ push_type=dict(type='str', default="note", choices=['note', 'link']),
+ title=dict(type='str', required=True),
+ body=dict(type='str', default=None),
+ url=dict(type='str', default=None),
+ ),
+ mutually_exclusive=(
+ ['channel', 'device'],
+ ),
+ supports_check_mode=True
+ )
+
+ api_key = module.params['api_key']
+ channel = module.params['channel']
+ device = module.params['device']
+ push_type = module.params['push_type']
+ title = module.params['title']
+ body = module.params['body']
+ url = module.params['url']
+
+ if not pushbullet_found:
+ module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR)
+
+ # Init pushbullet
+ try:
+ pb = PushBullet(api_key)
+ target = None
+ except InvalidKeyError:
+ module.fail_json(msg="Invalid api_key")
+
+ # Checks for channel/device
+ if device is None and channel is None:
+ module.fail_json(msg="You need to provide a channel or a device.")
+
+ # Search for given device
+ if device is not None:
+ devices_by_nickname = {}
+ for d in pb.devices:
+ devices_by_nickname[d.nickname] = d
+
+ if device in devices_by_nickname:
+ target = devices_by_nickname[device]
+ else:
+ module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))
+
+ # Search for given channel
+ if channel is not None:
+ channels_by_tag = {}
+ for c in pb.channels:
+ channels_by_tag[c.channel_tag] = c
+
+ if channel in channels_by_tag:
+ target = channels_by_tag[channel]
+ else:
+ module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))
+
+ # If in check mode, exit saying that we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False, msg="OK")
+
+ # Send push notification
+ try:
+ if push_type == "link":
+ target.push_link(title, url, body)
+ else:
+ target.push_note(title, body)
+ module.exit_json(changed=False, msg="OK")
+ except PushError as e:
+ module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
+
+ module.fail_json(msg="An unknown error has occurred")
+
+
+if __name__ == '__main__':
+ main()
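
As an illustration, the pushbullet.py calls this module wraps can be exercised directly; a sketch assuming `pip install pushbullet.py` and the placeholder API key and device name from the examples above:

from pushbullet import PushBullet

pb = PushBullet('ABC123abc123ABC123abc123ABC123ab')  # api_key

# Device lookup mirrors the module: match the nickname shown on the Pushbullet page.
target = next((d for d in pb.devices if d.nickname == 'Chrome'), None)
if target is None:
    raise SystemExit('device not found')

target.push_note('You may see this on Google Chrome', 'notification body')
# With push_type=link the module calls target.push_link(title, url, body) instead.
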
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py
new file mode 100644
index 00000000..7f73592a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
+# Copyright (c) 2019, Bernd Arnold <wopfel@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pushover
+short_description: Send notifications via U(https://pushover.net)
+description:
+  - Send notifications via Pushover to a subscriber's list of devices and email
+    addresses. Requires the Pushover app on the devices.
+notes:
+  - You will require a pushover.net account to use this module, but no account
+    is required to receive messages.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ app_token:
+ type: str
+ description:
+ - Pushover issued token identifying your pushover app.
+ required: true
+ user_key:
+ type: str
+ description:
+ - Pushover issued authentication key for your user.
+ required: true
+ title:
+ type: str
+ description:
+ - Message title.
+ required: false
+ pri:
+ type: str
+ description:
+ - Message priority (see U(https://pushover.net) for details).
+ required: false
+ default: '0'
+ choices: [ '-2', '-1', '0', '1', '2' ]
+ device:
+ type: str
+ description:
+ - A device the message should be sent to. Multiple devices can be specified, separated by a comma.
+ required: false
+ version_added: 1.2.0
+
+author:
+ - "Jim Richardson (@weaselkeeper)"
+ - "Bernd Arnold (@wopfel)"
+'''
+
+EXAMPLES = '''
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} is acting strange ...'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ title: 'Alert!'
+    msg: '{{ inventory_hostname }} has exploded in flames, it is now time to panic'
+ pri: 1
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net to a specific device
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} has been lost somewhere'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ device: admins-iPhone
+ delegate_to: localhost
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class Pushover(object):
+ ''' Instantiates a pushover object, use it to send notifications '''
+ base_uri = 'https://api.pushover.net'
+
+ def __init__(self, module, user, token):
+ self.module = module
+ self.user = user
+ self.token = token
+
+ def run(self, priority, msg, title, device):
+        ''' Send the notification to the Pushover API. '''
+
+ url = '%s/1/messages.json' % (self.base_uri)
+
+ # parse config
+ options = dict(user=self.user,
+ token=self.token,
+ priority=priority,
+ message=msg)
+
+ if title is not None:
+ options = dict(options,
+ title=title)
+
+ if device is not None:
+ options = dict(options,
+ device=device)
+
+ data = urlencode(options)
+
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
+ if info['status'] != 200:
+ raise Exception(info)
+
+ return r.read()
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ title=dict(type='str'),
+ msg=dict(required=True),
+ app_token=dict(required=True, no_log=True),
+ user_key=dict(required=True, no_log=True),
+ pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
+ device=dict(type='str'),
+ ),
+ )
+
+ msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
+ try:
+ response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device'])
+ except Exception:
+ module.fail_json(msg='Unable to send msg via pushover')
+
+ module.exit_json(msg='message sent successfully: %s' % response, changed=False)
+
+
+if __name__ == '__main__':
+ main()
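
A standalone sketch of the form-encoded POST the Pushover class above performs, using the placeholder credentials from the examples:

from urllib.parse import urlencode
from urllib.request import Request, urlopen

options = {
    'user': 'baa5fe97f2c5ab3ca8f0bb59',  # user_key
    'token': 'wxfdksl',                  # app_token
    'priority': '0',
    'message': 'host01 is acting strange ...',
}
# title and device are only added to the dict when supplied, as in run() above.
req = Request('https://api.pushover.net/1/messages.json',
              data=urlencode(options).encode(),
              headers={'Content-type': 'application/x-www-form-urlencoded'})
urlopen(req)  # any status other than 200 is treated as a failure by the module
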
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py
new file mode 100644
index 00000000..13a93dd8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Deepak Kothandan <deepak.kothandan@outlook.com>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rocketchat
+short_description: Send notifications to Rocket Chat
+description:
+ - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+      - The domain for your environment without protocol. (for example
+        C(example.com) or C(chat.example.com))
+ required: true
+ token:
+ type: str
+ description:
+ - Rocket Chat Incoming Webhook integration token. This provides
+ authentication to Rocket Chat's Incoming webhook for posting
+ messages.
+ required: true
+ protocol:
+ type: str
+ description:
+ - Specify the protocol used to send notification messages before the webhook url. (i.e. http or https)
+ default: https
+ choices:
+ - 'http'
+ - 'https'
+ msg:
+ type: str
+ description:
+ - Message to be sent.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+        specified during the creation of the webhook.
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: "https://www.ansible.com/favicon.ico"
+ icon_emoji:
+ type: str
+ description:
+      - Emoji for the message sender. The representation for the available emojis can be
+        obtained from Rocket Chat (for example C(:thumbsup:)). If I(icon_emoji) is set, I(icon_url) will not be used.
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+ - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments.
+'''
+
+EXAMPLES = """
+- name: Send notification message via Rocket Chat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Rocket Chat all options
+ community.general.rocketchat:
+ domain: chat.example.com
+ token: thetoken/generatedby/rocketchat
+ msg: '{{ inventory_hostname }} completed'
+    channel: '#ansible'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+ delegate_to: localhost
+
+- name: Use the attachments API
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ attachments:
+ - text: Display my system load on host A and B
+      color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: 'load average: 0,74, 0,66, 0,63'
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+ delegate_to: localhost
+"""
+
+RETURN = """
+changed:
+ description: A flag indicating if any change was made or not.
+ returned: success
+ type: bool
+ sample: false
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
+
+
+def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ payload = dict(attachments=[dict(text=text, color=color)])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+
+    if attachments is not None:
+        if 'attachments' not in payload:
+            payload['attachments'] = []
+
+        for attachment in attachments:
+            if 'fallback' not in attachment:
+                attachment['fallback'] = attachment['text']
+            payload['attachments'].append(attachment)
+
+ payload = "payload=" + module.jsonify(payload)
+ return payload
+
+
+def do_notify_rocketchat(module, domain, token, protocol, payload):
+
+ if token.count('/') < 1:
+ module.fail_json(msg="Invalid Token specified, provide a valid token")
+
+ rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
+
+ response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True),
+ token=dict(type='str', required=True, no_log=True),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ msg=dict(type='str', required=False),
+ channel=dict(type='str'),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str'),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments=dict(type='list', required=False)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ protocol = module.params['protocol']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments)
+ do_notify_rocketchat(module, domain, token, protocol, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
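
A short sketch of the payload string build_payload_for_rocketchat() produces for a plain message, including the channel normalization:

import json

def normalize_channel(channel):
    # Mirrors the module: '#' and '@' prefixes pass through, anything else gets '#'.
    return channel if channel[0] in ('#', '@') else '#' + channel

payload = {'text': 'host01 completed',
           'channel': normalize_channel('ansible'),   # -> '#ansible'
           'username': 'Ansible',
           'icon_url': 'https://www.ansible.com/favicon.ico',
           'link_names': 1}
body = 'payload=' + json.dumps(payload)
# body is POSTed to ROCKETCHAT_INCOMING_WEBHOOK, i.e. https://<domain>/hooks/<token>
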
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py
new file mode 100644
index 00000000..1c66adf6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Michael DeHaan <michael@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: say
+short_description: Makes a computer speak
+description:
+    - Makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+ - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
+ - If you like this module, you may also be interested in the osx_say callback plugin.
+  - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
+options:
+ msg:
+ type: str
+ description:
+ What to say
+ required: true
+ voice:
+ type: str
+ description:
+ What voice to use
+ required: false
+requirements: [ say or espeak or espeak-ng ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- name: Make a computer speak
+ community.general.say:
+ msg: '{{ inventory_hostname }} is all done'
+ voice: Zarvox
+ delegate_to: localhost
+'''
+
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
+ cmd = [executable, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ msg=dict(required=True),
+ voice=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ msg = module.params['msg']
+ voice = module.params['voice']
+ possibles = ('say', 'espeak', 'espeak-ng')
+
+ if platform.system() != 'Darwin':
+        # A 'say' binary on a non-Darwin system may be the GNUstep tool, which doesn't support the 'voice' parameter
+ voice = None
+
+ for possible in possibles:
+ executable = module.get_bin_path(possible)
+ if executable:
+ break
+ else:
+        module.fail_json(msg='Unable to find any of %s' % ', '.join(possibles))
+
+ if module.check_mode:
+ module.exit_json(msg=msg, changed=False)
+
+ say(module, executable, msg, voice)
+
+ module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+ main()
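
A sketch of the command line say() assembles; run outside Ansible it is equivalent to the following, assuming a macOS host with /usr/bin/say:

import subprocess

def build_cmd(executable, msg, voice=None):
    cmd = [executable, msg]
    if voice:
        cmd.extend(('-v', voice))
    return cmd

# Equivalent to: /usr/bin/say 'host01 is all done' -v Zarvox
subprocess.run(build_cmd('/usr/bin/say', 'host01 is all done', 'Zarvox'), check=True)
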
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py
new file mode 100644
index 00000000..67132771
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+ - "Sends an email with a SendGrid account through their API, not through
+ the SMTP service."
+notes:
+ - "This module is non-idempotent because it sends an email through the
+ external API. It is idempotent only in the case that the module fails."
+ - "Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need an active SendGrid
+ account."
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
+ you must pip install sendgrid"
+ - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)"
+requirements:
+ - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported)
+options:
+ username:
+ type: str
+ description:
+ - Username for logging into the SendGrid account.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ password:
+ type: str
+ description:
+ - Password that corresponds to the username.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ from_address:
+ type: str
+ description:
+ - The address in the "from" field for the email.
+ required: true
+ to_addresses:
+ type: list
+ description:
+ - A list with one or more recipient email addresses.
+ required: true
+ subject:
+ type: str
+ description:
+ - The desired subject for the email.
+ required: true
+ api_key:
+ type: str
+ description:
+ - Sendgrid API key to use instead of username/password.
+ cc:
+ type: list
+ description:
+ - A list of email addresses to cc.
+ bcc:
+ type: list
+ description:
+ - A list of email addresses to bcc.
+ attachments:
+ type: list
+ description:
+ - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs).
+ from_name:
+ type: str
+ description:
+      - The name you want to appear in the from field, for example C(John Doe).
+ html_body:
+ description:
+ - Whether the body is html content that should be rendered.
+ type: bool
+ default: 'no'
+ headers:
+ type: dict
+ description:
+ - A dict to pass on as headers.
+ body:
+ type: str
+ description:
+ - The e-mail body content.
+ required: yes
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = r'''
+- name: Send an email to a single recipient that the deployment was successful
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "ansible@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ subject: "Deployment success."
+ body: "The most recent Ansible deployment was successful."
+ delegate_to: localhost
+
+- name: Send an email to more than one recipient that the build failed
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "build@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ - "devteam@mycompany.com"
+ subject: "Build failure!."
+ body: "Unable to pull source repository from Git server."
+ delegate_to: localhost
+'''
+
+# =======================================
+# sendgrid module support methods
+#
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+SENDGRID_IMP_ERR = None
+try:
+ import sendgrid
+ HAS_SENDGRID = True
+except ImportError:
+ SENDGRID_IMP_ERR = traceback.format_exc()
+ HAS_SENDGRID = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def post_sendgrid_api(module, username, password, from_address, to_addresses,
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
+
+ if not HAS_SENDGRID:
+ SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+ AGENT = "Ansible"
+ data = {'api_user': username, 'api_key': password,
+ 'from': from_address, 'subject': subject, 'text': body}
+ encoded_data = urlencode(data)
+ to_addresses_api = ''
+ for recipient in to_addresses:
+            # to_native avoids a b'...' repr leaking into the query string on Python 3
+            recipient = to_native(recipient, errors='surrogate_or_strict')
+ to_addresses_api += '&to[]=%s' % recipient
+ encoded_data += to_addresses_api
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
+ return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ else:
+ # Remove this check when adding Sendgrid API v3 support
+ if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"):
+ module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.")
+
+ if api_key:
+ sg = sendgrid.SendGridClient(api_key)
+ else:
+ sg = sendgrid.SendGridClient(username, password)
+
+ message = sendgrid.Mail()
+ message.set_subject(subject)
+
+ for recip in to_addresses:
+ message.add_to(recip)
+
+ if cc:
+ for recip in cc:
+ message.add_cc(recip)
+ if bcc:
+ for recip in bcc:
+ message.add_bcc(recip)
+
+ if headers:
+ message.set_headers(headers)
+
+ if attachments:
+ for f in attachments:
+ name = os.path.basename(f)
+ message.add_attachment(name, f)
+
+ if from_name:
+        message.set_from('%s <%s>' % (from_name, from_address))
+ else:
+ message.set_from(from_address)
+
+ if html_body:
+ message.set_html(body)
+ else:
+ message.set_text(body)
+
+ return sg.send(message)
+# =======================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(required=False),
+ password=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
+ bcc=dict(required=False, type='list'),
+ cc=dict(required=False, type='list'),
+ headers=dict(required=False, type='dict'),
+ from_address=dict(required=True),
+ from_name=dict(required=False),
+ to_addresses=dict(required=True, type='list'),
+ subject=dict(required=True),
+ body=dict(required=True),
+ html_body=dict(required=False, default=False, type='bool'),
+ attachments=dict(required=False, type='list')
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['api_key', 'password'],
+ ['api_key', 'username']
+ ],
+ required_together=[['username', 'password']],
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ api_key = module.params['api_key']
+ bcc = module.params['bcc']
+ cc = module.params['cc']
+ headers = module.params['headers']
+ from_name = module.params['from_name']
+ from_address = module.params['from_address']
+ to_addresses = module.params['to_addresses']
+ subject = module.params['subject']
+ body = module.params['body']
+ html_body = module.params['html_body']
+ attachments = module.params['attachments']
+
+    sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, attachments]
+
+    # html_body defaults to False, so test it for truth rather than "is not None",
+    # otherwise the sendgrid library would always be required.
+    if (any(lib_arg is not None for lib_arg in sendgrid_lib_args) or html_body) and not HAS_SENDGRID:
+ reason = 'when using any of the following arguments: ' \
+ 'api_key, bcc, cc, headers, from_name, html_body, attachments'
+ module.fail_json(msg=missing_required_lib('sendgrid', reason=reason),
+ exception=SENDGRID_IMP_ERR)
+
+ response, info = post_sendgrid_api(module, username, password,
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+
+ if not HAS_SENDGRID:
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ else:
+ if response != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
+
+ module.exit_json(msg=subject, changed=False)
+
+
+if __name__ == '__main__':
+ main()
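
A sketch of the legacy V2 mail.send request used on the library-less path above, with placeholder credentials and addresses from the examples:

from urllib.parse import urlencode
from urllib.request import Request, urlopen

data = {'api_user': 'sendgrid_username', 'api_key': 'sendgrid_password',
        'from': 'ansible@mycompany.com', 'subject': 'Deployment success.',
        'text': 'The most recent Ansible deployment was successful.'}
encoded = urlencode(data)
# Recipients are appended as repeated to[] parameters, one per address.
for rcpt in ['ops@mycompany.com']:
    encoded += '&to[]=%s' % rcpt
req = Request('https://api.sendgrid.com/api/mail.send.json',
              data=encoded.encode(),
              headers={'User-Agent': 'Ansible',
                       'Content-type': 'application/x-www-form-urlencoded',
                       'Accept': 'application/json'})
urlopen(req)  # the module fails unless this returns HTTP 200
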
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py
new file mode 100644
index 00000000..946fc9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py
@@ -0,0 +1,487 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Lee Goolsbee <lgoolsbee@atlassian.com>
+# (c) 2020, Michal Middleton <mm.404@icloud.com>
+# (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# (c) 2016, René Moser <mail@renemoser.net>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+ - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+ - Slack (sub)domain for your environment without protocol. (i.e.
+ C(example.slack.com)) In 1.8 and beyond, this is deprecated and may
+ be ignored. See token documentation for information.
+ token:
+ type: str
+ description:
+ - Slack integration token. This authenticates you to the slack service.
+ Make sure to use the correct type of token, depending on what method you use.
+ - "Webhook token:
+ Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
+ 1.8 and above, ansible adapts to the new slack API where tokens look
+ like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
+ are in the new format then slack will ignore any value of domain. If
+ the token is in the old format the domain is required. Ansible has no
+ control of when slack will get rid of the old API. When slack does
+      that the old format will stop working. Please keep in mind the tokens
+      are not the API tokens but are the webhook tokens. In slack these are
+      found in the webhook URL, which is obtained under the apps and integrations.
+      The incoming webhooks can be added in that area. In some cases this may
+      be locked by your Slack admin and you must request access. The key is on
+      the end of the URL given to you in that section."
+ - "WebAPI token:
+ Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-)
+ or C(xoxa-), eg. C(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id.
+ See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
+ required: true
+ msg:
+ type: str
+ description:
+ - Message to send. Note that the module does not handle escaping characters.
+ Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending.
+ See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ thread_id:
+ description:
+      - Optional. Timestamp of parent message to thread this message. See U(https://api.slack.com/docs/message-threading).
+ type: str
+ message_id:
+ description:
+ - Optional. Message ID to edit, instead of posting a new message.
+ Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
+ type: str
+ version_added: 1.2.0
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+      - URL for the message sender's icon (default C(https://www.ansible.com/favicon.ico))
+ default: https://www.ansible.com/favicon.ico
+ icon_emoji:
+ type: str
+ description:
+ - Emoji for the message sender. See Slack documentation for options.
+ (if I(icon_emoji) is set, I(icon_url) will not be used)
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ parse:
+ type: str
+ description:
+ - Setting for the message parser at Slack
+ choices:
+ - 'full'
+ - 'none'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+ - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
+ - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value.
+ - Specifying value in hex is supported since Ansible 2.8.
+ default: 'normal'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/docs/attachments).
+ blocks:
+ description:
+ - Define a list of blocks. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/block-kit).
+ type: list
+ elements: dict
+ version_added: 1.0.0
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Slack all options
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ channel: '#ansible'
+ thread_id: '1539917263.000100'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ parse: 'none'
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+
+- name: Insert a color bar in front of the message with valid hex color value
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: 'This message uses color in hex value'
+ color: '#00aacc'
+ username: ''
+ icon_url: ''
+
+- name: Use the attachments API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: "load average: 0,74, 0,66, 0,63"
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+
+- name: Use the blocks API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ blocks:
+ - type: section
+ text:
+ type: mrkdwn
+ text: |-
+ *System load*
+ Display my system load on host A and B
+ - type: context
+ elements:
+ - type: mrkdwn
+ text: |-
+ *System A*
+ load average: 0,74, 0,66, 0,63
+ - type: mrkdwn
+ text: |-
+ *System B*
+ load average: 5,16, 4,64, 2,43
+
+- name: Send a message with a link using Slack markup
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: We sent this message using <https://www.ansible.com|Ansible>!
+
+- name: Send a message with angle brackets and ampersands
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: This message has &lt;brackets&gt; &amp; ampersands in plain text.
+
+- name: Initial Threaded Slack message
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ msg: 'Starting a thread with my initial post.'
+ register: slack_response
+- name: Add more info to thread
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ thread_id: "{{ slack_response['ts'] }}"
+ color: good
+ msg: 'And this is my threaded response!'
+
+- name: Send a message to be edited later on
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: '#ansible'
+ msg: Deploying something...
+ register: slack_response
+- name: Edit message
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: "{{ slack_response.channel }}"
+ msg: Deployment complete!
+ message_id: "{{ slack_response.ts }}"
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
+SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
+SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage'
+SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update'
+SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history'
+
+# Escaping quotes and apostrophes to avoid ending the string prematurely in the ansible call.
+# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
+escape_table = {
+    '"': '\\"',
+    "'": "\\'",
+}
+
+
+def is_valid_hex_color(color_choice):
+ if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice):
+ return True
+ return False
+
+
+def escape_quotes(text):
+ '''Backslash any quotes within text.'''
+ return "".join(escape_table.get(c, c) for c in text)
+
+
+def recursive_escape_quotes(obj, keys):
+ '''Recursively escape quotes inside supplied keys inside block kit objects'''
+ if isinstance(obj, dict):
+ escaped = {}
+ for k, v in obj.items():
+ if isinstance(v, str) and k in keys:
+ escaped[k] = escape_quotes(v)
+ else:
+ escaped[k] = recursive_escape_quotes(v, keys)
+ elif isinstance(obj, list):
+ escaped = [recursive_escape_quotes(v, keys) for v in obj]
+ else:
+ escaped = obj
+ return escaped
+
+
+def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=escape_quotes(text))
+ elif text is not None:
+ # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])])
+ if channel is not None:
+ if channel.startswith(('#', '@', 'C0')):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if thread_id is not None:
+ payload['thread_ts'] = thread_id
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+ if parse is not None:
+ payload['parse'] = parse
+ if message_id is not None:
+ payload['ts'] = message_id
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ if attachments is not None:
+ attachment_keys_to_escape = [
+ 'title',
+ 'text',
+ 'author_name',
+ 'pretext',
+ 'fallback',
+ ]
+ for attachment in attachments:
+ for key in attachment_keys_to_escape:
+ if key in attachment:
+ attachment[key] = escape_quotes(attachment[key])
+
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+
+ payload['attachments'].append(attachment)
+
+ if blocks is not None:
+ block_keys_to_escape = [
+ 'text',
+ 'alt_text'
+ ]
+ payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape)
+
+ return payload
+
+
+def get_slack_message(module, domain, token, channel, ts):
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ 'Authorization': 'Bearer ' + token
+ }
+ qs = urlencode({
+ 'channel': channel,
+ 'ts': ts,
+ 'limit': 1,
+ 'inclusive': 'true',
+ })
+ url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs
+ response, info = fetch_url(module=module, url=url, headers=headers, method='GET')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to get slack message")
+ data = module.from_json(response.read())
+ if len(data['messages']) < 1:
+ module.fail_json(msg="no messages matching ts: %s" % ts)
+ if len(data['messages']) > 1:
+ module.fail_json(msg="more than 1 message matching ts: %s" % ts)
+ return data['messages'][0]
+
+
+def do_notify_slack(module, domain, token, payload):
+ use_webapi = False
+ if token.count('/') >= 2:
+ # New style webhook token
+ slack_uri = SLACK_INCOMING_WEBHOOK % (token)
+ elif re.match(r'^xox[abp]-\S+$', token):
+ slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
+ use_webapi = True
+ else:
+ if not domain:
+ module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form "
+ "XXXX/YYYY/ZZZZ in your playbook")
+ slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+ if use_webapi:
+ headers['Authorization'] = 'Bearer ' + token
+
+ data = module.jsonify(payload)
+ response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data)
+
+ if info['status'] != 200:
+ if use_webapi:
+ obscured_incoming_webhook = slack_uri
+ else:
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
+ module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))
+
+ # each API requires different handling
+ if use_webapi:
+ return module.from_json(response.read())
+ else:
+ return {'webhook': 'ok'}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=False, default=None),
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=False, default=None),
+ channel=dict(type='str', default=None),
+ thread_id=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str', default=None),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ parse=dict(type='str', default=None, choices=['none', 'full']),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal'),
+ attachments=dict(type='list', required=False, default=None),
+ blocks=dict(type='list', elements='dict'),
+ message_id=dict(type='str', default=None),
+ ),
+ supports_check_mode=True,
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ text = module.params['msg']
+ channel = module.params['channel']
+ thread_id = module.params['thread_id']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ parse = module.params['parse']
+ color = module.params['color']
+ attachments = module.params['attachments']
+ blocks = module.params['blocks']
+ message_id = module.params['message_id']
+
+ color_choices = ['normal', 'good', 'warning', 'danger']
+ if color not in color_choices and not is_valid_hex_color(color):
+ module.fail_json(msg="Color value specified should be either one of %r "
+ "or any valid hex value with length 3 or 6." % color_choices)
+
+ changed = True
+
+ # if updating an existing message, we can check if there's anything to update
+ if message_id is not None:
+ changed = False
+ msg = get_slack_message(module, domain, token, channel, message_id)
+ for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
+ if msg.get(key) != module.params.get(key):
+ changed = True
+ break
+ # if check mode is active, we shouldn't do anything regardless.
+ # if changed=False, we don't need to do anything, so don't do it.
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel'])
+ elif module.check_mode:
+ module.exit_json(changed=changed)
+
+ payload = build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id)
+ slack_response = do_notify_slack(module, domain, token, payload)
+
+ if 'ok' in slack_response:
+ # Evaluate WebAPI response
+ if slack_response['ok']:
+ # return payload as a string for backwards compatibility
+ payload_json = module.jsonify(payload)
+ module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'],
+ api=slack_response, payload=payload_json)
+ else:
+ module.fail_json(msg="Slack API error", error=slack_response['error'])
+ else:
+ # Exit with plain OK from WebHook, since we don't have more information
+ # If we get 200 from webhook, the only answer is OK
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
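
The endpoint selection in do_notify_slack() can be summarized in isolation; a sketch using the example tokens from the documentation above:

import re

def pick_endpoint(token, domain=None, editing=False):
    if token.count('/') >= 2:
        # New-style webhook token, e.g. G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK
        return 'https://hooks.slack.com/services/%s' % token
    if re.match(r'^xox[abp]-\S+$', token):
        # WebAPI token: chat.update when editing (payload carries 'ts'), else chat.postMessage
        return ('https://slack.com/api/chat.update' if editing
                else 'https://slack.com/api/chat.postMessage')
    if not domain:
        raise ValueError('old-style webhook token requires a domain')
    return 'https://%s/services/hooks/incoming-webhook?token=%s' % (domain, token)

print(pick_endpoint('xoxb-1234-56789abcdefghijklmnop'))  # chat.postMessage
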
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py
new file mode 100644
index 00000000..7f4f899f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syslogger
+short_description: Log messages in the syslog
+description:
+ - Uses syslog to add log entries to the host.
+options:
+ msg:
+ type: str
+ description:
+ - This is the message to place in syslog.
+ required: True
+ priority:
+ type: str
+ description:
+ - Set the log priority.
+ choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ]
+ default: "info"
+ facility:
+ type: str
+ description:
+ - Set the log facility.
+ choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news",
+ "uucp", "cron", "syslog", "local0", "local1", "local2",
+ "local3", "local4", "local5", "local6", "local7" ]
+ default: "daemon"
+ log_pid:
+ description:
+ - Log the PID in brackets.
+ type: bool
+ default: False
+ ident:
+ description:
+      - Specify the name of the application that is sending the log to syslog.
+ type: str
+ default: 'ansible_syslogger'
+ version_added: '0.2.0'
+author:
+ - Tim Rightnour (@garbled1)
+'''
+
+EXAMPLES = r'''
+- name: Simple Usage
+ community.general.syslogger:
+ msg: "I will end up as daemon.info"
+
+- name: Send a log message with err priority and user facility with log_pid
+ community.general.syslogger:
+ msg: "Hello from Ansible"
+ priority: "err"
+ facility: "user"
+ log_pid: true
+
+- name: Specify the name of application which is sending log message
+ community.general.syslogger:
+ ident: "MyApp"
+ msg: "I want to believe"
+ priority: "alert"
+'''
+
+RETURN = r'''
+ident:
+  description: Name of the application sending the message to syslog
+ returned: always
+ type: str
+ sample: "ansible_syslogger"
+ version_added: '0.2.0'
+priority:
+  description: Priority level
+  returned: always
+  type: str
+  sample: "info"
+facility:
+  description: Syslog facility
+  returned: always
+  type: str
+  sample: "daemon"
+log_pid:
+ description: Log PID status
+ returned: always
+ type: bool
+ sample: True
+msg:
+ description: Message sent to syslog
+ returned: always
+ type: str
+ sample: "Hello from Ansible"
+'''
+
+import syslog
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_facility(facility):
+ return {
+ 'kern': syslog.LOG_KERN,
+ 'user': syslog.LOG_USER,
+ 'mail': syslog.LOG_MAIL,
+ 'daemon': syslog.LOG_DAEMON,
+ 'auth': syslog.LOG_AUTH,
+ 'lpr': syslog.LOG_LPR,
+ 'news': syslog.LOG_NEWS,
+ 'uucp': syslog.LOG_UUCP,
+ 'cron': syslog.LOG_CRON,
+ 'syslog': syslog.LOG_SYSLOG,
+ 'local0': syslog.LOG_LOCAL0,
+ 'local1': syslog.LOG_LOCAL1,
+ 'local2': syslog.LOG_LOCAL2,
+ 'local3': syslog.LOG_LOCAL3,
+ 'local4': syslog.LOG_LOCAL4,
+ 'local5': syslog.LOG_LOCAL5,
+ 'local6': syslog.LOG_LOCAL6,
+ 'local7': syslog.LOG_LOCAL7
+ }.get(facility, syslog.LOG_DAEMON)
+
+
+def get_priority(priority):
+ return {
+ 'emerg': syslog.LOG_EMERG,
+ 'alert': syslog.LOG_ALERT,
+ 'crit': syslog.LOG_CRIT,
+ 'err': syslog.LOG_ERR,
+ 'warning': syslog.LOG_WARNING,
+ 'notice': syslog.LOG_NOTICE,
+ 'info': syslog.LOG_INFO,
+ 'debug': syslog.LOG_DEBUG
+ }.get(priority, syslog.LOG_INFO)
+
+
+def main():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ ident=dict(type='str', default='ansible_syslogger'),
+ msg=dict(type='str', required=True),
+ priority=dict(type='str', required=False,
+ choices=["emerg", "alert", "crit", "err", "warning",
+ "notice", "info", "debug"],
+ default='info'),
+ facility=dict(type='str', required=False,
+ choices=["kern", "user", "mail", "daemon", "auth",
+ "lpr", "news", "uucp", "cron", "syslog",
+ "local0", "local1", "local2", "local3",
+ "local4", "local5", "local6", "local7"],
+ default='daemon'),
+ log_pid=dict(type='bool', required=False, default=False)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ )
+
+ result = dict(
+ changed=False,
+ ident=module.params['ident'],
+ priority=module.params['priority'],
+ facility=module.params['facility'],
+ log_pid=module.params['log_pid'],
+ msg=module.params['msg']
+ )
+
+ # do the logging
+ try:
+ syslog.openlog(module.params['ident'],
+ syslog.LOG_PID if module.params['log_pid'] else 0,
+ get_facility(module.params['facility']))
+ syslog.syslog(get_priority(module.params['priority']),
+ module.params['msg'])
+ syslog.closelog()
+ result['changed'] = True
+
+ except Exception as exc:
+ module.fail_json(error='Failed to write to syslog %s' % to_native(exc), exception=traceback.format_exc(), **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
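
Outside Ansible, the three syslog calls the module performs reduce to the following, using the ident, priority, and facility values from the examples above:

import syslog

syslog.openlog('MyApp', syslog.LOG_PID, syslog.LOG_USER)  # ident, option, facility
syslog.syslog(syslog.LOG_ERR, 'Hello from Ansible')       # priority, message
syslog.closelog()
# Typical result on the host: "MyApp[<pid>]: Hello from Ansible" logged at user.err
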
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py
new file mode 100644
index 00000000..c1ef841c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Artem Feofanov <artem.feofanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: telegram
+author: "Artem Feofanov (@tyouxa)"
+
+short_description: Module for sending notifications via Telegram
+
+description:
+    - Send notifications via a Telegram bot to a verified group or user.
+notes:
+    - You will require a Telegram account and need to create a Telegram bot to use this module.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ msg_format:
+ type: str
+ description:
+        - Message format. The formatting options C(markdown) and C(html) are described in the
+          Telegram API docs (U(https://core.telegram.org/bots/api#formatting-options)).
+          If C(plain) is set, the message will not be formatted.
+ default: plain
+ choices: [ "plain", "markdown", "html" ]
+ token:
+ type: str
+ description:
+ - Token identifying your telegram bot.
+ required: true
+ chat_id:
+ type: str
+ description:
+ - Telegram group or user chat_id
+ required: true
+
+'''
+
+EXAMPLES = """
+
+- name: Send a message to chat in playbook
+ community.general.telegram:
+ token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+ chat_id: 000000
+ msg: Ansible task finished
+"""
+
+RETURN = """
+
+msg:
+ description: The message you attempted to send
+ returned: success
+ type: str
+ sample: "Ansible task finished"
+telegram_error:
+  description: Error message returned by the Telegram API
+ returned: failure
+ type: str
+ sample: "Bad Request: message text is empty"
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ chat_id=dict(type='str', required=True, no_log=True),
+ msg_format=dict(type='str', required=False, default='plain',
+ choices=['plain', 'markdown', 'html']),
+ msg=dict(type='str', required=True)),
+ supports_check_mode=True
+ )
+
+ token = quote(module.params.get('token'))
+ chat_id = quote(module.params.get('chat_id'))
+ msg_format = quote(module.params.get('msg_format'))
+ msg = quote(module.params.get('msg'))
+
+ url = 'https://api.telegram.org/bot' + token + \
+ '/sendMessage?text=' + msg + '&chat_id=' + chat_id
+ if msg_format in ('markdown', 'html'):
+ url += '&parse_mode=' + msg_format
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ body = json.loads(info['body'])
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']),
+ telegram_error=body['description'])
+
+
+if __name__ == '__main__':
+ main()
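
A sketch of the sendMessage URL the module assembles, with every component URL-quoted as above; the token and chat_id are the placeholders from the example:

from urllib.parse import quote

token, chat_id = '9999999:XXXXXXXXXXXXXXXXXXXXXXX', '000000'
msg, msg_format = 'Ansible task finished', 'markdown'

url = ('https://api.telegram.org/bot' + quote(token) +
       '/sendMessage?text=' + quote(msg) + '&chat_id=' + quote(chat_id))
if msg_format in ('markdown', 'html'):
    url += '&parse_mode=' + quote(msg_format)
print(url)  # a GET against this URL sends the message
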
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py
new file mode 100644
index 00000000..5ec995f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio.
+description:
+ - Sends a text message to a phone number through the Twilio messaging API.
+notes:
+    - This module is non-idempotent because it sends a text message through the
+      external API. It is idempotent only in the case that the module fails.
+ - Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need a Twilio account with
+ a purchased or verified phone number to send the text message.
+options:
+ account_sid:
+ type: str
+ description:
+        user's Twilio account SID, found on the account page
+ required: true
+ auth_token:
+ type: str
+ description: user's Twilio authentication token
+ required: true
+ msg:
+ type: str
+ description:
+ the body of the text message
+ required: true
+ to_numbers:
+ type: list
+ description:
+ one or more phone numbers to send the text message to,
+ format +15551112222
+ required: true
+ aliases: [ to_number ]
+ from_number:
+ type: str
+ description:
+ the Twilio number to send the text message from, format +15551112222
+ required: true
+ media_url:
+ type: str
+ description:
+ a URL with a picture, video or sound clip to send with an MMS
+ (multimedia message) instead of a plain SMS
+ required: false
+
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = '''
+# send an SMS about the build status to (555) 303 5681
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: All servers with webserver role are now configured.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ delegate_to: localhost
+
+# send an SMS to multiple phone numbers about the deployment
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: This server configuration is now complete.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15553258899
+ to_numbers:
+ - +15551113232
+ - +12025551235
+ - +19735559010
+ delegate_to: localhost
+
+# send an MMS to a single recipient with an update on the deployment
+# and an image of the results
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: Deployment complete!
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ media_url: https://demo.twilio.com/logo.png
+ delegate_to: localhost
+'''
+
+# =======================================
+# twilio module support methods
+#
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+def post_twilio_api(module, account_sid, auth_token, msg, from_number,
+ to_number, media_url=None):
+ URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
+ % (account_sid,)
+ AGENT = "Ansible"
+
+ data = {'From': from_number, 'To': to_number, 'Body': msg}
+ if media_url:
+ data['MediaUrl'] = media_url
+ encoded_data = urlencode(data)
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json',
+ }
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = account_sid.replace('\n', '')
+ module.params['url_password'] = auth_token.replace('\n', '')
+
+ return fetch_url(module, URI, data=encoded_data, headers=headers)
+
+
+# =======================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_sid=dict(required=True),
+ auth_token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ from_number=dict(required=True),
+ to_numbers=dict(required=True, aliases=['to_number'], type='list'),
+ media_url=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ account_sid = module.params['account_sid']
+ auth_token = module.params['auth_token']
+ msg = module.params['msg']
+ from_number = module.params['from_number']
+ to_numbers = module.params['to_numbers']
+ media_url = module.params['media_url']
+
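+    # Twilio's Messages API takes a single destination per request, so send
+    # one API call per entry in to_numbers and fail fast on the first error.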
+ for number in to_numbers:
+ r, info = post_twilio_api(module, account_sid, auth_token, msg,
+ from_number, number, media_url)
+ if info['status'] not in [200, 201]:
+ body_message = "unknown error"
+ if 'body' in info:
+ body = module.from_json(info['body'])
+ body_message = body['message']
+ module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
+
+ module.exit_json(msg=msg, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py
new file mode 100644
index 00000000..6f8e4e8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: typetalk
+short_description: Send a message to Typetalk
+description:
+ - Send a message to Typetalk using the Typetalk API
+options:
+ client_id:
+ type: str
+ description:
+ - OAuth2 client ID
+ required: true
+ client_secret:
+ type: str
+ description:
+ - OAuth2 client secret
+ required: true
+ topic:
+ type: int
+ description:
+ - topic ID to post the message to
+ required: true
+ msg:
+ type: str
+ description:
+ - message body
+ required: true
+requirements: [ json ]
+author: "Takashi Someda (@tksmd)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to typetalk
+ community.general.typetalk:
+ client_id: 12345
+ client_secret: 12345
+ topic: 1
+ msg: install completed
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, ConnectionError
+
+
+def do_request(module, url, params, headers=None):
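+    # POST the urlencoded params via fetch_url; any non-200 reply is raised
+    # as a ConnectionError carrying the HTTP status code in .code.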
+ data = urlencode(params)
+ if headers is None:
+ headers = dict()
+ headers = dict(headers, **{
+ 'User-Agent': 'Ansible/typetalk module',
+ })
+ r, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ exc = ConnectionError(info['msg'])
+ exc.code = info['status']
+ raise exc
+ return r
+
+
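+# Typetalk issues tokens via the OAuth2 client-credentials grant: POSTing the
+# client_id/client_secret pair returns a bearer token scoped to 'topic.post'.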
+def get_access_token(module, client_id, client_secret):
+ params = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'grant_type': 'client_credentials',
+ 'scope': 'topic.post'
+ }
+ res = do_request(module, 'https://typetalk.com/oauth2/access_token', params)
+ return json.load(res)['access_token']
+
+
+def send_message(module, client_id, client_secret, topic, msg):
+ """
+ send message to typetalk
+ """
+ try:
+ access_token = get_access_token(module, client_id, client_secret)
+ url = 'https://typetalk.com/api/v1/topics/%d' % topic
+ headers = {
+ 'Authorization': 'Bearer %s' % access_token,
+ }
+ do_request(module, url, {'message': msg}, headers)
+ return True, {'access_token': access_token}
+ except ConnectionError as e:
+ return False, e
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ client_id=dict(required=True),
+ client_secret=dict(required=True, no_log=True),
+ topic=dict(required=True, type='int'),
+ msg=dict(required=True),
+ ),
+ supports_check_mode=False
+ )
+
+ if not json:
+ module.fail_json(msg="json module is required")
+
+ client_id = module.params["client_id"]
+ client_secret = module.params["client_secret"]
+ topic = module.params["topic"]
+ msg = module.params["msg"]
+
+ res, error = send_message(module, client_id, client_secret, topic, msg)
+ if not res:
+        module.fail_json(msg='failed to send message, response code %s' % error.code)
+
+ module.exit_json(changed=True, topic=topic, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py
new file mode 100644
index 00000000..3ef81eaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: npm
+short_description: Manage node.js packages with npm
+description:
+ - Manage node.js packages with Node Package Manager (npm).
+author: "Chris Hoffman (@chrishoffman)"
+options:
+ name:
+ description:
+ - The name of a node.js library to install.
+ type: str
+ required: false
+ path:
+ description:
+ - The base path where the node.js libraries are installed.
+ type: path
+ required: false
+ version:
+ description:
+ - The version to be installed.
+ type: str
+ required: false
+ global:
+ description:
+ - Install the node.js library globally.
+ required: false
+ default: no
+ type: bool
+ executable:
+ description:
+ - The executable location for npm.
+ - This is useful if you are using a version manager, such as nvm.
+ type: path
+ required: false
+ ignore_scripts:
+ description:
+ - Use the C(--ignore-scripts) flag when installing.
+ required: false
+ type: bool
+ default: no
+ unsafe_perm:
+ description:
+ - Use the C(--unsafe-perm) flag when installing.
+ type: bool
+ default: no
+ ci:
+ description:
+ - Install packages based on package-lock file, same as running C(npm ci).
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode, excluding devDependencies.
+ required: false
+ type: bool
+ default: no
+ registry:
+ description:
+ - The registry to install modules from.
+ required: false
+ type: str
+ state:
+ description:
+ - The state of the node.js library.
+ required: false
+ type: str
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - npm installed in bin path (recommended /usr/local/bin)
+'''
+
+EXAMPLES = r'''
+- name: Install "coffee-script" node.js package.
+ community.general.npm:
+ name: coffee-script
+ path: /app/location
+
+- name: Install "coffee-script" node.js package on version 1.6.1.
+ community.general.npm:
+ name: coffee-script
+ version: '1.6.1'
+ path: /app/location
+
+- name: Install "coffee-script" node.js package globally.
+ community.general.npm:
+ name: coffee-script
+ global: yes
+
+- name: Remove the globally installed package "coffee-script".
+ community.general.npm:
+ name: coffee-script
+ global: yes
+ state: absent
+
+- name: Install "coffee-script" node.js package from custom registry.
+ community.general.npm:
+ name: coffee-script
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.npm:
+ path: /app/location
+
+- name: Update packages based on package.json to their latest version.
+ community.general.npm:
+ path: /app/location
+ state: latest
+
+- name: Install packages based on package.json using the npm installed with nvm v0.10.1.
+ community.general.npm:
+ path: /app/location
+ executable: /opt/nvm/v0.10.1/bin/npm
+ state: present
+'''
+
+import json
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Npm(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.glbl = kwargs['glbl']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+ self.unsafe_perm = kwargs['unsafe_perm']
+ self.state = kwargs['state']
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('npm', True)]
+
+ if kwargs['version'] and self.state != 'absent':
+ self.name_version = self.name + '@' + str(self.version)
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
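+        # Mutating npm commands only run outside check mode; read-only calls
+        # such as list() and list_outdated() pass run_in_check_mode=True so
+        # the current state can still be inspected.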
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = self.executable + args
+
+ if self.glbl:
+ cmd.append('--global')
+ if self.production and ('install' in cmd or 'update' in cmd):
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.unsafe_perm:
+ cmd.append('--unsafe-perm')
+ if self.name and add_package_name:
+ cmd.append(self.name_version)
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
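+        # Parse `npm list --json --long`: dependencies flagged as missing or
+        # invalid are reported as missing so that state=present reinstalls them.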
+ cmd = ['list', '--json', '--long']
+
+ installed = list()
+ missing = list()
+ data = {}
+ try:
+ data = json.loads(self._exec(cmd, True, False, False) or '{}')
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
+ missing.append(dep)
+ elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
+ missing.append(dep)
+ else:
+ installed.append(dep)
+ if self.name and self.name not in installed:
+ missing.append(self.name)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ return self._exec(['install'])
+
+ def ci_install(self):
+ return self._exec(['ci'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+ def list_outdated(self):
+ outdated = list()
+ data = self._exec(['outdated'], True, False)
+ for dep in data.splitlines():
+ if dep:
+ # node.js v0.10.22 changed the `npm outdated` module separator
+ # from "@" to " ". Split on both for backwards compatibility.
+ pkg, other = re.split(r'\s|@', dep, 1)
+ outdated.append(pkg)
+
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, type='str'),
+ path=dict(default=None, type='path'),
+ version=dict(default=None, type='str'),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None, type='str'),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ unsafe_perm=dict(default=False, type='bool'),
+ ci=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ glbl = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+ unsafe_perm = module.params['unsafe_perm']
+ ci = module.params['ci']
+
+ if not path and not glbl:
+ module.fail_json(msg='path must be specified when not using global')
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
+ executable=executable, registry=registry, ignore_scripts=ignore_scripts,
+ unsafe_perm=unsafe_perm, state=state)
+
+ changed = False
+ if ci:
+ npm.ci_install()
+ changed = True
+ elif state == 'present':
+ installed, missing = npm.list()
+ if missing:
+ changed = True
+ npm.install()
+ elif state == 'latest':
+ installed, missing = npm.list()
+ outdated = npm.list_outdated()
+ if missing:
+ changed = True
+ npm.install()
+ if outdated:
+ changed = True
+ npm.update()
+ else: # absent
+ installed, missing = npm.list()
+ if name in installed:
+ changed = True
+ npm.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py
new file mode 100644
index 00000000..9d4a5186
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+
+# (c) 2016, Marcin Skarbek <github@skarbek.name>
+# (c) 2016, Andreas Olsson <andreas@arrakis.se>
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+#
+# This module was ported from https://github.com/mskarbek/ansible-nsupdate
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nsupdate
+
+short_description: Manage DNS records.
+description:
+ - Create, update and remove DNS records using DDNS updates
+requirements:
+ - dnspython
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ description:
+ - Manage DNS record.
+ choices: ['present', 'absent']
+ default: 'present'
+ server:
+ description:
+ - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
+ required: true
+ port:
+ description:
+ - Use this TCP port when connecting to C(server).
+ default: 53
+ key_name:
+ description:
+ - Use TSIG key name to authenticate against DNS C(server)
+ key_secret:
+ description:
+ - Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
+ key_algorithm:
+ description:
+ - Specify key algorithm used by C(key_secret).
+ choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
+ 'hmac-sha512']
+ default: 'hmac-md5'
+ zone:
+ description:
+ - DNS record will be modified on this C(zone).
+ - When omitted DNS will be queried to attempt finding the correct zone.
+ - Starting with Ansible 2.7 this parameter is optional.
+ record:
+ description:
+ - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
+ required: true
+ type:
+ description:
+ - Sets the record type.
+ default: 'A'
+ ttl:
+ description:
+ - Sets the record TTL.
+ default: 3600
+ value:
+ description:
+ - Sets the record value.
+ protocol:
+ description:
+ - Sets the transport protocol (TCP or UDP). TCP is the recommended and more robust option.
+ default: 'tcp'
+ choices: ['tcp', 'udp']
+'''
+
+EXAMPLES = '''
+- name: Add or modify ansible.example.org A to 192.168.1.1
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: "192.168.1.1"
+
+- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
+
+- name: Remove puppet.example.org CNAME
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "puppet"
+ type: "CNAME"
+ state: absent
+
+- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ value: "ansible.example.org."
+ state: present
+
+- name: Remove 1.1.168.192.in-addr.arpa. PTR
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+ description: Whether the module has modified the record
+ returned: success
+ type: bool
+record:
+ description: DNS record
+ returned: success
+ type: str
+ sample: 'ansible'
+ttl:
+ description: DNS record TTL
+ returned: success
+ type: int
+ sample: 86400
+type:
+ description: DNS record type
+ returned: success
+ type: str
+ sample: 'CNAME'
+value:
+ description: DNS record value(s)
+ returned: success
+ type: list
+ sample: '192.168.1.1'
+zone:
+ description: DNS record zone
+ returned: success
+ type: str
+ sample: 'example.org.'
+dns_rc:
+ description: dnspython return code
+ returned: always
+ type: int
+ sample: 4
+dns_rc_str:
+ description: dnspython return code (string representation)
+ returned: always
+ type: str
+ sample: 'REFUSED'
+'''
+
+import traceback
+
+from binascii import Error as binascii_error
+from socket import error as socket_error
+
+DNSPYTHON_IMP_ERR = None
+try:
+ import dns.update
+ import dns.query
+ import dns.tsigkeyring
+ import dns.message
+ import dns.resolver
+
+ HAVE_DNSPYTHON = True
+except ImportError:
+ DNSPYTHON_IMP_ERR = traceback.format_exc()
+ HAVE_DNSPYTHON = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class RecordManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ if module.params['key_name']:
+ try:
+ self.keyring = dns.tsigkeyring.from_text({
+ module.params['key_name']: module.params['key_secret']
+ })
+ except TypeError:
+ module.fail_json(msg='Missing key_secret')
+ except binascii_error as e:
+ module.fail_json(msg='TSIG key error: %s' % to_native(e))
+ else:
+ self.keyring = None
+
+ if module.params['key_algorithm'] == 'hmac-md5':
+ self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
+ else:
+ self.algorithm = module.params['key_algorithm']
+
+ if module.params['zone'] is None:
+ if module.params['record'][-1] != '.':
+ self.module.fail_json(msg='record must be absolute when omitting zone parameter')
+ self.zone = self.lookup_zone()
+ else:
+ self.zone = module.params['zone']
+
+ if self.zone[-1] != '.':
+ self.zone += '.'
+
+ if module.params['record'][-1] != '.':
+ self.fqdn = module.params['record'] + '.' + self.zone
+ else:
+ self.fqdn = module.params['record']
+
+ if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None:
+ self.value = list(map(self.txt_helper, self.module.params['value']))
+ else:
+ self.value = self.module.params['value']
+
+ self.dns_rc = 0
+
+ def txt_helper(self, entry):
+ if entry[0] == '"' and entry[-1] == '"':
+ return entry
+ return '"{text}"'.format(text=entry)
+
+ def lookup_zone(self):
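+        # Walk the record name towards the root, querying the target server
+        # for the SOA at each step, until the authority section names the
+        # enclosing zone.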
+ name = dns.name.from_text(self.module.params['record'])
+ while True:
+ query = dns.message.make_query(name, dns.rdatatype.SOA)
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
+ self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
+ self.module.params['server'], self.module.params['record']))
+ try:
+ zone = lookup.authority[0].name
+ if zone == name:
+ return zone.to_text()
+ except IndexError:
+ pass
+ try:
+ name = name.parent()
+ except dns.name.NoParent:
+ self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record']))
+
+ def __do_update(self, update):
+ response = None
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ return response
+
+ def create_or_update_record(self):
+ result = {'changed': False, 'failed': False}
+
+ exists = self.record_exists()
+ if exists in [0, 2]:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ if exists == 0:
+ self.dns_rc = self.create_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
+
+ elif exists == 2:
+ self.dns_rc = self.modify_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ else:
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ return result
+
+ def create_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+
+ response = self.__do_update(update)
+ return dns.message.Message.rcode(response)
+
+ def modify_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+
+ return dns.message.Message.rcode(response)
+
+ def remove_record(self):
+ result = {'changed': False, 'failed': False}
+
+ if self.record_exists() == 0:
+ return result
+
+ # Check mode and record exists, declared fake change.
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
+ else:
+ result['changed'] = True
+
+ return result
+
+ def record_exists(self):
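+        # Return codes: 0 = record absent, 1 = present and already up to
+        # date, 2 = present but in need of modification (value or TTL drift).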
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ try:
+ update.present(self.module.params['record'], self.module.params['type'])
+ except dns.rdatatype.UnknownRdatatype as e:
+ self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.module.params['state'] == 'absent':
+ return 1
+ for entry in self.value:
+ try:
+ update.present(self.module.params['record'], self.module.params['type'], entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.ttl_changed():
+ return 2
+ else:
+ return 1
+ else:
+ return 2
+ else:
+ return 0
+
+ def ttl_changed(self):
+ query = dns.message.make_query(self.fqdn, self.module.params['type'])
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+
+ if lookup.rcode() != dns.rcode.NOERROR:
+ self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')
+
+ current_ttl = lookup.answer[0].ttl
+ return current_ttl != self.module.params['ttl']
+
+
+def main():
+ tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
+ 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ server=dict(required=True, type='str'),
+ port=dict(required=False, default=53, type='int'),
+ key_name=dict(required=False, type='str'),
+ key_secret=dict(required=False, type='str', no_log=True),
+ key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
+ zone=dict(required=False, default=None, type='str'),
+ record=dict(required=True, type='str'),
+ type=dict(required=False, default='A', type='str'),
+ ttl=dict(required=False, default=3600, type='int'),
+ value=dict(required=False, default=None, type='list'),
+ protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAVE_DNSPYTHON:
+ module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR)
+
+ if len(module.params["record"]) == 0:
+ module.fail_json(msg='record cannot be empty.')
+
+ record = RecordManager(module)
+ result = {}
+ if module.params["state"] == 'absent':
+ result = record.remove_record()
+ elif module.params["state"] == 'present':
+ result = record.create_or_update_record()
+
+ result['dns_rc'] = record.dns_rc
+ result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ result['record'] = dict(zone=record.zone,
+ record=module.params['record'],
+ type=module.params['type'],
+ ttl=module.params['ttl'],
+ value=record.value)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py
new file mode 100644
index 00000000..06dc4af0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oci_vcn
+short_description: Manage Virtual Cloud Networks (VCNs) in OCI
+description:
+ - This module allows the user to create, delete and update virtual cloud networks (VCNs) in OCI.
+ The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from
+ U(https://github.com/oracle/oci-ansible-modules/releases).
+options:
+ cidr_block:
+ description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present).
+ type: str
+ required: false
+ compartment_id:
+ description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present).
+ This option is mutually exclusive with I(vcn_id).
+ type: str
+ display_name:
+ description: A user-friendly name. Does not have to be unique, and it's changeable.
+ type: str
+ aliases: [ 'name' ]
+ dns_label:
+ description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to
+ form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example,
+ bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice
+ to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins
+ with a letter. The value cannot be changed.
+ type: str
+ state:
+ description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ vcn_id:
+ description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN
+ with I(state=present). This option is mutually exclusive with I(compartment_id).
+ type: str
+ aliases: [ 'id' ]
+author: "Rohit Chaware (@rohitChaware)"
+extends_documentation_fragment:
+- community.general.oracle
+- community.general.oracle_creatable_resource
+- community.general.oracle_wait_options
+- community.general.oracle_tags
+
+'''
+
+EXAMPLES = """
+- name: Create a VCN
+ community.general.oci_vcn:
+ cidr_block: '10.0.0.0/16'
+ compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
+ display_name: my_vcn
+ dns_label: ansiblevcn
+
+- name: Update the specified VCN's display name
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ display_name: ansible_vcn
+
+- name: Delete the specified VCN
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ state: absent
+"""
+
+RETURN = """
+vcn:
+ description: Information about the VCN
+ returned: On successful create and update operation
+ type: dict
+ sample: {
+ "cidr_block": "10.0.0.0/16",
+ compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
+
+try:
+ from oci.core.virtual_network_client import VirtualNetworkClient
+ from oci.core.models import CreateVcnDetails
+ from oci.core.models import UpdateVcnDetails
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+def delete_vcn(virtual_network_client, module):
+ result = oci_utils.delete_and_wait(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ delete_fn=virtual_network_client.delete_vcn,
+ kwargs_delete={"vcn_id": module.params["vcn_id"]},
+ module=module,
+ )
+ return result
+
+
+def update_vcn(virtual_network_client, module):
+ result = oci_utils.check_and_update_resource(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ update_fn=virtual_network_client.update_vcn,
+ primitive_params_update=["vcn_id"],
+ kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
+ module=module,
+ update_attributes=UpdateVcnDetails().attribute_map.keys(),
+ )
+ return result
+
+
+def create_vcn(virtual_network_client, module):
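+    # Copy every module parameter that matches a CreateVcnDetails attribute
+    # (for example cidr_block, compartment_id, display_name, dns_label).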
+ create_vcn_details = CreateVcnDetails()
+ for attribute in create_vcn_details.attribute_map.keys():
+ if attribute in module.params:
+ setattr(create_vcn_details, attribute, module.params[attribute])
+
+ result = oci_utils.create_and_wait(
+ resource_type="vcn",
+ create_fn=virtual_network_client.create_vcn,
+ kwargs_create={"create_vcn_details": create_vcn_details},
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ get_param="vcn_id",
+ module=module,
+ )
+ return result
+
+
+def main():
+ module_args = oci_utils.get_taggable_arg_spec(
+ supports_create=True, supports_wait=True
+ )
+ module_args.update(
+ dict(
+ cidr_block=dict(type="str", required=False),
+ compartment_id=dict(type="str", required=False),
+ display_name=dict(type="str", required=False, aliases=["name"]),
+ dns_label=dict(type="str", required=False),
+ state=dict(
+ type="str",
+ required=False,
+ default="present",
+ choices=["absent", "present"],
+ ),
+ vcn_id=dict(type="str", required=False, aliases=["id"]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ mutually_exclusive=[["compartment_id", "vcn_id"]],
+ )
+
+ if not HAS_OCI_PY_SDK:
+ module.fail_json(msg=missing_required_lib("oci"))
+
+ virtual_network_client = oci_utils.create_service_client(
+ module, VirtualNetworkClient
+ )
+
+ exclude_attributes = {"display_name": True, "dns_label": True}
+ state = module.params["state"]
+ vcn_id = module.params["vcn_id"]
+
+ if state == "absent":
+ if vcn_id is not None:
+ result = delete_vcn(virtual_network_client, module)
+ else:
+ module.fail_json(
+ msg="Specify vcn_id with state as 'absent' to delete a VCN."
+ )
+
+ else:
+ if vcn_id is not None:
+ result = update_vcn(virtual_network_client, module)
+ else:
+ result = oci_utils.check_and_create_resource(
+ resource_type="vcn",
+ create_fn=create_vcn,
+ kwargs_create={
+ "virtual_network_client": virtual_network_client,
+ "module": module,
+ },
+ list_fn=virtual_network_client.list_vcns,
+ kwargs_list={"compartment_id": module.params["compartment_id"]},
+ module=module,
+ model=CreateVcnDetails(),
+ exclude_attributes=exclude_attributes,
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py
new file mode 100644
index 00000000..313a7f70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Westcott <john.westcott.iv@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: odbc
+author: "John Westcott IV (@john-westcott-iv)"
+version_added: "1.0.0"
+short_description: Execute SQL via ODBC
+description:
+ - Read/Write info via ODBC drivers.
+options:
+ dsn:
+ description:
+ - The connection string passed into ODBC.
+ required: yes
+ type: str
+ query:
+ description:
+ - The SQL query to perform.
+ required: yes
+ type: str
+ params:
+ description:
+ - Parameters to pass to the SQL query.
+ type: list
+ elements: str
+ commit:
+ description:
+ - Perform a commit after the execution of the SQL query.
+ - Some databases allow a commit after a select whereas others raise an exception.
+ - Default is C(true) to support legacy module behavior.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+requirements:
+ - "python >= 2.6"
+ - "pyodbc"
+
+notes:
+ - "Like the command module, this module always returns changed = yes whether or not the query would change the database."
+ - "To alter this behavior you can use C(changed_when): [yes or no]."
+ - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)."
+'''
+
+EXAMPLES = '''
+- name: Set some values in the test db
+ community.general.odbc:
+ dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;"
+ query: "Select * from table_a where column1 = ?"
+ params:
+ - "value1"
+ commit: false
+ changed_when: no
+'''
+
+RETURN = '''
+results:
+ description: List of lists of strings containing selected rows, likely empty for DDL statements.
+ returned: success
+ type: list
+ elements: list
+description:
+ description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes."
+ returned: success
+ type: list
+ elements: dict
+row_count:
+ description: "The number of rows selected or modified according to the cursor defaults to -1. See notes."
+ returned: success
+ type: int
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+HAS_PYODBC = None
+try:
+ import pyodbc
+ HAS_PYODBC = True
+except ImportError:
+ HAS_PYODBC = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dsn=dict(type='str', required=True, no_log=True),
+ query=dict(type='str', required=True),
+ params=dict(type='list', elements='str'),
+ commit=dict(type='bool', default=True),
+ ),
+ )
+
+ dsn = module.params.get('dsn')
+ query = module.params.get('query')
+ params = module.params.get('params')
+ commit = module.params.get('commit')
+
+ if not HAS_PYODBC:
+ module.fail_json(msg=missing_required_lib('pyodbc'))
+
+ # Try to make a connection with the DSN
+ connection = None
+ try:
+ connection = pyodbc.connect(dsn)
+ except Exception as e:
+ module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e)))
+
+ result = dict(
+ changed=True,
+ description=[],
+ row_count=-1,
+ results=[],
+ )
+
+ try:
+ cursor = connection.cursor()
+
+ if params:
+ cursor.execute(query, params)
+ else:
+ cursor.execute(query)
+ if commit:
+ cursor.commit()
+ try:
+            # Get the rows out into a 2D array
+ for row in cursor.fetchall():
+ new_row = []
+ for column in row:
+ new_row.append("{0}".format(column))
+ result['results'].append(new_row)
+
+ # Return additional information from the cursor
+ for row_description in cursor.description:
+ description = {}
+ description['name'] = row_description[0]
+ description['type'] = row_description[1].__name__
+ description['display_size'] = row_description[2]
+ description['internal_size'] = row_description[3]
+ description['precision'] = row_description[4]
+ description['scale'] = row_description[5]
+ description['nullable'] = row_description[6]
+ result['description'].append(description)
+
+ result['row_count'] = cursor.rowcount
+        except pyodbc.ProgrammingError:
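+            # Non-SELECT statements have no result set, so fetchall() and
+            # cursor.description raise ProgrammingError here; that is
+            # expected and safely ignored.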
+ pass
+ except Exception as e:
+ module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e)))
+
+ cursor.close()
+ except Exception as e:
+ module.fail_json(msg="Failed to execute query: {0}".format(to_native(e)))
+ finally:
+ connection.close()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py
new file mode 100644
index 00000000..2574a750
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Marc Sensenich <hello@marc-sensenich.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: office_365_connector_card
+short_description: Use webhooks to create Connector Card messages within an Office 365 group
+description:
+ - Creates Connector Card messages through Office 365 Connectors
+ U(https://dev.outlook.com/Connectors).
+author: "Marc Sensenich (@marc-sensenich)"
+notes:
+ - This module is not idempotent; if the same task is run twice,
+ two Connector Cards will be created.
+options:
+ webhook:
+ type: str
+ description:
+ - The webhook URL is given to you when you create a new Connector.
+ required: true
+ summary:
+ type: str
+ description:
+ - A string used for summarizing card content.
+ - This will be shown as the message subject.
+ - This is required if the text parameter isn't populated.
+ color:
+ type: str
+ description:
+ - Accent color used for branding or indicating status in the card.
+ title:
+ type: str
+ description:
+ - A title for the Connector message. Shown at the top of the message.
+ text:
+ type: str
+ description:
+ - The main text of the card.
+ - This will be rendered below the sender information and optional title,
+ and above any sections or actions present.
+ actions:
+ type: list
+ description:
+ - This array of objects will power the action links
+ found at the bottom of the card.
+ sections:
+ type: list
+ description:
+ - Contains a list of sections to display in the card.
+ - For more information see U(https://dev.outlook.com/Connectors/reference).
+'''
+
+EXAMPLES = """
+- name: Create a simple Connector Card
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ text: 'Hello, World!'
+
+- name: Create a Connector Card with the full format
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ summary: This is the summary property
+ title: This is the **card's title** property
+ text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ color: E81123
+ sections:
+ - title: This is the **section's title** property
+ activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
+ activity_title: This is the section's **activityTitle** property
+ activity_subtitle: This is the section's **activitySubtitle** property
+ activity_text: This is the section's **activityText** property.
+ hero_image:
+ image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ facts:
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ images:
+ - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
+ title: This is the image's alternate text
+ actions:
+ - "@type": ActionCard
+ name: Comment
+ inputs:
+ - "@type": TextInput
+ id: comment
+ is_multiline: true
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": ActionCard
+ name: Due Date
+ inputs:
+ - "@type": DateInput
+ id: dueDate
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": HttpPOST
+ name: Action's name prop.
+ target: http://...
+ - "@type": OpenUri
+ name: Action's name prop
+ targets:
+ - os: default
+ uri: http://...
+ - start_group: true
+ title: This is the title of a **second section**
+ text: This second section is visually separated from the first one by setting its
+ **startGroup** property to true.
+"""
+
+RETURN = """
+"""
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions"
+OFFICE_365_CARD_TYPE = "MessageCard"
+OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required."
+OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable."
+
+
+def build_actions(actions):
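+    # Actions are written in snake_case in playbooks; convert each one to the
+    # camelCase keys the MessageCard format expects.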
+ action_items = []
+
+ for action in actions:
+ action_item = snake_dict_to_camel_dict(action)
+ action_items.append(action_item)
+
+ return action_items
+
+
+def build_sections(sections):
+ sections_created = []
+
+ for section in sections:
+ sections_created.append(build_section(section))
+
+ return sections_created
+
+
+def build_section(section):
+ section_payload = dict()
+
+ if 'title' in section:
+ section_payload['title'] = section['title']
+
+ if 'start_group' in section:
+ section_payload['startGroup'] = section['start_group']
+
+ if 'activity_image' in section:
+ section_payload['activityImage'] = section['activity_image']
+
+ if 'activity_title' in section:
+ section_payload['activityTitle'] = section['activity_title']
+
+ if 'activity_subtitle' in section:
+ section_payload['activitySubtitle'] = section['activity_subtitle']
+
+ if 'activity_text' in section:
+ section_payload['activityText'] = section['activity_text']
+
+ if 'hero_image' in section:
+ section_payload['heroImage'] = section['hero_image']
+
+ if 'text' in section:
+ section_payload['text'] = section['text']
+
+ if 'facts' in section:
+ section_payload['facts'] = section['facts']
+
+ if 'images' in section:
+ section_payload['images'] = section['images']
+
+ if 'actions' in section:
+ section_payload['potentialAction'] = build_actions(section['actions'])
+
+ return section_payload
+
+
+def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None):
+ payload = dict()
+ payload['@context'] = OFFICE_365_CARD_CONTEXT
+ payload['@type'] = OFFICE_365_CARD_TYPE
+
+ if summary is not None:
+ payload['summary'] = summary
+
+ if color is not None:
+ payload['themeColor'] = color
+
+ if title is not None:
+ payload['title'] = title
+
+ if text is not None:
+ payload['text'] = text
+
+ if actions:
+ payload['potentialAction'] = build_actions(actions)
+
+ if sections:
+ payload['sections'] = build_sections(sections)
+
+ payload = module.jsonify(payload)
+ return payload
+
+
+def do_notify_connector_card_webhook(module, webhook, payload):
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ response, info = fetch_url(
+ module=module,
+ url=webhook,
+ headers=headers,
+ method='POST',
+ data=payload
+ )
+
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ elif info['status'] == 400 and module.check_mode:
+ if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
+ else:
+ module.fail_json(
+ msg="failed to send %s as a connector card to Incoming Webhook: %s"
+ % (payload, info['msg'])
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ webhook=dict(required=True, no_log=True),
+ summary=dict(type='str'),
+ color=dict(type='str'),
+ title=dict(type='str'),
+ text=dict(type='str'),
+ actions=dict(type='list'),
+ sections=dict(type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ webhook = module.params['webhook']
+ summary = module.params['summary']
+ color = module.params['color']
+ title = module.params['title']
+ text = module.params['text']
+ actions = module.params['actions']
+ sections = module.params['sections']
+
+ payload = build_payload_for_connector_card(
+ module,
+ summary,
+ color,
+ title,
+ text,
+ actions,
+ sections)
+
+ if module.check_mode:
+ # In check mode, send an empty payload to validate connection
+ check_mode_payload = build_payload_for_connector_card(module)
+ do_notify_connector_card_webhook(module, webhook, check_mode_payload)
+
+ do_notify_connector_card_webhook(module, webhook, payload)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py
new file mode 100644
index 00000000..64092fd1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ohai
+short_description: Returns inventory data from I(Ohai)
+description:
+ - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program
+ (U(https://docs.chef.io/ohai.html)) on the remote host and
+ returns JSON inventory data.
+ I(Ohai) data is a bit more verbose and nested than I(facter).
+options: {}
+notes: []
+requirements: [ "ohai" ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+# Retrieve Ohai data from all web servers and store it in one file per host
+ansible webservers -m ohai --tree=/tmp/ohaidata
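+
+# The same data can be gathered from a playbook task; every Ohai key is
+# returned as a top-level value on the task result:
+- name: Gather Ohai inventory data
+  community.general.ohai:
+  register: ohai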
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict()
+ )
+ cmd = ["/usr/bin/env", "ohai"]
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py
new file mode 100644
index 00000000..4e6738cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# copyright: (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: omapi_host
+short_description: Set up OMAPI hosts.
+description: Manage OMAPI hosts in compatible DHCPd servers.
+requirements:
+ - pypureomapi
+author:
+- Loic Blot (@nerzhul)
+options:
+ state:
+ description:
+ - Create or remove OMAPI host.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ hostname:
+ description:
+ - Sets the host lease hostname (mandatory if state=present).
+ type: str
+ aliases: [ name ]
+ host:
+ description:
+ - Sets OMAPI server host to interact with.
+ type: str
+ default: localhost
+ port:
+ description:
+ - Sets the OMAPI server port to interact with.
+ type: int
+ default: 7911
+ key_name:
+ description:
+ - Sets the TSIG key name for authenticating against OMAPI server.
+ type: str
+ required: true
+ key:
+ description:
+ - Sets the TSIG key content for authenticating against OMAPI server.
+ type: str
+ required: true
+ macaddr:
+ description:
+ - Sets the lease host MAC address.
+ type: str
+ required: true
+ ip:
+ description:
+ - Sets the lease host IP address.
+ type: str
+ statements:
+ description:
+ - Attach a list of OMAPI DHCP statements to the host lease (each without the ending semicolon).
+ type: list
+ default: []
+ ddns:
+ description:
+ - Enable dynamic DNS updates for this host.
+ type: bool
+ default: no
+
+'''
+EXAMPLES = r'''
+- name: Add a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.98.4.55
+ macaddr: 44:dd:ab:dd:11:44
+ name: server01
+ ip: 192.168.88.99
+ ddns: yes
+ statements:
+ - filename "pxelinux.0"
+ - next-server 1.1.1.1
+ state: present
+
+- name: Remove a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.1.1.1
+ macaddr: 00:66:ab:dd:11:44
+ state: absent
+'''
+
+RETURN = r'''
+lease:
+ description: dictionary containing host information
+ returned: success
+ type: complex
+ contains:
+ ip-address:
+ description: IP address, if one is assigned.
+ returned: success
+ type: str
+ sample: '192.168.1.5'
+ hardware-address:
+ description: MAC address
+ returned: success
+ type: str
+ sample: '00:11:22:33:44:55'
+ hardware-type:
+ description: hardware type, generally '1'
+ returned: success
+ type: int
+ sample: 1
+ name:
+ description: hostname
+ returned: success
+ type: str
+ sample: 'mydesktop'
+'''
+
+import binascii
+import socket
+import struct
+import traceback
+
+PUREOMAPI_IMP_ERR = None
+try:
+ from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
+ from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
+ from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
+ pureomapi_found = True
+except ImportError:
+ PUREOMAPI_IMP_ERR = traceback.format_exc()
+ pureomapi_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+
+
+class OmapiHostManager:
+ def __init__(self, module):
+ self.module = module
+ self.omapi = None
+ self.connect()
+
+ def connect(self):
+ try:
+ self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']),
+ self.module.params['key'])
+ except binascii.Error:
+ self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
+ except OmapiError as e:
+ self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
+ "are valid. Exception was: %s" % to_native(e))
+ except socket.error as e:
+ self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
+
+ def get_host(self, macaddr):
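+        # Query the server for an existing host object by MAC address; the
+        # server replies with an OMAPI_OP_UPDATE message when the object
+        # exists, so anything else is treated as "not found".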
+ msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
+ msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
+ msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ return None
+ return response
+
+ @staticmethod
+ def unpack_facts(obj):
+ result = dict(obj)
+ if 'hardware-address' in result:
+ result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')]))
+
+ if 'ip-address' in result:
+ result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')]))
+
+ if 'hardware-type' in result:
+ result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')])
+
+ return result
+
+ def setup_host(self):
+ if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
+ self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.")
+
+ msg = None
+ host_response = self.get_host(self.module.params['macaddr'])
+ # If host was not found using macaddr, add create message
+ if host_response is None:
+ msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
+ msg.message.append((to_bytes('create'), struct.pack('!I', 1)))
+ msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr'])))
+ msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname'])))
+ if self.module.params['ip'] is not None:
+ msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
+
+ stmt_join = ""
+ if self.module.params['ddns']:
+ stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
+
+ try:
+ if len(self.module.params['statements']) > 0:
+ stmt_join += "; ".join(self.module.params['statements'])
+ stmt_join += "; "
+ except TypeError as e:
+ self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))
+
+ if len(stmt_join) > 0:
+ msg.obj.append((to_bytes('statements'), to_bytes(stmt_join)))
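+ # For example (editor's note, values hypothetical): with ddns enabled and
+ # statements ['filename "pxelinux.0"', 'next-server 1.1.1.1'], stmt_join is
+ # 'ddns-hostname "server01"; filename "pxelinux.0"; next-server 1.1.1.1; '.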
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+ # Forge update message
+ else:
+ response_obj = self.unpack_facts(host_response.obj)
+ fields_to_update = {}
+
+ if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
+ unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
+ fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
+
+ # Name cannot be changed
+ if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
+ self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
+ "Please delete host and add new." %
+ (response_obj['name'], self.module.params['hostname']))
+
+ """
+ # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
+ if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
+ response_obj['statements'] != self.module.params['statements']:
+ with open('/tmp/omapi', 'w') as fb:
+ for (k,v) in iteritems(response_obj):
+ fb.writelines('statements: %s %s\n' % (k, v))
+ """
+ if len(fields_to_update) == 0:
+ self.module.exit_json(changed=False, lease=response_obj)
+ else:
+ msg = OmapiMessage.update(host_response.handle)
+ msg.update_object(fields_to_update)
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_STATUS:
+ self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True)
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+ def remove_host(self):
+ try:
+ self.omapi.del_host(self.module.params['macaddr'])
+ self.module.exit_json(changed=True)
+ except OmapiErrorNotFound:
+ self.module.exit_json()
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ host=dict(type='str', default="localhost"),
+ port=dict(type='int', default=7911),
+ key_name=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=True),
+ macaddr=dict(type='str', required=True),
+ hostname=dict(type='str', aliases=['name']),
+ ip=dict(type='str'),
+ ddns=dict(type='bool', default=False),
+ statements=dict(type='list', default=[]),
+ ),
+ supports_check_mode=False,
+ )
+
+ if not pureomapi_found:
+ module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR)
+
+ if module.params['key'] is None or len(module.params["key"]) == 0:
+ module.fail_json(msg="'key' parameter cannot be empty.")
+
+ if module.params['key_name'] is None or len(module.params["key_name"]) == 0:
+ module.fail_json(msg="'key_name' parameter cannot be empty.")
+
+ host_manager = OmapiHostManager(module)
+ try:
+ if module.params['state'] == 'present':
+ host_manager.setup_host()
+ elif module.params['state'] == 'absent':
+ host_manager.remove_host()
+ except ValueError as e:
+ module.fail_json(msg="OMAPI input value error: %s" % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py
new file mode 100644
index 00000000..68fbb1e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 1.2
+# Copyright (C) 2019 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_device_info
+short_description: Retrieves information about devices.
+description:
+ - This module retrieves the list of all devices along with the exhaustive inventory of each
+ device.
+options:
+ hostname:
+ description:
+ - Target IP address or hostname.
+ type: str
+ required: True
+ username:
+ description:
+ - Target username.
+ type: str
+ required: True
+ password:
+ description:
+ - Target user password.
+ type: str
+ required: True
+ port:
+ description:
+ - Target HTTPS port.
+ type: int
+ default: 443
+ fact_subset:
+ description:
+ - C(basic_inventory) returns the list of devices.
+ - C(detailed_inventory) returns the inventory details of specified devices.
+ - C(subsystem_health) returns the health status of specified devices.
+ type: str
+ choices: [basic_inventory, detailed_inventory, subsystem_health]
+ default: basic_inventory
+ system_query_options:
+ description:
+ - I(system_query_options) is applicable for the choices of I(fact_subset). Either I(device_id) or I(device_service_tag)
+ (or both) is mandatory for C(detailed_inventory) and C(subsystem_health).
+ type: dict
+ suboptions:
+ device_id:
+ description:
+ - A list of unique device identifiers, applicable
+ for C(detailed_inventory) and C(subsystem_health).
+ type: list
+ device_service_tag:
+ description:
+ - A list of service tags, applicable for C(detailed_inventory)
+ and C(subsystem_health).
+ type: list
+ inventory_type:
+ description:
+ - For C(detailed_inventory), it returns details of the specified inventory type.
+ type: str
+ filter:
+ description:
+ - For C(basic_inventory), it filters the collection of devices.
+ I(filter) query format should be aligned with OData standards.
+ type: str
+
+requirements:
+ - "python >= 2.7.5"
+author: "Sajna Shetty(@Sajna-Shetty)"
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve basic inventory of all devices.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+
+- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "basic_inventory"
+ system_query_options:
+ filter: "Id eq 33333 or Id eq 11111"
+
+- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ - 22222
+
+- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+ inventory_type: "serverDeviceCards"
+
+- name: Retrieve subsystem health of specified devices identified by service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "subsystem_health"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
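+
+# Editor's sketch: port is optional and defaults to 443; the value below is hypothetical.
+- name: Retrieve basic inventory over a non-default HTTPS port
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ port: 8443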
+
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Overall device information status.
+ returned: on error
+ sample: "Failed to fetch the device information"
+device_info:
+ type: dict
+ description: Information collected from the devices.
+ returned: success
+ sample: {
+ "value": [
+ {
+ "Actions": null,
+ "AssetTag": null,
+ "ChassisServiceTag": null,
+ "ConnectionState": true,
+ "DeviceManagement": [
+ {
+ "DnsName": "dnsname.host.com",
+ "InstrumentationName": "MX-12345",
+ "MacAddress": "11:10:11:10:11:10",
+ "ManagementId": 12345,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 12345,
+ "ManagementProfileId": 12345,
+ "ManagementURL": "https://192.168.0.1:443",
+ "Status": 1000,
+ "StatusDateTime": "2019-01-21 06:30:08.501"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "192.168.0.1"
+ }
+ ],
+ "DeviceName": "MX-0003I",
+ "DeviceServiceTag": "MXL1234",
+ "DeviceSubscription": null,
+ "LastInventoryTime": "2019-01-21 06:30:08.501",
+ "LastStatusTime": "2019-01-21 06:30:02.492",
+ "ManagedState": 3000,
+ "Model": "PowerEdge MX7000",
+ "PowerState": 17,
+ "SlotConfiguration": {},
+ "Status": 4000,
+ "SystemId": 2031,
+ "Type": 2000
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+DEVICES_INVENTORY_DETAILS = "detailed_inventory"
+DEVICES_SUBSYSTEM_HEALTH = "subsystem_health"
+DEVICES_INVENTORY_TYPE = "inventory_type"
+DEVICE_LIST = "basic_inventory"
+DESC_HTTP_ERROR = "HTTP Error 404: Not Found"
+device_fact_error_report = {}
+
+DEVICE_RESOURCE_COLLECTION = {
+ DEVICE_LIST: {"resource": "DeviceService/Devices"},
+ DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"},
+ DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"},
+ DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"},
+}
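+
+# Editor's illustration: _get_resource_parameters() fills the {Id} and
+# {InventoryType} placeholders above, producing paths such as
+# "DeviceService/Devices(11111)/InventoryDetails('serverDeviceCards')".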
+
+
+def _get_device_id_from_service_tags(service_tags, rest_obj):
+ """
+ Get device IDs from device service tags.
+ :arg service_tags: list of service tags
+ :arg rest_obj: RestOME class object in case of request with session.
+ :returns: dict mapping device_id to service_tag, e.g. {1345: "MXL1245"}
+ """
+ try:
+ path = DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]
+ resp = rest_obj.invoke_request('GET', path)
+ if resp.success:
+ devices_list = resp.json_data["value"]
+ service_tag_dict = {}
+ for item in devices_list:
+ if item["DeviceServiceTag"] in service_tags:
+ service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]})
+ available_service_tags = service_tag_dict.values()
+ not_available_service_tag = list(set(service_tags) - set(available_service_tags))
+ device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in not_available_service_tag))
+ else:
+ raise ValueError(resp.json_data)
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+ return service_tag_dict
+
+
+def is_int(val):
+ """check when device_id numeric represented value is int"""
+ try:
+ int(val)
+ return True
+ except ValueError:
+ return False
+
+
+def _check_duplicate_device_id(device_id_list, service_tag_dict):
+ """If service_tag is duplicate of device_id, then updates the message as Duplicate report
+ :arg1: device_id_list : list of device_id
+ :arg2: service_tag_id_dict: dictionary of device_id to service tag map"""
+ if device_id_list:
+ device_id_represents_int = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)]
+ common_val = list(set(device_id_represents_int) & set(service_tag_dict.keys()))
+ for device_id in common_val:
+ device_fact_error_report.update(
+ {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)})
+ del service_tag_dict[device_id]
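+
+# Editor's sketch (hypothetical values): with device_id_list=["1345"] and
+# service_tag_dict={1345: "MXL1245"}, MXL1245 is reported as
+# "Duplicate report of device_id: 1345" and removed from the service-tag map.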
+
+
+def _get_device_identifier_map(module_params, rest_obj):
+ """
+ Builds the identifiers mapping
+ :returns: the dict of device_id to service_tag map
+ eg: {"device_id":{1234: None},"device_service_tag":{1345:"MXL1234"}}"""
+ system_query_options_param = module_params.get("system_query_options")
+ device_id_service_tag_dict = {}
+ if system_query_options_param is not None:
+ device_id_list = system_query_options_param.get("device_id")
+ device_service_tag_list = system_query_options_param.get("device_service_tag")
+ if device_id_list:
+ device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list)))
+ device_id_service_tag_dict["device_id"] = device_id_dict
+ if device_service_tag_list:
+ service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list,
+ rest_obj)
+
+ _check_duplicate_device_id(device_id_list, service_tag_dict)
+ device_id_service_tag_dict["device_service_tag"] = service_tag_dict
+ return device_id_service_tag_dict
+
+
+def _get_query_parameters(module_params):
+ """
+ Builds the query parameter.
+ :returns: dictionary in the query format expected by the API,
+ e.g. {"$filter": "Type eq 2000"}
+ """
+ system_query_options_param = module_params.get("system_query_options")
+ query_parameter = None
+ if system_query_options_param:
+ filter_by_val = system_query_options_param.get("filter")
+ if filter_by_val:
+ query_parameter = {"$filter": filter_by_val}
+ return query_parameter
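+
+# Editor's sketch: given module params containing
+# {"system_query_options": {"filter": "Type eq 2000"}}, the function above
+# returns {"$filter": "Type eq 2000"}, which main() hands to
+# rest_obj.invoke_request() as query_param.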
+
+
+def _get_resource_parameters(module_params, rest_obj):
+ """
+ Identifies the resource path for each requested identifier
+ :returns: dictionary containing identifier with respective resource path
+ eg:{"device_id":{1234:""DeviceService/Devices(1234)/InventoryDetails"},
+ "device_service_tag":{"MXL1234":"DeviceService/Devices(1345)/InventoryDetails"}}
+ """
+ fact_subset = module_params["fact_subset"]
+ path_dict = {}
+ if fact_subset != DEVICE_LIST:
+ inventory_type = None
+ device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj)
+ if fact_subset == DEVICES_INVENTORY_DETAILS:
+ system_query_options = module_params.get("system_query_options")
+ inventory_type = system_query_options.get(DEVICES_INVENTORY_TYPE)
+ path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset
+ for identifier_type, identifier_dict in device_id_service_tag_dict.items():
+ path_dict[identifier_type] = {}
+ for device_id, service_tag in identifier_dict.items():
+ key_identifier = service_tag if identifier_type == "device_service_tag" else device_id
+ path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id,
+ InventoryType=inventory_type)
+ path_dict[identifier_type].update({key_identifier: path})
+ else:
+ path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]})
+ return path_dict
+
+
+def _check_mutually_inclusive_arguments(val, module_params, required_args):
+ """"
+ Throws error if arguments detailed_inventory, subsystem_health
+ not exists with qualifier device_id or device_service_tag"""
+ system_query_options_param = module_params.get("system_query_options")
+ if system_query_options_param is None or (system_query_options_param is not None and not any(
+ system_query_options_param.get(qualifier) for qualifier in required_args)):
+ raise ValueError("One of the following {0} is required for {1}".format(required_args, val))
+
+
+def _validate_inputs(module_params):
+ """validates input parameters"""
+ fact_subset = module_params["fact_subset"]
+ if fact_subset != "basic_inventory":
+ _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"])
+
+
+def main():
+ system_query_options = {"type": 'dict', "required": False, "options": {
+ "device_id": {"type": 'list'},
+ "device_service_tag": {"type": 'list'},
+ "inventory_type": {"type": 'str'},
+ "filter": {"type": 'str', "required": False},
+ }}
+
+ module = AnsibleModule(
+ argument_spec={
+ "hostname": {"required": True, "type": 'str'},
+ "username": {"required": True, "type": 'str'},
+ "password": {"required": True, "type": 'str', "no_log": True},
+ "port": {"required": False, "default": 443, "type": 'int'},
+ "fact_subset": {"required": False, "default": "basic_inventory",
+ "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']},
+ "system_query_options": system_query_options,
+ },
+ required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']],
+ ['fact_subset', 'subsystem_health', ['system_query_options']], ],
+ supports_check_mode=False)
+
+ try:
+ _validate_inputs(module.params)
+ with RestOME(module.params, req_session=True) as rest_obj:
+ device_facts = _get_resource_parameters(module.params, rest_obj)
+ resp_status = []
+ if device_facts.get("basic_inventory"):
+ query_param = _get_query_parameters(module.params)
+ resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
+ device_facts = resp.json_data
+ resp_status.append(resp.status_code)
+ else:
+ for identifier_type, path_dict_map in device_facts.items():
+ for identifier, path in path_dict_map.items():
+ try:
+ resp = rest_obj.invoke_request('GET', path)
+ data = resp.json_data
+ resp_status.append(resp.status_code)
+ except HTTPError as err:
+ data = str(err)
+ path_dict_map[identifier] = data
+ if any(device_fact_error_report):
+ if "device_service_tag" in device_facts:
+ device_facts["device_service_tag"].update(device_fact_error_report)
+ else:
+ device_facts["device_service_tag"] = device_fact_error_report
+ if 200 in resp_status:
+ module.exit_json(device_info=device_facts)
+ else:
+ module.fail_json(msg="Failed to fetch the device information")
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py
new file mode 100644
index 00000000..efe1ce22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+#
+# Copyright 2018 www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: one_host
+
+short_description: Manages OpenNebula Hosts
+
+
+requirements:
+ - pyone
+
+description:
+ - "Manages OpenNebula Hosts"
+
+options:
+ name:
+ description:
+ - Hostname of the machine to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Takes the host to the desired lifecycle state.
+ - If C(absent) the host will be deleted from the cluster.
+ - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
+ - If C(enabled) the host is fully operational.
+ - C(disabled), e.g. to perform maintenance operations.
+ - C(offline), host is totally offline.
+ choices:
+ - absent
+ - present
+ - enabled
+ - disabled
+ - offline
+ default: present
+ type: str
+ im_mad_name:
+ description:
+ - The name of the information manager (IM) driver; these values are taken from oned.conf, from the IM_MAD C(name) attribute.
+ default: kvm
+ type: str
+ vmm_mad_name:
+ description:
+ - The name of the virtual machine manager (VMM) driver; these values are taken from oned.conf, from the VM_MAD C(name) attribute.
+ default: kvm
+ type: str
+ cluster_id:
+ description:
+ - The cluster ID.
+ default: 0
+ type: int
+ cluster_name:
+ description:
+ - The cluster specified by name.
+ type: str
+ labels:
+ description:
+ - The labels for this host.
+ type: list
+ template:
+ description:
+ - The template or attribute changes to merge into the host template.
+ aliases:
+ - attributes
+ type: dict
+
+extends_documentation_fragment:
+- community.general.opennebula
+
+
+author:
+ - Rafael del Valle (@rvalle)
+'''
+
+EXAMPLES = '''
+- name: Create a new host in OpenNebula
+ community.general.one_host:
+ name: host1
+ cluster_id: 1
+ api_url: http://127.0.0.1:2633/RPC2
+
+- name: Create a host and adjust its template
+ community.general.one_host:
+ name: host2
+ cluster_name: default
+ template:
+ LABELS:
+ - gold
+ - ssd
+ RESERVED_CPU: -100
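+
+# Editor's sketch: the state option also drives maintenance transitions.
+- name: Put a host into maintenance
+ community.general.one_host:
+ name: host1
+ state: disabled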
+'''
+
+# TODO: pending setting guidelines on returned values
+RETURN = '''
+'''
+
+# TODO: Documentation on valid state transitions is required to properly implement all valid cases
+# TODO: To be coherent with CLI this module should also provide "flush" functionality
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+try:
+ from pyone import HOST_STATES, HOST_STATUS
+except ImportError:
+ pass # handled at module utils
+
+
+# Pseudo definitions...
+
+HOST_ABSENT = -99 # the host is absent (special case defined by this module)
+
+
+class HostModule(OpenNebulaModule):
+
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'),
+ im_mad_name=dict(type='str', default="kvm"),
+ vmm_mad_name=dict(type='str', default="kvm"),
+ cluster_id=dict(type='int', default=0),
+ cluster_name=dict(type='str'),
+ labels=dict(type='list'),
+ template=dict(type='dict', aliases=['attributes']),
+ )
+
+ mutually_exclusive = [
+ ['cluster_id', 'cluster_name']
+ ]
+
+ OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive)
+
+ def allocate_host(self):
+ """
+ Creates a host entry in OpenNebula
+ Returns: True on success, fails otherwise.
+
+ """
+ if not self.one.host.allocate(self.get_parameter('name'),
+ self.get_parameter('vmm_mad_name'),
+ self.get_parameter('im_mad_name'),
+ self.get_parameter('cluster_id')):
+ self.fail(msg="could not allocate host")
+ else:
+ self.result['changed'] = True
+ return True
+
+ def wait_for_host_state(self, host, target_states):
+ """
+ Utility method that waits for a host state.
+ Args:
+ host: the host object to monitor.
+ target_states: list of states to wait for.
+
+ """
+ return self.wait_for_state('host',
+ lambda: self.one.host.info(host.ID).STATE,
+ lambda s: HOST_STATES(s).name, target_states,
+ invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR])
+
+ def run(self, one, module, result):
+
+ # Get the list of hosts
+ host_name = self.get_parameter("name")
+ host = self.get_host_by_name(host_name)
+
+ # manage host state
+ desired_state = self.get_parameter('state')
+ if bool(host):
+ current_state = host.STATE
+ current_state_name = HOST_STATES(host.STATE).name
+ else:
+ current_state = HOST_ABSENT
+ current_state_name = "ABSENT"
+
+ # apply properties
+ if desired_state == 'present':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]:
+ self.fail(msg="invalid host state %s" % current_state_name)
+
+ elif desired_state == 'enabled':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.ENABLED):
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not enable host")
+ elif current_state in [HOST_STATES.MONITORED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name)
+
+ elif desired_state == 'disabled':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be put in disabled state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.DISABLED):
+ self.wait_for_host_state(host, [HOST_STATES.DISABLED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not disable host")
+ elif current_state in [HOST_STATES.DISABLED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name)
+
+ elif desired_state == 'offline':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be placed in offline state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]:
+ if one.host.status(host.ID, HOST_STATUS.OFFLINE):
+ self.wait_for_host_state(host, [HOST_STATES.OFFLINE])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not set host offline")
+ elif current_state in [HOST_STATES.OFFLINE]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name)
+
+ elif desired_state == 'absent':
+ if current_state != HOST_ABSENT:
+ if one.host.delete(host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="could not delete host from cluster")
+
+ # if we reach this point we can assume that the host was taken to the desired state
+
+ if desired_state != "absent":
+ # manipulate or modify the template
+ desired_template_changes = self.get_parameter('template')
+
+ if desired_template_changes is None:
+ desired_template_changes = dict()
+
+ # complete the template with specific ansible parameters
+ if self.is_parameter('labels'):
+ desired_template_changes['LABELS'] = self.get_parameter('labels')
+
+ if self.requires_template_update(host.TEMPLATE, desired_template_changes):
+ # setup the root element so that pyone will generate XML instead of attribute vector
+ desired_template_changes = {"TEMPLATE": desired_template_changes}
+ if one.host.update(host.ID, desired_template_changes, 1): # merge the template
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host template")
+
+ # the cluster
+ if host.CLUSTER_ID != self.get_parameter('cluster_id'):
+ if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host cluster")
+
+ # return
+ self.exit()
+
+
+def main():
+ HostModule().run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py
new file mode 100644
index 00000000..867bab62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image
+short_description: Manages OpenNebula images
+description:
+ - Manages OpenNebula images
+requirements:
+ - python-oca
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ id:
+ description:
+ - The C(id) of the image you would like to manage.
+ type: int
+ name:
+ description:
+ - The C(name) of the image you would like to manage.
+ type: str
+ state:
+ description:
+ - C(present) - state that is used to manage the image
+ - C(absent) - delete the image
+ - C(cloned) - clone the image
+ - C(renamed) - rename the image to the C(new_name)
+ choices: ["present", "absent", "cloned", "renamed"]
+ default: present
+ type: str
+ enabled:
+ description:
+ - Whether the image should be enabled or disabled.
+ type: bool
+ new_name:
+ description:
+ - A name that will be assigned to the existing or new image.
+ - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Fetch the IMAGE by id
+ community.general.one_image:
+ id: 45
+ register: result
+
+- name: Print the IMAGE properties
+ ansible.builtin.debug:
+ msg: "{{ result }}"
+
+- name: Rename existing IMAGE
+ community.general.one_image:
+ id: 34
+ state: renamed
+ new_name: bar-image
+
+- name: Disable the IMAGE by id
+ community.general.one_image:
+ id: 37
+ enabled: no
+
+- name: Enable the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ enabled: yes
+
+- name: Clone the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ state: cloned
+ new_name: bar-image-clone
+ register: result
+
+- name: Delete the IMAGE by id
+ community.general.one_image:
+ id: '{{ result.id }}'
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: image id
+ type: int
+ returned: success
+ sample: 153
+name:
+ description: image name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: image's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: image's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: image's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: image's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of image instance
+ type: str
+ returned: success
+ sample: READY
+used:
+ description: is image in use
+ type: bool
+ returned: success
+ sample: true
+running_vms:
+ description: count of running vms that use this image
+ type: int
+ returned: success
+ sample: 7
+'''
+
+try:
+ import oca
+ HAS_OCA = True
+except ImportError:
+ HAS_OCA = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_image(module, client, predicate):
+ pool = oca.ImagePool(client)
+ # Filter -2 means fetch all images the user can USE
+ pool.info(filter=-2)
+
+ for image in pool:
+ if predicate(image):
+ return image
+
+ return None
+
+
+def get_image_by_name(module, client, image_name):
+ return get_image(module, client, lambda image: (image.name == image_name))
+
+
+def get_image_by_id(module, client, image_id):
+ return get_image(module, client, lambda image: (image.id == image_id))
+
+
+def get_image_instance(module, client, requested_id, requested_name):
+ if requested_id:
+ return get_image_by_id(module, client, requested_id)
+ else:
+ return get_image_by_name(module, client, requested_name)
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ image.info()
+
+ info = {
+ 'id': image.id,
+ 'name': image.name,
+ 'state': IMAGE_STATES[image.state],
+ 'running_vms': image.running_vms,
+ 'used': bool(image.running_vms),
+ 'user_name': image.uname,
+ 'user_id': image.uid,
+ 'group_name': image.gname,
+ 'group_id': image.gid,
+ }
+
+ return info
+
+
+def wait_for_state(module, image, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ image.info()
+ state = image.state
+
+ if state_predicate(state):
+ return image
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_ready(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
+
+
+def wait_for_delete(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
+
+
+def enable_image(module, client, image, enable):
+ image.info()
+ changed = False
+
+ state = image.state
+
+ if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
+ if enable:
+ module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
+ else:
+ module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
+
+ if ((enable and state != IMAGE_STATES.index('READY')) or
+ (not enable and state != IMAGE_STATES.index('DISABLED'))):
+ changed = True
+
+ if changed and not module.check_mode:
+ client.call('image.enable', image.id, enable)
+
+ result = get_image_info(image)
+ result['changed'] = changed
+
+ return result
+
+
+def clone_image(module, client, image, new_name):
+ if new_name is None:
+ new_name = "Copy of " + image.name
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ result = get_image_info(tmp_image)
+ result['changed'] = False
+ return result
+
+ if image.state == IMAGE_STATES.index('DISABLED'):
+ module.fail_json(msg="Cannot clone DISABLED image")
+
+ if not module.check_mode:
+ new_id = client.call('image.clone', image.id, new_name)
+ image = get_image_by_id(module, client, new_id)
+ wait_for_ready(module, image)
+
+ result = get_image_info(image)
+ result['changed'] = True
+
+ return result
+
+
+def rename_image(module, client, image, new_name):
+ if new_name is None:
+ module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
+
+ if new_name == image.name:
+ result = get_image_info(image)
+ result['changed'] = False
+ return result
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id))
+
+ if not module.check_mode:
+ client.call('image.rename', image.id, new_name)
+
+ result = get_image_info(image)
+ result['changed'] = True
+ return result
+
+
+def delete_image(module, client, image):
+
+ if not image:
+ return {'changed': False}
+
+ if image.running_vms > 0:
+ module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.")
+
+ if not module.check_mode:
+ client.call('image.delete', image.id)
+ wait_for_delete(module, image)
+
+ return {'changed': True}
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "id": {"required": False, "type": "int"},
+ "name": {"required": False, "type": "str"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'cloned', 'renamed'],
+ "type": "str"
+ },
+ "enabled": {"required": False, "type": "bool"},
+ "new_name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['id', 'name']],
+ supports_check_mode=True)
+
+ if not HAS_OCA:
+ module.fail_json(msg='This module requires python-oca to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ id = params.get('id')
+ name = params.get('name')
+ state = params.get('state')
+ enabled = params.get('enabled')
+ new_name = params.get('new_name')
+ client = oca.Client(auth.username + ':' + auth.password, auth.url)
+
+ result = {}
+
+ if not id and state == 'renamed':
+ module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
+
+ image = get_image_instance(module, client, id, name)
+ if not image and state != 'absent':
+ if id:
+ module.fail_json(msg="There is no image with id=" + str(id))
+ else:
+ module.fail_json(msg="There is no image with name=" + name)
+
+ if state == 'absent':
+ result = delete_image(module, client, image)
+ else:
+ result = get_image_info(image)
+ changed = False
+ result['changed'] = False
+
+ if enabled is not None:
+ result = enable_image(module, client, image, enabled)
+ if state == "cloned":
+ result = clone_image(module, client, image, new_name)
+ elif state == "renamed":
+ result = rename_image(module, client, image, new_name)
+
+ changed = changed or result['changed']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+ - A list of image IDs whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+ - If the C(name) begins with '~' the C(name) will be used as a regex pattern
+ which restricts the list of images (whose facts will be returned) whose names match the specified regex.
+ - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+ msg: "{{ result }}"
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of image info dictionaries
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ # Filter -2 means fetch all images the user can USE
+ pool = client.imagepool.info(-2, -1, -1, -1)
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+ module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # if the specific name is indicated
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+ - A list of image IDs whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+ - If the C(name) begins with '~' the C(name) will be used as a regex pattern
+ which restricts the list of images (whose facts will be returned) whose names match the specified regex.
+ - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+ msg: "{{ result }}"
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of image info dictionaries
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ # Filter -2 means fetch all images the user can USE
+ pool = client.imagepool.info(-2, -1, -1, -1)
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+ module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # if the specific name is indicated
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py
new file mode 100644
index 00000000..68f8398f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py
@@ -0,0 +1,768 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_service
+short_description: Deploy and manage OpenNebula services
+description:
+ - Manage OpenNebula services
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula OneFlow API server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the C(ONEFLOW_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used.
+ type: str
+ template_name:
+ description:
+ - Name of service template to use to create a new instance of a service
+ type: str
+ template_id:
+ description:
+ - ID of a service template to use to create a new instance of a service
+ type: int
+ service_id:
+ description:
+ - ID of a service instance that you would like to manage
+ type: int
+ service_name:
+ description:
+ - Name of a service instance that you would like to manage
+ type: str
+ unique:
+ description:
+ - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when
+ - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below.
+ type: bool
+ default: no
+ state:
+ description:
+ - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name).
+ - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name).
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ mode:
+ description:
+ - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the service
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the service
+ type: int
+ wait:
+ description:
+ - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING
+ type: bool
+ default: no
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ custom_attrs:
+ description:
+ - Dictionary of key/value custom attributes which will be used when instantiating a new service.
+ default: {}
+ type: dict
+ role:
+ description:
+ - Name of the role whose cardinality should be changed
+ type: str
+ cardinality:
+ description:
+ - Number of VMs for the specified role
+ type: int
+ force:
+ description:
+ - Force the new cardinality even if it is outside the limits
+ type: bool
+ default: no
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Instantiate a new service
+ community.general.one_service:
+ template_id: 90
+ register: result
+
+- name: Print service properties
+ ansible.builtin.debug:
+ var: result
+
+- name: Instantiate a new service with specified service_name, service group and mode
+ community.general.one_service:
+ template_name: 'app1_template'
+ service_name: 'app1'
+ group_id: 1
+ mode: '660'
+
+- name: Instantiate a new service with template_id and pass custom_attrs dict
+ community.general.one_service:
+ template_id: 90
+ custom_attrs:
+ public_network_id: 21
+ private_network_id: 26
+
+- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
+ community.general.one_service:
+ template_id: 53
+ service_name: 'foo'
+ unique: yes
+
+- name: Delete a service by ID
+ community.general.one_service:
+ service_id: 153
+ state: absent
+
+- name: Get service info
+ community.general.one_service:
+ service_id: 153
+ register: service_info
+
+- name: Change service owner, group and mode
+ community.general.one_service:
+ service_name: 'app2'
+ owner_id: 34
+ group_id: 113
+ mode: '600'
+
+- name: Instantiate service and wait for it to become RUNNING
+ community.general.one_service:
+ template_id: 43
+ service_name: 'foo1'
+ wait: yes
+
+- name: Wait for a service to become RUNNING
+ community.general.one_service:
+ service_id: 112
+ wait: yes
+
+- name: Change role cardinality
+ community.general.one_service:
+ service_id: 153
+ role: bar
+ cardinality: 5
+
+- name: Change role cardinality and wait for it to be applied
+ community.general.one_service:
+ service_id: 112
+ role: foo
+ cardinality: 7
+ wait: yes
+'''
+
+RETURN = '''
+service_id:
+ description: service id
+ type: int
+ returned: success
+ sample: 153
+service_name:
+ description: service name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: service's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: service's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: service's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: service's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of service instance
+ type: str
+ returned: success
+ sample: RUNNING
+mode:
+ description: service's mode
+ type: int
+ returned: success
+ sample: 660
+roles:
+ description: list of dictionaries of roles; each role is described by its name, cardinality, state and node IDs
+ type: list
+ returned: success
+ sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]},
+ {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]'
+'''
+
+import os
+import sys
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
+ "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
+
+
+def get_all_templates(module, auth):
+ try:
+ all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(all_templates.read())
+
+
+def get_template(module, auth, pred):
+ all_templates_dict = get_all_templates(module, auth)
+
+ found = 0
+ found_template = None
+ template_name = ''
+
+ if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
+ for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(template):
+ found = found + 1
+ found_template = template
+ template_name = template["NAME"]
+
+ if found <= 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg="There are multiple templates with the name: '" + template_name + "'. You have to use a unique template name.")
+ else:
+ return found_template
+
+
+def get_all_services(module, auth):
+ try:
+ response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(response.read())
+
+
+def get_service(module, auth, pred):
+ all_services_dict = get_all_services(module, auth)
+
+ found = 0
+ found_service = None
+ service_name = ''
+
+ if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
+ for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(service):
+ found = found + 1
+ found_service = service
+ service_name = service["NAME"]
+
+ # fail if there is more than one service with the same name
+ if found > 1:
+ module.fail_json(msg="There are multiple services with the name: '" +
+ service_name + "'. You have to use a unique service name or use 'service_id' instead.")
+ elif found <= 0:
+ return None
+ else:
+ return found_service
+
+
+def get_service_by_id(module, auth, service_id):
+ return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None
+
+
+def get_service_by_name(module, auth, service_name):
+ return get_service(module, auth, lambda service: (service["NAME"] == service_name))
+
+
+def get_service_info(module, auth, service):
+
+ result = {
+ "service_id": int(service["ID"]),
+ "service_name": service["NAME"],
+ "group_id": int(service["GID"]),
+ "group_name": service["GNAME"],
+ "owner_id": int(service["UID"]),
+ "owner_name": service["UNAME"],
+ "state": STATES[service["TEMPLATE"]["BODY"]["state"]]
+ }
+
+ roles_status = service["TEMPLATE"]["BODY"]["roles"]
+ roles = []
+ for role in roles_status:
+ nodes_ids = []
+ if "nodes" in role:
+ for node in role["nodes"]:
+ nodes_ids.append(node["deploy_id"])
+ roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids})
+
+ result["roles"] = roles
+ result["mode"] = int(parse_service_permissions(service))
+
+ return result
+
+
+def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout):
+ # make sure that the values in custom_attrs dict are strings
+ custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items())
+
+ data = {
+ "action": {
+ "perform": "instantiate",
+ "params": {
+ "merge_template": {
+ "custom_attrs_values": custom_attrs_with_str,
+ "name": service_name
+ }
+ }
+ }
+ }
+
+ try:
+ response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
+ data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ service_result = module.from_json(response.read())["DOCUMENT"]
+
+ return service_result
+
+
+def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id), method="GET",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Request for service status has failed. Error message: " + str(e))
+
+ status_result = module.from_json(status_result.read())
+ service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"]
+
+ if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]:
+ return status_result["DOCUMENT"]
+ elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]:
+ log_message = ''
+ for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]:
+ if log_info["severity"] == "E":
+ log_message = log_message + log_info["message"]
+ break
+
+ module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message)
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired")
+
+
+def change_service_permissions(module, auth, service_id, permissions):
+
+ data = {
+ "action": {
+ "perform": "chmod",
+ "params": {"octet": permissions}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_owner(module, auth, service_id, owner_id):
+ data = {
+ "action": {
+ "perform": "chown",
+ "params": {"owner_id": owner_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_group(module, auth, service_id, group_id):
+
+ data = {
+ "action": {
+ "perform": "chgrp",
+ "params": {"group_id": group_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_role_cardinality(module, auth, service_id, role, cardinality, force):
+
+ data = {
+ "cardinality": cardinality,
+ "force": force
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if status_result.getcode() != 204:
+ module.fail_json(msg="Failed to change cardinality for role: " + role + ". Return code: " + str(status_result.getcode()))
+
+
+def check_change_service_owner(module, service, owner_id):
+ old_owner_id = int(service["UID"])
+
+ return old_owner_id != owner_id
+
+
+def check_change_service_group(module, service, group_id):
+ old_group_id = int(service["GID"])
+
+ return old_group_id != group_id
+
+
+def parse_service_permissions(service):
+ perm_dict = service["PERMISSIONS"]
+ '''
+ This is the structure of the 'PERMISSIONS' dictionary:
+
+ "PERMISSIONS": {
+ "OWNER_U": "1",
+ "OWNER_M": "1",
+ "OWNER_A": "0",
+ "GROUP_U": "0",
+ "GROUP_M": "0",
+ "GROUP_A": "0",
+ "OTHER_U": "0",
+ "OTHER_M": "0",
+ "OTHER_A": "0"
+ }
+ '''
+
+ owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
+ group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
+ other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
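+
+# Worked example (illustrative only, not part of the module): with the sample
+# PERMISSIONS block above -- OWNER_U=1, OWNER_M=1, OWNER_A=0 and all group/other
+# bits 0 -- the owner digit is 4*1 + 2*1 + 1*0 = 6 while the group and other
+# digits are 0, so parse_service_permissions() returns the string '600'.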
+
+
+def check_change_service_permissions(module, service, permissions):
+ old_permissions = parse_service_permissions(service)
+
+ return old_permissions != permissions
+
+
+def check_change_role_cardinality(module, service, role_name, cardinality):
+ roles_list = service["TEMPLATE"]["BODY"]["roles"]
+
+ for role in roles_list:
+ if role["name"] == role_name:
+ return int(role["cardinality"]) != cardinality
+
+ module.fail_json(msg="There is no role with name: " + role_name)
+
+
+def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout):
+ if not service_name:
+ service_name = ''
+ changed = False
+ service = None
+
+ if unique:
+ service = get_service_by_name(module, auth, service_name)
+
+ if not service:
+ if not module.check_mode:
+ service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout)
+ changed = True
+
+ # in check mode a service that would have been created does not exist yet, so we cannot fetch its info
+ if module.check_mode and changed:
+ return {"changed": True}
+
+ result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait,
+ wait_timeout=wait_timeout, permissions=permissions, service=service)
+
+ if result["changed"]:
+ changed = True
+
+ result["changed"] = changed
+
+ return result
+
+
+def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None,
+ role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None):
+
+ changed = False
+
+ if not service:
+ service = get_service_by_id(module, auth, service_id)
+ else:
+ service_id = service["ID"]
+
+ if not service:
+ module.fail_json(msg="There is no service with id: " + str(service_id))
+
+ if owner_id:
+ if check_change_service_owner(module, service, owner_id):
+ if not module.check_mode:
+ change_service_owner(module, auth, service_id, owner_id)
+ changed = True
+ if group_id:
+ if check_change_service_group(module, service, group_id):
+ if not module.check_mode:
+ change_service_group(module, auth, service_id, group_id)
+ changed = True
+ if permissions:
+ if check_change_service_permissions(module, service, permissions):
+ if not module.check_mode:
+ change_service_permissions(module, auth, service_id, permissions)
+ changed = True
+
+ if role:
+ if check_change_role_cardinality(module, service, role, cardinality):
+ if not module.check_mode:
+ change_role_cardinality(module, auth, service_id, role, cardinality, force)
+ changed = True
+
+ if wait and not module.check_mode:
+ service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout)
+
+ # if something has changed, fetch service info again
+ if changed:
+ service = get_service_by_id(module, auth, service_id)
+
+ service_info = get_service_info(module, auth, service)
+ service_info["changed"] = changed
+
+ return service_info
+
+
+def delete_service(module, auth, service_id):
+ service = get_service_by_id(module, auth, service_id)
+ if not service:
+ return {"changed": False}
+
+ service_info = get_service_info(module, auth, service)
+
+ service_info["changed"] = True
+
+ if module.check_mode:
+ return service_info
+
+ try:
+ result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Service deletion has failed. Error message: " + str(e))
+
+ return service_info
+
+
+def get_template_by_name(module, auth, template_name):
+ return get_template(module, auth, lambda template: (template["NAME"] == template_name))
+
+
+def get_template_by_id(module, auth, template_id):
+ return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None
+
+
+def get_template_id(module, auth, requested_id, requested_name):
+ template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name)
+
+ if template:
+ return template["ID"]
+
+ return None
+
+
+def get_service_id_by_name(module, auth, service_name):
+ service = get_service_by_name(module, auth, service_name)
+
+ if service:
+ return service["ID"]
+
+ return None
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONEFLOW_URL')
+
+ if not username:
+ username = os.environ.get('ONEFLOW_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONEFLOW_PASSWORD')
+
+ if not (url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'user', 'password'))
+
+ return auth_params(url=url, user=username, password=password)
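+
+# Illustrative note (not part of the module): the returned namedtuple is
+# consumed as auth.url, auth.user and auth.password by the open_url() calls
+# above; this OneFlow variant names the field 'user', unlike the pyone-based
+# modules, which use 'username'.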
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "service_name": {"required": False, "type": "str"},
+ "service_id": {"required": False, "type": "int"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "unique": {"default": False, "type": "bool"},
+ "wait": {"default": False, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "custom_attrs": {"default": {}, "type": "dict"},
+ "role": {"required": False, "type": "str"},
+ "cardinality": {"required": False, "type": "int"},
+ "force": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'service_id'],
+ ['service_id', 'service_name'],
+ ['template_id', 'template_name', 'role'],
+ ['template_id', 'template_name', 'cardinality'],
+ ['service_id', 'custom_attrs']
+ ],
+ required_together=[['role', 'cardinality']],
+ supports_check_mode=True)
+
+ auth = get_connection_info(module)
+ params = module.params
+ service_name = params.get('service_name')
+ service_id = params.get('service_id')
+
+ requested_template_id = params.get('template_id')
+ requested_template_name = params.get('template_name')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ unique = params.get('unique')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ custom_attrs = params.get('custom_attrs')
+ role = params.get('role')
+ cardinality = params.get('cardinality')
+ force = params.get('force')
+
+ template_id = None
+
+ if requested_template_id or requested_template_name:
+ template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
+ if not template_id:
+ if requested_template_id:
+ module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ if unique and not service_name:
+ module.fail_json(msg="You cannot use unique without passing service_name!")
+
+ if template_id and state == 'absent':
+ module.fail_json(msg="State absent is not valid for template")
+
+ if template_id and state == 'present': # Instantiate a service
+ result = create_service_and_operation(module, auth, template_id, service_name, owner_id,
+ group_id, permissions, custom_attrs, unique, wait, wait_timeout)
+ else:
+ if not (service_id or service_name):
+ module.fail_json(msg="To manage the service at least the service id or service name should be specified!")
+ if custom_attrs:
+ module.fail_json(msg="You can only set custom_attrs when instantiate service!")
+
+ if not service_id:
+ service_id = get_service_id_by_name(module, auth, service_name)
+ # The task should be failed when we want to manage a non-existent service identified by its name
+ if not service_id and state == 'present':
+ module.fail_json(msg="There is no service with name: " + service_name)
+
+ if state == 'absent':
+ result = delete_service(module, auth, service_id)
+ else:
+ result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py
new file mode 100644
index 00000000..286514bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py
@@ -0,0 +1,1599 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+(c) 2019, Jan Meerkamp <meerkamp@dvv.de>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_vm
+short_description: Creates or terminates OpenNebula instances
+description:
+ - Manages OpenNebula instances
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ - If neither I(api_username) nor I(api_password) is set, the module will try
+ - to authenticate with the ONE auth file. Default path is "~/.one/one_auth".
+ - Set environment variable C(ONE_AUTH) to override this path.
+ type: str
+ template_name:
+ description:
+ - Name of VM template to use to create a new instance
+ type: str
+ template_id:
+ description:
+ - ID of a VM template to use to create a new instance
+ type: int
+ vm_start_on_hold:
+ description:
+ - Set to true to put the VM on hold while creating
+ default: False
+ type: bool
+ instance_ids:
+ description:
+ - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+ aliases: ['ids']
+ type: list
+ state:
+ description:
+ - C(present) - create instances from a template specified with C(template_id)/C(template_name).
+ - C(running) - run instances
+ - C(poweredoff) - power-off instances
+ - C(rebooted) - reboot instances
+ - C(absent) - terminate instances
+ choices: ["present", "absent", "running", "rebooted", "poweredoff"]
+ default: present
+ type: str
+ hard:
+ description:
+ - Reboot, power-off or terminate instances C(hard)
+ default: no
+ type: bool
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning. Keep
+ - in mind that waiting for an instance to reach the running state does not
+ - mean you will be able to SSH into the machine, only that the boot process
+ - has started on that instance; see the 'wait_for' example for details.
+ default: yes
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ attributes:
+ description:
+ - A dictionary of key/value attributes to add to new instances, or for
+ - setting C(state) of instances with these attributes.
+ - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+ - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
+ - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
+ - indexes to the names of VMs.
+ - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
+ - When used with C(count_attributes) and C(exact_count) the module will
+ - match the base name without the index part.
+ default: {}
+ type: dict
+ labels:
+ description:
+ - A list of labels to associate with new instances, or for setting
+ - C(state) of instances with these labels.
+ default: []
+ type: list
+ count_attributes:
+ description:
+ - A dictionary of key/value attributes that can only be used with
+ - C(exact_count) to determine how many nodes matching specific
+ - attribute criteria should be deployed. This can be expressed in
+ - multiple ways and is shown in the EXAMPLES section.
+ type: dict
+ count_labels:
+ description:
+ - A list of labels that can only be used with C(exact_count) to determine
+ - how many nodes matching specific label criteria should be deployed.
+ - This can be expressed in multiple ways and is shown in the EXAMPLES
+ - section.
+ type: list
+ count:
+ description:
+ - Number of instances to launch
+ default: 1
+ type: int
+ exact_count:
+ description:
+ - Indicates how many instances that match C(count_attributes) and
+ - C(count_labels) parameters should be deployed. Instances are either
+ - created or terminated based on this value.
+ - NOTE':' Instances with the lowest IDs will be terminated first.
+ type: int
+ mode:
+ description:
+ - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the instance
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the instance
+ type: int
+ memory:
+ description:
+ - The size of the memory for new instances (in MB, GB, ...)
+ type: str
+ disk_size:
+ description:
+ - The size of the disk created for new instances (in MB, GB, TB,...).
+ - NOTE':' If the template has multiple disks, the order of the sizes is
+ - matched against the order specified in C(template_id)/C(template_name).
+ type: list
+ cpu:
+ description:
+ - Percentage of CPU divided by 100 required for the new instance. Half a
+ - processor is written as 0.5.
+ type: float
+ vcpu:
+ description:
+ - Number of CPUs (cores) the new VM will have.
+ type: int
+ networks:
+ description:
+ - A list of dictionaries with network parameters. See examples for more details.
+ default: []
+ type: list
+ disk_saveas:
+ description:
+ - Creates an image from a VM disk.
+ - It is a dictionary where you have to specify C(name) of the new image.
+ - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
+ - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
+ - and the VM has to be in the C(poweredoff) state.
+ - Also this operation will fail if an image with specified C(name) already exists.
+ type: dict
+ persistent:
+ description:
+ - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
+ default: no
+ type: bool
+ version_added: '0.2.0'
+ datastore_id:
+ description:
+ - ID of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: int
+ datastore_name:
+ description:
+ - Name of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+
+EXAMPLES = '''
+- name: Create a new instance
+ community.general.one_vm:
+ template_id: 90
+ register: result
+
+- name: Print VM properties
+ ansible.builtin.debug:
+ var: result
+
+- name: Deploy a new VM on hold
+ community.general.one_vm:
+ template_name: 'app1_template'
+ vm_start_on_hold: yes
+
+- name: Deploy a new VM and set its name to 'foo'
+ community.general.one_vm:
+ template_name: 'app1_template'
+ attributes:
+ name: foo
+
+- name: Deploy a new VM and set its group_id and mode
+ community.general.one_vm:
+ template_id: 90
+ group_id: 16
+ mode: '660'
+
+- name: Deploy a new VM as persistent
+ community.general.one_vm:
+ template_id: 90
+ persistent: yes
+
+- name: Change VM's permissions to 640
+ community.general.one_vm:
+ instance_ids: 5
+ mode: '640'
+
+- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+ community.general.one_vm:
+ template_id: 15
+ disk_size: 35.2 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 2
+ networks:
+ - NETWORK_ID: 27
+ - NETWORK: "default-network"
+ NETWORK_UNAME: "app-user"
+ SECURITY_GROUPS: "120,124"
+ - NETWORK_ID: 27
+ SECURITY_GROUPS: "10"
+
+- name: Deploy a new instance which uses a Template with two Disks
+ community.general.one_vm:
+ template_id: 42
+ disk_size:
+ - 35.2 GB
+ - 50 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 1
+ networks:
+ - NETWORK_ID: 27
+
+- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: foo
+ bar: bar1
+
+- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ foo1: app1
+ foo2: app2
+ exact_count: 2
+ count_attributes:
+ foo1: app1
+ foo2: app2
+
+- name: Enforce that 4 instances with an attribute 'bar' are deployed
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: app
+ bar: bar2
+ exact_count: 4
+ count_attributes:
+ bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-##
+ foo: bar
+ labels:
+ - app1
+ - app2
+ count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-###
+ app: app1
+ count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- name: Reboot all instances with names in a certain format
+ community.general.one_vm:
+ attributes:
+ name: fooapp-#
+ state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete oldest instances, so only the 'fooapp-003' will remain
+- name: Enforce that only 1 instance with name in a certain format is deployed
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 1
+ count_attributes:
+ name: fooapp-#
+
+- name: Deploy a new instance with a network
+ community.general.one_vm:
+ template_id: 53
+ networks:
+ - NETWORK_ID: 27
+ register: vm
+
+- name: Wait for SSH to come up
+ ansible.builtin.wait_for_connection:
+ delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+
+- name: Terminate VMs by ids
+ community.general.one_vm:
+ instance_ids:
+ - 153
+ - 160
+ state: absent
+
+- name: Reboot all VMs that have labels 'foo' and 'app1'
+ community.general.one_vm:
+ labels:
+ - foo
+ - app1
+ state: rebooted
+
+- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
+ community.general.one_vm:
+ attributes:
+ name: foo
+ app: bar
+ register: results
+
+- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ - foo2
+ count: 2
+
+- name: Enforce that only 1 instance with label 'foo1' will be running
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ exact_count: 1
+ count_labels:
+ - foo1
+
+- name: Terminate all instances that have attribute foo
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 0
+ count_attributes:
+ foo:
+
+- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ state: poweredoff
+ disk_saveas:
+ name: foo-image
+
+- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ disk_saveas:
+ name: bar-image
+ disk_id: 1
+'''
+
+RETURN = '''
+instances_ids:
+ description: a list of instance IDs whose state was changed or which were fetched with the C(instance_ids) option.
+ type: list
+ returned: success
+ sample: [ 1234, 1235 ]
+instances:
+ description: a list of instance info dictionaries for instances whose state was changed or which were fetched with the C(instance_ids) option.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's owner name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: str
+ sample: 20480 MB
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+tagged_instances:
+ description:
+ - A list of instance info for instances matching the attributes and/or
+ - labels specified with the C(count_attributes) and C(count_labels)
+ - options.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's owner name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: list
+ sample: [
+ "20480 MB",
+ "10240 MB"
+ ]
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_template(module, client, predicate):
+
+ pool = client.templatepool.info(-2, -1, -1, -1)
+ # the filter flag -2 fetches all templates the user can use
+ found = 0
+ found_template = None
+ template_name = ''
+
+ for template in pool.VMTEMPLATE:
+ if predicate(template):
+ found = found + 1
+ found_template = template
+ template_name = template.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There is more than one template with name: ' + template_name)
+ return found_template
+
+
+def get_template_by_name(module, client, template_name):
+ return get_template(module, client, lambda template: (template.NAME == template_name))
+
+
+def get_template_by_id(module, client, template_id):
+ return get_template(module, client, lambda template: (template.ID == template_id))
+
+
+def get_template_id(module, client, requested_id, requested_name):
+ template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
+ if template:
+ return template.ID
+ else:
+ return None
+
+
+def get_datastore(module, client, predicate):
+ pool = client.datastorepool.info()
+ found = 0
+ found_datastore = None
+ datastore_name = ''
+
+ for datastore in pool.DATASTORE:
+ if predicate(datastore):
+ found = found + 1
+ found_datastore = datastore
+ datastore_name = datastore.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There is more than one datastore with name: ' + datastore_name)
+ return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+ return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+ return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
+ datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
+ if datastore:
+ return datastore.ID
+ else:
+ return None
+
+
+def get_vm_by_id(client, vm_id):
+ try:
+ vm = client.vm.info(int(vm_id))
+ except BaseException:
+ return None
+ return vm
+
+
+def get_vms_by_ids(module, client, state, ids):
+ vms = []
+
+ for vm_id in ids:
+ vm = get_vm_by_id(client, vm_id)
+ if vm is None and state != 'absent':
+ module.fail_json(msg='There is no VM with id=' + str(vm_id))
+ vms.append(vm)
+
+ return vms
+
+
+def get_vm_info(client, vm):
+
+ vm = client.vm.info(vm.ID)
+
+ networks_info = []
+
+ disk_size = []
+ if 'DISK' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['DISK'], list):
+ for disk in vm.TEMPLATE['DISK']:
+ disk_size.append(disk['SIZE'] + ' MB')
+ else:
+ disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
+
+ if 'NIC' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['NIC'], list):
+ for nic in vm.TEMPLATE['NIC']:
+ networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']})
+ else:
+ networks_info.append(
+ {'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'],
+ 'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']})
+ import time
+
+ current_time = time.localtime()
+ vm_start_time = time.localtime(vm.STIME)
+
+ vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
+ vm_uptime /= (60 * 60)
+
+ permissions_str = parse_vm_permissions(client, vm)
+
+ # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
+ vm_lcm_state = None
+ if vm.STATE == VM_STATES.index('ACTIVE'):
+ vm_lcm_state = LCM_STATES[vm.LCM_STATE]
+
+ vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ info = {
+ 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
+ 'vm_id': vm.ID,
+ 'vm_name': vm.NAME,
+ 'state': VM_STATES[vm.STATE],
+ 'lcm_state': vm_lcm_state,
+ 'owner_name': vm.UNAME,
+ 'owner_id': vm.UID,
+ 'networks': networks_info,
+ 'disk_size': disk_size,
+ 'memory': vm.TEMPLATE['MEMORY'] + ' MB',
+ 'vcpu': vm.TEMPLATE['VCPU'],
+ 'cpu': vm.TEMPLATE['CPU'],
+ 'group_name': vm.GNAME,
+ 'group_id': vm.GID,
+ 'uptime_h': int(vm_uptime),
+ 'attributes': vm_attributes,
+ 'mode': permissions_str,
+ 'labels': vm_labels
+ }
+
+ return info
+
+
+def parse_vm_permissions(client, vm):
+ vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
+
+ owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
+ group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
+ other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
+
+
+def set_vm_permissions(module, client, vms, permissions):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ old_permissions = parse_vm_permissions(client, vm)
+ changed = changed or old_permissions != permissions
+
+ if not module.check_mode and old_permissions != permissions:
+ permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
+ mode_bits = [int(d) for d in permissions_str]
+ try:
+ client.vm.chmod(
+ vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
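+
+# Worked example (illustrative only, not part of the module): for
+# permissions '640', int('640', base=8) is 416 and bin(416)[2:] yields
+# '110100000', so client.vm.chmod() receives the nine u/m/a bits for
+# owner, group and other: 1,1,0, 1,0,0, 0,0,0.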
+
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ if owner_id is None:
+ owner_id = vm.UID
+ if group_id is None:
+ group_id = vm.GID
+
+ changed = changed or owner_id != vm.UID or group_id != vm.GID
+
+ if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
+ try:
+ client.vm.chown(vm.ID, owner_id, group_id)
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def get_size_in_MB(module, size_str):
+
+ SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
+
+ s = size_str
+ init = size_str
+ num = ""
+ while s and (s[0:1].isdigit() or s[0:1] == '.'):
+ num += s[0]
+ s = s[1:]
+ num = float(num)
+ symbol = s.strip()
+
+ if symbol not in SYMBOLS:
+ module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
+
+ prefix = {'B': 1}
+
+ for i, s in enumerate(SYMBOLS[1:]):
+ prefix[s] = 1 << (i + 1) * 10
+
+ size_in_bytes = int(num * prefix[symbol])
+ size_in_MB = size_in_bytes / (1024 * 1024)
+
+ return size_in_MB
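+
+# Worked example (illustrative only, not part of the module): for '2 GB' the
+# loop collects num=2.0 and symbol='GB', prefix['GB'] is 1 << 30 bytes, so
+# size_in_bytes is 2147483648 and get_size_in_MB() returns 2048.0.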
+
+
+def create_disk_str(module, client, template_id, disk_size_list):
+
+ if not disk_size_list:
+ return ''
+
+ template = client.template.info(template_id)
+ if isinstance(template.TEMPLATE['DISK'], list):
+ # check if the number of disks is correct
+ if len(template.TEMPLATE['DISK']) != len(disk_size_list):
+ module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
+ result = ''
+ index = 0
+ for DISKS in template.TEMPLATE['DISK']:
+ disk = {}
+ diskresult = ''
+ # Get all info about the existing disk, e.g. IMAGE_ID,...
+ for key, value in DISKS.items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
+ result += diskresult
+ index += 1
+ else:
+ if len(disk_size_list) > 1:
+ module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
+ disk = {}
+ # Get all info about the existing disk, e.g. IMAGE_ID,...
+ for key, value in template.TEMPLATE['DISK'].items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
+
+ return result
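+
+# Illustrative example (assumed template data, not part of the module): for a
+# single-disk template whose DISK is {'IMAGE_ID': '42', 'SIZE': '10240'} and
+# disk_size_list=['20 GB'], the function copies every attribute except SIZE and
+# returns 'DISK = [IMAGE_ID="42", SIZE=20480]\n'.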
+
+
+def create_attributes_str(attributes_dict, labels_list):
+
+ attributes_str = ''
+
+ if labels_list:
+ attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n'
+ if attributes_dict:
+ attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n'
+
+ return attributes_str
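+
+# Illustrative example (not part of the module): create_attributes_str(
+# {'name': 'foo-###', 'bar': 'bar1'}, ['app1', 'app2']) upper-cases the keys
+# and returns 'LABELS="app1,app2"\nNAME="foo-###"\nBAR="bar1"\n'.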
+
+
+def create_nics_str(network_attrs_list):
+ nics_str = ''
+
+ for network in network_attrs_list:
+ # Packing key-value dict in string with format key="value", key="value"
+ network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items())
+ nics_str = nics_str + 'NIC = [' + network_str + ']\n'
+
+ return nics_str
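+
+# Illustrative example (not part of the module): create_nics_str(
+# [{'NETWORK_ID': 27}, {'NETWORK': 'default-network', 'NETWORK_UNAME': 'app-user'}])
+# returns 'NIC = [NETWORK_ID="27"]\nNIC = [NETWORK="default-network",NETWORK_UNAME="app-user"]\n'.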
+
+
+def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
+
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ disk_str = create_disk_str(module, client, template_id, disk_size)
+ vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
+ try:
+ vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ vm = get_vm_by_id(client, vm_id)
+
+ return get_vm_info(client, vm)
+
+
+def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
+ counter = 0
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ while cnt_str in vm_filled_indexes_list:
+ counter = counter + 1
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ return cnt_str
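+
+# Illustrative example (not part of the module): with used indexes
+# ['000', '001', '003'] and num_sign_cnt=3, the loop skips '000' and '001' and
+# generate_next_index() returns '002', the first free zero-padded index.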
+
+
+def get_vm_labels_and_attributes_dict(client, vm_id):
+ vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
+
+ attrs_dict = {}
+ labels_list = []
+
+ for key, value in vm_USER_TEMPLATE.items():
+ if key != 'LABELS':
+ attrs_dict[key] = value
+ else:
+ if value is not None:
+ labels_list = value.split(',')
+
+ return labels_list, attrs_dict
+
+
+def get_all_vms_by_attributes(client, attributes_dict, labels_list):
+ pool = client.vmpool.info(-2, -1, -1, -1).VM
+ vm_list = []
+ name = ''
+ if attributes_dict:
+ name = attributes_dict.pop('NAME', '')
+
+ if name != '':
+ base_name = name[:len(name) - name.count('#')]
+ # Check whether the name has an indexed format
+ with_hash = name.endswith('#')
+
+ for vm in pool:
+ if vm.NAME.startswith(base_name):
+ if with_hash and vm.NAME[len(base_name):].isdigit():
+ # If the name has an indexed format and only digits follow the base_name, it is matched
+ vm_list.append(vm)
+ elif not with_hash and vm.NAME == name:
+ # If the name is not indexed, it has to match exactly
+ vm_list.append(vm)
+ pool = vm_list
+
+ import copy
+
+ vm_list = copy.copy(pool)
+
+ for vm in pool:
+ remove_list = []
+ vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ if attributes_dict and len(attributes_dict) > 0:
+ for key, val in attributes_dict.items():
+ if key in vm_attributes_dict:
+ if val and vm_attributes_dict[key] != val:
+ remove_list.append(vm)
+ break
+ else:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ remove_list = []
+ if labels_list and len(labels_list) > 0:
+ for label in labels_list:
+ if label not in vm_labels_list:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ return vm_list
+
+
+def create_count_of_vms(
+ module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
+ new_vms_list = []
+
+ vm_name = ''
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ if module.check_mode:
+ return True, [], []
+
+ # Create list of used indexes
+ vm_filled_indexes_list = None
+ num_sign_cnt = vm_name.count('#')
+ if vm_name != '' and num_sign_cnt > 0:
+ vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None)
+ base_name = vm_name[:len(vm_name) - num_sign_cnt]
+ vm_name = base_name
+ # Make list which contains used indexes in format ['000', '001',...]
+ vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
+
+ while count > 0:
+ new_vm_name = vm_name
+ # Create indexed name
+ if vm_filled_indexes_list is not None:
+ next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt)
+ vm_filled_indexes_list.append(next_index)
+ new_vm_name += next_index
+ # Update NAME value in the attributes in case there is index
+ attributes_dict['NAME'] = new_vm_name
+ new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
+ new_vm_id = new_vm_dict.get('vm_id')
+ new_vm = get_vm_by_id(client, new_vm_id)
+ new_vms_list.append(new_vm)
+ count -= 1
+
+ if vm_start_on_hold:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_hold(module, client, vm, wait_timeout)
+ else:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_running(module, client, vm, wait_timeout)
+
+ return True, new_vms_list, []
+
+
+def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
+ labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
+
+ vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
+
+ vm_count_diff = exact_count - len(vm_list)
+ changed = vm_count_diff != 0
+
+ new_vms_list = []
+ instances_list = []
+ tagged_instances_list = vm_list
+
+ if module.check_mode:
+ return changed, instances_list, tagged_instances_list
+
+ if vm_count_diff > 0:
+ # Add more VMs
+ changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
+ labels_list, disk_size, network_attrs_list, wait, wait_timeout,
+ vm_start_on_hold, vm_persistent)
+
+ tagged_instances_list += instances_list
+ elif vm_count_diff < 0:
+ # Delete surplus VMs
+ old_vms_list = []
+
+ while vm_count_diff < 0:
+ old_vm = vm_list.pop(0)
+ old_vms_list.append(old_vm)
+ terminate_vm(module, client, old_vm, hard)
+ vm_count_diff += 1
+
+ if wait:
+ for vm in old_vms_list:
+ wait_for_done(module, client, vm, wait_timeout)
+
+ instances_list = old_vms_list
+ # store only the remaining instances
+ old_vms_set = set(old_vms_list)
+ tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
+
+ return changed, instances_list, tagged_instances_list
+
+
+VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
+LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
+ 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME',
+ 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF',
+ 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC',
+ 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPLUG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
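+
+# Illustrative note (not part of the module): pyone returns both states as
+# numeric indexes into these tuples, e.g. vm.STATE == 3 means
+# VM_STATES[3] == 'ACTIVE' and vm.LCM_STATE == 3 means LCM_STATES[3] == 'RUNNING';
+# the wait_for_* helpers below compare against VM_STATES.index(...) accordingly.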
+
+
+def wait_for_state(module, client, vm, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ vm = client.vm.info(vm.ID)
+ state = vm.STATE
+ lcm_state = vm.LCM_STATE
+
+ if state_predicate(state, lcm_state):
+ return vm
+ elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
+ VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
+ module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state])
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_running(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state,
+ lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
+
+
+def wait_for_done(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
+
+
+def wait_for_hold(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
+
+
+def wait_for_poweroff(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
+
+
+def terminate_vm(module, client, vm, hard=False):
+ changed = False
+
+ if not vm:
+ return changed
+
+ changed = True
+
+ if not module.check_mode:
+ if hard:
+ client.vm.action('terminate-hard', vm.ID)
+ else:
+ client.vm.action('terminate', vm.ID)
+
+ return changed
+
+
+def terminate_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = terminate_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def poweroff_vm(module, client, vm, hard):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ if not hard:
+ client.vm.action('poweroff', vm.ID)
+ else:
+ client.vm.action('poweroff-hard', vm.ID)
+
+ return changed
+
+
+def poweroff_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = poweroff_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def reboot_vms(module, client, vms, wait_timeout, hard):
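+ """Reboot VMs by powering them off, waiting for POWEROFF, then resuming them."""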
+
+ if not module.check_mode:
+ # Firstly, power-off all instances
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ poweroff_vm(module, client, vm, hard)
+
+ # Wait for all to be power-off
+ for vm in vms:
+ wait_for_poweroff(module, client, vm, wait_timeout)
+
+ for vm in vms:
+ resume_vm(module, client, vm)
+
+ return True
+
+
+def resume_vm(module, client, vm):
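+ """Resume a VM; fails while the VM is still shutting down to poweroff."""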
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
+ module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
+ "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
+ if lcm_state not in [LCM_STATES.index('RUNNING')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ client.vm.action('resume', vm.ID)
+
+ return changed
+
+
+def resume_vms(module, client, vms):
+ changed = False
+
+ for vm in vms:
+ changed = resume_vm(module, client, vm) or changed
+
+ return changed
+
+
+def check_name_attribute(module, attributes):
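+ """Validate the NAME attribute: '#' characters may appear only as a trailing
+ suffix (the module expands them into indexed VM names), and the name cannot
+ consist solely of '#' characters."""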
+ if attributes.get("NAME"):
+ import re
+ if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+ module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
+ "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+ "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+ "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
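+ """Fail when a restricted template attribute is used to filter VMs; also validates NAME."""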
+ for key in attributes.keys():
+ if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+ module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+ # Check the format of the name attribute
+ check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
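+ """Save a VM disk as a new image; the VM must be in the POWEROFF state."""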
+ if not disk_saveas.get('name'):
+ module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+ image_name = disk_saveas.get('name')
+ disk_id = disk_saveas.get('disk_id', 0)
+
+ if not module.check_mode:
+ if vm.STATE != VM_STATES.index('POWEROFF'):
+ module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
+ try:
+ client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
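+ """Resolve API connection info: module parameters take precedence, then the
+ ONE_URL/ONE_USERNAME/ONE_PASSWORD environment variables, and finally the
+ ONE_AUTH file (defaulting to ~/.one/one_auth)."""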
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not username:
+ if not password:
+ authfile = os.environ.get('ONE_AUTH')
+ if authfile is None:
+ authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
+ try:
+ with open(authfile, "r") as fp:
+ authstring = fp.read().rstrip()
+ username = authstring.split(":")[0]
+ password = authstring.split(":")[1]
+ except (OSError, IOError):
+ module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
+ except Exception:
+ module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
+ if not url:
+ module.fail_json(msg="Opennebula API url (api_url) is not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "vm_start_on_hold": {"default": False, "type": "bool"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "wait": {"default": True, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "hard": {"default": False, "type": "bool"},
+ "memory": {"required": False, "type": "str"},
+ "cpu": {"required": False, "type": "float"},
+ "vcpu": {"required": False, "type": "int"},
+ "disk_size": {"required": False, "type": "list"},
+ "datastore_name": {"required": False, "type": "str"},
+ "datastore_id": {"required": False, "type": "int"},
+ "networks": {"default": [], "type": "list"},
+ "count": {"default": 1, "type": "int"},
+ "exact_count": {"required": False, "type": "int"},
+ "attributes": {"default": {}, "type": "dict"},
+ "count_attributes": {"required": False, "type": "dict"},
+ "labels": {"default": [], "type": "list"},
+ "count_labels": {"required": False, "type": "list"},
+ "disk_saveas": {"type": "dict"},
+ "persistent": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'instance_ids'],
+ ['template_id', 'template_name', 'disk_saveas'],
+ ['instance_ids', 'count_attributes', 'count'],
+ ['instance_ids', 'count_labels', 'count'],
+ ['instance_ids', 'exact_count'],
+ ['instance_ids', 'attributes'],
+ ['instance_ids', 'labels'],
+ ['disk_saveas', 'attributes'],
+ ['disk_saveas', 'labels'],
+ ['exact_count', 'count'],
+ ['count', 'hard'],
+ ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
+ ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
+ ['instance_ids', 'networks'],
+ ['persistent', 'disk_size']
+ ],
+ supports_check_mode=True)
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ instance_ids = params.get('instance_ids')
+ requested_template_name = params.get('template_name')
+ requested_template_id = params.get('template_id')
+ put_vm_on_hold = params.get('vm_start_on_hold')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ hard = params.get('hard')
+ memory = params.get('memory')
+ cpu = params.get('cpu')
+ vcpu = params.get('vcpu')
+ disk_size = params.get('disk_size')
+ requested_datastore_id = params.get('datastore_id')
+ requested_datastore_name = params.get('datastore_name')
+ networks = params.get('networks')
+ count = params.get('count')
+ exact_count = params.get('exact_count')
+ attributes = params.get('attributes')
+ count_attributes = params.get('count_attributes')
+ labels = params.get('labels')
+ count_labels = params.get('count_labels')
+ disk_saveas = params.get('disk_saveas')
+ persistent = params.get('persistent')
+
+ if not (auth.username and auth.password):
+ # Fail early: without credentials the client below cannot be created and
+ # later uses of one_client would raise a NameError.
+ module.fail_json(msg="Authentication credentials (api_username/api_password) are missing")
+
+ one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ if attributes:
+ attributes = dict((key.upper(), value) for key, value in attributes.items())
+ check_attributes(module, attributes)
+
+ if count_attributes:
+ count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
+ if not attributes:
+ import copy
+ module.warn('When `count_attributes` is passed without the `attributes` option during deployment, `attributes` implicitly takes the same values.')
+ attributes = copy.copy(count_attributes)
+ check_attributes(module, count_attributes)
+
+ if count_labels and not labels:
+ module.warn('When `count_labels` is passed without the `labels` option during deployment, `labels` implicitly takes the same values.')
+ labels = count_labels
+
+ # Fetch template
+ template_id = None
+ if requested_template_id is not None or requested_template_name:
+ template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
+ if template_id is None:
+ if requested_template_id is not None:
+ module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ # Fetch datastore
+ datastore_id = None
+ if requested_datastore_id or requested_datastore_name:
+ datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
+ if datastore_id is None:
+ if requested_datastore_id:
+ module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
+ elif requested_datastore_name:
+ module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
+ else:
+ attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
+
+ if exact_count and template_id is None:
+ module.fail_json(msg='Option `exact_count` needs template_id or template_name')
+
+ if exact_count is not None and not (count_attributes or count_labels):
+ module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
+ if (count_attributes or count_labels) and exact_count is None:
+ module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
+ if template_id is not None and state != 'present':
+ module.fail_json(msg="Only state 'present' is valid for the template")
+
+ if memory:
+ attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
+ if cpu:
+ attributes['CPU'] = str(cpu)
+ if vcpu:
+ attributes['VCPU'] = str(vcpu)
+
+ if exact_count is not None and state != 'present':
+ module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
+ if exact_count is not None and exact_count < 0:
+ module.fail_json(msg='`exact_count` cannot be less than 0')
+ if count <= 0:
+ module.fail_json(msg='`count` has to be greater than 0')
+
+ if permissions is not None:
+ import re
+ if re.match("^[0-7]{3}$", permissions) is None:
+ module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
+
+ if exact_count is not None:
+ # Deploy an exact count of VMs
+ changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
+ count_attributes, labels, count_labels, disk_size,
+ networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
+ vms = tagged_instances_list
+ elif template_id is not None and state == 'present':
+ # Deploy count VMs
+ changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
+ attributes, labels, disk_size, networks, wait, wait_timeout,
+ put_vm_on_hold, persistent)
+ # instances_list - new instances
+ # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+ vms = instances_list
+ else:
+ # Fetch data of instances, or change their state
+ if not (instance_ids or attributes or labels):
+ module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")
+
+ if memory or cpu or vcpu or disk_size or networks:
+ module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")
+
+ if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
+ module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
+
+ vms = []
+ tagged = False
+ changed = False
+
+ if instance_ids:
+ vms = get_vms_by_ids(module, one_client, state, instance_ids)
+ else:
+ tagged = True
+ vms = get_all_vms_by_attributes(one_client, attributes, labels)
+
+ if len(vms) == 0 and state != 'absent' and state != 'present':
+ module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
+
+ if len(vms) == 0 and state == 'present' and not tagged:
+ module.fail_json(msg='There are no instances with specified `instance_ids`.')
+
+ if tagged and state == 'absent':
+ module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
+ if state == 'absent':
+ changed = terminate_vms(module, one_client, vms, hard)
+ elif state == 'rebooted':
+ changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
+ elif state == 'poweredoff':
+ changed = poweroff_vms(module, one_client, vms, hard)
+ elif state == 'running':
+ changed = resume_vms(module, one_client, vms)
+
+ instances_list = vms
+ tagged_instances_list = []
+
+ if permissions is not None:
+ changed = set_vm_permissions(module, one_client, vms, permissions) or changed
+
+ if owner_id is not None or group_id is not None:
+ changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
+
+ if wait and not module.check_mode and state != 'present':
+ wait_for = {
+ 'absent': wait_for_done,
+ 'rebooted': wait_for_running,
+ 'poweredoff': wait_for_poweroff,
+ 'running': wait_for_running
+ }
+ for vm in vms:
+ if vm is not None:
+ wait_for[state](module, one_client, vm, wait_timeout)
+
+ if disk_saveas is not None:
+ if len(vms) == 0:
+ module.fail_json(msg="There is no VM whose disk will be saved.")
+ disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
+ changed = True
+
+ # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
+ instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
+ instances_ids = list(vm.ID for vm in instances_list if vm is not None)
+ # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels)
+ tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
+
+ result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
new file mode 100644
index 00000000..90694861
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy.
+description:
+ - Create, remove, reconfigure, update firewall policies.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a firewall policy state to create, remove, or update.
+ required: false
+ type: str
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ firewall_policy:
+ description:
+ - The identifier (id or name) of the firewall policy used with update state.
+ type: str
+ rules:
+ description:
+ - A list of rules that will be set for the firewall policy.
+ Each rule must contain protocol parameter, in addition to three optional parameters
+ (port_from, port_to, and source)
+ type: list
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a firewall policy.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing firewall policy.
+ Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ description:
+ description:
+ - Firewall policy description. maxLength=256
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible-firewall-policy
+ description: Testing creation of firewall policies with ansible
+ rules:
+ -
+ protocol: TCP
+ port_from: 80
+ port_to: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible-firewall-policy
+
+- name: Update a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: update
+ firewall_policy: ansible-firewall-policy
+ name: ansible-firewall-policy-updated
+ description: Testing creation of firewall policies with ansible - updated
+
+- name: Add server to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ add_server_ips:
+ - server_identifier (id or name)
+ - server_identifier #2 (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ description: Adding rules to an existing firewall policy
+ add_rules:
+ -
+ protocol: TCP
+ port_from: 70
+ port_to: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_from: 60
+ port_to: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+firewall_policy:
+ description: Information about the firewall policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_firewall_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
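+ """In check mode, exit immediately reporting whether a change would occur."""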
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
+ """
+ Assigns servers to a firewall policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in server_ids:
+ server = get_server(oneandone_conn, _server_id, True)
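+ # The attach request is built from the id of the server's first IP.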
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.attach_server_firewall_policy(
+ firewall_id=firewall_id,
+ server_ips=attach_servers)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
+ """
+ Unassigns a server/IP from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ firewall_server = oneandone_conn.get_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ if firewall_server:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
+ """
+ Adds new rules to a firewall policy.
+ """
+ try:
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ if module.check_mode:
+ firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
+ if (firewall_rules and firewall_policy_id):
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.add_firewall_policy_rule(
+ firewall_id=firewall_id,
+ firewall_policy_rules=firewall_rules
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
+ """
+ Removes a rule from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_firewall_policy_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_firewall_policy(module, oneandone_conn):
+ """
+ Updates a firewall policy based on input arguments.
+ Firewall rules and server ips can be added/removed to/from
+ firewall policy. Firewall policy name and description can be
+ updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ firewall_policy_id = module.params.get('firewall_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
+ if firewall_policy is None:
+ _check_mode(module, False)
+
+ if name or description:
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.modify_firewall(
+ firewall_id=firewall_policy['id'],
+ name=name,
+ description=description)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_server_ips))
+
+ firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+
+ _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ if add_rules:
+ firewall_policy = _add_firewall_rules(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_rules)
+ _check_mode(module, firewall_policy)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+
+ _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def create_firewall_policy(module, oneandone_conn):
+ """
+ Create a new firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ firewall_policy_obj = oneandone.client.FirewallPolicy(
+ name=name,
+ description=description
+ )
+
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.create_firewall_policy(
+ firewall_policy=firewall_policy_obj,
+ firewall_policy_rules=firewall_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.firewall_policy,
+ firewall_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh
+ changed = True if firewall_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def remove_firewall_policy(module, oneandone_conn):
+ """
+ Removes a firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ fp_id = module.params.get('name')
+ firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
+ if module.check_mode:
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)
+
+ changed = True if firewall_policy else False
+
+ return (changed, {
+ 'id': firewall_policy['id'],
+ 'name': firewall_policy['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ description=dict(type='str'),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a firewall policy.")
+ try:
+ (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'update':
+ if not module.params.get('firewall_policy'):
+ module.fail_json(
+ msg="'firewall_policy' parameter is required to update a firewall policy.")
+ try:
+ (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ for param in ('name', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new firewall policies." % param)
+ try:
+ (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, firewall_policy=firewall_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
new file mode 100644
index 00000000..62551560
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
@@ -0,0 +1,677 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_load_balancer
+short_description: Configure 1&1 load balancer.
+description:
+ - Create, remove, update load balancers.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a load balancer state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ load_balancer:
+ description:
+ - The identifier (id or name) of the load balancer used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Load balancer name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ health_check_test:
+ description:
+ - Type of the health check. At the moment, HTTP is not allowed.
+ type: str
+ choices: [ "NONE", "TCP", "HTTP", "ICMP" ]
+ health_check_interval:
+ description:
+ - Health check period in seconds. minimum=5, maximum=300, multipleOf=1
+ type: str
+ health_check_path:
+ description:
+ - URL to call for checking. Required for HTTP health check. maxLength=1000
+ type: str
+ required: false
+ health_check_parse:
+ description:
+ - Regular expression to check. Required for HTTP health check. maxLength=64
+ type: str
+ required: false
+ persistence:
+ description:
+ - Persistence.
+ type: bool
+ persistence_time:
+ description:
+ - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1
+ type: str
+ method:
+ description:
+ - Balancing procedure.
+ type: str
+ choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ]
+ datacenter:
+ description:
+ - ID or country code of the datacenter where the load balancer will be created.
+ - If not specified, it defaults to I(US).
+ type: str
+ choices: [ "US", "ES", "DE", "GB" ]
+ required: false
+ rules:
+ description:
+ - A list of rule objects that will be set for the load balancer. Each rule must contain protocol,
+ port_balancer, and port_server parameters, in addition to source parameter, which is optional.
+ type: list
+ description:
+ description:
+ - Description of the load balancer. maxLength=256
+ type: str
+ required: false
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a load balancer.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing load balancer.
+ Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
+ type: list
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ description: Testing creation of load balancer with ansible
+ health_check_test: TCP
+ health_check_interval: 40
+ persistence: true
+ persistence_time: 1200
+ method: ROUND_ROBIN
+ datacenter: US
+ rules:
+ -
+ protocol: TCP
+ port_balancer: 80
+ port_server: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ wait: true
+ wait_timeout: 500
+ state: absent
+
+- name: Update a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer
+ name: ansible load balancer updated
+ description: Testing the update of a load balancer with ansible
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add server to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding server to a load balancer with ansible
+ add_server_ips:
+ - server identifier (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Removing server from a load balancer with ansible
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ add_rules:
+ -
+ protocol: TCP
+ port_balancer: 70
+ port_server: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_balancer: 60
+ port_server: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+load_balancer:
+ description: Information about the load balancer that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_load_balancer,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP']
+METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS']
+
+
+def _check_mode(module, result):
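+ """In check mode, exit immediately reporting whether a change would occur."""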
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids):
+ """
+ Assigns servers to a load balancer.
+ """
+ try:
+ attach_servers = []
+
+ for server_id in server_ids:
+ server = get_server(oneandone_conn, server_id, True)
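+ # The attach request is built from the id of the server's first IP.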
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.attach_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ips=attach_servers)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id):
+ """
+ Unassigns a server/IP from a load balancer.
+ """
+ try:
+ if module.check_mode:
+ lb_server = oneandone_conn.get_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ if lb_server:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
+ """
+ Adds new rules to a load_balancer.
+ """
+ try:
+ load_balancer_rules = []
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ if module.check_mode:
+ lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
+ if (load_balancer_rules and lb_id):
+ return True
+ return False
+
+ load_balancer = oneandone_conn.add_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id):
+ """
+ Removes a rule from a load_balancer.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id
+ )
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_load_balancer(module, oneandone_conn):
+ """
+ Updates a load_balancer based on input arguments.
+ Load balancer rules and server ips can be added/removed to/from
+ load balancer. Load balancer name, description, health_check_test,
+ health_check_interval, persistence, persistence_time, and method
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ load_balancer_id = module.params.get('load_balancer')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True)
+ if load_balancer is None:
+ _check_mode(module, False)
+
+ if (name or description or health_check_test or health_check_interval or health_check_path or
+ health_check_parse or persistence or persistence_time or method):
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.modify_load_balancer(
+ load_balancer_id=load_balancer['id'],
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_server_ips))
+
+ load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+
+ _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ if add_rules:
+ load_balancer = _add_load_balancer_rules(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_rules)
+ _check_mode(module, load_balancer)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+
+ _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ return (changed, load_balancer)
+
+
+def create_load_balancer(module, oneandone_conn):
+ """
+ Create a new load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ datacenter = module.params.get('datacenter')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ load_balancer_rules = []
+
+ datacenter_id = None
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ _check_mode(module, True)
+ load_balancer_obj = oneandone.client.LoadBalancer(
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method,
+ datacenter_id=datacenter_id
+ )
+
+ load_balancer = oneandone_conn.create_load_balancer(
+ load_balancer=load_balancer_obj,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.load_balancer,
+ load_balancer['id'],
+ wait_timeout,
+ wait_interval)
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh
+ changed = True if load_balancer else False
+
+ _check_mode(module, False)
+
+ return (changed, load_balancer)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_load_balancer(module, oneandone_conn):
+ """
+ Removes a load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ lb_id = module.params.get('name')
+ load_balancer_id = get_load_balancer(oneandone_conn, lb_id)
+ if module.check_mode:
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id)
+
+ changed = True if load_balancer else False
+
+ return (changed, {
+ 'id': load_balancer['id'],
+ 'name': load_balancer['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ load_balancer=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ health_check_test=dict(
+ choices=HEALTH_CHECK_TESTS),
+ health_check_interval=dict(type='str'),
+ health_check_path=dict(type='str'),
+ health_check_parse=dict(type='str'),
+ persistence=dict(type='bool'),
+ persistence_time=dict(type='str'),
+ method=dict(
+ choices=METHODS),
+ datacenter=dict(
+ choices=DATACENTERS),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a load balancer.")
+ try:
+ (changed, load_balancer) = remove_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('load_balancer'):
+ module.fail_json(
+ msg="'load_balancer' parameter is required for updating a load balancer.")
+ try:
+ (changed, load_balancer) = update_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'health_check_test', 'health_check_interval', 'persistence',
+ 'persistence_time', 'method', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new load balancers." % param)
+ try:
+ (changed, load_balancer) = create_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, load_balancer=load_balancer)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
new file mode 100644
index 00000000..79fed9a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_monitoring_policy
+short_description: Configure 1&1 monitoring policy.
+description:
+ - Create, remove, update monitoring policies
+ (and add/remove ports, processes, and servers).
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a monitoring policy's state to create, remove, or update.
+ type: str
+ required: false
+ default: present
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
+ type: str
+ monitoring_policy:
+ description:
+ - The identifier (id or name) of the monitoring policy used with update state.
+ type: str
+ agent:
+ description:
+ - Set to true to use the agent.
+ type: str
+ email:
+ description:
+ - User's email. maxLength=128
+ type: str
+ description:
+ description:
+ - Monitoring policy description. maxLength=256
+ type: str
+ required: false
+ thresholds:
+ description:
+ - Monitoring policy thresholds. Each of the suboptions has warning and critical,
+ which both have alert and value suboptions. Warning sets the limits for
+ warning alerts, critical sets the limits for critical alerts. alert
+ enables the alert, and value sets the threshold at which it triggers.
+ type: list
+ suboptions:
+ cpu:
+ description:
+ - Consumption limits of CPU.
+ required: true
+ ram:
+ description:
+ - Consumption limits of RAM.
+ required: true
+ disk:
+ description:
+ - Consumption limits of hard disk.
+ required: true
+ internal_ping:
+ description:
+ - Response limits of internal ping.
+ required: true
+ transfer:
+ description:
+ - Consumption limits for transfer.
+ required: true
+ ports:
+ description:
+ - Array of ports that will be monitored.
+ type: list
+ suboptions:
+ protocol:
+ description:
+ - Internet protocol.
+ choices: [ "TCP", "UDP" ]
+ required: true
+ port:
+ description:
+ - Port number. minimum=1, maximum=65535
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RESPONDING", "NOT_RESPONDING" ]
+ required: true
+ email_notification:
+ description:
+ - Set true for sending e-mail notifications.
+ required: true
+ processes:
+ description:
+ - Array of processes that will be monitored.
+ type: list
+ suboptions:
+ process:
+ description:
+ - Name of the process. maxLength=50
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RUNNING", "NOT_RUNNING" ]
+ required: true
+ add_ports:
+ description:
+ - Ports to add to the monitoring policy.
+ type: list
+ required: false
+ add_processes:
+ description:
+ - Processes to add to the monitoring policy.
+ type: list
+ required: false
+ add_servers:
+ description:
+ - Servers to add to the monitoring policy.
+ type: list
+ required: false
+ remove_ports:
+ description:
+ - Ports to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_processes:
+ description:
+ - Processes to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_servers:
+ description:
+ - Servers to remove from the monitoring policy.
+ type: list
+ required: false
+ update_ports:
+ description:
+ - Ports to be updated on the monitoring policy.
+ type: list
+ required: false
+ update_processes:
+ description:
+ - Processes to be updated on the monitoring policy.
+ type: list
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible monitoring policy
+ description: Testing creation of a monitoring policy with ansible
+ email: your@emailaddress.com
+ agent: true
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 92
+ alert: false
+ -
+ ram:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ disk:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 50
+ alert: false
+ critical:
+ value: 100
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 1000
+ alert: false
+ critical:
+ value: 2000
+ alert: false
+ ports:
+ -
+ protocol: TCP
+ port: 22
+ alert_if: RESPONDING
+ email_notification: false
+ processes:
+ -
+ process: test
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+
+- name: Destroy a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible monitoring policy
+
+- name: Update a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy
+ name: ansible monitoring policy updated
+ description: Testing creation of a monitoring policy with ansible updated
+ email: another@emailaddress.com
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ ram:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ disk:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 60
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 900
+ alert: false
+ critical:
+ value: 1900
+ alert: false
+ wait: true
+ state: update
+
+- name: Add a port to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_ports:
+ -
+ protocol: TCP
+ port: 33
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing ports of a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_ports:
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 34
+ alert_if: RESPONDING
+ email_notification: false
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 23
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a port from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_ports:
+ - port_id
+ state: update
+
+- name: Add a process to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_processes:
+ -
+ process: test_2
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing processes of a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_processes:
+ -
+ id: process_id
+ process: test_1
+ alert_if: NOT_RUNNING
+ email_notification: false
+ -
+ id: process_id
+ process: test_3
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a process from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_processes:
+ - process_id
+ wait: true
+ state: update
+
+- name: Add server to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_servers:
+ - server id or name
+ wait: true
+ state: update
+
+- name: Remove server from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_servers:
+ - server01
+ wait: true
+ state: update
+'''
+
+RETURN = '''
+monitoring_policy:
+ description: Information about the monitoring policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_monitoring_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
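+# The import is wrapped so that a missing SDK is reported from main() via a
+# clean fail_json message instead of an import traceback.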
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
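+# Note: in check mode, _check_mode() exits the module immediately via
+# exit_json with the would-be 'changed' value; outside check mode it is a
+# no-op, so execution simply continues.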
+
+def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
+ """
+ Adds new ports to a monitoring policy.
+ """
+ try:
+ monitoring_policy_ports = []
+
+ for _port in ports:
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=_port['protocol'],
+ port=_port['port'],
+ alert_if=_port['alert_if'],
+ email_notification=_port['email_notification']
+ )
+ monitoring_policy_ports.append(monitoring_policy_port)
+
+ if module.check_mode:
+ if monitoring_policy_ports:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_port(
+ monitoring_policy_id=monitoring_policy_id,
+ ports=monitoring_policy_ports)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
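+# The ports payload handled above is a list of plain dicts, mirroring the
+# EXAMPLES section (values illustrative):
+#
+#   add_ports = [{'protocol': 'TCP', 'port': 22,
+#                 'alert_if': 'RESPONDING', 'email_notification': False}]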
+
+def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
+ """
+ Removes a port from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ # Check mode must not delete anything; just verify the port exists,
+ # mirroring the check-mode branch of _delete_monitoring_policy_process.
+ mp_port = oneandone_conn.get_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if mp_port:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
+ """
+ Modifies a monitoring policy port.
+ """
+ try:
+ if module.check_mode:
+ cm_port = oneandone_conn.get_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if cm_port:
+ return True
+ return False
+
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=port['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id,
+ port=monitoring_policy_port)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
+ """
+ Adds new processes to a monitoring policy.
+ """
+ try:
+ monitoring_policy_processes = []
+
+ for _process in processes:
+ monitoring_policy_process = oneandone.client.Process(
+ process=_process['process'],
+ alert_if=_process['alert_if'],
+ email_notification=_process['email_notification']
+ )
+ monitoring_policy_processes.append(monitoring_policy_process)
+
+ if module.check_mode:
+ mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
+ if (monitoring_policy_processes and mp_id):
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_process(
+ monitoring_policy_id=monitoring_policy_id,
+ processes=monitoring_policy_processes)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
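+# Likewise, the processes payload is a list of plain dicts (values
+# illustrative, matching the EXAMPLES section):
+#
+#   add_processes = [{'process': 'test_2', 'alert_if': 'NOT_RUNNING',
+#                     'email_notification': False}]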
+
+def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
+ """
+ Removes a process from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id
+ )
+ if process:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
+ """
+ Modifies a monitoring policy process.
+ """
+ try:
+ if module.check_mode:
+ cm_process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ if cm_process:
+ return True
+ return False
+
+ monitoring_policy_process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=process['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id,
+ process=monitoring_policy_process)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
+ """
+ Attaches servers to a monitoring policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in servers:
+ server_id = get_server(oneandone_conn, _server_id)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server_id
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ servers=attach_servers)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
+ """
+ Detaches a server from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ mp_server = oneandone_conn.get_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ if mp_server:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_monitoring_policy(module, oneandone_conn):
+ """
+ Updates a monitoring_policy based on input arguments.
+ Ports, processes, and servers can be added to or removed from
+ a monitoring policy. The name, description, email, and the
+ thresholds for cpu, ram, disk, transfer, and internal_ping
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ monitoring_policy_id = module.params.get('monitoring_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ thresholds = module.params.get('thresholds')
+ add_ports = module.params.get('add_ports')
+ update_ports = module.params.get('update_ports')
+ remove_ports = module.params.get('remove_ports')
+ add_processes = module.params.get('add_processes')
+ update_processes = module.params.get('update_processes')
+ remove_processes = module.params.get('remove_processes')
+ add_servers = module.params.get('add_servers')
+ remove_servers = module.params.get('remove_servers')
+
+ changed = False
+
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
+ if monitoring_policy is None:
+ _check_mode(module, False)
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(
+ name=name,
+ description=description,
+ email=email
+ )
+
+ _thresholds = None
+
+ if thresholds:
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+ for threshold in thresholds:
+ # list() keeps this working on Python 3, where dict.keys()
+ # returns a view that cannot be indexed.
+ key = list(threshold.keys())[0]
+ if key in threshold_entities:
+ _threshold = oneandone.client.Threshold(
+ entity=key,
+ warning_value=threshold[key]['warning']['value'],
+ warning_alert=str(threshold[key]['warning']['alert']).lower(),
+ critical_value=threshold[key]['critical']['value'],
+ critical_alert=str(threshold[key]['critical']['alert']).lower())
+ _thresholds.append(_threshold)
+
+ if name or description or email or thresholds:
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.modify_monitoring_policy(
+ monitoring_policy_id=monitoring_policy['id'],
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds)
+ changed = True
+
+ if add_ports:
+ if module.check_mode:
+ _check_mode(module, _add_ports(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_ports))
+
+ monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
+ changed = True
+
+ if update_ports:
+ chk_changed = False
+ for update_port in update_ports:
+ if module.check_mode:
+ chk_changed |= _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+
+ _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_ports:
+ chk_changed = False
+ for port_id in remove_ports:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+
+ _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_processes:
+ monitoring_policy = _add_processes(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_processes)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if update_processes:
+ chk_changed = False
+ for update_process in update_processes:
+ if module.check_mode:
+ chk_changed |= _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+
+ _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_processes:
+ chk_changed = False
+ for process_id in remove_processes:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+
+ _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_servers:
+ monitoring_policy = _attach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_servers)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if remove_servers:
+ chk_changed = False
+ for _server_id in remove_servers:
+ server_id = get_server(oneandone_conn, _server_id)
+
+ if module.check_mode:
+ chk_changed |= _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+
+ _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
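+# For reference, the thresholds parameter consumed above is a list of
+# single-key dicts, one per entity, mirroring the EXAMPLES section (values
+# illustrative):
+#
+#   thresholds = [
+#       {'cpu': {'warning': {'value': 80, 'alert': False},
+#                'critical': {'value': 92, 'alert': False}}},
+#       {'ram': {'warning': {'value': 80, 'alert': False},
+#                'critical': {'value': 90, 'alert': False}}},
+#   ]
+#
+# Each entry becomes a oneandone.client.Threshold with the matching
+# warning_*/critical_* keyword arguments.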
+
+def create_monitoring_policy(module, oneandone_conn):
+ """
+ Creates a new monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ agent = module.params.get('agent')
+ thresholds = module.params.get('thresholds')
+ ports = module.params.get('ports')
+ processes = module.params.get('processes')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(name,
+ description,
+ email,
+ agent)
+
+ _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
+
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+ for threshold in thresholds:
+ # As above, list() keeps the key lookup Python 3 compatible.
+ key = list(threshold.keys())[0]
+ if key in threshold_entities:
+ _threshold = oneandone.client.Threshold(
+ entity=key,
+ warning_value=threshold[key]['warning']['value'],
+ warning_alert=str(threshold[key]['warning']['alert']).lower(),
+ critical_value=threshold[key]['critical']['value'],
+ critical_alert=str(threshold[key]['critical']['alert']).lower())
+ _thresholds.append(_threshold)
+
+ _ports = []
+ for port in ports:
+ _port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=str(port['email_notification']).lower())
+ _ports.append(_port)
+
+ _processes = []
+ for process in processes:
+ _process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=str(process['email_notification']).lower())
+ _processes.append(_process)
+
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.create_monitoring_policy(
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds,
+ ports=_ports,
+ processes=_processes
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.monitoring_policy,
+ monitoring_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if monitoring_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_monitoring_policy(module, oneandone_conn):
+ """
+ Removes a monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ mp_id = module.params.get('name')
+ monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
+ if module.check_mode:
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
+
+ changed = True if monitoring_policy else False
+
+ return (changed, {
+ 'id': monitoring_policy['id'],
+ 'name': monitoring_policy['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ agent=dict(type='str'),
+ email=dict(type='str'),
+ description=dict(type='str'),
+ thresholds=dict(type='list', default=[]),
+ ports=dict(type='list', default=[]),
+ processes=dict(type='list', default=[]),
+ add_ports=dict(type='list', default=[]),
+ update_ports=dict(type='list', default=[]),
+ remove_ports=dict(type='list', default=[]),
+ add_processes=dict(type='list', default=[]),
+ update_processes=dict(type='list', default=[]),
+ remove_processes=dict(type='list', default=[]),
+ add_servers=dict(type='list', default=[]),
+ remove_servers=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('monitoring_policy'):
+ module.fail_json(
+ msg="'monitoring_policy' parameter is required to update a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for a new monitoring policy." % param)
+ try:
+ (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
new file mode 100644
index 00000000..7eae6ea3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
@@ -0,0 +1,454 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_private_network
+short_description: Configure 1&1 private networking.
+description:
+ - Create, remove, reconfigure, and update a private network.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a network's state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ private_network:
+ description:
+ - The identifier (id or name) of the network used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Private network name used with present state. Used as an identifier (id or name) with absent state.
+ type: str
+ description:
+ description:
+ - Set a description for the network.
+ type: str
+ datacenter:
+ description:
+ - The identifier of the datacenter where the private network will be created
+ type: str
+ choices: [US, ES, DE, GB]
+ network_address:
+ description:
+ - Set a private network space, e.g. 192.168.1.0
+ type: str
+ subnet_mask:
+ description:
+ - Set the netmask for the private network, e.g. 255.255.255.0
+ type: str
+ add_members:
+ description:
+ - List of server identifiers (name or id) to be added to the private network.
+ type: list
+ remove_members:
+ description:
+ - List of server identifiers (name or id) to be removed from the private network.
+ type: list
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ name: backup_network
+ description: Testing creation of a private network with ansible
+ network_address: 70.35.193.100
+ subnet_mask: 255.0.0.0
+ datacenter: US
+
+- name: Destroy a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: backup_network
+
+- name: Modify the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ network_address: 192.168.2.0
+ subnet_mask: 255.255.255.0
+
+- name: Add members to the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ add_members:
+ - server identifier (id or name)
+
+- name: Remove members from the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ remove_members:
+ - server identifier (id or name)
+'''
+
+RETURN = '''
+private_network:
+ description: Information about the private network.
+ type: dict
+ sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_private_network,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_servers(module, oneandone_conn, name, members):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id and members:
+ return True
+ return False
+
+ network = oneandone_conn.attach_private_network_servers(
+ private_network_id=private_network_id,
+ server_ids=members)
+
+ return network
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_member(module, oneandone_conn, name, member_id):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id:
+ network_member = oneandone_conn.get_private_network_server(
+ private_network_id=private_network_id,
+ server_id=member_id)
+ if network_member:
+ return True
+ return False
+
+ network = oneandone_conn.remove_private_network_server(
+ private_network_id=private_network_id,
+ server_id=member_id)
+
+ return network
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_network(module, oneandone_conn):
+ """
+ Create a new private network
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any network was added.
+ """
+ name = module.params.get('name')
+ description = module.params.get('description')
+ network_address = module.params.get('network_address')
+ subnet_mask = module.params.get('subnet_mask')
+ datacenter = module.params.get('datacenter')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ datacenter_id = None
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ network = oneandone_conn.create_private_network(
+ private_network=oneandone.client.PrivateNetwork(
+ name=name,
+ description=description,
+ network_address=network_address,
+ subnet_mask=subnet_mask,
+ datacenter_id=datacenter_id
+ ))
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.private_network,
+ network['id'],
+ wait_timeout,
+ wait_interval)
+ network = get_private_network(oneandone_conn,
+ network['id'],
+ True)
+
+ changed = True if network else False
+
+ _check_mode(module, False)
+
+ return (changed, network)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_network(module, oneandone_conn):
+ """
+ Modifies a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ _private_network_id = module.params.get('private_network')
+ _name = module.params.get('name')
+ _description = module.params.get('description')
+ _network_address = module.params.get('network_address')
+ _subnet_mask = module.params.get('subnet_mask')
+ _add_members = module.params.get('add_members')
+ _remove_members = module.params.get('remove_members')
+
+ changed = False
+
+ private_network = get_private_network(oneandone_conn,
+ _private_network_id,
+ True)
+ if private_network is None:
+ _check_mode(module, False)
+
+ if _name or _description or _network_address or _subnet_mask:
+ _check_mode(module, True)
+ private_network = oneandone_conn.modify_private_network(
+ private_network_id=private_network['id'],
+ name=_name,
+ description=_description,
+ network_address=_network_address,
+ subnet_mask=_subnet_mask)
+ changed = True
+
+ if _add_members:
+ instances = []
+
+ for member in _add_members:
+ instance_id = get_server(oneandone_conn, member)
+ instance_obj = oneandone.client.AttachServer(server_id=instance_id)
+
+ instances.extend([instance_obj])
+ private_network = _add_servers(module, oneandone_conn, private_network['id'], instances)
+ _check_mode(module, private_network)
+ changed = True
+
+ if _remove_members:
+ chk_changed = False
+ for member in _remove_members:
+ instance = get_server(oneandone_conn, member, True)
+
+ if module.check_mode:
+ chk_changed |= _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ _check_mode(module, instance and chk_changed)
+
+ _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ private_network = get_private_network(oneandone_conn,
+ private_network['id'],
+ True)
+ changed = True
+
+ return (changed, private_network)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_network(module, oneandone_conn):
+ """
+ Removes a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+ """
+ try:
+ pn_id = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ private_network_id = get_private_network(oneandone_conn, pn_id)
+ if module.check_mode:
+ if private_network_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ private_network = oneandone_conn.delete_private_network(private_network_id)
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.private_network,
+ private_network['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if private_network else False
+
+ return (changed, {
+ 'id': private_network['id'],
+ 'name': private_network['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ private_network=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ network_address=dict(type='str'),
+ subnet_mask=dict(type='str'),
+ add_members=dict(type='list', default=[]),
+ remove_members=dict(type='list', default=[]),
+ datacenter=dict(
+ choices=DATACENTERS),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a network.")
+ try:
+ (changed, private_network) = remove_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('private_network'):
+ module.fail_json(
+ msg="'private_network' parameter is required for updating a network.")
+ try:
+ (changed, private_network) = update_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for new networks.")
+ try:
+ (changed, private_network) = create_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, private_network=private_network)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
new file mode 100644
index 00000000..edefbc93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_public_ip
+short_description: Configure 1&1 public IPs.
+description:
+ - Create, update, and remove public IPs.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a public IP's state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ reverse_dns:
+ description:
+ - Reverse DNS name. maxLength=256
+ type: str
+ required: false
+ datacenter:
+ description:
+ - ID of the datacenter where the IP will be created (only for unassigned IPs).
+ type: str
+ choices: [US, ES, DE, GB]
+ default: US
+ required: false
+ type:
+ description:
+ - Type of IP. Currently, only IPV4 is available.
+ type: str
+ choices: ["IPV4", "IPV6"]
+ default: 'IPV4'
+ required: false
+ public_ip_id:
+ description:
+ - The ID of the public IP used with update and delete states.
+ type: str
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ reverse_dns: example.com
+ datacenter: US
+ type: IPV4
+
+- name: Update a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ reverse_dns: secondexample.com
+ state: update
+
+- name: Delete a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ state: absent
+'''
+
+RETURN = '''
+public_ip:
+ description: Information about the public ip that was processed
+ type: dict
+ sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_public_ip,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+TYPES = ['IPV4', 'IPV6']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def create_public_ip(module, oneandone_conn):
+ """
+ Create a new public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was added.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ datacenter = module.params.get('datacenter')
+ ip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.create_public_ip(
+ reverse_dns=reverse_dns,
+ ip_type=ip_type,
+ datacenter_id=datacenter_id)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_public_ip(module, oneandone_conn):
+ """
+ Update a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was changed.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ public_ip_id = module.params.get('public_ip_id')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.modify_public_ip(
+ ip_id=public_ip['id'],
+ reverse_dns=reverse_dns)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_public_ip(module, oneandone_conn):
+ """
+ Delete a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was deleted.
+ """
+ public_ip_id = module.params.get('public_ip_id')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ deleted_public_ip = oneandone_conn.delete_public_ip(
+ ip_id=public_ip['id'])
+
+ changed = True if deleted_public_ip else False
+
+ return (changed, {
+ 'id': public_ip['id']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ public_ip_id=dict(type='str'),
+ reverse_dns=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ type=dict(
+ choices=TYPES,
+ default='IPV4'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to delete a public ip.")
+ try:
+ (changed, public_ip) = delete_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to update a public ip.")
+ try:
+ (changed, public_ip) = update_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ try:
+ (changed, public_ip) = create_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, public_ip=public_ip)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py
new file mode 100644
index 00000000..1e6caab5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py
@@ -0,0 +1,705 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_server
+short_description: Create, destroy, start, stop, and reboot a 1&1 Host server.
+description:
+ - Create, destroy, update, start, stop, and reboot a 1&1 Host server.
+ When a server is created, the module can optionally wait for it to be 'running' before returning.
+options:
+ state:
+ description:
+ - Define a server's state to create, remove, start or stop it.
+ type: str
+ default: present
+ choices: [ "present", "absent", "running", "stopped" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1. Overrides the
+ ONEANDONE_AUTH_TOKEN environment variable.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ datacenter:
+ description:
+ - The datacenter location.
+ type: str
+ default: US
+ choices: [ "US", "ES", "DE", "GB" ]
+ hostname:
+ description:
+ - The hostname or ID of the server. Only used when state is 'present'.
+ type: str
+ description:
+ description:
+ - The description of the server.
+ type: str
+ appliance:
+ description:
+ - The operating system name or ID for the server.
+ It is required only for 'present' state.
+ type: str
+ fixed_instance_size:
+ description:
+ - The instance size name or ID of the server.
+ It is required only for 'present' state, and it is mutually exclusive with
+ vcore, cores_per_processor, ram, and hdds parameters.
+ - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
+ type: str
+ vcore:
+ description:
+ - The total number of processors.
+ It must be provided with cores_per_processor, ram, and hdds parameters.
+ type: int
+ cores_per_processor:
+ description:
+ - The number of cores per processor.
+ It must be provided with vcore, ram, and hdds parameters.
+ type: int
+ ram:
+ description:
+ - The amount of RAM memory.
+ It must be provided with vcore, cores_per_processor, and hdds parameters.
+ type: float
+ hdds:
+ description:
+ - A list of hard disks with nested "size" and "is_main" properties.
+ It must be provided with vcore, cores_per_processor, and ram parameters.
+ type: list
+ private_network:
+ description:
+ - The private network name or ID.
+ type: str
+ firewall_policy:
+ description:
+ - The firewall policy name or ID.
+ type: str
+ load_balancer:
+ description:
+ - The load balancer name or ID.
+ type: str
+ monitoring_policy:
+ description:
+ - The monitoring policy name or ID.
+ type: str
+ server:
+ description:
+ - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'.
+ type: str
+ count:
+ description:
+ - The number of servers to create.
+ type: int
+ default: 1
+ ssh_key:
+ description:
+ - User's public SSH key (contents, not path).
+ type: raw
+ server_type:
+ description:
+ - The type of server to be built.
+ type: str
+ default: "cloud"
+ choices: [ "cloud", "baremetal", "k8s_node" ]
+ wait:
+ description:
+ - Wait for the server to be in state 'running' before returning.
+ Also used for the delete operation (set to 'false' if you don't want to wait
+ for each individual server to be deleted before moving on with
+ other tasks).
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the wait_for methods
+ type: int
+ default: 5
+ auto_increment:
+ description:
+ - When creating multiple servers at once, whether to differentiate
+ hostnames by appending a count after them or substituting the count
+ where there is a %02d or %03d in the hostname string.
+ type: bool
+ default: 'yes'
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+
+'''
+
+EXAMPLES = '''
+- name: Create three servers and enumerate their names
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ fixed_instance_size: XL
+ datacenter: US
+ appliance: C5A349786169F140BCBC335675014C08
+ auto_increment: true
+ count: 3
+
+- name: Create three servers, passing in an ssh_key
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ vcore: 2
+ cores_per_processor: 4
+ ram: 8.0
+ hdds:
+ - size: 50
+ is_main: false
+ datacenter: ES
+ appliance: C5A349786169F140BCBC335675014C08
+ count: 3
+ wait: yes
+ wait_timeout: 600
+ wait_interval: 10
+ ssh_key: SSH_PUBLIC_KEY
+
+- name: Removing server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: absent
+ server: 'node01'
+
+- name: Starting server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: running
+ server: 'node01'
+
+- name: Stopping server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: stopped
+ server: 'node01'
+'''
+
+RETURN = '''
+servers:
+ description: Information about each server that was processed
+ type: list
+ sample: '[{"hostname": "my-server", "id": "server-id"}]'
+ returned: always
+'''
+
+import os
+import time
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_fixed_instance_size,
+ get_appliance,
+ get_private_network,
+ get_monitoring_policy,
+ get_firewall_policy,
+ get_load_balancer,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+ONEANDONE_SERVER_STATES = (
+ 'DEPLOYING',
+ 'POWERED_OFF',
+ 'POWERED_ON',
+ 'POWERING_ON',
+ 'POWERING_OFF',
+)
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _create_server(module, oneandone_conn, hostname, description,
+ fixed_instance_size_id, vcore, cores_per_processor, ram,
+ hdds, datacenter_id, appliance_id, ssh_key,
+ private_network_id, firewall_policy_id, load_balancer_id,
+ monitoring_policy_id, server_type, wait, wait_timeout,
+ wait_interval):
+
+ try:
+ existing_server = get_server(oneandone_conn, hostname)
+
+ if existing_server:
+ if module.check_mode:
+ return False
+ return None
+
+ if module.check_mode:
+ return True
+
+ server = oneandone_conn.create_server(
+ oneandone.client.Server(
+ name=hostname,
+ description=description,
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ appliance_id=appliance_id,
+ datacenter_id=datacenter_id,
+ rsa_key=ssh_key,
+ private_network_id=private_network_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ monitoring_policy_id=monitoring_policy_id,
+ server_type=server_type,), hdds)
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+
+ return server
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _insert_network_data(server):
+ for addr_data in server['ips']:
+ if addr_data['type'] == 'IPV6':
+ server['public_ipv6'] = addr_data['ip']
+ elif addr_data['type'] == 'IPV4':
+ server['public_ipv4'] = addr_data['ip']
+ return server
+
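+# A minimal sketch of what _insert_network_data does, assuming a server dict
+# shaped like the 1&1 API response (addresses below are made up):
+#
+#   server = {'ips': [{'type': 'IPV4', 'ip': '10.5.132.106'},
+#                     {'type': 'IPV6', 'ip': '2001:db8::1'}]}
+#   server = _insert_network_data(server)
+#   # server['public_ipv4'] == '10.5.132.106'
+#   # server['public_ipv6'] == '2001:db8::1'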
+
+def create_server(module, oneandone_conn):
+ """
+ Create a new server
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any server was added, and a 'servers' attribute with the list of the
+ created servers' hostname, id and ip addresses.
+ """
+ hostname = module.params.get('hostname')
+ description = module.params.get('description')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ fixed_instance_size = module.params.get('fixed_instance_size')
+ vcore = module.params.get('vcore')
+ cores_per_processor = module.params.get('cores_per_processor')
+ ram = module.params.get('ram')
+ hdds = module.params.get('hdds')
+ datacenter = module.params.get('datacenter')
+ appliance = module.params.get('appliance')
+ ssh_key = module.params.get('ssh_key')
+ private_network = module.params.get('private_network')
+ monitoring_policy = module.params.get('monitoring_policy')
+ firewall_policy = module.params.get('firewall_policy')
+ load_balancer = module.params.get('load_balancer')
+ server_type = module.params.get('server_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ fixed_instance_size_id = None
+ if fixed_instance_size:
+ fixed_instance_size_id = get_fixed_instance_size(
+ oneandone_conn,
+ fixed_instance_size)
+ if fixed_instance_size_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='fixed_instance_size %s not found.' % fixed_instance_size)
+
+ appliance_id = get_appliance(oneandone_conn, appliance)
+ if appliance_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='appliance %s not found.' % appliance)
+
+ private_network_id = None
+ if private_network:
+ private_network_id = get_private_network(
+ oneandone_conn,
+ private_network)
+ if private_network_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='private network %s not found.' % private_network)
+
+ monitoring_policy_id = None
+ if monitoring_policy:
+ monitoring_policy_id = get_monitoring_policy(
+ oneandone_conn,
+ monitoring_policy)
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='monitoring policy %s not found.' % monitoring_policy)
+
+ firewall_policy_id = None
+ if firewall_policy:
+ firewall_policy_id = get_firewall_policy(
+ oneandone_conn,
+ firewall_policy)
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='firewall policy %s not found.' % firewall_policy)
+
+ load_balancer_id = None
+ if load_balancer:
+ load_balancer_id = get_load_balancer(
+ oneandone_conn,
+ load_balancer)
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='load balancer %s not found.' % load_balancer)
+
+ if auto_increment:
+ hostnames = _auto_increment_hostname(count, hostname)
+ descriptions = _auto_increment_description(count, description)
+ else:
+ hostnames = [hostname] * count
+ descriptions = [description] * count
+
+ hdd_objs = []
+ if hdds:
+ for hdd in hdds:
+ hdd_objs.append(oneandone.client.Hdd(
+ size=hdd['size'],
+ is_main=hdd['is_main']
+ ))
+
+ servers = []
+ for index, name in enumerate(hostnames):
+ server = _create_server(
+ module=module,
+ oneandone_conn=oneandone_conn,
+ hostname=name,
+ description=descriptions[index],
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ hdds=hdd_objs,
+ datacenter_id=datacenter_id,
+ appliance_id=appliance_id,
+ ssh_key=ssh_key,
+ private_network_id=private_network_id,
+ monitoring_policy_id=monitoring_policy_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ server_type=server_type,
+ wait=wait,
+ wait_timeout=wait_timeout,
+ wait_interval=wait_interval)
+ if server:
+ servers.append(server)
+
+ changed = False
+
+ if servers:
+ for server in servers:
+ if server:
+ _check_mode(module, True)
+ _check_mode(module, False)
+ servers = [_insert_network_data(_server) for _server in servers]
+ changed = True
+
+ _check_mode(module, False)
+
+ return (changed, servers)
+
+
+def remove_server(module, oneandone_conn):
+ """
+ Removes a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ the server was removed, and a 'removed_server' attribute with
+ the removed server's hostname and id.
+ """
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+ removed_server = None
+
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ _check_mode(module, True)
+ try:
+ oneandone_conn.delete_server(server_id=server['id'])
+ if wait:
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ changed = True
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to terminate the server: %s" % str(ex))
+
+ removed_server = {
+ 'id': server['id'],
+ 'hostname': server['name']
+ }
+ _check_mode(module, False)
+
+ return (changed, removed_server)
+
+
+def startstop_server(module, oneandone_conn):
+ """
+ Starts or Stops a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a tuple of a 'changed' flag indicating whether anything has
+ changed for the server as a result of this function being run, and a
+ 'server' dict with basic information for the server.
+ """
+ state = module.params.get('state')
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+
+ # Resolve server
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ # Attempt to change the server state, only if it's not already there
+ # or on its way.
+ try:
+ if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_OFF',
+ method='SOFTWARE')
+ elif state == 'running' and server['status']['state'] == 'POWERED_OFF':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_ON',
+ method='SOFTWARE')
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to set server %s to state %s: %s" % (
+ server_id, state, str(ex)))
+
+ _check_mode(module, False)
+
+ # Make sure the server has reached the desired state
+ if wait:
+ operation_completed = False
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+ server_state = server['status']['state']
+ if state == 'stopped' and server_state == 'POWERED_OFF':
+ operation_completed = True
+ break
+ if state == 'running' and server_state == 'POWERED_ON':
+ operation_completed = True
+ break
+ if not operation_completed:
+ module.fail_json(
+ msg="Timeout waiting for server %s to get to state %s" % (
+ server_id, state))
+
+ changed = True
+ server = _insert_network_data(server)
+
+ _check_mode(module, False)
+
+ return (changed, server)
+
+
+def _auto_increment_hostname(count, hostname):
+ """
+ Allow a custom incremental count in the hostname when defined with the
+ string formatting (%) operator. Otherwise, append a default counter,
+ producing name-1, name-2, name-3, and so forth.
+ """
+ if '%' not in hostname:
+ hostname = "%s-%%01d" % hostname
+
+ return [
+ hostname % i
+ for i in xrange(1, count + 1)
+ ]
+
+
+def _auto_increment_description(count, description):
+ """
+ Allow the incremental count in the description when defined with the
+ string formatting (%) operator. Otherwise, repeat the same description.
+ """
+ if '%' in description:
+ return [
+ description % i
+ for i in xrange(1, count + 1)
+ ]
+ else:
+ return [description] * count
+
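+# Illustrative behaviour of the two helpers above (hypothetical calls):
+# _auto_increment_hostname(3, 'web') -> ['web-1', 'web-2', 'web-3']
+# _auto_increment_hostname(2, 'web-%02d') -> ['web-01', 'web-02']
+# _auto_increment_description(2, 'node %d') -> ['node 1', 'node 2']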
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN'),
+ no_log=True),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ hostname=dict(type='str'),
+ description=dict(type='str'),
+ appliance=dict(type='str'),
+ fixed_instance_size=dict(type='str'),
+ vcore=dict(type='int'),
+ cores_per_processor=dict(type='int'),
+ ram=dict(type='float'),
+ hdds=dict(type='list'),
+ count=dict(type='int', default=1),
+ ssh_key=dict(type='raw'),
+ auto_increment=dict(type='bool', default=True),
+ server=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ private_network=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ load_balancer=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'],
+ ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],),
+ required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],)
+ )
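+ # Per the mutually_exclusive/required_together rules above, sizing is given
+ # either as a single fixed_instance_size or as the full custom set
+ # (vcore, cores_per_processor, ram, hdds), never a mixture.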
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The "1and1" Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for deleting a server.")
+ try:
+ (changed, servers) = remove_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for starting/stopping a server.")
+ try:
+ (changed, servers) = startstop_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('hostname',
+ 'appliance',
+ 'datacenter'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new server." % param)
+ try:
+ (changed, servers) = create_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, servers=servers)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for cannot be found.
+ - Using this module with the C(no_log) option is recommended to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms, fetching two fields from one of the items.
+# In the first occurrence 'password' is fetched, as a field name is not specified (default behaviour), and in
+# the second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
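+# Sign in automatically first via 'auto_login' (a sketch with illustrative
+# placeholder values; keep real credentials in an Ansible Vault):
+- name: Get a password using auto_login
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ auto_login:
+ subdomain: example
+ username: user@example.com
+ master_password: "{{ vaulted_master_password }}"
+ secret_key: "{{ vaulted_secret_key }}"
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+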
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1Password item matching the given search terms; shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
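+ # For illustration of _run above (hypothetical call):
+ # self._run(['get', 'item', 'My item']) executes "op get item 'My item'",
+ # with "--session=<token>" appended once a session token is held.
+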
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+ # We haven't found it yet, so now let's see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections, otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
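+ # Illustrative normalisation of search terms (hypothetical input):
+ # parse_search_terms(['My item', {'name': 'Other', 'field': 'username'}])
+ # -> [{'name': 'My item', 'field': 'password', 'section': None, 'vault': None},
+ # {'name': 'Other', 'field': 'username', 'section': None, 'vault': None}]
+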
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+ module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s sigin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for cannot be found.
+ - Using this module with the C(no_log) option is recommended to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms, fetching two fields from one of the items.
+# In the first occurrence 'password' is fetched, as a field name is not specified (default behaviour), and in
+# the second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1Password item matching the given search terms; shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+ # We haven't found it yet, so now let's see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections, otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+ module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s sigin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
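+ # Illustrative shape of the 'utilization' option consumed below, mirroring
+ # the last example in EXAMPLES above: {'fields': 'AmbientTemperature',
+ # 'view': 'day', 'refresh': False, 'filter': ['startDate=...', 'endDate=...']}.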
+ def _get_utilization(self, enclosure, params):
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
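Editor's note: the module above reads self.options with dict-style .get() calls even though the documented options parameter is a YAML list. This works because the base class (per the hpOneView module_utils convention) flattens the list before execute_module() runs: plain strings become boolean flags, single-key dicts keep their parameters. The helper name below is hypothetical, a minimal standalone sketch of that flattening, not the actual OneViewModuleBase code:

    # Sketch of how an options list such as
    #   ['script', {'utilization': {'view': 'day'}}]
    # can be flattened into the dict shape that _gather_optional_info()
    # reads with options.get(...). flatten_options is a made-up name; the
    # real base class performs an equivalent transformation.
    def flatten_options(options):
        flattened = {}
        for entry in options or []:
            if isinstance(entry, dict):
                flattened.update(entry)   # option with parameters
            else:
                flattened[entry] = True   # bare option name, enabled flag
        return flattened

    if __name__ == '__main__':
        opts = flatten_options(['script', {'utilization': {'view': 'day'}}])
        assert opts['script'] is True
        assert opts['utilization'] == {'view': 'day'}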
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
new file mode 100644
index 00000000..a81e144a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network
+short_description: Manage OneView Ethernet Network resources
+description:
+ - Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Ethernet Network resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ - C(default_bandwidth_reset) will reset the network connection template to the default.
+ default: present
+ choices: [present, absent, default_bandwidth_reset]
+ data:
+ description:
+ - Dictionary with Ethernet Network properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Ethernet Network is present using the default configuration
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ vlanId: '201'
+ delegate_to: localhost
+
+- name: Update the Ethernet Network changing bandwidth and purpose
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ purpose: Management
+ bandwidth:
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ newName: 'Renamed Ethernet Network'
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is absent
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: 'New Ethernet Network'
+ delegate_to: localhost
+
+- name: Create Ethernet networks in bulk
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ vlanIdRange: '1-10,15,17'
+ purpose: General
+ namePrefix: TestNetwork
+ smartLink: false
+ privateNetwork: false
+ bandwidth:
+ maximumBandwidth: 10000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Reset to the default network connection template
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: default_bandwidth_reset
+ data:
+ name: 'Test Ethernet Network'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+ethernet_network:
+ description: Has the facts about the Ethernet Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+
+ethernet_network_bulk:
+ description: Has the facts about the Ethernet Networks affected by the bulk insert.
+ returned: When 'vlanIdRange' attribute is in data argument. Can be null.
+ type: dict
+
+ethernet_network_connection_template:
+ description: Has the facts about the Ethernet Network Connection Template.
+ returned: On state 'default_bandwidth_reset'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class EthernetNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'Ethernet Network created successfully.'
+ MSG_UPDATED = 'Ethernet Network updated successfully.'
+ MSG_DELETED = 'Ethernet Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
+ MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
+
+ MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
+ MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
+ MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
+ MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
+
+ RESOURCE_FACT_NAME = 'ethernet_network'
+
+ def __init__(self):
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
+ data=dict(type='dict', required=True),
+ )
+
+ super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+
+ changed, msg, ansible_facts, resource = False, '', {}, None
+
+ if self.data.get('name'):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ if self.data.get('vlanIdRange'):
+ return self._bulk_present()
+ else:
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+ elif self.state == 'default_bandwidth_reset':
+ changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)
+
+ def _present(self, resource):
+
+ bandwidth = self.data.pop('bandwidth', None)
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if bandwidth:
+ if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
+ result['changed'] = True
+ result['msg'] = self.MSG_UPDATED
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
+
+ return result
+
+ def _bulk_present(self):
+ vlan_id_range = self.data['vlanIdRange']
+ result = dict(ansible_facts={})
+ ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ if not ethernet_networks:
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_BULK_CREATED
+
+ else:
+ vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
+ for net in ethernet_networks[:]:
+ vlan_ids.remove(net['vlanId'])
+
+ if len(vlan_ids) == 0:
+ result['msg'] = self.MSG_BULK_ALREADY_EXIST
+ result['changed'] = False
+ else:
+ if len(vlan_ids) == 1:
+ self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
+ else:
+ self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
+
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_MISSING_BULK_CREATED
+ result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ return result
+
+ def _update_connection_template(self, ethernet_network, bandwidth):
+
+ if 'connectionTemplateUri' not in ethernet_network:
+ return False, None
+
+ connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
+
+ merged_data = connection_template.copy()
+ merged_data.update({'bandwidth': bandwidth})
+
+ if not self.compare(connection_template, merged_data):
+ connection_template = self.oneview_client.connection_templates.update(merged_data)
+ return True, connection_template
+ else:
+ return False, None
+
+ def _default_bandwidth_reset(self, resource):
+
+ if not resource:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
+
+ default_connection_template = self.oneview_client.connection_templates.get_default()
+
+ changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
+
+ return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
+ ethernet_network_connection_template=connection_template)
+
+
+def main():
+ EthernetNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
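Editor's note: _bulk_present() above derives the still-missing VLAN IDs by expanding the requested range and removing the IDs that already exist, then rebuilds the vlanIdRange string for create_bulk(). A self-contained sketch of that set arithmetic follows; expand_vlan_range is a hypothetical parser standing in for what the client's dissociate_values_or_ranges() returns for a string like '1-10,15,17':

    # Sketch of the missing-VLAN computation used by _bulk_present().
    def expand_vlan_range(vlan_id_range):
        vlan_ids = []
        for part in vlan_id_range.split(','):
            if '-' in part:
                start, end = part.split('-')
                vlan_ids.extend(range(int(start), int(end) + 1))
            else:
                vlan_ids.append(int(part))
        return vlan_ids

    def missing_vlan_range(vlan_id_range, existing_networks):
        # Drop IDs already present in OneView, exactly as the module does
        # with vlan_ids.remove(net['vlanId']).
        vlan_ids = expand_vlan_range(vlan_id_range)
        for net in existing_networks:
            vlan_ids.remove(net['vlanId'])
        if not vlan_ids:
            return None  # everything exists; MSG_BULK_ALREADY_EXIST case
        if len(vlan_ids) == 1:
            return '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
        return ','.join(map(str, vlan_ids))

    if __name__ == '__main__':
        nets = [{'vlanId': 2}, {'vlanId': 15}]
        print(missing_vlan_range('1-3,15,17', nets))  # -> '1,3,17'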
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
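Editor's note: every *_info module in this series keeps a compatibility branch for its old *_facts alias. The legacy name wraps the payload in ansible_facts; the new name returns the same keys at the top level of the result. A minimal sketch of the two shapes, using a made-up payload and no OneView calls:

    # Sketch of the two return shapes produced by execute_module(); it only
    # illustrates why playbooks moved from ansible_facts lookups to
    # result.<key> access after registering the task result.
    def module_result(info, is_old_facts):
        if is_old_facts:
            return dict(changed=False, ansible_facts=info)  # legacy *_facts alias
        return dict(changed=False, **info)                  # current *_info module

    if __name__ == '__main__':
        info = {'ethernet_networks': [{'name': 'eth1'}]}
        old = module_result(info, is_old_facts=True)
        new = module_result(info, is_old_facts=False)
        assert old['ansible_facts']['ethernet_networks'] == new['ethernet_networks']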
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
new file mode 100644
index 00000000..45fa035c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network
+short_description: Manage OneView Fibre Channel Network resources
+description:
+ - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
+requirements:
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the Fibre Channel Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ choices: ['present', 'absent']
+ required: true
+ data:
+ description:
+ - Dictionary with the Fibre Channel Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Fibre Channel Network is present using the default configuration
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+
+- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach'
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ fabricType: 'DirectAttach'
+
+- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+
+- name: Ensure that the Fibre Channel Network is absent
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: absent
+ data:
+ name: 'New FC Network'
+'''
+
+RETURN = '''
+fc_network:
+ description: Has the facts about the managed OneView FC Network.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FC Network created successfully.'
+ MSG_UPDATED = 'FC Network updated successfully.'
+ MSG_DELETED = 'FC Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FC Network is already present.'
+ MSG_ALREADY_ABSENT = 'FC Network is already absent.'
+ RESOURCE_FACT_NAME = 'fc_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(
+ required=True,
+ choices=['present', 'absent']))
+
+ super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fc_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self._present(resource)
+ else:
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fc_network', scope_uris)
+ return result
+
+
+def main():
+ FcNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
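Editor's note: _present() above pops scopeUris out of the payload before the generic resource_present() comparison and applies scopes in a second step, since scopes go through a separate REST operation and would otherwise make every run report a change. The sketch below uses crude stand-ins for the two OneViewModuleBase helpers, only to show the ordering; it is not their real implementation:

    # Stand-ins for OneViewModuleBase.resource_present() and
    # resource_scopes_set(), simplified to a dict merge and comparison.
    def resource_present(existing, data):
        merged = dict(existing or {}, **data)
        return {'changed': merged != (existing or {}), 'fc_network': merged}

    def resource_scopes_set(result, scope_uris):
        result['fc_network']['scopeUris'] = scope_uris
        result['changed'] = True  # simplified; the real helper also compares
        return result

    def present(existing, data):
        scope_uris = data.pop('scopeUris', None)  # keep scopes out of the diff
        result = resource_present(existing, data)
        if scope_uris is not None:
            result = resource_scopes_set(result, scope_uris)
        return result

    if __name__ == '__main__':
        out = present(None, {'name': 'New FC Network',
                             'scopeUris': ['/rest/scopes/00SC123456']})
        assert out['changed'] and out['fc_network']['scopeUris']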
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
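Editor's note: the params option documented above feeds straight into get_all(**self.facts_params), so pagination, sorting, and filtering happen server-side rather than in the module. A sketch of the resulting call, with a stub class standing in for oneview_client.fc_networks (the keyword names mirror the documented params; the stub is an assumption, not the hpOneView client):

    # Stub showing how params (start/count/sort/filter) is forwarded
    # verbatim to get_all() as keyword arguments.
    class StubFcNetworks(object):
        def get_all(self, start=0, count=-1, sort='', filter=''):
            return {'start': start, 'count': count,
                    'sort': sort, 'filter': filter}

    if __name__ == '__main__':
        facts_params = {'start': 1, 'count': 3,
                        'sort': 'name:descending',
                        'filter': 'fabricType=FabricAttach'}
        print(StubFcNetworks().get_all(**facts_params))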
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
new file mode 100644
index 00000000..79d8ae21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network
+short_description: Manage OneView FCoE Network resources
+description:
+ - Provides an interface to manage FCoE Network resources. Can create, update, or delete.
+requirements:
+ - "python >= 2.7.9"
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the FCoE Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - Dictionary with FCoE Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that FCoE Network is present using the default configuration
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: Test FCoE Network
+ vlanId: 201
+ delegate_to: localhost
+
+- name: Update the FCOE network scopes
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: New FCoE Network
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+ delegate_to: localhost
+
+- name: Ensure that FCoE Network is absent
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: New FCoE Network
+ delegate_to: localhost
+'''
+
+RETURN = '''
+fcoe_network:
+ description: Has the facts about the OneView FCoE Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FCoE Network created successfully.'
+ MSG_UPDATED = 'FCoE Network updated successfully.'
+ MSG_DELETED = 'FCoE Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FCoE Network is already present.'
+ MSG_ALREADY_ABSENT = 'FCoE Network is already absent.'
+ RESOURCE_FACT_NAME = 'fcoe_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent']))
+
+ super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fcoe_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fcoe_network', scope_uris)
+ return result
+
+
+def main():
+ FcoeNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about a FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
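Editor's note: the deprecation shim above keys off module._name, which can be either the short pre-collections name or the fully qualified collection name depending on how the task invoked the module, hence the two-element tuple. A tiny sketch of that check:

    # Sketch of the alias test used to decide whether to emit the
    # deprecation warning and the legacy ansible_facts return shape.
    LEGACY_NAMES = ('oneview_fcoe_network_facts',
                    'community.general.oneview_fcoe_network_facts')

    def is_legacy_alias(module_name):
        return module_name in LEGACY_NAMES

    if __name__ == '__main__':
        assert is_legacy_alias('community.general.oneview_fcoe_network_facts')
        assert not is_legacy_alias('community.general.oneview_fcoe_network_info')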
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about a FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
new file mode 100644
index 00000000..8ca49e21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group
+short_description: Manage OneView Logical Interconnect Group resources
+description:
+ - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Logical Interconnect Group resource.
+ C(absent) will remove the resource from OneView, if it exists.
+ C(present) will ensure data properties are compliant with OneView.
+ choices: [absent, present]
+ default: present
+ data:
+ description:
+ - Dictionary with the Logical Interconnect Group properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Logical Interconnect Group is present
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ uplinkSets: []
+ enclosureType: C7000
+ interconnectMapTemplate:
+ interconnectMapEntryTemplates:
+ - logicalDownlinkUri: ~
+ logicalLocation:
+ locationEntries:
+ - relativeValue: 1
+ type: Bay
+ - relativeValue: 1
+ type: Enclosure
+ permittedInterconnectTypeName: HP VC Flex-10/10D Module
+ # Alternatively you can inform permittedInterconnectTypeUri
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group has the specified scopes
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ scopeUris:
+ - /rest/scopes/00SC123456
+ - /rest/scopes/01SC123456
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is present with name 'Test'
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: New Logical Interconnect Group
+ newName: Test
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is absent
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: New Logical Interconnect Group
+ delegate_to: localhost
+'''
+
+RETURN = '''
+logical_interconnect_group:
+ description: Has the facts about the OneView Logical Interconnect Group.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class LogicalInterconnectGroupModule(OneViewModuleBase):
+ MSG_CREATED = 'Logical Interconnect Group created successfully.'
+ MSG_UPDATED = 'Logical Interconnect Group updated successfully.'
+ MSG_DELETED = 'Logical Interconnect Group deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.'
+ MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.'
+ MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.'
+
+ RESOURCE_FACT_NAME = 'logical_interconnect_group'
+
+ def __init__(self):
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict')
+ )
+
+ super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.logical_interconnect_groups
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+
+ self.__replace_name_by_uris(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris)
+
+ return result
+
+ def __replace_name_by_uris(self, data):
+ map_template = data.get('interconnectMapTemplate')
+
+ if map_template:
+ map_entry_templates = map_template.get('interconnectMapEntryTemplates')
+ if map_entry_templates:
+ for value in map_entry_templates:
+ permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None)
+ if permitted_interconnect_type_name:
+ value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name(
+ permitted_interconnect_type_name).get('uri')
+
+ def __get_interconnect_type_by_name(self, name):
+ i_type = self.oneview_client.interconnect_types.get_by('name', name)
+ if i_type:
+ return i_type[0]
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND)
+
+
+def main():
+ LogicalInterconnectGroupModule().run()
+
+
+if __name__ == '__main__':
+ main()
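Editor's note: __replace_name_by_uris() above rewrites each interconnect map entry in place, swapping the human-friendly permittedInterconnectTypeName for the URI OneView expects, and raises when the type cannot be resolved. A standalone sketch follows; the KNOWN_TYPES dict and its URI are hypothetical stand-ins for the interconnect_types.get_by('name', ...) lookup:

    # Standalone sketch of the name-to-URI rewrite with a plain dict lookup.
    KNOWN_TYPES = {'HP VC Flex-10/10D Module': '/rest/interconnect-types/123'}

    def replace_name_by_uris(data, lookup=KNOWN_TYPES):
        templates = (data.get('interconnectMapTemplate') or {}).get(
            'interconnectMapEntryTemplates') or []
        for entry in templates:
            name = entry.pop('permittedInterconnectTypeName', None)
            if name:
                if name not in lookup:
                    raise LookupError('Interconnect Type was not found.')
                entry['permittedInterconnectTypeUri'] = lookup[name]
        return data

    if __name__ == '__main__':
        lig = {'interconnectMapTemplate': {'interconnectMapEntryTemplates': [
            {'permittedInterconnectTypeName': 'HP VC Flex-10/10D Module'}]}}
        replace_name_by_uris(lig)
        entry = lig['interconnectMapTemplate']['interconnectMapEntryTemplates'][0]
        assert entry == {'permittedInterconnectTypeUri': '/rest/interconnect-types/123'}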
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
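Editor's note: the facts/info compatibility shim above recurs in every renamed module in this diff — the module inspects its own invocation name and only wraps the payload in ansible_facts when called under the legacy alias. A minimal standalone sketch of that return-shape dispatch (the function and argument names here are illustrative, not part of the OneView module utils):

    def build_result(is_old_facts, key, payload):
        # Legacy *_facts invocation: wrap in ansible_facts for backward compatibility.
        if is_old_facts:
            return dict(changed=False, ansible_facts={key: payload})
        # Renamed *_info invocation: expose the payload as a top-level return key.
        return dict(changed=False, **{key: payload})

    # build_result(True, 'logical_interconnect_groups', ligs)
    #   -> {'changed': False, 'ansible_facts': {'logical_interconnect_groups': ligs}}
    # build_result(False, 'logical_interconnect_groups', ligs)
    #   -> {'changed': False, 'logical_interconnect_groups': ligs}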
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py
new file mode 100644
index 00000000..cc70d5e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set
+short_description: Manage HPE OneView Network Set resources
+description:
+ - Provides an interface to manage Network Set resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Network Set resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - Dict with the Network Set properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Create a Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ networkUris:
+ - Test Ethernet Network_1 # can be a name
+ - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
+ delegate_to: localhost
+
+- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ newName: OneViewSDK Test Network Set - Renamed
+ networkUris:
+ - Test Ethernet Network_1
+ delegate_to: localhost
+
+- name: Delete the Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: OneViewSDK Test Network Set - Renamed
+ delegate_to: localhost
+
+- name: Update the Network set with two scopes
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ scopeUris:
+ - /rest/scopes/01SC123456
+ - /rest/scopes/02SC123456
+ delegate_to: localhost
+'''
+
+RETURN = '''
+network_set:
+ description: Has the facts about the Network Set.
+ returned: On state 'present', but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class NetworkSetModule(OneViewModuleBase):
+ MSG_CREATED = 'Network Set created successfully.'
+ MSG_UPDATED = 'Network Set updated successfully.'
+ MSG_DELETED = 'Network Set deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Network Set is already present.'
+ MSG_ALREADY_ABSENT = 'Network Set is already absent.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: '
+ RESOURCE_FACT_NAME = 'network_set'
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict'))
+
+ def __init__(self):
+ super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.network_sets
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ self._replace_network_name_by_uri(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris)
+ return result
+
+ def _get_ethernet_network_by_name(self, name):
+ result = self.oneview_client.ethernet_networks.get_by('name', name)
+ return result[0] if result else None
+
+ def _get_network_uri(self, network_name_or_uri):
+ if network_name_or_uri.startswith('/rest/ethernet-networks'):
+ return network_name_or_uri
+ else:
+ enet_network = self._get_ethernet_network_by_name(network_name_or_uri)
+ if enet_network:
+ return enet_network['uri']
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri)
+
+ def _replace_network_name_by_uri(self, data):
+ if 'networkUris' in data:
+ data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']]
+
+
+def main():
+ NetworkSetModule().run()
+
+
+if __name__ == '__main__':
+ main()
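Editor's note: the networkUris handling above accepts either a bare Ethernet network name or a full REST URI per entry. A self-contained sketch of that normalization, with the lookup callable standing in for the real ethernet_networks.get_by client call (an assumed shape, not the actual hpOneView SDK API):

    def normalize_network_ref(ref, lookup_uri_by_name):
        # Full URIs pass through untouched.
        if ref.startswith('/rest/ethernet-networks'):
            return ref
        # Bare names must resolve to an existing network.
        uri = lookup_uri_by_name(ref)
        if uri is None:
            raise ValueError('Ethernet Network not found: ' + ref)
        return uri

    # normalize_network_ref('/rest/ethernet-networks/e436...', lookup) returns the URI as-is;
    # normalize_network_ref('Test Ethernet Network_1', lookup) resolves the name via lookup.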
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
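Editor's note: the withoutEthernet branch above builds a quoted filter string before calling get_all_without_ethernet. Since the escaping is easy to misread, here is what the expression actually produces (worked example only):

    name = 'netset001'
    filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
    print(filter_by_name)  # prints: "'name'='netset001'"  (outer double quotes are part of the string)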
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
new file mode 100644
index 00000000..57e93475
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager
+short_description: Manage OneView SAN Manager resources
+description:
+ - Provides an interface to manage SAN Manager resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the SAN Manager resource.
+ - C(present) ensures data properties are compliant with OneView.
+ - C(absent) removes the resource from OneView, if it exists.
+ - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+ default: present
+ choices: [present, absent, connection_information_set]
+ data:
+ description:
+ - Dict with SAN Manager properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ providerDisplayName: Brocade Network Advisor
+ connectionInfo:
+ - name: Host
+ value: 172.18.15.1
+ - name: Port
+ value: 5989
+ - name: Username
+ value: username
+ - name: Password
+ value: password
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Ensure a Device Manager for the Cisco SAN Provider is present
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.20.1
+ providerDisplayName: Cisco
+ connectionInfo:
+ - name: Host
+ value: 172.18.20.1
+ - name: SnmpPort
+ value: 161
+ - name: SnmpUserName
+ value: admin
+ - name: SnmpAuthLevel
+ value: authnopriv
+ - name: SnmpAuthProtocol
+ value: sha
+ - name: SnmpAuthString
+ value: password
+ delegate_to: localhost
+
+- name: Sets the SAN Manager connection information
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: connection_information_set
+ data:
+ connectionInfo:
+ - name: Host
+ value: '172.18.15.1'
+ - name: Port
+ value: '5989'
+ - name: Username
+ value: 'username'
+ - name: Password
+ value: 'password'
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Refreshes the SAN Manager
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.15.1
+ refreshState: RefreshPending
+ delegate_to: localhost
+
+- name: Delete the SAN Manager recently created
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: '172.18.15.1'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+san_manager:
+ description: Has the OneView facts about the SAN Manager.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
+
+
+class SanManagerModule(OneViewModuleBase):
+ MSG_CREATED = 'SAN Manager created successfully.'
+ MSG_UPDATED = 'SAN Manager updated successfully.'
+ MSG_DELETED = 'SAN Manager deleted successfully.'
+ MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
+ MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
+ MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
+ data=dict(type='dict', required=True)
+ )
+
+ def __init__(self):
+ super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
+ self.resource_client = self.oneview_client.san_managers
+
+    def execute_module(self):
+        resource_name = None
+        if self.data.get('connectionInfo'):
+            for connection_hash in self.data.get('connectionInfo'):
+                if connection_hash.get('name') == 'Host':
+                    resource_name = connection_hash.get('value')
+        elif self.data.get('name'):
+            resource_name = self.data.get('name')
+
+        if resource_name is None:
+            msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
+            msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
+            raise OneViewModuleValueError(msg)
+
+        resource = self.resource_client.get_by_name(resource_name)
+
+ if self.state == 'present':
+ changed, msg, san_manager = self._present(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ elif self.state == 'absent':
+ return self.resource_absent(resource, method='remove')
+
+ elif self.state == 'connection_information_set':
+ changed, msg, san_manager = self._connection_information_set(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ def _present(self, resource):
+ if not resource:
+ provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+ return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+ resource.pop('connectionInfo', None)
+ merged_data.pop('connectionInfo', None)
+
+ if self.compare(resource, merged_data):
+ return False, self.MSG_ALREADY_PRESENT, resource
+ else:
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _connection_information_set(self, resource):
+ if not resource:
+ return self._present(resource)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+ merged_data.pop('refreshState', None)
+ if not self.data.get('connectionInfo', None):
+ raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _get_provider_uri_by_display_name(self, data):
+ display_name = data.get('providerDisplayName')
+ provider_uri = self.resource_client.get_provider_uri(display_name)
+
+ if not provider_uri:
+ raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+ return provider_uri
+
+
+def main():
+ SanManagerModule().run()
+
+
+if __name__ == '__main__':
+ main()
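Editor's note: the _present method above implements the usual OneView idempotency pattern — merge the desired data over the existing resource, drop fields that cannot be compared (here connectionInfo), and only call update when the merged view differs. A simplified, flat-dict sketch of that decision (the real compare helper in the module utils performs a deeper, type-tolerant comparison):

    def needs_update(existing, desired, skip=('connectionInfo',)):
        merged = dict(existing)
        merged.update(desired)
        # Exclude fields that the API never echoes back verbatim.
        current = {k: v for k, v in existing.items() if k not in skip}
        merged = {k: v for k, v in merged.items() if k not in skip}
        return merged != current

    # needs_update({'name': 'nsa', 'refreshState': 'Stable'}, {'name': 'nsa'}) -> False
    # needs_update({'name': 'nsa'}, {'name': 'nsb'})                           -> True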
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+ - Retrieve information about one or more of the SAN Managers from OneView.
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+ - Retrieve information about one or more of the SAN Managers from OneView.
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
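Editor's note: both SAN Manager info modules above normalize two lookup shapes — a by-provider lookup that yields a single object or None, and the get-all path that yields a list — into one list-shaped result. A compact sketch of that normalization with a stand-in client object (an assumed interface, not the hpOneView SDK):

    def list_san_managers(client, provider_display_name=None, **params):
        if provider_display_name:
            found = client.get_by_provider_display_name(provider_display_name)
            # Normalize: a single hit becomes a one-element list, a miss an empty list.
            return [found] if found else []
        return client.get_all(**params)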
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py
new file mode 100644
index 00000000..f1e74aa6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_server_info) instead.
+short_description: Gather facts about Online servers.
+description:
+ - Gather facts about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server facts
+ community.general.online_server_facts:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+'''
+
+RETURN = r'''
+---
+online_server_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_facts": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineServerFacts, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_facts = OnlineServerFacts(module).all_detailed_servers()
+ module.exit_json(
+ ansible_facts={'online_server_facts': servers_facts}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
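Editor's note: all_detailed_servers above pairs a generator expression with a list comprehension — detail requests are issued lazily, one per server path, and entries whose fetch yielded None are dropped. A standalone sketch of the same shape with a placeholder fetch function (fetch_one is hypothetical):

    def fetch_all_details(paths, fetch_one):
        # Lazy per-path requests; nothing is fetched until the list below is built.
        details = (fetch_one(p) for p in paths)
        # Keep only successful lookups.
        return [d for d in details if d is not None]

    # fetch_all_details(['api/v1/server/1', 'api/v1/server/2'], my_fetch)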
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py
new file mode 100644
index 00000000..f0e73aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_info
+short_description: Gather information about Online servers.
+description:
+ - Gather information about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server information
+ community.general.online_server_info:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_server_info }}"
+'''
+
+RETURN = r'''
+---
+online_server_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_info": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineServerInfo, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_info = OnlineServerInfo(module).all_detailed_servers()
+ module.exit_json(
+ online_server_info=servers_info
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py
new file mode 100644
index 00000000..7b78924e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_user_info) instead.
+short_description: Gather facts about Online user.
+description:
+ - Gather facts about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user facts
+ community.general.online_user_facts:
+'''
+
+RETURN = r'''
+---
+online_user_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_facts": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineUserFacts, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'online_user_facts': OnlineUserFacts(module).get_resources()}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py
new file mode 100644
index 00000000..093a2c68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_info
+short_description: Gather information about Online user.
+description:
+ - Gather information about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user info
+ community.general.online_user_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_user_info }}"
+'''
+
+RETURN = r'''
+---
+online_user_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_info": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineUserInfo, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ online_user_info=OnlineUserInfo(module).get_resources()
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py
new file mode 100644
index 00000000..222bb82f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: open_iscsi
+author:
+- Serge van Ginderachter (@srvg)
+short_description: Manage iSCSI targets with Open-iSCSI
+description:
+ - Discover targets on a given portal, (dis)connect targets, mark targets for
+ manual or automatic startup, and return device nodes of connected targets.
+requirements:
+ - open_iscsi library and tools (iscsiadm)
+options:
+ portal:
+ description:
+ - The domain name or IP address of the iSCSI target.
+ type: str
+ aliases: [ ip ]
+ port:
+ description:
+ - The port on which the iSCSI target process listens.
+ type: str
+ default: 3260
+ target:
+ description:
+ - The iSCSI target name.
+ type: str
+ aliases: [ name, targetname ]
+ login:
+ description:
+ - Whether the target node should be connected.
+ type: bool
+ aliases: [ state ]
+ node_auth:
+ description:
+ - The value for C(discovery.sendtargets.auth.authmethod).
+ type: str
+ default: CHAP
+ node_user:
+ description:
+ - The value for C(discovery.sendtargets.auth.username).
+ type: str
+ node_pass:
+ description:
+ - The value for C(discovery.sendtargets.auth.password).
+ type: str
+ auto_node_startup:
+ description:
+ - Whether the target node should be automatically connected at startup.
+ type: bool
+ aliases: [ automatic ]
+ discover:
+ description:
+ - Whether the list of target nodes on the portal should be
+ (re)discovered and added to the persistent iSCSI database.
+ - Keep in mind that C(iscsiadm) discovery resets configuration, such as C(node.startup)
+ back to manual; hence, combined with C(auto_node_startup=yes), this will always report
+ a changed state.
+ type: bool
+ default: false
+ show_nodes:
+ description:
+ - Whether the list of nodes in the persistent iSCSI database should be returned by the module.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Perform a discovery on sun.com and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ portal: sun.com
+
+- name: Perform a discovery on 10.1.2.3 and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ ip: 10.1.2.3
+
+# NOTE: Only works if exactly one target is exported to the initiator
+- name: Discover targets on portal and login to the one available
+ community.general.open_iscsi:
+ portal: '{{ iscsi_target }}'
+ login: yes
+ discover: yes
+
+- name: Connect to the named target, after updating the local persistent database (cache)
+ community.general.open_iscsi:
+ login: yes
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+- name: Disconnect from the cached named target
+ community.general.open_iscsi:
+ login: no
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+'''
+
+import glob
+import os
+import socket
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+ISCSIADM = 'iscsiadm'
+
+
+def compare_nodelists(l1, l2):
+    # order-insensitive comparison; sorted() avoids mutating the caller's lists
+    return sorted(l1) == sorted(l2)
+
+
+def iscsi_get_cached_nodes(module, portal=None):
+ cmd = '%s --mode node' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ nodes = []
+ for line in lines:
+ # line format is "ip:port,target_portal_group_tag targetname"
+ parts = line.split()
+ if len(parts) > 2:
+ module.fail_json(msg='error parsing output', cmd=cmd)
+ target = parts[1]
+ parts = parts[0].split(':')
+ target_portal = parts[0]
+
+ if portal is None or portal == target_portal:
+ nodes.append(target)
+
+ # older versions of iscsiadm don't have nice return codes
+ # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
+ # err can contain [N|n]o records...
+ elif rc == 21 or (rc == 255 and "o records found" in err):
+ nodes = []
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ return nodes
+
+
+def iscsi_discover(module, portal, port):
+ cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_loggedon(module, target):
+ cmd = '%s --mode session' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ return target in out
+ elif rc == 21:
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_login(module, target, portal=None, port=None):
+ node_auth = module.params['node_auth']
+ node_user = module.params['node_user']
+ node_pass = module.params['node_pass']
+
+ if node_user:
+ params = [('node.session.auth.authmethod', node_auth),
+ ('node.session.auth.username', node_user),
+ ('node.session.auth.password', node_pass)]
+ for (name, value) in params:
+ cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
+ (rc, out, err) = module.run_command(cmd)
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)
+ if portal is not None and port is not None:
+ cmd += ' --portal %s:%s' % (portal, port)
+
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_logout(module, target):
+ cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_device_node(module, target):
+ # if anyone knows a better way to find out which device nodes get created for
+ # a given target...
+
+ devices = glob.glob('/dev/disk/by-path/*%s*' % target)
+ devdisks = []
+ for dev in devices:
+ # exclude partitions
+ if "-part" not in dev:
+ devdisk = os.path.realpath(dev)
+ # only add once (multi-path?)
+ if devdisk not in devdisks:
+ devdisks.append(devdisk)
+ return devdisks
+
+
+def target_isauto(module, target):
+ cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ for line in lines:
+ if 'node.startup' in line:
+ return 'automatic' in line
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setauto(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setmanual(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def main():
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+
+ # target
+ portal=dict(type='str', aliases=['ip']),
+ port=dict(type='str', default='3260'),
+ target=dict(type='str', aliases=['name', 'targetname']),
+ node_auth=dict(type='str', default='CHAP'),
+ node_user=dict(type='str'),
+ node_pass=dict(type='str', no_log=True),
+
+ # actions
+ login=dict(type='bool', aliases=['state']),
+ auto_node_startup=dict(type='bool', aliases=['automatic']),
+ discover=dict(type='bool', default=False),
+ show_nodes=dict(type='bool', default=False),
+ ),
+
+ required_together=[['node_user', 'node_pass']],
+ supports_check_mode=True,
+ )
+
+ global iscsiadm_cmd
+ iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
+
+ # parameters
+ portal = module.params['portal']
+ if portal:
+ try:
+ portal = socket.getaddrinfo(portal, None)[0][4][0]
+ except socket.gaierror:
+ module.fail_json(msg="Portal address is incorrect")
+
+ target = module.params['target']
+ port = module.params['port']
+ login = module.params['login']
+ automatic = module.params['auto_node_startup']
+ discover = module.params['discover']
+ show_nodes = module.params['show_nodes']
+
+ check = module.check_mode
+
+ cached = iscsi_get_cached_nodes(module, portal)
+
+ # return json dict
+ result = {}
+ result['changed'] = False
+
+ if discover:
+ if portal is None:
+ module.fail_json(msg="Need to specify at least the portal (ip) to discover")
+ elif check:
+ nodes = cached
+ else:
+ iscsi_discover(module, portal, port)
+ nodes = iscsi_get_cached_nodes(module, portal)
+ if not compare_nodelists(cached, nodes):
+ result['changed'] |= True
+ result['cache_updated'] = True
+ else:
+ nodes = cached
+
+ if login is not None or automatic is not None:
+ if target is None:
+ if len(nodes) > 1:
+ module.fail_json(msg="Need to specify a target")
+ elif len(nodes) == 0:
+ module.fail_json(msg="No targets found in the node cache; specify a target or run discovery first")
+ else:
+ target = nodes[0]
+ else:
+ # check that the given target is in the cache
+ if target not in nodes:
+ module.fail_json(msg="Specified target not found")
+
+ if show_nodes:
+ result['nodes'] = nodes
+
+ if login is not None:
+ loggedon = target_loggedon(module, target)
+ if (login and loggedon) or (not login and not loggedon):
+ result['changed'] |= False
+ if login:
+ result['devicenodes'] = target_device_node(module, target)
+ elif not check:
+ if login:
+ target_login(module, target, portal, port)
+ # give udev some time
+ time.sleep(1)
+ result['devicenodes'] = target_device_node(module, target)
+ else:
+ target_logout(module, target)
+ result['changed'] |= True
+ result['connection_changed'] = True
+ else:
+ result['changed'] |= True
+ result['connection_changed'] = True
+
+ if automatic is not None:
+ isauto = target_isauto(module, target)
+ if (automatic and isauto) or (not automatic and not isauto):
+ result['changed'] |= False
+ result['automatic_changed'] = False
+ elif not check:
+ if automatic:
+ target_setauto(module, target)
+ else:
+ target_setmanual(module, target)
+ result['changed'] |= True
+ result['automatic_changed'] = True
+ else:
+ result['changed'] |= True
+ result['automatic_changed'] = True
+
+ module.exit_json(**result)
+
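+# Illustrative sketch (not part of the module): the underlying iscsiadm calls
+# that a discover-then-login run boils down to; portal and target values are
+# hypothetical.
+#
+#   import subprocess
+#   portal, port = '10.0.0.5', '3260'
+#   target = 'iqn.2001-04.com.example:storage.disk1'
+#   subprocess.check_call(['iscsiadm', '--mode', 'discovery', '--type',
+#                          'sendtargets', '--portal', '%s:%s' % (portal, port)])
+#   subprocess.check_call(['iscsiadm', '--mode', 'node', '--targetname',
+#                          target, '--login'])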
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
new file mode 100644
index 00000000..7432c48a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
@@ -0,0 +1,653 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrik Lundin <patrik@sigterm.se>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: openbsd_pkg
+author:
+- Patrik Lundin (@eest)
+short_description: Manage packages on OpenBSD
+description:
+ - Manage packages on OpenBSD using the pkg tools.
+requirements:
+- python >= 2.5
+options:
+ name:
+ description:
+ - A name or a list of names of the packages.
+ required: yes
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ choices: [ absent, latest, present, installed, removed ]
+ default: present
+ type: str
+ build:
+ description:
+ - Build the package from source instead of downloading and installing
+ a binary. Requires that the port source tree is already installed.
+ Automatically builds and installs the 'sqlports' package, if it is
+ not already installed.
+ - Mutually exclusive with I(snapshot).
+ type: bool
+ default: no
+ snapshot:
+ description:
+ - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
+ - Mutually exclusive with I(build).
+ type: bool
+ default: no
+ version_added: 1.3.0
+ ports_dir:
+ description:
+ - When used in combination with the C(build) option, allows overriding
+ the default ports source directory.
+ default: /usr/ports
+ type: path
+ clean:
+ description:
+ - When updating or removing packages, delete the extra configuration
+ file(s) in the old packages which are annotated with @extra in
+ the packaging-list.
+ type: bool
+ default: no
+ quick:
+ description:
+ - Replace or delete packages quickly; do not bother with checksums
+ before removing normal files.
+ type: bool
+ default: no
+notes:
+ - When used with a C(loop:), each package is processed individually;
+ it is much more efficient to pass the list directly to the C(name) option.
+'''
+
+EXAMPLES = '''
+- name: Make sure nmap is installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+
+- name: Make sure nmap is the latest version
+ community.general.openbsd_pkg:
+ name: nmap
+ state: latest
+
+- name: Make sure nmap is not installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: absent
+
+- name: Make sure nmap is installed, build it from source if it is not
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+ build: yes
+
+- name: Specify a pkg flavour with '--'
+ community.general.openbsd_pkg:
+ name: vim--no_x11
+ state: present
+
+- name: Specify the default flavour to avoid ambiguity errors
+ community.general.openbsd_pkg:
+ name: vim--
+ state: present
+
+- name: Specify a package branch (requires at least OpenBSD 6.0)
+ community.general.openbsd_pkg:
+ name: python%3.5
+ state: present
+
+- name: Update all packages on the system
+ community.general.openbsd_pkg:
+ name: '*'
+ state: latest
+
+- name: Purge a package and its configuration files
+ community.general.openbsd_pkg:
+ name: mpd
+ clean: yes
+ state: absent
+
+- name: Quickly remove a package without checking checksums
+ community.general.openbsd_pkg:
+ name: qt5
+ quick: yes
+ state: absent
+'''
+
+import os
+import platform
+import re
+import shlex
+import sqlite3
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+# Function used for executing commands.
+def execute_command(cmd, module):
+ # Break command line into arguments.
+ # This makes run_command() use shell=False which we need to not cause shell
+ # expansion of special characters like '*'.
+ cmd_args = shlex.split(cmd)
+ return module.run_command(cmd_args)
+
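+# Illustrative sketch (not part of the module): shlex.split() is what keeps
+# run_command() at shell=False, so a literal '*' reaches pkg_add unexpanded.
+#
+#   import shlex
+#   shlex.split('pkg_add -Imu *')  # -> ['pkg_add', '-Imu', '*']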
+
+# Function used to find out if a package is currently installed.
+def get_package_state(names, pkg_spec, module):
+ info_cmd = 'pkg_info -Iq'
+
+ for name in names:
+ command = "%s inst:%s" % (info_cmd, name)
+
+ rc, stdout, stderr = execute_command(command, module)
+
+ if stderr:
+ module.fail_json(msg="failed in get_package_state(): " + stderr)
+
+ if stdout:
+ # If the requested package name is just a stem, like "python", we may
+ # find multiple packages with that name.
+ pkg_spec[name]['installed_names'] = stdout.splitlines()
+ module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
+ pkg_spec[name]['installed_state'] = True
+ else:
+ pkg_spec[name]['installed_state'] = False
+
+
+# Function used to make sure a package is present.
+def package_present(names, pkg_spec, module):
+ build = module.params['build']
+
+ for name in names:
+ # It is possible package_present() has been called from package_latest().
+ # In that case we do not want to operate on the whole list of names,
+ # only the leftovers.
+ if pkg_spec['package_latest_leftovers']:
+ if name not in pkg_spec['package_latest_leftovers']:
+ module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
+ continue
+ else:
+ module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
+
+ if module.check_mode:
+ install_cmd = 'pkg_add -Imn'
+ else:
+ if build is True:
+ port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+ if os.path.isdir(port_dir):
+ if pkg_spec[name]['flavor']:
+ flavors = pkg_spec[name]['flavor'].replace('-', ' ')
+ install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+ elif pkg_spec[name]['subpackage']:
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+ pkg_spec[name]['subpackage'])
+ else:
+ install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+ else:
+ module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+ else:
+ install_cmd = 'pkg_add -Im'
+
+ if module.params['snapshot'] is True:
+ install_cmd += ' -Dsnap'
+
+ if pkg_spec[name]['installed_state'] is False:
+
+ # Attempt to install the package
+ if build is True and not module.check_mode:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
+ else:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
+
+ # The behaviour of pkg_add is a bit different depending on if a
+ # specific version is supplied or not.
+ #
+ # When a specific version is supplied the return code will be 0 when
+ # a package is found and 1 when it is not. If a version is not
+ # supplied the tool will exit 0 in both cases.
+ #
+ # It is important to note that "version" relates to the
+ # packages-specs(7) notion of a version. If using the branch syntax
+ # (like "python%3.5") even though a branch name may look like a
+ # version string it is not used as one by pkg_add.
+ if pkg_spec[name]['version'] or build is True:
+ # Depend on the return code.
+ module.debug("package_present(): depending on return code for name '%s'" % name)
+ if pkg_spec[name]['rc']:
+ pkg_spec[name]['changed'] = False
+ else:
+ # Depend on stderr instead.
+ module.debug("package_present(): depending on stderr for name '%s'" % name)
+ if pkg_spec[name]['stderr']:
+ # There is a corner case where having an empty directory in
+ # installpath prior to the right location will result in a
+ # "file:/local/package/directory/ is empty" message on stderr
+ # while still installing the package, so we need to look
+ # for a message like "packagename-1.0: ok" just in case.
+ match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
+
+ if match:
+ # It turns out we were able to install the package.
+ module.debug("package_present(): we were able to install package for name '%s'" % name)
+ else:
+ # We really did fail, fake the return code.
+ module.debug("package_present(): we really did fail for name '%s'" % name)
+ pkg_spec[name]['rc'] = 1
+ pkg_spec[name]['changed'] = False
+ else:
+ module.debug("package_present(): stderr was not set for name '%s'" % name)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
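+# Illustrative sketch (not part of the module): the stderr corner case handled
+# above, with hypothetical output. Noise on stderr does not mean the install
+# failed as long as a "<stem>-<version>: ok" line made it to stdout:
+#
+#   import re
+#   stem = 'nmap'
+#   stdout = 'quirks-3.442 signed on 2020-11-29T00:00:00Z\nnmap-7.91: ok\n'
+#   print(bool(re.search(r"\W%s-[^:]+: ok\W" % stem, stdout)))  # True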
+
+# Function used to make sure a package is the latest available version.
+def package_latest(names, pkg_spec, module):
+ if module.params['build'] is True:
+ module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
+
+ upgrade_cmd = 'pkg_add -um'
+
+ if module.check_mode:
+ upgrade_cmd += 'n'
+
+ if module.params['clean']:
+ upgrade_cmd += 'c'
+
+ if module.params['quick']:
+ upgrade_cmd += 'q'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+
+ # Attempt to upgrade the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
+
+ # Look for output looking something like "nmap-6.01->6.25: ok" to see if
+ # something changed (or would have changed). Use \W to delimit the match
+ # from progress meter output.
+ pkg_spec[name]['changed'] = False
+ for installed_name in pkg_spec[name]['installed_names']:
+ module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
+ match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
+ if match:
+ module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
+
+ pkg_spec[name]['changed'] = True
+ break
+
+ # FIXME: This part is problematic. Based on the issues mentioned (and
+ # handled) in package_present() it is not safe to blindly trust stderr
+ # as an indicator that the command failed, and in the case with
+ # empty installpath directories this will break.
+ #
+ # For now keep this safeguard here, but ignore it if we managed to
+ # parse out a successful update above. This way we will report a
+ # successful run when we actually modify something but fail
+ # otherwise.
+ if pkg_spec[name]['changed'] is not True:
+ if pkg_spec[name]['stderr']:
+ pkg_spec[name]['rc'] = 1
+
+ else:
+ # Note packages that need to be handled by package_present
+ module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
+ pkg_spec['package_latest_leftovers'].append(name)
+
+ # If there were any packages that were not installed we call
+ # package_present() which will handle those.
+ if pkg_spec['package_latest_leftovers']:
+ module.debug("package_latest(): calling package_present() to handle leftovers")
+ package_present(names, pkg_spec, module)
+
+
+# Function used to make sure a package is not installed.
+def package_absent(names, pkg_spec, module):
+ remove_cmd = 'pkg_delete -I'
+
+ if module.check_mode:
+ remove_cmd += 'n'
+
+ if module.params['clean']:
+ remove_cmd += 'c'
+
+ if module.params['quick']:
+ remove_cmd += 'q'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+ # Attempt to remove the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+ else:
+ pkg_spec[name]['changed'] = False
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
+def parse_package_name(names, pkg_spec, module):
+
+ # Initialize empty list of package_latest() leftovers.
+ pkg_spec['package_latest_leftovers'] = []
+
+ for name in names:
+ module.debug("parse_package_name(): parsing name: %s" % name)
+ # Do some initial matches so we can base the more advanced regex on that.
+ version_match = re.search("-[0-9]", name)
+ versionless_match = re.search("--", name)
+
+ # Stop if someone is giving us a name that both has a version and is
+ # version-less at the same time.
+ if version_match and versionless_match:
+ module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+ # All information for a given name is kept in the pkg_spec keyed by that name.
+ pkg_spec[name] = {}
+
+ # If name includes a version.
+ if version_match:
+ match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = match.group('version')
+ pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'version'
+ module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+ "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+ # If name includes no version but is version-less ("--").
+ elif versionless_match:
+ match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = '-'
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'versionless'
+ module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+ # If name includes no version, and is not version-less, it is all a
+ # stem, possibly with a branch (%branchname) tacked on at the
+ # end.
+ else:
+ match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = None
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = None
+ pkg_spec[name]['flavor'] = None
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'stem'
+ module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at else: " + name)
+
+ # Verify that the managed host is new enough to support branch syntax.
+ if pkg_spec[name]['branch']:
+ branch_release = "6.0"
+
+ if StrictVersion(platform.release()) < StrictVersion(branch_release):
+ module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+ # Sanity check that there are no trailing dashes in flavor.
+ # Try to stop strange stuff early so we can be strict later.
+ if pkg_spec[name]['flavor']:
+ match = re.search("-$", pkg_spec[name]['flavor'])
+ if match:
+ module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
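+# Illustrative sketch (not part of the module): how the three branches above
+# split some hypothetical names.
+#
+#   'nmap-7.91'   -> style 'version':     stem 'nmap', version '7.91'
+#   'vim--no_x11' -> style 'versionless': stem 'vim', flavor 'no_x11'
+#   'python%3.5'  -> style 'stem':        stem 'python', branch '3.5'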
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+ pkg_spec[name]['subpackage'] = None
+ if pkg_spec[name]['stem'] == 'sqlports':
+ return 'databases/sqlports'
+ else:
+ # try for an exact match first
+ sqlports_db_file = '/usr/local/share/sqlports'
+ if not os.path.isfile(sqlports_db_file):
+ module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+ conn = sqlite3.connect(sqlports_db_file)
+ first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+ query = first_part_of_query + ' = ?'
+ module.debug("package_package_source_path(): exact query: %s" % query)
+ cursor = conn.execute(query, (name,))
+ results = cursor.fetchall()
+
+ # next, try for a fuzzier match
+ if len(results) < 1:
+ looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
+ query = first_part_of_query + ' LIKE ?'
+ if pkg_spec[name]['flavor']:
+ looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+ module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ elif pkg_spec[name]['style'] == 'versionless':
+ query += ' AND fullpkgname NOT LIKE ?'
+ module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
+ cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+ else:
+ module.debug("package_package_source_path(): fuzzy query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ results = cursor.fetchall()
+
+ # error if we don't find exactly 1 match
+ conn.close()
+ if len(results) < 1:
+ module.fail_json(msg="could not find a port by the name '%s'" % name)
+ if len(results) > 1:
+ matches = map(lambda x: x[1], results)
+ module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+ # there's exactly 1 match, so figure out the subpackage, if any, then return
+ fullpkgpath = results[0][0]
+ parts = fullpkgpath.split(',')
+ if len(parts) > 1 and parts[1][0] == '-':
+ pkg_spec[name]['subpackage'] = parts[1]
+ return parts[0]
+
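+# Illustrative sketch (not part of the module): the sqlports lookup above, run
+# standalone against the same database (the package name is hypothetical).
+#
+#   import sqlite3
+#   conn = sqlite3.connect('/usr/local/share/sqlports')
+#   cursor = conn.execute('SELECT fullpkgpath, fullpkgname FROM ports '
+#                         'WHERE fullpkgname LIKE ?', ('nmap-%',))
+#   print(cursor.fetchall())  # e.g. [('net/nmap', 'nmap-7.91')]
+#   conn.close()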
+
+# Function used for upgrading all installed packages.
+def upgrade_packages(pkg_spec, module):
+ if module.check_mode:
+ upgrade_cmd = 'pkg_add -Imnu'
+ else:
+ upgrade_cmd = 'pkg_add -Imu'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ # Create a minimal pkg_spec entry for '*' to store return values.
+ pkg_spec['*'] = {}
+
+ # Attempt to upgrade all packages.
+ pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
+
+ # Try to find any occurrence of a package changing version like:
+ # "bzip2-1.0.6->1.0.6p0: ok".
+ match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+ if match:
+ pkg_spec['*']['changed'] = True
+
+ else:
+ pkg_spec['*']['changed'] = False
+
+ # It seems we cannot trust the return value, so depend on the presence of
+ # stderr to know if something failed.
+ if pkg_spec['*']['stderr']:
+ pkg_spec['*']['rc'] = 1
+ else:
+ pkg_spec['*']['rc'] = 0
+
+
+# ===========================================
+# Main control flow.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ build=dict(type='bool', default=False),
+ snapshot=dict(type='bool', default=False),
+ ports_dir=dict(type='path', default='/usr/ports'),
+ quick=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['snapshot', 'build']],
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ build = module.params['build']
+ ports_dir = module.params['ports_dir']
+
+ rc = 0
+ stdout = ''
+ stderr = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+ result['build'] = build
+
+ # The data structure used to keep track of package information.
+ pkg_spec = {}
+
+ if build is True:
+ if not os.path.isdir(ports_dir):
+ module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+ # Build sqlports if it is not installed yet.
+ parse_package_name(['sqlports'], pkg_spec, module)
+ get_package_state(['sqlports'], pkg_spec, module)
+ if not pkg_spec['sqlports']['installed_state']:
+ module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+ package_present(['sqlports'], pkg_spec, module)
+
+ asterisk_name = False
+ for n in name:
+ if n == '*':
+ if len(name) != 1:
+ module.fail_json(msg="the package name '*' can not be mixed with other names")
+
+ asterisk_name = True
+
+ if asterisk_name:
+ if state != 'latest':
+ module.fail_json(msg="the package name '*' is only valid when using state=latest")
+ else:
+ # Perform an upgrade of all installed packages.
+ upgrade_packages(pkg_spec, module)
+ else:
+ # Parse package names and put results in the pkg_spec dictionary.
+ parse_package_name(name, pkg_spec, module)
+
+ # Not sure how the branch syntax is supposed to play together
+ # with build mode. Disable it for now.
+ for n in name:
+ if pkg_spec[n]['branch'] and module.params['build'] is True:
+ module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
+
+ # Get state for all package names.
+ get_package_state(name, pkg_spec, module)
+
+ # Perform requested action.
+ if state in ['installed', 'present']:
+ package_present(name, pkg_spec, module)
+ elif state in ['absent', 'removed']:
+ package_absent(name, pkg_spec, module)
+ elif state == 'latest':
+ package_latest(name, pkg_spec, module)
+
+ # The combined changed status for all requested packages. If anything
+ # is changed this is set to True.
+ combined_changed = False
+
+ # The combined failed status for all requested packages. If anything
+ # failed this is set to True.
+ combined_failed = False
+
+ # We combine all error messages in this comma separated string, for example:
+ # "msg": "Can't find nmapp\n, Can't find nmappp\n"
+ combined_error_message = ''
+
+ # Loop over all requested package names and check if anything failed or
+ # changed.
+ for n in name:
+ if pkg_spec[n]['rc'] != 0:
+ combined_failed = True
+ if pkg_spec[n]['stderr']:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stderr']
+ else:
+ combined_error_message = pkg_spec[n]['stderr']
+ else:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stdout']
+ else:
+ combined_error_message = pkg_spec[n]['stdout']
+
+ if pkg_spec[n]['changed'] is True:
+ combined_changed = True
+
+ # If combined_error_message contains anything at least some part of the
+ # list of requested package names failed.
+ if combined_failed:
+ module.fail_json(msg=combined_error_message, **result)
+
+ result['changed'] = combined_changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
new file mode 100644
index 00000000..aa477e42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Update the backend configuration of OpenDJ via the dsconfig set-backend-prop command
+description:
+ - This module will update settings for OpenDJ with the command set-backend-prop.
+ - It will first check via the get-backend-prop command whether the configuration needs to be applied.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+options:
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ username:
+ description:
+ - The username to connect to.
+ required: false
+ default: cn=Directory Manager
+ password:
+ description:
+ - The password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ passwordfile:
+ description:
+ - Location to the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ state:
+ description:
+ - If configuration needs to be added/updated
+ required: false
+ default: "present"
+'''
+
+EXAMPLES = '''
+- name: Add or update OpenDJ backend properties
+ community.general.opendj_backendprop:
+ hostname: localhost
+ port: 4444
+ username: "cn=Directory Manager"
+ password: password
+ backend: userRoot
+ name: index-entry-limit
+ value: 5000
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BackendProp(object):
+
+ def __init__(self, module):
+ self._module = module
+
+ def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'get-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '-n', '-X', '-s'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return stdout
+ else:
+ self._module.fail_json(msg="Error message: " + str(stderr))
+
+ def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'set-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '--set', name + ":" + value,
+ '-n', '-X'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return True
+ else:
+ self._module.fail_json(msg="Error message: " + stderr)
+
+ def validate_data(self, data=None, name=None, value=None):
+ for config_line in data.split('\n'):
+ if config_line:
+ split_line = config_line.split()
+ if split_line[0] == name:
+ if split_line[1] == value:
+ return True
+ return False
+
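+# Illustrative sketch (not part of the module): validate_data() scans output in
+# which every line is "<property> <value>"; the data below is hypothetical.
+#
+#   data = 'index-entry-limit 4000\ndb-cache-percent 10\n'
+#   name, value = 'index-entry-limit', '5000'
+#   current = dict(line.split()[:2] for line in data.splitlines() if line)
+#   print(current.get(name) == value)  # False -> set-backend-prop must run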
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
+ hostname=dict(required=True),
+ port=dict(required=True),
+ username=dict(default="cn=Directory Manager", required=False),
+ password=dict(required=False, no_log=True),
+ passwordfile=dict(required=False, type="path"),
+ backend=dict(required=True),
+ name=dict(required=True),
+ value=dict(required=True),
+ state=dict(default="present"),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['password', 'passwordfile']],
+ required_one_of=[['password', 'passwordfile']]
+ )
+
+ opendj_bindir = module.params['opendj_bindir']
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ passwordfile = module.params['passwordfile']
+ backend_name = module.params['backend']
+ name = module.params['name']
+ value = module.params['value']
+ state = module.params['state']
+
+ if module.params["password"] is not None:
+ password_method = ['-w', password]
+ elif module.params["passwordfile"] is not None:
+ password_method = ['-j', passwordfile]
+
+ opendj = BackendProp(module)
+ validate = opendj.get_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name)
+
+ if validate:
+ if not opendj.validate_data(data=validate, name=name, value=value):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if opendj.set_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name,
+ name=name,
+ value=value):
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py
new file mode 100644
index 00000000..817ed9f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Andrew Gaffney <andrew@agaffney.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: openwrt_init
+author:
+ - "Andrew Gaffney (@agaffney)"
+short_description: Manage services on OpenWrt.
+description:
+ - Controls OpenWrt services on remote hosts.
+options:
+ name:
+ type: str
+ description:
+ - Name of the service.
+ required: true
+ aliases: ['service']
+ state:
+ type: str
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ enabled:
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled is required.)
+ type: bool
+ pattern:
+ type: str
+ description:
+ - If the service does not respond to the 'running' command, name a
+ substring to look for in the output of the I(ps) command as a
+ stand-in for a 'running' result. If the string is found, the
+ service will be assumed to be running.
+notes:
+ - One option other than name is required.
+requirements:
+ - An OpenWrt system (with python)
+'''
+
+EXAMPLES = '''
+- name: Start service httpd, if not running
+ community.general.openwrt_init:
+ state: started
+ name: httpd
+
+- name: Stop service cron, if running
+ community.general.openwrt_init:
+ name: cron
+ state: stopped
+
+- name: Reload service httpd, in all cases
+ community.general.openwrt_init:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd
+ community.general.openwrt_init:
+ name: httpd
+ enabled: yes
+'''
+
+RETURN = '''
+'''
+
+import os
+import glob
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+module = None
+init_script = None
+
+
+# ===============================
+# Check if service is enabled
+def is_enabled():
+ (rc, out, err) = module.run_command("%s enabled" % init_script)
+ if rc == 0:
+ return True
+ return False
+
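+# Illustrative sketch (not part of the module): the same enabled-at-boot check
+# done standalone, for a hypothetical service.
+#
+#   import subprocess
+#   rc = subprocess.call(['/etc/init.d/cron', 'enabled'])
+#   print(rc == 0)  # True when the service is enabled at boot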
+
+# ===========================================
+# Main control flow
+
+def main():
+ global module, init_script
+ # init
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ pattern=dict(type='str', required=False, default=None),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ # initialize
+ service = module.params['name']
+ init_script = '/etc/init.d/' + service
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ }
+
+ # check if service exists
+ if not os.path.exists(init_script):
+ module.fail_json(msg='service %s does not exist' % service)
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+ # do we need to enable the service?
+ enabled = is_enabled()
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ # openwrt init scripts can return a non-zero exit code on a successful 'enable'
+ # command if the init script doesn't contain a STOP value, so we ignore the exit
+ # code and explicitly check if the service is now in the desired state
+ if is_enabled() != module.params['enabled']:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ result['enabled'] = not enabled
+
+ if module.params['state'] is not None:
+ running = False
+
+ # check if service is currently running
+ if module.params['pattern']:
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+ # this should be busybox ps, so we only want/need the 'w' option
+ (rc, psout, pserr) = module.run_command('%s w' % psbin)
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ lines = psout.split("\n")
+ for line in lines:
+ if module.params['pattern'] in line and "pattern=" not in line:
+ # ignore lines that contain "pattern=" so we do not match our own
+ # module invocation (or ./hacking/test-module.py)
+ running = True
+ break
+ else:
+ (rc, out, err) = module.run_command("%s running" % init_script)
+ if rc == 0:
+ running = True
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # determine action, if any
+ action = None
+ if module.params['state'] == 'started':
+ if not running:
+ action = 'start'
+ result['changed'] = True
+ elif module.params['state'] == 'stopped':
+ if running:
+ action = 'stop'
+ result['changed'] = True
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+ result['changed'] = True
+
+ if action:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py
new file mode 100644
index 00000000..7da9a487
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Pelletier <pp.pelletier@gmail.com>
+# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: opkg
+author: "Patrick Pelletier (@skinp)"
+short_description: Package manager for OpenWrt
+description:
+ - Manages OpenWrt packages
+options:
+ name:
+ description:
+ - name of package to install/remove
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent', 'installed', 'removed' ]
+ default: present
+ type: str
+ force:
+ description:
+ - The opkg --force variant to use; the value is applied as --force-<value>.
+ choices:
+ - ""
+ - "depends"
+ - "maintainer"
+ - "reinstall"
+ - "overwrite"
+ - "downgrade"
+ - "space"
+ - "postinstall"
+ - "remove"
+ - "checksum"
+ - "removal-of-dependent-packages"
+ type: str
+ update_cache:
+ description:
+ - update the package db first
+ aliases: ['update-cache']
+ default: "no"
+ type: bool
+requirements:
+ - opkg
+ - python
+'''
+EXAMPLES = '''
+- name: Install foo
+ community.general.opkg:
+ name: foo
+ state: present
+
+- name: Update cache and install foo
+ community.general.opkg:
+ name: foo
+ state: present
+ update_cache: yes
+
+- name: Remove foo
+ community.general.opkg:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar
+ community.general.opkg:
+ name: foo,bar
+ state: absent
+
+- name: Install foo using overwrite option forcibly
+ community.general.opkg:
+ name: foo
+ state: present
+ force: overwrite
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def update_package_db(module, opkg_path):
+ """ Updates packages list. """
+
+ rc, out, err = module.run_command("%s update" % opkg_path)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def query_package(module, opkg_path, name, state="present"):
+ """ Returns whether a package is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ return True
+
+ return False
+
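+# Illustrative sketch (not part of the module): 'opkg list-installed' prints
+# one "name - version" line per package, so the anchored grep above matches an
+# exact name. The same check without a shell (the package name is hypothetical):
+#
+#   import subprocess
+#   out = subprocess.check_output(['opkg', 'list-installed']).decode()
+#   print(any(line.startswith('dropbear ') for line in out.splitlines()))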
+
+def remove_packages(module, opkg_path, packages):
+ """ Uninstalls one or more packages if installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
+
+ if query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, opkg_path, packages):
+ """ Installs one or more packages if not already installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
+
+ if not query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=True),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
+ force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
+ "checksum", "removal-of-dependent-packages"]),
+ update_cache=dict(default="no", aliases=["update-cache"], type='bool')
+ )
+ )
+
+ opkg_path = module.get_bin_path('opkg', True, ['/bin'])
+
+ p = module.params
+
+ if p["update_cache"]:
+ update_package_db(module, opkg_path)
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] in ["present", "installed"]:
+ install_packages(module, opkg_path, pkgs)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_packages(module, opkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py
new file mode 100644
index 00000000..a0362908
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py
@@ -0,0 +1,395 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com> (DO NOT CONTACT!)
+# Copyright: (c) 2019, Ansible project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: osx_defaults
+author:
+# DO NOT RE-ADD GITHUB HANDLE!
+- Franck Nijhof (!UNKNOWN)
+short_description: Manage macOS user defaults
+description:
+ - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
+ - macOS applications and other programs use the defaults system to record user preferences and other
+ information that must be maintained when the applications are not running (such as default font for new
+ documents, or the position of an Info panel).
+options:
+ domain:
+ description:
+ - The domain is a domain name of the form C(com.companyname.appname).
+ type: str
+ default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply.
+ - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
+ type: str
+ key:
+ description:
+ - The key of the user preference.
+ type: str
+ type:
+ description:
+ - The type of value to write.
+ type: str
+ choices: [ array, bool, boolean, date, float, int, integer, string ]
+ default: string
+ array_add:
+ description:
+ - Add new elements to the array for a key which has an array as its value.
+ type: bool
+ default: no
+ value:
+ description:
+ - The value to write.
+ - Only required when C(state=present).
+ type: raw
+ state:
+ description:
+ - The state of the user defaults.
+ - If set to C(list) will query the given parameter specified by C(key). Returns C(null) if nothing is found or the key is misspelled.
+ - C(list) added in version 2.8.
+ type: str
+ choices: [ absent, list, present ]
+ default: present
+ path:
+ description:
+ - The path in which to search for C(defaults).
+ type: str
+ default: /usr/bin:/usr/local/bin
+notes:
+ - Apple Mac caches defaults. You may need to log out and log in to apply the changes.
+'''
+
+EXAMPLES = r'''
+# TODO: Describe what happens in each example
+
+- community.general.osx_defaults:
+ domain: com.apple.Safari
+ key: IncludeInternalDebugMenu
+ type: bool
+ value: true
+ state: present
+
+- community.general.osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+ state: present
+
+- community.general.osx_defaults:
+ domain: /Library/Preferences/com.apple.SoftwareUpdate
+ key: AutomaticCheckEnabled
+ type: int
+ value: 1
+ become: yes
+
+- community.general.osx_defaults:
+ domain: com.apple.screensaver
+ host: currentHost
+ key: showClock
+ type: int
+ value: 1
+
+- community.general.osx_defaults:
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+
+- community.general.osx_defaults:
+ key: AppleLanguages
+ type: array
+ value:
+ - en
+ - nl
+
+- community.general.osx_defaults:
+ domain: com.geekchimp.macable
+ key: ExampleKeyToRemove
+ state: absent
+'''
+
+from datetime import datetime
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import binary_type, text_type
+
+
+# exceptions --------------------------------------------------------------- {{{
+class OSXDefaultsException(Exception):
+ def __init__(self, msg):
+ self.message = msg
+
+
+# /exceptions -------------------------------------------------------------- }}}
+
+# class MacDefaults -------------------------------------------------------- {{{
+class OSXDefaults(object):
+ """ Class to manage Mac OS user defaults """
+
+ # init ---------------------------------------------------------------- {{{
+ def __init__(self, module):
+ """ Initialize this module. Finds 'defaults' executable and preps the parameters """
+ # Initial var for storing current defaults value
+ self.current_value = None
+ self.module = module
+ self.domain = module.params['domain']
+ self.host = module.params['host']
+ self.key = module.params['key']
+ self.type = module.params['type']
+ self.array_add = module.params['array_add']
+ self.value = module.params['value']
+ self.state = module.params['state']
+ self.path = module.params['path']
+
+ # Try to find the defaults executable
+ self.executable = self.module.get_bin_path(
+ 'defaults',
+ required=False,
+ opt_dirs=self.path.split(':'),
+ )
+
+ if not self.executable:
+ raise OSXDefaultsException("Unable to locate defaults executable.")
+
+ # Ensure the value is the correct type
+ if self.state != 'absent':
+ self.value = self._convert_type(self.type, self.value)
+
+ # /init --------------------------------------------------------------- }}}
+
+ # tools --------------------------------------------------------------- {{{
+ @staticmethod
+ def is_int(value):
+ as_str = str(value)
+ if as_str.startswith("-"):
+ return as_str[1:].isdigit()
+ else:
+ return as_str.isdigit()
+
+ @staticmethod
+ def _convert_type(data_type, value):
+ """ Converts value to given type """
+ if data_type == "string":
+ return str(value)
+ elif data_type in ["bool", "boolean"]:
+ if isinstance(value, (binary_type, text_type)):
+ value = value.lower()
+ if value in [True, 1, "true", "1", "yes"]:
+ return True
+ elif value in [False, 0, "false", "0", "no"]:
+ return False
+ raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
+ elif data_type == "date":
+ try:
+ return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+ except ValueError:
+ raise OSXDefaultsException(
+ "Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
+ )
+ elif data_type in ["int", "integer"]:
+ if not OSXDefaults.is_int(value):
+ raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
+ return int(value)
+ elif data_type == "float":
+ try:
+ value = float(value)
+ except ValueError:
+ raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
+ return value
+ elif data_type == "array":
+ if not isinstance(value, list):
+ raise OSXDefaultsException("Invalid value. Expected value to be an array")
+ return value
+
+ raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))
+
+ def _host_args(self):
+ """ Returns a normalized list of commandline arguments based on the "host" attribute """
+ if self.host is None:
+ return []
+ elif self.host == 'currentHost':
+ return ['-currentHost']
+ else:
+ return ['-host', self.host]
+
+ def _base_command(self):
+ """ Returns a list containing the "defaults" executable and any common base arguments """
+ return [self.executable] + self._host_args()
+
+ @staticmethod
+ def _convert_defaults_str_to_list(value):
+ """ Converts array output from defaults to an list """
+ # Split output of defaults. Every line contains a value
+ value = value.splitlines()
+
+ # Remove first and last item, those are not actual values
+ value.pop(0)
+ value.pop(-1)
+
+ # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes
+ value = [re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in value]
+
+ return value
+
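+ # Illustrative sketch (not part of the class): 'defaults read' prints an
+ # array as the literal below; the helper reduces it to a plain list.
+ #
+ #   raw = '(\n    "en",\n    "nl"\n)'
+ #   lines = raw.splitlines()[1:-1]
+ #   print([re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in lines])
+ #   # -> ['en', 'nl']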
+ # /tools -------------------------------------------------------------- }}}
+
+ # commands ------------------------------------------------------------ {{{
+ def read(self):
+ """ Reads value of this domain & key from defaults """
+ # First try to find out the type
+ rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
+
+ # If RC is 1, the key does not exist
+ if rc == 1:
+ return None
+
+ # If the RC is not 0, something terrible happened! Ooooh nooo!
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % out)
+
+ # Ok, lets parse the type from output
+ data_type = out.strip().replace('Type is ', '')
+
+ # Now get the current value
+ rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
+
+ # Strip output
+ out = out.strip()
+
+ # A non-zero RC at this point is rather strange...
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % out)
+
+ # Convert string to list when type is array
+ if data_type == "array":
+ out = self._convert_defaults_str_to_list(out)
+
+ # Store the current_value
+ self.current_value = self._convert_type(data_type, out)
+
+ def write(self):
+ """ Writes value to this domain & key to defaults """
+ # We need to convert some values so the defaults commandline understands it
+ if isinstance(self.value, bool):
+ if self.value:
+ value = "TRUE"
+ else:
+ value = "FALSE"
+ elif isinstance(self.value, (int, float)):
+ value = str(self.value)
+ elif self.array_add and self.current_value is not None:
+ value = list(set(self.value) - set(self.current_value))
+ elif isinstance(self.value, datetime):
+ value = self.value.strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ value = self.value
+
+ # When the type is array and array_add is enabled, morph the type :)
+ if self.type == "array" and self.array_add:
+ self.type = "array-add"
+
+ # All values should be a list, for easy passing it to the command
+ if not isinstance(value, list):
+ value = [value]
+
+ rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
+
+ if rc != 0:
+ raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % out)
+
+ def delete(self):
+ """ Deletes defaults key from domain """
+ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % out)
+
+ # /commands ----------------------------------------------------------- }}}
+
+ # run ----------------------------------------------------------------- {{{
+ """ Does the magic! :) """
+
+ def run(self):
+
+ # Get the current value from defaults
+ self.read()
+
+ if self.state == 'list':
+ self.module.exit_json(key=self.key, value=self.current_value)
+
+ # Handle absent state
+ if self.state == "absent":
+ if self.current_value is None:
+ return False
+ if self.module.check_mode:
+ return True
+ self.delete()
+ return True
+
+ # There is a type mismatch! Given type does not match the type in defaults
+ value_type = type(self.value)
+ if self.current_value is not None and not isinstance(self.current_value, value_type):
+ raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)
+
+ # Current value matches the given value. Nothing needs to be done. Arrays need extra care
+ if self.type == "array" and self.current_value is not None and not self.array_add and \
+ set(self.current_value) == set(self.value):
+ return False
+ elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
+ return False
+ elif self.current_value == self.value:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ # Change/Create/Set given key/value for domain in defaults
+ self.write()
+ return True
+
+ # /run ---------------------------------------------------------------- }}}
+
+
+# /class MacDefaults ------------------------------------------------------ }}}
+
+
+# main -------------------------------------------------------------------- {{{
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', default='NSGlobalDomain'),
+ host=dict(type='str'),
+ key=dict(type='str'),
+ type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']),
+ array_add=dict(type='bool', default=False),
+ value=dict(type='raw'),
+ state=dict(type='str', default='present', choices=['absent', 'list', 'present']),
+ path=dict(type='str', default='/usr/bin:/usr/local/bin'),
+ ),
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['value']),
+ ),
+ )
+
+ try:
+ defaults = OSXDefaults(module=module)
+ module.exit_json(changed=defaults.run())
+ except OSXDefaultsException as e:
+ module.fail_json(msg=e.message)
+
+
+# /main ------------------------------------------------------------------- }}}
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
new file mode 100644
index 00000000..7ed3a5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_failover
+short_description: Manage OVH IP failover address
+description:
+ - Manage OVH (French European hosting provider) IP failover addresses. For now, this module can only be used to move
+ an IP failover (or failover block) between services.
+author: "Pascal HERAUD (@pascalheraud)"
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh >= 0.4.8
+options:
+ name:
+ required: true
+ description:
+ - The IP address to manage (can be a single IP like 1.1.1.1
+ or a block like 1.1.1.1/28)
+ type: str
+ service:
+ required: true
+ description:
+ - The name of the OVH service this IP address should be routed to
+ type: str
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ type: str
+ wait_completion:
+ required: false
+ default: true
+ type: bool
+ description:
+ - If true, the module will wait for the IP address to be moved.
+ If false, it exits without waiting. The taskId will be returned
+ in the module output.
+ wait_task_completion:
+ required: false
+ default: 0
+ description:
+ - If not 0, the module will wait for this task id to be
+ completed. Use wait_task_completion if you want to wait for
+ completion of a previously executed task with
+ wait_completion=false. You can execute this module repeatedly on
+ a list of failover IPs using wait_completion=false (see examples)
+ type: int
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ required: false
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed. Default is 120 seconds.
+ type: int
+
+'''
+
+EXAMPLES = '''
+# Route an IP address 1.1.1.1 to the service ns666.ovh.net
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_completion: false
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+ register: moved
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_task_completion: "{{moved.taskId}}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote_plus
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while client.get('/ip/{0}/task'.format(quote_plus(name)),
+ function='genericMoveFloatingIp',
+ status='todo'):
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def waitForTaskDone(client, name, taskId, timeout):
+ currentTimeout = timeout
+ while True:
+ task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
+ if task['status'] == 'done':
+ return True
+ time.sleep(5)  # Sleep 5 sec between polls; completion takes a while, so do not hammer the API
+ currentTimeout -= 5
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ service=dict(required=True),
+ endpoint=dict(required=True),
+ wait_completion=dict(default=True, type='bool'),
+ wait_task_completion=dict(default=0, type='int'),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_OVH:
+ module.fail_json(msg='ovh-api python module is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ service = module.params.get('service')
+ timeout = module.params.get('timeout')
+ wait_completion = module.params.get('wait_completion')
+ wait_task_completion = module.params.get('wait_task_completion')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the failover IP exists
+ try:
+ ips = client.get('/ip', ip=name, type='failover')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of ips, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in ips and '{0}/32'.format(name) not in ips:
+ module.fail_json(msg='IP {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the properties '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if ipproperties['routedTo']['serviceName'] != service:
+ if not module.check_mode:
+ if wait_task_completion == 0:
+ # Move the IP and get the created taskId
+ task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
+ taskId = task['taskId']
+ result['moved'] = True
+ else:
+ # Just wait for the given taskId to be completed
+ taskId = wait_task_completion
+ result['moved'] = False
+ result['taskId'] = taskId
+ if wait_completion or wait_task_completion != 0:
+ if not waitForTaskDone(client, name, taskId, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of move ip to service'.format(timeout))
+ result['waited'] = True
+ else:
+ result['waited'] = False
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
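Both waitForNoTask and waitForTaskDone above implement the same fixed-interval poll with a countdown. A generic sketch of that pattern (poll_until is a hypothetical helper, not part of the module), using time.monotonic so the deadline is immune to wall-clock changes:

import time

def poll_until(predicate, timeout, interval=1.0):
    # Call predicate() every `interval` seconds until it returns a truthy
    # value or `timeout` seconds elapse; return the last result.
    deadline = time.monotonic() + timeout
    while True:
        result = predicate()
        if result or time.monotonic() >= deadline:
            return result
        time.sleep(interval)

# Usage mirroring waitForTaskDone's 5-second cadence (get_task is assumed):
# done = poll_until(lambda: get_task()['status'] == 'done', timeout=120, interval=5)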
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 00000000..965a499c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends
+author: Pascal Heraud (@pascalheraud)
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh > 0.3.5
+options:
+ name:
+ required: true
+ description:
+ - The internal name of the LoadBalancing IP (ip-X.X.X.X).
+ type: str
+ backend:
+ required: true
+ description:
+ - The IP address of the backend to update / modify / delete
+ type: str
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - Determines whether the backend is to be created/modified
+ or deleted
+ type: str
+ probe:
+ default: 'none'
+ choices: ['none', 'http', 'icmp', 'oco']
+ description:
+ - Determines the type of probe to use for this backend
+ type: str
+ weight:
+ default: 8
+ description:
+ - Determines the weight for this backend
+ type: int
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance ovh-eu).
+ type: str
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed.
+ type: int
+
+'''
+
+EXAMPLES = '''
+- name: Add or modify the backend '212.1.1.1' on the loadbalancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ probe: none
+ weight: 8
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+
+- name: Remove the backend '212.1.1.1' from the loadbalancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: absent
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ backend=dict(required=True),
+ weight=dict(default=8, type='int'),
+ probe=dict(default='none',
+ choices=['none', 'http', 'icmp', 'oco']),
+ state=dict(default='present', choices=['present', 'absent']),
+ endpoint=dict(required=True),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ )
+ )
+
+ if not HAS_OVH:
+ module.fail_json(msg='ovh-api python module '
+ 'is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ state = module.params.get('state')
+ backend = module.params.get('backend')
+ weight = module.params.get('weight')
+ probe = module.params.get('probe')
+ timeout = module.params.get('timeout')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ loadBalancings = client.get('/ip/loadBalancing')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of loadBalancing, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in loadBalancings:
+ module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of backends '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ backendExists = backend in backends
+ moduleChanged = False
+ if state == "absent":
+ if backendExists:
+ # Remove backend
+ try:
+ client.delete(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of removing backend task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for deleting the backend, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+ else:
+ if backendExists:
+ # Get properties
+ try:
+ backendProperties = client.get(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the backend properties, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if (backendProperties['weight'] != weight):
+ # Change weight
+ try:
+ client.post(
+ '/ip/loadBalancing/{0}/backend/{1}/setWeight'
+ .format(name, backend), weight=weight)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of setWeight to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the weight of the '
+ 'backend, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ if (backendProperties['probe'] != probe):
+ # Change probe
+ backendProperties['probe'] = probe
+ try:
+ client.put(
+ '/ip/loadBalancing/{0}/backend/{1}'
+ .format(name, backend), probe=probe)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'setProbe to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the probe of '
+ 'the backend, check application key, secret, '
+ 'consumerkey and parameters. Error returned by OVH api '
+ 'was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ else:
+ # Creates backend
+ try:
+ try:
+ client.post('/ip/loadBalancing/{0}/backend'.format(name),
+ ipBackend=backend, probe=probe, weight=weight)
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'backend creation task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+ moduleChanged = True
+
+ module.exit_json(changed=moduleChanged)
+
+
+if __name__ == '__main__':
+ main()
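The reconcile logic above can be read as a pure decision function: given whether the backend exists, its current properties, and the requested state, list the API operations to issue. A hedged sketch (plan_backend_changes and the op tuples are illustrative only; the routes mirror the ones used above):

def plan_backend_changes(exists, current, state, weight, probe):
    ops = []
    if state == 'absent':
        if exists:
            ops.append(('DELETE', '/backend/{ip}'))
        return ops
    if not exists:
        ops.append(('POST', '/backend', {'probe': probe, 'weight': weight}))
        return ops
    if current['weight'] != weight:
        ops.append(('POST', '/backend/{ip}/setWeight', {'weight': weight}))
    if current['probe'] != probe:
        ops.append(('PUT', '/backend/{ip}', {'probe': probe}))
    return ops

# Already in the desired state: no operations, so changed=False.
assert plan_backend_changes(True, {'weight': 8, 'probe': 'none'}, 'present', 8, 'none') == []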
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
new file mode 100644
index 00000000..75c70a79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Francois Lallart (@fraff)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovh_monthly_billing
+author: Francois Lallart (@fraff)
+version_added: '0.2.0'
+short_description: Manage OVH monthly billing
+description:
+ - Enable monthly billing on OVH cloud instances (be aware that OVH does not allow disabling it).
+requirements: [ "ovh" ]
+options:
+ project_id:
+ required: true
+ type: str
+ description:
+ - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
+ instance_id:
+ required: true
+ type: str
+ description:
+ - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
+ endpoint:
+ type: str
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ application_key:
+ type: str
+ description:
+ - The applicationKey to use
+ application_secret:
+ type: str
+ description:
+ - The application secret to use
+ consumer_key:
+ type: str
+ description:
+ - The consumer key to use
+'''
+
+EXAMPLES = '''
+- name: Basic usage, using auth from /etc/ovh.conf
+ community.general.ovh_monthly_billing:
+ project_id: 0c727a20aa144485b70c44dee9123b46
+ instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
+
+# Get the openstack cloud ID and instance ID; OVH uses them in its API
+- name: Get openstack cloud ID and instance ID
+ os_server_info:
+ cloud: myProjectName
+ region_name: myRegionName
+ server: myServerName
+ register: openstack_servers
+
+- name: Use IDs
+ community.general.ovh_monthly_billing:
+ project_id: "{{ openstack_servers.0.tenant_id }}"
+ instance_id: "{{ openstack_servers.0.id }}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+ OVH_IMPORT_ERROR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_id=dict(required=True),
+ instance_id=dict(required=True),
+ endpoint=dict(required=False),
+ application_key=dict(required=False, no_log=True),
+ application_secret=dict(required=False, no_log=True),
+ consumer_key=dict(required=False, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ # Get parameters
+ project_id = module.params.get('project_id')
+ instance_id = module.params.get('instance_id')
+ endpoint = module.params.get('endpoint')
+ application_key = module.params.get('application_key')
+ application_secret = module.params.get('application_secret')
+ consumer_key = module.params.get('consumer_key')
+ project = ""
+ instance = ""
+ ovh_billing_status = ""
+
+ if not HAS_OVH:
+ module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
+
+ # Connect to OVH API
+ client = ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+ # Check that the project exists
+ try:
+ project = client.get('/cloud/project/{0}'.format(project_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='project {0} does not exist'.format(project_id))
+
+ # Check that the instance exists
+ try:
+ instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
+
+ # Is monthlyBilling already enabled or pending?
+ if instance['monthlyBilling'] is not None:
+ if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
+ module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling'])
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Dry Run!")
+
+ try:
+ ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
+ module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
+ except APIError as apiError:
+ module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
+
+ # We should never reach here
+ module.fail_json(msg='Internal ovh_monthly_billing module error')
+
+
+if __name__ == "__main__":
+ main()
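Since monthly billing is a one-way switch, the module's control flow reduces to a small decision table over the monthlyBilling status and check mode. A sketch (billing_action is a hypothetical name; the status values mirror those checked above):

def billing_action(status, check_mode):
    if status in ('ok', 'activationPending'):
        return 'noop'          # already enabled or pending: changed=False
    if check_mode:
        return 'would-enable'  # dry run: report changed=True, skip the API call
    return 'enable'            # POST .../activeMonthlyBilling

assert billing_action('ok', check_mode=False) == 'noop'
assert billing_action(None, check_mode=True) == 'would-enable'
assert billing_action(None, check_mode=False) == 'enable'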
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py
new file mode 100644
index 00000000..25e3081c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py
@@ -0,0 +1,503 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt
+author:
+- Vincent Van der Kussen (@vincentvdk)
+short_description: oVirt/RHEV platform management
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: This module is for deprecated version of ovirt.
+ alternative: Use C(ovirt_vm) from the C(ovirt.ovirt) collection instead
+description:
+ - This module only supports oVirt/RHEV version 3. A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+ - Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform.
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ required: true
+ url:
+ description:
+ - The url of the oVirt instance.
+ type: str
+ required: true
+ instance_name:
+ description:
+ - The name of the instance to use.
+ type: str
+ required: true
+ aliases: [ vmname ]
+ password:
+ description:
+ - Password of the user to authenticate with.
+ type: str
+ required: true
+ image:
+ description:
+ - The template to use for the instance.
+ type: str
+ resource_type:
+ description:
+ - Whether you want to deploy an image or create an instance from scratch.
+ type: str
+ choices: [ new, template ]
+ zone:
+ description:
+ - Deploy the image to this oVirt cluster.
+ type: str
+ instance_disksize:
+ description:
+ - Size of the instance's disk in GB.
+ type: str
+ aliases: [ vm_disksize ]
+ instance_cpus:
+ description:
+ - The instance's number of CPUs.
+ type: str
+ default: 1
+ aliases: [ vmcpus ]
+ instance_nic:
+ description:
+ - The name of the network interface in oVirt/RHEV.
+ type: str
+ aliases: [ vmnic ]
+ instance_network:
+ description:
+ - The logical network the machine should belong to.
+ type: str
+ default: rhevm
+ aliases: [ vmnetwork ]
+ instance_mem:
+ description:
+ - The instance's amount of memory in MB.
+ type: str
+ aliases: [ vmmem ]
+ instance_type:
+ description:
+ - Define whether the instance is a server, desktop or high_performance.
+ - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
+ type: str
+ choices: [ desktop, server, high_performance ]
+ default: server
+ aliases: [ vmtype ]
+ disk_alloc:
+ description:
+ - Define whether disk is thin or preallocated.
+ type: str
+ choices: [ preallocated, thin ]
+ default: thin
+ disk_int:
+ description:
+ - Interface type of the disk.
+ type: str
+ choices: [ ide, virtio ]
+ default: virtio
+ instance_os:
+ description:
+ - Type of Operating System.
+ type: str
+ aliases: [ vmos ]
+ instance_cores:
+ description:
+ - Define the instance's number of cores.
+ type: str
+ default: 1
+ aliases: [ vmcores ]
+ sdomain:
+ description:
+ - The storage domain on which the instance's disk will be created.
+ type: str
+ region:
+ description:
+ - The oVirt/RHEV datacenter to deploy to.
+ type: str
+ instance_dns:
+ description:
+ - Define the instance's Primary DNS server.
+ type: str
+ aliases: [ dns ]
+ instance_domain:
+ description:
+ - Define the instance's Domain.
+ type: str
+ aliases: [ domain ]
+ instance_hostname:
+ description:
+ - Define the instance's Hostname.
+ type: str
+ aliases: [ hostname ]
+ instance_ip:
+ description:
+ - Define the instance's IP.
+ type: str
+ aliases: [ ip ]
+ instance_netmask:
+ description:
+ - Define the instance's Netmask.
+ type: str
+ aliases: [ netmask ]
+ instance_gateway:
+ description:
+ - Define the instance's Gateway.
+ type: str
+ aliases: [ gateway ]
+ instance_rootpw:
+ description:
+ - Define the instance's Root password.
+ type: str
+ aliases: [ rootpw ]
+ instance_key:
+ description:
+ - Define the instance's Authorized key.
+ type: str
+ aliases: [ key ]
+ state:
+ description:
+ - Create, terminate or remove instances.
+ type: str
+ choices: [ absent, present, restart, shutdown, started ]
+ default: present
+requirements:
+ - ovirt-engine-sdk-python
+'''
+
+EXAMPLES = '''
+- name: Basic example to provision from image
+ community.general.ovirt:
+ user: admin@internal
+ url: https://ovirt.example.com
+ instance_name: ansiblevm04
+ password: secret
+ image: centos_64
+ zone: cluster01
+ resource_type: template
+
+- name: Full example to create new instance from scratch
+ community.general.ovirt:
+ instance_name: testansible
+ resource_type: new
+ instance_type: server
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ instance_disksize: 10
+ zone: cluster01
+ region: datacenter1
+ instance_cpus: 1
+ instance_nic: nic1
+ instance_network: rhevm
+ instance_mem: 1000
+ disk_alloc: thin
+ sdomain: FIBER01
+ instance_cores: 1
+ instance_os: rhel_6x64
+ disk_int: virtio
+
+- name: Stop an existing instance
+  community.general.ovirt:
+    instance_name: testansible
+    state: shutdown
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an existing instance
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an instance with cloud init information
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ hostname: testansible
+ domain: ansible.local
+ ip: 192.0.2.100
+ netmask: 255.255.255.0
+ gateway: 192.0.2.1
+ rootpw: bigsecret
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_OVIRTSDK = True
+except ImportError:
+ HAS_OVIRTSDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.removed import removed_module
+
+
+# ------------------------------------------------------------------- #
+# create connection with API
+#
+def conn(url, user, password):
+ api = API(url=url, username=user, password=password, insecure=True)
+ try:
+ api.test()
+ except Exception:
+ raise Exception("Error connecting to the oVirt API")
+ return api
+
+
+# ------------------------------------------------------------------- #
+# Create VM from scratch
+def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
+ if vmdisk_alloc == 'thin':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System",
+ format='cow',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
+ elif vmdisk_alloc == 'preallocated':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
+ format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
+
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception("Error creating VM with specified parameters")
+ vm = conn.vms.get(name=vmname)
+ try:
+ vm.disks.add(vmdisk)
+ except Exception:
+ raise Exception("Error attaching disk")
+ try:
+ vm.nics.add(nic_net1)
+ except Exception:
+ raise Exception("Error adding nic")
+
+
+# create an instance from a template
+def create_vm_template(conn, vmname, image, zone):
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True))
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception('error adding template %s' % image)
+
+
+# start instance
+def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
+ domain=None, dns=None, rootpw=None, key=None):
+ vm = conn.vms.get(name=vmname)
+ use_cloud_init = False
+ nics = None
+ nic = None
+ if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
+ use_cloud_init = True
+ if ip and netmask and gateway:
+ ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
+ nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
+ nics = params.Nics()
+ nics = params.GuestNicsConfiguration(nic_configuration=[nic])
+ initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
+ root_password=rootpw, nic_configurations=nics, dns_servers=dns,
+ authorized_ssh_keys=key)
+ action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
+ vm.start(action=action)
+
+
+# Stop instance
+def vm_stop(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+
+
+# restart instance
+def vm_restart(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+ while conn.vms.get(name=vmname).get_status().get_state() != 'down':
+ time.sleep(5)
+ vm.start()
+
+
+# remove an instance
+def vm_remove(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.delete()
+
+
+# ------------------------------------------------------------------- #
+# VM statuses
+#
+# Get the VMs status
+def vm_status(conn, vmname):
+ status = conn.vms.get(name=vmname).status.state
+ return status
+
+
+# Get the VM object and return its name if the object exists
+def get_vm(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ if vm is None:
+ name = "empty"
+ else:
+ name = vm.get_name()
+ return name
+
+# ------------------------------------------------------------------- #
+# Hypervisor operations
+#
+# not available yet
+# ------------------------------------------------------------------- #
+# Main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']),
+ user=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ instance_name=dict(type='str', required=True, aliases=['vmname']),
+ password=dict(type='str', required=True, no_log=True),
+ image=dict(type='str'),
+ resource_type=dict(type='str', choices=['new', 'template']),
+ zone=dict(type='str'),
+ instance_disksize=dict(type='str', aliases=['vm_disksize']),
+ instance_cpus=dict(type='str', default=1, aliases=['vmcpus']),
+ instance_nic=dict(type='str', aliases=['vmnic']),
+ instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']),
+ instance_mem=dict(type='str', aliases=['vmmem']),
+ instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']),
+ disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']),
+ disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']),
+ instance_os=dict(type='str', aliases=['vmos']),
+ instance_cores=dict(type='str', default=1, aliases=['vmcores']),
+ instance_hostname=dict(type='str', aliases=['hostname']),
+ instance_ip=dict(type='str', aliases=['ip']),
+ instance_netmask=dict(type='str', aliases=['netmask']),
+ instance_gateway=dict(type='str', aliases=['gateway']),
+ instance_domain=dict(type='str', aliases=['domain']),
+ instance_dns=dict(type='str', aliases=['dns']),
+ instance_rootpw=dict(type='str', aliases=['rootpw'], no_log=True),
+ instance_key=dict(type='str', aliases=['key'], no_log=True),
+ sdomain=dict(type='str'),
+ region=dict(type='str'),
+ ),
+ )
+
+ if not HAS_OVIRTSDK:
+ module.fail_json(msg='ovirtsdk required for this module')
+
+ state = module.params['state']
+ user = module.params['user']
+ url = module.params['url']
+ vmname = module.params['instance_name']
+ password = module.params['password']
+ image = module.params['image'] # name of the image to deploy
+ resource_type = module.params['resource_type'] # template or from scratch
+ zone = module.params['zone'] # oVirt cluster
+ vmdisk_size = module.params['instance_disksize'] # disksize
+ vmcpus = module.params['instance_cpus'] # number of cpu
+ vmnic = module.params['instance_nic'] # network interface
+ vmnetwork = module.params['instance_network'] # logical network
+ vmmem = module.params['instance_mem'] # mem size
+ vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
+ vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
+ vmos = module.params['instance_os'] # Operating System
+ vmtype = module.params['instance_type'] # server, desktop or high_performance
+ vmcores = module.params['instance_cores'] # number of cores
+ sdomain = module.params['sdomain'] # storage domain to store disk on
+ region = module.params['region'] # oVirt Datacenter
+ hostname = module.params['instance_hostname']
+ ip = module.params['instance_ip']
+ netmask = module.params['instance_netmask']
+ gateway = module.params['instance_gateway']
+ domain = module.params['instance_domain']
+ dns = module.params['instance_dns']
+ rootpw = module.params['instance_rootpw']
+ key = module.params['instance_key']
+ # initialize connection
+ try:
+ c = conn(url + "/api", user, password)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+ if get_vm(c, vmname) == "empty":
+ if resource_type == 'template':
+ try:
+ create_vm_template(c, vmname, image, zone)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
+ elif resource_type == 'new':
+ # FIXME: refactor, use keyword args.
+ try:
+ create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
+ else:
+ module.exit_json(changed=False, msg="You did not specify a resource type")
+ else:
+ module.exit_json(changed=False, msg="VM %s already exists" % vmname)
+
+ if state == 'started':
+ if vm_status(c, vmname) == 'up':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmname)
+ else:
+ # vm_start(c, vmname)
+ vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
+ module.exit_json(changed=True, msg="VM %s started" % vmname)
+
+ if state == 'shutdown':
+ if vm_status(c, vmname) == 'down':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
+ else:
+ vm_stop(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
+
+ if state == 'restart':
+ if vm_status(c, vmname) == 'up':
+ vm_restart(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
+ else:
+ module.exit_json(changed=False, msg="VM %s is not running" % vmname)
+
+ if state == 'absent':
+ if get_vm(c, vmname) == "empty":
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
+ else:
+ vm_remove(c, vmname)
+ module.exit_json(changed=True, msg="VM %s removed" % vmname)
+
+
+if __name__ == '__main__':
+ main()
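create_vm above hands sizes to the SDK in bytes while the module accepts memory in MB and disk size in GB, hence the 1024 multipliers. A small sketch of those conversions (the helper names are illustrative; the string inputs match how the module declares these options as type str):

MIB = 1024 * 1024
GIB = 1024 * MIB

def mem_bytes(vmmem_mb):
    # instance_mem is documented in MB and arrives as a string.
    return int(vmmem_mb) * MIB

def disk_bytes(vmdisk_size_gb):
    # instance_disksize is documented in GB and arrives as a string.
    return int(vmdisk_size_gb) * GIB

assert mem_bytes('1000') == 1000 * 1024 * 1024
assert disk_bytes('10') == 10 * 1024 ** 3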
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py
new file mode 100644
index 00000000..e560e13e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label_facts
+short_description: Retrieve information about one or more oVirt/RHV affinity labels
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_affinity_label_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV affinity labels."
+notes:
+ - "This module returns a variable C(ovirt_affinity_labels), which
+ contains a list of affinity labels. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the affinity labels which should be listed."
+ vm:
+ description:
+ - "Name of the VM, which affinity labels should be listed."
+ host:
+ description:
+ - "Name of the host, which affinity labels should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all affinity labels whose names start with label
+ ovirt_affinity_label_info:
+ name: label*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+    Gather information about all affinity labels that are assigned to VMs
+    whose names start with postgres
+ ovirt_affinity_label_info:
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+    Gather information about all affinity labels that are assigned to hosts
+    whose names start with west
+ ovirt_affinity_label_info:
+ host: west*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+    Gather information about all affinity labels that are assigned to hosts
+    whose names start with west or to VMs whose names start with postgres
+ ovirt_affinity_label_info:
+ host: west*
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+'''
+
+RETURN = '''
+ovirt_affinity_labels:
+ description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys,
+ all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_affinity_label_facts', 'community.general.ovirt_affinity_label_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ labels = []
+ all_labels = affinity_labels_service.list()
+ if module.params['name']:
+ labels.extend([
+ label for label in all_labels
+ if fnmatch.fnmatch(label.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ if search_by_name(hosts_service, module.params['host']) is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ labels.extend([
+ label
+ for label in all_labels
+ for host in connection.follow_link(label.hosts)
+ if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['vm']) is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ labels.extend([
+ label
+ for label in all_labels
+ for vm in connection.follow_link(label.vms)
+ if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ labels = all_labels
+
+ result = dict(
+ ovirt_affinity_labels=[
+ get_dict_of_struct(
+ struct=label,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for label in labels
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
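The name, vm, and host filters above rely on shell-style globbing via fnmatch rather than regular expressions, which is why patterns like label* work. A minimal illustration with plain strings standing in for SDK objects:

import fnmatch

names = ['label-web', 'label-db', 'other']
matched = [n for n in names if fnmatch.fnmatch(n, 'label*')]
assert matched == ['label-web', 'label-db']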
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py
new file mode 100644
index 00000000..4085a702
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_api_facts
+short_description: Retrieve information about the oVirt/RHV API
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_api_info) instead.
+description:
+ - "Retrieve information about the oVirt/RHV API."
+notes:
+ - "This module returns a variable C(ovirt_api),
+ which contains information about the oVirt/RHV API. You need to register the result with
+ the I(register) keyword to use it."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about the oVirt API
+ ovirt_api_info:
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_api }}"
+'''
+
+RETURN = '''
+ovirt_api:
+ description: "Dictionary describing the oVirt API information.
+ Api attributes are mapped to dictionary keys,
+ all API attributes can be found at following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/api."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec()
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_api_facts', 'community.general.ovirt_api_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_api_facts' module has been renamed to 'ovirt_api_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ api = connection.system_service().get()
+ result = dict(
+ ovirt_api=get_dict_of_struct(
+ struct=api,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ )
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
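All of these deprecated _facts modules share the same exit shape: the legacy alias wraps everything in ansible_facts, while the _info path returns the keys at the top level for use with register. A sketch of that branching (shape_result is a hypothetical name):

def shape_result(result, is_old_facts):
    if is_old_facts:
        # Legacy _facts behaviour: everything under ansible_facts.
        return dict(changed=False, ansible_facts=result)
    # _info behaviour: top-level keys, meant to be captured via register.
    return dict(changed=False, **result)

assert 'ansible_facts' in shape_result({'ovirt_api': {}}, is_old_facts=True)
assert 'ovirt_api' in shape_result({'ovirt_api': {}}, is_old_facts=False)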
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py
new file mode 100644
index 00000000..e4916a26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster_facts
+short_description: Retrieve information about one or more oVirt/RHV clusters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_cluster_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV clusters."
+notes:
+ - "This module returns a variable C(ovirt_clusters), which
+ contains a list of clusters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search cluster X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all clusters whose names start with production
+  ovirt_cluster_info:
+    pattern: name=production*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_clusters }}"
+'''
+
+RETURN = '''
+ovirt_clusters:
+ description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
+ all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_cluster_facts', 'community.general.ovirt_cluster_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_cluster_facts' module has been renamed to 'ovirt_cluster_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters = clusters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_clusters=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in clusters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
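The pattern option is passed verbatim to the oVirt search backend as a query string such as name=X and datacenter=Y. A tiny helper that assembles such a pattern from keyword filters (build_pattern is hypothetical, not part of the module):

def build_pattern(**filters):
    # Sorted for a deterministic query string.
    return ' and '.join('{0}={1}'.format(k, v) for k, v in sorted(filters.items()))

assert build_pattern(name='production*') == 'name=production*'
assert build_pattern(name='X', datacenter='Y') == 'datacenter=Y and name=X'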
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py
new file mode 100644
index 00000000..0de72729
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter_facts
+short_description: Retrieve information about one or more oVirt/RHV datacenters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_datacenter_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV datacenters."
+notes:
+ - "This module returns a variable C(ovirt_datacenters), which
+ contains a list of datacenters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search datacenter I(X) use following pattern: I(name=X)"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all data centers whose names start with production
+ ovirt_datacenter_info:
+ pattern: name=production*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_datacenters }}"
+'''
+
+RETURN = '''
+ovirt_datacenters:
+ description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
+ all datacenters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_datacenter_facts', 'community.general.ovirt_datacenter_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_datacenter_facts' module has been renamed to 'ovirt_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ datacenters = datacenters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_datacenters=[
+ get_dict_of_struct(
+ struct=d,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for d in datacenters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
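Each of these modules follows the same connection lifecycle: pop auth from the parameters, open a connection, do the work, and always close the connection in a finally block, logging out only when no reusable token was supplied. A sketch of that pattern as a standalone helper (with_connection is hypothetical; create_connection is the module_utils function used above):

def with_connection(params, create_connection, work):
    auth = params.pop('auth')
    connection = create_connection(auth)
    try:
        return work(connection)
    finally:
        # A caller-supplied token must stay valid; only log out ad-hoc sessions.
        connection.close(logout=auth.get('token') is None)

# Usage sketch:
# with_connection(module.params, create_connection,
#                 lambda conn: conn.system_service().get())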
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py
new file mode 100644
index 00000000..6e0c9f69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk_facts
+short_description: Retrieve information about one or more oVirt/RHV disks
+author: "Katerina Koukiou (@KKoukiou)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_disk_info) instead
+description:
+ - "Retrieve information about one or more oVirt/RHV disks."
+notes:
+ - "This module returns a variable C(ovirt_disks), which
+ contains a list of disks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search Disk X from storage Y use following pattern:
+ name=X and storage.name=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all disks whose names start with centos
+ ovirt_disk_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_disks }}"
+'''
+
+RETURN = '''
+ovirt_disks:
+ description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys,
+ all Disks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_disk_facts', 'community.general.ovirt_disk_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_disk_facts' module has been renamed to 'ovirt_disk_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks = disks_service.list(
+ search=module.params['pattern'],
+ )
+ result = dict(
+ ovirt_disks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in disks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py
new file mode 100644
index 00000000..50a20654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event_facts
+short_description: Retrieve information about one or more oVirt/RHV events
+author: "Chris Keller (@nasx)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_event_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV events."
+options:
+ case_sensitive:
+ description:
+ - "Indicates if the search performed using the search parameter should be performed taking case
+ into account. The default value is true, which means that case is taken into account. If you
+ want to search ignoring case set it to false."
+ required: false
+ default: true
+ type: bool
+
+ from_:
+ description:
+ - "Indicates the event index after which events should be returned. The indexes of events are
+ strictly increasing, so when this parameter is used only the events with greater indexes
+ will be returned."
+ required: false
+ type: int
+
+ max:
+ description:
+ - "Sets the maximum number of events to return. If not specified all the events are returned."
+ required: false
+ type: int
+
+ search:
+ description:
+ - "Search term which is accepted by the oVirt/RHV API."
+ - "For example to search for events of severity alert use the following pattern: severity=alert"
+ required: false
+ type: str
+
+ headers:
+ description:
+ - "Additional HTTP headers."
+ required: false
+ type: str
+
+ query:
+ description:
+ - "Additional URL query parameters."
+ required: false
+ type: str
+
+ wait:
+ description:
+ - "If True wait for the response."
+ required: false
+ default: true
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Return all events
+ ovirt_event_info:
+ register: result
+
+- name: Return the last 10 events
+ ovirt_event_info:
+ max: 10
+ register: result
+
+- name: Return all events of type alert
+ ovirt_event_info:
+ search: "severity=alert"
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_events }}"
+'''
+
+RETURN = '''
+ovirt_events:
+ description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys.
+ All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+ returned: On success."
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ case_sensitive=dict(default=True, type='bool', required=False),
+ from_=dict(default=None, type='int', required=False),
+ max=dict(default=None, type='int', required=False),
+ search=dict(default='', required=False),
+ headers=dict(default='', required=False),
+ query=dict(default='', required=False),
+ wait=dict(default=True, type='bool', required=False)
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_event_facts', 'community.general.ovirt_event_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_event_facts' module has been renamed to 'ovirt_event_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
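+ # All search/paging options are forwarded verbatim to the SDK's list()
+ # call; 'from_' carries a trailing underscore to avoid shadowing the
+ # Python keyword 'from'.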
+ events = events_service.list(
+ case_sensitive=module.params['case_sensitive'],
+ from_=module.params['from_'],
+ max=module.params['max'],
+ search=module.params['search'],
+ headers=module.params['headers'],
+ query=module.params['query'],
+ wait=module.params['wait']
+ )
+
+ result = dict(
+ ovirt_events=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in events
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py
new file mode 100644
index 00000000..f9ac8b97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider_facts
+short_description: Retrieve information about one or more oVirt/RHV external providers
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_external_provider_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV external providers."
+notes:
+ - "This module returns a variable C(ovirt_external_providers), which
+ contains a list of external_providers. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ required: true
+ aliases: ['provider']
+ type: str
+ name:
+ description:
+ - "Name of the external provider, can be used as glob expression."
+ type: str
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all image external providers named glance
+ ovirt_external_provider_info:
+ type: os_image
+ name: glance
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_external_providers }}"
+'''
+
+RETURN = '''
+ovirt_external_providers:
+ description:
+ - "List of dictionaries. Content depends on I(type)."
+ - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
+ - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
+ - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: On success
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
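+# Map the 'type' option to the matching top-level oVirt service. An unknown
+# type returns None, which AnsibleModule's 'choices' validation rules out.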
+def _external_provider_service(provider_type, system_service):
+ if provider_type == 'os_image':
+ return system_service.openstack_image_providers_service()
+ elif provider_type == 'os_network':
+ return system_service.openstack_network_providers_service()
+ elif provider_type == 'os_volume':
+ return system_service.openstack_volume_providers_service()
+ elif provider_type == 'foreman':
+ return system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None, required=False),
+ type=dict(
+ required=True,
+ choices=['os_image', 'os_network', 'os_volume', 'foreman'],
+ aliases=['provider'],
+ ),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_external_provider_facts', 'community.general.ovirt_external_provider_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_external_provider_facts' module has been renamed to 'ovirt_external_provider_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ external_providers_service = _external_provider_service(
+ provider_type=module.params.pop('type'),
+ system_service=connection.system_service(),
+ )
+ if module.params['name']:
+ external_providers = [
+ e for e in external_providers_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ external_providers = external_providers_service.list()
+
+ result = dict(
+ ovirt_external_providers=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in external_providers
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py
new file mode 100644
index 00000000..40b037f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group_facts
+short_description: Retrieve information about one or more oVirt/RHV groups
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_group_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV groups."
+notes:
+ - "This module returns a variable C(ovirt_groups), which
+ contains a list of groups. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search group X use following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all groups whose names start with admin
+ ovirt_group_info:
+ pattern: name=admin*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_groups }}"
+'''
+
+RETURN = '''
+ovirt_groups:
+ description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
+ all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_group_facts', 'community.general.ovirt_group_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_group_facts' module has been renamed to 'ovirt_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups = groups_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_groups=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in groups
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py
new file mode 100644
index 00000000..ea585e90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_facts
+short_description: Retrieve information about one or more oVirt/RHV hosts
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV hosts."
+notes:
+ - "This module returns a variable C(ovirt_hosts), which
+ contains a list of hosts. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search host X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the hosts should be
+ included in the response."
+ default: False
+ type: bool
+ cluster_version:
+ description:
+ - "Filter the hosts based on the cluster version."
+ type: str
+
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all hosts whose names start with host and belong to datacenter west
+ ovirt_host_info:
+ pattern: name=host* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+
+- name: Gather information about all hosts with cluster version 4.2
+ ovirt_host_info:
+ pattern: name=host*
+ cluster_version: "4.2"
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+'''
+
+RETURN = '''
+ovirt_hosts:
+ description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
+ all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def get_filtered_hosts(cluster_version, hosts, connection):
+ # Return only the hosts whose cluster reports the same 'major.minor'
+ # version as the requested cluster_version string.
+ filtered_hosts = []
+ for host in hosts:
+ cluster = connection.follow_link(host.cluster)
+ cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor)
+ if cluster_version_host == cluster_version:
+ filtered_hosts.append(host)
+ return filtered_hosts
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ cluster_version=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_facts', 'community.general.ovirt_host_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_facts' module has been renamed to 'ovirt_host_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ hosts = hosts_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content']
+ )
+ cluster_version = module.params.get('cluster_version')
+ if cluster_version is not None:
+ hosts = get_filtered_hosts(cluster_version, hosts, connection)
+ result = dict(
+ ovirt_hosts=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in hosts
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py
new file mode 100644
index 00000000..62af3e4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_storage_facts
+short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+author: "Daniel Erez (@derez)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_storage_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)."
+options:
+ host:
+ description:
+ - "Host to get device list from."
+ required: true
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ portal:
+ description:
+ - "The portal being used to connect with iscsi."
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the fibre channel storage server."
+ port:
+ description:
+ - "Port of the fibre channel storage server."
+ lun_id:
+ description:
+ - "LUN id."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about HostStorages with specified target and address
+ ovirt_host_storage_info:
+ host: myhost
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ address: 10.34.63.204
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_host_storages }}"
+'''
+
+RETURN = '''
+ovirt_host_storages:
+ description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys,
+ all HostStorage attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name,
+)
+
+
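+# Log the host into the given iSCSI target so that its LUNs become visible
+# before the storage list is queried.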
+def _login(host_service, iscsi):
+ host_service.iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=iscsi.get('username'),
+ password=iscsi.get('password'),
+ address=iscsi.get('address'),
+ target=iscsi.get('target'),
+ portal=iscsi.get('portal')
+ ),
+ )
+
+
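+# Return the first storage option ('iscsi' before 'fcp') that was supplied,
+# or None when neither is present.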
+def _get_storage_type(params):
+ for sd_type in ['iscsi', 'fcp']:
+ if params.get(sd_type) is not None:
+ return sd_type
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ host=dict(required=True),
+ iscsi=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_storage_facts', 'community.general.ovirt_host_storage_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_storage_facts' module has been renamed to 'ovirt_host_storage_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ # Get Host
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, module.params['host'])
+ storage_type = _get_storage_type(module.params)
+ host_service = hosts_service.host_service(host_id)
+
+ if storage_type == 'iscsi':
+ # Login
+ iscsi = module.params.get('iscsi')
+ _login(host_service, iscsi)
+
+ # Get LUNs exposed from the specified target
+ host_storages = host_service.storage_service().list()
+
+ if storage_type == 'iscsi':
+ filtered_host_storages = [host_storage for host_storage in host_storages
+ if host_storage.type == otypes.StorageType.ISCSI]
+ if 'target' in iscsi:
+ filtered_host_storages = [host_storage for host_storage in filtered_host_storages
+ if iscsi.get('target') == host_storage.logical_units[0].target]
+ elif storage_type == 'fcp':
+ filtered_host_storages = [host_storage for host_storage in host_storages
+ if host_storage.type == otypes.StorageType.FCP]
+ else:
+ # Neither 'iscsi' nor 'fcp' was supplied; fall back to the unfiltered
+ # list instead of leaving the variable unbound.
+ filtered_host_storages = host_storages
+
+ result = dict(
+ ovirt_host_storages=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in filtered_host_storages
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py
new file mode 100644
index 00000000..781dd858
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network_facts
+short_description: Retrieve information about one or more oVirt/RHV networks
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_network_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV networks."
+notes:
+ - "This module returns a variable C(ovirt_networks), which
+ contains a list of networks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search network starting with string vlan1 use: name=vlan1*"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all networks whose names start with vlan1
+ ovirt_network_info:
+ pattern: name=vlan1*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_networks }}"
+'''
+
+
+RETURN = '''
+ovirt_networks:
+ description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+ all networks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_network_facts', 'community.general.ovirt_network_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_network_facts' module has been renamed to 'ovirt_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ networks_service = connection.system_service().networks_service()
+ networks = networks_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_networks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in networks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py
new file mode 100644
index 00000000..2cc1194f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_nic_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces."
+notes:
+ - "This module returns a variable C(ovirt_nics), which
+ contains a list of NICs. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM where NIC is attached."
+ required: true
+ name:
+ description:
+ - "Name of the NIC, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all NICs whose names start with eth for the VM named centos7
+ ovirt_nic_info:
+ vm: centos7
+ name: eth*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_nics }}"
+'''
+
+RETURN = '''
+ovirt_nics:
+ description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+ all NICs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_nic_facts', 'community.general.ovirt_nic_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_nic_facts' module has been renamed to 'ovirt_nic_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ nics_service = vms_service.service(vm.id).nics_service()
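+ # Glob matching on the NIC name is done client-side: fetch every NIC of
+ # the VM and filter the list with fnmatch.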
+ if module.params['name']:
+ nics = [
+ e for e in nics_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ nics = nics_service.list()
+
+ result = dict(
+ ovirt_nics=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in nics
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py
new file mode 100644
index 00000000..52ba3624
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission_facts
+short_description: Retrieve information about one or more oVirt/RHV permissions
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_permission_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV permissions."
+notes:
+ - "This module returns a variable C(ovirt_permissions), which
+ contains a list of permissions. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ user_name:
+ description:
+ - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ group_name:
+ description:
+ - "Name of the group to manage."
+ authz_name:
+ description:
+ - "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where user/group resides."
+ required: false
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all permissions of user with username john
+ ovirt_permission_info:
+ user_name: john
+ authz_name: example.com-authz
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_permissions }}"
+'''
+
+RETURN = '''
+ovirt_permissions:
+ description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
+ all permissions attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_link_name,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
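+# Resolve the user (user_name) or group (group_name) entity and return the
+# permissions sub-service attached to it.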
+def _permissions_service(connection, module):
+ if module.params['user_name']:
+ service = connection.system_service().users_service()
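+ # Note: 'usrname' (not 'username') is the literal key the oVirt search
+ # backend expects for user lookups.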
+ entity = next(
+ iter(
+ service.list(
+ search='usrname={0}'.format(
+ '{0}@{1}'.format(module.params['user_name'], module.params['authz_name'])
+ )
+ )
+ ),
+ None
+ )
+ else:
+ service = connection.system_service().groups_service()
+ entity = search_by_name(service, module.params['group_name'])
+
+ if entity is None:
+ raise Exception("User/Group wasn't found.")
+
+ return service.service(entity.id).permissions_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ authz_name=dict(required=True, aliases=['domain']),
+ user_name=dict(default=None),
+ group_name=dict(default=None),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_permission_facts', 'community.general.ovirt_permission_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_permission_facts' module has been renamed to 'ovirt_permission_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ permissions_service = _permissions_service(connection, module)
+ permissions = []
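+ # SDK struct attributes are stored with a leading underscore, which
+ # key[1:] strips; each linked struct is flattened into its readable
+ # name plus an '<attribute>_id' entry.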
+ for p in permissions_service.list():
+ newperm = dict()
+ for key, value in p.__dict__.items():
+ if value and isinstance(value, sdk.Struct):
+ newperm[key[1:]] = get_link_name(connection, value)
+ newperm['%s_id' % key[1:]] = value.id
+ permissions.append(newperm)
+
+ result = dict(ovirt_permissions=permissions)
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py
new file mode 100644
index 00000000..b2424305
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota_facts
+short_description: Retrieve information about one or more oVirt/RHV quotas
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_quota_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV quotas."
+notes:
+ - "This module returns a variable C(ovirt_quotas), which
+ contains a list of quotas. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ data_center:
+ description:
+ - "Name of the datacenter where quota resides."
+ required: true
+ name:
+ description:
+ - "Name of the quota, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about quota named C(myquota) in the Default datacenter
+ ovirt_quota_info:
+ data_center: Default
+ name: myquota
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_quotas }}"
+'''
+
+RETURN = '''
+ovirt_quotas:
+ description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
+ all quotas attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ data_center=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_quota_facts', 'community.general.ovirt_quota_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_quota_facts' module has been renamed to 'ovirt_quota_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc = search_by_name(datacenters_service, dc_name)
+ if dc is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc.id).quotas_service()
+ if module.params['name']:
+ quotas = [
+ e for e in quotas_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ quotas = quotas_service.list()
+
+ result = dict(
+ ovirt_quotas=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in quotas
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py
new file mode 100644
index 00000000..eeaeb610
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_scheduling_policy_facts
+short_description: Retrieve information about one or more oVirt scheduling policies
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_scheduling_policy_info) instead.
+description:
+ - "Retrieve information about one or more oVirt scheduling policies."
+notes:
+ - "This module returns a variable C(ovirt_scheduling_policies),
+ which contains a list of scheduling policies. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ id:
+ description:
+ - "ID of the scheduling policy."
+ name:
+ description:
+ - "Name of the scheduling policy, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all scheduling policies with name InClusterUpgrade
+ ovirt_scheduling_policy_info:
+ name: InClusterUpgrade
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_scheduling_policies }}"
+'''
+
+RETURN = '''
+ovirt_scheduling_policies:
+ description: "List of dictionaries describing the scheduling policies.
+ Scheduling policies attributes are mapped to dictionary keys,
+ all scheduling policies attributes can be found at following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ id=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_scheduling_policy_facts', 'community.general.ovirt_scheduling_policy_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_scheduling_policy_facts' module has been renamed to 'ovirt_scheduling_policy_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ system_service = connection.system_service()
+ sched_policies_service = system_service.scheduling_policies_service()
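+ # 'name' takes precedence over 'id'; when neither is supplied, every
+ # scheduling policy is returned.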
+ if module.params['name']:
+ sched_policies = [
+ e for e in sched_policies_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ elif module.params['id']:
+ sched_policies = [
+ sched_policies_service.service(module.params['id']).get()
+ ]
+ else:
+ sched_policies = sched_policies_service.list()
+
+ result = dict(
+ ovirt_scheduling_policies=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in sched_policies
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py
new file mode 100644
index 00000000..73746883
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collections we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_snapshot_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine snapshots."
+notes:
+ - "This module returns a variable C(ovirt_snapshots), which
+ contains a list of snapshots. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM with snapshot."
+ required: true
+ description:
+ description:
+ - "Description of the snapshot, can be used as glob expression."
+ snapshot_id:
+ description:
+ - "Id of the snapshot we want to retrieve information about."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity;
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all snapshots whose descriptions start with update for the VM named centos7
+ ovirt_snapshot_info:
+ vm: centos7
+ description: update*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_snapshots }}"
+'''
+
+RETURN = '''
+ovirt_snapshots:
+ description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys,
+ all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success.
+ type: list
+'''
+
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ description=dict(default=None),
+ snapshot_id=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_snapshot_facts', 'community.general.ovirt_snapshot_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_snapshot_facts' module has been renamed to 'ovirt_snapshot_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ snapshots_service = vms_service.service(vm.id).snapshots_service()
+ if module.params['description']:
+ snapshots = [
+ e for e in snapshots_service.list()
+ if fnmatch.fnmatch(e.description, module.params['description'])
+ ]
+ elif module.params['snapshot_id']:
+ snapshots = [
+ snapshots_service.snapshot_service(module.params['snapshot_id']).get()
+ ]
+ else:
+ snapshots = snapshots_service.list()
+
+ result = dict(
+ ovirt_snapshots=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in snapshots
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
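
The description filter above is shell-style globbing via fnmatch, not a regular expression. A self-contained sketch of the same matching step, with the snapshot list stubbed so no engine is needed:

    import fnmatch
    from collections import namedtuple

    Snapshot = namedtuple('Snapshot', ['description'])
    snapshots = [
        Snapshot('update 2020-01'),
        Snapshot('pre-upgrade'),
        Snapshot('update 2020-02'),
    ]

    # 'update*' matches like a shell glob, so both update snapshots pass.
    matched = [s for s in snapshots
               if fnmatch.fnmatch(s.description, 'update*')]
    print([s.description for s in matched])  # ['update 2020-01', 'update 2020-02']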
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py
new file mode 100644
index 00000000..b9d814c1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain_facts
+short_description: Retrieve information about one or more oVirt/RHV storage domains
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_domain_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV storage domains."
+notes:
+ - "This module returns a variable C(ovirt_storage_domains), which
+ contains a list of storage domains. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search storage domain X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: >
+ Gather information about all storage domains which names
+ start with data and belong to data center west
+ ovirt_storage_domain_info:
+ pattern: name=data* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_domains }}"
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+ description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys,
+ all storage domains attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_domain_facts', 'community.general.ovirt_storage_domain_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_domain_facts' module has been renamed to 'ovirt_storage_domain_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains = storage_domains_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_storage_domains=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in storage_domains
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
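
The pattern option is handed straight to the engine's search backend, so filtering happens server-side rather than in Python. A hedged sketch of the equivalent direct SDK call (URL and credentials are placeholders):

    import ovirtsdk4

    connection = ovirtsdk4.Connection(
        url='https://engine.example.com/ovirt-engine/api',  # placeholder
        username='admin@internal',
        password='secret',
        insecure=True,
    )
    try:
        sds_service = connection.system_service().storage_domains_service()
        # The search string uses the engine's own query language, exactly
        # like the module's 'pattern' option; nothing is filtered client-side.
        for sd in sds_service.list(search='name=data* and datacenter=west'):
            print(sd.name)
    finally:
        connection.close()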
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py
new file mode 100644
index 00000000..1c583278
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_template_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV templates relate to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered templates which contain one or more
+ disks which reside on a storage domain or diskless templates."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of templates to return. If not specified all the templates are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the templates should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all unregistered templates related to a storage domain
+ ovirt_storage_template_info:
+ unregistered: yes
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_templates }}"
+'''
+
+RETURN = '''
+ovirt_storage_templates:
+ description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
+ all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_template_facts', 'community.general.ovirt_storage_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_template_facts' module has been renamed to 'ovirt_storage_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # Find the unregistered Template we want to register:
+ if module.params.get('unregistered'):
+ templates = templates_service.list(unregistered=True)
+ else:
+ templates = templates_service.list(max=module.params['max'])
+ result = dict(
+ ovirt_storage_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
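
The unregistered/max branch above is easy to miss: max is applied only on the registered path, and the unregistered listing ignores it. A small sketch that isolates that branch; templates_service here is assumed to be the SDK's storage-domain templates service:

    def list_templates(templates_service, unregistered=False, max_results=None):
        if unregistered:
            # Templates whose disks live on the domain but are not known to
            # the engine; candidates for re-registration.
            return templates_service.list(unregistered=True)
        # Registered templates, optionally capped server-side; note the cap
        # is not applied on the unregistered path.
        return templates_service.list(max=max_results)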
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py
new file mode 100644
index 00000000..d0247948
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_vm_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines relate to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered virtual machines which contain one or more
+ disks which reside on a storage domain or diskless virtual machines."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of virtual machines to return. If not specified all the virtual machines are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the virtual machines should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all unregistered VMs related to a storage domain
+ ovirt_storage_vm_info:
+ unregistered: yes
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_vms }}"
+'''
+
+RETURN = '''
+ovirt_storage_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VM attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_vm_facts', 'community.general.ovirt_storage_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_vm_facts' module has been renamed to 'ovirt_storage_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # Find the unregistered VM we want to register:
+ if module.params.get('unregistered'):
+ vms = vms_service.list(unregistered=True)
+ else:
+ vms = vms_service.list()
+ result = dict(
+ ovirt_storage_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py
new file mode 100644
index 00000000..c6e9b744
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag_facts
+short_description: Retrieve information about one or more oVirt/RHV tags
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_tag_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV tags."
+notes:
+ - "This module returns a variable C(ovirt_tags), which
+ contains a list of tags. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the tag which should be listed."
+ vm:
+ description:
+ - "Name of the VM, which tags should be listed."
+ host:
+ description:
+ - "Name of the host, which tags should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all tags whose names start with tag
+ ovirt_tag_info:
+ name: tag*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags which are assigned to VM postgres
+ ovirt_tag_info:
+ vm: postgres
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags which are assigned to host west
+ ovirt_tag_info:
+ host: west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+'''
+
+RETURN = '''
+ovirt_tags:
+ description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys,
+ all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_tag_facts', 'community.general.ovirt_tag_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_tag_facts' module has been renamed to 'ovirt_tag_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags = []
+ all_tags = tags_service.list()
+ if module.params['name']:
+ tags.extend([
+ t for t in all_tags
+ if fnmatch.fnmatch(t.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['host'])
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ tags.extend([
+ tag for tag in hosts_service.host_service(host.id).tags_service().list()
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ vm = search_by_name(vms_service, module.params['vm'])
+ if vm is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ tags.extend([
+ tag for tag in vms_service.vm_service(vm.id).tags_service().list()
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ tags = all_tags
+
+ result = dict(
+ ovirt_tags=[
+ get_dict_of_struct(
+ struct=t,
+ connection=connection,
+ fetch_nested=module.params['fetch_nested'],
+ attributes=module.params['nested_attributes'],
+ ) for t in tags
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
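
Because the name, host, and vm filters each extend the same list, a tag matched by more than one filter is reported more than once. The module does not deduplicate; a caller could, for example by id, as in this sketch (illustration only, not part of the module):

    def unique_by_id(tags):
        # SDK tag structs carry an 'id' attribute; keep the first occurrence.
        seen = set()
        result = []
        for tag in tags:
            if tag.id not in seen:
                seen.add(tag.id)
                result.append(tag)
        return result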
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py
new file mode 100644
index 00000000..7595c64a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_template_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV templates."
+notes:
+ - "This module returns a variable C(ovirt_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search template X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all templates whose names start with centos and belong to data center west
+ ovirt_template_info:
+ pattern: name=centos* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_templates }}"
+'''
+
+RETURN = '''
+ovirt_templates:
+ description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
+ all template attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_template_facts', 'community.general.ovirt_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_template_facts' module has been renamed to 'ovirt_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates = templates_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py
new file mode 100644
index 00000000..ce7ab8d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user_facts
+short_description: Retrieve information about one or more oVirt/RHV users
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_user_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV users."
+notes:
+ - "This module returns a variable C(ovirt_users), which
+ contains a list of users. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search user X use following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all users whose first names start with john
+ ovirt_user_info:
+ pattern: name=john*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_users }}"
+'''
+
+RETURN = '''
+ovirt_users:
+ description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys,
+ all user attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_user_facts', 'community.general.ovirt_user_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_user_facts' module has been renamed to 'ovirt_user_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users = users_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_users=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in users
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py
new file mode 100644
index 00000000..a5182755
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vm_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines."
+notes:
+ - "This module returns a variable C(ovirt_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search VM X from cluster Y use following pattern:
+ name=X and cluster=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the virtual machines should be
+ included in the response."
+ type: bool
+ default: false
+ case_sensitive:
+ description:
+ - "If I(true) performed search will take case into account."
+ type: bool
+ default: true
+ max:
+ description:
+ - "The maximum number of results to return."
+ next_run:
+ description:
+ - "Indicates if the returned result describes the virtual machine as it is currently running or if describes
+ the virtual machine with the modifications that have already been performed but that will only come into
+ effect when the virtual machine is restarted. By default the value is set by engine."
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VMs which names start with centos and belong to cluster west
+ ovirt_vm_info:
+ pattern: name=centos* and cluster=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms }}"
+
+- name: Gather info about next run configuration of virtual machine named myvm
+ ovirt_vm_info:
+ pattern: name=myvm
+ next_run: true
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms[0] }}"
+'''
+
+RETURN = '''
+ovirt_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VM attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ next_run=dict(default=None, type='bool'),
+ case_sensitive=dict(default=True, type='bool'),
+ max=dict(default=None, type='int'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vm_facts', 'community.general.ovirt_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vm_facts' module has been renamed to 'ovirt_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms = vms_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ case_sensitive=module.params['case_sensitive'],
+ max=module.params['max'],
+ )
+ if module.params['next_run']:
+ vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
+
+ result = dict(
+ ovirt_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
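
When next_run is true, the module re-fetches every VM from the search result individually, which costs one extra API round trip per VM. The pattern, isolated; vms_service is assumed to be an oVirt SDK VMs service and vms a prior search result:

    def with_next_run(vms_service, vms):
        # The search returns current configurations; each GET with
        # next_run=True returns the pending configuration instead.
        # One extra GET per VM, so this dominates runtime on large results.
        return [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]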
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py
new file mode 100644
index 00000000..24842be5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool_facts
+short_description: Retrieve information about one or more oVirt/RHV vmpools
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vmpool_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV vmpools."
+notes:
+ - "This module returns a variable C(ovirt_vmpools), which
+ contains a list of vmpools. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search vmpool X: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VM pools whose names start with centos
+ ovirt_vmpool_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vm_pools }}"
+'''
+
+RETURN = '''
+ovirt_vm_pools:
+ description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys,
+ all vmpools attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vmpool_facts', 'community.general.ovirt_vmpool_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vmpool_facts' module has been renamed to 'ovirt_vmpool_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vmpools_service = connection.system_service().vm_pools_service()
+ vmpools = vmpools_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_vm_pools=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vmpools
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
new file mode 100644
index 00000000..4ec6010f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Mathieu Bultel <mbultel@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacemaker_cluster
+short_description: Manage pacemaker clusters
+author:
+- Mathieu Bultel (@matbu)
+description:
+ - This module can manage a pacemaker cluster and nodes from Ansible using
+ the pacemaker CLI.
+options:
+ state:
+ description:
+ - Indicate desired state of the cluster
+ choices: [ cleanup, offline, online, restart ]
+ type: str
+ node:
+ description:
+ - Specify which node of the cluster you want to manage. When unset, the
+ cluster status itself is managed; C(all) checks the status of all nodes.
+ type: str
+ timeout:
+ description:
+ - Timeout, in seconds, after which the module considers that the action has failed
+ default: 300
+ type: int
+ force:
+ description:
+ - Force the change of the cluster state
+ type: bool
+ default: 'yes'
+'''
+EXAMPLES = '''
+---
+- name: Set cluster Online
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Get cluster state
+ community.general.pacemaker_cluster:
+ state: online
+'''
+
+RETURN = '''
+changed:
+ description: True if the cluster state has changed
+ type: bool
+ returned: always
+out:
+ description: The output of the current state of the cluster. It returns a
+ list of the node states.
+ type: str
+ sample: '[[" overcloud-controller-0", " Online"]]'
+ returned: always
+rc:
+ description: Exit code of the module
+ type: int
+ returned: always
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+
+
+def get_cluster_status(module):
+ cmd = "pcs cluster status"
+ rc, out, err = module.run_command(cmd)
+ if _PCS_CLUSTER_DOWN in out:
+ return 'offline'
+ else:
+ return 'online'
+
+
+def get_node_status(module, node='all'):
+ if node == 'all':
+ cmd = "pcs cluster pcsd-status"
+ else:
+ cmd = "pcs cluster pcsd-status %s" % node
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ status = []
+ for o in out.splitlines():
+ status.append(o.split(':'))
+ return status
+
+
+def clean_cluster(module, timeout):
+ cmd = "pcs resource cleanup"
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+
+def set_cluster(module, state, timeout, force):
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def set_node(module, state, timeout, force, node='all'):
+ # map states
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+
+ nodes_state = get_node_status(module, node)
+ for node in nodes_state:
+ if node[1].strip().lower() != state:
+ cmd = "%s %s" % (cmd, node[0].strip())
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ nodes_state = get_node_status(module)
+ for node in nodes_state:
+ if node[1].strip().lower() == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
+ node=dict(type='str'),
+ timeout=dict(type='int', default=300),
+ force=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ changed = False
+ state = module.params['state']
+ node = module.params['node']
+ force = module.params['force']
+ timeout = module.params['timeout']
+
+ if state in ['online', 'offline']:
+ # Get cluster status
+ if node is None:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Fail to bring the cluster %s" % state)
+ else:
+ cluster_state = get_node_status(module, node)
+ # Check cluster state
+ for node_state in cluster_state:
+ if node_state[1].strip().lower() == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ # Set cluster status if needed
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_node_status(module, node)
+ module.exit_json(changed=True, out=cluster_state)
+
+ if state in ['restart']:
+ set_cluster(module, 'offline', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'offline':
+ set_cluster(module, 'online', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'online':
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
+
+ if state in ['cleanup']:
+ clean_cluster(module, timeout)
+ cluster_state = get_cluster_status(module)
+ module.exit_json(changed=True,
+ out=cluster_state)
+
+
+if __name__ == '__main__':
+ main()
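
set_cluster() and set_node() both wait for convergence with a bare while loop that polls pcs as fast as it can. A generic form of the same wait, with a short sleep added (the sleep is an assumption, not something the module does):

    import time

    def wait_for(predicate, timeout, interval=2):
        # Poll until predicate() is true or the timeout elapses; returns
        # whether the condition was met in time.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(interval)  # not in the module; avoids hammering pcs
        return False

    # Usage sketch against the functions above:
    # wait_for(lambda: get_cluster_status(module) == 'online', timeout=300)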
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py
new file mode 100644
index 00000000..911d99b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bower
+short_description: Manage bower packages with bower
+description:
+ - Manage bower packages with bower
+author: "Michael Warkentin (@mwarkentin)"
+options:
+ name:
+ type: str
+ description:
+ - The name of a bower package to install
+ offline:
+ description:
+ - Install packages from local cache, if the packages were installed before
+ type: bool
+ default: 'no'
+ production:
+ description:
+ - Install with --production flag
+ type: bool
+ default: 'no'
+ path:
+ type: path
+ description:
+ - The base path where the bower packages will be installed
+ required: true
+ relative_execpath:
+ type: path
+ description:
+ - Relative path to bower executable from install path
+ state:
+ type: str
+ description:
+ - The state of the bower package
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ version:
+ type: str
+ description:
+ - The version to be installed
+'''
+
+EXAMPLES = '''
+- name: Install "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+
+- name: Install "bootstrap" bower package on version 3.1.1.
+ community.general.bower:
+ name: bootstrap
+ version: '3.1.1'
+
+- name: Remove the "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+ state: absent
+
+- name: Install packages based on bower.json.
+ community.general.bower:
+ path: /app/location
+
+- name: Update packages based on bower.json to their latest version.
+ community.general.bower:
+ path: /app/location
+ state: latest
+
+# install bower locally and run from there
+- npm:
+ path: /app/location
+ name: bower
+ global: no
+- community.general.bower:
+ path: /app/location
+ relative_execpath: node_modules/.bin
+'''
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bower(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs['name']
+ self.offline = kwargs['offline']
+ self.production = kwargs['production']
+ self.path = kwargs['path']
+ self.relative_execpath = kwargs['relative_execpath']
+ self.version = kwargs['version']
+
+ if kwargs['version']:
+ self.name_version = self.name + '#' + self.version
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+ if not self.module.check_mode or run_in_check_mode:
+ cmd = []
+
+ if self.relative_execpath:
+ cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
+ if not os.path.isfile(cmd[-1]):
+ self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
+ else:
+ cmd.append("bower")
+
+ cmd.extend(args)
+ cmd.extend(['--config.interactive=false', '--allow-root'])
+
+ if self.name:
+ cmd.append(self.name_version)
+
+ if self.offline:
+ cmd.append('--offline')
+
+ if self.production:
+ cmd.append('--production')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json']
+
+ installed = list()
+ missing = list()
+ outdated = list()
+ data = json.loads(self._exec(cmd, True, False))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ dep_data = data['dependencies'][dep]
+ if dep_data.get('missing', False):
+ missing.append(dep)
+ elif ('version' in dep_data['pkgMeta'] and
+ 'update' in dep_data and
+ dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
+ outdated.append(dep)
+ elif dep_data.get('incompatible', False):
+ outdated.append(dep)
+ else:
+ installed.append(dep)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing, outdated
+
+ def install(self):
+ return self._exec(['install'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ offline=dict(default=False, type='bool'),
+ production=dict(default=False, type='bool'),
+ path=dict(required=True, type='path'),
+ relative_execpath=dict(default=None, required=False, type='path'),
+ state=dict(default='present', choices=['present', 'absent', 'latest', ]),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec
+ )
+
+ name = module.params['name']
+ offline = module.params['offline']
+ production = module.params['production']
+ path = module.params['path']
+ relative_execpath = module.params['relative_execpath']
+ state = module.params['state']
+ version = module.params['version']
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
+
+ changed = False
+ if state == 'present':
+ installed, missing, outdated = bower.list()
+ if missing:
+ changed = True
+ bower.install()
+ elif state == 'latest':
+ installed, missing, outdated = bower.list()
+ if missing or outdated:
+ changed = True
+ bower.update()
+ else: # Absent
+ installed, missing, outdated = bower.list()
+ if name in installed:
+ changed = True
+ bower.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
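
Bower.list() classifies dependencies into installed, missing, and outdated from the JSON that `bower list --json` prints. The same classification against a canned payload, so no bower binary is needed to follow the logic:

    import json

    # Hand-written payload in the shape Bower.list() expects; a real run
    # would come from `bower list --json --config.interactive=false --allow-root`.
    payload = json.loads('''
    {"dependencies": {
        "bootstrap": {"pkgMeta": {"version": "3.1.0"},
                      "update": {"latest": "3.1.1"}},
        "jquery": {"missing": true}
    }}
    ''')

    missing = [name for name, dep in payload['dependencies'].items()
               if dep.get('missing', False)]
    outdated = [name for name, dep in payload['dependencies'].items()
                if 'version' in dep.get('pkgMeta', {})
                and 'update' in dep
                and dep['pkgMeta']['version'] != dep['update']['latest']]
    print(missing)   # ['jquery']
    print(outdated)  # ['bootstrap']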
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py
new file mode 100644
index 00000000..8be17d6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bundler
+short_description: Manage Ruby Gem dependencies with Bundler
+description:
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+options:
+ executable:
+ type: str
+ description:
+ - The path to the bundler executable
+ state:
+ type: str
+ description:
+ - The desired state of the Gem bundle. C(latest) updates gems to the most recent acceptable version
+ choices: [present, latest]
+ default: present
+ chdir:
+ type: path
+ description:
+ - The directory to execute the bundler commands from. This directory
+ needs to contain a valid Gemfile or .bundle/ directory
+ - If not specified, it will default to the temporary working directory
+ exclude_groups:
+ type: list
+ description:
+ - A list of Gemfile groups to exclude during operations. This only
+ applies when state is C(present). Bundler considers this
+ a 'remembered' property for the Gemfile and will automatically exclude
+ groups in future operations even if C(exclude_groups) is not set
+ clean:
+ description:
+ - Only applies if state is C(present). If set, removes any gems on the
+ target host that are not in the Gemfile
+ type: bool
+ default: 'no'
+ gemfile:
+ type: path
+ description:
+ - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ - If not specified it will default to the Gemfile in current directory
+ local:
+ description:
+        - If set, only installs gems from the cache on the target host.
+ type: bool
+ default: 'no'
+ deployment_mode:
+ description:
+        - Only applies if state is C(present). If set, it will install gems in
+          ./vendor/bundle instead of the default location. Requires a Gemfile.lock
+          file to have been created beforehand.
+ type: bool
+ default: 'no'
+ user_install:
+ description:
+        - Only applies if state is C(present). If set, installs gems in the
+          user's local cache; otherwise installs them system-wide for all users.
+ type: bool
+ default: 'yes'
+ gem_path:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install the gems into. If C(chdir) is set then this path is relative to
+ C(chdir)
+ - If not specified the default RubyGems gem paths will be used.
+ binstub_directory:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install any gem bins files to. When executed the bin files will run
+ within the context of the Gemfile and fail if any required gem
+ dependencies are not installed. If C(chdir) is set then this path is
+ relative to C(chdir)
+ extra_args:
+ type: str
+ description:
+        - A space-separated string of additional arguments to pass to the
+          bundler command. Refer to the Bundler documentation for more
+          information.
+author: "Tim Hoiberg (@thoiberg)"
+'''
+
+EXAMPLES = '''
+- name: Install gems from a Gemfile in the current directory
+ community.general.bundler:
+ state: present
+ executable: ~/.rvm/gems/2.1.5/bin/bundle
+
+- name: Exclude the production group from installing
+ community.general.bundler:
+ state: present
+ exclude_groups: production
+
+- name: Install gems into ./vendor/bundle
+ community.general.bundler:
+ state: present
+ deployment_mode: yes
+
+- name: Install gems using a Gemfile in another directory
+ community.general.bundler:
+ state: present
+ gemfile: ../rails_project/Gemfile
+
+- name: Update Gemfile in another directory
+ community.general.bundler:
+ state: latest
+ chdir: ~/rails_project
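+
+# A hedged sketch (paths and group names are hypothetical): install into a
+# custom gem path with binstubs, excluding some Gemfile groups.
+- name: Install gems into a custom path with binstubs
+  community.general.bundler:
+    state: present
+    chdir: /srv/myapp
+    gem_path: vendor/gems
+    binstub_directory: bin
+    exclude_groups:
+      - development
+      - test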
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_bundler_executable(module):
+ if module.params.get('executable'):
+ result = module.params.get('executable').split(' ')
+ else:
+ result = [module.get_bin_path('bundle', True)]
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'latest']),
+ chdir=dict(default=None, required=False, type='path'),
+ exclude_groups=dict(default=None, required=False, type='list'),
+ clean=dict(default=False, required=False, type='bool'),
+ gemfile=dict(default=None, required=False, type='path'),
+ local=dict(default=False, required=False, type='bool'),
+ deployment_mode=dict(default=False, required=False, type='bool'),
+ user_install=dict(default=True, required=False, type='bool'),
+ gem_path=dict(default=None, required=False, type='path'),
+ binstub_directory=dict(default=None, required=False, type='path'),
+ extra_args=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+ chdir = module.params.get('chdir')
+ exclude_groups = module.params.get('exclude_groups')
+ clean = module.params.get('clean')
+ gemfile = module.params.get('gemfile')
+ local = module.params.get('local')
+ deployment_mode = module.params.get('deployment_mode')
+ user_install = module.params.get('user_install')
+ gem_path = module.params.get('gem_path')
+ binstub_directory = module.params.get('binstub_directory')
+ extra_args = module.params.get('extra_args')
+
+ cmd = get_bundler_executable(module)
+
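+    # In check mode, "bundle check" reports whether the Gemfile's dependencies
+    # are satisfied; a non-zero exit code means a real run would make changes.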
+ if module.check_mode:
+ cmd.append('check')
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+ module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+ if state == 'present':
+ cmd.append('install')
+ if exclude_groups:
+ cmd.extend(['--without', ':'.join(exclude_groups)])
+ if clean:
+ cmd.append('--clean')
+ if gemfile:
+ cmd.extend(['--gemfile', gemfile])
+ if local:
+ cmd.append('--local')
+ if deployment_mode:
+ cmd.append('--deployment')
+ if not user_install:
+ cmd.append('--system')
+ if gem_path:
+ cmd.extend(['--path', gem_path])
+ if binstub_directory:
+ cmd.extend(['--binstubs', binstub_directory])
+ else:
+ cmd.append('update')
+ if local:
+ cmd.append('--local')
+
+ if extra_args:
+ cmd.extend(extra_args.split(' '))
+
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+ module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py
new file mode 100644
index 00000000..3bc09c2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: composer
+author:
+ - "Dimitrios Tydeas Mengidis (@dmtrs)"
+ - "René Moser (@resmo)"
+short_description: Dependency Manager for PHP
+description:
+ - >
+ Composer is a tool for dependency management in PHP. It allows you to
+ declare the dependent libraries your project needs and it will install
+ them in your project for you.
+options:
+ command:
+ type: str
+ description:
+ - Composer command like "install", "update" and so on.
+ default: install
+ arguments:
+ type: str
+ description:
+ - Composer arguments like required package, version and so on.
+ executable:
+ type: path
+ description:
+ - Path to PHP Executable on the remote host, if PHP is not in PATH.
+ aliases: [ php_path ]
+ working_dir:
+ type: path
+ description:
+ - Directory of your project (see --working-dir). This is required when
+ the command is not run globally.
+ - Will be ignored if C(global_command=true).
+ aliases: [ working-dir ]
+ global_command:
+ description:
+ - Runs the specified command globally.
+ type: bool
+ default: false
+ aliases: [ global-command ]
+ prefer_source:
+ description:
+ - Forces installation from package sources when possible (see --prefer-source).
+ default: false
+ type: bool
+ aliases: [ prefer-source ]
+ prefer_dist:
+ description:
+ - Forces installation from package dist even for dev versions (see --prefer-dist).
+ default: false
+ type: bool
+ aliases: [ prefer-dist ]
+ no_dev:
+ description:
+ - Disables installation of require-dev packages (see --no-dev).
+ default: true
+ type: bool
+ aliases: [ no-dev ]
+ no_scripts:
+ description:
+ - Skips the execution of all scripts defined in composer.json (see --no-scripts).
+ default: false
+ type: bool
+ aliases: [ no-scripts ]
+ no_plugins:
+ description:
+        - Disables all plugins (see --no-plugins).
+ default: false
+ type: bool
+ aliases: [ no-plugins ]
+ optimize_autoloader:
+ description:
+ - Optimize autoloader during autoloader dump (see --optimize-autoloader).
+ - Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: true
+ type: bool
+ aliases: [ optimize-autoloader ]
+ classmap_authoritative:
+ description:
+ - Autoload classes from classmap only.
+      - Implicitly enables optimize_autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: false
+ type: bool
+ aliases: [ classmap-authoritative ]
+ apcu_autoloader:
+ description:
+      - Uses APCu to cache found/not-found classes.
+ default: false
+ type: bool
+ aliases: [ apcu-autoloader ]
+ ignore_platform_reqs:
+ description:
+ - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
+ default: false
+ type: bool
+ aliases: [ ignore-platform-reqs ]
+requirements:
+ - php
+ - composer installed in bin path (recommended /usr/local/bin)
+notes:
+ - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
+ - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
+'''
+
+EXAMPLES = '''
+- name: Download and install all libs and dependencies outlined in /path/to/project/composer.lock
+ community.general.composer:
+ command: install
+ working_dir: /path/to/project
+
+- name: Install a new package
+ community.general.composer:
+ command: require
+ arguments: my/package
+ working_dir: /path/to/project
+
+- name: Clone and install a project with all dependencies
+ community.general.composer:
+ command: create-project
+ arguments: package/package /path/to/project ~1.0
+ working_dir: /path/to/project
+ prefer_dist: yes
+
+- name: Install a package globally
+ community.general.composer:
+ command: require
+ global_command: yes
+ arguments: my/package
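+
+# A hedged sketch (project path is hypothetical): install with require-dev
+# packages included and without running composer scripts.
+- name: Install all dependencies including require-dev packages
+  community.general.composer:
+    command: install
+    working_dir: /path/to/project
+    no_dev: no
+    no_scripts: yes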
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_out(string):
+ return re.sub(r"\s+", " ", string).strip()
+
+
+def has_changed(string):
+ for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]:
+ if no_change in string:
+ return False
+
+ return True
+
+
+def get_available_options(module, command='install'):
+ # get all available options from a composer command using composer help to json
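+    # e.g. "composer help install --format=json" yields a document shaped like
+    # {"definition": {"options": {"dry-run": {...}, "no-dev": {...}}}} (abbreviated).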
+ rc, out, err = composer_command(module, "help %s --format=json" % command)
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output)
+
+ command_help_json = module.from_json(out)
+ return command_help_json['definition']['options']
+
+
+def composer_command(module, command, arguments="", options=None, global_command=False):
+ if options is None:
+ options = []
+
+ if module.params['executable'] is None:
+ php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
+ else:
+ php_path = module.params['executable']
+
+ composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
+ cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
+ return module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(default="install", type="str"),
+ arguments=dict(default="", type="str"),
+ executable=dict(type="path", aliases=["php_path"]),
+ working_dir=dict(type="path", aliases=["working-dir"]),
+ global_command=dict(default=False, type="bool", aliases=["global-command"]),
+ prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
+ prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
+ no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
+ no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
+ no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
+ apcu_autoloader=dict(default=False, type="bool", aliases=["apcu-autoloader"]),
+ optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
+ classmap_authoritative=dict(default=False, type="bool", aliases=["classmap-authoritative"]),
+ ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
+ ),
+ required_if=[('global_command', False, ['working_dir'])],
+ supports_check_mode=True
+ )
+
+ # Get composer command with fallback to default
+ command = module.params['command']
+ if re.search(r"\s", command):
+ module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+ arguments = module.params['arguments']
+ global_command = module.params['global_command']
+ available_options = get_available_options(module=module, command=command)
+
+ options = []
+
+ # Default options
+ default_options = [
+ 'no-ansi',
+ 'no-interaction',
+ 'no-progress',
+ ]
+
+ for option in default_options:
+ if option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if not global_command:
+ options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
+ option_params = {
+ 'prefer_source': 'prefer-source',
+ 'prefer_dist': 'prefer-dist',
+ 'no_dev': 'no-dev',
+ 'no_scripts': 'no-scripts',
+ 'no_plugins': 'no-plugins',
+        'apcu_autoloader': 'apcu-autoloader',
+ 'optimize_autoloader': 'optimize-autoloader',
+ 'classmap_authoritative': 'classmap-authoritative',
+ 'ignore_platform_reqs': 'ignore-platform-reqs',
+ }
+
+ for param, option in option_params.items():
+ if module.params.get(param) and option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
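+    # Check mode maps to composer's --dry-run when the invoked command
+    # supports it; otherwise the task is skipped instead of risking changes.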
+ if module.check_mode:
+ if 'dry-run' in available_options:
+ options.append('--dry-run')
+ else:
+ module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
+
+ rc, out, err = composer_command(module, command, arguments, options, global_command)
+
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output, stdout=err)
+ else:
+ # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
+ output = parse_out(out + err)
+ module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py
new file mode 100644
index 00000000..3b43b443
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Franck Cuny <franck@lumberjaph.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cpanm
+short_description: Manages Perl library dependencies
+description:
+ - Manage Perl library dependencies.
+options:
+ name:
+ type: str
+ description:
+ - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
+ aliases: ["pkg"]
+ from_path:
+ type: path
+ description:
+      - The local directory from which to install.
+ notest:
+ description:
+ - Do not run unit tests
+ type: bool
+ default: no
+ locallib:
+ description:
+      - Specify the install base to install modules into.
+ type: path
+ mirror:
+ description:
+ - Specifies the base URL for the CPAN mirror to use
+ type: str
+ mirror_only:
+ description:
+ - Use the mirror's index file instead of the CPAN Meta DB
+ type: bool
+ default: no
+ installdeps:
+ description:
+ - Only install dependencies
+ type: bool
+ default: no
+ version:
+ description:
+      - Minimum version of the Perl module to consider acceptable.
+ type: str
+ system_lib:
+ description:
+ - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
+      - This uses the cpanm command-line option C(--sudo), which has nothing to do with Ansible privilege escalation.
+ type: bool
+ default: no
+ aliases: ['use_sudo']
+ executable:
+ description:
+ - Override the path to the cpanm executable
+ type: path
+notes:
+  - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
+author: "Franck Cuny (@fcuny)"
+'''
+
+EXAMPLES = '''
+- name: Install Dancer perl package
+ community.general.cpanm:
+ name: Dancer
+
+- name: Install version 0.99_05 of the Plack perl package
+ community.general.cpanm:
+ name: MIYAGAWA/Plack-0.99_05.tar.gz
+
+- name: Install Dancer into the specified locallib
+ community.general.cpanm:
+ name: Dancer
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install perl dependencies from local directory
+ community.general.cpanm:
+ from_path: /srv/webapps/my_app/src/
+
+- name: Install Dancer perl package without running the unit tests in indicated locallib
+ community.general.cpanm:
+ name: Dancer
+ notest: True
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install Dancer perl package from a specific mirror
+ community.general.cpanm:
+ name: Dancer
+ mirror: 'http://cpan.cpantesters.org/'
+
+- name: Install Dancer perl package into the system root path
+ community.general.cpanm:
+ name: Dancer
+ system_lib: yes
+
+- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0
+ community.general.cpanm:
+ name: Dancer
+ version: '1.0'
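+
+# A hedged sketch (paths are hypothetical): resolve dependencies of a local
+# distribution strictly from a single mirror's index.
+- name: Install dependencies from a single mirror only
+  community.general.cpanm:
+    from_path: /srv/webapps/my_app/src/
+    installdeps: yes
+    mirror: 'http://cpan.cpantesters.org/'
+    mirror_only: yes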
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _is_package_installed(module, name, locallib, cpanm, version):
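+    # Probe with "perl -e 'use <name> <version>;'": "use" exits non-zero when
+    # the module is missing or older than the requested version.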
+ cmd = ""
+ if locallib:
+ os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
+ cmd = "%s perl -e ' use %s" % (cmd, name)
+ if version:
+ cmd = "%s %s;'" % (cmd, version)
+ else:
+ cmd = "%s;'" % cmd
+ res, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return res == 0
+
+
+def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
+ # this code should use "%s" like everything else and just return early but not fixing all of it now.
+ # don't copy stuff like this
+ if from_path:
+ cmd = cpanm + " " + from_path
+ else:
+ cmd = cpanm + " " + name
+
+ if notest is True:
+ cmd = cmd + " -n"
+
+ if locallib is not None:
+ cmd = cmd + " -l " + locallib
+
+ if mirror is not None:
+ cmd = cmd + " --mirror " + mirror
+
+ if mirror_only is True:
+ cmd = cmd + " --mirror-only"
+
+ if installdeps is True:
+ cmd = cmd + " --installdeps"
+
+ if use_sudo is True:
+ cmd = cmd + " --sudo"
+
+ return cmd
+
+
+def _get_cpanm_path(module):
+ if module.params['executable']:
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('cpanm', True)
+ return result
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, required=False, aliases=['pkg']),
+ from_path=dict(default=None, required=False, type='path'),
+ notest=dict(default=False, type='bool'),
+ locallib=dict(default=None, required=False, type='path'),
+ mirror=dict(default=None, required=False),
+ mirror_only=dict(default=False, type='bool'),
+ installdeps=dict(default=False, type='bool'),
+ system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
+ version=dict(default=None, required=False),
+ executable=dict(required=False, type='path'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ required_one_of=[['name', 'from_path']],
+ )
+
+ cpanm = _get_cpanm_path(module)
+ name = module.params['name']
+ from_path = module.params['from_path']
+ notest = module.boolean(module.params.get('notest', False))
+ locallib = module.params['locallib']
+ mirror = module.params['mirror']
+ mirror_only = module.params['mirror_only']
+ installdeps = module.params['installdeps']
+ use_sudo = module.params['system_lib']
+ version = module.params['version']
+
+ changed = False
+
+ installed = _is_package_installed(module, name, locallib, cpanm, version)
+
+ if not installed:
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
+
+ rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
+
+ if rc_cpanm != 0:
+ module.fail_json(msg=err_cpanm, cmd=cmd)
+
+ if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1):
+ changed = True
+
+ module.exit_json(changed=changed, binary=cpanm, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py
new file mode 100644
index 00000000..5e1d7930
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: easy_install
+short_description: Installs Python libraries
+description:
+ - Installs Python libraries, optionally in a I(virtualenv)
+options:
+ name:
+ type: str
+ description:
+ - A Python library name
+ required: true
+ virtualenv:
+ type: str
+ description:
+      - An optional I(virtualenv) directory path to install into. If the
+        I(virtualenv) does not exist, it is created automatically.
+ virtualenv_site_packages:
+ description:
+ - Whether the virtual environment will inherit packages from the
+ global site-packages directory. Note that if this setting is
+ changed on an already existing virtual environment it will not
+ have any effect, the environment must be deleted and newly
+ created.
+ type: bool
+ default: 'no'
+ virtualenv_command:
+ type: str
+ description:
+ - The command to create the virtual environment with. For example
+ C(pyvenv), C(virtualenv), C(virtualenv2).
+ default: virtualenv
+ executable:
+ type: str
+ description:
+ - The explicit executable or a pathname to the executable to be used to
+ run easy_install for a specific version of Python installed in the
+ system. For example C(easy_install-3.3), if there are both Python 2.7
+ and 3.3 installations in the system and you want to run easy_install
+ for the Python 3.3 installation.
+ default: easy_install
+ state:
+ type: str
+ description:
+ - The desired state of the library. C(latest) ensures that the latest version is installed.
+ choices: [present, latest]
+ default: present
+notes:
+ - Please note that the C(easy_install) module can only install Python
+ libraries. Thus this module is not able to remove libraries. It is
+ generally recommended to use the M(ansible.builtin.pip) module which you can first install
+ using M(community.general.easy_install).
+ - Also note that I(virtualenv) must be installed on the remote host if the
+ C(virtualenv) parameter is specified.
+requirements: [ "virtualenv" ]
+author: "Matt Wright (@mattupstate)"
+'''
+
+EXAMPLES = '''
+- name: Install or update pip
+ community.general.easy_install:
+ name: pip
+ state: latest
+
+- name: Install Bottle into the specified virtualenv
+ community.general.easy_install:
+ name: bottle
+ virtualenv: /webapps/myapp/venv
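+
+# A hedged sketch: pin to a specific interpreter's easy_install, per the
+# executable option documented above.
+- name: Install Bottle using the Python 3.3 easy_install
+  community.general.easy_install:
+    name: bottle
+    executable: easy_install-3.3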
+'''
+
+import os
+import os.path
+import tempfile
+from ansible.module_utils.basic import AnsibleModule
+
+
+def install_package(module, name, easy_install, executable_arguments):
+ cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
+ rc, out, err = module.run_command(cmd)
+ return rc, out, err
+
+
+def _is_package_installed(module, name, easy_install, executable_arguments):
+ # Copy and add to the arguments
+ executable_arguments = executable_arguments[:]
+ executable_arguments.append('--dry-run')
+ rc, out, err = install_package(module, name, easy_install, executable_arguments)
+ if rc:
+ module.fail_json(msg=err)
+ return 'Downloading' not in out
+
+
+def _get_easy_install(module, env=None, executable=None):
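+    # Resolution order: an absolute executable path wins outright; otherwise
+    # the named executable, then plain "easy_install", searched in the
+    # virtualenv's bin directory (if given) before the system PATH.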
+ candidate_easy_inst_basenames = ['easy_install']
+ easy_install = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ easy_install = executable
+ else:
+ candidate_easy_inst_basenames.insert(0, executable)
+ if easy_install is None:
+ if env is None:
+ opt_dirs = []
+ else:
+ # Try easy_install with the virtualenv directory first.
+ opt_dirs = ['%s/bin' % env]
+ for basename in candidate_easy_inst_basenames:
+ easy_install = module.get_bin_path(basename, False, opt_dirs)
+ if easy_install is not None:
+ break
+ # easy_install should have been found by now. The final call to
+ # get_bin_path will trigger fail_json.
+ if easy_install is None:
+ basename = candidate_easy_inst_basenames[0]
+ easy_install = module.get_bin_path(basename, True, opt_dirs)
+ return easy_install
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'latest'],
+ type='str'),
+ virtualenv=dict(default=None, required=False),
+ virtualenv_site_packages=dict(default=False, type='bool'),
+ virtualenv_command=dict(default='virtualenv', required=False),
+ executable=dict(default='easy_install', required=False),
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ env = module.params['virtualenv']
+ executable = module.params['executable']
+ site_packages = module.params['virtualenv_site_packages']
+ virtualenv_command = module.params['virtualenv_command']
+ executable_arguments = []
+ if module.params['state'] == 'latest':
+ executable_arguments.append('--upgrade')
+
+ rc = 0
+ err = ''
+ out = ''
+
+ if env:
+ virtualenv = module.get_bin_path(virtualenv_command, True)
+
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ command = '%s %s' % (virtualenv, env)
+ if site_packages:
+ command += ' --system-site-packages'
+ cwd = tempfile.gettempdir()
+ rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
+
+ rc += rc_venv
+ out += out_venv
+ err += err_venv
+
+ easy_install = _get_easy_install(module, env, executable)
+
+ cmd = None
+ changed = False
+ installed = _is_package_installed(module, name, easy_install, executable_arguments)
+
+ if not installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)
+
+ rc += rc_easy_inst
+ out += out_easy_inst
+ err += err_easy_inst
+
+ changed = True
+
+ if rc != 0:
+ module.fail_json(msg=err, cmd=cmd)
+
+ module.exit_json(changed=changed, binary=easy_install,
+ name=name, virtualenv=env)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py
new file mode 100644
index 00000000..516c9b0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gem
+short_description: Manage Ruby gems
+description:
+ - Manage installation and uninstallation of Ruby gems.
+options:
+ name:
+ type: str
+ description:
+ - The name of the gem to be managed.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the gem. C(latest) ensures that the latest version is installed.
+ required: false
+ choices: [present, absent, latest]
+ default: present
+ gem_source:
+ type: path
+ description:
+ - The path to a local gem used as installation source.
+ required: false
+ include_dependencies:
+ description:
+ - Whether to include dependencies or not.
+ required: false
+ type: bool
+ default: "yes"
+ repository:
+ type: str
+ description:
+ - The repository from which the gem will be installed
+ required: false
+ aliases: [source]
+ user_install:
+ description:
+      - If set, install the gem in the user's local gems cache; otherwise
+        install it system-wide for all users.
+ required: false
+ type: bool
+ default: "yes"
+ executable:
+ type: path
+ description:
+ - Override the path to the gem executable
+ required: false
+ install_dir:
+ type: path
+ description:
+ - Install the gems into a specific directory.
+        These gems will be independent from the globally installed ones.
+ Specifying this requires user_install to be false.
+ required: false
+ env_shebang:
+ description:
+ - Rewrite the shebang line on installed scripts to use /usr/bin/env.
+ required: false
+ default: "no"
+ type: bool
+ version:
+ type: str
+ description:
+ - Version of the gem to be installed/removed.
+ required: false
+ pre_release:
+ description:
+ - Allow installation of pre-release versions of the gem.
+ required: false
+ default: "no"
+ type: bool
+ include_doc:
+ description:
+ - Install with or without docs.
+ required: false
+ default: "no"
+ type: bool
+ build_flags:
+ type: str
+ description:
+ - Allow adding build flags for gem compilation
+ required: false
+ force:
+ description:
+ - Force gem to install, bypassing dependency checks.
+ required: false
+ default: "no"
+ type: bool
+author:
+ - "Ansible Core Team"
+ - "Johan Wiren (@johanwiren)"
+'''
+
+EXAMPLES = '''
+- name: Install version 1.0 of vagrant
+ community.general.gem:
+ name: vagrant
+ version: 1.0
+ state: present
+
+- name: Install latest available version of rake
+ community.general.gem:
+ name: rake
+ state: latest
+
+- name: Install rake version 1.0 from a local gem on disk
+ community.general.gem:
+ name: rake
+ gem_source: /path/to/gems/rake-1.0.gem
+ state: present
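+
+# A hedged sketch (directory is hypothetical): isolate gems in their own
+# install directory, which requires user_install to be disabled.
+- name: Install rake into an isolated gem directory
+  community.general.gem:
+    name: rake
+    install_dir: /opt/app/gems
+    user_install: no
+    state: present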
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_rubygems_path(module):
+ if module.params['executable']:
+ result = module.params['executable'].split(' ')
+ else:
+ result = [module.get_bin_path('gem', True)]
+ return result
+
+
+def get_rubygems_version(module):
+ cmd = get_rubygems_path(module) + ['--version']
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+
+ match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
+ if not match:
+ return None
+
+ return tuple(int(x) for x in match.groups())
+
+
+def get_rubygems_environ(module):
+ if module.params['install_dir']:
+ return {'GEM_HOME': module.params['install_dir']}
+ return None
+
+
+def get_installed_versions(module, remote=False):
+
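+    # "gem query -n ^name$" prints lines like "rake (13.0.1, 10.5.0)" or
+    # "rake (default: 12.3.3)"; the regex below extracts the version list.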
+ cmd = get_rubygems_path(module)
+ cmd.append('query')
+ if remote:
+ cmd.append('--remote')
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ cmd.append('-n')
+ cmd.append('^%s$' % module.params['name'])
+
+ environ = get_rubygems_environ(module)
+ (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True)
+ installed_versions = []
+ for line in out.splitlines():
+ match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
+ if match:
+ versions = match.group(1)
+ for version in versions.split(', '):
+ installed_versions.append(version.split()[0])
+ return installed_versions
+
+
+def exists(module):
+ if module.params['state'] == 'latest':
+ remoteversions = get_installed_versions(module, remote=True)
+ if remoteversions:
+ module.params['version'] = remoteversions[0]
+ installed_versions = get_installed_versions(module)
+ if module.params['version']:
+ if module.params['version'] in installed_versions:
+ return True
+ else:
+ if installed_versions:
+ return True
+ return False
+
+
+def uninstall(module):
+
+ if module.check_mode:
+ return
+ cmd = get_rubygems_path(module)
+ environ = get_rubygems_environ(module)
+ cmd.append('uninstall')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ else:
+ cmd.append('--all')
+ cmd.append('--executable')
+ cmd.append(module.params['name'])
+ module.run_command(cmd, environ_update=environ, check_rc=True)
+
+
+def install(module):
+
+ if module.check_mode:
+ return
+
+ ver = get_rubygems_version(module)
+ if ver:
+ major = ver[0]
+ else:
+ major = None
+
+ cmd = get_rubygems_path(module)
+ cmd.append('install')
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ if not module.params['include_dependencies']:
+ cmd.append('--ignore-dependencies')
+ else:
+ if major and major < 2:
+ cmd.append('--include-dependencies')
+ if module.params['user_install']:
+ cmd.append('--user-install')
+ else:
+ cmd.append('--no-user-install')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+ if module.params['pre_release']:
+ cmd.append('--pre')
+ if not module.params['include_doc']:
+ if major and major < 2:
+ cmd.append('--no-rdoc')
+ cmd.append('--no-ri')
+ else:
+ cmd.append('--no-document')
+    if module.params['env_shebang']:
+        cmd.append('--env-shebang')
+    if module.params['force']:
+        # --force must come before the gem source and the "--" separator,
+        # otherwise gem would treat it as a build flag.
+        cmd.append('--force')
+    cmd.append(module.params['gem_source'])
+    if module.params['build_flags']:
+        cmd.extend(['--', module.params['build_flags']])
+    module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(required=False, type='path'),
+ gem_source=dict(required=False, type='path'),
+ include_dependencies=dict(required=False, default=True, type='bool'),
+ name=dict(required=True, type='str'),
+ repository=dict(required=False, aliases=['source'], type='str'),
+ state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(required=False, default=True, type='bool'),
+ install_dir=dict(required=False, type='path'),
+ pre_release=dict(required=False, default=False, type='bool'),
+ include_doc=dict(required=False, default=False, type='bool'),
+ env_shebang=dict(required=False, default=False, type='bool'),
+ version=dict(required=False, type='str'),
+ build_flags=dict(required=False, type='str'),
+ force=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
+ )
+
+ if module.params['version'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot specify version when state=latest")
+ if module.params['gem_source'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot maintain state=latest when installing from local source")
+ if module.params['user_install'] and module.params['install_dir']:
+ module.fail_json(msg="install_dir requires user_install=false")
+
+ if not module.params['gem_source']:
+ module.params['gem_source'] = module.params['name']
+
+ changed = False
+
+ if module.params['state'] in ['present', 'latest']:
+ if not exists(module):
+ install(module)
+ changed = True
+ elif module.params['state'] == 'absent':
+ if exists(module):
+ uninstall(module)
+ changed = True
+
+ result = {}
+ result['name'] = module.params['name']
+ result['state'] = module.params['state']
+ if module.params['version']:
+ result['version'] = module.params['version']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py
new file mode 100644
index 00000000..03c3d4d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py
@@ -0,0 +1,712 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+ - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+    - Can retrieve snapshots or release versions of the artifact and will resolve the latest available
+      version if none is specified.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+ - lxml
+ - boto if using a S3 repository (s3://...)
+options:
+ group_id:
+ type: str
+ description:
+ - The Maven groupId coordinate
+ required: true
+ artifact_id:
+ type: str
+ description:
+ - The maven artifactId coordinate
+ required: true
+ version:
+ type: str
+ description:
+ - The maven version coordinate
+ - Mutually exclusive with I(version_by_spec).
+ version_by_spec:
+ type: str
+ description:
+ - The maven dependency version ranges.
+ - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
+      - Range types such as "(,1.0],[1.2,)" and "(,1.1),(1.1,)" are not supported.
+ - Mutually exclusive with I(version).
+ version_added: '0.2.0'
+ classifier:
+ type: str
+ description:
+ - The maven classifier coordinate
+ extension:
+ type: str
+ description:
+ - The maven type/extension coordinate
+ default: jar
+ repository_url:
+ type: str
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+ - Use file://... if the repository is local, added in version 2.6
+ default: https://repo1.maven.org/maven2
+ username:
+ type: str
+ description:
+      - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3.
+ aliases: [ "aws_secret_key" ]
+ password:
+ type: str
+ description:
+      - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
+ aliases: [ "aws_secret_access_key" ]
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ type: dict
+ force_basic_auth:
+ description:
+      - httplib2, the library used by the uri module, only sends authentication information when a webservice
+        responds to an initial request with a 401 status. Since some basic auth services do not properly
+        send a 401, logins will fail. This option forces the sending of the Basic authentication header
+        upon initial request.
+ default: 'no'
+ type: bool
+ version_added: '0.2.0'
+ dest:
+ type: path
+ description:
+ - The path where the artifact should be written to
+      - If file mode or ownership are specified and the destination path already exists, they affect the downloaded file.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the artifact
+ default: present
+    choices: [present, absent]
+ timeout:
+ type: int
+ description:
+ - Specifies a timeout in seconds for the connection attempt
+ default: 10
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
+ type: bool
+ default: 'yes'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, I(client_key) is not required.
+ type: path
+ version_added: '1.3.0'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '1.3.0'
+ keep_name:
+ description:
+      - If C(yes), the downloaded artifact's name is preserved, i.e. the version number remains part of it.
+ - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+ is defined.
+ type: bool
+ default: 'no'
+ verify_checksum:
+ type: str
+ description:
+ - If C(never), the md5 checksum will never be downloaded and verified.
+ - If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default.
+      - If C(change), the md5 checksum will be downloaded and verified if the destination already exists,
+ to verify if they are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe)
+ downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error
+ if the artifact has not been cached yet, it may fail unexpectedly.
+ If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+ use it to verify integrity after download.
+ - C(always) combines C(download) and C(change).
+ required: false
+ default: 'download'
+ choices: ['never', 'download', 'change', 'always']
+extends_documentation_fragment:
+ - files
+'''
+
+EXAMPLES = '''
+- name: Download the latest version of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+
+- name: Download JUnit 4.11 from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version: 4.11
+ dest: /tmp/junit-4.11.jar
+
+- name: Download an artifact from a private repository requiring authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ username: user
+ password: pass
+ dest: /tmp/library-name-latest.jar
+
+- name: Download an artifact from a private repository requiring certificate authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ client_cert: /path/to/cert.pem
+ client_key: /path/to/key.pem
+ dest: /tmp/library-name-latest.jar
+
+- name: Download a WAR File to the Tomcat webapps directory to be deployed
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: web-app
+ extension: war
+ repository_url: 'https://repo.company.com/maven'
+ dest: /var/lib/tomcat7/webapps/web-app.war
+
+- name: Keep a downloaded artifact's name, i.e. retain the version
+ community.general.maven_artifact:
+ version: latest
+ artifact_id: spring-core
+ group_id: org.springframework
+ dest: /tmp/
+ keep_name: yes
+
+- name: Download the latest version of the JUnit framework artifact from Maven local
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+ repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository"
+
+- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version_by_spec: "[3.8,4.0)"
+ dest: /tmp/
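+
+# A hedged sketch: verify the md5 both against an existing destination file
+# and after the download completes.
+- name: Download JUnit and always verify its checksum
+  community.general.maven_artifact:
+    group_id: junit
+    artifact_id: junit
+    dest: /tmp/junit-latest.jar
+    verify_checksum: always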
+'''
+
+import hashlib
+import os
+import posixpath
+import shutil
+import io
+import tempfile
+import traceback
+import re
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from re import match
+
+LXML_ETREE_IMP_ERR = None
+try:
+ from lxml import etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+BOTO_IMP_ERR = None
+try:
+ import boto3
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+SEMANTIC_VERSION_IMP_ERR = None
+try:
+ from semantic_version import Version, Spec
+ HAS_SEMANTIC_VERSION = True
+except ImportError:
+ SEMANTIC_VERSION_IMP_ERR = traceback.format_exc()
+ HAS_SEMANTIC_VERSION = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
+ '''
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if not os.path.exists(b_head):
+ if head == dirname:
+ return None, [head]
+ else:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return head, [tail]
+ new_directory_list.append(tail)
+ return pre_existing_dir, new_directory_list
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+ if new_directory_list:
+ first_sub_dir = new_directory_list.pop(0)
+ if not pre_existing_dir:
+ working_dir = first_sub_dir
+ else:
+ working_dir = os.path.join(pre_existing_dir, first_sub_dir)
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
+
+
+class Artifact(object):
+ def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'):
+ if not group_id:
+ raise ValueError("group_id must be set")
+ if not artifact_id:
+ raise ValueError("artifact_id must be set")
+
+ self.group_id = group_id
+ self.artifact_id = artifact_id
+ self.version = version
+ self.version_by_spec = version_by_spec
+ self.classifier = classifier
+
+ if not extension:
+ self.extension = "jar"
+ else:
+ self.extension = extension
+
+ def is_snapshot(self):
+ return self.version and self.version.endswith("SNAPSHOT")
+
+ def path(self, with_version=True):
+ base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
+ if with_version and self.version:
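+            # Timestamped snapshot versions such as "1.0-20201203.120000-1"
+            # live under the ".../1.0-SNAPSHOT/" directory in the repository.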
+ timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version)
+ if timestamp_version_match:
+ base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT")
+ else:
+ base = posixpath.join(base, self.version)
+ return base
+
+ def _generate_filename(self):
+ filename = self.artifact_id + "-" + self.classifier + "." + self.extension
+ if not self.classifier:
+ filename = self.artifact_id + "." + self.extension
+ return filename
+
+ def get_filename(self, filename=None):
+ if not filename:
+ filename = self._generate_filename()
+ elif os.path.isdir(filename):
+ filename = os.path.join(filename, self._generate_filename())
+ return filename
+
+ def __str__(self):
+ result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+ if self.classifier:
+ result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+ elif self.extension != "jar":
+ result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+ return result
+
+ @staticmethod
+ def parse(input):
+ parts = input.split(":")
+ if len(parts) >= 3:
+ g = parts[0]
+ a = parts[1]
+ v = parts[len(parts) - 1]
+ t = None
+ c = None
+ if len(parts) == 4:
+ t = parts[2]
+ if len(parts) == 5:
+ t = parts[2]
+ c = parts[3]
+            # version_by_spec is not part of a coordinate string
+            return Artifact(g, a, v, None, c, t)
+ else:
+ return None
+
+
+class MavenDownloader:
+ def __init__(self, module, base, local=False, headers=None):
+ self.module = module
+        base = base.rstrip("/")
+ self.base = base
+ self.local = local
+ self.headers = headers
+ self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+ self.latest_version_found = None
+ self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+ def find_version_by_spec(self, artifact):
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+ versions = []
+ for version in original_versions:
+ try:
+ versions.append(Version.coerce(version))
+ except ValueError:
+ # This means that version string is not a valid semantic versioning
+ pass
+
+ parse_versions_syntax = {
+ # example -> (,1.0]
+ r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+ # example -> 1.0
+ r"^(?P<version>[0-9.]*)$": "~={version}",
+ # example -> [1.0]
+ r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+ # example -> [1.2, 1.3]
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+ # example -> [1.2, 1.3)
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+ # example -> [1.5,)
+ r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+ }
+
+ for regex, spec_format in parse_versions_syntax.items():
+ regex_result = match(regex, artifact.version_by_spec)
+ if regex_result:
+ spec = Spec(spec_format.format(**regex_result.groupdict()))
+ selected_version = spec.select(versions)
+
+ if not selected_version:
+ raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+ # To deal when repos on maven don't have patch number on first build (e.g. 3.8 instead of 3.8.0)
+ if str(selected_version) not in original_versions:
+ selected_version.patch = None
+
+ return str(selected_version)
+
+ raise ValueError("The spec version {0} is not supported! ".format(artifact.version_by_spec))
+
+ def find_latest_version_available(self, artifact):
+ if self.latest_version_found:
+ return self.latest_version_found
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+ if v:
+ self.latest_version_found = v[0]
+ return v[0]
+
+ def find_uri_for_artifact(self, artifact):
+ if artifact.version_by_spec:
+ artifact.version = self.find_version_by_spec(artifact)
+
+ if artifact.version == "latest":
+ artifact.version = self.find_latest_version_available(artifact)
+
+ if artifact.is_snapshot():
+ if self.local:
+ return self._uri_for_artifact(artifact, artifact.version)
+ path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+
+ for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+ classifier = snapshotArtifact.xpath("classifier/text()")
+ artifact_classifier = classifier[0] if classifier else ''
+ extension = snapshotArtifact.xpath("extension/text()")
+ artifact_extension = extension[0] if extension else ''
+ if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+ return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+ timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+ if timestamp_xmlpath:
+ timestamp = timestamp_xmlpath[0]
+ build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+ return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+ return self._uri_for_artifact(artifact, artifact.version)
+
+ def _uri_for_artifact(self, artifact, version=None):
+ if artifact.is_snapshot() and not version:
+ raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
+ elif not artifact.is_snapshot():
+ version = artifact.version
+ if artifact.classifier:
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+ # for small files, directly get the full content
+ def _getContent(self, url, failmsg, force=True):
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ with io.open(parsed_url.path, 'rb') as f:
+ return f.read()
+ if force:
+ raise ValueError(failmsg + " because can not find file: " + url)
+ return None
+ response = self._request(url, failmsg, force)
+ if response:
+ return response.read()
+ return None
+
+ # only for HTTP request
+ def _request(self, url, failmsg, force=True):
+ url_to_use = url
+ parsed_url = urlparse(url)
+
+ if parsed_url.scheme == 's3':
+ parsed_url = urlparse(url)
+ bucket_name = parsed_url.netloc
+ key_name = parsed_url.path[1:]
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+ req_timeout = self.module.params.get('timeout')
+
+ # Hack to add parameters in the way that fetch_url expects
+ self.module.params['url_username'] = self.module.params.get('username', '')
+ self.module.params['url_password'] = self.module.params.get('password', '')
+ self.module.params['http_agent'] = self.user_agent
+
+ response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers)
+ if info['status'] == 200:
+ return response
+ if force:
+ raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use)
+ return None
+
+ def download(self, tmpdir, artifact, verify_download, filename=None):
+ if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+ artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+ artifact.classifier, artifact.extension)
+ url = self.find_uri_for_artifact(artifact)
+ tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+ try:
+ # copy to temp file
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ shutil.copy2(parsed_url.path, tempname)
+ else:
+ return "Can not find local file: " + parsed_url.path
+ else:
+ response = self._request(url, "Failed to download artifact " + str(artifact))
+ with os.fdopen(tempfd, 'wb') as f:
+ shutil.copyfileobj(response, f)
+
+ if verify_download:
+ invalid_md5 = self.is_invalid_md5(tempname, url)
+ if invalid_md5:
+ # if verify_change was set, the previous file would be deleted
+ os.remove(tempname)
+ return invalid_md5
+    except Exception:
+        os.remove(tempname)
+        raise
+
+ # all good, now copy temp file to target
+ shutil.move(tempname, artifact.get_filename(filename))
+ return None
+
+ def is_invalid_md5(self, file, remote_url):
+ if os.path.exists(file):
+ local_md5 = self._local_md5(file)
+ if self.local:
+ parsed_url = urlparse(remote_url)
+ remote_md5 = self._local_md5(parsed_url.path)
+ else:
+ try:
+ remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict')
+ except UnicodeError as e:
+ return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e))
+            if not remote_md5:
+ return "Cannot find md5 from " + remote_url
+            try:
+                # The remote file may contain only the md5 or "md5  filename";
+                # keep just the checksum itself.
+                remote_md5 = remote_md5.split(None)[0]
+            except IndexError:
+                # remote_md5 was empty; keep the original string. This should
+                # not happen since remote_md5 was checked above.
+                pass
+ if local_md5.lower() == remote_md5.lower():
+ return None
+ else:
+ return "Checksum does not match: we computed " + local_md5 + " but the repository states " + remote_md5
+
+ return "Path does not exist: " + file
+
+ def _local_md5(self, file):
+ md5 = hashlib.md5()
+ with io.open(file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ md5.update(chunk)
+ return md5.hexdigest()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ group_id=dict(required=True),
+ artifact_id=dict(required=True),
+ version=dict(default=None),
+ version_by_spec=dict(default=None),
+ classifier=dict(default=''),
+ extension=dict(default='jar'),
+ repository_url=dict(default='https://repo1.maven.org/maven2'),
+ username=dict(default=None, aliases=['aws_secret_key']),
+ password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ headers=dict(type='dict'),
+ force_basic_auth=dict(default=False, type='bool'),
+ state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
+ timeout=dict(default=10, type='int'),
+ dest=dict(type="path", required=True),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ client_cert=dict(type="path", required=False),
+ client_key=dict(type="path", required=False),
+ keep_name=dict(required=False, default=False, type='bool'),
+ verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
+ directory_mode=dict(type='str'), # Used since https://github.com/ansible/ansible/pull/24965, not sure
+ # if this should really be here.
+ ),
+ add_file_common_args=True,
+ mutually_exclusive=([('version', 'version_by_spec')])
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION:
+ module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR)
+
+ repository_url = module.params["repository_url"]
+ if not repository_url:
+ repository_url = "https://repo1.maven.org/maven2"
+ try:
+ parsed_url = urlparse(repository_url)
+ except AttributeError as e:
+ module.fail_json(msg='url parsing went wrong %s' % e)
+
+ local = parsed_url.scheme == "file"
+
+ if parsed_url.scheme == 's3' and not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'),
+ exception=BOTO_IMP_ERR)
+
+ group_id = module.params["group_id"]
+ artifact_id = module.params["artifact_id"]
+ version = module.params["version"]
+ version_by_spec = module.params["version_by_spec"]
+ classifier = module.params["classifier"]
+ extension = module.params["extension"]
+ headers = module.params['headers']
+ state = module.params["state"]
+ dest = module.params["dest"]
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ keep_name = module.params["keep_name"]
+ verify_checksum = module.params["verify_checksum"]
+ verify_download = verify_checksum in ['download', 'always']
+ verify_change = verify_checksum in ['change', 'always']
+
+ downloader = MavenDownloader(module, repository_url, local, headers)
+
+ if not version_by_spec and not version:
+ version = "latest"
+
+ try:
+ artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ changed = False
+ prev_state = "absent"
+
+ if dest.endswith(os.sep):
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
+ os.makedirs(b_dest)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ version_part = version
+ if version == 'latest':
+ version_part = downloader.find_latest_version_available(artifact)
+ elif version_by_spec:
+ version_part = downloader.find_version_by_spec(artifact)
+
+ filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
+ artifact_id=artifact_id,
+ version_part="-{0}".format(version_part) if keep_name else "",
+ classifier="-{0}".format(classifier) if classifier else "",
+ extension=extension
+ )
+ dest = posixpath.join(dest, filename)
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))):
+ prev_state = "present"
+
+ if prev_state == "absent":
+ try:
+ download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest)
+ if download_error is None:
+ changed = True
+ else:
+ module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = dest
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+ if changed:
+ module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
+ extension=extension, repository_url=repository_url, changed=changed)
+ else:
+ module.exit_json(state=state, dest=dest, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py
new file mode 100644
index 00000000..3ef81eaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: npm
+short_description: Manage node.js packages with npm
+description:
+ - Manage node.js packages with Node Package Manager (npm).
+author: "Chris Hoffman (@chrishoffman)"
+options:
+ name:
+ description:
+ - The name of a node.js library to install.
+ type: str
+ required: false
+ path:
+ description:
+      - The base path where the node.js libraries will be installed.
+ type: path
+ required: false
+ version:
+ description:
+ - The version to be installed.
+ type: str
+ required: false
+ global:
+ description:
+ - Install the node.js library globally.
+ required: false
+ default: no
+ type: bool
+ executable:
+ description:
+ - The executable location for npm.
+ - This is useful if you are using a version manager, such as nvm.
+ type: path
+ required: false
+ ignore_scripts:
+ description:
+ - Use the C(--ignore-scripts) flag when installing.
+ required: false
+ type: bool
+ default: no
+ unsafe_perm:
+ description:
+ - Use the C(--unsafe-perm) flag when installing.
+ type: bool
+ default: no
+ ci:
+ description:
+ - Install packages based on package-lock file, same as running C(npm ci).
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode, excluding devDependencies.
+ required: false
+ type: bool
+ default: no
+ registry:
+ description:
+ - The registry to install modules from.
+ required: false
+ type: str
+ state:
+ description:
+ - The state of the node.js library.
+ required: false
+ type: str
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - npm installed in bin path (recommended /usr/local/bin)
+'''
+
+EXAMPLES = r'''
+- name: Install "coffee-script" node.js package.
+ community.general.npm:
+ name: coffee-script
+ path: /app/location
+
+- name: Install "coffee-script" node.js package on version 1.6.1.
+ community.general.npm:
+ name: coffee-script
+ version: '1.6.1'
+ path: /app/location
+
+- name: Install "coffee-script" node.js package globally.
+ community.general.npm:
+ name: coffee-script
+ global: yes
+
+- name: Remove the globally installed package "coffee-script".
+ community.general.npm:
+ name: coffee-script
+ global: yes
+ state: absent
+
+- name: Install "coffee-script" node.js package from custom registry.
+ community.general.npm:
+ name: coffee-script
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.npm:
+ path: /app/location
+
+- name: Update packages based on package.json to their latest version.
+ community.general.npm:
+ path: /app/location
+ state: latest
+
+- name: Install packages based on package.json using the npm installed with nvm v0.10.1.
+ community.general.npm:
+ path: /app/location
+ executable: /opt/nvm/v0.10.1/bin/npm
+ state: present
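+
+# An illustrative sketch, not part of the original examples: the ci option maps
+# to "npm ci", which installs strictly from an existing package-lock.json.
+- name: Install packages based on package-lock.json, as "npm ci" would.
+  community.general.npm:
+    path: /app/location
+    ci: yes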
+'''
+
+import json
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Npm(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.glbl = kwargs['glbl']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+ self.unsafe_perm = kwargs['unsafe_perm']
+ self.state = kwargs['state']
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('npm', True)]
+
+ if kwargs['version'] and self.state != 'absent':
+ self.name_version = self.name + '@' + str(self.version)
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
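+        # Only execute outside check mode, unless the caller opts in for read-only commands (for example "npm list").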
+        if not self.module.check_mode or run_in_check_mode:
+ cmd = self.executable + args
+
+ if self.glbl:
+ cmd.append('--global')
+ if self.production and ('install' in cmd or 'update' in cmd):
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.unsafe_perm:
+ cmd.append('--unsafe-perm')
+ if self.name and add_package_name:
+ cmd.append(self.name_version)
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json', '--long']
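+        # "npm list --json" reports the dependency tree; entries flagged "missing" or "invalid" need (re)installation.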
+
+ installed = list()
+ missing = list()
+ data = {}
+ try:
+ data = json.loads(self._exec(cmd, True, False, False) or '{}')
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
+ missing.append(dep)
+ elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
+ missing.append(dep)
+ else:
+ installed.append(dep)
+ if self.name and self.name not in installed:
+ missing.append(self.name)
+        # npm returned no dependency data at all, so a named dependency is not installed
+        elif self.name:
+            missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ return self._exec(['install'])
+
+ def ci_install(self):
+ return self._exec(['ci'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+ def list_outdated(self):
+ outdated = list()
+ data = self._exec(['outdated'], True, False)
+ for dep in data.splitlines():
+ if dep:
+ # node.js v0.10.22 changed the `npm outdated` module separator
+ # from "@" to " ". Split on both for backwards compatibility.
+ pkg, other = re.split(r'\s|@', dep, 1)
+ outdated.append(pkg)
+
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, type='str'),
+ path=dict(default=None, type='path'),
+ version=dict(default=None, type='str'),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None, type='str'),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ unsafe_perm=dict(default=False, type='bool'),
+ ci=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ glbl = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+ unsafe_perm = module.params['unsafe_perm']
+ ci = module.params['ci']
+
+ if not path and not glbl:
+ module.fail_json(msg='path must be specified when not using global')
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
+ executable=executable, registry=registry, ignore_scripts=ignore_scripts,
+ unsafe_perm=unsafe_perm, state=state)
+
+ changed = False
+ if ci:
+ npm.ci_install()
+ changed = True
+ elif state == 'present':
+ installed, missing = npm.list()
+ if missing:
+ changed = True
+ npm.install()
+ elif state == 'latest':
+ installed, missing = npm.list()
+ outdated = npm.list_outdated()
+ if missing:
+ changed = True
+ npm.install()
+ if outdated:
+ changed = True
+ npm.update()
+ else: # absent
+ installed, missing = npm.list()
+ if name in installed:
+ changed = True
+ npm.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py
new file mode 100644
index 00000000..fef04d32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Afterburn <https://github.com/afterburn>
+# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pear
+short_description: Manage pear/pecl packages
+description:
+ - Manage PHP packages with the pear package manager.
+author:
+ - Jonathan Lestrelin (@jle64) <jonathan.lestrelin@gmail.com>
+options:
+ name:
+ type: str
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: true
+ aliases: [pkg]
+ state:
+ type: str
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "installed", "latest", "absent", "removed"]
+ executable:
+ type: path
+ description:
+ - Path to the pear executable.
+ prompts:
+ description:
+      - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
+      - Prompts will be processed in the same order as the packages list.
+      - You can optionally specify an answer to any question in the list.
+      - If no answer is provided, the list item will only contain the regular expression.
+      - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+      - You can provide a list containing items with or without answers.
+      - A prompt list shorter or longer than the packages list is accepted, but a length mismatch will issue a warning.
+      - To mark a package in the middle of the list as needing no prompt, use C(null) for its entry.
+ type: list
+ elements: raw
+ version_added: 0.2.0
+'''
+
+EXAMPLES = r'''
+- name: Install pear package
+ community.general.pear:
+ name: Net_URL2
+ state: present
+
+- name: Install pecl package
+ community.general.pear:
+ name: pecl/json_post
+ state: present
+
+- name: Install pecl package with expected prompt
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]
+
+- name: Install pecl package with expected prompt and an answer
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once with prompts.
+  Prompts will be processed in the same order as the packages.
+  If there are more prompts than packages, the extra prompts will be ignored.
+  If there are more packages than prompts, the packages without a prompt will be installed without any prompt expected.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+      - I am a test prompt because gnupg doesn't ask anything
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once skipping the first prompt.
+  Prompts will be processed in the same order as the packages.
+  If there are more prompts than packages, the extra prompts will be ignored.
+  If there are more packages than prompts, the packages without a prompt will be installed without any prompt expected.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+ - null
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Upgrade package
+ community.general.pear:
+ name: Net_URL2
+ state: latest
+
+- name: Remove packages
+ community.general.pear:
+ name: Net_URL2,pecl/json_post
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_local_version(pear_output):
+ """Take pear remoteinfo output and get the installed version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Installed ' in line:
+ installed = line.rsplit(None, 1)[-1].strip()
+ if installed == '-':
+ continue
+ return installed
+ return None
+
+
+def _get_pear_path(module):
+ if module.params['executable'] and os.path.isfile(module.params['executable']):
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('pear', True, [module.params['executable']])
+ return result
+
+
+def get_repository_version(pear_output):
+ """Take pear remote-info output and get the latest version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Latest ' in line:
+ return line.rsplit(None, 1)[-1].strip()
+ return None
+
+
+def query_package(module, name, state="present"):
+ """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed,
+ and a second boolean to indicate if the package is up-to-date."""
+ if state == "present":
+ lcmd = "%s info %s" % (_get_pear_path(module), name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s remote-info %s" % (_get_pear_path(module), name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+
+ # get the version installed locally (if any)
+ lversion = get_local_version(rstdout)
+
+ # get the version in the repository
+ rversion = get_repository_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally,
+ # and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion)
+
+ return False, False
+
+
+def remove_packages(module, packages):
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, package)
+ if not installed:
+ continue
+
+ cmd = "%s uninstall %s" % (_get_pear_path(module), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr)))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, state, packages, prompts):
+ install_c = 0
+ has_prompt = bool(prompts)
+ default_stdin = "\n"
+
+ if has_prompt:
+ nb_prompts = len(prompts)
+ nb_packages = len(packages)
+
+ if nb_prompts > 0 and (nb_prompts != nb_packages):
+ if nb_prompts > nb_packages:
+ diff = nb_prompts - nb_packages
+ msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ else:
+ diff = nb_packages - nb_prompts
+ msg = "%s packages to install but only %s prompts to expect. %s packages won't be expected to have a prompt" \
+ % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ module.warn(msg)
+
+ # Preparing prompts answer according to item type
+ tmp_prompts = []
+ for _item in prompts:
+ # If the current item is a dict then we expect it's key to be the prompt regex and it's value to be the answer
+ # We also expect here that the dict only has ONE key and the first key will be taken
+ if isinstance(_item, dict):
+ key = list(_item.keys())[0]
+ answer = _item[key] + "\n"
+
+ tmp_prompts.append((key, answer))
+ elif not _item:
+ tmp_prompts.append((None, default_stdin))
+ else:
+ tmp_prompts.append((_item, default_stdin))
+ prompts = tmp_prompts
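+        # Each normalized entry is now a (prompt_regex_or_None, stdin_answer) tuple, aligned by index with the packages list.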
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present
+ # or state == latest and is up-to-date then skip
+ installed, updated = query_package(module, package)
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if state == 'present':
+ command = 'install'
+
+ if state == 'latest':
+ command = 'upgrade'
+
+ if has_prompt and i < len(prompts):
+ prompt_regex = prompts[i][0]
+ data = prompts[i][1]
+ else:
+ prompt_regex = None
+ data = default_stdin
+
+ cmd = "%s %s %s" % (_get_pear_path(module), command, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already installed")
+
+
+def check_packages(module, packages, state):
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state))
+ else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+ executable=dict(default=None, required=False, type='path'),
+ prompts=dict(default=None, required=False, type='list', elements='raw'),
+ ),
+ supports_check_mode=True)
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['name']:
+ pkgs = p['name'].split(',')
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['state'], pkgs, p["prompts"])
+ elif p['state'] == 'absent':
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py
new file mode 100644
index 00000000..08eb2e95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# started out with AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pip_package_info
+short_description: pip package information
+description:
+ - Return information about installed pip packages
+options:
+ clients:
+ description:
+ - A list of the pip executables that will be used to get the packages.
+        They can be supplied with the full path or just the executable name, for example C(pip3.7).
+ default: ['pip']
+ required: False
+ type: list
+requirements:
+ - The requested pip executables must be installed on the target.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Just get the list from default pip
+ community.general.pip_package_info:
+
+- name: Get the facts for default pip, pip2 and pip3.6
+ community.general.pip_package_info:
+ clients: ['pip', 'pip2', 'pip3.6']
+
+- name: Get from specific paths (for example, virtualenvs)
+ community.general.pip_package_info:
+ clients: '/home/me/projec42/python/pip3.5'
+'''
+
+RETURN = '''
+packages:
+ description: a dictionary of installed package data
+ returned: always
+ type: dict
+ contains:
+ python:
+ description: A dictionary with each pip client which then contains a list of dicts with python package information
+ returned: always
+ type: dict
+ sample:
+ "packages": {
+ "pip": {
+ "Babel": [
+ {
+ "name": "Babel",
+ "source": "pip",
+ "version": "2.6.0"
+ }
+ ],
+ "Flask": [
+ {
+ "name": "Flask",
+ "source": "pip",
+ "version": "1.0.2"
+ }
+ ],
+ "Flask-SQLAlchemy": [
+ {
+ "name": "Flask-SQLAlchemy",
+ "source": "pip",
+ "version": "2.3.2"
+ }
+ ],
+ "Jinja2": [
+ {
+ "name": "Jinja2",
+ "source": "pip",
+ "version": "2.10"
+ }
+ ],
+ },
+ }
+'''
+import json
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.packages import CLIMgr
+
+
+class PIP(CLIMgr):
+
+ def __init__(self, pip):
+
+ self.CLI = pip
+
+ def list_installed(self):
+ global module
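+        # "pip list -l --format=json" emits a JSON array of {"name": ..., "version": ...} objects for the local environment.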
+ rc, out, err = module.run_command([self._cli, 'list', '-l', '--format=json'])
+ if rc != 0:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return json.loads(out)
+
+ def get_package_details(self, package):
+ package['source'] = self.CLI
+ return package
+
+
+def main():
+
+ # start work
+ global module
+ module = AnsibleModule(argument_spec=dict(clients={'type': 'list', 'default': ['pip']},), supports_check_mode=True)
+ packages = {}
+ results = {'packages': {}}
+ clients = module.params['clients']
+
+ found = 0
+ for pip in clients:
+
+ if not os.path.basename(pip).startswith('pip'):
+ module.warn('Skipping invalid pip client: %s' % (pip))
+ continue
+ try:
+ pip_mgr = PIP(pip)
+ if pip_mgr.is_available():
+ found += 1
+ packages[pip] = pip_mgr.get_packages()
+ except Exception as e:
+ module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e)))
+ continue
+
+ if found == 0:
+ module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients)
+
+ # return info
+ results['packages'] = packages
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py
new file mode 100644
index 00000000..77489e24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017 David Gunter <david.gunter@tivix.com>
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yarn
+short_description: Manage node.js packages with Yarn
+description:
+ - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/)
+author:
+ - "David Gunter (@verkaufer)"
+ - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module)"
+options:
+ name:
+ type: str
+ description:
+      - The name of a node.js library to install.
+      - If omitted, all packages in package.json are installed.
+      - To install a node.js library globally from a local path, prepend "file:" to the path of the library.
+ required: false
+ path:
+ type: path
+ description:
+ - The base path where Node.js libraries will be installed.
+ - This is where the node_modules folder lives.
+ required: false
+ version:
+ type: str
+ description:
+ - The version of the library to be installed.
+      - Must be in semver format. If "latest" is desired, use the "state" argument instead.
+ required: false
+ global:
+ description:
+ - Install the node.js library globally
+ required: false
+ default: no
+ type: bool
+ executable:
+ type: path
+ description:
+ - The executable location for yarn.
+ required: false
+ ignore_scripts:
+ description:
+ - Use the --ignore-scripts flag when installing.
+ required: false
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode.
+ - Yarn will ignore any dependencies under devDependencies in package.json
+ required: false
+ type: bool
+ default: no
+ registry:
+ type: str
+ description:
+ - The registry to install modules from.
+ required: false
+ state:
+ type: str
+ description:
+      - Installation state of the named node.js library.
+      - If absent is selected, a name option must be provided.
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - Yarn installed in bin path (typically /usr/local/bin)
+'''
+
+EXAMPLES = '''
+- name: Install "imagemin" node.js package.
+ community.general.yarn:
+ name: imagemin
+ path: /app/location
+
+- name: Install "imagemin" node.js package on version 5.3.1
+ community.general.yarn:
+ name: imagemin
+ version: '5.3.1'
+ path: /app/location
+
+- name: Install "imagemin" node.js package globally.
+ community.general.yarn:
+ name: imagemin
+ global: yes
+
+- name: Remove the globally-installed package "imagemin".
+ community.general.yarn:
+ name: imagemin
+ global: yes
+ state: absent
+
+- name: Install "imagemin" node.js package from custom registry.
+ community.general.yarn:
+ name: imagemin
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.yarn:
+ path: /app/location
+
+- name: Update all packages in package.json to their latest version.
+ community.general.yarn:
+ path: /app/location
+ state: latest
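+
+# An illustrative sketch, not part of the original examples: pointing the module
+# at a specific yarn binary through the executable option.
+- name: Install packages based on package.json using a specific yarn executable.
+  community.general.yarn:
+    path: /app/location
+    executable: /usr/local/bin/yarn
+    state: present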
+'''
+
+RETURN = '''
+changed:
+ description: Whether Yarn changed any package data
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Provides an error message if Yarn syntax was incorrect
+ returned: failure
+ type: str
+ sample: "Package must be explicitly named when uninstalling."
+invocation:
+ description: Parameters and values used during execution
+ returned: success
+ type: dict
+ sample: {
+ "module_args": {
+ "executable": null,
+ "globally": false,
+ "ignore_scripts": false,
+ "name": null,
+ "path": "/some/path/folder",
+ "production": false,
+ "registry": null,
+ "state": "present",
+ "version": null
+ }
+ }
+out:
+ description: Output generated from Yarn with emojis removed.
+ returned: always
+ type: str
+ sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4]
+ Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s."
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Yarn(object):
+
+ DEFAULT_GLOBAL_INSTALLATION_PATH = '~/.config/yarn/global'
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.globally = kwargs['globally']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+
+ # Specify a version of package if version arg passed in
+ self.name_version = None
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('yarn', True)]
+
+ if kwargs['version'] and self.name is not None:
+ self.name_version = self.name + '@' + str(self.version)
+ elif self.name is not None:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+        if not self.module.check_mode or run_in_check_mode:
+
+ if self.globally:
+ # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`)
+ args.insert(0, 'global')
+
+ cmd = self.executable + args
+
+ if self.production:
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # always run Yarn without emojis when called via Ansible
+ cmd.append('--no-emoji')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path and not self.globally:
+ if not os.path.exists(self.path):
+ # Module will make directory if not exists.
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="Path provided %s is not a directory" % self.path)
+ cwd = self.path
+
+ if not os.path.isfile(os.path.join(self.path, 'package.json')):
+ self.module.fail_json(msg="Package.json does not exist in provided path.")
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out, err
+
+        return None, None
+
+ def list(self):
+ cmd = ['list', '--depth=0', '--json']
+
+ installed = list()
+ missing = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ missing.append(self.name)
+ return installed, missing
+
+ result, error = self._exec(cmd, True, False)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ data = json.loads(result)
+ try:
+ dependencies = data['data']['trees']
+ except KeyError:
+ missing.append(self.name)
+ return installed, missing
+
+ for dep in dependencies:
+ name, version = dep['name'].rsplit('@', 1)
+ installed.append(name)
+
+ if self.name not in installed:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ if self.name_version:
+ # Yarn has a separate command for installing packages by name...
+ return self._exec(['add', self.name_version])
+ # And one for installing all packages in package.json
+ return self._exec(['install', '--non-interactive'])
+
+ def update(self):
+ return self._exec(['upgrade', '--latest'])
+
+ def uninstall(self):
+ return self._exec(['remove', self.name])
+
+ def list_outdated(self):
+ outdated = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ return outdated
+
+ cmd_result, err = self._exec(['outdated', '--json'], True, False)
+ if err:
+ self.module.fail_json(msg=err)
+
+ if not cmd_result:
+ return outdated
+
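+        # "yarn outdated --json" emits one JSON document per line; the table of outdated packages is on the second line.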
+ outdated_packages_data = cmd_result.splitlines()[1]
+
+ data = json.loads(outdated_packages_data)
+
+ try:
+ outdated_dependencies = data['data']['body']
+ except KeyError:
+ return outdated
+
+ for dep in outdated_dependencies:
+ # Outdated dependencies returned as a list of lists, where
+ # item at index 0 is the name of the dependency
+ outdated.append(dep[0])
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ path=dict(default=None, type='path'),
+ version=dict(default=None),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ globally = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+
+ # When installing globally, users should not be able to define a path for installation.
+ # Require a path if global is False, though!
+ if path is None and globally is False:
+ module.fail_json(msg='Path must be specified when not using global arg')
+ elif path and globally is True:
+ module.fail_json(msg='Cannot specify path if doing global installation')
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='Package must be explicitly named when uninstalling.')
+ if state == 'latest':
+ version = 'latest'
+
+ # When installing globally, use the defined path for global node_modules
+ if globally:
+ path = Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH
+
+ yarn = Yarn(module,
+ name=name,
+ path=path,
+ version=version,
+ globally=globally,
+ production=production,
+ executable=executable,
+ registry=registry,
+ ignore_scripts=ignore_scripts)
+
+ changed = False
+ out = ''
+ err = ''
+ if state == 'present':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+
+ elif state == 'latest':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ outdated = yarn.list_outdated()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+ if len(outdated):
+ changed = True
+ out, err = yarn.update()
+ else:
+ # state == absent
+ installed, missing = yarn.list()
+ if name in installed:
+ changed = True
+ out, err = yarn.uninstall()
+
+ module.exit_json(changed=changed, out=out, err=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py
new file mode 100644
index 00000000..74b738de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
+# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
+# and apt (Matthew Williams <matthew@flowroute.com>) modules.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apk
+short_description: Manages apk packages
+description:
+ - Manages I(apk) packages for Alpine Linux.
+author: "Kevin Brebanov (@kbrebanov)"
+options:
+ available:
+ description:
+ - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
+ if the currently installed package is no longer available from any repository.
+ type: bool
+ default: no
+ name:
+ description:
+ - A package name, like C(foo), or multiple packages, like C(foo, bar).
+ type: list
+ elements: str
+ no_cache:
+ description:
+ - Do not use any local cache path.
+ type: bool
+ default: no
+ version_added: 1.0.0
+ repository:
+ description:
+ - A package repository or multiple repositories.
+ Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package(s) state.
+ - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias.
+ - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias.
+ - C(latest) ensures the package(s) is/are present and the latest version(s).
+ default: present
+ choices: [ "present", "absent", "latest", "installed", "removed" ]
+ type: str
+ update_cache:
+ description:
+      - Update repository indexes. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade all installed packages to their latest version.
+ type: bool
+ default: no
+notes:
+ - '"name" and "upgrade" are mutually exclusive.'
+  - When used with a `loop:` each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
+'''
+
+EXAMPLES = '''
+- name: Update repositories and install foo package
+ community.general.apk:
+ name: foo
+ update_cache: yes
+
+- name: Update repositories and install foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ update_cache: yes
+
+- name: Remove foo package
+ community.general.apk:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ state: absent
+
+- name: Install the package foo
+ community.general.apk:
+ name: foo
+ state: present
+
+- name: Install the packages foo and bar
+ community.general.apk:
+ name: foo,bar
+ state: present
+
+- name: Update repositories and update package foo to latest version
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Update repositories and update packages foo and bar to latest versions
+ community.general.apk:
+ name: foo,bar
+ state: latest
+ update_cache: yes
+
+- name: Update all installed packages to the latest versions
+ community.general.apk:
+ upgrade: yes
+
+- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
+ community.general.apk:
+ available: yes
+ upgrade: yes
+
+- name: Update repositories as a separate step
+ community.general.apk:
+ update_cache: yes
+
+- name: Install package from a specific repository
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+ repository: http://dl-3.alpinelinux.org/alpine/edge/main
+
+- name: Install package without using cache
+ community.general.apk:
+ name: foo
+ state: latest
+ no_cache: yes
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when packages have changed
+ type: list
+ sample: ['package', 'other-package']
+'''
+
+import re
+# Import module snippets.
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_for_packages(stdout):
+ packages = []
+ data = stdout.split('\n')
+ regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
+ for l in data:
+ p = regex.search(l)
+ if p:
+ packages.append(p.group(1))
+ return packages
+
+
+def update_package_db(module, exit_after):
+    cmd = "%s update" % (APK_PATH)
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+    if rc != 0:
+        module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
+    elif exit_after:
+        module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
+ else:
+ return True
+
+
+def query_toplevel(module, name):
+ # /etc/apk/world contains a list of top-level packages separated by ' ' or \n
+ # packages may contain repository (@) or version (=<>~) separator characters or start with negation !
+ regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
+ with open('/etc/apk/world') as f:
+ content = f.read().split()
+ for p in content:
+ if regex.search(p):
+ return True
+ return False
+
+
+def query_package(module, name):
+ cmd = "%s -v info --installed %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_latest(module, name):
+ cmd = "%s version %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
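+    # "apk version <name>" prints e.g. "name-1.0-r0 < 1.1-r0"; a "<" means the installed version is older than what the repository offers.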
+ search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
+ match = re.search(search_pattern, stdout)
+ if match and match.group(2) == "<":
+ return False
+ return True
+
+
+def query_virtual(module, name):
+ cmd = "%s -v info --description %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"^%s: virtual meta package" % (re.escape(name))
+ if re.search(search_pattern, stdout):
+ return True
+ return False
+
+
+def get_dependencies(module, name):
+ cmd = "%s -v info --depends %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ dependencies = stdout.split()
+ if len(dependencies) > 1:
+ return dependencies[1:]
+ else:
+ return []
+
+
+def upgrade_packages(module, available):
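+    # In check mode, run "apk upgrade --simulate" so apk reports what would change without applying anything.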
+ if module.check_mode:
+ cmd = "%s upgrade --simulate" % (APK_PATH)
+ else:
+ cmd = "%s upgrade" % (APK_PATH)
+ if available:
+ cmd = "%s --available" % cmd
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
+ if re.search(r'^OK', stdout):
+ module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def install_packages(module, names, state):
+ upgrade = False
+ to_install = []
+ to_upgrade = []
+ for name in names:
+ # Check if virtual package
+ if query_virtual(module, name):
+ # Get virtual package dependencies
+ dependencies = get_dependencies(module, name)
+ for dependency in dependencies:
+ if state == 'latest' and not query_latest(module, dependency):
+ to_upgrade.append(dependency)
+ else:
+ if not query_toplevel(module, name):
+ to_install.append(name)
+ elif state == 'latest' and not query_latest(module, name):
+ to_upgrade.append(name)
+ if to_upgrade:
+ upgrade = True
+ if not to_install and not upgrade:
+ module.exit_json(changed=False, msg="package(s) already installed")
+ packages = " ".join(to_install + to_upgrade)
+ if upgrade:
+ if module.check_mode:
+ cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add --upgrade %s" % (APK_PATH, packages)
+ else:
+ if module.check_mode:
+ cmd = "%s add --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add %s" % (APK_PATH, packages)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def remove_packages(module, names):
+ installed = []
+ for name in names:
+ if query_package(module, name):
+ installed.append(name)
+ if not installed:
+ module.exit_json(changed=False, msg="package(s) already removed")
+ names = " ".join(installed)
+ if module.check_mode:
+ cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
+ else:
+ cmd = "%s del --purge %s" % (APK_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ # Check to see if packages are still present because of dependencies
+ for name in installed:
+ if query_package(module, name):
+ rc = 1
+ break
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+
+# ==========================================
+# Main control flow.
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
+ name=dict(type='list', elements='str'),
+ no_cache=dict(default=False, type='bool'),
+ repository=dict(type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ available=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True
+ )
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ global APK_PATH
+ APK_PATH = module.get_bin_path('apk', required=True)
+
+ p = module.params
+
+ if p['no_cache']:
+ APK_PATH = "%s --no-cache" % (APK_PATH, )
+
+ # add repositories to the APK_PATH
+ if p['repository']:
+ for r in p['repository']:
+ APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ if p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_package_db(module, not p['name'] and not p['upgrade'])
+
+ if p['upgrade']:
+ upgrade_packages(module, p['available'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['name'], p['state'])
+ elif p['state'] == 'absent':
+ remove_packages(module, p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py
new file mode 100644
index 00000000..d196e03b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Mikhail Gordeev
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_repo
+short_description: Manage APT repositories via apt-repo
+description:
+ - Manages APT repositories using apt-repo tool.
+ - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo
+notes:
+ - This module works on ALT based distros.
+ - Does NOT support checkmode, due to a limitation in apt-repo tool.
+options:
+ repo:
+ description:
+ - Name of the repository to add or remove.
+ required: true
+ type: str
+ state:
+ description:
+ - Indicates the desired repository state.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ remove_others:
+ description:
+      - Remove all other repositories apart from the one being added.
+      - Used if I(state=present).
+ type: bool
+ default: no
+ update:
+ description:
+ - Update the package database after changing repositories.
+ type: bool
+ default: no
+author:
+- Mikhail Gordeev (@obirvalger)
+'''
+
+EXAMPLES = '''
+- name: Remove all repositories
+ community.general.apt_repo:
+ repo: all
+ state: absent
+
+- name: Add repository `Sisyphus` and remove other repositories
+  community.general.apt_repo:
+    repo: Sisyphus
+ state: present
+ remove_others: yes
+
+- name: Add local repository `/space/ALT/Sisyphus` and update package cache
+ community.general.apt_repo:
+ repo: copy:///space/ALT/Sisyphus
+ state: present
+ update: yes
+'''
+
+RETURN = ''' # '''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_REPO_PATH = "/usr/bin/apt-repo"
+
+
+def apt_repo(module, *args):
+ """run apt-repo with args and return its output"""
+ # make args list to use in concatenation
+ args = list(args)
+ rc, out, err = module.run_command([APT_REPO_PATH] + args)
+
+ if rc != 0:
+ module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
+
+ return out
+
+
+def add_repo(module, repo):
+ """add a repository"""
+ apt_repo(module, 'add', repo)
+
+
+def rm_repo(module, repo):
+ """remove a repository"""
+ apt_repo(module, 'rm', repo)
+
+
+def set_repo(module, repo):
+ """add a repository and remove other repositories"""
+ # first add to validate repository
+ apt_repo(module, 'add', repo)
+ apt_repo(module, 'rm', 'all')
+ apt_repo(module, 'add', repo)
+
+
+def update(module):
+ """update package cache"""
+ apt_repo(module, 'update')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ remove_others=dict(type='bool', default=False),
+ update=dict(type='bool', default=False),
+ ),
+ )
+
+ if not os.path.exists(APT_REPO_PATH):
+ module.fail_json(msg='cannot find /usr/bin/apt-repo')
+
+ params = module.params
+ repo = params['repo']
+ state = params['state']
+ old_repositories = apt_repo(module)
+
+ if state == 'present':
+ if params['remove_others']:
+ set_repo(module, repo)
+ else:
+ add_repo(module, repo)
+ elif state == 'absent':
+ rm_repo(module, repo)
+
+ if params['update']:
+ update(module)
+
+ new_repositories = apt_repo(module)
+ changed = old_repositories != new_repositories
+ module.exit_json(changed=changed, repo=repo, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py
new file mode 100644
index 00000000..6b6bb7ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evgenii Terechkov
+# Written by Evgenii Terechkov <evg@altlinux.org>
+# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_rpm
+short_description: apt_rpm package manager
+description:
+    - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
+options:
+ package:
+ description:
+      - List of packages to install, upgrade, or remove.
+ required: true
+ aliases: [ name, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+      - Update the package database first, using C(apt-get update).
+ aliases: [ 'update-cache' ]
+ type: bool
+ default: no
+author:
+- Evgenii Terechkov (@evgkrsk)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: present
+
+- name: Install packages foo and bar
+ community.general.apt_rpm:
+ pkg:
+ - foo
+ - bar
+ state: present
+
+- name: Remove package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.apt_rpm:
+ pkg: foo,bar
+ state: absent
+
+# bar will be updated if a newer version exists
+- name: Update the package database and install bar
+ community.general.apt_rpm:
+ name: bar
+ state: present
+ update_cache: yes
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_PATH = "/usr/bin/apt-get"
+RPM_PATH = "/usr/bin/rpm"
+
+
+def query_package(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_package_provides(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
+ return rc == 0
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command("%s update" % APT_PATH)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db: %s" % err)
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+
+ rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
+
+        installed = True
+        for package in pkgspec:
+            if not query_package_provides(module, package):
+                installed = False
+
+        # apt-rpm always has exit code 0 if --force is used
+        if rc or not installed:
+            module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+        else:
+            module.exit_json(changed=True, msg="installed %s" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
+ ),
+ )
+
+ if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
+ module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ packages = p['package']
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, packages)
+
+ elif p['state'] in ['absent', 'removed']:
+ remove_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py
new file mode 100644
index 00000000..1be1a722
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak
+short_description: Manage flatpaks
+description:
+- Allows users to add or remove flatpaks.
+- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: path
+ default: flatpak
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The name of the flatpak to manage.
+ - When used with I(state=present), I(name) can be specified as an C(http(s)) URL to a
+ C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
+ - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
+ to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
+ - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
+ format.
+ - When supplying an C(http(s)) URL with I(state=absent), the module will try to match the
+ installed flatpak based on the name of the flatpakref to remove it. However, there is no
+ guarantee that the names of the flatpakref file and the reverse DNS name of the installed
+ flatpak do match.
+ type: str
+ required: true
+ remote:
+ description:
+ - The flatpak remote (repository) to install the flatpak from.
+    - By default, C(flathub) is assumed, but the C(flathub) flatpak remote must be
+      added before it can be used.
+ - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+ type: str
+ default: flathub
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present ]
+ type: str
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Install the spotify flatpak
+ community.general.flatpak:
+ name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+ state: present
+
+- name: Install the gedit flatpak package
+ community.general.flatpak:
+ name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
+ state: present
+
+- name: Install the gedit package from flathub for current user
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: present
+ method: user
+
+- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
+ community.general.flatpak:
+ name: org.gnome.Calendar
+ state: present
+ remote: gnome
+
+- name: Remove the gedit flatpak
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak install --user --nontinteractive flathub org.gnome.Calculator"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
+'''
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.basic import AnsibleModule
+
+OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
+
+
+def install_flat(module, binary, remote, name, method):
+ """Add a new flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+ if name.startswith('http://') or name.startswith('https://'):
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, name]
+ else:
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, remote, name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
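+
+# Illustrative resulting invocation (values assumed, cf. the RETURN 'command'
+# sample): with flatpak >= 1.1.3 and method=user this runs, e.g.,
+#   /usr/bin/flatpak install --user --noninteractive flathub org.gnome.gedit
+# while an older flatpak would get '-y' in place of '--noninteractive'.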
+
+
+def uninstall_flat(module, binary, name, method):
+ """Remove an existing flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+    installed_flat_name = _match_installed_flat_name(module, binary, name, method)
+    command = [binary, "uninstall", "--{0}".format(method), noninteractive_arg, installed_flat_name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def flatpak_exists(module, binary, name, method):
+    """Check if the flatpak is installed."""
+    command = [binary, "list", "--{0}".format(method), "--app"]
+    output = _flatpak_command(module, False, command)
+    name = _parse_flatpak_name(name).lower()
+    return name in output.lower()
+
+
+def _match_installed_flat_name(module, binary, name, method):
+    # This is tricky: if the user supplies a flatpakref URL, we have to rely
+    # on a naming convention, namely that the flatpakref file name matches
+    # the name of the flatpak it installs
+ global result
+ parsed_name = _parse_flatpak_name(name)
+ # Try running flatpak list with columns feature
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ _flatpak_command(module, False, command, ignore_failure=True)
+    if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
+        # Probably flatpak before 1.2
+        matched_flatpak_name = \
+            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+    else:
+        # Probably flatpak >= 1.2
+        matched_flatpak_name = \
+            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+
+ if matched_flatpak_name:
+ return matched_flatpak_name
+ else:
+ result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
+ "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
+ "If you used a URL, try using the reverse DNS name of the flatpak"
+ module.fail_json(**result)
+
+
+def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
+    global result
+    command = [binary, "list", "--{0}".format(method), "--app"]
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() in row.lower():
+            return row.split()[0]
+
+
+def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
+    global result
+    command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() == row.lower():
+            return row
+
+
+def _parse_flatpak_name(name):
+ if name.startswith('http://') or name.startswith('https://'):
+ file_name = urlparse(name).path.split('/')[-1]
+ file_name_without_extension = file_name.split('.')[0:-1]
+ common_name = ".".join(file_name_without_extension)
+ else:
+ common_name = name
+ return common_name
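+
+# A quick sketch of the naming convention this relies on (illustrative values):
+#   _parse_flatpak_name('https://example.org/repo/gedit.flatpakref')  # -> 'gedit'
+#   _parse_flatpak_name('org.gnome.gedit')                            # -> 'org.gnome.gedit'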
+
+
+def _flatpak_version(module, binary):
+ global result
+ command = [binary, "--version"]
+ output = _flatpak_command(module, False, command)
+ version_number = output.split()[1]
+ return version_number
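+
+# Illustrative parse, assuming the usual 'flatpak --version' banner:
+#   'Flatpak 1.8.5'.split()[1]  -> '1.8.5'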
+
+
+def _flatpak_command(module, noop, command, ignore_failure=False):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=not ignore_failure
+ )
+ return result['stdout']
+
+
+def main():
+ # This module supports check mode
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ remote=dict(type='str', default='flathub'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ executable=dict(type='path', default='flatpak')
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ remote = module.params['remote']
+ method = module.params['method']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ if state == 'present' and not flatpak_exists(module, binary, name, method):
+ install_flat(module, binary, remote, name, method)
+ elif state == 'absent' and flatpak_exists(module, binary, name, method):
+ uninstall_flat(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py
new file mode 100644
index 00000000..dbb211c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak_remote
+short_description: Manage flatpak repository remotes
+description:
+- Allows users to add or remove flatpak remotes.
+- The flatpak remotes concept is comparable to what is called repositories in other packaging
+ formats.
+- Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+- Existing remotes will not be updated.
+- See the M(community.general.flatpak) module for managing flatpaks.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: str
+ default: flatpak
+ flatpakrepo_url:
+ description:
+ - The URL to the I(flatpakrepo) file representing the repository remote to add.
+ - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
+ is added using the specified installation C(method).
+ - When used with I(state=absent), this is not required.
+ - Required when I(state=present).
+ type: str
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with I(state=present), the remote will be added to the managed host under
+ the specified I(name).
+    - When used with I(state=absent), the remote with that name will be removed.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates the desired package state.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Add the Gnome flatpak remote to the system installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: present
+ flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
+
+- name: Add the flathub flatpak repository remote to the user installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+
+- name: Remove the Gnome flatpak remote from the user installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: absent
+ method: user
+
+- name: Remove the flathub remote from the system installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def add_remote(module, binary, name, flatpakrepo_url, method):
+ """Add a new remote."""
+ global result
+ command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remove_remote(module, binary, name, method):
+ """Remove an existing remote."""
+ global result
+ command = [binary, "remote-delete", "--{0}".format(method), "--force", name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_exists(module, binary, name, method):
+ """Check if the remote exists."""
+ command = [binary, "remote-list", "-d", "--{0}".format(method)]
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return True
+ return False
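+
+# Illustrative: with 'flatpak remote-list -d' output like the RETURN stdout
+# sample ('flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t'), the first
+# whitespace-separated field is the remote name compared above.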
+
+
+def _flatpak_command(module, noop, command):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=True
+ )
+ return result['stdout']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ flatpakrepo_url=dict(type='str'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default="present",
+ choices=['absent', 'present']),
+ executable=dict(type='str', default="flatpak")
+ ),
+ # This module supports check mode
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ flatpakrepo_url = module.params['flatpakrepo_url']
+ method = module.params['method']
+ state = module.params['state']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ if flatpakrepo_url is None:
+ flatpakrepo_url = ''
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+ if state == 'present' and not remote_already_exists:
+ add_remote(module, binary, name, flatpakrepo_url, method)
+ elif state == 'absent' and remote_already_exists:
+ remove_remote(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py
new file mode 100644
index 00000000..21dea647
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py
@@ -0,0 +1,971 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on macports (Jimmy Tang <jcftang@gmail.com>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Andrew Dunham (@andrew-d)"
+requirements:
+ - "python >= 2.6"
+ - homebrew must already be installed on the target system
+short_description: Package manager for Homebrew
+description:
+ - Manages Homebrew packages
+options:
+ name:
+ description:
+ - A list of names of packages to install/remove.
+ aliases: [ 'formula', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "A ':' separated list of paths to search for 'brew' executable.
+ Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command,
+ providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+      - State of the package.
+ choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ]
+ default: present
+ type: str
+ update_homebrew:
+ description:
+      - Update homebrew itself first.
+ type: bool
+ default: no
+ aliases: ['update-brew']
+ upgrade_all:
+ description:
+      - Upgrade all homebrew packages.
+ type: bool
+ default: no
+ aliases: ['upgrade']
+ install_options:
+ description:
+      - Option flags to install a package.
+ aliases: ['options']
+ type: list
+ elements: str
+ upgrade_options:
+ description:
+ - Option flags to upgrade.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+notes:
+  - When used with C(loop:), each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option
+    (see the last task in the examples below).
+'''
+
+EXAMPLES = '''
+# Install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- community.general.homebrew:
+ name: foo
+ path: /my/other/location/bin
+ state: present
+
+# Update homebrew first and install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+ update_homebrew: yes
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: latest
+ update_homebrew: yes
+
+# Update homebrew and upgrade all packages
+- community.general.homebrew:
+ update_homebrew: yes
+ upgrade_all: yes
+
+# Miscellaneous other examples
+- community.general.homebrew:
+ name: foo
+ state: head
+
+- community.general.homebrew:
+ name: foo
+ state: linked
+
+- community.general.homebrew:
+ name: foo
+ state: absent
+
+- community.general.homebrew:
+ name: foo,bar
+ state: absent
+
+- community.general.homebrew:
+ name: foo
+ state: present
+ install_options: with-baz,enable-debug
+
+- name: Use ignored-pinned option while upgrading all
+ community.general.homebrew:
+ upgrade_all: yes
+ upgrade_options: ignored-pinned
+'''
+
+RETURN = '''
+msg:
+  description: status message summarizing the result, such as the counts of changed and unchanged packages
+ returned: always
+ type: str
+ sample: "Changed: 0, Unchanged: 2"
+unchanged_pkgs:
+ description:
+    - List of package names which are unchanged after the module run
+ returned: success
+ type: list
+ sample: ["awscli", "ag"]
+ version_added: '0.2.0'
+changed_pkgs:
+ description:
+    - List of package names which are changed after the module run
+ returned: success
+ type: list
+ sample: ['git', 'git-cola']
+ version_added: '0.2.0'
+'''
+
+import os.path
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
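+# Illustrative behaviour (assumed inputs): for VALID_PACKAGE_CHARS below, this
+# builds a complement class roughly like r'[^\w./\+\-:@]', so
+#   INVALID_PACKAGE_REGEX.search('git@2.30')   -> None  (valid name)
+#   INVALID_PACKAGE_REGEX.search('rm -rf *')   -> match (invalid name)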
+# /utils ------------------------------------------------------------------ }}}
+
+
+class Homebrew(object):
+ '''A class to manage Homebrew packages.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_PACKAGE_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \+ # plusses
+ \- # dashes
+ : # colons (for URLs)
+ @ # at-sign
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, string_types):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_package(cls, package):
+        '''A valid package is either None or a string made up of the characters in VALID_PACKAGE_CHARS.'''
+
+ if package is None:
+ return True
+
+ return (
+ isinstance(package, string_types)
+ and not cls.INVALID_PACKAGE_REGEX.search(package)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - None
+ - installed
+ - upgraded
+ - head
+ - linked
+ - unlinked
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'upgraded',
+ 'head',
+ 'linked',
+ 'unlinked',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_package(self):
+ return self._current_package
+
+ @current_package.setter
+ def current_package(self, package):
+ if not self.valid_package(package):
+ self._current_package = None
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(package)
+ raise HomebrewException(self.message)
+
+ else:
+ self._current_package = package
+ return package
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path, packages=None, state=None,
+ update_homebrew=False, upgrade_all=False,
+ install_options=None, upgrade_options=None):
+ if not install_options:
+ install_options = list()
+ if not upgrade_options:
+ upgrade_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all,
+ install_options=install_options,
+ upgrade_options=upgrade_options,)
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.changed_pkgs = []
+ self.unchanged_pkgs = []
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_package_is_installed(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ self.current_package,
+ ]
+ rc, out, err = self.module.run_command(cmd)
+ for line in out.split('\n'):
+ if (
+ re.search(r'Built from source', line)
+ or re.search(r'Poured from bottle', line)
+ ):
+ return True
+
+ return False
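+    # Illustrative 'brew info' lines the check above greps for (exact wording
+    # varies with the Homebrew version):
+    #   Poured from bottle on 2021-01-10 at 10:00:00
+    #   Built from source on 2021-01-10 at 10:00:00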
+
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'outdated',
+ self.current_package,
+ ])
+
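+        # 'brew outdated <formula>' exits non-zero when an upgrade is
+        # available, so a non-zero rc means the package is outdated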
+ return rc != 0
+
+ def _current_package_is_installed_from_head(self):
+ if not Homebrew.valid_package(self.current_package):
+ return False
+ elif not self._current_package_is_installed():
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'info',
+ self.current_package,
+ ])
+
+ try:
+ version_info = [line for line in out.split('\n') if line][0]
+ except IndexError:
+ return False
+
+ return version_info.split(' ')[-1] == 'HEAD'
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+ if self.upgrade_all:
+ self._upgrade_all()
+
+ if self.packages:
+ if self.state == 'installed':
+ return self._install_packages()
+ elif self.state == 'upgraded':
+ return self._upgrade_packages()
+ elif self.state == 'head':
+ return self._install_packages()
+ elif self.state == 'linked':
+ return self._link_packages()
+ elif self.state == 'unlinked':
+ return self._unlink_packages()
+ elif self.state == 'absent':
+ return self._uninstall_packages()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew would be updated.'
+ raise HomebrewException(self.message)
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew packages would be upgraded.'
+ raise HomebrewException(self.message)
+ cmd = [self.brew_path, 'upgrade'] + self.upgrade_options
+
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ if not out:
+ self.message = 'Homebrew packages already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already installed: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be installed: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ if self.state == 'head':
+ head = '--HEAD'
+ else:
+ head = None
+
+ opts = (
+ [self.brew_path, 'install']
+ + self.install_options
+ + [self.current_package, head]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package installed: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _install_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._install_current_package()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_package(self):
+ command = 'upgrade'
+
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ command = 'install'
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.message = 'Package is already upgraded: {0}'.format(
+ self.current_package,
+ )
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be upgraded: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package upgraded: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_all_packages(self):
+ opts = (
+ [self.brew_path, 'upgrade']
+ + self.install_options
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed = True
+ self.message = 'All packages upgraded.'
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_packages(self):
+ if not self.packages:
+ self._upgrade_all_packages()
+ else:
+ for package in self.packages:
+ self.current_package = package
+ self._upgrade_current_package()
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already uninstalled: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be uninstalled: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'uninstall', '--force']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _uninstall_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._uninstall_current_package()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+
+ # linked --------------------------------- {{{
+ def _link_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be linked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'link']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package linked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _link_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._link_current_package()
+
+ return True
+ # /linked -------------------------------- }}}
+
+ # unlinked ------------------------------- {{{
+ def _unlink_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be unlinked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'unlink']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package unlinked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _unlink_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._unlink_current_package()
+
+ return True
+ # /unlinked ------------------------------ }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "formula"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded", "head",
+ "linked", "unlinked",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ upgrade_options=dict(
+ default=None,
+ type='list',
+ elements='str',
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ packages = p['name']
+ else:
+ packages = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+    state = p['state']
+    if state in ('present', 'installed'):
+        state = 'installed'
+    elif state in ('latest', 'upgraded'):
+        state = 'upgraded'
+    elif state in ('absent', 'removed', 'uninstalled'):
+        state = 'absent'
+    # 'head', 'linked' and 'unlinked' are already in canonical form
+
+ update_homebrew = p['update_homebrew']
+ if not update_homebrew:
+ module.run_command_environ_update.update(
+ dict(HOMEBREW_NO_AUTO_UPDATE="True")
+ )
+ upgrade_all = p['upgrade_all']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ p['upgrade_options'] = p['upgrade_options'] or []
+ upgrade_options = ['--{0}'.format(upgrade_option)
+ for upgrade_option in p['upgrade_options']]
+ brew = Homebrew(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all, install_options=install_options,
+ upgrade_options=upgrade_options)
+ (failed, changed, message) = brew.run()
+ changed_pkgs = brew.changed_pkgs
+ unchanged_pkgs = brew.unchanged_pkgs
+
+ if failed:
+ module.fail_json(msg=message)
+ module.exit_json(
+ changed=changed,
+ msg=message,
+ unchanged_pkgs=unchanged_pkgs,
+ changed_pkgs=changed_pkgs
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py
new file mode 100644
index 00000000..feb1ba68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py
@@ -0,0 +1,875 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# Copyright: (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author:
+- "Indrajit Raychaudhuri (@indrajitr)"
+- "Daniel Jaouen (@danieljaouen)"
+- "Enric Lluelles (@enriclluelles)"
+requirements:
+- "python >= 2.6"
+short_description: Install and uninstall homebrew casks
+description:
+- Manages Homebrew casks.
+options:
+ name:
+ description:
+ - Name of cask to install or remove.
+ aliases: [ 'cask', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - State of the cask.
+ choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ]
+ default: present
+ type: str
+ sudo_password:
+ description:
+ - The sudo password to be passed to SUDO_ASKPASS.
+ required: false
+ type: str
+ update_homebrew:
+ description:
+ - Update homebrew itself first.
+ - Note that C(brew cask update) is a synonym for C(brew update).
+ type: bool
+ default: no
+ aliases: [ 'update-brew' ]
+ install_options:
+ description:
+ - Options flags to install a package.
+ aliases: [ 'options' ]
+ type: list
+ elements: str
+ accept_external_apps:
+ description:
+ - Allow external apps.
+ type: bool
+ default: no
+ upgrade_all:
+ description:
+ - Upgrade all casks.
+ - Mutually exclusive with C(upgraded) state.
+ type: bool
+ default: no
+ aliases: [ 'upgrade' ]
+ greedy:
+ description:
+ - Upgrade casks that auto update.
+    - Passes C(--greedy) to C(brew cask outdated) when checking
+      if an installed cask has a newer version available.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+
+- name: Remove cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'appdir=/Applications'
+
+- name: Install cask with multiple install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'debug,appdir=/Applications'
+
+- name: Allow external app
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ accept_external_apps: True
+
+- name: Remove cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+ install_options: force
+
+- name: Upgrade all casks
+ community.general.homebrew_cask:
+ upgrade_all: true
+
+- name: Upgrade given cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: upgraded
+ install_options: force
+
+- name: Upgrade cask with greedy option
+ community.general.homebrew_cask:
+ name: 1password
+ state: upgraded
+ greedy: True
+
+- name: Using sudo password for installing cask
+ community.general.homebrew_cask:
+ name: wireshark
+ state: present
+ sudo_password: "{{ ansible_become_pass }}"
+'''
+
+import os
+import re
+import tempfile
+from distutils import version
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \- # dashes
+ @ # at symbol
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, (string_types)):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+        '''A valid cask is either None or a string made up of the characters in VALID_CASK_CHARS.'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, string_types)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - installed
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_cask(self):
+ return self._current_cask
+
+ @current_cask.setter
+ def current_cask(self, cask):
+ if not self.valid_cask(cask):
+ self._current_cask = None
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(cask)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._current_cask = cask
+ return cask
+
+ @property
+ def brew_version(self):
+ try:
+ return self._brew_version
+ except AttributeError:
+ return None
+
+ @brew_version.setter
+ def brew_version(self, brew_version):
+ self._brew_version = brew_version
+
+ # /class properties -------------------------------------------- }}}
+
+    def __init__(self, module, path=None, casks=None, state=None,
+ sudo_password=None, update_homebrew=False,
+ install_options=None, accept_external_apps=False,
+ upgrade_all=False, greedy=False):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy, )
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewCaskException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewCaskException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewCaskException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_cask_is_outdated(self):
+ if not self.valid_cask(self.current_cask):
+ return False
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'outdated', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'outdated']
+
+ cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask]
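+        # e.g. ['/usr/local/bin/brew', 'outdated', '--cask', '--greedy', 'firefox']
+        # (brew path and cask illustrative); any output marks the cask as outdated.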
+
+ rc, out, err = self.module.run_command(cask_is_outdated_command)
+
+ return out != ""
+
+ def _current_cask_is_installed(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, "list", "--cask"]
+ else:
+ base_opts = [self.brew_path, "cask", "list"]
+
+ cmd = base_opts + [self.current_cask]
+ rc, out, err = self.module.run_command(cmd)
+
+        return rc == 0
+
+ def _get_brew_version(self):
+ if self.brew_version:
+ return self.brew_version
+
+ cmd = [self.brew_path, '--version']
+
+ rc, out, err = self.module.run_command(cmd, check_rc=True)
+
+ # get version string from first line of "brew --version" output
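+        # e.g. a first line of 'Homebrew 2.6.0' yields '2.6.0' (illustrative).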
+ version = out.split('\n')[0].split(' ')[1]
+ self.brew_version = version
+ return self.brew_version
+
+ def _brew_cask_command_is_deprecated(self):
+ # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/)
+ return version.LooseVersion(self._get_brew_version()) >= version.LooseVersion('2.6.0')
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+    def _run(self):
+        # Refresh Homebrew itself first when requested; the option was
+        # accepted but never acted upon otherwise.
+        if self.update_homebrew:
+            self._update_homebrew()
+
+        if self.upgrade_all:
+ return self._upgrade_all()
+
+ if self.casks:
+ if self.state == 'installed':
+ return self._install_casks()
+ elif self.state == 'upgraded':
+ return self._upgrade_casks()
+ elif self.state == 'absent':
+ return self._uninstall_casks()
+
+ self.failed = True
+ self.message = "You must select a cask to install."
+ raise HomebrewCaskException(self.message)
+
+ # sudo_password fix ---------------------- {{{
+ def _run_command_with_sudo_password(self, cmd):
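+        # sudo consults the program named in SUDO_ASKPASS when it cannot
+        # prompt on a terminal, so a throwaway script that just echoes the
+        # password lets brew's internal sudo calls run unattended.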
+ rc, out, err = '', '', ''
+
+ with tempfile.NamedTemporaryFile() as sudo_askpass_file:
+ sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password))
+ os.chmod(sudo_askpass_file.name, 0o700)
+ sudo_askpass_file.file.close()
+
+ rc, out, err = self.module.run_command(
+ cmd,
+ environ_update={'SUDO_ASKPASS': sudo_askpass_file.name}
+ )
+
+ self.module.add_cleanup_file(sudo_askpass_file.name)
+
+ return (rc, out, err)
+ # /sudo_password fix --------------------- }}}
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Casks would be upgraded.'
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ cmd = [self.brew_path, 'upgrade', '--cask']
+ else:
+ cmd = [self.brew_path, 'cask', 'upgrade']
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE):
+ self.message = 'Homebrew casks already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew casks upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be installed: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'install', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'install']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask installed: {0}'.format(self.current_cask)
+ return True
+ elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err):
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _install_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._install_current_cask()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_cask(self):
+ command = 'upgrade'
+
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ command = 'install'
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.message = 'Cask is already upgraded: {0}'.format(
+ self.current_cask,
+ )
+ self.unchanged_count += 1
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be upgraded: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, command, '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', command]
+
+ opts = base_opts + self.install_options + [self.current_cask]
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask upgraded: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _upgrade_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._upgrade_current_cask()
+
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already uninstalled: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be uninstalled: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'uninstall', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'uninstall']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _uninstall_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._uninstall_current_cask()
+
+ return True
+ # /uninstalled --------------------------- }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "cask"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ sudo_password=dict(
+ type="str",
+ required=False,
+ no_log=True,
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ accept_external_apps=dict(
+ default=False,
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ greedy=dict(
+ default=False,
+ type='bool',
+ ),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ casks = p['name']
+ else:
+ casks = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ sudo_password = p['sudo_password']
+
+ update_homebrew = p['update_homebrew']
+ upgrade_all = p['upgrade_all']
+ greedy = p['greedy']
+ p['install_options'] = p['install_options'] or []
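+    # Each entry is handed to brew with a leading '--', so (illustrative)
+    # install_options: ['appdir=/Applications'] becomes '--appdir=/Applications'.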
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ accept_external_apps = p['accept_external_apps']
+
+ brew_cask = HomebrewCask(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy,
+ )
+ (failed, changed, message) = brew_cask.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py
new file mode 100644
index 00000000..d31da485
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_tap
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+short_description: Tap a Homebrew repository.
+description:
+ - Tap external Homebrew repositories.
+options:
+ name:
+ description:
+ - The GitHub user/organization repository to tap.
+ required: true
+ aliases: ['tap']
+ type: list
+ elements: str
+ url:
+ description:
+ - The optional git URL of the repository to tap. The URL is not
+ assumed to be on GitHub, and the protocol doesn't have to be HTTP.
+ Any location and protocol that git can handle is fine.
+      - The I(name) option may only contain a single tap (not a list of
+        taps) when this option is provided.
+ required: false
+ type: str
+ state:
+ description:
+      - State of the repository.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: 'present'
+ type: str
+requirements: [ homebrew ]
+'''
+
+EXAMPLES = '''
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+
+- name: Tap a Homebrew repository, state absent
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+ state: absent
+
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes,homebrew/science
+ state: present
+
+- name: Tap a Homebrew repository using url, state present
+ community.general.homebrew_tap:
+ name: telemachus/brew
+ url: 'https://bitbucket.org/telemachus/brew'
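+
+# A tap added from a custom url is later removed by name alone; a minimal
+# sketch reusing the tap from the example above:
+- name: Untap a repository that was added with a custom url
+  community.general.homebrew_tap:
+    name: telemachus/brew
+    state: absent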
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def a_valid_tap(tap):
+ '''Returns True if the tap is valid.'''
+ regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
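+    # e.g. 'homebrew/science' and 'someuser/homebrew-mytap' match;
+    # 'someuser/mytap/extra' does not (names illustrative).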
+ return regex.match(tap)
+
+
+def already_tapped(module, brew_path, tap):
+ '''Returns True if already tapped.'''
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ ])
+
+ taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
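+    # `brew tap` lists 'user/homebrew-repo' as 'user/repo', so strip the
+    # 'homebrew-' part from the requested name before comparing.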
+ tap_name = re.sub('homebrew-', '', tap.lower())
+
+ return tap_name in taps
+
+
+def add_tap(module, brew_path, tap, url=None):
+ '''Adds a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif not already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+        cmd = [brew_path, 'tap', tap]
+        # url is optional; only append it when provided so run_command
+        # never receives a None argument.
+        if url:
+            cmd.append(url)
+        rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ msg = 'successfully tapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to tap: %s' % tap
+
+ else:
+ msg = 'already tapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def add_taps(module, brew_path, taps):
+ '''Adds one or more taps.'''
+ failed, unchanged, added, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = add_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ added += 1
+ else:
+ unchanged += 1
+
+ if failed:
+        msg = 'added: %d, unchanged: %d, error: %s' % (added, unchanged, msg)
+ elif added:
+ changed = True
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+ else:
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+
+ return (failed, changed, msg)
+
+
+def remove_tap(module, brew_path, tap):
+ '''Removes a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'untap',
+ tap,
+ ])
+ if not already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully untapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to untap: %s' % tap
+
+ else:
+ msg = 'already untapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def remove_taps(module, brew_path, taps):
+ '''Removes one or more taps.'''
+ failed, unchanged, removed, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = remove_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ removed += 1
+ else:
+ unchanged += 1
+
+ if failed:
+        msg = 'removed: %d, unchanged: %d, error: %s' % (removed, unchanged, msg)
+ elif removed:
+ changed = True
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+ else:
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+ return (failed, changed, msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['tap'], type='list', required=True, elements='str'),
+ url=dict(default=None, required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True,
+ )
+
+ brew_path = module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=['/usr/local/bin', '/opt/homebrew/bin']
+ )
+
+ taps = module.params['name']
+ url = module.params['url']
+
+ if module.params['state'] == 'present':
+ if url is None:
+ # No tap URL provided explicitly, continue with bulk addition
+ # of all the taps.
+ failed, changed, msg = add_taps(module, brew_path, taps)
+ else:
+        # When a tap URL is provided explicitly, we allow adding a
+        # *single* tap only. Validate and proceed to add the single tap.
+ if len(taps) > 1:
+ msg = "List of multiple taps may not be provided with 'url' option."
+ module.fail_json(msg=msg)
+ else:
+ failed, changed, msg = add_tap(module, brew_path, taps[0], url)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+ elif module.params['state'] == 'absent':
+ failed, changed, msg = remove_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py
new file mode 100644
index 00000000..af7a950a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: installp
+author:
+- Kairo Araujo (@kairoaraujo)
+short_description: Manage packages on AIX
+description:
+ - Manage packages using 'installp' on AIX
+options:
+ accept_license:
+ description:
+ - Whether to accept the license for the package(s).
+ type: bool
+ default: no
+ name:
+ description:
+ - One or more packages to install or remove.
+      - Use C(all) to install all packages available in the specified C(repository_path).
+ type: list
+ elements: str
+ required: true
+ aliases: [ pkg ]
+ repository_path:
+ description:
+ - Path with AIX packages (required to install).
+ type: path
+ state:
+ description:
+ - Whether the package needs to be present on or absent from the system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+- If the package is already installed, even if the package/fileset is new, the module will not install it.
+'''
+
+EXAMPLES = r'''
+- name: Install package foo
+ community.general.installp:
+ name: foo
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master only
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Remove packages bos.sysmgt.nim.master
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ state: absent
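+
+# A minimal sketch of the 'all' shortcut described in the documentation
+# above (repository path illustrative):
+- name: Install all packages available in the repository
+  community.general.installp:
+    name: all
+    repository_path: /repository/AIX71/installp/base
+    accept_license: yes
+    state: present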
+'''
+
+RETURN = r''' # '''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_new_pkg(module, package, repository_path):
+ """
+    Check whether the package or fileset name is valid and available in the repository path.
+
+ :param module: Ansible module arguments spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package information.
+ """
+
+ if os.path.isdir(repository_path):
+ installp_cmd = module.get_bin_path('installp', True)
+ rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+
+ if package == 'all':
+ pkg_info = "All packages on dir"
+ return True, pkg_info
+
+ else:
+ pkg_info = {}
+ for line in package_result.splitlines():
+ if re.findall(package, line):
+ pkg_name = line.split()[0].strip()
+ pkg_version = line.split()[1].strip()
+ pkg_info[pkg_name] = pkg_version
+
+ return True, pkg_info
+
+ return False, None
+
+ else:
+ module.fail_json(msg="Repository path %s is not valid." % repository_path)
+
+
+def _check_installed_pkg(module, package, repository_path):
+ """
+ Check the package on AIX.
+    It verifies whether the package is installed and gathers its information.
+
+ :param module: Ansible module parameters spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package data.
+ """
+
+ lslpp_cmd = module.get_bin_path('lslpp', True)
+ rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
+
+ if rc == 1:
+ package_state = ' '.join(err.split()[-2:])
+ if package_state == 'not installed.':
+ return False, None
+ else:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
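+    # lslpp -lc prints colon-separated records whose first three fields are
+    # package, fileset and level, e.g. (illustrative):
+    #   bos.sysmgt:bos.sysmgt.nim.master:7.2.1.0:...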
+ pkg_data = {}
+ full_pkg_data = lslpp_result.splitlines()
+ for line in full_pkg_data:
+ pkg_name, fileset, level = line.split(':')[0:3]
+ pkg_data[pkg_name] = fileset, level
+
+ return True, pkg_data
+
+
+def remove(module, installp_cmd, packages):
+ repository_path = None
+ remove_count = 0
+ removed_pkgs = []
+ not_found_pkg = []
+ for package in packages:
+ pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
+
+ if pkg_check:
+ if not module.check_mode:
+ rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+ remove_count += 1
+ removed_pkgs.append(package)
+
+ else:
+ not_found_pkg.append(package)
+
+ if remove_count > 0:
+        if len(not_found_pkg) > 0:
+ not_found_pkg.insert(0, "Package(s) not found: ")
+
+ changed = True
+ msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
+
+ else:
+ changed = False
+ msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
+
+ return changed, msg
+
+
+def install(module, installp_cmd, packages, repository_path, accept_license):
+ installed_pkgs = []
+ not_found_pkgs = []
+ already_installed_pkgs = {}
+
+ accept_license_param = {
+ True: '-Y',
+ False: '',
+ }
+
+ # Validate if package exists on repository path.
+ for package in packages:
+ pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
+
+ # If package exists on repository path, check if package is installed.
+ if pkg_check:
+ pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
+
+ # If package is already installed.
+ if pkg_check_current:
+ # Check if package is a package and not a fileset, get version
+ # and add the package into already installed list
+ if package in pkg_info.keys():
+ already_installed_pkgs[package] = pkg_info[package][1]
+
+ else:
+ # If the package is not a package but a fileset, confirm
+ # and add the fileset/package into already installed list
+ for key in pkg_info.keys():
+ if package in pkg_info[key]:
+ already_installed_pkgs[package] = pkg_info[key][1]
+
+ else:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp", rc=rc, err=err)
+ installed_pkgs.append(package)
+
+ else:
+ not_found_pkgs.append(package)
+
+ if len(installed_pkgs) > 0:
+ installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
+ else:
+ installed_msg = ''
+
+ if len(not_found_pkgs) > 0:
+ not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
+ else:
+ not_found_msg = ''
+
+ if len(already_installed_pkgs) > 0:
+ already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
+ else:
+ already_installed_msg = ''
+
+ if len(installed_pkgs) > 0:
+ changed = True
+ msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+ else:
+ changed = False
+ msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ repository_path=dict(type='path'),
+ accept_license=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ repository_path = module.params['repository_path']
+ accept_license = module.params['accept_license']
+ state = module.params['state']
+
+ installp_cmd = module.get_bin_path('installp', True)
+
+ if state == 'present':
+ if repository_path is None:
+ module.fail_json(msg="repository_path is required to install package")
+
+ changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
+
+ elif state == 'absent':
+ changed, msg = remove(module, installp_cmd, name)
+
+ else:
+ module.fail_json(changed=False, msg="Unexpected state.")
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py
new file mode 100644
index 00000000..3c990205
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: layman
+author: "Jakub Jirutka (@jirutka)"
+short_description: Manage Gentoo overlays
+description:
+  - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+    Please note that Layman must be installed on a managed node prior to using this module.
+requirements:
+ - "python >= 2.6"
+ - layman python module
+options:
+ name:
+ description:
+ - The overlay id to install, synchronize, or uninstall.
+ Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
+ required: true
+ type: str
+ list_url:
+ description:
+      - A URL of the alternative overlays list that defines the overlay to install.
+        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+        C(overlay_defs) is read from Layman's configuration.
+ aliases: [url]
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ default: present
+ choices: [present, absent, updated]
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ set to C(no) when no other option exists. Prior to 1.9.3 the code
+ defaulted to C(no).
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+- name: Install the overlay mozilla which is on the central overlays list
+ community.general.layman:
+ name: mozilla
+
+- name: Install the overlay cvut from the specified alternative list
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+
+- name: Update (sync) the overlay cvut or install if not installed yet
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+ state: updated
+
+- name: Update (sync) all of the installed overlays
+ community.general.layman:
+ name: ALL
+ state: updated
+
+- name: Uninstall the overlay cvut
+ community.general.layman:
+ name: cvut
+ state: absent
+'''
+
+import shutil
+import traceback
+
+from os import path
+
+LAYMAN_IMP_ERR = None
+try:
+ from layman.api import LaymanAPI
+ from layman.config import BareConfig
+ HAS_LAYMAN_API = True
+except ImportError:
+ LAYMAN_IMP_ERR = traceback.format_exc()
+ HAS_LAYMAN_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+
+USERAGENT = 'ansible-httpget'
+
+
+class ModuleError(Exception):
+ pass
+
+
+def init_layman(config=None):
+ '''Returns the initialized ``LaymanAPI``.
+
+ :param config: the layman's configuration to use (optional)
+ '''
+ if config is None:
+ config = BareConfig(read_configfile=True, quietness=1)
+ return LaymanAPI(config)
+
+
+def download_url(module, url, dest):
+ '''
+ :param url: the URL to download
+ :param dest: the absolute path of where to save the downloaded content to;
+ it must be writable and not a directory
+
+ :raises ModuleError
+ '''
+
+ # Hack to add params in the form that fetch_url expects
+ module.params['http_agent'] = USERAGENT
+ response, info = fetch_url(module, url)
+ if info['status'] != 200:
+ raise ModuleError("Failed to get %s: %s" % (url, info['msg']))
+
+ try:
+ with open(dest, 'w') as f:
+ shutil.copyfileobj(response, f)
+ except IOError as e:
+ raise ModuleError("Failed to write: %s" % str(e))
+
+
+def install_overlay(module, name, list_url=None):
+ '''Installs the overlay repository. If not on the central overlays list,
+ then :list_url of an alternative list must be provided. The list will be
+    fetched and saved under ``${overlay_defs}/${name}.xml`` (location of the
+    ``overlay_defs`` is read from Layman's configuration).
+
+ :param name: the overlay id
+ :param list_url: the URL of the remote repositories list to look for the overlay
+ definition (optional, default: None)
+
+ :returns: True if the overlay was installed, or False if already exists
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ # read Layman configuration
+ layman_conf = BareConfig(read_configfile=True)
+ layman = init_layman(layman_conf)
+
+ if layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would add layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ if not layman.is_repo(name):
+ if not list_url:
+ raise ModuleError("Overlay '%s' is not on the list of known "
+ "overlays and URL of the remote list was not provided." % name)
+
+ overlay_defs = layman_conf.get_option('overlay_defs')
+ dest = path.join(overlay_defs, name + '.xml')
+
+ download_url(module, list_url, dest)
+
+ # reload config
+ layman = init_layman()
+
+ if not layman.add_repos(name):
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def uninstall_overlay(module, name):
+ '''Uninstalls the given overlay repository from the system.
+
+ :param name: the overlay id to uninstall
+
+ :returns: True if the overlay was uninstalled, or False if doesn't exist
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would remove layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ layman.delete_repos(name)
+ if layman.get_errors():
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def sync_overlay(name):
+ '''Synchronizes the specified overlay repository.
+
+ :param name: the overlay repository id to sync
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.sync(name):
+ messages = [str(item[1]) for item in layman.sync_results[2]]
+ raise ModuleError(messages)
+
+
+def sync_overlays():
+ '''Synchronize all of the installed overlays.
+
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ for name in layman.get_installed():
+ sync_overlay(name)
+
+
+def main():
+ # define module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ list_url=dict(aliases=['url']),
+ state=dict(default="present", choices=['present', 'absent', 'updated']),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_LAYMAN_API:
+ module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR)
+
+ state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+ changed = False
+ try:
+ if state == 'present':
+ changed = install_overlay(module, name, url)
+
+ elif state == 'updated':
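+            # 'updated' first makes sure the overlay is installed; if it
+            # already was, install_overlay() returns False and we sync instead.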
+ if name == 'ALL':
+ sync_overlays()
+ elif install_overlay(module, name, url):
+ changed = True
+ else:
+ sync_overlay(name)
+ else:
+ changed = uninstall_overlay(module, name)
+
+ except ModuleError as e:
+        # Exception.message does not exist on Python 3; stringify instead.
+        module.fail_json(msg=str(e))
+ else:
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py
new file mode 100644
index 00000000..a865a8f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jimmy Tang <jcftang@gmail.com>
+# Based on opkg (Patrick Pelletier <pp.pelletier@gmail.com>), pacman
+# (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: macports
+author: "Jimmy Tang (@jcftang)"
+short_description: Package manager for MacPorts
+description:
+ - Manages MacPorts packages (ports)
+options:
+ name:
+ description:
+ - A list of port names.
+ aliases: ['port']
+ type: list
+ elements: str
+ selfupdate:
+ description:
+ - Update Macports and the ports tree, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port selfupdate).
+ aliases: ['update_cache', 'update_ports']
+ default: "no"
+ type: bool
+ state:
+ description:
+ - Indicates the desired state of the port.
+ choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed']
+ default: present
+ type: str
+ upgrade:
+ description:
+ - Upgrade all outdated ports, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port upgrade outdated).
+ default: "no"
+ type: bool
+ variant:
+ description:
+ - A port variant specification.
+ - 'C(variant) is only supported with state: I(installed)/I(present).'
+ aliases: ['variants']
+ type: str
+'''
+EXAMPLES = '''
+- name: Install the foo port
+ community.general.macports:
+ name: foo
+
+- name: Install the universal, x11 variant of the foo port
+ community.general.macports:
+ name: foo
+ variant: +universal+x11
+
+- name: Install a list of ports
+ community.general.macports:
+ name: "{{ ports }}"
+ vars:
+ ports:
+ - foo
+ - foo-tools
+
+- name: Update Macports and the ports tree, then upgrade all outdated ports
+ community.general.macports:
+ selfupdate: yes
+ upgrade: yes
+
+- name: Update Macports and the ports tree, then install the foo port
+ community.general.macports:
+ name: foo
+ selfupdate: yes
+
+- name: Remove the foo port
+ community.general.macports:
+ name: foo
+ state: absent
+
+- name: Activate the foo port
+ community.general.macports:
+ name: foo
+ state: active
+
+- name: Deactivate the foo port
+ community.general.macports:
+ name: foo
+ state: inactive
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def selfupdate(module, port_path):
+ """ Update Macports and the ports tree. """
+
+ rc, out, err = module.run_command("%s -v selfupdate" % port_path)
+
+ if rc == 0:
+ updated = any(
+ re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or
+ re.search(r'Installing new Macports release', s.strip())
+ for s in out.split('\n')
+ if s
+ )
+ if updated:
+ changed = True
+ msg = "Macports updated successfully"
+ else:
+ changed = False
+ msg = "Macports already up-to-date"
+
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)
+
+
+def upgrade(module, port_path):
+ """ Upgrade outdated ports. """
+
+ rc, out, err = module.run_command("%s upgrade outdated" % port_path)
+
+ # rc is 1 when nothing to upgrade so check stdout first.
+ if out.strip() == "Nothing to upgrade.":
+ changed = False
+ msg = "Ports already upgraded"
+ return (changed, msg)
+ elif rc == 0:
+ changed = True
+ msg = "Outdated ports upgraded successfully"
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
+
+
+def query_port(module, port_path, name, state="present"):
+ """ Returns whether a port is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and out.strip().startswith(name + " "):
+ return True
+
+ return False
+
+ elif state == "active":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and "(active)" in out:
+ return True
+
+ return False
+
+
+def remove_ports(module, port_path, ports):
+ """ Uninstalls one or more ports if installed. """
+
+ remove_c = 0
+    # Loop over ports so that, on error, we can report which port failed.
+ for port in ports:
+ # Query the port first, to see if we even need to remove
+ if not query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
+
+ if query_port(module, port_path, port):
+ module.fail_json(msg="Failed to remove %s: %s" % (port, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="Port(s) already absent")
+
+
+def install_ports(module, port_path, ports, variant):
+ """ Installs one or more ports if not already installed. """
+
+ install_c = 0
+
+ for port in ports:
+ if query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
+
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to install %s: %s" % (port, err))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="Port(s) already present")
+
+
+def activate_ports(module, port_path, ports):
+ """ Activate a port if it's inactive. """
+
+ activate_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
+
+ if query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s activate %s" % (port_path, port))
+
+ if not query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to activate %s: %s" % (port, err))
+
+ activate_c += 1
+
+ if activate_c > 0:
+ module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
+
+ module.exit_json(changed=False, msg="Port(s) already active")
+
+
+def deactivate_ports(module, port_path, ports):
+ """ Deactivate a port if it's active. """
+
+ deactivated_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
+
+ if not query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
+
+ if query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
+
+ deactivated_c += 1
+
+ if deactivated_c > 0:
+ module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
+
+ module.exit_json(changed=False, msg="Port(s) already inactive")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=["port"]),
+ selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
+ upgrade=dict(default=False, type='bool'),
+ variant=dict(aliases=["variants"], default=None, type='str')
+ )
+ )
+
+ port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
+
+ p = module.params
+
+ if p["selfupdate"]:
+ (changed, msg) = selfupdate(module, port_path)
+ if not (p["name"] or p["upgrade"]):
+ module.exit_json(changed=changed, msg=msg)
+
+ if p["upgrade"]:
+ (changed, msg) = upgrade(module, port_path)
+ if not p["name"]:
+ module.exit_json(changed=changed, msg=msg)
+
+ pkgs = p["name"]
+
+ variant = p["variant"]
+
+ if p["state"] in ["present", "installed"]:
+ install_ports(module, port_path, pkgs, variant)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_ports(module, port_path, pkgs)
+
+ elif p["state"] == "active":
+ activate_ports(module, port_path, pkgs)
+
+ elif p["state"] == "inactive":
+ deactivate_ports(module, port_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py
new file mode 100644
index 00000000..bc3e6dfd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
+# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mas
+short_description: Manage Mac App Store applications with mas-cli
+description:
+ - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
+version_added: '0.2.0'
+author:
+ - Michael Heap (@mheap)
+ - Lukas Bestle (@lukasbestle)
+options:
+ id:
+ description:
+ - The Mac App Store identifier of the app(s) you want to manage.
+ - This can be found by running C(mas search APP_NAME) on your machine.
+ type: list
+ elements: int
+ state:
+ description:
+ - Desired state of the app installation.
+ - The C(absent) value requires root permissions, also see the examples.
+ type: str
+ choices:
+ - absent
+ - latest
+ - present
+ default: present
+ upgrade_all:
+ description:
+ - Upgrade all installed Mac App Store apps.
+ type: bool
+ default: "no"
+ aliases: ["upgrade"]
+requirements:
+ - macOS 10.11+
+ - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
+ - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Install Keynote
+ community.general.mas:
+ id: 409183694
+ state: present
+
+- name: Install Divvy with command mas installed in /usr/local/bin
+ community.general.mas:
+ id: 413857545
+ state: present
+ environment:
+ PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
+
+- name: Install a list of apps
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+
+- name: Ensure the latest Keynote version is installed
+ community.general.mas:
+ id: 409183694
+ state: latest
+
+- name: Upgrade all installed Mac App Store apps
+ community.general.mas:
+ upgrade_all: yes
+
+- name: Install specific apps and also upgrade all others
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+ upgrade_all: yes
+
+- name: Uninstall Divvy
+ community.general.mas:
+ id: 413857545
+ state: absent
+ become: yes # Uninstallation requires root permissions
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import StrictVersion
+import os
+
+
+class Mas(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ # Initialize data properties
+ self.mas_path = self.module.get_bin_path('mas')
+ self._checked_signin = False
+ self._installed = None # Populated only if needed
+ self._outdated = None # Populated only if needed
+ self.count_install = 0
+ self.count_upgrade = 0
+ self.count_uninstall = 0
+ self.result = {
+ 'changed': False
+ }
+
+ self.check_mas_tool()
+
+ def app_command(self, command, id):
+ ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
+
+ if not self.module.check_mode:
+ if command != 'uninstall':
+ self.check_signin()
+
+ rc, out, err = self.run([command, str(id)])
+ if rc != 0:
+ self.module.fail_json(
+ msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
+ )
+
+ # No error or dry run
+ self.__dict__['count_' + command] += 1
+
+ def check_mas_tool(self):
+ ''' Verifies that the `mas` tool is available in a recent version '''
+
+ # Is the `mas` tool available at all?
+ if not self.mas_path:
+ self.module.fail_json(msg='Required `mas` tool is not installed')
+
+ # Is the version recent enough?
+ rc, out, err = self.run(['version'])
+ if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
+ self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
+
+ def check_signin(self):
+ ''' Verifies that the user is signed in to the Mac App Store '''
+
+ # Only check this once per execution
+ if self._checked_signin:
+ return
+
+ rc, out, err = self.run(['account'])
+ if out.split("\n", 1)[0].rstrip() == 'Not signed in':
+ self.module.fail_json(msg='You must be signed in to the Mac App Store')
+
+ self._checked_signin = True
+
+ def exit(self):
+ ''' Exit with the data we have collected over time '''
+
+ msgs = []
+ if self.count_install > 0:
+ msgs.append('Installed {0} app(s)'.format(self.count_install))
+ if self.count_upgrade > 0:
+ msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
+ if self.count_uninstall > 0:
+ msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
+
+ if msgs:
+ self.result['changed'] = True
+ self.result['msg'] = ', '.join(msgs)
+
+ self.module.exit_json(**self.result)
+
+ def get_current_state(self, command):
+ ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
+
+ rc, raw_apps, err = self.run([command])
+ rows = raw_apps.split("\n")
+ if rows[0] == "No installed apps found":
+ rows = []
+ apps = []
+ for r in rows:
+ # Format: "123456789 App Name"
+ r = r.split(' ', 1)
+ if len(r) == 2:
+ apps.append(int(r[0]))
+
+ return apps
+
+ def installed(self):
+ ''' Returns the list of installed apps '''
+
+ # Populate cache if not already done
+ if self._installed is None:
+ self._installed = self.get_current_state('list')
+
+ return self._installed
+
+ def is_installed(self, id):
+ ''' Checks whether the given app is installed '''
+
+ return int(id) in self.installed()
+
+ def is_outdated(self, id):
+ ''' Checks whether the given app is installed, but outdated '''
+
+ return int(id) in self.outdated()
+
+ def outdated(self):
+ ''' Returns the list of installed, but outdated apps '''
+
+ # Populate cache if not already done
+ if self._outdated is None:
+ self._outdated = self.get_current_state('outdated')
+
+ return self._outdated
+
+ def run(self, cmd):
+ ''' Runs a command of the `mas` tool '''
+
+ cmd.insert(0, self.mas_path)
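+        # The positional False is run_command's check_rc flag; callers
+        # examine rc themselves instead of failing the module outright.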
+ return self.module.run_command(cmd, False)
+
+ def upgrade_all(self):
+ ''' Upgrades all installed apps and sets the correct result data '''
+
+ outdated = self.outdated()
+
+ if not self.module.check_mode:
+ self.check_signin()
+
+ rc, out, err = self.run(['upgrade'])
+ if rc != 0:
+ self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
+
+ self.count_upgrade += len(outdated)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='list', elements='int'),
+ state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
+ upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
+ ),
+ supports_check_mode=True
+ )
+ mas = Mas(module)
+
+ if module.params['id']:
+ apps = module.params['id']
+ else:
+ apps = []
+
+ state = module.params['state']
+ upgrade = module.params['upgrade_all']
+
+ # Run operations on the given app IDs
+ for app in sorted(set(apps)):
+ if state == 'present':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+
+ elif state == 'absent':
+ if mas.is_installed(app):
+ # Ensure we are root
+ if os.getuid() != 0:
+ module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
+
+ mas.app_command('uninstall', app)
+
+ elif state == 'latest':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+ elif mas.is_outdated(app):
+ mas.app_command('upgrade', app)
+
+ # Upgrade all apps if requested
+ mas._outdated = None # Clear cache
+ if upgrade and mas.outdated():
+ mas.upgrade_all()
+
+ # Exit with the collected data
+ mas.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py
new file mode 100644
index 00000000..7432c48a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py
@@ -0,0 +1,653 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrik Lundin <patrik@sigterm.se>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: openbsd_pkg
+author:
+- Patrik Lundin (@eest)
+short_description: Manage packages on OpenBSD
+description:
+ - Manage packages on OpenBSD using the pkg tools.
+requirements:
+- python >= 2.5
+options:
+ name:
+ description:
+ - A name or a list of names of the packages.
+ required: yes
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ choices: [ absent, latest, present, installed, removed ]
+ default: present
+ type: str
+ build:
+ description:
+ - Build the package from source instead of downloading and installing
+ a binary. Requires that the port source tree is already installed.
+ Automatically builds and installs the 'sqlports' package, if it is
+ not already installed.
+ - Mutually exclusive with I(snapshot).
+ type: bool
+ default: no
+ snapshot:
+ description:
+ - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
+ - Mutually exclusive with I(build).
+ type: bool
+ default: no
+ version_added: 1.3.0
+ ports_dir:
+ description:
+ - When used in combination with the C(build) option, allows overriding
+ the default ports source directory.
+ default: /usr/ports
+ type: path
+ clean:
+ description:
+ - When updating or removing packages, delete the extra configuration
+ file(s) in the old packages which are annotated with @extra in
+ the packaging-list.
+ type: bool
+ default: no
+ quick:
+ description:
+ - Replace or delete packages quickly; do not bother with checksums
+ before removing normal files.
+ type: bool
+ default: no
+notes:
+  - When used with a `loop:` each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+EXAMPLES = '''
+- name: Make sure nmap is installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+
+- name: Make sure nmap is the latest version
+ community.general.openbsd_pkg:
+ name: nmap
+ state: latest
+
+- name: Make sure nmap is not installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: absent
+
+- name: Make sure nmap is installed, build it from source if it is not
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+ build: yes
+
+- name: Specify a pkg flavour with '--'
+ community.general.openbsd_pkg:
+ name: vim--no_x11
+ state: present
+
+- name: Specify the default flavour to avoid ambiguity errors
+ community.general.openbsd_pkg:
+ name: vim--
+ state: present
+
+- name: Specify a package branch (requires at least OpenBSD 6.0)
+ community.general.openbsd_pkg:
+ name: python%3.5
+ state: present
+
+- name: Update all packages on the system
+ community.general.openbsd_pkg:
+ name: '*'
+ state: latest
+
+- name: Purge a package and its configuration files
+ community.general.openbsd_pkg:
+ name: mpd
+ clean: yes
+ state: absent
+
+- name: Quickly remove a package without checking checksums
+ community.general.openbsd_pkg:
+ name: qt5
+ quick: yes
+ state: absent
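+
+# A minimal sketch of the snapshot option documented above:
+- name: Install a package from snapshots even on a release kernel
+  community.general.openbsd_pkg:
+    name: nmap
+    snapshot: yes
+    state: present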
+'''
+
+import os
+import platform
+import re
+import shlex
+import sqlite3
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+# Function used for executing commands.
+def execute_command(cmd, module):
+ # Break command line into arguments.
+ # This makes run_command() use shell=False which we need to not cause shell
+ # expansion of special characters like '*'.
+ cmd_args = shlex.split(cmd)
+ return module.run_command(cmd_args)
+
+
+# Function used to find out if a package is currently installed.
+def get_package_state(names, pkg_spec, module):
+ info_cmd = 'pkg_info -Iq'
+
+ for name in names:
+ command = "%s inst:%s" % (info_cmd, name)
+
+ rc, stdout, stderr = execute_command(command, module)
+
+ if stderr:
+ module.fail_json(msg="failed in get_package_state(): " + stderr)
+
+ if stdout:
+ # If the requested package name is just a stem, like "python", we may
+ # find multiple packages with that name.
+            pkg_spec[name]['installed_names'] = stdout.splitlines()
+ module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
+ pkg_spec[name]['installed_state'] = True
+ else:
+ pkg_spec[name]['installed_state'] = False
+
+
+# Function used to make sure a package is present.
+def package_present(names, pkg_spec, module):
+ build = module.params['build']
+
+ for name in names:
+ # It is possible package_present() has been called from package_latest().
+ # In that case we do not want to operate on the whole list of names,
+ # only the leftovers.
+ if pkg_spec['package_latest_leftovers']:
+ if name not in pkg_spec['package_latest_leftovers']:
+ module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
+ continue
+ else:
+ module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
+
+ if module.check_mode:
+ install_cmd = 'pkg_add -Imn'
+ else:
+ if build is True:
+ port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+ if os.path.isdir(port_dir):
+ if pkg_spec[name]['flavor']:
+ flavors = pkg_spec[name]['flavor'].replace('-', ' ')
+ install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+ elif pkg_spec[name]['subpackage']:
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+ pkg_spec[name]['subpackage'])
+ else:
+ install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+ else:
+ module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+ else:
+ install_cmd = 'pkg_add -Im'
+
+ if module.params['snapshot'] is True:
+ install_cmd += ' -Dsnap'
+
+ if pkg_spec[name]['installed_state'] is False:
+
+ # Attempt to install the package
+ if build is True and not module.check_mode:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
+ else:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
+
+ # The behaviour of pkg_add is a bit different depending on whether a
+ # specific version is supplied or not.
+ #
+ # When a specific version is supplied the return code will be 0 when
+ # a package is found and 1 when it is not. If a version is not
+ # supplied the tool will exit 0 in both cases.
+ #
+ # It is important to note that "version" relates to the
+ # packages-specs(7) notion of a version. When using the branch syntax
+ # (like "python%3.5"), a branch name may look like a version string,
+ # but it is not used as one by pkg_add.
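+ # For example (hypothetical run): "pkg_add -Im nmap-7.91" exits 1 if
+ # that exact version cannot be found, while "pkg_add -Im nmap" exits 0
+ # whether or not a package was actually installed.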
+ if pkg_spec[name]['version'] or build is True:
+ # Depend on the return code.
+ module.debug("package_present(): depending on return code for name '%s'" % name)
+ if pkg_spec[name]['rc']:
+ pkg_spec[name]['changed'] = False
+ else:
+ # Depend on stderr instead.
+ module.debug("package_present(): depending on stderr for name '%s'" % name)
+ if pkg_spec[name]['stderr']:
+ # There is a corner case where having an empty directory in
+ # installpath prior to the right location will result in a
+ # "file:/local/package/directory/ is empty" message on stderr
+ # while still installing the package, so we need to look
+ # for a message like "packagename-1.0: ok" just in case.
+ match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
+
+ if match:
+ # It turns out we were able to install the package.
+ module.debug("package_present(): we were able to install package for name '%s'" % name)
+ else:
+ # We really did fail, fake the return code.
+ module.debug("package_present(): we really did fail for name '%s'" % name)
+ pkg_spec[name]['rc'] = 1
+ pkg_spec[name]['changed'] = False
+ else:
+ module.debug("package_present(): stderr was not set for name '%s'" % name)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to make sure a package is the latest available version.
+def package_latest(names, pkg_spec, module):
+ if module.params['build'] is True:
+ module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
+
+ upgrade_cmd = 'pkg_add -um'
+
+ if module.check_mode:
+ upgrade_cmd += 'n'
+
+ if module.params['clean']:
+ upgrade_cmd += 'c'
+
+ if module.params['quick']:
+ upgrade_cmd += 'q'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+
+ # Attempt to upgrade the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
+
+ # Look for output looking something like "nmap-6.01->6.25: ok" to see if
+ # something changed (or would have changed). Use \W to delimit the match
+ # from progress meter output.
+ pkg_spec[name]['changed'] = False
+ for installed_name in pkg_spec[name]['installed_names']:
+ module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
+ match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
+ if match:
+ module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
+
+ pkg_spec[name]['changed'] = True
+ break
+
+ # FIXME: This part is problematic. Based on the issues mentioned (and
+ # handled) in package_present() it is not safe to blindly trust stderr
+ # as an indicator that the command failed, and in the case with
+ # empty installpath directories this will break.
+ #
+ # For now keep this safeguard here, but ignore it if we managed to
+ # parse out a successful update above. This way we will report a
+ # successful run when we actually modify something but fail
+ # otherwise.
+ if pkg_spec[name]['changed'] is not True:
+ if pkg_spec[name]['stderr']:
+ pkg_spec[name]['rc'] = 1
+
+ else:
+ # Note packages that need to be handled by package_present
+ module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
+ pkg_spec['package_latest_leftovers'].append(name)
+
+ # If there were any packages that were not installed we call
+ # package_present() which will handle those.
+ if pkg_spec['package_latest_leftovers']:
+ module.debug("package_latest(): calling package_present() to handle leftovers")
+ package_present(names, pkg_spec, module)
+
+
+# Function used to make sure a package is not installed.
+def package_absent(names, pkg_spec, module):
+ remove_cmd = 'pkg_delete -I'
+
+ if module.check_mode:
+ remove_cmd += 'n'
+
+ if module.params['clean']:
+ remove_cmd += 'c'
+
+ if module.params['quick']:
+ remove_cmd += 'q'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+ # Attempt to remove the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+ else:
+ pkg_spec[name]['changed'] = False
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
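+#
+# Illustration of the parsing below (assumed example names):
+#   "nmap-7.91"   -> stem "nmap", version "7.91"  (style "version")
+#   "vim--no_x11" -> stem "vim", flavor "no_x11"  (style "versionless")
+#   "python%3.5"  -> stem "python", branch "3.5"  (style "stem")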
+def parse_package_name(names, pkg_spec, module):
+
+ # Initialize empty list of package_latest() leftovers.
+ pkg_spec['package_latest_leftovers'] = []
+
+ for name in names:
+ module.debug("parse_package_name(): parsing name: %s" % name)
+ # Do some initial matches so we can base the more advanced regex on that.
+ version_match = re.search("-[0-9]", name)
+ versionless_match = re.search("--", name)
+
+ # Stop if someone is giving us a name that both has a version and is
+ # version-less at the same time.
+ if version_match and versionless_match:
+ module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+ # All information for a given name is kept in the pkg_spec keyed by that name.
+ pkg_spec[name] = {}
+
+ # If name includes a version.
+ if version_match:
+ match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = match.group('version')
+ pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'version'
+ module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+ "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+ # If name includes no version but is version-less ("--").
+ elif versionless_match:
+ match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = '-'
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'versionless'
+ module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+ # If name includes no version, and is not version-less, it is all a
+ # stem, possibly with a branch (%branchname) tacked on at the
+ # end.
+ else:
+ match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = None
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = None
+ pkg_spec[name]['flavor'] = None
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'stem'
+ module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at else: " + name)
+
+ # Verify that the managed host is new enough to support branch syntax.
+ if pkg_spec[name]['branch']:
+ branch_release = "6.0"
+
+ if StrictVersion(platform.release()) < StrictVersion(branch_release):
+ module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+ # Sanity check that there are no trailing dashes in flavor.
+ # Try to stop strange stuff early so we can be strict later.
+ if pkg_spec[name]['flavor']:
+ match = re.search("-$", pkg_spec[name]['flavor'])
+ if match:
+ module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+ pkg_spec[name]['subpackage'] = None
+ if pkg_spec[name]['stem'] == 'sqlports':
+ return 'databases/sqlports'
+ else:
+ # try for an exact match first
+ sqlports_db_file = '/usr/local/share/sqlports'
+ if not os.path.isfile(sqlports_db_file):
+ module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+ conn = sqlite3.connect(sqlports_db_file)
+ first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+ query = first_part_of_query + ' = ?'
+ module.debug("package_package_source_path(): exact query: %s" % query)
+ cursor = conn.execute(query, (name,))
+ results = cursor.fetchall()
+
+ # next, try for a fuzzier match
+ if len(results) < 1:
+ looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
+ query = first_part_of_query + ' LIKE ?'
+ if pkg_spec[name]['flavor']:
+ looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+ module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ elif pkg_spec[name]['style'] == 'versionless':
+ query += ' AND fullpkgname NOT LIKE ?'
+ module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
+ cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+ else:
+ module.debug("package_package_source_path(): fuzzy query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ results = cursor.fetchall()
+
+ # error if we don't find exactly 1 match
+ conn.close()
+ if len(results) < 1:
+ module.fail_json(msg="could not find a port by the name '%s'" % name)
+ if len(results) > 1:
+ matches = [result[1] for result in results]
+ module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+ # there's exactly 1 match, so figure out the subpackage, if any, then return
+ fullpkgpath = results[0][0]
+ parts = fullpkgpath.split(',')
+ if len(parts) > 1 and parts[1][0] == '-':
+ pkg_spec[name]['subpackage'] = parts[1]
+ return parts[0]
+
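+# Illustration (assumed sqlports rows): a fullpkgpath of "x11/gnome,-extras"
+# splits into the port path "x11/gnome" and the subpackage "-extras", while a
+# plain "security/nmap" has no subpackage.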
+
+# Function used for upgrading all installed packages.
+def upgrade_packages(pkg_spec, module):
+ if module.check_mode:
+ upgrade_cmd = 'pkg_add -Imnu'
+ else:
+ upgrade_cmd = 'pkg_add -Imu'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ # Create a minimal pkg_spec entry for '*' to store return values.
+ pkg_spec['*'] = {}
+
+ # Attempt to upgrade all packages.
+ pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
+
+ # Try to find any occurrence of a package changing version like:
+ # "bzip2-1.0.6->1.0.6p0: ok".
+ match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+ if match:
+ pkg_spec['*']['changed'] = True
+
+ else:
+ pkg_spec['*']['changed'] = False
+
+ # It seems we cannot trust the return value, so depend on the presence of
+ # stderr to know if something failed.
+ if pkg_spec['*']['stderr']:
+ pkg_spec['*']['rc'] = 1
+ else:
+ pkg_spec['*']['rc'] = 0
+
+
+# ===========================================
+# Main control flow.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ build=dict(type='bool', default=False),
+ snapshot=dict(type='bool', default=False),
+ ports_dir=dict(type='path', default='/usr/ports'),
+ quick=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['snapshot', 'build']],
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ build = module.params['build']
+ ports_dir = module.params['ports_dir']
+
+ rc = 0
+ stdout = ''
+ stderr = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+ result['build'] = build
+
+ # The data structure used to keep track of package information.
+ pkg_spec = {}
+
+ if build is True:
+ if not os.path.isdir(ports_dir):
+ module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+ # build sqlports if it is not installed yet
+ parse_package_name(['sqlports'], pkg_spec, module)
+ get_package_state(['sqlports'], pkg_spec, module)
+ if not pkg_spec['sqlports']['installed_state']:
+ module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+ package_present(['sqlports'], pkg_spec, module)
+
+ asterisk_name = False
+ for n in name:
+ if n == '*':
+ if len(name) != 1:
+ module.fail_json(msg="the package name '*' can not be mixed with other names")
+
+ asterisk_name = True
+
+ if asterisk_name:
+ if state != 'latest':
+ module.fail_json(msg="the package name '*' is only valid when using state=latest")
+ else:
+ # Perform an upgrade of all installed packages.
+ upgrade_packages(pkg_spec, module)
+ else:
+ # Parse package names and put results in the pkg_spec dictionary.
+ parse_package_name(name, pkg_spec, module)
+
+ # Not sure how the branch syntax is supposed to play together
+ # with build mode. Disable it for now.
+ for n in name:
+ if pkg_spec[n]['branch'] and module.params['build'] is True:
+ module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
+
+ # Get state for all package names.
+ get_package_state(name, pkg_spec, module)
+
+ # Perform requested action.
+ if state in ['installed', 'present']:
+ package_present(name, pkg_spec, module)
+ elif state in ['absent', 'removed']:
+ package_absent(name, pkg_spec, module)
+ elif state == 'latest':
+ package_latest(name, pkg_spec, module)
+
+ # The combined changed status for all requested packages. If anything
+ # is changed this is set to True.
+ combined_changed = False
+
+ # The combined failed status for all requested packages. If anything
+ # failed this is set to True.
+ combined_failed = False
+
+ # We combine all error messages in this comma separated string, for example:
+ # "msg": "Can't find nmapp\n, Can't find nmappp\n"
+ combined_error_message = ''
+
+ # Loop over all requested package names and check if anything failed or
+ # changed.
+ for n in name:
+ if pkg_spec[n]['rc'] != 0:
+ combined_failed = True
+ if pkg_spec[n]['stderr']:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stderr']
+ else:
+ combined_error_message = pkg_spec[n]['stderr']
+ else:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stdout']
+ else:
+ combined_error_message = pkg_spec[n]['stdout']
+
+ if pkg_spec[n]['changed'] is True:
+ combined_changed = True
+
+ # If combined_error_message contains anything, at least some part of the
+ # list of requested package names failed.
+ if combined_failed:
+ module.fail_json(msg=combined_error_message, **result)
+
+ result['changed'] = combined_changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py
new file mode 100644
index 00000000..7da9a487
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Pelletier <pp.pelletier@gmail.com>
+# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: opkg
+author: "Patrick Pelletier (@skinp)"
+short_description: Package manager for OpenWrt
+description:
+ - Manages OpenWrt packages
+options:
+ name:
+ description:
+ - name of package to install/remove
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent', 'installed', 'removed' ]
+ default: present
+ type: str
+ force:
+ description:
+ - The opkg --force option to use.
+ choices:
+ - ""
+ - "depends"
+ - "maintainer"
+ - "reinstall"
+ - "overwrite"
+ - "downgrade"
+ - "space"
+ - "postinstall"
+ - "remove"
+ - "checksum"
+ - "removal-of-dependent-packages"
+ type: str
+ update_cache:
+ description:
+ - update the package db first
+ aliases: ['update-cache']
+ default: "no"
+ type: bool
+requirements:
+ - opkg
+ - python
+'''
+EXAMPLES = '''
+- name: Install foo
+ community.general.opkg:
+ name: foo
+ state: present
+
+- name: Update cache and install foo
+ community.general.opkg:
+ name: foo
+ state: present
+ update_cache: yes
+
+- name: Remove foo
+ community.general.opkg:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar
+ community.general.opkg:
+ name: foo,bar
+ state: absent
+
+- name: Install foo using overwrite option forcibly
+ community.general.opkg:
+ name: foo
+ state: present
+ force: overwrite
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def update_package_db(module, opkg_path):
+ """ Updates packages list. """
+
+ rc, out, err = module.run_command("%s update" % opkg_path)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def query_package(module, opkg_path, name, state="present"):
+ """ Returns whether a package is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ return True
+
+ return False
+
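+# Illustration only: for an assumed package "foo", the query above runs
+#   opkg list-installed | grep -q "^foo "
+# and a zero grep exit status means "foo" is installed.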
+
+def remove_packages(module, opkg_path, packages):
+ """ Uninstalls one or more packages if installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
+
+ if query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, opkg_path, packages):
+ """ Installs one or more packages if not already installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
+
+ if not query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=True),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
+ force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
+ "checksum", "removal-of-dependent-packages"]),
+ update_cache=dict(default="no", aliases=["update-cache"], type='bool')
+ )
+ )
+
+ opkg_path = module.get_bin_path('opkg', True, ['/bin'])
+
+ p = module.params
+
+ if p["update_cache"]:
+ update_package_db(module, opkg_path)
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] in ["present", "installed"]:
+ install_packages(module, opkg_path, pkgs)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_packages(module, opkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py
new file mode 100644
index 00000000..0931ddc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Afterburn <https://github.com/afterburn>
+# Copyright: (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# Copyright: (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacman
+short_description: Manage packages with I(pacman)
+description:
+ - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants.
+author:
+ - Indrajit Raychaudhuri (@indrajitr)
+ - Aaron Bull Schaefer (@elasticdog) <aaron@elasticdog.com>
+ - Maxime de Roucy (@tchernomax)
+options:
+ name:
+ description:
+ - Name or list of names of the package(s) or file(s) to install, upgrade, or remove.
+ Can't be used in combination with C(upgrade).
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Desired state of the package.
+ default: present
+ choices: [ absent, latest, present, installed, removed ]
+ type: str
+
+ force:
+ description:
+ - When removing packages, force removal without any checks;
+ same as `extra_args="--nodeps --nodeps"`.
+ When updating the cache, force a redownload of the repository databases;
+ same as `update_cache_extra_args="--refresh --refresh"`.
+ default: no
+ type: bool
+
+ extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(state).
+ default:
+ type: str
+
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists.
+ - This can be run as part of a package installation or as a separate step.
+ default: no
+ type: bool
+ aliases: [ update-cache ]
+
+ update_cache_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(update_cache).
+ default:
+ type: str
+
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ Can't be used in combination with C(name).
+ default: no
+ type: bool
+
+ upgrade_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(upgrade).
+ default:
+ type: str
+
+notes:
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when upgrade is set to yes
+ type: list
+ sample: [ package, other-package ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo from repo
+ community.general.pacman:
+ name: foo
+ state: present
+
+- name: Install package bar from file
+ community.general.pacman:
+ name: ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Install package foo from repo and bar from file
+ community.general.pacman:
+ name:
+ - foo
+ - ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Upgrade package foo
+ community.general.pacman:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Remove packages foo and bar
+ community.general.pacman:
+ name:
+ - foo
+ - bar
+ state: absent
+
+- name: Recursively remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ extra_args: --recursive
+
+- name: Run the equivalent of "pacman -Sy" as a separate step
+ community.general.pacman:
+ update_cache: yes
+
+- name: Run the equivalent of "pacman -Su" as a separate step
+ community.general.pacman:
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Syu" as a separate step
+ community.general.pacman:
+ update_cache: yes
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Rdd", force remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ force: yes
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_version(pacman_output):
+ """Take pacman -Q or pacman -S output and get the Version"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[1]
+ return None
+
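+# Illustration (assumed `pacman -Q` line): get_version("bash 5.1.004-1")
+# returns "5.1.004-1"; any output that does not split into exactly two
+# fields yields None.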
+
+def get_name(module, pacman_output):
+ """Take pacman -Q or pacman -S output and get the package name"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[0]
+ module.fail_json(msg="get_name: fail to retrieve package name from pacman output")
+
+
+def query_package(module, pacman_path, name, state="present"):
+ """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second
+ boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available
+ """
+ if state == "present":
+ lcmd = "%s --query %s" % (pacman_path, name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False, False
+ else:
+ # a zero exit code does not always mean the package itself is installed;
+ # for example, the queried name may merely be "provided" by another package
+ installed_name = get_name(module, lstdout)
+ if installed_name != name:
+ return False, False, False
+
+ # get the version installed locally (if any)
+ lversion = get_version(lstdout)
+
+ rcmd = "%s --sync --print-format \"%%n %%v\" %s" % (pacman_path, name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ # get the version in the repository
+ rversion = get_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally, the
+ # version comparison to indicate whether it is up-to-date, and False
+ # because the remote fetch succeeded.
+ return True, (lversion == rversion), False
+
+ # The package is installed, but the remote version could not be fetched; the last True flags this error.
+ return True, True, True
+
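+# Illustration of the (installed, up_to_date, fetch_error) triple, with
+# assumed package names:
+#   query_package(module, pacman_path, "bash")      -> (True, True, False)
+#   query_package(module, pacman_path, "no-such")   -> (False, False, False)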
+
+def update_package_db(module, pacman_path):
+ if module.params['force']:
+ module.params["update_cache_extra_args"] += " --refresh --refresh"
+
+ cmd = "%s --sync --refresh %s" % (pacman_path, module.params["update_cache_extra_args"])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def upgrade(module, pacman_path):
+ cmdupgrade = "%s --sync --sysupgrade --quiet --noconfirm %s" % (pacman_path, module.params["upgrade_extra_args"])
+ cmdneedrefresh = "%s --query --upgrades" % (pacman_path)
+ rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
+ data = stdout.split('\n')
+ data.remove('')
+ packages = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if rc == 0:
+ # Match lines of `pacman -Qu` output of the form:
+ # (package name) (before version-release) -> (after version-release)
+ # e.g., "ansible 2.7.1-1 -> 2.7.2-1"
+ regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)')
+ for p in data:
+ m = regex.search(p)
+ packages.append(m.group(1))
+ if module._diff:
+ diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
+ diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
+ if module.check_mode:
+ module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
+
+
+def remove_packages(module, pacman_path, packages):
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if module.params["force"]:
+ module.params["extra_args"] += " --nodeps --nodeps"
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s --remove --noconfirm --noprogressbar %s %s" % (pacman_path, module.params["extra_args"], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ if module._diff:
+ d = stdout.split('\n')[2].split(' ')[2:]
+ for i, pkg in enumerate(d):
+ d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
+ diff['before'] += "%s\n" % pkg
+ data.append('\n'.join(d))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pacman_path, state, packages, package_files):
+ install_c = 0
+ package_err = []
+ message = ""
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ to_install_repos = []
+ to_install_files = []
+ for i, package in enumerate(packages):
+ # skip if the package is installed and state == present, or if state == latest and it is up-to-date
+ installed, updated, latestError = query_package(module, pacman_path, package)
+ if latestError and state == 'latest':
+ package_err.append(package)
+
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if package_files[i]:
+ to_install_files.append(package_files[i])
+ else:
+ to_install_repos.append(package)
+
+ if to_install_repos:
+ cmd = "%s --sync --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_repos))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
+
+ # Because we pass `--needed`, pacman returns a single line of ` there is nothing to do` if no change is performed.
+ # The check for > 3 is here because we pick the 4th line in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_repos)
+
+ if to_install_files:
+ cmd = "%s --upgrade --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_files))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
+
+ # Because we pass `--needed`, pacman returns a single line of ` there is nothing to do` if no change is performed.
+ # The check for > 3 is here because we pick the 4th line in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_files)
+
+ if state == 'latest' and len(package_err) > 0:
+ message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
+
+
+def check_packages(module, pacman_path, packages, state):
+ would_be_changed = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ 'before_header': '',
+ 'after_header': ''
+ }
+
+ for package in packages:
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+
+ if module._diff and (state == 'removed'):
+ diff['before_header'] = 'removed'
+ diff['before'] = '\n'.join(would_be_changed) + '\n'
+ elif module._diff and ((state == 'present') or (state == 'latest')):
+ diff['after_header'] = 'installed'
+ diff['after'] = '\n'.join(would_be_changed) + '\n'
+
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state), diff=diff)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
+
+
+def expand_package_groups(module, pacman_path, pkgs):
+ expanded = []
+
+ __, stdout, __ = module.run_command([pacman_path, "--sync", "--groups", "--quiet"], check_rc=True)
+ available_groups = stdout.splitlines()
+
+ for pkg in pkgs:
+ if pkg: # avoid empty strings
+ if pkg in available_groups:
+ # A group was found matching the package name: expand it
+ cmd = [pacman_path, "--sync", "--groups", "--quiet", pkg]
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ expanded.extend([name.strip() for name in stdout.splitlines()])
+ else:
+ expanded.append(pkg)
+
+ return expanded
+
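+# Illustration (assumed names): expand_package_groups(module, pacman_path,
+# ["base-devel", "vim"]) replaces the "base-devel" group with its member
+# packages (via `pacman --sync --groups --quiet base-devel`) and keeps
+# "vim" as-is.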
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=['pkg', 'package']),
+ state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']),
+ force=dict(type='bool', default=False),
+ extra_args=dict(type='str', default=''),
+ upgrade=dict(type='bool', default=False),
+ upgrade_extra_args=dict(type='str', default=''),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ update_cache_extra_args=dict(type='str', default=''),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True,
+ )
+
+ pacman_path = module.get_bin_path('pacman', True)
+ module.run_command_environ_update = dict(LC_ALL='C')
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p["update_cache"] and not module.check_mode:
+ update_package_db(module, pacman_path)
+ if not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Updated the package master lists')
+
+ if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Would have updated the package cache')
+
+ if p['upgrade']:
+ upgrade(module, pacman_path)
+
+ if p['name']:
+ pkgs = expand_package_groups(module, pacman_path, p['name'])
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ if not pkg: # avoid empty strings
+ continue
+ elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z|zst))?$", pkg):
+ # The package given is a filename, extract the raw pkg name from
+ # it and store the filename
+ pkg_files.append(pkg)
+ pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
+ else:
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pacman_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
+ elif p['state'] == 'absent':
+ remove_packages(module, pacman_path, pkgs)
+ else:
+ module.exit_json(changed=False, msg="No package specified to work on.")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py
new file mode 100644
index 00000000..266c073f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Peter Oliver <ansible@mavit.org.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pkg5
+author:
+- Peter Oliver (@mavit)
+short_description: Manages packages with the Solaris 11 Image Packaging System
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+notes:
+ - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
+options:
+ name:
+ description:
+ - An FMRI of the package(s) to be installed/removed/updated.
+ - Multiple packages may be specified, separated by C(,).
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to install (I(present), I(latest)), or remove (I(absent)) a package.
+ choices: [ absent, latest, present, installed, removed, uninstalled ]
+ default: present
+ type: str
+ accept_licenses:
+ description:
+ - Accept any licenses.
+ type: bool
+ default: no
+ aliases: [ accept, accept_licences ]
+ be_name:
+ description:
+ - Creates a new boot environment with the given name.
+ type: str
+ refresh:
+ description:
+ - Refresh publishers before execution.
+ type: bool
+ default: yes
+'''
+EXAMPLES = '''
+- name: Install Vim
+ community.general.pkg5:
+ name: editor/vim
+
+- name: Install Vim without refreshing publishers
+ community.general.pkg5:
+ name: editor/vim
+ refresh: no
+
+- name: Remove finger daemon
+ community.general.pkg5:
+ name: service/network/finger
+ state: absent
+
+- name: Install several packages at once
+ community.general.pkg5:
+ name:
+ - /file/gnu-findutils
+ - /text/gnu-grep
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']),
+ accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']),
+ be_name=dict(type='str'),
+ refresh=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ packages = []
+
+ # pkg(5) FMRIs include a comma before the release number, but
+ # AnsibleModule will have split this into multiple items for us.
+ # Try to spot where this has happened and fix it.
+ for fragment in params['name']:
+ if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
+ packages[-1] += ',' + fragment
+ else:
+ packages.append(fragment)
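+ # Illustration (assumed FMRI): a name like "editor/vim@8.2,5.11" arrives
+ # from AnsibleModule as ["editor/vim@8.2", "5.11"] and is rejoined here
+ # to "editor/vim@8.2,5.11".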
+
+ if params['state'] in ['present', 'installed']:
+ ensure(module, 'present', packages, params)
+ elif params['state'] in ['latest']:
+ ensure(module, 'latest', packages, params)
+ elif params['state'] in ['absent', 'uninstalled', 'removed']:
+ ensure(module, 'absent', packages, params)
+
+
+def ensure(module, state, packages, params):
+ response = {
+ 'results': [],
+ 'msg': '',
+ }
+ behaviour = {
+ 'present': {
+ 'filter': lambda p: not is_installed(module, p),
+ 'subcommand': 'install',
+ },
+ 'latest': {
+ 'filter': lambda p: (
+ not is_installed(module, p) or not is_latest(module, p)
+ ),
+ 'subcommand': 'install',
+ },
+ 'absent': {
+ 'filter': lambda p: is_installed(module, p),
+ 'subcommand': 'uninstall',
+ },
+ }
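+ # Illustration: for state 'latest' the filter keeps packages that are
+ # missing or outdated and runs `pkg install` on them; an exit code of 4
+ # from pkg(1) (assumed to mean "nothing to do") is reported as unchanged
+ # below.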
+
+ if module.check_mode:
+ dry_run = ['-n']
+ else:
+ dry_run = []
+
+ if params['accept_licenses']:
+ accept_licenses = ['--accept']
+ else:
+ accept_licenses = []
+
+ if params['be_name']:
+ beadm = ['--be-name=' + module.params['be_name']]
+ else:
+ beadm = []
+
+ if params['refresh']:
+ no_refresh = []
+ else:
+ no_refresh = ['--no-refresh']
+
+ to_modify = list(filter(behaviour[state]['filter'], packages))
+ if to_modify:
+ rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify)
+ response['rc'] = rc
+ response['results'].append(out)
+ response['msg'] += err
+ response['changed'] = True
+ if rc == 4:
+ response['changed'] = False
+ response['failed'] = False
+ elif rc != 0:
+ module.fail_json(**response)
+
+ module.exit_json(**response)
+
+
+def is_installed(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '--', package])
+ return not bool(int(rc))
+
+
+def is_latest(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
+ return bool(int(rc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py
new file mode 100644
index 00000000..95d57765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: "Peter Oliver (@mavit)"
+short_description: Manages Solaris 11 Image Packaging System publishers
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+ - This module configures which publishers a client will download IPS
+ packages from.
+options:
+ name:
+ description:
+ - The publisher's name.
+ required: true
+ aliases: [ publisher ]
+ type: str
+ state:
+ description:
+ - Whether to ensure that a publisher is present or absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ sticky:
+ description:
+ - Packages installed from a sticky repository can only receive updates
+ from that repository.
+ type: bool
+ enabled:
+ description:
+ - Is the repository enabled or disabled?
+ type: bool
+ origin:
+ description:
+ - A path or URL to the repository.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+ mirror:
+ description:
+ - A path or URL to the repository mirror.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+'''
+EXAMPLES = '''
+- name: Fetch packages for the solaris publisher direct from Oracle
+ community.general.pkg5_publisher:
+ name: solaris
+ sticky: true
+ origin: https://pkg.oracle.com/solaris/support/
+
+- name: Configure a publisher for locally-produced packages
+ community.general.pkg5_publisher:
+ name: site
+ origin: 'https://pkg.example.com/site/'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['publisher']),
+ state=dict(default='present', choices=['present', 'absent']),
+ sticky=dict(type='bool'),
+ enabled=dict(type='bool'),
+ # search_after=dict(),
+ # search_before=dict(),
+ origin=dict(type='list', elements='str'),
+ mirror=dict(type='list', elements='str'),
+ )
+ )
+
+ for option in ['origin', 'mirror']:
+ if module.params[option] == ['']:
+ module.params[option] = []
+
+ if module.params['state'] == 'present':
+ modify_publisher(module, module.params)
+ else:
+ unset_publisher(module, module.params['name'])
+
+
+def modify_publisher(module, params):
+ name = params['name']
+ existing = get_publishers(module)
+
+ if name in existing:
+ for option in ['origin', 'mirror', 'sticky', 'enabled']:
+ if params[option] is not None:
+ if params[option] != existing[name][option]:
+ return set_publisher(module, params)
+ else:
+ return set_publisher(module, params)
+
+ module.exit_json()
+
+
+def set_publisher(module, params):
+ name = params['name']
+ args = []
+
+ if params['origin'] is not None:
+ args.append('--remove-origin=*')
+ args.extend(['--add-origin=' + u for u in params['origin']])
+ if params['mirror'] is not None:
+ args.append('--remove-mirror=*')
+ args.extend(['--add-mirror=' + u for u in params['mirror']])
+
+ if params['sticky'] is not None and params['sticky']:
+ args.append('--sticky')
+ elif params['sticky'] is not None:
+ args.append('--non-sticky')
+
+ if params['enabled'] is not None and params['enabled']:
+ args.append('--enable')
+ elif params['enabled'] is not None:
+ args.append('--disable')
+
+ rc, out, err = module.run_command(
+ ["pkg", "set-publisher"] + args + [name],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def unset_publisher(module, publisher):
+ if publisher not in get_publishers(module):
+ module.exit_json()
+
+ rc, out, err = module.run_command(
+ ["pkg", "unset-publisher", publisher],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def get_publishers(module):
+ rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
+
+ lines = out.splitlines()
+ keys = lines.pop(0).lower().split("\t")
+
+ publishers = {}
+ for line in lines:
+ values = dict(zip(keys, map(unstringify, line.split("\t"))))
+ name = values['publisher']
+
+ if name not in publishers:
+ publishers[name] = dict(
+ (k, values[k]) for k in ['sticky', 'enabled']
+ )
+ publishers[name]['origin'] = []
+ publishers[name]['mirror'] = []
+
+ if values['type'] is not None:
+ publishers[name][values['type']].append(values['uri'])
+
+ return publishers
+
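+# Illustration only (column layout assumed from `pkg publisher -Ftsv`): each
+# tab-separated row is zipped against the lowercased header line, so a row
+# whose "publisher" field is "solaris" and whose "type" is "origin" appends
+# its "uri" value to publishers['solaris']['origin'].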
+
+def unstringify(val):
+ if val == "-" or val == '':
+ return None
+ elif val == "true":
+ return True
+ elif val == "false":
+ return False
+ else:
+ return val
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py
new file mode 100644
index 00000000..2937314f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
+# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
+# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
+#
+# Written by Shaun Zinck
+# Based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgin
+short_description: Package manager for SmartOS, NetBSD, et al.
+description:
+ - "The standard package manager for SmartOS, but also usable on NetBSD
+ or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
+author:
+ - "Larry Gilbert (@L2G)"
+ - "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
+notes:
+ - "Known bug with pkgin < 0.8.0: if a package is removed and another
+ package depends on it, the other package will be silently removed as
+ well. New to Ansible 1.9: check-mode support."
+options:
+ name:
+ description:
+ - Name of package to install/remove.
+ - Multiple names may be given, separated by commas.
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - Intended state of the package
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update repository database. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade main packages to their newer versions
+ type: bool
+ default: no
+ full_upgrade:
+ description:
+ - Upgrade all packages to their newer versions
+ type: bool
+ default: no
+ clean:
+ description:
+ - Clean packages cache
+ type: bool
+ default: no
+ force:
+ description:
+ - Force package reinstall
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgin:
+ name: foo
+ state: present
+
+- name: Install specific version of foo package
+ community.general.pkgin:
+ name: foo-2.0.1
+ state: present
+
+- name: Update cache and install foo package
+ community.general.pkgin:
+ name: foo
+ update_cache: yes
+
+- name: Remove package foo
+ community.general.pkgin:
+ name: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.pkgin:
+ name: foo,bar
+ state: absent
+
+- name: Update repositories as a separate step
+ community.general.pkgin:
+ update_cache: yes
+
+- name: Upgrade main packages (equivalent to pkgin upgrade)
+ community.general.pkgin:
+ upgrade: yes
+
+- name: Upgrade all packages (equivalent to pkgin full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+
+- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+ force: yes
+
+- name: Clean packages cache (equivalent to pkgin clean)
+ community.general.pkgin:
+ clean: yes
+'''
+
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class PackageState(object):
+ PRESENT = 1
+ NOT_INSTALLED = 2
+ OUTDATED = 4
+ NOT_FOUND = 8
+
+
+def query_package(module, name):
+ """Search for the package by name and return state of the package.
+ """
+
+ # test whether '-p' (parsable) flag is supported.
+ rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
+
+ if rc == 0:
+ pflag = '-p'
+ splitchar = ';'
+ else:
+ pflag = ''
+ splitchar = ' '
+
+ # Use "pkgin search" to find the package. The regular expression will
+ # only match on the complete name.
+ rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
+
+ # rc will not be 0 unless the search was a success
+ if rc == 0:
+
+ # Search results may contain more than one line (e.g., 'emacs'), so iterate
+ # through each line to see if we have a match.
+ packages = out.split('\n')
+
+ for package in packages:
+
+ # Break up line at spaces. The first part will be the package with its
+ # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
+ # of the package:
+ # '' - not installed
+ # '<' - installed but out of date
+ # '=' - installed and up to date
+ # '>' - installed but newer than the repository version
+ pkgname_with_version, raw_state = package.split(splitchar)[0:2]
+
+ # Search for package, stripping version
+ # (results in sth like 'gcc47-libs' or 'emacs24-nox11')
+ pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
+
+ # Do not proceed unless we have a match
+ if not pkg_search_obj:
+ continue
+
+ # Grab matched string
+ pkgname_without_version = pkg_search_obj.group(1)
+
+ if name not in (pkgname_with_version, pkgname_without_version):
+ continue
+
+ # The package was found; now return its state
+ if raw_state == '<':
+ return PackageState.OUTDATED
+ elif raw_state == '=' or raw_state == '>':
+ return PackageState.PRESENT
+ else:
+ # Package found but not installed
+ return PackageState.NOT_INSTALLED
+ # no fall-through
+
+ # No packages were matched
+ return PackageState.NOT_FOUND
+
+ # Search failed
+ return PackageState.NOT_FOUND
+
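+# Illustration (assumed parsable search hit): with pkgin >= 0.8.0 a line like
+# "gcc47-libs-4.7.2nb4;=;GCC 4.7 shared libraries" splits on ';', the version
+# suffix is stripped to leave "gcc47-libs", and '=' maps to
+# PackageState.PRESENT.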
+
+def format_action_message(module, action, count):
+ vars = {"actioned": action,
+ "count": count}
+
+ if module.check_mode:
+ message = "would have %(actioned)s %(count)d package" % vars
+ else:
+ message = "%(actioned)s %(count)d package" % vars
+
+ if count == 1:
+ return message
+ else:
+ return message + "s"
+
+
+def format_pkgin_command(module, command, package=None):
+ # Not all commands take a package argument, so cover this up by passing
+ # an empty string. Some commands (e.g. 'update') will ignore extra
+ # arguments, however this behaviour cannot be relied on for others.
+ if package is None:
+ package = ""
+
+ if module.params["force"]:
+ force = "-F"
+ else:
+ force = ""
+
+ vars = {"pkgin": PKGIN_PATH,
+ "command": command,
+ "package": package,
+ "force": force}
+
+ if module.check_mode:
+ return "%(pkgin)s -n %(command)s %(package)s" % vars
+ else:
+ return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
+
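+# Illustration (assumed values): outside check mode with force=yes,
+# format_pkgin_command(module, "full-upgrade") yields roughly
+# "/opt/local/bin/pkgin -y -F full-upgrade " (the trailing space comes from
+# the empty package argument and is harmless, as run_command splits the
+# string itself).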
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+
+ # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]:
+ continue
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "remove", package))
+
+ if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages):
+
+ install_c = 0
+
+ for package in packages:
+ query_result = query_package(module, package)
+ if query_result in [PackageState.PRESENT, PackageState.OUTDATED]:
+ continue
+ elif query_result is PackageState.NOT_FOUND:
+ module.fail_json(msg="failed to find package %s for installation" % package)
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "install", package))
+
+ if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "update"))
+
+ if rc == 0:
+ if re.search('database for.*is up-to-date\n$', out):
+ return False, "database is up-to-date"
+ else:
+ return True, "updated repository database"
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def do_upgrade_packages(module, full=False):
+ if full:
+ cmd = "full-upgrade"
+ else:
+ cmd = "upgrade"
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, cmd))
+
+ if rc == 0:
+        if re.search(r'^nothing to do\.\n$', out):
+ module.exit_json(changed=False, msg="nothing left to upgrade")
+ else:
+ module.fail_json(msg="could not %s packages" % cmd)
+
+
+def upgrade_packages(module):
+ do_upgrade_packages(module)
+
+
+def full_upgrade_packages(module):
+ do_upgrade_packages(module, True)
+
+
+def clean_cache(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "clean"))
+
+ if rc == 0:
+ # There's no indication if 'clean' actually removed anything,
+ # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches")
+ else:
+ module.fail_json(msg="could not clean package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ full_upgrade=dict(default=False, type='bool'),
+ clean=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool')),
+ required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+ supports_check_mode=True)
+
+ global PKGIN_PATH
+ PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p["update_cache"]:
+ c, msg = update_package_db(module)
+ if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+ module.exit_json(changed=c, msg=msg)
+
+ if p["upgrade"]:
+ upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded packages')
+
+ if p["full_upgrade"]:
+ full_upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded all packages')
+
+ if p["clean"]:
+ clean_cache(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='cleaned caches')
+
+ pkgs = p["name"]
+
+ if p["state"] == "present":
+ install_packages(module, pkgs)
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py
new file mode 100644
index 00000000..d5ed4a0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+  - Manage binary packages for FreeBSD using C(pkgng), which is available in FreeBSD 9.0 and later.
+options:
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ - "With I(name=*), I(state: latest) will operate, but I(state: present) and I(state: absent) will be noops."
+ - >
+        Warning: In Ansible 2.9 and earlier this module had a misfeature
+        where I(name=*) with I(state: latest) or I(state: present) would
+        install every package from every package repository, filling up
+        the machine's disk. Avoid these combinations unless you are certain
+        that your role will only be used with newer versions.
+ required: true
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ - 'Note: "latest" added in 2.7'
+ choices: [ 'present', 'latest', 'absent' ]
+ required: false
+ default: present
+ type: str
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: no
+ annotation:
+ description:
+      - A comma-separated list of key/value pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ type: str
+ pkgsite:
+ description:
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+      - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ type: str
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) or I(jail) options.
+ required: false
+ type: path
+ chroot:
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) or I(jail) options.
+ required: false
+ type: path
+ jail:
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with I(chroot) or I(rootdir) options.
+ type: str
+ autoremove:
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: no
+ ignore_osver:
+ description:
+ - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches.
+ - Defines the C(IGNORE_OSVERSION) environment variable.
+ required: false
+ type: bool
+ default: no
+ version_added: 1.3.0
+author: "bleader (@bleader)"
+notes:
+  - When using I(pkgsite), packages that are already in the local cache will not be downloaded again.
+  - When used with a C(loop:), each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgng:
+ name: foo
+ state: present
+
+- name: Annotate package foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+ community.general.pkgng:
+ name: baz
+ state: latest
+
+- name: Upgrade all installed packages (see warning for the name option first!)
+ community.general.pkgng:
+ name: "*"
+ state: latest
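+
+# A hypothetical combination of the options documented above; the repository
+# name and jail id are illustrative only
+- name: Install package foo from repository myrepo inside jail web01
+  community.general.pkgng:
+    name: foo
+    state: present
+    pkgsite: myrepo
+    jail: web01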
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+ if rc == 0:
+ return True
+
+ return False
+
+
+def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
+
+ # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed
+ # rc = 1, updates available
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name))
+ else:
+ rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name))
+
+ if rc == 1:
+ return True
+
+ return False
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = [int(x) for x in re.split(r'[\._]', out)]
+
+    i = 0
+    new_pkgng = True
+    # Walk the version components while they match; the while/else branch
+    # runs only when the loop ends on a mismatch (i.e. without break).
+    while compare_version[i] == version[i]:
+        i += 1
+        if i == min(len(compare_version), len(version)):
+            break
+    else:
+        if compare_version[i] > version[i]:
+            new_pkgng = False
+    return not new_pkgng
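+# For example, with "pkg -v" reporting 1.0.9, pkgng_older_than(module,
+# pkgng_path, [1, 1, 4]) returns True; for 1.1.4 or 1.2.0 it returns False.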
+
+
+def upgrade_packages(module, pkgng_path, dir_arg):
+ # Run a 'pkg upgrade', updating all packages.
+ upgraded_c = 0
+
+ cmd = "%s %s upgrade -y" % (pkgng_path, dir_arg)
+ if module.check_mode:
+ cmd += " -n"
+ rc, out, err = module.run_command(cmd)
+
+ match = re.search('^Number of packages to be upgraded: ([0-9]+)', out, re.MULTILINE)
+ if match:
+ upgraded_c = int(match.group(1))
+
+ if upgraded_c > 0:
+ return (True, "updated %s package(s)" % upgraded_c, out, err)
+ return (False, "no packages need upgrades", out, err)
+
+
+def remove_packages(module, pkgng_path, packages, dir_arg):
+ remove_c = 0
+ stdout = ""
+ stderr = ""
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr)
+
+ remove_c += 1
+
+ if remove_c > 0:
+ return (True, "removed %s package(s)" % remove_c, stdout, stderr)
+
+ return (False, "package(s) already absent", stdout, stderr)
+
+
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state, ignoreosver):
+ install_c = 0
+ stdout = ""
+ stderr = ""
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+ if pkgsite != "":
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ # This environment variable skips mid-install prompts,
+ # setting them to their default values.
+ batch_var = 'env BATCH=yes'
+
+ if ignoreosver:
+ # Ignore FreeBSD OS version check,
+ # useful on -STABLE and -CURRENT branches.
+ batch_var = batch_var + ' IGNORE_OSVERSION=yes'
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s %s update" % (batch_var, pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr)
+
+ for package in packages:
+ already_installed = query_package(module, pkgng_path, package, dir_arg)
+ if already_installed and state == "present":
+ continue
+
+ update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+ if not update_available and already_installed and state == "latest":
+ continue
+
+ if not module.check_mode:
+ if already_installed:
+ action = "upgrade"
+ else:
+ action = "install"
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stdout=stdout, stderr=stderr)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c), stdout, stderr)
+
+ return (False, "package(s) already %s" % (state), stdout, stderr)
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+    if not _value:
+        # No such tag on the package
+        module.fail_json(msg="could not change annotation of %s: tag %s does not exist"
+                         % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
+    # Use a list (not a lazy map) so the annotations can be re-iterated for
+    # every package; [+\-:] escapes the dash to avoid a character range.
+    annotations = [
+        re.match(r'(?P<operation>[+\-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+                 _annotation).groupdict()
+        for _annotation in annotation.split(',')
+    ]
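+    # For example, annotation '+test1=baz,-test2' parses to
+    # [{'operation': '+', 'tag': 'test1', 'value': 'baz'},
+    #  {'operation': '-', 'tag': 'test2', 'value': None}]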
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+            if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
+
+def autoremove_packages(module, pkgng_path, dir_arg):
+ stdout = ""
+ stderr = ""
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return (False, "no package(s) to autoremove", stdout, stderr)
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+
+ return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ cached=dict(default=False, type='bool'),
+ ignore_osver=dict(default=False, required=False, type='bool'),
+ annotation=dict(default="", required=False),
+ pkgsite=dict(default="", required=False),
+ rootdir=dict(default="", required=False, type='path'),
+ chroot=dict(default="", required=False, type='path'),
+ jail=dict(default="", required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ stdout = ""
+ stderr = ""
+ dir_arg = ""
+
+ if p["rootdir"] != "":
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["ignore_osver"]:
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 11, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater")
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
+
+ if p["jail"] != "":
+ dir_arg = '--jail %s' % (p["jail"])
+
+ if pkgs == ['*'] and p["state"] == 'latest':
+ # Operate on all installed packages. Only state: latest makes sense here.
+ _changed, _msg, _stdout, _stderr = upgrade_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ # Operate on named packages
+ named_packages = [pkg for pkg in pkgs if pkg != '*']
+ if p["state"] in ("present", "latest") and named_packages:
+ _changed, _msg, _out, _err = install_packages(module, pkgng_path, named_packages,
+ p["cached"], p["pkgsite"], dir_arg,
+ p["state"], p["ignore_osver"])
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent" and named_packages:
+ _changed, _msg, _out, _err = remove_packages(module, pkgng_path, named_packages, dir_arg)
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg, _stdout, _stderr = autoremove_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py
new file mode 100644
index 00000000..9ec0ebaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
+# based on svr4pkg by
+# Boyd Adamson <boyd () boydadamson.com> (2012)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: pkgutil
+short_description: OpenCSW package management on Solaris
+description:
+- This module installs, updates and removes packages from the OpenCSW project for Solaris.
+- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
+- See U(https://www.opencsw.org/) for more information about the project.
+author:
+- Alexander Winkler (@dermute)
+- David Ponessa (@scathatheworm)
+options:
+ name:
+ description:
+ - The name of the package.
+ - When using C(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil.
+ type: list
+ required: true
+ elements: str
+ aliases: [ pkg ]
+ site:
+ description:
+ - The repository path to install the package from.
+ - Its global definition is in C(/etc/opt/csw/pkgutil.conf).
+ required: false
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages.
+ - The upgrade (C(latest)) operation will update/install the packages to the latest version available.
+ type: str
+ required: true
+ choices: [ absent, installed, latest, present, removed ]
+ update_catalog:
+ description:
+ - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(yes).
+ type: bool
+ default: no
+ force:
+ description:
+ - To allow the update process to downgrade packages to match what is present in the repository, set this to C(yes).
+ - This is useful for rolling back to stable from testing, or similar operations.
+ type: bool
+ default: false
+ version_added: 1.2.0
+notes:
+- In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode.
+'''
+
+EXAMPLES = r'''
+- name: Install a package
+ community.general.pkgutil:
+ name: CSWcommon
+ state: present
+
+- name: Install a package from a specific repository
+ community.general.pkgutil:
+ name: CSWnrpe
+ site: ftp://myinternal.repo/opencsw/kiel
+ state: latest
+
+- name: Remove a package
+ community.general.pkgutil:
+ name: CSWtop
+ state: absent
+
+- name: Install several packages
+ community.general.pkgutil:
+ name:
+ - CSWsudo
+ - CSWtop
+ state: present
+
+- name: Update all packages
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+
+- name: Update all packages and force versions to match latest in catalog
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+ force: yes
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def packages_not_installed(module, names):
+ ''' Check if each package is installed and return list of the ones absent '''
+ pkgs = []
+ for pkg in names:
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc != 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_installed(module, names):
+ ''' Check if each package is installed and return list of the ones present '''
+ pkgs = []
+ for pkg in names:
+ if not pkg.startswith('CSW'):
+ continue
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc == 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_not_latest(module, names, site, update_catalog):
+ ''' Check status of each package and return list of the ones with an upgrade available '''
+ cmd = ['pkgutil']
+ if update_catalog:
+ cmd.append('-U')
+ cmd.append('-c')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if names != ['*']:
+ cmd.extend(names)
+ rc, out, err = run_command(module, cmd)
+
+ # Find packages in the catalog which are not up to date
+ packages = []
+ for line in out.split('\n')[1:-1]:
+ if 'catalog' not in line and 'SAME' not in line:
+ packages.append(line.split(' ')[0])
+
+ # Remove duplicates
+ return list(set(packages))
+
+
+def run_command(module, cmd, **kwargs):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+ return module.run_command(cmd, **kwargs)
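+# For illustration: run_command(module, ['pkgutil', '-U', '-c']) resolves
+# 'pkgutil' through get_bin_path() (also searching /opt/csw/bin) and then
+# executes the resolved command.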
+
+
+def package_install(module, state, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-iy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def package_upgrade(module, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-uy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd += pkgs
+ return run_command(module, cmd)
+
+
+def package_uninstall(module, pkgs):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-ry')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ site=dict(type='str'),
+ update_catalog=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ site = module.params['site']
+ update_catalog = module.params['update_catalog']
+ force = module.params['force']
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ if state in ['installed', 'present']:
+ # Fail with an explicit error when trying to "install" '*'
+ if name == ['*']:
+ module.fail_json(msg="Can not use 'state: present' with name: '*'")
+
+ # Build list of packages that are actually not installed from the ones requested
+ pkgs = packages_not_installed(module, name)
+
+ # If the package list is empty then all packages are already present
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['latest']:
+ # When using latest for *
+ if name == ['*']:
+ # Check for packages that are actually outdated
+ pkgs = packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list comes up empty, everything is already up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+            # If there are packages to update, empty the list and run the
+            # command without arguments: pkgutil updates everything when no
+            # package names are given.
+ pkgs = []
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+ else:
+ # Build list of packages that are either outdated or not installed
+ pkgs = packages_not_installed(module, name)
+ pkgs += packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list is empty that means all packages are installed and up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['absent', 'removed']:
+ # Build list of packages requested for removal that are actually present
+ pkgs = packages_installed(module, name)
+
+ # If the list is empty, no packages need to be removed
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_uninstall(module, pkgs)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ if rc is None:
+ # pkgutil was not executed because the package was already present/absent/up to date
+ result['changed'] = False
+ elif rc == 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ result['failed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py
new file mode 100644
index 00000000..1f0fdc68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py
@@ -0,0 +1,539 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, William L Thomson Jr
+# (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann <sokann@gmail.com>
+# Modified by William L. Thomson Jr. <wlt@o-sinc.com>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portage
+short_description: Package manager for Gentoo
+description:
+ - Manages Gentoo packages
+
+options:
+ package:
+ description:
+ - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ aliases: [name]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - State of the package atom
+ default: "present"
+ choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
+ type: str
+
+ update:
+ description:
+ - Update packages to the best version available (--update)
+ type: bool
+ default: no
+
+ deep:
+ description:
+ - Consider the entire dependency tree of packages (--deep)
+ type: bool
+ default: no
+
+ newuse:
+ description:
+ - Include installed packages where USE flags have changed (--newuse)
+ type: bool
+ default: no
+
+ changed_use:
+ description:
+      - Include installed packages where USE flags have changed, except when
+        flags that the user has not enabled are added or removed
+        (--changed-use).
+ type: bool
+ default: no
+
+ oneshot:
+ description:
+ - Do not add the packages to the world file (--oneshot)
+ type: bool
+ default: no
+
+ noreplace:
+ description:
+ - Do not re-emerge installed packages (--noreplace)
+ type: bool
+ default: yes
+
+ nodeps:
+ description:
+ - Only merge packages but not their dependencies (--nodeps)
+ type: bool
+ default: no
+
+ onlydeps:
+ description:
+ - Only merge packages' dependencies but not the packages (--onlydeps)
+ type: bool
+ default: no
+
+ depclean:
+ description:
+ - Remove packages not needed by explicitly merged packages (--depclean)
+ - If no package is specified, clean up the world's dependencies
+ - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ type: bool
+ default: no
+
+ quiet:
+ description:
+ - Run emerge in quiet mode (--quiet)
+ type: bool
+ default: no
+
+ verbose:
+ description:
+ - Run emerge in verbose mode (--verbose)
+ type: bool
+ default: no
+
+ sync:
+ description:
+ - Sync package repositories first
+ - If yes, perform "emerge --sync"
+ - If web, perform "emerge-webrsync"
+ choices: [ "web", "yes", "no" ]
+ type: str
+
+ getbinpkgonly:
+ description:
+ - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+ version_added: 1.3.0
+
+ getbinpkg:
+ description:
+ - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+
+ usepkgonly:
+ description:
+ - Merge only binaries (no compiling).
+ type: bool
+ default: no
+
+ usepkg:
+ description:
+ - Tries to use the binary package(s) in the locally available packages directory.
+ type: bool
+ default: no
+
+ keepgoing:
+ description:
+ - Continue as much as possible after an error.
+ type: bool
+ default: no
+
+ jobs:
+ description:
+ - Specifies the number of packages to build simultaneously.
+ - "Since version 2.6: Value of 0 or False resets any previously added"
+ - --jobs setting values
+ type: int
+
+ loadavg:
+ description:
+      - Specifies that no new builds should be started if there are other
+        builds running and the load average is at least LOAD.
+      - "Since version 2.6: a value of 0 or False resets any previously added --load-average setting values."
+ type: float
+
+ quietbuild:
+ description:
+      - Redirect all build output to logs alone, and do not display it
+        on stdout (--quiet-build).
+ type: bool
+ default: no
+
+ quietfail:
+ description:
+      - Suppresses display of the build log on stdout (--quiet-fail).
+      - Only the die message and the path of the build log will be
+        displayed on stdout.
+ type: bool
+ default: no
+
+requirements: [ gentoolkit ]
+author:
+ - "William L Thomson Jr (@wltjr)"
+ - "Yap Sok Ann (@sayap)"
+ - "Andrew Udvare (@Tatsh)"
+'''
+
+EXAMPLES = '''
+- name: Make sure package foo is installed
+ community.general.portage:
+ package: foo
+ state: present
+
+- name: Make sure package foo is not installed
+ community.general.portage:
+ package: foo
+ state: absent
+
+- name: Update package foo to the latest version (os specific alternative to latest)
+ community.general.portage:
+ package: foo
+ update: yes
+
+- name: Install package foo using PORTAGE_BINHOST setup
+ community.general.portage:
+ package: foo
+ getbinpkg: yes
+
+- name: Re-install world from binary packages only and do not allow any compiling
+ community.general.portage:
+ package: '@world'
+ usepkgonly: yes
+
+- name: Sync repositories and update world
+ community.general.portage:
+ package: '@world'
+ update: yes
+ deep: yes
+ sync: yes
+
+- name: Remove unneeded packages
+ community.general.portage:
+ depclean: yes
+
+- name: Remove package foo if it is not explicitly needed
+ community.general.portage:
+ package: foo
+ state: absent
+ depclean: yes
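+
+# A hypothetical sketch of the build-throttling options documented above
+- name: Update world, building at most two packages at once below load 4.0
+  community.general.portage:
+    package: '@world'
+    update: yes
+    deep: yes
+    jobs: 2
+    loadavg: 4.0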
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def query_package(module, package, action):
+ if package.startswith('@'):
+ return query_set(module, package, action)
+ return query_atom(module, package, action)
+
+
+def query_atom(module, atom, action):
+ cmd = '%s list %s' % (module.equery_path, atom)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def query_set(module, set, action):
+ system_sets = [
+ '@live-rebuild',
+ '@module-rebuild',
+ '@preserved-rebuild',
+ '@security',
+ '@selected',
+ '@system',
+ '@world',
+ '@x11-module-rebuild',
+ ]
+
+ if set in system_sets:
+ if action == 'unmerge':
+ module.fail_json(msg='set %s cannot be removed' % set)
+ return False
+
+ world_sets_path = '/var/lib/portage/world_sets'
+ if not os.path.exists(world_sets_path):
+ return False
+
+ cmd = 'grep %s %s' % (set, world_sets_path)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def sync_repositories(module, webrsync=False):
+ if module.check_mode:
+ module.exit_json(msg='check mode not supported by sync')
+
+ if webrsync:
+ webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
+ cmd = '%s --quiet' % webrsync_path
+ else:
+ cmd = '%s --sync --quiet --ask=n' % module.emerge_path
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg='could not sync package repositories')
+
+
+# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
+# in one go. If that is not desirable, split the packages into multiple tasks
+# instead of joining them together with comma.
+
+
+def emerge_packages(module, packages):
+ """Run emerge command against given list of atoms."""
+ p = module.params
+
+ if p['noreplace'] and not (p['update'] or p['state'] == 'latest'):
+ for package in packages:
+            if not query_package(module, package, 'emerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already present.')
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Packages would be installed.')
+
+ args = []
+ emerge_flags = {
+ 'update': '--update',
+ 'deep': '--deep',
+ 'newuse': '--newuse',
+ 'changed_use': '--changed-use',
+ 'oneshot': '--oneshot',
+ 'noreplace': '--noreplace',
+ 'nodeps': '--nodeps',
+ 'onlydeps': '--onlydeps',
+ 'quiet': '--quiet',
+ 'verbose': '--verbose',
+ 'getbinpkgonly': '--getbinpkgonly',
+ 'getbinpkg': '--getbinpkg',
+ 'usepkgonly': '--usepkgonly',
+ 'usepkg': '--usepkg',
+ 'keepgoing': '--keep-going',
+ 'quietbuild': '--quiet-build',
+ 'quietfail': '--quiet-fail',
+ }
+ for flag, arg in emerge_flags.items():
+ if p[flag]:
+ args.append(arg)
+
+    if p['state'] == 'latest':
+ args.append("--update")
+
+ emerge_flags = {
+ 'jobs': '--jobs',
+ 'loadavg': '--load-average',
+ }
+
+    for flag, arg in emerge_flags.items():
+        flag_val = p[flag]
+
+        if flag_val is None:
+            # Fall back to the default: don't use this argument at all.
+            continue
+
+        if not flag_val:
+            # If the value is 0 or 0.0: add the flag, but not the value.
+            args.append(arg)
+            continue
+
+        # Add the flag and its value as two separate arguments.
+        args.extend((arg, to_native(flag_val)))
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not installed.',
+ )
+
+ # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
+ # this error
+ if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \
+ and 'Permission denied (publickey).' in err:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+ 'and your SSH authorized_keys file',
+ )
+
+ changed = True
+ for line in out.splitlines():
+ if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+ msg = 'Packages installed.'
+ break
+ elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+ msg = 'Packages would be installed.'
+ break
+ else:
+ changed = False
+ msg = 'No packages installed.'
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg=msg,
+ )
+
+
+def unmerge_packages(module, packages):
+ p = module.params
+
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--unmerge']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not removed.',
+ )
+
+ module.exit_json(
+ changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages removed.',
+ )
+
+
+def cleanup_packages(module, packages):
+ p = module.params
+
+ if packages:
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--depclean']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+        module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err, msg='Depclean failed.')
+
+ removed = 0
+ for line in out.splitlines():
+ if not line.startswith('Number removed:'):
+ continue
+ parts = line.split(':')
+ removed = int(parts[1].strip())
+ changed = removed > 0
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Depclean completed.',
+ )
+
+
+def run_emerge(module, packages, *args):
+ args = list(args)
+
+ args.append('--ask=n')
+ if module.check_mode:
+ args.append('--pretend')
+
+ cmd = [module.emerge_path] + args + packages
+ return cmd, module.run_command(cmd)
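+# For illustration: run_emerge(module, ['app-misc/foo'], '--update') runs
+# ['emerge', '--update', '--ask=n', 'app-misc/foo'], with '--pretend' added
+# before the package list in check mode.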
+
+
+portage_present_states = ['present', 'emerged', 'installed', 'latest']
+portage_absent_states = ['absent', 'unmerged', 'removed']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ package=dict(type='list', elements='str', default=None, aliases=['name']),
+ state=dict(
+ default=portage_present_states[0],
+ choices=portage_present_states + portage_absent_states,
+ ),
+ update=dict(default=False, type='bool'),
+ deep=dict(default=False, type='bool'),
+ newuse=dict(default=False, type='bool'),
+ changed_use=dict(default=False, type='bool'),
+ oneshot=dict(default=False, type='bool'),
+ noreplace=dict(default=True, type='bool'),
+ nodeps=dict(default=False, type='bool'),
+ onlydeps=dict(default=False, type='bool'),
+ depclean=dict(default=False, type='bool'),
+ quiet=dict(default=False, type='bool'),
+ verbose=dict(default=False, type='bool'),
+ sync=dict(default=None, choices=['yes', 'web', 'no']),
+ getbinpkgonly=dict(default=False, type='bool'),
+ getbinpkg=dict(default=False, type='bool'),
+ usepkgonly=dict(default=False, type='bool'),
+ usepkg=dict(default=False, type='bool'),
+ keepgoing=dict(default=False, type='bool'),
+ jobs=dict(default=None, type='int'),
+ loadavg=dict(default=None, type='float'),
+ quietbuild=dict(default=False, type='bool'),
+ quietfail=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['package', 'sync', 'depclean']],
+ mutually_exclusive=[
+ ['nodeps', 'onlydeps'],
+ ['quiet', 'verbose'],
+ ['quietbuild', 'verbose'],
+ ['quietfail', 'verbose'],
+ ],
+ supports_check_mode=True,
+ )
+
+ module.emerge_path = module.get_bin_path('emerge', required=True)
+ module.equery_path = module.get_bin_path('equery', required=True)
+
+ p = module.params
+
+ if p['sync'] and p['sync'].strip() != 'no':
+ sync_repositories(module, webrsync=(p['sync'] == 'web'))
+ if not p['package']:
+ module.exit_json(msg='Sync successfully finished.')
+
+ packages = []
+ if p['package']:
+ packages.extend(p['package'])
+
+ if p['depclean']:
+ if packages and p['state'] not in portage_absent_states:
+ module.fail_json(
+ msg='Depclean can only be used with package when the state is '
+ 'one of: %s' % portage_absent_states,
+ )
+
+ cleanup_packages(module, packages)
+
+ elif p['state'] in portage_present_states:
+ emerge_packages(module, packages)
+
+ elif p['state'] in portage_absent_states:
+ unmerge_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py
new file mode 100644
index 00000000..d1c33cc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, berenddeboer
+# Written by berenddeboer <berend@pobox.com>
+# Based on pkgng module written by bleader <bleader at ratonland.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portinstall
+short_description: Installing packages from FreeBSD's ports system
+description:
+ - Manage packages for FreeBSD using 'portinstall'.
+options:
+ name:
+ description:
+ - name of package to install/remove
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ type: str
+ use_packages:
+ description:
+ - use packages instead of ports whenever available
+ type: bool
+ required: false
+ default: yes
+author: "berenddeboer (@berenddeboer)"
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.portinstall:
+ name: foo
+ state: present
+
+- name: Install package security/cyrus-sasl2-saslauthd
+ community.general.portinstall:
+ name: security/cyrus-sasl2-saslauthd
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.portinstall:
+ name: foo,bar
+ state: absent
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def query_package(module, name):
+
+ pkg_info_path = module.get_bin_path('pkg_info', False)
+
+ # Assume that if we have pkg_info, we haven't upgraded to pkgng
+ if pkg_info_path:
+ pkgng = False
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+ rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True)
+ else:
+ pkgng = True
+ pkg_info_path = module.get_bin_path('pkg', True)
+ pkg_info_path = pkg_info_path + " info"
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))
+
+ found = rc == 0
+
+ if not found:
+        # databases/mysql55-client installs as mysql-client, so try solving
+        # that the ugly way. Pity FreeBSD doesn't have a foolproof way of
+        # checking whether a package is installed.
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+            # The pkgng and legacy tools take the same query form here.
+            rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+
+ found = rc == 0
+
+ return found
+
+
+def matching_packages(module, name):
+
+ ports_glob_path = module.get_bin_path('ports_glob', True)
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+ # counts the number of packages found
+ occurrences = out.count('\n')
+ if occurrences == 0:
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+ occurrences = out.count('\n')
+ return occurrences
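+# Note: like query_package above, this falls back to the name with digits
+# stripped (e.g. 'mysql55-client' -> 'mysql-client') when the literal name
+# matches nothing in the ports tree.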
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+ # If pkg_delete not found, we assume pkgng
+ pkg_delete_path = module.get_bin_path('pkg_delete', False)
+ if not pkg_delete_path:
+ pkg_delete_path = module.get_bin_path('pkg', True)
+ pkg_delete_path = pkg_delete_path + " delete -y"
+
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
+
+ if query_package(module, package):
+ name_without_digits = re.sub('[0-9]', '', package)
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
+ shlex_quote(name_without_digits)),
+ use_unsafe_shell=True)
+ if query_package(module, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages, use_packages):
+
+ install_c = 0
+
+ # If portinstall not found, automagically install
+ portinstall_path = module.get_bin_path('portinstall', False)
+ if not portinstall_path:
+ pkg_path = module.get_bin_path('pkg', False)
+ if pkg_path:
+ module.run_command("pkg install -y portupgrade")
+ portinstall_path = module.get_bin_path('portinstall', True)
+
+ if use_packages:
+ portinstall_params = "--use-packages"
+ else:
+ portinstall_params = ""
+
+ for package in packages:
+ if query_package(module, package):
+ continue
+
+ # TODO: check how many match
+ matches = matching_packages(module, package)
+ if matches == 1:
+ rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+ if not query_package(module, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+ elif matches == 0:
+ module.fail_json(msg="no matches for package %s" % (package))
+ else:
+ module.fail_json(msg="%s matches found for package name %s" % (matches, package))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], required=True),
+ use_packages=dict(type='bool', default=True)))
+
+ p = module.params
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] == "present":
+ install_packages(module, pkgs, p["use_packages"])
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py
new file mode 100644
index 00000000..8dbc6b9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py
@@ -0,0 +1,754 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Joe Adams <@sysadmind>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pulp_repo
+author: "Joe Adams (@sysadmind)"
+short_description: Add or remove Pulp repos from a remote host.
+description:
+ - Add or remove Pulp repos from a remote host.
+options:
+ add_export_distributor:
+ description:
+ - Whether or not to add the export distributor to new C(rpm) repositories.
+ type: bool
+ default: no
+ feed:
+ description:
+ - Upstream feed URL to receive updates from.
+ type: str
+ force_basic_auth:
+ description:
+      - httplib2, the library used by the M(ansible.builtin.uri) module, only sends
+ authentication information when a webservice responds to an initial
+ request with a 401 status. Since some basic auth services do not
+ properly send a 401, logins will fail. This option forces the sending of
+ the Basic authentication header upon initial request.
+ type: bool
+ default: no
+ generate_sqlite:
+ description:
+ - Boolean flag to indicate whether sqlite files should be generated during
+ a repository publish.
+ required: false
+ type: bool
+ default: no
+ feed_ca_cert:
+ description:
+ - CA certificate string used to validate the feed source SSL certificate.
+ This can be the file content or the path to the file.
+ The ca_cert alias will be removed in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_ca_cert, ca_cert ]
+ feed_client_cert:
+ description:
+ - Certificate used as the client certificate when synchronizing the
+ repository. This is used to communicate authentication information to
+ the feed source. The value to this option must be the full path to the
+ certificate. The specified file may be the certificate itself or a
+ single file containing both the certificate and private key. This can be
+ the file content or the path to the file.
+      - If not specified, the default value will come from C(client_cert);
+        this will change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_cert ]
+ feed_client_key:
+ description:
+ - Private key to the certificate specified in I(importer_ssl_client_cert),
+ assuming it is not included in the certificate file itself. This can be
+ the file content or the path to the file.
+      - If not specified, the default value will come from C(client_key);
+        this will change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_key ]
+ name:
+ description:
+ - Name of the repo to add or remove. This correlates to repo-id in Pulp.
+ required: true
+ type: str
+ aliases: [ repo ]
+ proxy_host:
+ description:
+ - Proxy url setting for the pulp repository importer. This is in the
+ format scheme://host.
+ required: false
+ default: null
+ type: str
+ proxy_port:
+ description:
+ - Proxy port setting for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_username:
+ description:
+ - Proxy username for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_password:
+ description:
+ - Proxy password for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ publish_distributor:
+ description:
+ - Distributor to use when state is C(publish). The default is to
+ publish all distributors.
+ type: str
+ pulp_host:
+ description:
+ - URL of the pulp server to connect to.
+ default: https://127.0.0.1
+ type: str
+ relative_url:
+ description:
+      - Relative URL for the local repository. It is required when C(state=present).
+ type: str
+ repo_type:
+ description:
+ - Repo plugin type to use (i.e. C(rpm), C(docker)).
+ default: rpm
+ type: str
+ repoview:
+ description:
+      - Whether to generate repoview files for a published repository. Setting
+        this to C(yes) automatically activates I(generate_sqlite).
+ required: false
+ type: bool
+ default: no
+ serve_http:
+ description:
+ - Make the repo available over HTTP.
+ type: bool
+ default: no
+ serve_https:
+ description:
+ - Make the repo available over HTTPS.
+ type: bool
+ default: yes
+ state:
+ description:
+ - The repo state. A state of C(sync) will queue a sync of the repo.
+ This is asynchronous but not delayed like a scheduled sync. A state of
+ C(publish) will use the repository's distributor to publish the content.
+ default: present
+ choices: [ "present", "absent", "sync", "publish" ]
+ type: str
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication to the pulp API.
+ If the I(url_username) parameter is not specified, the I(url_password)
+ parameter will not be used.
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication to the pulp API.
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ wait_for_completion:
+ description:
+ - Wait for asynchronous tasks to complete before returning.
+ type: bool
+ default: no
+notes:
+ - This module can currently only create distributors and importers on rpm
+ repositories. Contributions to support other repo types are welcome.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Create a new repo with name 'my_repo'
+ community.general.pulp_repo:
+ name: my_repo
+ relative_url: my/repo
+ state: present
+
+- name: Create a repo with a feed and a relative URL
+ community.general.pulp_repo:
+ name: my_centos_updates
+ repo_type: rpm
+ feed: http://mirror.centos.org/centos/6/updates/x86_64/
+ relative_url: centos/6/updates
+ url_username: admin
+ url_password: admin
+ force_basic_auth: yes
+ state: present
+
+- name: Remove a repo from the pulp server
+ community.general.pulp_repo:
+ name: my_old_repo
+ repo_type: rpm
+ state: absent
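+
+# A hypothetical publish invocation; 'yum_distributor' is the distributor id
+# this module creates for rpm repositories
+- name: Publish my_repo through its yum distributor
+  community.general.pulp_repo:
+    name: my_repo
+    repo_type: rpm
+    publish_distributor: yum_distributor
+    state: publish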
+'''
+
+RETURN = '''
+repo:
+ description: Name of the repo that the action was performed on.
+ returned: success
+ type: str
+ sample: my_repo
+'''
+
+import json
+import os
+from time import sleep
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.urls import url_argument_spec
+
+
+class pulp_server(object):
+ """
+ Class to interact with a Pulp server
+ """
+
+ def __init__(self, module, pulp_host, repo_type, wait_for_completion=False):
+ self.module = module
+ self.host = pulp_host
+ self.repo_type = repo_type
+ self.repo_cache = dict()
+ self.wait_for_completion = wait_for_completion
+
+ def check_repo_exists(self, repo_id):
+ try:
+ self.get_repo_config_by_id(repo_id)
+ except IndexError:
+ return False
+ else:
+ return True
+
+ def compare_repo_distributor_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ for key, value in kwargs.items():
+ if key not in distributor['config'].keys():
+ return False
+
+ if not distributor['config'][key] == value:
+ return False
+
+ return True
+
+ def compare_repo_importer_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for importer in repo_config['importers']:
+ for key, value in kwargs.items():
+ if value is not None:
+ if key not in importer['config'].keys():
+ return False
+
+ if not importer['config'][key] == value:
+ return False
+
+ return True
+
+ def create_repo(
+ self,
+ repo_id,
+ relative_url,
+ feed=None,
+ generate_sqlite=False,
+ serve_http=False,
+ serve_https=True,
+ proxy_host=None,
+ proxy_port=None,
+ proxy_username=None,
+ proxy_password=None,
+ repoview=False,
+ ssl_ca_cert=None,
+ ssl_client_cert=None,
+ ssl_client_key=None,
+ add_export_distributor=False
+ ):
+ url = "%s/pulp/api/v2/repositories/" % self.host
+ data = dict()
+ data['id'] = repo_id
+ data['distributors'] = []
+
+ if self.repo_type == 'rpm':
+ yum_distributor = dict()
+ yum_distributor['distributor_id'] = "yum_distributor"
+ yum_distributor['distributor_type_id'] = "yum_distributor"
+ yum_distributor['auto_publish'] = True
+ yum_distributor['distributor_config'] = dict()
+ yum_distributor['distributor_config']['http'] = serve_http
+ yum_distributor['distributor_config']['https'] = serve_https
+ yum_distributor['distributor_config']['relative_url'] = relative_url
+ yum_distributor['distributor_config']['repoview'] = repoview
+ yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(yum_distributor)
+
+ if add_export_distributor:
+ export_distributor = dict()
+ export_distributor['distributor_id'] = "export_distributor"
+ export_distributor['distributor_type_id'] = "export_distributor"
+ export_distributor['auto_publish'] = False
+ export_distributor['distributor_config'] = dict()
+ export_distributor['distributor_config']['http'] = serve_http
+ export_distributor['distributor_config']['https'] = serve_https
+ export_distributor['distributor_config']['relative_url'] = relative_url
+ export_distributor['distributor_config']['repoview'] = repoview
+ export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(export_distributor)
+
+ data['importer_type_id'] = "yum_importer"
+ data['importer_config'] = dict()
+
+ if feed:
+ data['importer_config']['feed'] = feed
+
+ if proxy_host:
+ data['importer_config']['proxy_host'] = proxy_host
+
+ if proxy_port:
+ data['importer_config']['proxy_port'] = proxy_port
+
+ if proxy_username:
+ data['importer_config']['proxy_username'] = proxy_username
+
+ if proxy_password:
+ data['importer_config']['proxy_password'] = proxy_password
+
+ if ssl_ca_cert:
+ data['importer_config']['ssl_ca_cert'] = ssl_ca_cert
+
+ if ssl_client_cert:
+ data['importer_config']['ssl_client_cert'] = ssl_client_cert
+
+ if ssl_client_key:
+ data['importer_config']['ssl_client_key'] = ssl_client_key
+
+ data['notes'] = {
+ "_repo-type": "rpm-repo"
+ }
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 201:
+ self.module.fail_json(
+ msg="Failed to create repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+ else:
+ return True
+
+ def delete_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='DELETE')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to delete repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def get_repo_config_by_id(self, repo_id):
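+ # Serve repeated lookups from a local cache built from the repo
+ # list fetched by set_repo_list().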
+ if repo_id not in self.repo_cache:
+ repo_array = [x for x in self.repo_list if x['id'] == repo_id]
+ self.repo_cache[repo_id] = repo_array[0]
+
+ return self.repo_cache[repo_id]
+
+ def publish_repo(self, repo_id, publish_distributor):
+ url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id)
+
+ # If there's no distributor specified, we will publish them all
+ if publish_distributor is None:
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ data = dict()
+ data['id'] = distributor['id']
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=distributor['id'])
+ else:
+ data = dict()
+ data['id'] = publish_distributor
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=publish_distributor)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def sync_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to schedule a sync of the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def update_repo_distributor_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id)
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ distributor_url = "%s%s/" % (url, distributor['id'])
+ data = dict()
+ data['distributor_config'] = dict()
+
+ for key, value in kwargs.items():
+ data['distributor_config'][key] = value
+
+ response, info = fetch_url(
+ self.module,
+ distributor_url,
+ data=json.dumps(data),
+ method='PUT')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the relative url for the repository.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ def update_repo_importer_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id)
+ data = dict()
+ importer_config = dict()
+
+ for key, value in kwargs.items():
+ if value is not None:
+ importer_config[key] = value
+
+ data['importer_config'] = importer_config
+
+ if self.repo_type == 'rpm':
+ data['importer_type_id'] = "yum_importer"
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the repo importer configuration",
+ status_code=info['status'],
+ response=info['msg'],
+ importer_config=importer_config,
+ url=url)
+
+ def set_repo_list(self):
+ url = "%s/pulp/api/v2/repositories/?details=true" % self.host
+ response, info = fetch_url(self.module, url, method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Request failed",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ self.repo_list = json.load(response)
+
+ def verify_tasks_completed(self, response_dict):
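+ # Poll each task spawned by the API call every two seconds until it
+ # reaches a terminal state ('finished' or 'error').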
+ for task in response_dict['spawned_tasks']:
+ task_url = "%s%s" % (self.host, task['_href'])
+
+ while True:
+ response, info = fetch_url(
+ self.module,
+ task_url,
+ data='',
+ method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Failed to check async task status.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=task_url)
+
+ task_dict = json.load(response)
+
+ if task_dict['state'] == 'finished':
+ return True
+
+ if task_dict['state'] == 'error':
+ self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error'])
+
+ sleep(2)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ add_export_distributor=dict(default=False, type='bool'),
+ feed=dict(),
+ generate_sqlite=dict(default=False, type='bool'),
+ feed_ca_cert=dict(aliases=['importer_ssl_ca_cert', 'ca_cert'],
+ deprecated_aliases=[dict(name='ca_cert', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ feed_client_cert=dict(aliases=['importer_ssl_client_cert']),
+ feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True),
+ name=dict(required=True, aliases=['repo']),
+ proxy_host=dict(),
+ proxy_port=dict(),
+ proxy_username=dict(),
+ proxy_password=dict(no_log=True),
+ publish_distributor=dict(),
+ pulp_host=dict(default="https://127.0.0.1"),
+ relative_url=dict(),
+ repo_type=dict(default="rpm"),
+ repoview=dict(default=False, type='bool'),
+ serve_http=dict(default=False, type='bool'),
+ serve_https=dict(default=True, type='bool'),
+ state=dict(
+ default="present",
+ choices=['absent', 'present', 'sync', 'publish']),
+ wait_for_completion=dict(default=False, type="bool"))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ add_export_distributor = module.params['add_export_distributor']
+ feed = module.params['feed']
+ generate_sqlite = module.params['generate_sqlite']
+ importer_ssl_ca_cert = module.params['feed_ca_cert']
+ importer_ssl_client_cert = module.params['feed_client_cert']
+ if importer_ssl_client_cert is None and module.params['client_cert'] is not None:
+ importer_ssl_client_cert = module.params['client_cert']
+ module.deprecate("To specify client certificates to be used with the repo to sync, and not for communication with the "
+ "Pulp instance, use the new options `feed_client_cert` and `feed_client_key` (available since "
+ "Ansible 2.9.2). Until community.general 3.0.0, the default value for `feed_client_cert` will be "
+ "taken from `client_cert` if only the latter is specified",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ importer_ssl_client_key = module.params['feed_client_key']
+ if importer_ssl_client_key is None and module.params['client_key'] is not None:
+ importer_ssl_client_key = module.params['client_key']
+ module.deprecate("In Ansible 2.9.2 `feed_client_key` option was added. Until community.general 3.0.0 the default "
+ "value will come from client_key option",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ proxy_host = module.params['proxy_host']
+ proxy_port = module.params['proxy_port']
+ proxy_username = module.params['proxy_username']
+ proxy_password = module.params['proxy_password']
+ publish_distributor = module.params['publish_distributor']
+ pulp_host = module.params['pulp_host']
+ relative_url = module.params['relative_url']
+ repo = module.params['name']
+ repo_type = module.params['repo_type']
+ repoview = module.params['repoview']
+ serve_http = module.params['serve_http']
+ serve_https = module.params['serve_https']
+ state = module.params['state']
+ wait_for_completion = module.params['wait_for_completion']
+
+ if (state == 'present') and (not relative_url):
+ module.fail_json(msg="When state is present, relative_url is required.")
+
+ # Ensure that the importer_ssl_* is the content and not a file path
+ if importer_ssl_ca_cert is not None:
+ importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert)
+ if os.path.isfile(importer_ssl_ca_cert_file_path):
+ with open(importer_ssl_ca_cert_file_path, 'r') as ca_cert_file:
+ importer_ssl_ca_cert = ca_cert_file.read()
+
+ if importer_ssl_client_cert is not None:
+ importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert)
+ if os.path.isfile(importer_ssl_client_cert_file_path):
+ with open(importer_ssl_client_cert_file_path, 'r') as client_cert_file:
+ importer_ssl_client_cert = client_cert_file.read()
+
+ if importer_ssl_client_key is not None:
+ importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key)
+ if os.path.isfile(importer_ssl_client_key_file_path):
+ with open(importer_ssl_client_key_file_path, 'r') as client_key_file:
+ importer_ssl_client_key = client_key_file.read()
+
+ server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion)
+ server.set_repo_list()
+ repo_exists = server.check_repo_exists(repo)
+
+ changed = False
+
+ if state == 'absent' and repo_exists:
+ if not module.check_mode:
+ server.delete_repo(repo)
+
+ changed = True
+
+ if state == 'sync':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be synced.")
+
+ if not module.check_mode:
+ server.sync_repo(repo)
+
+ changed = True
+
+ if state == 'publish':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be published.")
+
+ if not module.check_mode:
+ server.publish_repo(repo, publish_distributor)
+
+ changed = True
+
+ if state == 'present':
+ if not repo_exists:
+ if not module.check_mode:
+ server.create_repo(
+ repo_id=repo,
+ relative_url=relative_url,
+ feed=feed,
+ generate_sqlite=generate_sqlite,
+ serve_http=serve_http,
+ serve_https=serve_https,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ repoview=repoview,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key,
+ add_export_distributor=add_export_distributor)
+
+ changed = True
+
+ else:
+ # Check to make sure all the settings are correct
+ # The importer config gets overwritten on set and not updated, so
+ # we set the whole config at the same time.
+ if not server.compare_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key
+ ):
+ if not module.check_mode:
+ server.update_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key)
+
+ changed = True
+
+ if relative_url is not None:
+ if not server.compare_repo_distributor_config(
+ repo,
+ relative_url=relative_url
+ ):
+ if not module.check_mode:
+ server.update_repo_distributor_config(
+ repo,
+ relative_url=relative_url)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, repoview=repoview):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, repoview=repoview)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, http=serve_http):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, http=serve_http)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, https=serve_https):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, https=serve_https)
+
+ changed = True
+
+ module.exit_json(changed=changed, repo=repo)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py
new file mode 100644
index 00000000..a4599588
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py
@@ -0,0 +1,930 @@
+#!/usr/bin/python
+
+# James Laska (jlaska@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redhat_subscription
+short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
+description:
+ - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
+author: "Barnaby Court (@barnabycourt)"
+notes:
+ - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
+ - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
+ I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
+ I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
+ config file and default to None.
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+ - whether to register and subscribe (C(present)), or unregister (C(absent)) a system
+ choices: [ "present", "absent" ]
+ default: "present"
+ type: str
+ username:
+ description:
+ - access.redhat.com or Sat6 username
+ type: str
+ password:
+ description:
+ - access.redhat.com or Sat6 password
+ type: str
+ server_hostname:
+ description:
+ - Specify an alternative Red Hat Subscription Management or Sat6 server
+ type: str
+ server_insecure:
+ description:
+ - Enable or disable https server certificate verification when connecting to C(server_hostname)
+ type: str
+ rhsm_baseurl:
+ description:
+ - Specify CDN baseurl
+ type: str
+ rhsm_repo_ca_cert:
+ description:
+ - Specify an alternative location for a CA certificate for CDN
+ type: str
+ server_proxy_hostname:
+ description:
+ - Specify an HTTP proxy hostname
+ type: str
+ server_proxy_port:
+ description:
+ - Specify an HTTP proxy port
+ type: str
+ server_proxy_user:
+ description:
+ - Specify a user for HTTP proxy with basic authentication
+ type: str
+ server_proxy_password:
+ description:
+ - Specify a password for HTTP proxy with basic authentication
+ type: str
+ auto_attach:
+ description:
+ - Upon successful registration, auto-consume available subscriptions
+ - Added in favor of the deprecated I(autosubscribe) option in Ansible 2.5.
+ type: bool
+ aliases: [autosubscribe]
+ activationkey:
+ description:
+ - supply an activation key for use with registration
+ type: str
+ org_id:
+ description:
+ - Organization ID to use in conjunction with activationkey
+ type: str
+ environment:
+ description:
+ - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
+ type: str
+ pool:
+ description:
+ - |
+ Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
+ possible, as it is much faster. Mutually exclusive with I(pool_ids).
+ default: '^$'
+ type: str
+ pool_ids:
+ description:
+ - |
+ Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
+ A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
+ or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
+ C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
+ entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
+ default: []
+ type: list
+ consumer_type:
+ description:
+ - The type of unit to register, defaults to system
+ type: str
+ consumer_name:
+ description:
+ - Name of the system to register, defaults to the hostname
+ type: str
+ consumer_id:
+ description:
+ - |
+ References an existing consumer ID to resume using a previous registration
+ for this system. If the system's identity certificate is lost or corrupted,
+ this option allows it to resume using its previous identity and subscriptions.
+ The default is to not specify a consumer ID so a new ID is created.
+ type: str
+ force_register:
+ description:
+ - Register the system even if it is already registered
+ type: bool
+ default: no
+ release:
+ description:
+ - Set a release version
+ type: str
+ syspurpose:
+ description:
+ - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json)
+ and synchronize these attributes with RHSM server. Syspurpose attributes help attach
+ the most appropriate subscriptions to the system automatically. When the C(syspurpose.json) file
+ already contains some attributes, the new attributes overwrite the existing ones.
+ Any attribute missing from the new list of attributes is removed from the
+ C(syspurpose.json) file. Unknown attributes are ignored.
+ type: dict
+ default: {}
+ suboptions:
+ usage:
+ description: Syspurpose attribute usage
+ type: str
+ role:
+ description: Syspurpose attribute role
+ type: str
+ service_level_agreement:
+ description: Syspurpose attribute service_level_agreement
+ type: str
+ addons:
+ description: Syspurpose attribute addons
+ type: list
+ sync:
+ description:
+ - When this option is C(true), syspurpose attributes are synchronized with the
+ RHSM server immediately. When it is C(false), the attributes are synchronized
+ later by the rhsmcertd daemon.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+
+- name: Same as above but subscribe to a specific pool by ID.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids: 0123456789abcdef0123456789abcdef
+
+- name: Register and subscribe to multiple pools.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+- name: Same as above but consume multiple entitlements.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef: 2
+ - 1123456789abcdef0123456789abcdef: 4
+
+- name: Register and pull existing system data.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
+
+- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^Red Hat Enterprise Server$'
+
+- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ environment: Library
+ auto_attach: true
+
+- name: Register as user (joe_user) with password (somepass) and a specific release
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ release: 7.4
+
+- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+ syspurpose:
+ usage: "Production"
+ role: "Red Hat Enterprise Server"
+ service_level_agreement: "Premium"
+ addons:
+ - addon1
+ - addon2
+ sync: true
+'''
+
+RETURN = '''
+subscribed_pool_ids:
+ description: List of pool IDs to which system is now subscribed
+ returned: success
+ type: complex
+ sample: {
+ "8a85f9815ab905d3015ab928c7005de4": "1"
+ }
+'''
+
+from os.path import isfile
+from os import unlink
+import re
+import shutil
+import tempfile
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves import configparser
+
+
+SUBMAN_CMD = None
+
+
+class RegistrationBase(object):
+
+ REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
+
+ def __init__(self, module, username=None, password=None):
+ self.module = module
+ self.username = username
+ self.password = password
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ if isfile(self.REDHAT_REPO):
+ unlink(self.REDHAT_REPO)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
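+ # Rewrite the plugin's 'enabled' flag on a temporary copy of the
+ # config, then atomically move it into place.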
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
+ if isfile(plugin_conf):
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ if enabled:
+ cfg.set('main', 'enabled', '1')
+ else:
+ cfg.set('main', 'enabled', '0')
+
+ with open(tmpfile, 'w+') as fd:
+ cfg.write(fd)
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
+ self.module = module
+
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHSM
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ args = [SUBMAN_CMD, 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'server_hostname' becomes '--server.hostname'.
+ options = []
+ for k, v in sorted(kwargs.items()):
+ if re.search(r'^(server|rhsm)_', k) and v is not None:
+ options.append('--%s=%s' % (k.replace('_', '.', 1), v))
+
+ # When there is nothing to configure, it is not necessary to run
+ # the config command, because it would only return the current
+ # content of the configuration file
+ if len(options) == 0:
+ return
+
+ args.extend(options)
+
+ self.module.run_command(args, check_rc=True)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered to RHSM.
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHSM.
+ '''
+
+ args = [SUBMAN_CMD, 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ return rc == 0
+
+ def register(self, username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register, environment,
+ rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
+ server_proxy_port, server_proxy_user, server_proxy_password, release):
+ '''
+ Register the current system to the provided RHSM or Sat6 server
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'register']
+
+ # Generate command arguments
+ if force_register:
+ args.extend(['--force'])
+
+ if rhsm_baseurl:
+ args.extend(['--baseurl', rhsm_baseurl])
+
+ if server_insecure:
+ args.extend(['--insecure'])
+
+ if server_hostname:
+ args.extend(['--serverurl', server_hostname])
+
+ if org_id:
+ args.extend(['--org', org_id])
+
+ if server_proxy_hostname and server_proxy_port:
+ args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
+
+ if server_proxy_user:
+ args.extend(['--proxyuser', server_proxy_user])
+
+ if server_proxy_password:
+ args.extend(['--proxypassword', server_proxy_password])
+
+ if activationkey:
+ args.extend(['--activationkey', activationkey])
+ else:
+ if auto_attach:
+ args.append('--auto-attach')
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+ if consumer_type:
+ args.extend(['--type', consumer_type])
+ if consumer_name:
+ args.extend(['--name', consumer_name])
+ if consumer_id:
+ args.extend(['--consumerid', consumer_id])
+ if environment:
+ args.extend(['--environment', environment])
+
+ if release:
+ args.extend(['--release', release])
+
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+ def unsubscribe(self, serials=None):
+ '''
+ Unsubscribe a system from subscribed channels
+ Args:
+ serials(list or None): list of serials to unsubscribe. If
+ serials is None, all subscribed channels
+ will be removed; an empty list removes nothing.
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ items = []
+ if serials:
+ items = ["--serial=%s" % s for s in serials]
+ if serials is None:
+ items = ["--all"]
+
+ if items:
+ args = [SUBMAN_CMD, 'unsubscribe'] + items
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ return serials
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'unregister']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression. It matches regexp against available pool ids first.
+ If any pool ids match, subscribe to those pools and return.
+
+ If no pool ids match, then match regexp against available pool product
+ names. Note this can still easily match many pools. Then subscribe
+ to those pools.
+
+ Since a pool id is a more specific match, we only fall back to matching
+ against names if we did not match any pool ids.
+
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ # See https://github.com/ansible/ansible/issues/19466
+
+ # subscribe to pools whose pool id matches regexp (and only the pool id)
+ subscribed_pool_ids = self.subscribe_pool(regexp)
+
+ # If we found any matches, we are done
+ # Don't attempt to match pools by product name
+ if subscribed_pool_ids:
+ return subscribed_pool_ids
+
+ # We didn't match any pool ids.
+ # Now try subscribing to pools based on product name match
+ # Note: This can match lots of product names.
+ subscribed_by_product_pool_ids = self.subscribe_product(regexp)
+ if subscribed_by_product_pool_ids:
+ return subscribed_by_product_pool_ids
+
+ # no matches
+ return []
+
+ def subscribe_by_pool_ids(self, pool_ids):
+ """
+ Try to subscribe to the list of pool IDs
+ """
+ available_pools = RhsmPools(self.module)
+
+ available_pool_ids = [p.get_pool_id() for p in available_pools]
+
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if pool_id in available_pool_ids:
+ args = [SUBMAN_CMD, 'attach', '--pool', pool_id]
+ if quantity is not None:
+ args.extend(['--quantity', to_native(quantity)])
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ else:
+ self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
+ return pool_ids
+
+ def subscribe_pool(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_pools(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def subscribe_product(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_products(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def update_subscriptions(self, regexp):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+ pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
+ pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
+
+ serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ subscribed_pool_ids = self.subscribe(regexp)
+
+ if subscribed_pool_ids or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
+ 'unsubscribed_serials': serials}
+
+ def update_subscriptions_by_pool_ids(self, pool_ids):
+ changed = False
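+ # Unsubscribe serials whose consumed quantity no longer matches the
+ # request, then attach whatever pools are missing.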
+ consumed_pools = RhsmPools(self.module, consumed=True)
+
+ existing_pools = {}
+ for p in consumed_pools:
+ existing_pools[p.get_pool_id()] = p.QuantityUsed
+
+ serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ missing_pools = {}
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if existing_pools.get(pool_id, 0) != quantity:
+ missing_pools[pool_id] = quantity
+
+ self.subscribe_by_pool_ids(missing_pools)
+
+ if missing_pools or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
+ 'unsubscribed_serials': serials}
+
+ def sync_syspurpose(self):
+ """
+ Try to synchronize syspurpose attributes with server
+ """
+ args = [SUBMAN_CMD, 'status']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self._name)
+
+ def get_pool_id(self):
+ # Evaluate the fallback lazily; getattr's default argument is evaluated eagerly
+ pool_id = getattr(self, 'PoolId', None)
+ return pool_id if pool_id is not None else getattr(self, 'PoolID')
+
+ def subscribe(self):
+ args = "subscription-manager attach --pool %s" % self.get_pool_id()
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ return rc == 0
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pools subscriptions with RHSM
+ """
+
+ def __init__(self, module, consumed=False):
+ self.module = module
+ self.products = self._load_product_list(consumed)
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self, consumed=False):
+ """
+ Load the list of all available or consumed pools for the system into a data structure
+
+ Args:
+ consumed(bool): if True list consumed pools, else list available pools (default False)
+ """
+ args = "subscription-manager list"
+ if consumed:
+ args += " --consumed"
+ else:
+ args += " --available"
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
+
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "") # Normalize keys such as "Pool ID" to "PoolID"
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter_pools(self, regexp='^$'):
+ '''
+ Yield the RhsmPools whose pool id matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product.get_pool_id()):
+ yield product
+
+ def filter_products(self, regexp='^$'):
+ '''
+ Yield the RhsmPools whose product name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
+
+
+class SysPurpose(object):
+ """
+ This class is used for reading and writing to syspurpose.json file
+ """
+
+ SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
+
+ ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
+
+ def __init__(self, path=None):
+ """
+ Initialize class used for reading syspurpose json file
+ """
+ self.path = path or self.SYSPURPOSE_FILE_PATH
+
+ def update_syspurpose(self, new_syspurpose):
+ """
+ Try to update current syspurpose with new attributes from new_syspurpose
+ """
+ syspurpose = {}
+ syspurpose_changed = False
+ for key, value in new_syspurpose.items():
+ if key in self.ALLOWED_ATTRIBUTES:
+ if value is not None:
+ syspurpose[key] = value
+ elif key == 'sync':
+ pass
+ else:
+ raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
+ (key, self.ALLOWED_ATTRIBUTES))
+ current_syspurpose = self._read_syspurpose()
+ if current_syspurpose != syspurpose:
+ syspurpose_changed = True
+ # Update current syspurpose with new values
+ current_syspurpose.update(syspurpose)
+ # When some key is not listed in new syspurpose, then delete it from current syspurpose
+ # and ignore custom attributes created by user (e.g. "foo": "bar")
+ for key in list(current_syspurpose):
+ if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
+ del current_syspurpose[key]
+ self._write_syspurpose(current_syspurpose)
+ return syspurpose_changed
+
+ def _write_syspurpose(self, new_syspurpose):
+ """
+ Write the new syspurpose attributes to the JSON file.
+ """
+ with open(self.path, "w") as fp:
+ fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
+
+ def _read_syspurpose(self):
+ """
+ Read the current syspurpose from the JSON file.
+ """
+ current_syspurpose = {}
+ try:
+ with open(self.path, "r") as fp:
+ content = fp.read()
+ except IOError:
+ pass
+ else:
+ current_syspurpose = json.loads(content)
+ return current_syspurpose
+
+
+def main():
+
+ # Load RHSM configuration from file
+ rhsm = Rhsm(None)
+
+ # Note: the default values for parameters are:
+ # 'type': 'str', 'default': None, 'required': False
+ # So there is no need to repeat these values for each parameter.
+ module = AnsibleModule(
+ argument_spec={
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'server_hostname': {},
+ 'server_insecure': {},
+ 'rhsm_baseurl': {},
+ 'rhsm_repo_ca_cert': {},
+ 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},
+ 'activationkey': {'no_log': True},
+ 'org_id': {},
+ 'environment': {},
+ 'pool': {'default': '^$'},
+ 'pool_ids': {'default': [], 'type': 'list'},
+ 'consumer_type': {},
+ 'consumer_name': {},
+ 'consumer_id': {},
+ 'force_register': {'default': False, 'type': 'bool'},
+ 'server_proxy_hostname': {},
+ 'server_proxy_port': {},
+ 'server_proxy_user': {},
+ 'server_proxy_password': {'no_log': True},
+ 'release': {},
+ 'syspurpose': {
+ 'type': 'dict',
+ 'options': {
+ 'role': {},
+ 'usage': {},
+ 'service_level_agreement': {},
+ 'addons': {'type': 'list'},
+ 'sync': {'type': 'bool', 'default': False}
+ }
+ }
+ },
+ required_together=[['username', 'password'],
+ ['server_proxy_hostname', 'server_proxy_port'],
+ ['server_proxy_user', 'server_proxy_password']],
+ mutually_exclusive=[['activationkey', 'username'],
+ ['activationkey', 'consumer_id'],
+ ['activationkey', 'environment'],
+ ['activationkey', 'autosubscribe'],
+ ['pool', 'pool_ids']],
+ required_if=[['state', 'present', ['username', 'activationkey'], True]],
+ )
+
+ rhsm.module = module
+ state = module.params['state']
+ username = module.params['username']
+ password = module.params['password']
+ server_hostname = module.params['server_hostname']
+ server_insecure = module.params['server_insecure']
+ rhsm_baseurl = module.params['rhsm_baseurl']
+ rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']
+ auto_attach = module.params['auto_attach']
+ activationkey = module.params['activationkey']
+ org_id = module.params['org_id']
+ if activationkey and not org_id:
+ module.fail_json(msg='org_id is required when using activationkey')
+ environment = module.params['environment']
+ pool = module.params['pool']
+ pool_ids = {}
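+ # Normalize pool_ids entries: accept either a bare pool ID string or a
+ # single-entry {pool_id: quantity} dict.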
+ for value in module.params['pool_ids']:
+ if isinstance(value, dict):
+ if len(value) != 1:
+ module.fail_json(msg='Unable to parse pool_ids option.')
+ pool_id, quantity = list(value.items())[0]
+ else:
+ pool_id, quantity = value, None
+ pool_ids[pool_id] = quantity
+ consumer_type = module.params["consumer_type"]
+ consumer_name = module.params["consumer_name"]
+ consumer_id = module.params["consumer_id"]
+ force_register = module.params["force_register"]
+ server_proxy_hostname = module.params['server_proxy_hostname']
+ server_proxy_port = module.params['server_proxy_port']
+ server_proxy_user = module.params['server_proxy_user']
+ server_proxy_password = module.params['server_proxy_password']
+ release = module.params['release']
+ syspurpose = module.params['syspurpose']
+
+ global SUBMAN_CMD
+ SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
+
+ syspurpose_changed = False
+ if syspurpose is not None:
+ try:
+ syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)
+ except Exception as err:
+ module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err))
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Register system
+ if rhsm.is_registered and not force_register:
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ try:
+ rhsm.sync_syspurpose()
+ except Exception as e:
+ module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e))
+ if pool != '^$' or pool_ids:
+ try:
+ if pool_ids:
+ result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
+ else:
+ result = rhsm.update_subscriptions(pool)
+ except Exception as e:
+ module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(**result)
+ else:
+ if syspurpose_changed is True:
+ module.exit_json(changed=True, msg="Syspurpose attributes changed.")
+ else:
+ module.exit_json(changed=False, msg="System already registered.")
+ else:
+ try:
+ rhsm.enable()
+ rhsm.configure(**module.params)
+ rhsm.register(username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register,
+ environment, rhsm_baseurl, server_insecure, server_hostname,
+ server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release)
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ rhsm.sync_syspurpose()
+ if pool_ids:
+ subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
+ elif pool != '^$':
+ subscribed_pool_ids = rhsm.subscribe(pool)
+ else:
+ subscribed_pool_ids = []
+ except Exception as e:
+ module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(changed=True,
+ msg="System successfully registered to '%s'." % server_hostname,
+ subscribed_pool_ids=subscribed_pool_ids)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhsm.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+ else:
+ try:
+ rhsm.unsubscribe()
+ rhsm.unregister()
+ except Exception as e:
+ module.fail_json(msg="Failed to unregister: %s" % to_native(e))
+ else:
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py
new file mode 100644
index 00000000..63be0323
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+# Copyright: (c) Vincent Van de Kussen
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhn_channel
+short_description: Adds or removes Red Hat software channels
+description:
+ - Adds or removes Red Hat software channels.
+author:
+- Vincent Van der Kussen (@vincentvdk)
+notes:
+ - This module fetches the system id from RHN.
+ - This module doesn't support I(check_mode).
+options:
+ name:
+ description:
+ - Name of the software channel.
+ required: true
+ type: str
+ sysname:
+ description:
+ - Name of the system as it is known in RHN/Satellite.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the channel should be present or not, taking action if the state is different from what is stated.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ url:
+ description:
+ - The full URL to the RHN/Satellite API.
+ required: true
+ type: str
+ user:
+ description:
+ - RHN/Satellite login.
+ required: true
+ type: str
+ password:
+ description:
+ - RHN/Satellite password.
+ aliases: [pwd]
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - If C(False), SSL certificates will not be validated.
+ - This should only be set to C(False) when used on self-controlled sites
+ using self-signed certificates, and you are absolutely sure that nobody
+ can modify traffic between the module and the site.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = '''
+- name: Add a Red Hat software channel
+ community.general.rhn_channel:
+ name: rhel-x86_64-server-v2vwin-6
+ sysname: server01
+ url: https://rhn.redhat.com/rpc/api
+ user: rhnuser
+ password: guessme
+ delegate_to: localhost
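+
+# A hedged sketch of the absent state, reusing the placeholder names and
+# credentials from the example above.
+- name: Remove a Red Hat software channel
+ community.general.rhn_channel:
+ name: rhel-x86_64-server-v2vwin-6
+ sysname: server01
+ url: https://rhn.redhat.com/rpc/api
+ user: rhnuser
+ password: guessme
+ state: absent
+ delegate_to: localhost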
+'''
+
+import ssl
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+def get_systemid(client, session, sysname):
+ systems = client.system.listUserSystems(session)
+ for system in systems:
+ if system.get('name') == sysname:
+ return int(system.get('id'))
+
+
+def subscribe_channels(channelname, client, session, sysname, sys_id):
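+ # Fetch the system's current channels, append the new one, and push
+ # the full list back.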
+ channels = base_channels(client, session, sys_id)
+ channels.append(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def unsubscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.remove(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def base_channels(client, session, sys_id):
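+ # Different RHN/Satellite versions return the channel label under
+ # different keys, hence the fallback below.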
+ basechan = client.channel.software.listSystemChannels(session, sys_id)
+ try:
+ chans = [item['label'] for item in basechan]
+ except KeyError:
+ chans = [item['channel_label'] for item in basechan]
+ return chans
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ sysname=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=True, aliases=['pwd'], no_log=True),
+ validate_certs=dict(type='bool', default=True),
+ )
+ )
+
+ state = module.params['state']
+ channelname = module.params['name']
+ systname = module.params['sysname']
+ saturl = module.params['url']
+ user = module.params['user']
+ password = module.params['password']
+ validate_certs = module.params['validate_certs']
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Python 2.7.8 and older does not verify HTTPS certificates by default
+ pass
+
+ # initialize connection
+ if ssl_context:
+ client = xmlrpc_client.ServerProxy(saturl, context=ssl_context)
+ else:
+ client = xmlrpc_client.Server(saturl)
+
+ try:
+ session = client.auth.login(user, password)
+ except Exception as e:
+ module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e))
+
+ if not session:
+ module.fail_json(msg="Failed to establish session with Satellite server.")
+
+ # get systemid
+ try:
+ sys_id = get_systemid(client, session, systname)
+ except Exception as e:
+ module.fail_json(msg="Unable to get system id: %s " % to_text(e))
+
+ if not sys_id:
+ module.fail_json(msg="Failed to get system id.")
+
+ # get channels for system
+ try:
+ chans = base_channels(client, session, sys_id)
+ except Exception as e:
+ module.fail_json(msg="Unable to get channel information: %s " % to_text(e))
+
+ try:
+ if state == 'present':
+ if channelname in chans:
+ module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
+ else:
+ subscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s added" % channelname)
+
+ if state == 'absent':
+ if channelname not in chans:
+ module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
+ else:
+ unsubscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s removed" % channelname)
+ except Exception as e:
+ module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e)))
+ finally:
+ client.auth.logout(session)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py
new file mode 100644
index 00000000..dfc408a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) James Laska
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhn_register
+short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
+description:
+ - Manage registration to the Red Hat Network.
+author:
+- James Laska (@jlaska)
+notes:
+ - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead.
+ - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey.
+requirements:
+ - rhnreg_ks
+ - either libxml2 or lxml
+options:
+ state:
+ description:
+ - Whether to register (C(present)), or unregister (C(absent)) a system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ username:
+ description:
+ - Red Hat Network username.
+ type: str
+ password:
+ description:
+ - Red Hat Network password.
+ type: str
+ server_url:
+ description:
+ - Specify an alternative Red Hat Network server URL.
+ - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date).
+ type: str
+ activationkey:
+ description:
+ - Supply an activation key for use with registration.
+ type: str
+ profilename:
+ description:
+ - Supply a profilename for use with registration.
+ type: str
+ ca_cert:
+ description:
+ - Supply a custom SSL CA certificate file for use with registration.
+ type: path
+ aliases: [ sslcacert ]
+ systemorgid:
+ description:
+ - Supply an organizational id for use with registration.
+ type: str
+ channels:
+ description:
+ - Optionally specify a list of channels to subscribe to upon successful registration.
+ type: list
+ elements: str
+ default: []
+ enable_eus:
+ description:
+ - If C(yes), extended update support will be requested.
+ type: bool
+ default: no
+ nopackages:
+ description:
+ - If C(yes), the registered node will not upload information about its installed packages to the Satellite server.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Unregister system from RHN
+ community.general.rhn_register:
+ state: absent
+ username: joe_user
+ password: somepass
+
+- name: Register as user with password and auto-subscribe to available content
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+
+- name: Register with activationkey and enable extended update support
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ enable_eus: yes
+
+- name: Register with activationkey and set a profilename which may differ from the hostname
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ profilename: host.example.com.custom
+
+- name: Register as user with password against a satellite server
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ server_url: https://xmlrpc.my.satellite/XMLRPC
+
+- name: Register as user with password and enable channels
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
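+
+# A hedged sketch; the activation key and organization id are placeholders.
+- name: Register with activationkey and organization id, without uploading package data
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ systemorgid: 222333444
+ nopackages: yes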
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import os
+import sys
+
+# Attempt to import rhn client tools
+sys.path.insert(0, '/usr/share/rhn')
+try:
+ import up2date_client
+ import up2date_client.config
+ HAS_UP2DATE_CLIENT = True
+except ImportError:
+ HAS_UP2DATE_CLIENT = False
+
+# INSERT REDHAT SNIPPETS
+from ansible_collections.community.general.plugins.module_utils import redhat
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import urllib, xmlrpc_client
+
+
+class Rhn(redhat.RegistrationBase):
+
+ def __init__(self, module=None, username=None, password=None):
+ redhat.RegistrationBase.__init__(self, module, username, password)
+ self.config = self.load_config()
+ self.server = None
+ self.session = None
+
+ def logout(self):
+ if self.session is not None:
+ self.server.auth.logout(self.session)
+
+ def load_config(self):
+ '''
+ Read configuration from /etc/sysconfig/rhn/up2date
+ '''
+ if not HAS_UP2DATE_CLIENT:
+ return None
+
+ config = up2date_client.config.initUp2dateConfig()
+
+ return config
+
+ @property
+ def server_url(self):
+ return self.config['serverURL']
+
+ @property
+ def hostname(self):
+ '''
+ Return the non-xmlrpc RHN hostname. This is a convenience method
+ used for displaying a more readable RHN hostname.
+
+ Returns: str
+ '''
+ url = urllib.parse.urlparse(self.server_url)
+ return url[1].replace('xmlrpc.', '')
+
+ @property
+ def systemid(self):
+ systemid = None
+ xpath_str = "//member[name='system_id']/value/string"
+
+ if os.path.isfile(self.config['systemIdPath']):
+ fd = open(self.config['systemIdPath'], 'r')
+ xml_data = fd.read()
+ fd.close()
+
+ # Ugh, xml parsing time ...
+ # First, try parsing with libxml2 ...
+ if systemid is None:
+ try:
+ import libxml2
+ doc = libxml2.parseDoc(xml_data)
+ ctxt = doc.xpathNewContext()
+ systemid = ctxt.xpathEval(xpath_str)[0].content
+ doc.freeDoc()
+ ctxt.xpathFreeContext()
+ except ImportError:
+ pass
+
+ # m-kay, let's try with lxml now ...
+ if systemid is None:
+ try:
+ from lxml import etree
+ root = etree.fromstring(xml_data)
+ systemid = root.xpath(xpath_str)[0].text
+ except ImportError:
+ raise Exception('"libxml2" or "lxml" is required for this module.')
+
+ # Strip the 'ID-' prefix
+ if systemid is not None and systemid.startswith('ID-'):
+ systemid = systemid[3:]
+
+ return int(systemid)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+
+ Returns: True|False
+ '''
+ return os.path.isfile(self.config['systemIdPath'])
+
+ def configure_server_url(self, server_url):
+ '''
+ Configure server_url for registration
+ '''
+
+ self.config.set('serverURL', server_url)
+ self.config.save()
+
+ def enable(self):
+ '''
+ Prepare the system for RHN registration. This includes ...
+ * enabling the rhnplugin yum plugin
+ * disabling the subscription-manager yum plugin
+ '''
+ redhat.RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', True)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False):
+ '''
+ Register system to RHN. If enable_eus=True, extended update
+ support will be requested.
+ '''
+ register_cmd = ['/usr/sbin/rhnreg_ks', '--force']
+ if self.username:
+ register_cmd.extend(['--username', self.username, '--password', self.password])
+ if self.server_url:
+ register_cmd.extend(['--serverUrl', self.server_url])
+ if enable_eus:
+ register_cmd.append('--use-eus-channel')
+ if nopackages:
+ register_cmd.append('--nopackages')
+ if activationkey is not None:
+ register_cmd.extend(['--activationkey', activationkey])
+ if profilename is not None:
+ register_cmd.extend(['--profilename', profilename])
+ if sslcacert is not None:
+ register_cmd.extend(['--sslCACert', sslcacert])
+ if systemorgid is not None:
+ register_cmd.extend(['--systemorgid', systemorgid])
+ rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)
+
+ def api(self, method, *args):
+ '''
+ Convenience RPC wrapper
+ '''
+ if self.server is None:
+ if self.hostname != 'rhn.redhat.com':
+ url = "https://%s/rpc/api" % self.hostname
+ else:
+ url = "https://xmlrpc.%s/rpc/api" % self.hostname
+ self.server = xmlrpc_client.ServerProxy(url)
+ self.session = self.server.auth.login(self.username, self.password)
+
+ func = getattr(self.server, method)
+ return func(self.session, *args)
+
+ def unregister(self):
+ '''
+ Unregister a previously registered system
+ '''
+
+ # Initiate RPC connection
+ self.api('system.deleteSystems', [self.systemid])
+
+ # Remove systemid file
+ os.unlink(self.config['systemIdPath'])
+
+ def subscribe(self, channels):
+ if not channels:
+ return
+
+ if self._is_hosted():
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ new_channels = [item['channel_label'] for item in current_channels]
+ new_channels.extend(channels)
+ return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
+
+ else:
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ current_channels = [item['label'] for item in current_channels]
+ new_base = None
+ new_childs = []
+ for ch in channels:
+ if ch in current_channels:
+ continue
+ if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
+ new_base = ch
+ else:
+ if ch not in new_childs:
+ new_childs.append(ch)
+ out_base = 0
+ out_childs = 0
+
+ if new_base:
+ out_base = self.api('system.setBaseChannel', self.systemid, new_base)
+
+ if new_childs:
+ out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
+
+ return out_base and out_childs
+
+ def _is_hosted(self):
+ '''
+ Return True if we are running against Hosted (rhn.redhat.com) or
+ False otherwise (when running against Satellite or Spacewalk)
+ '''
+ return 'rhn.redhat.com' in self.hostname
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ server_url=dict(type='str'),
+ activationkey=dict(type='str', no_log=True),
+ profilename=dict(type='str'),
+ ca_cert=dict(type='path', aliases=['sslcacert']),
+ systemorgid=dict(type='str'),
+ enable_eus=dict(type='bool', default=False),
+ nopackages=dict(type='bool', default=False),
+ channels=dict(type='list', elements='str', default=[]),
+ ),
+ # username/password is required for state=absent, or if channels is not empty
+ # (basically anything that uses self.api requires username/password) but it doesn't
+ # look like we can express that with required_if/required_together/mutually_exclusive
+
+ # only username+password can be used for unregister
+ required_if=[['state', 'absent', ['username', 'password']]],
+ )
+
+ if not HAS_UP2DATE_CLIENT:
+ module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+
+ state = module.params['state']
+ activationkey = module.params['activationkey']
+ profilename = module.params['profilename']
+ sslcacert = module.params['ca_cert']
+ systemorgid = module.params['systemorgid']
+ channels = module.params['channels']
+ enable_eus = module.params['enable_eus']
+ nopackages = module.params['nopackages']
+
+ rhn = Rhn(module=module, username=username, password=password)
+
+ # use the provided server url and persist it to the rhn config.
+ if server_url:
+ rhn.configure_server_url(server_url)
+
+ if not rhn.server_url:
+ module.fail_json(
+ msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
+ )
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Check for missing parameters ...
+        if not activationkey and not (rhn.username and rhn.password):
+            module.fail_json(msg="Missing arguments: registering requires either an activationkey or both a username and password")
+
+ # Register system
+ if rhn.is_registered:
+ module.exit_json(changed=False, msg="System already registered.")
+
+ try:
+ rhn.enable()
+ rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages)
+ rhn.subscribe(channels)
+ except Exception as exc:
+ module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc))
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhn.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+
+ if not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password")
+
+ try:
+ rhn.unregister()
+ except Exception as exc:
+ module.fail_json(msg="Failed to unregister: %s" % exc)
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py
new file mode 100644
index 00000000..22b280f1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+
+# (c) 2018, Sean Myers <sean.myers@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_release
+short_description: Set or unset RHSM release version
+description:
+ - Sets or unsets the release version used by RHSM repositories.
+notes:
+ - This module will fail on an unregistered system.
+ Use the C(redhat_subscription) module to register a system
+ prior to setting the RHSM release.
+requirements:
+ - Red Hat Enterprise Linux 6+ with subscription-manager installed
+options:
+ release:
+ description:
+ - RHSM release version to use (use null to unset)
+ required: true
+ type: str
+author:
+ - Sean Myers (@seandst)
+'''
+
+EXAMPLES = '''
+# Set release version to 7.1
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "7.1"
+
+# Set release version to 6Server
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "6Server"
+
+# Unset release version
+- name: Unset RHSM release version
+ community.general.rhsm_release:
+ release: null
+'''
+
+RETURN = '''
+current_release:
+ description: The current RHSM release version value
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import re
+
+# Matches release-like values such as 7.2, 6.10, 10Server,
+# but rejects unlikely values, like 100Server, 100.0, 1.100, etc.
+release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b')
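+# Illustrative behaviour of the pattern above (not exhaustive):
+#   release_matcher.findall('Release: 7.2')    -> ['7.2']
+#   release_matcher.findall('6Server')         -> ['6Server']
+#   release_matcher.findall('100Server 1.100') -> []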
+
+
+def _sm_release(module, *args):
+ # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes
+ # "subscription-manager release --set 0.1"
+ sm_bin = module.get_bin_path('subscription-manager', required=True)
+ cmd = '{0} release {1}'.format(sm_bin, " ".join(args))
+ # delegate nonzero rc handling to run_command
+ return module.run_command(cmd, check_rc=True)
+
+
+def get_release(module):
+ # Get the current release version, or None if release unset
+ rc, out, err = _sm_release(module, '--show')
+ try:
+ match = release_matcher.findall(out)[0]
+ except IndexError:
+ # 0'th index did not exist; no matches
+ match = None
+
+ return match
+
+
+def set_release(module, release):
+ # Set current release version, or unset if release is None
+ if release is None:
+ args = ('--unset',)
+ else:
+ args = ('--set', release)
+
+ return _sm_release(module, *args)
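+
+# Illustrative call sequence for the helpers above (hypothetical values):
+#   set_release(module, '7.1')  # runs: subscription-manager release --set 7.1
+#   set_release(module, None)   # runs: subscription-manager release --unset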
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ release=dict(type='str', required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ target_release = module.params['release']
+
+ # sanity check: the target release at least looks like a valid release
+ if target_release and not release_matcher.findall(target_release):
+ module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release))
+
+ # Will fail with useful error from s-m if system not subscribed
+ current_release = get_release(module)
+
+ changed = (target_release != current_release)
+ if not module.check_mode and changed:
+ set_release(module, target_release)
+ # If setting the release fails, then a fail_json would have exited with
+ # the s-m error, e.g. "No releases match '7.20'...". If not, then the
+ # current release is now set to the target release (job's done)
+ current_release = target_release
+
+ module.exit_json(current_release=current_release, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py
new file mode 100644
index 00000000..7317be66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_repository
+short_description: Manage RHSM repositories using the subscription-manager command
+description:
+  - Manage (enable or disable) RHSM repositories on the Red Hat Subscription
+    Management entitlement platform using the C(subscription-manager) command.
+author: Giovanni Sciortino (@giovannisciortino)
+notes:
+  - In order to manage RHSM repositories the system must already be registered
+    to RHSM manually or using the Ansible C(redhat_subscription) module.
+
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+      - The desired repository state. C(present) and C(enabled) enable the
+        repository, while C(absent) and C(disabled) disable it.
+ choices: [present, enabled, absent, disabled]
+ default: "enabled"
+ type: str
+ name:
+ description:
+      - The ID of repositories to enable or disable.
+ - To operate on several repositories this can accept a comma separated
+ list or a YAML list.
+ required: True
+ type: list
+ elements: str
+ purge:
+ description:
+      - Disable all currently enabled repositories that are not specified in C(name).
+ Only set this to C(True) if passing in a list of repositories to the C(name) field.
+ Using this with C(loop) will most likely not have the desired result.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Enable a RHSM repository
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+
+- name: Disable all RHSM repositories
+ community.general.rhsm_repository:
+ name: '*'
+ state: disabled
+
+- name: Enable all repositories starting with rhel-6-server
+ community.general.rhsm_repository:
+ name: rhel-6-server*
+ state: enabled
+
+- name: Disable all repositories except rhel-7-server-rpms
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+ purge: True
+'''
+
+RETURN = '''
+repositories:
+ description:
+ - The list of RHSM repositories with their states.
+ - When this module is used to change the repository states, this list contains the updated states after the changes.
+ returned: success
+ type: list
+'''
+
+import re
+import os
+from fnmatch import fnmatch
+from copy import deepcopy
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_subscription_manager(module, arguments):
+ # Execute subscription-manager with arguments and manage common errors
+ rhsm_bin = module.get_bin_path('subscription-manager')
+ if not rhsm_bin:
+ module.fail_json(msg='The executable file subscription-manager was not found in PATH')
+
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
+
+ if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
+ module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
+ elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
+ module.fail_json(msg='This system has no repositories available through subscriptions')
+ elif rc == 1:
+ module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
+ else:
+ return rc, out, err
+
+
+def get_repository_list(module, list_parameter):
+ # Generate RHSM repository list and return a list of dict
+ if list_parameter == 'list_enabled':
+ rhsm_arguments = ['repos', '--list-enabled']
+ elif list_parameter == 'list_disabled':
+ rhsm_arguments = ['repos', '--list-disabled']
+ elif list_parameter == 'list':
+ rhsm_arguments = ['repos', '--list']
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+
+ skip_lines = [
+ '+----------------------------------------------------------+',
+ ' Available Repositories in /etc/yum.repos.d/redhat.repo'
+ ]
+ repo_id_re = re.compile(r'Repo ID:\s+(.*)')
+ repo_name_re = re.compile(r'Repo Name:\s+(.*)')
+ repo_url_re = re.compile(r'Repo URL:\s+(.*)')
+ repo_enabled_re = re.compile(r'Enabled:\s+(.*)')
+
+ repo_id = ''
+ repo_name = ''
+ repo_url = ''
+ repo_enabled = ''
+
+ repo_result = []
+ for line in out.splitlines():
+ if line == '' or line in skip_lines:
+ continue
+
+ repo_id_match = repo_id_re.match(line)
+ if repo_id_match:
+ repo_id = repo_id_match.group(1)
+ continue
+
+ repo_name_match = repo_name_re.match(line)
+ if repo_name_match:
+ repo_name = repo_name_match.group(1)
+ continue
+
+ repo_url_match = repo_url_re.match(line)
+ if repo_url_match:
+ repo_url = repo_url_match.group(1)
+ continue
+
+ repo_enabled_match = repo_enabled_re.match(line)
+ if repo_enabled_match:
+ repo_enabled = repo_enabled_match.group(1)
+
+ repo = {
+ "id": repo_id,
+ "name": repo_name,
+ "url": repo_url,
+            "enabled": repo_enabled == '1'
+ }
+
+ repo_result.append(repo)
+
+ return repo_result
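+
+# The parser above assumes 'subscription-manager repos --list' output in
+# blocks like the following (illustrative sample, not captured output):
+#
+#   Repo ID:   rhel-7-server-rpms
+#   Repo Name: Red Hat Enterprise Linux 7 Server (RPMs)
+#   Repo URL:  https://cdn.redhat.com/content/dist/rhel/server/7/$releasever/$basearch/os
+#   Enabled:   1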
+
+
+def repository_modify(module, state, name, purge=False):
+ name = set(name)
+ current_repo_list = get_repository_list(module, 'list')
+ updated_repo_list = deepcopy(current_repo_list)
+ matched_existing_repo = {}
+ for repoid in name:
+ matched_existing_repo[repoid] = []
+ for idx, repo in enumerate(current_repo_list):
+ if fnmatch(repo['id'], repoid):
+ matched_existing_repo[repoid].append(repo)
+                # Update updated_repo_list to return it as result variable
+                updated_repo_list[idx]['enabled'] = state in ('enabled', 'present')
+
+ changed = False
+ results = []
+ diff_before = ""
+ diff_after = ""
+ rhsm_arguments = ['repos']
+
+ for repoid in matched_existing_repo:
+ if len(matched_existing_repo[repoid]) == 0:
+ results.append("%s is not a valid repository ID" % repoid)
+ module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
+ for repo in matched_existing_repo[repoid]:
+ if state in ['disabled', 'absent']:
+ if repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
+ results.append("Repository '%s' is disabled for this system" % repo['id'])
+ rhsm_arguments += ['--disable', repo['id']]
+ elif state in ['enabled', 'present']:
+ if not repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
+ results.append("Repository '%s' is enabled for this system" % repo['id'])
+ rhsm_arguments += ['--enable', repo['id']]
+
+ # Disable all enabled repos on the system that are not in the task and not
+ # marked as disabled by the task
+ if purge:
+ enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])
+ matched_repoids_set = set(matched_existing_repo.keys())
+ difference = enabled_repo_ids.difference(matched_repoids_set)
+ if len(difference) > 0:
+ for repoid in difference:
+ changed = True
+                diff_before += "Repository '{repoid}' is enabled for this system\n".format(repoid=repoid)
+                diff_after += "Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)
+ results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
+ rhsm_arguments.extend(['--disable', repoid])
+
+ diff = {'before': diff_before,
+ 'after': diff_after,
+ 'before_header': "RHSM repositories",
+ 'after_header': "RHSM repositories"}
+
+ if not module.check_mode and changed:
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+ results = out.splitlines()
+ module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
+ purge=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ purge = module.params['purge']
+
+ repository_modify(module, state, name, purge)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py
new file mode 100644
index 00000000..424f5b1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kim Nørgaard
+# Written by Kim Nørgaard <jasen@jasen.dk>
+# Based on pkgng module written by bleader <bleader@ratonland.org>
+# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: slackpkg
+short_description: Package manager for Slackware >= 12.2
+description:
+ - Manage binary packages for Slackware using 'slackpkg' which
+ is available in versions after 12.2.
+options:
+ name:
+ description:
+ - name of package to install/remove
+ required: true
+ type: list
+ elements: str
+ aliases: [pkg]
+
+ state:
+ description:
+      - State of the package. You can use C(installed) as an alias for C(present) and C(removed) as one for C(absent).
+ choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ]
+ required: false
+ default: present
+ type: str
+
+ update_cache:
+ description:
+ - update the package database first
+ required: false
+ default: false
+ type: bool
+ aliases: [update-cache]
+
+author: Kim Nørgaard (@KimNorgaard)
+requirements: [ "Slackware >= 12.2" ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.slackpkg:
+ name: foo
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.slackpkg:
+ name: foo,bar
+ state: absent
+
+- name: Make sure that it is the most updated package
+ community.general.slackpkg:
+ name: foo
+ state: latest
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, slackpkg_path, name):
+
+ import platform
+ import os
+ import re
+
+ machine = platform.machine()
+ # Exception for kernel-headers package on x86_64
+ if name == 'kernel-headers' and machine == 'x86_64':
+ machine = 'x86'
+ pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine)))
+ packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)]
+
+ if len(packages) > 0:
+ return True
+
+ return False
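+
+# Installed-package metadata lives as files under /var/log/packages named
+# 'name-version-arch-build'; an illustrative entry matched above for
+# name='foo' on x86_64 would be 'foo-1.2.3-x86_64-1'.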
+
+
+def remove_packages(module, slackpkg_path, packages):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+            rc, out, err = module.run_command("%s -default_answer=y -batch=on remove %s" % (slackpkg_path, package))
+
+ if not module.check_mode and query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, slackpkg_path, packages):
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+            rc, out, err = module.run_command("%s -default_answer=y -batch=on install %s" % (slackpkg_path, package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+        module.exit_json(changed=True, msg="installed %s package(s)" % install_c)
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def upgrade_packages(module, slackpkg_path, packages):
+ install_c = 0
+
+ for package in packages:
+ if not module.check_mode:
+            rc, out, err = module.run_command("%s -default_answer=y -batch=on upgrade %s" % (slackpkg_path, package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+            module.fail_json(msg="failed to upgrade %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+        module.exit_json(changed=True, msg="upgraded %s package(s)" % install_c)
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_cache(module, slackpkg_path):
+ rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
+ if rc != 0:
+ module.fail_json(msg="Could not update package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ update_cache=dict(default=False, aliases=["update-cache"],
+ type='bool'),
+ ),
+ supports_check_mode=True)
+
+ slackpkg_path = module.get_bin_path('slackpkg', True)
+
+ p = module.params
+
+ pkgs = p['name']
+
+ if p["update_cache"]:
+ update_cache(module, slackpkg_path)
+
+ if p['state'] == 'latest':
+ upgrade_packages(module, slackpkg_path, pkgs)
+
+ elif p['state'] in ['present', 'installed']:
+ install_packages(module, slackpkg_path, pkgs)
+
+ elif p["state"] in ['removed', 'absent']:
+ remove_packages(module, slackpkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py
new file mode 100644
index 00000000..9776b4e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Stanislas Lange (angristan) <angristan@pm.me>
+# Copyright: (c) 2018, Victor Carceler <vcarceler@iespuigcastellar.xeill.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap
+
+short_description: Manages snaps
+
+
+description:
+    - "Manages snap packages."
+
+options:
+ name:
+ description:
+ - Name of the snap to install or remove. Can be a list of snaps.
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: present
+ choices: [ absent, present ]
+ type: str
+ classic:
+ description:
+ - Confinement policy. The classic confinement allows a snap to have
+ the same level of access to the system as "classic" packages,
+ like those managed by APT. This option corresponds to the --classic argument.
+ This option can only be specified if there is a single snap in the task.
+ type: bool
+ required: false
+ default: no
+ channel:
+ description:
+ - Define which release of a snap is installed and tracked for updates.
+ This option can only be specified if there is a single snap in the task.
+ type: str
+ required: false
+ default: stable
+
+author:
+ - Victor Carceler (@vcarceler) <vcarceler@iespuigcastellar.xeill.net>
+ - Stanislas Lange (@angristan) <angristan@pm.me>
+'''
+
+EXAMPLES = '''
+# Install the "foo" and "bar" snaps
+- name: Install foo
+ community.general.snap:
+ name:
+ - foo
+ - bar
+
+# Remove "foo" snap
+- name: Remove foo
+ community.general.snap:
+ name: foo
+ state: absent
+
+# Install a snap with classic confinement
+- name: Install "foo" with option --classic
+ community.general.snap:
+ name: foo
+ classic: yes
+
+# Install a snap from a specific channel
+- name: Install "foo" with option --channel=latest/edge
+ community.general.snap:
+ name: foo
+ channel: latest/edge
+'''
+
+RETURN = '''
+classic:
+ description: Whether or not the snaps were installed with the classic confinement
+ type: bool
+ returned: When snaps are installed
+channel:
+ description: The channel the snaps were installed from
+ type: str
+ returned: When snaps are installed
+cmd:
+ description: The command that was executed on the host
+ type: str
+ returned: When changed is true
+snaps_installed:
+ description: The list of actually installed snaps
+ type: list
+ returned: When any snaps have been installed
+snaps_removed:
+ description: The list of actually removed snaps
+ type: list
+ returned: When any snaps have been removed
+'''
+
+import operator
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def validate_input_snaps(module):
+    """Ensure that all requested snaps exist."""
+ for snap_name in module.params['name']:
+ if not snap_exists(module, snap_name):
+ module.fail_json(msg="No snap matching '%s' available." % snap_name)
+
+
+def snap_exists(module, snap_name):
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'info', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def is_snap_installed(module, snap_name):
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'list', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def get_snap_for_action(module):
+ """Construct a list of snaps to use for current action."""
+ snaps = module.params['name']
+
+ is_present_state = module.params['state'] == 'present'
+ negation_predicate = operator.not_ if is_present_state else bool
+
+ def predicate(s):
+ return negation_predicate(is_snap_installed(module, s))
+
+ return [s for s in snaps if predicate(s)]
+
+
+def get_base_cmd_parts(module):
+ action_map = {
+ 'present': 'install',
+ 'absent': 'remove',
+ }
+
+ state = module.params['state']
+
+ classic = ['--classic'] if module.params['classic'] else []
+ channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else []
+
+ snap_path = module.get_bin_path("snap", True)
+ snap_action = action_map[state]
+
+ cmd_parts = [snap_path, snap_action]
+ if snap_action == 'install':
+ cmd_parts += classic + channel
+
+ return cmd_parts
+
+
+def get_cmd_parts(module, snap_names):
+ """Return list of cmds to run in exec format."""
+ is_install_mode = module.params['state'] == 'present'
+ has_multiple_snaps = len(snap_names) > 1
+
+ cmd_parts = get_base_cmd_parts(module)
+ has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts
+
+ if not (is_install_mode and has_one_pkg_params and has_multiple_snaps):
+ return [cmd_parts + snap_names]
+
+ return [cmd_parts + [s] for s in snap_names]
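+
+# Illustrative only: for name=['foo', 'bar'] with state=present and
+# classic=yes, the helpers above yield one command per snap, e.g.
+#   ['snap', 'install', '--classic', 'foo']
+#   ['snap', 'install', '--classic', 'bar']
+# (with 'snap' resolved to its full path), because --classic/--channel
+# cannot be applied to a multi-snap install.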
+
+
+def run_cmd_for(module, snap_names):
+ cmds_parts = get_cmd_parts(module, snap_names)
+ cmd = '; '.join(' '.join(c) for c in cmds_parts)
+ cmd = 'sh -c "{0}"'.format(cmd)
+
+ # Actually execute the snap command
+ return (cmd, ) + module.run_command(cmd, check_rc=False)
+
+
+def execute_action(module):
+ is_install_mode = module.params['state'] == 'present'
+ exit_kwargs = {
+ 'classic': module.params['classic'],
+ 'channel': module.params['channel'],
+ } if is_install_mode else {}
+
+ actionable_snaps = get_snap_for_action(module)
+ if not actionable_snaps:
+ module.exit_json(changed=False, **exit_kwargs)
+
+    changed_def_args = {
+        'changed': True,
+        ('snaps_installed' if is_install_mode else 'snaps_removed'): actionable_snaps,
+    }
+
+ if module.check_mode:
+ module.exit_json(**dict(changed_def_args, **exit_kwargs))
+
+ cmd, rc, out, err = run_cmd_for(module, actionable_snaps)
+ cmd_out_args = {
+ 'cmd': cmd,
+ 'rc': rc,
+ 'stdout': out,
+ 'stderr': err,
+ }
+
+ if rc == 0:
+ module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs)))
+ else:
+        msg = "Oops! Snap installation failed while executing '{cmd}', please examine logs and error output for more details.".format(cmd=cmd)
+ if is_install_mode:
+ m = re.match(r'^error: This revision of snap "(?P<package_name>\w+)" was published using classic confinement', err)
+ if m is not None:
+ err_pkg = m.group('package_name')
+ msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+ module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs))
+
+
+def main():
+ module_args = {
+ 'name': dict(type='list', elements='str', required=True),
+ 'state': dict(type='str', required=False, default='present', choices=['absent', 'present']),
+ 'classic': dict(type='bool', required=False, default=False),
+ 'channel': dict(type='str', required=False, default='stable'),
+ }
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ validate_input_snaps(module)
+
+ # Apply changes to the snaps
+ execute_action(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py
new file mode 100644
index 00000000..347413fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015-2016, Vlad Glagolev <scm@vaygr.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+ - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain
+author: "Vlad Glagolev (@vaygr)"
+notes:
+ - When all three components are selected, the update goes by the sequence --
+ Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
+ - grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
+ yet supported.
+requirements:
+ - bash
+options:
+ name:
+ description:
+ - Name of the spell
+ - multiple names can be given, separated by commas
+ - special value '*' in conjunction with states C(latest) or
+ C(rebuild) will update or rebuild the whole system respectively
+ aliases: ["spell"]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Whether to cast, dispel or rebuild a package
+ - state C(cast) is an equivalent of C(present), not C(latest)
+ - state C(latest) always triggers C(update_cache=yes)
+ - state C(rebuild) implies cast of all specified spells, not only
+        those that existed before
+ choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
+ default: "present"
+ type: str
+
+ depends:
+ description:
+ - Comma-separated list of _optional_ dependencies to build a spell
+ (or make sure it is built) with; use +/- in front of dependency
+ to turn it on/off ('+' is optional though)
+ - this option is ignored if C(name) parameter is equal to '*' or
+ contains more than one spell
+ - providers must be supplied in the form recognized by Sorcery, e.g.
+ 'openssl(SSL)'
+ type: str
+
+ update:
+ description:
+ - Whether or not to update sorcery scripts at the very first stage
+ type: bool
+ default: no
+
+ update_cache:
+ description:
+ - Whether or not to update grimoire collection before casting spells
+ type: bool
+ default: no
+ aliases: ["update_codex"]
+
+ cache_valid_time:
+ description:
+ - Time in seconds to invalidate grimoire collection on update
+ - especially useful for SCM and rsync grimoires
+ - makes sense only in pair with C(update_cache)
+ type: int
+'''
+
+
+EXAMPLES = '''
+- name: Make sure spell foo is installed
+ community.general.sorcery:
+ spell: foo
+ state: present
+
+- name: Make sure spells foo, bar and baz are removed
+ community.general.sorcery:
+ spell: foo,bar,baz
+ state: absent
+
+- name: Make sure spell foo with dependencies bar and baz is installed
+ community.general.sorcery:
+ spell: foo
+ depends: bar,baz
+ state: present
+
+- name: Make sure spell foo with bar and without baz dependencies is installed
+ community.general.sorcery:
+ spell: foo
+ depends: +bar,-baz
+ state: present
+
+- name: Make sure spell foo with libressl (providing SSL) dependency is installed
+ community.general.sorcery:
+ spell: foo
+ depends: libressl(SSL)
+ state: present
+
+- name: Make sure spells with/without required dependencies (if any) are installed
+ community.general.sorcery:
+ name: "{{ item.spell }}"
+ depends: "{{ item.depends | default(None) }}"
+ state: present
+ loop:
+ - { spell: 'vifm', depends: '+file,-gtk+2' }
+ - { spell: 'fwknop', depends: 'gpgme' }
+ - { spell: 'pv,tnftp,tor' }
+
+- name: Install the latest version of spell foo using regular glossary
+ community.general.sorcery:
+ name: foo
+ state: latest
+
+- name: Rebuild spell foo
+ community.general.sorcery:
+ spell: foo
+ state: rebuild
+
+- name: Rebuild the whole system, but update Sorcery and Codex first
+ community.general.sorcery:
+ spell: '*'
+ state: rebuild
+ update: yes
+ update_cache: yes
+
+- name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias
+ community.general.sorcery:
+ update_codex: yes
+ cache_valid_time: 86400
+
+- name: Update only Sorcery itself
+ community.general.sorcery:
+ update: yes
+'''
+
+
+RETURN = '''
+'''
+
+
+import datetime
+import fileinput
+import os
+import re
+import shutil
+import sys
+
+
+# auto-filled at module init
+SORCERY = {
+ 'sorcery': None,
+ 'scribe': None,
+ 'cast': None,
+ 'dispel': None,
+ 'gaze': None
+}
+
+SORCERY_LOG_DIR = "/var/log/sorcery"
+SORCERY_STATE_DIR = "/var/state/sorcery"
+
+
+def get_sorcery_ver(module):
+ """ Get Sorcery version. """
+
+ cmd_sorcery = "%s --version" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0 or not stdout:
+ module.fail_json(msg="unable to get Sorcery version")
+
+ return stdout.strip()
+
+
+def codex_fresh(codex, module):
+ """ Check if grimoire collection is fresh enough. """
+
+ if not module.params['cache_valid_time']:
+ return False
+
+ timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
+
+ for grimoire in codex:
+ lastupdate_path = os.path.join(SORCERY_STATE_DIR,
+ grimoire + ".lastupdate")
+
+ try:
+ mtime = os.stat(lastupdate_path).st_mtime
+ except Exception:
+ return False
+
+ lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
+
+ # if any grimoire is not fresh, we invalidate the Codex
+ if lastupdate_ts + timedelta < datetime.datetime.now():
+ return False
+
+ return True
+
+
+def codex_list(module):
+ """ List valid grimoire collection. """
+
+ codex = {}
+
+ cmd_scribe = "%s index" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to list grimoire collection, fix your Codex")
+
+ rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
+
+ # drop 4-line header and empty trailing line
+ for line in stdout.splitlines()[4:-1]:
+ match = rex.match(line)
+
+ if match:
+ codex[match.group('grim')] = match.group('ver')
+
+ if not codex:
+ module.fail_json(msg="no grimoires to operate on; add at least one")
+
+ return codex
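+
+# 'scribe index' output is assumed to contain entries such as (illustrative):
+#   [0] : stable : /var/lib/sorcery/codex/stable : 0.62-5
+# from which the grimoire name ('stable') and optional version are captured.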
+
+
+def update_sorcery(module):
+ """ Update sorcery scripts.
+
+ This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
+ positive change value.
+
+ """
+
+ changed = False
+
+ if module.check_mode:
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=True, msg="would have updated Sorcery")
+ else:
+ sorcery_ver = get_sorcery_ver(module)
+
+ cmd_sorcery = "%s update" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Sorcery: " + stdout)
+
+ if sorcery_ver != get_sorcery_ver(module):
+ changed = True
+
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Sorcery")
+
+
+def update_codex(module):
+ """ Update grimoire collections.
+
+ This runs 'scribe update'. Check mode always returns a positive change
+ value when 'cache_valid_time' is used.
+
+ """
+
+ params = module.params
+
+ changed = False
+
+ codex = codex_list(module)
+ fresh = codex_fresh(codex, module)
+
+ if module.check_mode:
+ if not params['name']:
+ if not fresh:
+ changed = True
+
+ module.exit_json(changed=changed, msg="would have updated Codex")
+ elif not fresh or params['name'] and params['state'] == 'latest':
+ # SILENT is required as a workaround for query() in libgpg
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_scribe = "%s update" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Codex: " + stdout)
+
+ if codex != codex_list(module):
+ changed = True
+
+ if not params['name']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Codex")
+
+
+def match_depends(module):
+ """ Check for matching dependencies.
+
+ This inspects spell's dependencies with the desired states and returns
+ 'False' if a recast is needed to match them. It also adds required lines
+ to the system-wide depends file for proper recast procedure.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ depends = {}
+
+ depends_ok = True
+
+ if len(spells) > 1 or not params['depends']:
+ return depends_ok
+
+ spell = spells[0]
+
+ if module.check_mode:
+ sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+ try:
+ shutil.copy2(sorcery_depends_orig, sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to copy depends.check file")
+ else:
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
+ rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
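+    # Illustrative values accepted by the pattern above:
+    #   '+bar'         -> status '+', depend 'bar'
+    #   '-baz'         -> status '-', depend 'baz'
+    #   'openssl(SSL)' -> status '',  depend 'openssl(SSL)'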
+
+ for d in params['depends'].split(','):
+ match = rex.match(d)
+
+ if not match:
+ module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+ # normalize status
+ if not match.group('status') or match.group('status') == '+':
+ status = 'on'
+ else:
+ status = 'off'
+
+ depends[match.group('depend')] = status
+
+ # drop providers spec
+ depends_list = [s.split('(')[0] for s in depends]
+
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ if rc != 0:
+ module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+ fi = fileinput.input(sorcery_depends, inplace=True)
+
+ try:
+ try:
+ for line in fi:
+ if line.startswith(spell + ':'):
+ match = None
+
+ for d in depends:
+ # when local status is 'off' and dependency is provider,
+ # use only provider value
+ d_offset = d.find('(')
+
+ if d_offset == -1:
+ d_p = ''
+ else:
+ d_p = re.escape(d[d_offset:])
+
+ # .escape() is needed mostly for the spells like 'libsigc++'
+ rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+ (re.escape(spell), re.escape(d), d_p))
+
+ match = rex.match(line)
+
+ # we matched the line "spell:dependency:on|off:optional:"
+ if match:
+ # if we also matched the local status, mark dependency
+ # as empty and put it back into depends file
+ if match.group('lstatus') == depends[d]:
+ depends[d] = None
+
+ sys.stdout.write(line)
+
+ # status is not that we need, so keep this dependency
+ # in the list for further reverse switching;
+ # stop and process the next line in both cases
+ break
+
+ if not match:
+ sys.stdout.write(line)
+ else:
+ sys.stdout.write(line)
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+ finally:
+ fi.close()
+
+ depends_new = [v for v in depends if depends[v]]
+
+ if depends_new:
+ try:
+ try:
+ fl = open(sorcery_depends, 'a')
+
+ for k in depends_new:
+ fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+ finally:
+ fl.close()
+
+ depends_ok = False
+
+ if module.check_mode:
+ try:
+ os.remove(sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to clean up depends.backup file")
+
+ return depends_ok
+
+
+def manage_spells(module):
+ """ Cast or dispel spells.
+
+ This manages the whole system ('*'), list or a single spell. Command 'cast'
+ is used to install or rebuild spells, while 'dispel' takes care of theirs
+ removal from the system.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+    if spells == ['*']:  # params['name'] is a list, so '*' arrives as ['*']
+ if params['state'] == 'latest':
+ # back up original queue
+ try:
+ os.rename(sorcery_queue, sorcery_queue + ".backup")
+ except IOError:
+ module.fail_json(msg="failed to backup the update queue")
+
+ # see update_codex()
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+            cmd_sorcery = "%s queue" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to generate the update queue")
+
+ try:
+ queue_size = os.stat(sorcery_queue).st_size
+ except Exception:
+ module.fail_json(msg="failed to read the update queue")
+
+ if queue_size != 0:
+ if module.check_mode:
+ try:
+ os.rename(sorcery_queue + ".backup", sorcery_queue)
+ except IOError:
+ module.fail_json(msg="failed to restore the update queue")
+
+ module.exit_json(changed=True, msg="would have updated the system")
+
+ cmd_cast = "%s --queue" % SORCERY['cast']
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to update the system")
+
+ module.exit_json(changed=True, msg="successfully updated the system")
+ else:
+ module.exit_json(changed=False, msg="the system is already up to date")
+ elif params['state'] == 'rebuild':
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have rebuilt the system")
+
+ cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+ module.exit_json(changed=True, msg="successfully rebuilt the system")
+ else:
+ module.fail_json(msg="unsupported operation on '*' name value")
+ else:
+ if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+ # extract versions from the 'gaze' command
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ # fail if any of spells cannot be found
+ if rc != 0:
+ module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+ ', '.join(spells))
+
+ cast_queue = []
+ dispel_queue = []
+
+ rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+ # drop 2-line header and empty trailing line
+ for line in stdout.splitlines()[2:-1]:
+ match = rex.match(line)
+
+ cast = False
+
+ if params['state'] == 'present':
+ # spell is not installed..
+ if match.group('inst_ver') == '-':
+ # ..so set up depends reqs for it
+ match_depends(module)
+
+ cast = True
+ # spell is installed..
+ else:
+ # ..but does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'latest':
+ # grimoire and installed versions do not match..
+ if match.group('grim_ver') != match.group('inst_ver'):
+ # ..so check for depends reqs first and set them up
+ match_depends(module)
+
+ cast = True
+ # grimoire and installed versions match..
+ else:
+ # ..but the spell does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'rebuild':
+ cast = True
+ # 'absent'
+ else:
+ if match.group('inst_ver') != '-':
+ dispel_queue.append(match.group('spell'))
+
+ if cast:
+ cast_queue.append(match.group('spell'))
+
+ if cast_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have cast spell(s)")
+
+ cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+                module.fail_json(msg="failed to cast spell(s): " + stdout)
+
+ module.exit_json(changed=True, msg="successfully cast spell(s)")
+ elif params['state'] != 'absent':
+ module.exit_json(changed=False, msg="spell(s) are already cast")
+
+ if dispel_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have dispelled spell(s)")
+
+ cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_dispel)
+
+ if rc != 0:
+                module.fail_json(msg="failed to dispel spell(s): " + stdout)
+
+ module.exit_json(changed=True, msg="successfully dispelled spell(s)")
+ else:
+ module.exit_json(changed=False, msg="spell(s) are already dispelled")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['spell'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'latest',
+ 'absent', 'cast', 'dispelled', 'rebuild']),
+ depends=dict(default=None),
+ update=dict(default=False, type='bool'),
+ update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
+ cache_valid_time=dict(default=0, type='int')
+ ),
+ required_one_of=[['name', 'update', 'update_cache']],
+ supports_check_mode=True
+ )
+
+ if os.geteuid() != 0:
+ module.fail_json(msg="root privileges are required for this operation")
+
+ for c in SORCERY:
+ SORCERY[c] = module.get_bin_path(c, True)
+
+ # prepare environment: run sorcery commands without asking questions
+ module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
+
+ params = module.params
+
+ # normalize 'state' parameter
+ if params['state'] in ('present', 'cast'):
+ params['state'] = 'present'
+ elif params['state'] in ('absent', 'dispelled'):
+ params['state'] = 'absent'
+
+ if params['update']:
+ update_sorcery(module)
+
+ if params['update_cache'] or params['state'] == 'latest':
+ update_codex(module)
+
+ if params['name']:
+ manage_spells(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py
new file mode 100644
index 00000000..21d17f4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: svr4pkg
+short_description: Manage Solaris SVR4 packages
+description:
+ - Manages SVR4 packages on Solaris 10 and 11.
+ - These were the native packages on Solaris <= 10 and are available
+ as a legacy feature in Solaris 11.
+ - Note that this is a very basic packaging system. It will not enforce
+ dependencies on install or remove.
+author: "Boyd Adamson (@brontitall)"
+options:
+ name:
+ description:
+ - Package name, e.g. C(SUNWcsr)
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - If the package is to be installed, then I(src) is required.
+ - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
+ required: true
+ choices: ["present", "absent"]
+ type: str
+
+ src:
+ description:
+ - Specifies the location to install the package from. Required when C(state=present).
+      - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http://server/mypkgs.pkg)."
+ - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.
+ type: str
+ proxy:
+ description:
+ - HTTP[s] proxy to be used if C(src) is a URL.
+ type: str
+ response_file:
+ description:
+ - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
+ required: false
+ type: str
+ zone:
+ description:
+ - Whether to install the package only in the current zone, or install it into all zones.
+ - The installation into all zones works only if you are working with the global zone.
+ required: false
+ default: "all"
+ choices: ["current", "all"]
+ type: str
+ category:
+ description:
+ - Install/Remove category instead of a single package.
+ required: false
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Install a package from an already copied file
+ community.general.svr4pkg:
+ name: CSWcommon
+ src: /tmp/cswpkgs.pkg
+ state: present
+
+- name: Install a package directly from an http site
+ community.general.svr4pkg:
+ name: CSWpkgutil
+ src: 'http://get.opencsw.org/now'
+ state: present
+ zone: current
+
+- name: Install a package with a response file
+ community.general.svr4pkg:
+ name: CSWggrep
+ src: /tmp/third-party.pkg
+ response_file: /tmp/ggrep.response
+ state: present
+
+- name: Ensure that a package is not installed
+ community.general.svr4pkg:
+ name: SUNWgnome-sound-recorder
+ state: absent
+
+- name: Ensure that a category is not installed
+ community.general.svr4pkg:
+ name: FIREFOX
+ state: absent
+ category: true
+'''
+
+
+import os
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def package_installed(module, name, category):
+ cmd = [module.get_bin_path('pkginfo', True)]
+ cmd.append('-q')
+ if category:
+ cmd.append('-c')
+ cmd.append(name)
+ rc, out, err = module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def create_admin_file():
+ (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
+ fullauto = '''
+mail=
+instance=unique
+partial=nocheck
+runlevel=quit
+idepend=nocheck
+rdepend=nocheck
+space=quit
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+networktimeout=60
+networkretries=3
+authentication=quit
+keystore=/var/sadm/security
+proxy=
+basedir=default
+'''
+    os.write(desc, fullauto.encode())  # os.write() requires bytes on Python 3
+ os.close(desc)
+ return filename
+
+
+def run_command(module, cmd):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True)
+ return module.run_command(cmd)
+
+
+def package_install(module, name, src, proxy, response_file, zone, category):
+ adminfile = create_admin_file()
+ cmd = ['pkgadd', '-n']
+ if zone == 'current':
+ cmd += ['-G']
+ cmd += ['-a', adminfile, '-d', src]
+ if proxy is not None:
+ cmd += ['-x', proxy]
+ if response_file is not None:
+ cmd += ['-r', response_file]
+ if category:
+ cmd += ['-Y']
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def package_uninstall(module, name, src, category):
+ adminfile = create_admin_file()
+ if category:
+ cmd = ['pkgrm', '-na', adminfile, '-Y', name]
+ else:
+ cmd = ['pkgrm', '-na', adminfile, name]
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ src=dict(default=None),
+ proxy=dict(default=None),
+ response_file=dict(default=None),
+ zone=dict(required=False, default='all', choices=['current', 'all']),
+ category=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+ state = module.params['state']
+ name = module.params['name']
+ src = module.params['src']
+ proxy = module.params['proxy']
+ response_file = module.params['response_file']
+ zone = module.params['zone']
+ category = module.params['category']
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ if state == 'present':
+ if src is None:
+ module.fail_json(name=name,
+ msg="src is required when state=present")
+ if not package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
+ # Stdout is normally empty but for some packages can be
+ # very long and is not often useful
+ if len(out) > 75:
+ out = out[:75] + '...'
+
+ elif state == 'absent':
+ if package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_uninstall(module, name, src, category)
+ out = out[:75]
+
+ # Returncodes as per pkgadd(1m)
+ # 0 Successful completion
+ # 1 Fatal error.
+ # 2 Warning.
+ # 3 Interruption.
+ # 4 Administration.
+ # 5 Administration. Interaction is required. Do not use pkgadd -n.
+ # 10 Reboot after installation of all packages.
+ # 20 Reboot after installation of this package.
+ # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
+ if rc in (0, 2, 3, 10, 20):
+ result['changed'] = True
+ # no install nor uninstall, or failed
+ else:
+ result['changed'] = False
+
+ # rc will be none when the package already was installed and no action took place
+ # Only return failed=False when the returncode is known to be good as there may be more
+ # undocumented failure return codes
+ if rc not in (None, 0, 2, 10, 20):
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py
new file mode 100644
index 00000000..7e9db835
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Raul Melo
+# Written by Raul Melo <raulmelo@gmail.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swdepot
+short_description: Manage packages with swdepot package manager (HP-UX)
+description:
+ - Will install, upgrade and remove packages with swdepot package manager (HP-UX)
+notes: []
+author: "Raul Melo (@melodous)"
+options:
+ name:
+ description:
+ - Package name.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ required: true
+ choices: [ 'present', 'latest', 'absent']
+ type: str
+ depot:
+ description:
+ - The source repository from which to install or upgrade a package.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Install a package
+ community.general.swdepot:
+ name: unzip-6.0
+ state: present
+ depot: 'repository:/path'
+
+- name: Install the latest version of a package
+ community.general.swdepot:
+ name: unzip
+ state: latest
+ depot: 'repository:/path'
+
+- name: Remove a package
+ community.general.swdepot:
+ name: unzip
+ state: absent
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def compare_package(version1, version2):
+ """ Compare version packages.
+ Return values:
+ -1 first minor
+ 0 equal
+ 1 first greater """
+
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+ normalized_version1 = normalize(version1)
+ normalized_version2 = normalize(version2)
+ if normalized_version1 == normalized_version2:
+ rc = 0
+ elif normalized_version1 < normalized_version2:
+ rc = -1
+ else:
+ rc = 1
+ return rc
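+
+# A quick illustration of the comparison (hypothetical version strings);
+# trailing ".0" groups are stripped by normalize(), so:
+#   compare_package('1.2', '1.2.0')  # -> 0  (equal after normalization)
+#   compare_package('1.2', '1.10')   # -> -1 (numeric, not lexicographic)
+#   compare_package('2.0', '1.9')    # -> 1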
+
+
+def query_package(module, name, depot=None):
+ """ Returns whether a package is installed or not and version. """
+
+ cmd_list = '/usr/sbin/swlist -a revision -l product'
+ if depot:
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)),
+ use_unsafe_shell=True)
+ else:
+ rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+ else:
+ version = None
+
+ return rc, version
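+
+# For illustration (hypothetical values): with name='unzip' and
+# depot='repository:/path', the shell pipeline built above is roughly
+#   /usr/sbin/swlist -a revision -l product -s repository:/path unzip | grep unzip
+# and rc == 0 means the package was found.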
+
+
+def remove_package(module, name):
+ """ Uninstall package if installed. """
+
+ cmd_remove = '/usr/sbin/swremove'
+ rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def install_package(module, depot, name):
+ """ Install package if not already installed """
+
+ cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
+ rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
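+
+# Illustration (hypothetical values): install_package(module,
+# 'repository:/path', 'unzip-6.0') runs roughly
+#   /usr/sbin/swinstall -x mount_all_filesystems=false -s repository:/path unzip-6.0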
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(choices=['present', 'absent', 'latest'], required=True),
+ depot=dict(default=None, required=False)
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ state = module.params['state']
+ depot = module.params['depot']
+
+ changed = False
+ msg = "No changed"
+ rc = 0
+ if (state == 'present' or state == 'latest') and depot is None:
+ output = "depot parameter is mandatory in present or latest task"
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ # Check local version
+ rc, version_installed = query_package(module, name)
+ if not rc:
+ installed = True
+ msg = "Already installed"
+
+ else:
+ installed = False
+
+ if (state == 'present' or state == 'latest') and installed is False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ changed = True
+ msg = "Package installed"
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'latest' and installed is True:
+ # Check depot version
+ rc, version_depot = query_package(module, name, depot)
+
+ if not rc:
+ if compare_package(version_installed, version_depot) == -1:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ # Install new version
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
+ changed = True
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ else:
+ output = "Software package not in repository " + depot
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'absent' and installed is True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = remove_package(module, name)
+ if not rc:
+ changed = True
+ msg = "Package removed"
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py
new file mode 100644
index 00000000..4dac01be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+
+# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swupd
+short_description: Manages updates and bundles in Clear Linux systems
+description:
+ - Manages updates and bundles with the swupd bundle manager, which is used by the
+ Clear Linux Project for Intel Architecture.
+author: Alberto Murillo (@albertomurillo)
+options:
+ contenturl:
+ description:
+ - URL pointing to the contents of available bundles.
+ If not specified, the contents are retrieved from clearlinux.org.
+ type: str
+ format:
+ description:
+ - The format suffix for version file downloads, for example C(1), C(2), C(3) or C(staging).
+ If not specified, the default format is used.
+ type: str
+ manifest:
+ description:
+ - The manifest contains information about the bundles at a certain version of the OS.
+ Specify a Manifest version to verify against that version or leave unspecified to
+ verify against the current version.
+ aliases: [release, version]
+ type: int
+ name:
+ description:
+ - Name of the I(bundle) to install or remove.
+ aliases: [bundle]
+ type: str
+ state:
+ description:
+ - Indicates the desired I(bundle) state. C(present) ensures the bundle
+ is installed while C(absent) ensures the I(bundle) is not installed.
+ default: present
+ choices: [present, absent]
+ type: str
+ update:
+ description:
+ - Updates the OS to the latest version.
+ type: bool
+ default: false
+ url:
+ description:
+ - Overrides both I(contenturl) and I(versionurl).
+ type: str
+ verify:
+ description:
+ - Verify content for OS version.
+ type: bool
+ default: false
+ versionurl:
+ description:
+ - URL for version string download.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Update the OS to the latest version
+ community.general.swupd:
+ update: yes
+
+- name: Installs the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: present
+
+- name: Removes the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: absent
+
+- name: Check integrity of filesystem
+ community.general.swupd:
+ verify: yes
+
+- name: Downgrade OS to release 12920
+ community.general.swupd:
+ verify: yes
+ manifest: 12920
+'''
+
+RETURN = '''
+stdout:
+ description: stdout of swupd
+ returned: always
+ type: str
+stderr:
+ description: stderr of swupd
+ returned: always
+ type: str
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Swupd(object):
+ FILES_NOT_MATCH = "files did not match"
+ FILES_REPLACED = "missing files were replaced"
+ FILES_FIXED = "files were fixed"
+ FILES_DELETED = "files were deleted"
+
+ def __init__(self, module):
+ # Fail if swupd is not found
+ self.module = module
+ self.swupd_cmd = module.get_bin_path("swupd", False)
+ if not self.swupd_cmd:
+ module.fail_json(msg="Could not find swupd.")
+
+ # Initialize parameters
+ for key in module.params.keys():
+ setattr(self, key, module.params[key])
+
+ # Initialize return values
+ self.changed = False
+ self.failed = False
+ self.msg = None
+ self.rc = None
+ self.stderr = ""
+ self.stdout = ""
+
+ def _run_cmd(self, cmd):
+ self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
+
+ def _get_cmd(self, command):
+ cmd = "%s %s" % (self.swupd_cmd, command)
+
+ if self.format:
+ cmd += " --format=%s" % self.format
+ if self.manifest:
+ cmd += " --manifest=%s" % self.manifest
+ if self.url:
+ cmd += " --url=%s" % self.url
+ else:
+ if self.contenturl and command != "check-update":
+ cmd += " --contenturl=%s" % self.contenturl
+ if self.versionurl:
+ cmd += " --versionurl=%s" % self.versionurl
+
+ return cmd
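+
+ # Sketch of the resulting command line (hypothetical parameters): with
+ # format='staging' and manifest=12920, _get_cmd("verify") yields roughly
+ #   swupd verify --format=staging --manifest=12920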
+
+ def _is_bundle_installed(self, bundle):
+ try:
+ os.stat("/usr/share/clear/bundles/%s" % bundle)
+ except OSError:
+ return False
+
+ return True
+
+ def _needs_update(self):
+ cmd = self._get_cmd("check-update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ return True
+
+ if self.rc == 1:
+ return False
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+ return False
+
+ def _needs_verify(self):
+ cmd = self._get_cmd("verify")
+ self._run_cmd(cmd)
+
+ if self.rc != 0:
+ self.failed = True
+ self.msg = "Failed to check for filesystem inconsistencies."
+
+ if self.FILES_NOT_MATCH in self.stdout:
+ return True
+
+ return False
+
+ def install_bundle(self, bundle):
+ """Installs a bundle with `swupd bundle-add bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=not self._is_bundle_installed(bundle))
+
+ if self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s is already installed" % bundle
+ return
+
+ cmd = self._get_cmd("bundle-add %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s installed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to install bundle %s" % bundle
+
+ def remove_bundle(self, bundle):
+ """Removes a bundle with `swupd bundle-remove bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._is_bundle_installed(bundle))
+
+ if not self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s not installed"
+ return
+
+ cmd = self._get_cmd("bundle-remove %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s removed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to remove bundle %s" % bundle
+
+ def update_os(self):
+ """Updates the os with `swupd update`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_update())
+
+ if not self._needs_update():
+ self.msg = "There are no updates available"
+ return
+
+ cmd = self._get_cmd("update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Update successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def verify_os(self):
+ """Verifies filesystem against specified or current version"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_verify())
+
+ if not self._needs_verify():
+ self.msg = "No files where changed"
+ return
+
+ cmd = self._get_cmd("verify --fix")
+ self._run_cmd(cmd)
+
+ if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
+ self.changed = True
+ self.msg = "Fix successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to verify the OS"
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ contenturl=dict(type="str"),
+ format=dict(type="str"),
+ manifest=dict(aliases=["release", "version"], type="int"),
+ name=dict(aliases=["bundle"], type="str"),
+ state=dict(default="present", choices=["present", "absent"], type="str"),
+ update=dict(default=False, type="bool"),
+ url=dict(type="str"),
+ verify=dict(default=False, type="bool"),
+ versionurl=dict(type="str"),
+ ),
+ required_one_of=[["name", "update", "verify"]],
+ mutually_exclusive=[["name", "update", "verify"]],
+ supports_check_mode=True
+ )
+
+ swupd = Swupd(module)
+
+ name = module.params["name"]
+ state = module.params["state"]
+ update = module.params["update"]
+ verify = module.params["verify"]
+
+ if update:
+ swupd.update_os()
+ elif verify:
+ swupd.verify_os()
+ elif state == "present":
+ swupd.install_bundle(name)
+ elif state == "absent":
+ swupd.remove_bundle(name)
+ else:
+ swupd.failed = True
+
+ if swupd.failed:
+ module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+ else:
+ module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py
new file mode 100644
index 00000000..9d54fbcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Philippe Makowski
+# Written by Philippe Makowski <philippem@mageia.org>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: urpmi
+short_description: Urpmi manager
+description:
+ - Manages packages with I(urpmi) (such as for Mageia or Mandriva)
+options:
+ name:
+ description:
+ - A list of package names to install, upgrade or remove.
+ required: yes
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update the package database first (C(urpmi.update -a)).
+ type: bool
+ default: no
+ aliases: ['update-cache']
+ no_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(urpmi).
+ type: bool
+ default: yes
+ aliases: ['no-recommends']
+ force:
+ description:
+ - Assume "yes" is the answer to any question urpmi has to ask.
+ Corresponds to the C(--force) option for I(urpmi).
+ type: bool
+ default: yes
+ root:
+ description:
+ - Specifies an alternative install root, relative to which all packages will be installed.
+ Corresponds to the C(--root) option for I(urpmi).
+ aliases: [ installroot ]
+ type: str
+author:
+- Philippe Makowski (@pmakowski)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.urpmi:
+ pkg: foo
+ state: present
+
+- name: Remove package foo
+ community.general.urpmi:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.urpmi:
+ pkg: foo,bar
+ state: absent
+
+- name: Update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists)
+ community.general.urpmi:
+ name: bar
+ state: present
+ update_cache: yes
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_package_provides(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc == 0
+
+
+def update_package_db(module):
+
+ urpmiupdate_path = module.get_bin_path("urpmi.update", True)
+ cmd = "%s -a -q" % (urpmiupdate_path,)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def remove_packages(module, packages, root):
+
+ remove_c = 0
+ # Use a for loop so that, on error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package, root):
+ continue
+
+ urpme_path = module.get_bin_path("urpme", True)
+ cmd = "%s --auto %s %s" % (urpme_path, root_option(root), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec, root, force=True, no_recommends=True):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+ if no_recommends:
+ no_recommends_yes = '--no-recommends'
+ else:
+ no_recommends_yes = ''
+
+ if force:
+ force_yes = '--force'
+ else:
+ force_yes = ''
+
+ urpmi_path = module.get_bin_path("urpmi", True)
+ cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes,
+ no_recommends_yes,
+ root_option(root),
+ packages))
+
+ rc, out, err = module.run_command(cmd)
+
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ module.fail_json(msg="'urpmi %s' failed: %s" % (package, err))
+
+ # urpmi always has exit code 0 if --force is used
+ if rc:
+ module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="%s present(s)" % packages)
+ else:
+ module.exit_json(changed=False)
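+
+# Illustration (hypothetical packages): with the defaults force=True and
+# no_recommends=True and no alternative root, the constructed command is roughly
+#   urpmi --auto --force --quiet --no-recommends 'foo' 'bar'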
+
+
+def root_option(root):
+ if (root):
+ return "--root=%s" % (root)
+ else:
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ force=dict(type='bool', default=True),
+ no_recommends=dict(type='bool', default=True, aliases=['no-recommends']),
+ name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']),
+ root=dict(type='str', aliases=['installroot']),
+ ),
+ )
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, p['name'], p['root'], p['force'], p['no_recommends'])
+
+ elif p['state'] in ['removed', 'absent']:
+ remove_packages(module, p['name'], p['root'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py
new file mode 100644
index 00000000..6f2f5dfa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2016 Dino Occhialini <dino.occhialini@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xbps
+short_description: Manage packages with XBPS
+description:
+ - Manage packages with the XBPS package manager.
+author:
+ - "Dino Occhialini (@dinoocch)"
+ - "Michael Aldridge (@the-maldridge)"
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ aliases: [pkg,package]
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "absent", "latest", "installed", "removed"]
+ type: str
+ recurse:
+ description:
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
+ type: bool
+ default: no
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
+ aliases: ['update-cache']
+ type: bool
+ default: yes
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ type: bool
+ default: no
+ upgrade_xbps:
+ description:
+ - Whether or not to upgrade the xbps package when necessary.
+ Before installing new packages,
+ xbps requires the user to update the xbps package itself.
+ Thus when this option is set to C(no),
+ upgrades and installations will fail when xbps is not up to date.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ force:
+ description:
+ - This option doesn't have any effect and is deprecated; it will be
+ removed in 3.0.0.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo (automatically updating the xbps package if needed)
+ community.general.xbps: name=foo state=present
+
+- name: Upgrade package foo
+ community.general.xbps: name=foo state=latest update_cache=yes
+
+- name: Remove packages foo and bar
+ community.general.xbps: name=foo,bar state=absent
+
+- name: Recursively remove package foo
+ community.general.xbps: name=foo state=absent recurse=yes
+
+- name: Update package cache
+ community.general.xbps: update_cache=yes
+
+- name: Upgrade packages
+ community.general.xbps: upgrade=yes
+
+- name: Install a package, failing if the xbps package is out of date
+ community.general.xbps:
+ name: foo
+ state: present
+ upgrade_xbps: no
+'''
+
+RETURN = '''
+msg:
+ description: Message about results
+ returned: success
+ type: str
+ sample: "System Upgraded"
+packages:
+ description: Packages that are affected/would be affected
+ type: list
+ sample: ["ansible"]
+ returned: success
+'''
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def is_installed(xbps_output):
+ """Returns package install state"""
+ return bool(len(xbps_output))
+
+
+def query_package(module, xbps_path, name, state="present"):
+ """Returns Package info"""
+ if state == "present":
+ lcmd = "%s %s" % (xbps_path['query'], name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if not is_installed(lstdout):
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s -Sun" % (xbps_path['install'])
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ if rrc == 0 or rrc == 17:
+ """Return True to indicate that the package is installed locally,
+ and the result of the version number comparison to determine if the
+ package is up-to-date"""
+ return True, name not in rstdout
+
+ return False, False
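+
+# Illustration (hypothetical package): query_package(module, xbps_path, 'foo')
+# returns (True, True) when 'foo' is installed and does not appear in the
+# pending-update list printed by `xbps-install -Sun`, (True, False) when an
+# update is pending, and (False, False) when it is not installed.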
+
+
+def update_package_db(module, xbps_path):
+ """Returns True if update_package_db changed"""
+ cmd = "%s -S" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="Could not update package db")
+ if "avg rate" in stdout:
+ return True
+ else:
+ return False
+
+
+def upgrade_xbps(module, xbps_path):
+ cmdupgradexbps = "%s -uy xbps" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg='Could not upgrade xbps itself')
+
+
+def upgrade(module, xbps_path):
+ """Returns true is full upgrade succeeds"""
+ cmdupgrade = "%s -uy" % (xbps_path['install'])
+ cmdneedupgrade = "%s -un" % (xbps_path['install'])
+
+ rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
+ if rc == 0:
+ if len(stdout.splitlines()) == 0:
+ module.exit_json(changed=False, msg='Nothing to upgrade')
+ elif module.check_mode:
+ module.exit_json(changed=True, msg='Would have performed upgrade')
+ else:
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded')
+ elif rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-upgrade again
+ module.params['upgrade_xbps'] = False
+ upgrade(module, xbps_path)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.fail_json(msg="Could not upgrade")
+
+
+def remove_packages(module, xbps_path, packages):
+ """Returns true if package removal succeeds"""
+ changed_packages = []
+ # Use a for loop so that, on error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, xbps_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s -y %s" % (xbps_path['remove'], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ changed_packages.append(package)
+
+ if len(changed_packages) > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" %
+ len(changed_packages), packages=changed_packages)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, xbps_path, state, packages):
+ """Returns true if package install succeeds."""
+ toInstall = []
+ for package in packages:
+ # Skip the package if it is installed and state == present, or if
+ # state == latest and the package is already up-to-date.
+ installed, updated = query_package(module, xbps_path, package)
+ if installed and (state == 'present' or
+ (state == 'latest' and updated)):
+ continue
+
+ toInstall.append(package)
+
+ if len(toInstall) == 0:
+ module.exit_json(changed=False, msg="Nothing to Install")
+
+ cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-update again
+ module.params['upgrade_xbps'] = False
+ install_packages(module, xbps_path, state, packages)
+ elif rc != 0 and not (state == 'latest' and rc == 17):
+ module.fail_json(msg="failed to install %s" % (package))
+
+ module.exit_json(changed=True, msg="installed %s package(s)"
+ % (len(toInstall)),
+ packages=toInstall)
+
+
+def check_packages(module, xbps_path, packages, state):
+ """Returns change status of command"""
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, xbps_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state),
+ packages=would_be_changed)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state,
+ packages=[])
+
+
+def update_cache(module, xbps_path, upgrade_planned):
+ """Update package cache"""
+ if module.check_mode:
+ if upgrade_planned:
+ return
+ module.exit_json(
+ changed=True, msg='Would have updated the package cache'
+ )
+ changed = update_package_db(module, xbps_path)
+ if not upgrade_planned:
+ module.exit_json(changed=changed, msg=(
+ 'Updated the package master lists' if changed
+ else 'Package list already up to date'
+ ))
+
+
+def main():
+ """Returns, calling appropriate command"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'installed',
+ 'latest', 'absent',
+ 'removed']),
+ recurse=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'),
+ upgrade=dict(default=False, type='bool'),
+ update_cache=dict(default=True, aliases=['update-cache'],
+ type='bool'),
+ upgrade_xbps=dict(default=True, type='bool')
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ supports_check_mode=True)
+
+ xbps_path = dict()
+ xbps_path['install'] = module.get_bin_path('xbps-install', True)
+ xbps_path['query'] = module.get_bin_path('xbps-query', True)
+ xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
+
+ if not os.path.exists(xbps_path['install']):
+ module.fail_json(msg="cannot find xbps, in path %s"
+ % (xbps_path['install']))
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_cache(module, xbps_path, (p['name'] or p['upgrade']))
+
+ if p['upgrade']:
+ upgrade(module, xbps_path)
+
+ if p['name']:
+ pkgs = p['name']
+
+ if module.check_mode:
+ check_packages(module, xbps_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, xbps_path, p['state'], pkgs)
+ elif p['state'] == 'absent':
+ remove_packages(module, xbps_path, pkgs)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py
new file mode 100644
index 00000000..9ad539ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier or a list of either.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
+ - You can also pass a url or a local path to a rpm file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade, installed, removed ]
+ default: "present"
+ type: str
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ type: str
+ extra_args_precommand:
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG signature checking of the package
+ being installed. Has an effect only if state is
+ I(present) or I(latest).
+ required: false
+ default: "no"
+ type: bool
+ disable_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) deviates from zypper's
+ default behavior; with C(no), recommended packages are installed.
+ required: false
+ default: "yes"
+ type: bool
+ force:
+ description:
+ - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+ required: false
+ default: "no"
+ type: bool
+ force_resolution:
+ description:
+ - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: "no"
+ type: bool
+ version_added: '0.2.0'
+ update_cache:
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: "no"
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ description:
+ - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+ version is specified as part of the package name.
+ required: false
+ default: "no"
+ type: bool
+ extra_args:
+ required: false
+ description:
+ - Add additional options to C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ allow_vendor_change:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--allow-vendor-change) option to I(zypper) dist-upgrade command.
+ version_added: '0.2.0'
+ replacefiles:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--replacefiles) option to I(zypper) install/update command.
+ version_added: '0.2.0'
+notes:
+ - When used with a C(loop:) each package will be processed individually;
+ it is much more efficient to pass the list directly to the I(name) option.
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+- name: Install nmap
+ community.general.zypper:
+ name: nmap
+ state: present
+
+- name: Install apache2 with recommended packages
+ community.general.zypper:
+ name: apache2
+ state: present
+ disable_recommends: no
+
+- name: Apply a given patch
+ community.general.zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
+
+- name: Remove the nmap package
+ community.general.zypper:
+ name: nmap
+ state: absent
+
+- name: Install the nginx rpm from a remote repo
+ community.general.zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
+
+- name: Install local rpm file
+ community.general.zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+- name: Update all packages
+ community.general.zypper:
+ name: '*'
+ state: latest
+
+- name: Apply all available patches
+ community.general.zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+- name: Perform a dist-upgrade with additional arguments
+ community.general.zypper:
+ name: '*'
+ state: dist-upgrade
+ allow_vendor_change: true
+ extra_args: '--allow-arch-change'
+
+- name: Perform an installation of nmap with the install option replacefiles
+ community.general.zypper:
+ name: 'nmap'
+ state: latest
+ replacefiles: true
+
+- name: Refresh repositories and update package openssl
+ community.general.zypper:
+ name: openssl
+ state: present
+ update_cache: yes
+
+- name: "Install specific version (possible comparisons: <, >, <=, >=, =)"
+ community.general.zypper:
+ name: 'docker>=1.10'
+ state: present
+
+- name: Wait 20 seconds to acquire the lock before failing
+ community.general.zypper:
+ name: mosh
+ state: present
+ environment:
+ ZYPP_LOCK_TIMEOUT: 20
+'''
+
+import xml
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils._text import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+ def __init__(self, name, prefix, version):
+ self.name = name
+ self.prefix = prefix
+ self.version = version
+ self.shouldinstall = (prefix == '+')
+
+ def __str__(self):
+ return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+ """splits of the package name and desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+ if prefix == '~':
+ prefix = '-'
+
+ # try the two-character operators first so that '>=' is not consumed as '>'
+ version_check = re.compile('^(.*?)((?:<=|>=|<|>|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ if version is None:
+ version = ''
+ return prefix, name, version
+ except Exception:
+ return prefix, name, ''
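+
+# Illustration (hypothetical input): split_name_version('-docker>=1.10')
+# returns ('-', 'docker', '>=1.10'); split_name_version('apache') returns
+# ('', 'apache', '').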
+
+
+def get_want_state(names, remove=False):
+ packages = []
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix not in ['-', '+']:
+ if remove:
+ prefix = '-'
+ else:
+ prefix = '+'
+ packages.append(Package(pname, prefix, version))
+ return packages, urls
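+
+# Illustration (hypothetical input): get_want_state(['nmap', '-exim',
+# 'http://example.com/x.rpm']) yields Package objects '+nmap' and '-exim'
+# plus the URL list ['http://example.com/x.rpm'].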
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend([p.name for p in packages])
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
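+
+# Illustration (hypothetical package): for a single package 'nmap' with the
+# default type, the search command assembled here is roughly
+#   /usr/bin/zypper --quiet --non-interactive --xmlout --disable-repositories
+#   search --type package --match-exact --details --installed-only nmap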
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ try:
+ dom = parseXML(stdout)
+ except xml.parsers.expat.ExpatError as exc:
+ m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+ firstrun = packages is None
+ if firstrun:
+ packages = {}
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
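+
+# For orientation, a minimal sketch (not verbatim zypper output) of the XML
+# that parse_zypper_xml consumes, here from a 'search' run:
+#   <solvable-list>
+#     <solvable type="package" name="nmap" edition="7.70-1.1" status="installed"/>
+#   </solvable-list>
+# Each solvable's attributes feed the returned packages dict; the parent node
+# name (for example 'to-install' or 'to-upgrade' in install runs) becomes the
+# package's 'group'.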
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['replacefiles']:
+ cmd.append('--replacefiles')
+ if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']:
+ cmd.append('--allow-vendor-change')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
+ return cmd
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ packages, urls = get_want_state(name)
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if any(p.version for p in packages):
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ # if a version is given leave the package in to let zypper handle the version
+ # resolution
+ packageswithoutversion = [p for p in packages if not p.version]
+ prerun_state = get_installed_state(m, packageswithoutversion)
+ # generate lists of packages to install or remove
+ packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)]
+
+ if not packages and not urls:
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+ # pass packages to zypper
+ # allow for + or - prefixes in install/remove lists
+ # also add version specifier if given
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend([str(p) for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ elif m.params['state'] == 'dist-upgrade':
+ cmdname = 'dist-upgrade'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ packages, urls = get_want_state(name, remove=True)
+ if any(p.prefix == '+' for p in packages):
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, packages)
+ packages = [p for p in packages if p.name in prerun_state]
+
+ if not packages:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend([p.name + p.version for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list', elements='str'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(required=False, default=None),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ disable_recommends=dict(required=False, default=True, type='bool'),
+ force=dict(required=False, default=False, type='bool'),
+ force_resolution=dict(required=False, default=False, type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'),
+ oldpackage=dict(required=False, default=False, type='bool'),
+ extra_args=dict(required=False, default=None),
+ allow_vendor_change=dict(required=False, default=False, type='bool'),
+ replacefiles=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = list(filter(None, name))
+
+ # Refresh repositories
+ if update_cache and not module.check_mode:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state in ['latest', 'dist-upgrade']:
+ packages_changed, retvals = package_update_all(module)
+ elif name != ['*'] and state == 'dist-upgrade':
+ module.fail_json(msg="Can not dist-upgrade specific packages.")
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py
new file mode 100644
index 00000000..55738b58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py
@@ -0,0 +1,402 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# (c) 2014, Justin Lecher <jlec@gentoo.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper_repository
+author: "Matthias Vogelgesang (@matze)"
+short_description: Add and remove Zypper repositories
+description:
+ - Add or remove Zypper repositories on SUSE and openSUSE
+options:
+ name:
+ description:
+ - A name for the repository. Not required when adding repofiles.
+ type: str
+ repo:
+ description:
+ - URI of the repository or .repo file. Required when state=present.
+ type: str
+ state:
+ description:
+ - Whether the repository should be present or absent.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+ description:
+ description:
+ - A description of the repository.
+ type: str
+ disable_gpg_check:
+ description:
+ - Whether to disable GPG signature checking of
+ all packages. Has an effect only if state is
+ I(present).
+ - Needs zypper version >= 1.6.2.
+ type: bool
+ default: no
+ autorefresh:
+ description:
+ - Enable autorefresh of the repository.
+ type: bool
+ default: yes
+ aliases: [ "refresh" ]
+ priority:
+ description:
+ - Set priority of repository. Packages will always be installed
+ from the repository with the smallest priority number.
+ - Needs zypper version >= 1.12.25.
+ type: int
+ overwrite_multiple:
+ description:
+ - Overwrite multiple repository entries, if repositories with both name and
+ URL already exist.
+ type: bool
+ default: no
+ auto_import_keys:
+ description:
+ - Automatically import the gpg signing key of the new or changed repository.
+ - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Implies runrefresh.
+ - Only works with C(.repo) files if I(name) is given explicitly.
+ type: bool
+ default: no
+ runrefresh:
+ description:
+ - Refresh the package list of the given repository.
+ - Can be used with repo=* to refresh all repositories.
+ type: bool
+ default: no
+ enabled:
+ description:
+ - Set repository to enabled (or disabled).
+ type: bool
+ default: yes
+
+
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+'''
+
+EXAMPLES = '''
+- name: Add NVIDIA repository for graphics drivers
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: present
+
+- name: Remove NVIDIA repository
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: absent
+
+- name: Add python development repository
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+- name: Refresh all repos
+ community.general.zypper_repository:
+ repo: '*'
+ runrefresh: yes
+
+- name: Add a repo and add its gpg key
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+ auto_import_keys: yes
+
+- name: Force refresh of a repository
+ community.general.zypper_repository:
+ repo: 'http://my_internal_ci_repo/repo'
+ name: my_ci_repo
+ state: present
+ runrefresh: yes
+'''
+
+import traceback
+
+XML_IMP_ERR = None
+try:
+ from xml.dom.minidom import parseString as parseXML
+ HAS_XML = True
+except ImportError:
+ XML_IMP_ERR = traceback.format_exc()
+ HAS_XML = False
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+
+def _get_cmd(*args):
+ """Combines the non-interactive zypper command with arguments/subcommands"""
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
+ cmd.extend(args)
+
+ return cmd
+
+
+def _parse_repos(module):
+ """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
+ cmd = _get_cmd('--xmlout', 'repos')
+
+ if not HAS_XML:
+ module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ repos = []
+ dom = parseXML(stdout)
+ repo_list = dom.getElementsByTagName('repo')
+ for repo in repo_list:
+ opts = {}
+ for o in REPO_OPTS:
+ opts[o] = repo.getAttribute(o)
+ opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ # A repo can be uniquely identified by an alias + url
+ repos.append(opts)
+ return repos
+ # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+ elif rc == 6:
+ return []
+ else:
+ module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+
+def _repo_changes(realrepo, repocmp):
+ "Check whether the 2 given repos have different settings."
+ for k in repocmp:
+ if repocmp[k] and k not in realrepo:
+ return True
+
+ for k, v in realrepo.items():
+ if k in repocmp and repocmp[k]:
+ valold = str(repocmp[k] or "")
+ valnew = v or ""
+ if k == "url":
+ valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+ if valold != valnew:
+ return True
+ return False
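+
+# Illustration (hypothetical repos): a trailing slash on the URL is ignored,
+# so {'url': 'http://example.com/repo/'} and {'url': 'http://example.com/repo'}
+# are considered unchanged, while differing 'priority' values count as a change.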
+
+
+def repo_exists(module, repodata, overwrite_multiple):
+ """Check whether the repository already exists.
+
+ returns (exists, mod, old_repos)
+ exists: whether a matching (name, URL) repo exists
+ mod: whether there are changes compared to the existing repo
+ old_repos: list of matching repos
+ """
+ existing_repos = _parse_repos(module)
+
+ # look for repos that have matching alias or url to the one searched
+ repos = []
+ for kw in ['alias', 'url']:
+ name = repodata[kw]
+ for oldr in existing_repos:
+ if repodata[kw] == oldr[kw] and oldr not in repos:
+ repos.append(oldr)
+
+ if len(repos) == 0:
+ # Repo does not exist yet
+ return (False, False, None)
+ elif len(repos) == 1:
+ # Found an existing repo, look for changes
+ has_changes = _repo_changes(repos[0], repodata)
+ return (True, has_changes, repos)
+ elif len(repos) >= 2:
+ if overwrite_multiple:
+ # Found two repos and want to overwrite_multiple
+ return (True, True, repos)
+ else:
+ errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+ errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
+ module.fail_json(msg=errmsg)
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+ "Adds the repo, removes old repos before, that would conflict."
+ repo = repodata['url']
+ cmd = _get_cmd('addrepo', '--check')
+ if repodata['name']:
+ cmd.extend(['--name', repodata['name']])
+
+ # priority on addrepo available since 1.12.25
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+ if repodata['priority']:
+ if zypper_version >= LooseVersion('1.12.25'):
+ cmd.extend(['--priority', str(repodata['priority'])])
+ else:
+ warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+
+ if repodata['enabled'] == '0':
+ cmd.append('--disable')
+
+ # gpgcheck available since 1.6.2
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+ # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+ if zypper_version >= LooseVersion('1.6.2'):
+ if repodata['gpgcheck'] == '1':
+ cmd.append('--gpgcheck')
+ else:
+ cmd.append('--no-gpgcheck')
+ else:
+ warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+
+ if repodata['autorefresh'] == '1':
+ cmd.append('--refresh')
+
+ cmd.append(repo)
+
+ if not repo.endswith('.repo'):
+ cmd.append(repodata['alias'])
+
+ if old_repos is not None:
+ for oldrepo in old_repos:
+ remove_repo(module, oldrepo['url'])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc, stdout, stderr
+
+
+def remove_repo(module, repo):
+ "Removes the repo."
+ cmd = _get_cmd('removerepo', repo)
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def get_zypper_version(module):
+ rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
+ if rc != 0 or not stdout.startswith('zypper '):
+ return LooseVersion('1.0')
+ return LooseVersion(stdout.split()[1])
+
+
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+ "Forces zypper to refresh repo metadata."
+ if auto_import_keys:
+ cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
+ else:
+ cmd = _get_cmd('refresh', '--force')
+ if shortname is not None:
+ cmd.extend(['-r', shortname])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ repo=dict(required=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ runrefresh=dict(required=False, default=False, type='bool'),
+ description=dict(required=False),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority=dict(required=False, type='int'),
+ enabled=dict(required=False, default=True, type='bool'),
+ overwrite_multiple=dict(required=False, default=False, type='bool'),
+ auto_import_keys=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=False,
+ required_one_of=[['state', 'runrefresh']],
+ )
+
+ repo = module.params['repo']
+ alias = module.params['name']
+ state = module.params['state']
+ overwrite_multiple = module.params['overwrite_multiple']
+ auto_import_keys = module.params['auto_import_keys']
+ runrefresh = module.params['runrefresh']
+
+ zypper_version = get_zypper_version(module)
+ warnings = [] # collect warning messages for final output
+
+ repodata = {
+ 'url': repo,
+ 'alias': alias,
+ 'name': module.params['description'],
+ 'priority': module.params['priority'],
+ }
+ # rewrite bools in the language that zypper lr -x provides for easier comparison
+ if module.params['enabled']:
+ repodata['enabled'] = '1'
+ else:
+ repodata['enabled'] = '0'
+ if module.params['disable_gpg_check']:
+ repodata['gpgcheck'] = '0'
+ else:
+ repodata['gpgcheck'] = '1'
+ if module.params['autorefresh']:
+ repodata['autorefresh'] = '1'
+ else:
+ repodata['autorefresh'] = '0'
+
+ def exit_unchanged():
+ module.exit_json(changed=False, repodata=repodata, state=state)
+
+ # Check run-time module parameters
+ if repo == '*' or alias == '*':
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys)
+ module.exit_json(changed=False, runrefresh=True)
+ else:
+ module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
+ if state == 'present' and not repo:
+ module.fail_json(msg='Module option state=present requires repo')
+ if state == 'absent' and not repo and not alias:
+ module.fail_json(msg='Alias or repo parameter required when state=absent')
+
+ if repo and repo.endswith('.repo'):
+ if alias:
+ module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
+ else:
+ if not alias and state == "present":
+ module.fail_json(msg='Name required when adding non-repo files.')
+
+ exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+ if repo:
+ shortname = repo
+ else:
+ shortname = alias
+
+ if state == 'present':
+ if exists and not mod:
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys, shortname)
+ exit_unchanged()
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ if rc == 0 and (runrefresh or auto_import_keys):
+ runrefreshrepo(module, auto_import_keys, shortname)
+ elif state == 'absent':
+ if not exists:
+ exit_unchanged()
+ rc, stdout, stderr = remove_repo(module, shortname)
+
+ if rc == 0:
+ module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ else:
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py
new file mode 100644
index 00000000..c76530f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py
@@ -0,0 +1,651 @@
+#!/usr/bin/python
+# (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com>
+# (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com>
+# (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_device
+
+short_description: Manage a bare metal server in the Packet Host.
+
+description:
+    - Manage a bare metal server in the Packet Host (a "device" in API terms).
+    - When the machine is created, the module can optionally wait for a public IP address, or for the active state.
+ - This module has a dependency on packet >= 1.0.
+ - API is documented at U(https://www.packet.net/developers/api/devices).
+
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+ - Thibaud Morel l'Horset (@teebes) <teebes@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+
+ count:
+ description:
+      - The number of devices to create. The count can be included in the hostname via the %d string formatter.
+ default: 1
+
+ count_offset:
+ description:
+ - From which number to start the count.
+ default: 1
+
+ device_ids:
+ description:
+ - List of device IDs on which to operate.
+
+ tags:
+ description:
+ - List of device tags.
+ - Currently implemented only for device creation.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+
+ facility:
+ description:
+ - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
+
+ features:
+ description:
+ - Dict with "features" for device creation. See Packet API docs for details.
+
+ hostnames:
+ description:
+ - A hostname of a device, or a list of hostnames.
+      - If given a string or a one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
+      - If only one hostname is given, it is expanded to a list when I(count)>1.
+ aliases: [name]
+
+ locked:
+ description:
+ - Whether to lock a created device.
+ default: false
+ aliases: [lock]
+ type: bool
+
+ operating_system:
+ description:
+ - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
+
+ plan:
+ description:
+ - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+
+ state:
+ description:
+ - Desired state of the device.
+ - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
+      - If set to C(active), the module call will block until all the specified devices are in state active according to the Packet API, or until I(wait_timeout) expires.
+ choices: [present, absent, active, inactive, rebooted]
+ default: present
+
+ user_data:
+ description:
+      - Userdata blob made available to the machine.
+
+ wait_for_public_IPv:
+ description:
+ - Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
+      - If set to 4, wait until a public IPv4 address is assigned to the instance.
+      - If set to 6, wait until a public IPv6 address is assigned to the instance.
+ choices: [4,6]
+
+ wait_timeout:
+ description:
+ - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
+      - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consecutively, applying the timeout twice.
+ default: 900
+ ipxe_script_url:
+ description:
+ - URL of custom iPXE script for provisioning.
+ - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
+ always_pxe:
+ description:
+ - Persist PXE as the first boot option.
+ - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
+ default: false
+ type: bool
+
+
+requirements:
+ - "packet-python >= 1.35"
+
+notes:
+ - Doesn't support check mode.
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+# Creating devices
+
+- name: Create 1 device
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ tags: ci-xyz
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+# Create the same device and wait until it is in state "active", (when it's
+# ready for other API operations). Fail if the device is not "active" in
+# 10 minutes.
+
+- name: Create device and wait up to 10 minutes for active state
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+ wait_timeout: 600
+
+- name: Create 3 ubuntu devices called server-01, server-02 and server-03
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: server-%02d
+ count: 3
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
+ hosts: localhost
+ tasks:
+ - name: Create 3 devices and register their facts
+ community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_stable
+ plan: baremetal_0
+ facility: ewr1
+ locked: true
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ ssh_authorized_keys:
+ - {{ lookup('file', 'my_packet_sshkey') }}
+ coreos:
+ etcd:
+ discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
+ addr: $private_ipv4:4001
+ peer-addr: $private_ipv4:7001
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
+
+ - name: Wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ with_items: "{{ newhosts.devices }}"
+
+
+# Other states of devices
+
+- name: Remove 3 devices by uuid
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ state: absent
+ device_ids:
+ - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
+ - 2eb4faf8-a638-4ac7-8f47-86fe514c3043
+ - 6bb4faf8-a638-4ac7-8f47-86fe514c301f
+'''
+
+RETURN = '''
+changed:
+ description: True if a device was altered in any way (created, modified or removed)
+ type: bool
+ sample: True
+ returned: success
+
+devices:
+ description: Information about each device that was processed
+ type: list
+ sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
+            "public_ipv4": "147.229.15.12", "private_ipv4": "10.0.15.12",
+            "tags": [], "locked": false, "state": "provisioning",
+            "public_ipv6": "2604:1380:2:5200::3"}]'
+ returned: success
+''' # NOQA
+
+
+import os
+import re
+import time
+import uuid
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+MAX_DEVICES = 100
+
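+# Illustration of the hostname pattern above: dot-separated labels made of
+# alphanumerics and hyphens that must start and end with an alphanumeric.
+def _hostname_regex_sketch():
+    samples = ['server-01', 'a.b.c', '-leading', 'trailing-', 'under_score']
+    # -> {'server-01': True, 'a.b.c': True, '-leading': False,
+    #     'trailing-': False, 'under_score': False}
+    return {h: bool(re.match(HOSTNAME_RE, h)) for h in samples}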
+PACKET_DEVICE_STATES = (
+ 'queued',
+ 'provisioning',
+ 'failed',
+ 'powering_on',
+ 'active',
+ 'powering_off',
+ 'inactive',
+ 'rebooting',
+)
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
+
+
+def serialize_device(device):
+ """
+ Standard representation for a device as returned by various tasks::
+
+ {
+ 'id': 'device_id'
+ 'hostname': 'device_hostname',
+ 'tags': [],
+ 'locked': false,
+ 'state': 'provisioning',
+ 'ip_addresses': [
+ {
+ "address": "147.75.194.227",
+ "address_family": 4,
+ "public": true
+ },
+ {
+ "address": "2604:1380:2:5200::3",
+ "address_family": 6,
+ "public": true
+ },
+ {
+ "address": "10.100.11.129",
+ "address_family": 4,
+ "public": false
+ }
+ ],
+ "private_ipv4": "10.100.11.129",
+ "public_ipv4": "147.75.194.227",
+ "public_ipv6": "2604:1380:2:5200::3",
+ }
+
+ """
+ device_data = {}
+ device_data['id'] = device.id
+ device_data['hostname'] = device.hostname
+ device_data['tags'] = device.tags
+ device_data['locked'] = device.locked
+ device_data['state'] = device.state
+ device_data['ip_addresses'] = [
+ {
+ 'address': addr_data['address'],
+ 'address_family': addr_data['address_family'],
+ 'public': addr_data['public'],
+ }
+ for addr_data in device.ip_addresses
+ ]
+    # Also include each IP as a key for easier lookup in roles.
+ # Key names:
+ # - public_ipv4
+ # - public_ipv6
+ # - private_ipv4
+ # - private_ipv6 (if there is one)
+ for ipdata in device_data['ip_addresses']:
+ if ipdata['public']:
+ if ipdata['address_family'] == 6:
+ device_data['public_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['public_ipv4'] = ipdata['address']
+ elif not ipdata['public']:
+ if ipdata['address_family'] == 6:
+                # Packet doesn't assign private ipv6 yet, but maybe one
+                # day it will
+ device_data['private_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['private_ipv4'] = ipdata['address']
+ return device_data
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
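+# Sketch: the str(val) == myuuid round-trip above rejects values that parse as
+# UUIDs but are not in canonical lowercase-hyphenated form.
+def _uuid_check_sketch():
+    canonical = '2a5122b9-c323-4d5c-b53c-9ad3f54273e7'
+    uppercase = canonical.upper()  # parses, but str() lowercases -> no match
+    return is_valid_uuid(canonical), is_valid_uuid(uppercase)  # (True, False)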
+
+def listify_string_name_or_id(s):
+ if ',' in s:
+ return s.split(',')
+ else:
+ return [s]
+
+
+def get_hostname_list(module):
+    # hostnames is a list-typed param, so it should already arrive as a list
+    # (and it does, as of Ansible 2.2.1), but to be defensive we keep the
+    # code that converts a plain string to a list
+ hostnames = module.params.get('hostnames')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ if isinstance(hostnames, str):
+ hostnames = listify_string_name_or_id(hostnames)
+ if not isinstance(hostnames, list):
+ raise Exception("name %s is not convertible to list" % hostnames)
+
+ # at this point, hostnames is a list
+ hostnames = [h.strip() for h in hostnames]
+
+ if (len(hostnames) > 1) and (count > 1):
+ _msg = ("If you set count>1, you should only specify one hostname "
+ "with the %d formatter, not a list of hostnames.")
+ raise Exception(_msg)
+
+ if (len(hostnames) == 1) and (count > 0):
+ hostname_spec = hostnames[0]
+ count_range = range(count_offset, count_offset + count)
+ if re.search(r"%\d{0,2}d", hostname_spec):
+ hostnames = [hostname_spec % i for i in count_range]
+ elif count > 1:
+ hostname_spec = '%s%%02d' % hostname_spec
+ hostnames = [hostname_spec % i for i in count_range]
+
+ for hn in hostnames:
+ if not is_valid_hostname(hn):
+ raise Exception("Hostname '%s' does not seem to be valid" % hn)
+
+ if len(hostnames) > MAX_DEVICES:
+ raise Exception("You specified too many hostnames, max is %d" %
+ MAX_DEVICES)
+ return hostnames
+
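+# Sketch of the expansion rules implemented above (hypothetical inputs):
+#   ['srv-%02d'], count=3, offset=1 -> ['srv-01', 'srv-02', 'srv-03']
+#   ['srv'],      count=2, offset=1 -> ['srv01', 'srv02']  (implicit %02d)
+def _expand_hostnames_sketch(spec, count, offset=1):
+    if not re.search(r"%\d{0,2}d", spec):
+        if count == 1:
+            return [spec]
+        spec = '%s%%02d' % spec
+    return [spec % i for i in range(offset, offset + count)]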
+
+def get_device_id_list(module):
+ device_ids = module.params.get('device_ids')
+
+ if isinstance(device_ids, str):
+ device_ids = listify_string_name_or_id(device_ids)
+
+ device_ids = [di.strip() for di in device_ids]
+
+ for di in device_ids:
+ if not is_valid_uuid(di):
+ raise Exception("Device ID '%s' does not seem to be valid" % di)
+
+ if len(device_ids) > MAX_DEVICES:
+ raise Exception("You specified too many devices, max is %d" %
+ MAX_DEVICES)
+ return device_ids
+
+
+def create_single_device(module, packet_conn, hostname):
+
+ for param in ('hostnames', 'operating_system', 'plan'):
+ if not module.params.get(param):
+ raise Exception("%s parameter is required for new device."
+ % param)
+ project_id = module.params.get('project_id')
+ plan = module.params.get('plan')
+ tags = module.params.get('tags')
+ user_data = module.params.get('user_data')
+ facility = module.params.get('facility')
+ operating_system = module.params.get('operating_system')
+ locked = module.params.get('locked')
+ ipxe_script_url = module.params.get('ipxe_script_url')
+ always_pxe = module.params.get('always_pxe')
+ if operating_system != 'custom_ipxe':
+ for param in ('ipxe_script_url', 'always_pxe'):
+ if module.params.get(param):
+ raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param)
+
+ device = packet_conn.create_device(
+ project_id=project_id,
+ hostname=hostname,
+ tags=tags,
+ plan=plan,
+ facility=facility,
+ operating_system=operating_system,
+ userdata=user_data,
+ locked=locked,
+ ipxe_script_url=ipxe_script_url,
+ always_pxe=always_pxe)
+ return device
+
+
+def refresh_device_list(module, packet_conn, devices):
+ device_ids = [d.id for d in devices]
+ new_device_list = get_existing_devices(module, packet_conn)
+ return [d for d in new_device_list if d.id in device_ids]
+
+
+def wait_for_devices_active(module, packet_conn, watched_devices):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ refreshed = watched_devices
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, watched_devices)
+ if all(d.state == 'active' for d in refreshed):
+ return refreshed
+ time.sleep(5)
+ raise Exception("Waiting for state \"active\" timed out for devices: %s"
+ % [d.hostname for d in refreshed if d.state != "active"])
+
+
+def wait_for_public_IPv(module, packet_conn, created_devices):
+
+ def has_public_ip(addr_list, ip_v):
+ return any([a['public'] and a['address_family'] == ip_v and
+ a['address'] for a in addr_list])
+
+ def all_have_public_ip(ds, ip_v):
+ return all([has_public_ip(d.ip_addresses, ip_v) for d in ds])
+
+ address_family = module.params.get('wait_for_public_IPv')
+
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, created_devices)
+ if all_have_public_ip(refreshed, address_family):
+ return refreshed
+ time.sleep(5)
+
+ raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
+ % (address_family, [d.hostname for d in created_devices]))
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ return packet_conn.list_devices(
+ project_id, params={
+ 'per_page': MAX_DEVICES})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_ids'):
+ device_id_list = get_device_id_list(module)
+ return {'ids': device_id_list, 'hostnames': []}
+ elif module.params.get('hostnames'):
+ hostname_list = get_hostname_list(module)
+ return {'hostnames': hostname_list, 'ids': []}
+
+
+def act_on_devices(module, packet_conn, target_state):
+ specified_identifiers = get_specified_device_identifiers(module)
+ existing_devices = get_existing_devices(module, packet_conn)
+ changed = False
+ create_hostnames = []
+ if target_state in ['present', 'active', 'rebooted']:
+ # states where we might create non-existing specified devices
+ existing_devices_names = [ed.hostname for ed in existing_devices]
+ create_hostnames = [hn for hn in specified_identifiers['hostnames']
+ if hn not in existing_devices_names]
+
+ process_devices = [d for d in existing_devices
+ if (d.id in specified_identifiers['ids']) or
+ (d.hostname in specified_identifiers['hostnames'])]
+
+ if target_state != 'present':
+ _absent_state_map = {}
+ for s in PACKET_DEVICE_STATES:
+ _absent_state_map[s] = packet.Device.delete
+
+ state_map = {
+ 'absent': _absent_state_map,
+ 'active': {'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ 'inactive': {'active': packet.Device.power_off},
+ 'rebooted': {'active': packet.Device.reboot,
+ 'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ }
+
+    # First do the non-creation actions; they might be faster
+ for d in process_devices:
+ if d.state == target_state:
+ continue
+ if d.state in state_map[target_state]:
+ api_operation = state_map[target_state].get(d.state)
+ if api_operation is not None:
+ api_operation(d)
+ changed = True
+ else:
+ _msg = (
+ "I don't know how to process existing device %s from state %s "
+ "to state %s" %
+ (d.hostname, d.state, target_state))
+ raise Exception(_msg)
+
+    # Finally, create the missing devices
+ created_devices = []
+ if create_hostnames:
+ created_devices = [create_single_device(module, packet_conn, n)
+ for n in create_hostnames]
+ if module.params.get('wait_for_public_IPv'):
+ created_devices = wait_for_public_IPv(
+ module, packet_conn, created_devices)
+ changed = True
+
+ processed_devices = created_devices + process_devices
+ if target_state == 'active':
+ processed_devices = wait_for_devices_active(
+ module, packet_conn, processed_devices)
+
+ return {
+ 'changed': changed,
+ 'devices': [serialize_device(d) for d in processed_devices]
+ }
+
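+# The transition table above, flattened for reference; a sketch of
+# (target state, current device state) -> SDK call, where None in state_map
+# means "the device is already on its way, just wait".
+_STATE_ACTIONS_SKETCH = {
+    ('absent', '<any state>'): 'packet.Device.delete',
+    ('active', 'inactive'): 'packet.Device.power_on',
+    ('inactive', 'active'): 'packet.Device.power_off',
+    ('rebooted', 'active'): 'packet.Device.reboot',
+    ('rebooted', 'inactive'): 'packet.Device.power_on',
+}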
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ count=dict(type='int', default=1),
+ count_offset=dict(type='int', default=1),
+ device_ids=dict(type='list'),
+ facility=dict(),
+ features=dict(type='dict'),
+ hostnames=dict(type='list', aliases=['name']),
+ tags=dict(type='list', elements='str'),
+ locked=dict(type='bool', default=False, aliases=['lock']),
+ operating_system=dict(),
+ plan=dict(),
+ project_id=dict(required=True),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ user_data=dict(default=None),
+ wait_for_public_IPv=dict(type='int', choices=[4, 6]),
+ wait_timeout=dict(type='int', default=900),
+ ipxe_script_url=dict(default=''),
+ always_pxe=dict(type='bool', default=False),
+ ),
+ required_one_of=[('device_ids', 'hostnames',)],
+ mutually_exclusive=[
+ ('hostnames', 'device_ids'),
+ ('count', 'device_ids'),
+ ('count_offset', 'device_ids'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_devices(module, packet_conn, state))
+ except Exception as e:
+ module.fail_json(msg='failed to set device state %s, error: %s' %
+ (state, to_native(e)), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
new file mode 100644
index 00000000..fbc12698
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_ip_subnet
+
+short_description: Assign IP subnet to a bare metal server.
+
+description:
+ - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host.
+    - IPv4 subnets must come from an already reserved block.
+    - IPv6 subnets must come from a publicly routable /56 block assigned to your project.
+ - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation.
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ hostname:
+ description:
+ - A hostname of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ device_id:
+ description:
+ - UUID of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ project_id:
+ description:
+ - UUID of a project of the device to/from which to assign/remove a subnet.
+ type: str
+
+ device_count:
+ description:
+ - The number of devices to retrieve from the project. The max allowed value is 1000.
+ - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info.
+ default: 100
+ type: int
+
+ cidr:
+ description:
+ - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host.
+ aliases: [name]
+ type: str
+ required: true
+
+ state:
+ description:
+ - Desired state of the IP subnet on the specified device.
+      - With state == C(present), you must specify either hostname or device_id. The subnet with the given CIDR will then be assigned to the specified device.
+      - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from the specified device.
+ - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+requirements:
+ - "packet-python >= 1.35"
+ - "python >= 2.6"
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+- name: Create 1 device and assign an arbitrary public IPv4 subnet to it
+ hosts: localhost
+ tasks:
+
+ - packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+
+# Pick an IPv4 address from a block allocated to your project.
+
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostname: myserver
+ cidr: "147.75.201.78/32"
+
+# Release IP address 147.75.201.78
+
+- name: Unassign IP address from any device in your project
+ hosts: localhost
+ tasks:
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ cidr: "147.75.201.78/32"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+  description: True if an IP address assignment was altered in any way (created or removed).
+ type: bool
+ sample: True
+ returned: success
+
+device_id:
+ type: str
+ description: UUID of the device associated with the specified IP address.
+ returned: success
+
+subnet:
+ description: Dict with data about the handled IP subnet.
+ type: dict
+ sample:
+ address: 147.75.90.241
+ address_family: 4
+    assigned_to: { href: /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 }
+ cidr: 31
+ created_at: '2017-08-07T15:15:30Z'
+ enabled: True
+ gateway: 147.75.90.240
+ href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f
+    id: 31eda960-0a16-4c0f-b196-f3dc4928529f
+ manageable: True
+ management: True
+ netmask: 255.255.255.254
+ network: 147.75.90.240
+ public: True
+ returned: success
+'''
+
+
+import uuid
+import re
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+PROJECT_MAX_DEVICES = 100
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'present']
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ if not is_valid_uuid(project_id):
+ raise Exception("Project ID {0} does not seem to be valid".format(project_id))
+
+ per_page = module.params.get('device_count')
+ return packet_conn.list_devices(
+ project_id, params={'per_page': per_page})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_id'):
+ _d_id = module.params.get('device_id')
+ if not is_valid_uuid(_d_id):
+ raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id))
+ return {'device_id': _d_id, 'hostname': None}
+ elif module.params.get('hostname'):
+ _hn = module.params.get('hostname')
+ if not is_valid_hostname(_hn):
+ raise Exception("Hostname '{0}' does not seem to be valid".format(_hn))
+ return {'hostname': _hn, 'device_id': None}
+ else:
+ return {'hostname': None, 'device_id': None}
+
+
+def parse_subnet_cidr(cidr):
+ if "/" not in cidr:
+ raise Exception("CIDR expression in wrong format, must be address/prefix_len")
+ addr, prefixlen = cidr.split("/")
+ try:
+ prefixlen = int(prefixlen)
+ except ValueError:
+        raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr))
+ return addr, prefixlen
+
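+# Example of the CIDR parsing above:
+def _parse_subnet_cidr_example():
+    return parse_subnet_cidr("147.75.201.78/32")  # -> ("147.75.201.78", 32)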
+
+def act_on_assignment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ specified_cidr = module.params.get("cidr")
+ address, prefixlen = parse_subnet_cidr(specified_cidr)
+
+ specified_identifier = get_specified_device_identifiers(module)
+
+ if module.check_mode:
+ return return_dict
+
+ if (specified_identifier['hostname'] is None) and (
+ specified_identifier['device_id'] is None):
+ if target_state == 'absent':
+ # The special case to release the IP from any assignment
+ for d in get_existing_devices(module, packet_conn):
+ for ia in d.ip_addresses:
+ if address == ia['address'] and prefixlen == ia['cidr']:
+ packet_conn.call_api(ia['href'], "DELETE")
+ return_dict['changed'] = True
+ return_dict['subnet'] = ia
+ return_dict['device_id'] = d.id
+ return return_dict
+ raise Exception("If you assign an address, you must specify either "
+ "target device ID or target unique hostname.")
+
+ if specified_identifier['device_id'] is not None:
+ device = packet_conn.get_device(specified_identifier['device_id'])
+ else:
+ all_devices = get_existing_devices(module, packet_conn)
+ hn = specified_identifier['hostname']
+ matching_devices = [d for d in all_devices if d.hostname == hn]
+        if len(matching_devices) > 1:
+            raise Exception("There is more than one device matching the given hostname {0}".format(hn))
+        if len(matching_devices) == 0:
+            raise Exception("There is no device matching the given hostname {0}".format(hn))
+ device = matching_devices[0]
+
+ return_dict['device_id'] = device.id
+ assignment_dicts = [i for i in device.ip_addresses
+ if i['address'] == address and i['cidr'] == prefixlen]
+ if len(assignment_dicts) > 1:
+ raise Exception("IP address {0} is assigned more than once for device {1}".format(
+ specified_cidr, device.hostname))
+
+ if target_state == "absent":
+ if len(assignment_dicts) == 1:
+ packet_conn.call_api(assignment_dicts[0]['href'], "DELETE")
+ return_dict['subnet'] = assignment_dicts[0]
+ return_dict['changed'] = True
+ elif target_state == "present":
+ if len(assignment_dicts) == 0:
+ new_assignment = packet_conn.call_api(
+ "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)})
+ return_dict['changed'] = True
+ return_dict['subnet'] = new_assignment
+ return return_dict
+
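+# Hedged summary of the two API calls issued above (paths and payload keys are
+# taken from the code; the address value is a hypothetical example):
+#   assign:   POST devices/{device_id}/ips with {"address": "147.75.201.78/32"}
+#   unassign: DELETE on the existing assignment's own 'href'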
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ device_id=dict(type='str'),
+ hostname=dict(type='str'),
+ project_id=dict(type='str'),
+ device_count=dict(type='int', default=PROJECT_MAX_DEVICES),
+ cidr=dict(type='str', required=True, aliases=['name']),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[('hostname', 'device_id')],
+ required_one_of=[['hostname', 'device_id', 'project_id']],
+ required_by=dict(
+ hostname=('project_id',),
+ ),
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_assignment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py
new file mode 100644
index 00000000..38d7ca76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2019, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_project
+
+short_description: Create/delete a project in Packet host.
+
+description:
+ - Create/delete a project in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#projects).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ payment_method:
+ description:
+      - Payment method is the name of one of the payment methods available to your user.
+ - When blank, the API assumes the default payment method.
+ type: str
+
+ auth_token:
+ description:
+      - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+ - Name for/of the project.
+ type: str
+
+ org_id:
+ description:
+ - UUID of the organization to create a project for.
+ - When blank, the API assumes the default organization.
+ type: str
+
+ id:
+ description:
+ - UUID of the project which you want to remove.
+ type: str
+
+ custom_data:
+ description:
+ - Custom data about the project to create.
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.40"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create new project
+ hosts: localhost
+ tasks:
+ community.general.packet_project:
+ name: "new project"
+
+- name: Create new project within non-default organization
+ hosts: localhost
+ tasks:
+ community.general.packet_project:
+ name: "my org project"
+ org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0
+
+- name: Remove project by id
+ hosts: localhost
+ tasks:
+ community.general.packet_project:
+ state: absent
+ id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+
+- name: Create new project with non-default billing method
+ hosts: localhost
+ tasks:
+ community.general.packet_project:
+ name: "newer project"
+ payment_method: "the other visa"
+'''
+
+RETURN = '''
+changed:
+ description: True if a project was created or removed.
+ type: bool
+ sample: True
+ returned: success
+
+name:
+ description: Name of addressed project.
+ type: str
+ returned: success
+
+id:
+ description: UUID of addressed project.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def act_on_project(target_state, module, packet_conn):
+ result_dict = {'changed': False}
+ given_id = module.params.get('id')
+ given_name = module.params.get('name')
+ if given_id:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_id == p.id]
+ else:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_name == p.name]
+
+ if target_state == 'present':
+ if len(matching_projects) == 0:
+ org_id = module.params.get('org_id')
+ custom_data = module.params.get('custom_data')
+ payment_method = module.params.get('payment_method')
+
+ if not org_id:
+ params = {
+ "name": given_name,
+ "payment_method_id": payment_method,
+ "customdata": custom_data
+ }
+ new_project_data = packet_conn.call_api("projects", "POST", params)
+ new_project = packet.Project(new_project_data, packet_conn)
+ else:
+ new_project = packet_conn.create_organization_project(
+ org_id=org_id,
+ name=given_name,
+ payment_method_id=payment_method,
+ customdata=custom_data
+ )
+
+ result_dict['changed'] = True
+ matching_projects.append(new_project)
+
+ result_dict['name'] = matching_projects[0].name
+ result_dict['id'] = matching_projects[0].id
+ else:
+ if len(matching_projects) > 1:
+            _msg = ("More than one project matched for module call with state = absent: "
+                    "{0}".format(to_native(matching_projects)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_projects) == 1:
+ p = matching_projects[0]
+ result_dict['name'] = p.name
+ result_dict['id'] = p.id
+ result_dict['changed'] = True
+ try:
+ p.delete()
+ except Exception as e:
+ _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format(
+ p.name, p.id, to_native(e)))
+ module.fail_json(msg=_msg)
+ return result_dict
+
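+# Minimal sketch of the creation payload built above for the default-org path
+# (keys as in the code above; values are hypothetical):
+def _project_params_sketch(name, payment_method=None, custom_data=None):
+    return {"name": name,
+            "payment_method_id": payment_method,
+            "customdata": custom_data}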
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ org_id=dict(type='str'),
+ payment_method=dict(type='str'),
+ custom_data=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id",)],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ]
+ )
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_project(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set project state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py
new file mode 100644
index 00000000..73233d89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# Copyright 2016 Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_sshkey
+short_description: Create/delete an SSH key in Packet host.
+description:
+ - Create/delete an SSH key in Packet host.
+ - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
+author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ label:
+ description:
+      - Label for the key. If you keep it empty, it will be read from the key string.
+ id:
+ description:
+ - UUID of the key which you want to remove.
+ fingerprint:
+ description:
+ - Fingerprint of the key which you want to remove.
+ key:
+ description:
+ - Public Key string ({type} {base64 encoded key} {description}).
+ key_file:
+ description:
+ - File with the public key.
+
+requirements:
+ - "python >= 2.6"
+ - packet-python
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create sshkey from string
+ hosts: localhost
+ tasks:
+ community.general.packet_sshkey:
+ key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
+
+- name: Create sshkey from file
+ hosts: localhost
+ tasks:
+ community.general.packet_sshkey:
+ label: key from file
+ key_file: ~/ff.pub
+
+- name: Remove sshkey by id
+ hosts: localhost
+ tasks:
+ community.general.packet_sshkey:
+ state: absent
+ id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+'''
+
+RETURN = '''
+changed:
+ description: True if a sshkey was created or removed.
+ type: bool
+ sample: True
+ returned: always
+sshkeys:
+ description: Information about sshkeys that were created/removed.
+ type: list
+ sample: [
+ {
+ "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
+ "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
+ "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
+ "label": "mynewkey33"
+ }
+ ]
+ returned: always
+''' # NOQA
+
+import os
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def serialize_sshkey(sshkey):
+ sshkey_data = {}
+ copy_keys = ['id', 'key', 'label', 'fingerprint']
+ for name in copy_keys:
+ sshkey_data[name] = getattr(sshkey, name)
+ return sshkey_data
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def load_key_string(key_str):
+ ret_dict = {}
+ key_str = key_str.strip()
+ ret_dict['key'] = key_str
+ cut_key = key_str.split()
+ if len(cut_key) in [2, 3]:
+ if len(cut_key) == 3:
+ ret_dict['label'] = cut_key[2]
+ else:
+ raise Exception("Public key %s is in wrong format" % key_str)
+ return ret_dict
+
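+# Sketch of the key-string parsing above (key material shortened and purely
+# illustrative):
+def _load_key_string_example():
+    parsed = load_key_string("ssh-rsa AAAAB3NzaC1yc2E exampleuser@host")
+    # parsed == {'key': 'ssh-rsa AAAAB3NzaC1yc2E exampleuser@host',
+    #            'label': 'exampleuser@host'}
+    return parsed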
+
+def get_sshkey_selector(module):
+ key_id = module.params.get('id')
+ if key_id:
+ if not is_valid_uuid(key_id):
+ raise Exception("sshkey ID %s is not valid UUID" % key_id)
+ selecting_fields = ['label', 'fingerprint', 'id', 'key']
+ select_dict = {}
+ for f in selecting_fields:
+ if module.params.get(f) is not None:
+ select_dict[f] = module.params.get(f)
+
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as _file:
+ loaded_key = load_key_string(_file.read())
+ select_dict['key'] = loaded_key['key']
+ if module.params.get('label') is None:
+ if loaded_key.get('label'):
+ select_dict['label'] = loaded_key['label']
+
+ def selector(k):
+ if 'key' in select_dict:
+ # if key string is specified, compare only the key strings
+ return k.key == select_dict['key']
+ else:
+ # if key string not specified, all the fields must match
+ return all([select_dict[f] == getattr(k, f) for f in select_dict])
+ return selector
+
+
+def act_on_sshkeys(target_state, module, packet_conn):
+ selector = get_sshkey_selector(module)
+ existing_sshkeys = packet_conn.list_ssh_keys()
+    matching_sshkeys = list(filter(selector, existing_sshkeys))  # list() so the py3 filter iterator can be compared and reused
+ changed = False
+ if target_state == 'present':
+ if matching_sshkeys == []:
+ # there is no key matching the fields from module call
+            # => create the key with the given label and key string
+ newkey = {}
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as f:
+ newkey = load_key_string(f.read())
+ if module.params.get('key'):
+ newkey = load_key_string(module.params.get('key'))
+ if module.params.get('label'):
+ newkey['label'] = module.params.get('label')
+ for param in ('label', 'key'):
+ if param not in newkey:
+ _msg = ("If you want to ensure a key is present, you must "
+ "supply both a label and a key string, either in "
+ "module params, or in a key file. %s is missing"
+ % param)
+ raise Exception(_msg)
+ matching_sshkeys = []
+ new_key_response = packet_conn.create_ssh_key(
+ newkey['label'], newkey['key'])
+ changed = True
+
+ matching_sshkeys.append(new_key_response)
+ else:
+ # state is 'absent' => delete matching keys
+ for k in matching_sshkeys:
+ try:
+ k.delete()
+ changed = True
+ except Exception as e:
+ _msg = ("while trying to remove sshkey %s, id %s %s, "
+ "got error: %s" %
+ (k.label, k.id, target_state, e))
+ raise Exception(_msg)
+
+ return {
+ 'changed': changed,
+ 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ label=dict(type='str', aliases=['name'], default=None),
+ id=dict(type='str', default=None),
+ fingerprint=dict(type='str', default=None),
+ key=dict(type='str', default=None, no_log=True),
+ key_file=dict(type='path', default=None),
+ ),
+ mutually_exclusive=[
+ ('label', 'id'),
+ ('label', 'fingerprint'),
+ ('id', 'fingerprint'),
+ ('key', 'fingerprint'),
+ ('key', 'id'),
+ ('key_file', 'key'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ try:
+ module.exit_json(**act_on_sshkeys(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(msg='failed to set sshkey state: %s' % str(e))
+ else:
+ module.fail_json(msg='%s is not a valid state for this module' % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py
new file mode 100644
index 00000000..2966139a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_volume
+
+short_description: Create/delete a volume in Packet host.
+
+description:
+ - Create/delete a volume in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#volumes).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Desired state of the volume.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+      - Selector for the API-generated name of the volume.
+ type: str
+
+ description:
+ description:
+ - User-defined description attribute for Packet volume.
+      - "It is used as an idempotent identifier - if a volume with the given
+         description exists, a new one is not created."
+ type: str
+
+ id:
+ description:
+ - UUID of a volume.
+ type: str
+
+ plan:
+ description:
+ - storage_1 for standard tier, storage_2 for premium (performance) tier.
+ - Tiers are described at U(https://www.packet.com/cloud/storage/).
+ choices: ['storage_1', 'storage_2']
+ default: 'storage_1'
+ type: str
+
+ facility:
+ description:
+ - Location of the volume.
+      - Volumes can only be attached to devices in the same location.
+ type: str
+
+ size:
+ description:
+ - Size of the volume in gigabytes.
+ type: int
+
+ locked:
+ description:
+ - Create new volume locked.
+ type: bool
+ default: False
+
+ billing_cycle:
+ description:
+ - Billing cycle for new volume.
+ choices: ['hourly', 'monthly']
+ default: 'hourly'
+ type: str
+
+ snapshot_policy:
+ description:
+ - Snapshot policy for new volume.
+ type: dict
+
+ suboptions:
+ snapshot_count:
+ description:
+ - How many snapshots to keep, a positive integer.
+ required: True
+ type: int
+
+ snapshot_frequency:
+ description:
+ - Frequency of snapshots.
+ required: True
+ choices: ["15min", "1hour", "1day", "1week", "1month", "1year"]
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+ vars:
+ volname: testvol123
+ project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ community.general.packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: 'ewr1'
+ plan: 'storage_1'
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+ register: result_create
+
+ - name: Delete volume
+ community.general.packet_volume:
+ id: "{{ result_create.id }}"
+ project_id: "{{ project_id }}"
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: UUID of specified volume
+ type: str
+ returned: success
+ sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
+name:
+ description: The API-generated name of the volume resource.
+ type: str
+ returned: if volume is attached/detached to/from some device
+ sample: "volume-a91dc506"
+description:
+ description: The user-defined description of the volume resource.
+ type: str
+ returned: success
+ sample: "Just another volume"
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+VOLUME_PLANS = ["storage_1", "storage_2"]
+VOLUME_STATES = ["present", "absent"]
+BILLING = ["hourly", "monthly"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(module):
+ if module.params.get('id'):
+ i = module.params.get('id')
+ if not is_valid_uuid(i):
+ raise Exception("Volume ID '{0}' is not a valid UUID".format(i))
+ return lambda v: v['id'] == i
+ elif module.params.get('name'):
+ n = module.params.get('name')
+ return lambda v: v['name'] == n
+ elif module.params.get('description'):
+ d = module.params.get('description')
+ return lambda v: v['description'] == d
+
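+# Sketch: the selector above is a plain predicate over the raw volume dicts
+# returned by the API, e.g. selection by name (hypothetical values):
+def _volume_selector_example():
+    volume = {'id': 'x', 'name': 'volume-a91dc506', 'description': 'demo'}
+    selector = lambda v: v['name'] == 'volume-a91dc506'
+    return selector(volume)  # True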
+
+def get_or_fail(params, key):
+ item = params.get(key)
+ if item is None:
+ raise Exception("{0} must be specified for new volume".format(key))
+ return item
+
+
+def act_on_volume(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ s = get_volume_selector(module)
+ project_id = module.params.get("project_id")
+ api_method = "projects/{0}/storage".format(project_id)
+ all_volumes = packet_conn.call_api(api_method, "GET")['volumes']
+ matching_volumes = [v for v in all_volumes if s(v)]
+
+ if target_state == "present":
+ if len(matching_volumes) == 0:
+ params = {
+ "description": get_or_fail(module.params, "description"),
+ "size": get_or_fail(module.params, "size"),
+ "plan": get_or_fail(module.params, "plan"),
+ "facility": get_or_fail(module.params, "facility"),
+ "locked": get_or_fail(module.params, "locked"),
+ "billing_cycle": get_or_fail(module.params, "billing_cycle"),
+ "snapshot_policies": module.params.get("snapshot_policy"),
+ }
+
+ new_volume_data = packet_conn.call_api(api_method, "POST", params)
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = new_volume_data[k]
+
+ else:
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = matching_volumes[0][k]
+
+ else:
+ if len(matching_volumes) > 1:
+            _msg = ("More than one volume matched the module call for absent state: {0}".format(
+                to_native(matching_volumes)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_volumes) == 1:
+ volume = matching_volumes[0]
+ packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE")
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = volume[k]
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str', default=None),
+ description=dict(type="str", default=None),
+ name=dict(type='str', default=None),
+ state=dict(choices=VOLUME_STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ project_id=dict(required=True),
+ plan=dict(choices=VOLUME_PLANS, default="storage_1"),
+ facility=dict(type="str"),
+ size=dict(type="int"),
+ locked=dict(type="bool", default=False),
+ snapshot_policy=dict(type='dict', default=None),
+ billing_cycle=dict(type='str', choices=BILLING, default="hourly"),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id", "description")],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ('id', 'description'),
+ ('name', 'description'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in VOLUME_STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_volume(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume state {0}: {1}".format(
+ state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
new file mode 100644
index 00000000..a1a38bb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_volume_attachment
+
+short_description: Attach/detach a volume to a device in the Packet host.
+
+description:
+ - Attach/detach a volume to a device in the Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/volumes/).
+ - "This module creates the attachment route in the Packet API. In order to discover
+ the block devices on the server, you have to run the Attach Scripts,
+ as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)."
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the attachment.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ project_id:
+ description:
+ - UUID of the project to which the device and volume belong.
+ type: str
+ required: true
+
+ volume:
+ description:
+ - Selector for the volume.
+      - It can be a UUID, an API-generated volume name, or a user-defined description string.
+ - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"'
+ type: str
+ required: true
+
+ device:
+ description:
+ - Selector for the device.
+ - It can be a UUID of the device, or a hostname.
+ - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"'
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+
+ vars:
+ volname: testvol
+ devname: testdev
+ project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: ewr1
+ plan: storage_1
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+
+ - name: Create a device
+ packet_device:
+ project_id: "{{ project_id }}"
+ hostnames: "{{ devname }}"
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: ewr1
+ state: present
+
+ - name: Attach testvol to testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+
+ - name: Detach testvol from testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+ state: absent
+'''
+
+RETURN = '''
+volume_id:
+ description: UUID of volume addressed by the module call.
+ type: str
+ returned: success
+
+device_id:
+ description: UUID of device addressed by the module call.
+ type: str
+ returned: success
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+STATES = ["present", "absent"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['name'] == spec or v['description'] == spec
+
+
+def get_device_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['hostname'] == spec
+
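The two selector helpers above dispatch on whether the given spec parses as a version-4 UUID: a UUID is matched against the resource's id, anything else against its name/description (volumes) or hostname (devices). A minimal sketch of that behavior, assuming the functions above are in scope; the UUID and volume record are illustrative:

    volumes = [{'id': '0b161e0b-8d21-4f1b-8f18-6b0b3c4c5c5a',
                'name': 'volume-0b161e0b',
                'description': 'my volume'}]

    by_id = get_volume_selector('0b161e0b-8d21-4f1b-8f18-6b0b3c4c5c5a')  # UUID: match on 'id'
    by_desc = get_volume_selector('my volume')                           # otherwise: name/description

    assert [v for v in volumes if by_id(v)] == volumes
    assert [v for v in volumes if by_desc(v)] == volumes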
+
+def do_attach(packet_conn, vol_id, dev_id):
+ api_method = "storage/{0}/attachments".format(vol_id)
+ packet_conn.call_api(
+ api_method,
+ params={"device_id": dev_id},
+ type="POST")
+
+
+def do_detach(packet_conn, vol, dev_id=None):
+ def dev_match(a):
+ return (dev_id is None) or (a['device']['id'] == dev_id)
+ for a in vol['attachments']:
+ if dev_match(a):
+ packet_conn.call_api(a['href'], type="DELETE")
+
+
+def validate_selected(l, resource_type, spec):
+ if len(l) > 1:
+ _msg = ("more than one {0} matches specification {1}: {2}".format(
+ resource_type, spec, l))
+ raise Exception(_msg)
+ if len(l) == 0:
+ _msg = "no {0} matches specification: {1}".format(resource_type, spec)
+ raise Exception(_msg)
+
+
+def get_attached_dev_ids(volume_dict):
+ if len(volume_dict['attachments']) == 0:
+ return []
+ else:
+ return [a['device']['id'] for a in volume_dict['attachments']]
+
+
+def act_on_volume_attachment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ volspec = module.params.get("volume")
+ devspec = module.params.get("device")
+ if devspec is None and target_state == 'present':
+ raise Exception("If you want to attach a volume, you must specify a device.")
+ project_id = module.params.get("project_id")
+ volumes_api_method = "projects/{0}/storage".format(project_id)
+ volumes = packet_conn.call_api(volumes_api_method,
+ params={'include': 'facility,attachments.device'})['volumes']
+ v_match = get_volume_selector(volspec)
+ matching_volumes = [v for v in volumes if v_match(v)]
+ validate_selected(matching_volumes, "volume", volspec)
+ volume = matching_volumes[0]
+ return_dict['volume_id'] = volume['id']
+
+ device = None
+ if devspec is not None:
+ devices_api_method = "projects/{0}/devices".format(project_id)
+ devices = packet_conn.call_api(devices_api_method)['devices']
+ d_match = get_device_selector(devspec)
+ matching_devices = [d for d in devices if d_match(d)]
+ validate_selected(matching_devices, "device", devspec)
+ device = matching_devices[0]
+ return_dict['device_id'] = device['id']
+
+ attached_device_ids = get_attached_dev_ids(volume)
+
+ if target_state == "present":
+ if len(attached_device_ids) == 0:
+ do_attach(packet_conn, volume['id'], device['id'])
+ return_dict['changed'] = True
+ elif device['id'] not in attached_device_ids:
+            # Don't reattach a volume that is already attached to a different device.
+            # Rather than force-detaching it, fail when state == 'present'.
+ raise Exception("volume {0} is already attached to device {1}".format(
+ volume, attached_device_ids))
+ else:
+ if device is None:
+ if len(attached_device_ids) > 0:
+ do_detach(packet_conn, volume)
+ return_dict['changed'] = True
+ elif device['id'] in attached_device_ids:
+ do_detach(packet_conn, volume, device['id'])
+ return_dict['changed'] = True
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ volume=dict(type="str", required=True),
+ project_id=dict(type="str", required=True),
+ device=dict(type="str"),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(
+ **act_on_volume_attachment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py
new file mode 100644
index 00000000..0931ddc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Afterburn <https://github.com/afterburn>
+# Copyright: (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# Copyright: (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacman
+short_description: Manage packages with I(pacman)
+description:
+ - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants.
+author:
+ - Indrajit Raychaudhuri (@indrajitr)
+ - Aaron Bull Schaefer (@elasticdog) <aaron@elasticdog.com>
+ - Maxime de Roucy (@tchernomax)
+options:
+ name:
+ description:
+ - Name or list of names of the package(s) or file(s) to install, upgrade, or remove.
+ Can't be used in combination with C(upgrade).
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Desired state of the package.
+ default: present
+ choices: [ absent, latest, present, installed, removed ]
+ type: str
+
+ force:
+ description:
+      - When removing packages, force removal without any dependency checks.
+        Same as `extra_args="--nodeps --nodeps"`.
+        When updating the cache, force a redownload of the repo databases.
+        Same as `update_cache_extra_args="--refresh --refresh"`.
+ default: no
+ type: bool
+
+ extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(state).
+ default:
+ type: str
+
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists.
+ - This can be run as part of a package installation or as a separate step.
+ default: no
+ type: bool
+ aliases: [ update-cache ]
+
+ update_cache_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(update_cache).
+ default:
+ type: str
+
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ Can't be used in combination with C(name).
+ default: no
+ type: bool
+
+ upgrade_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(upgrade).
+ default:
+ type: str
+
+notes:
+  - When used with a `loop:`, each package will be processed individually;
+    it is much more efficient to pass the list directly to the `name` option.
+'''
+
+RETURN = '''
+packages:
+  description: A list of packages that have been changed.
+ returned: when upgrade is set to yes
+ type: list
+ sample: [ package, other-package ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo from repo
+ community.general.pacman:
+ name: foo
+ state: present
+
+- name: Install package bar from file
+ community.general.pacman:
+ name: ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Install package foo from repo and bar from file
+ community.general.pacman:
+ name:
+ - foo
+ - ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Upgrade package foo
+ community.general.pacman:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Remove packages foo and bar
+ community.general.pacman:
+ name:
+ - foo
+ - bar
+ state: absent
+
+- name: Recursively remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ extra_args: --recursive
+
+- name: Run the equivalent of "pacman -Sy" as a separate step
+ community.general.pacman:
+ update_cache: yes
+
+- name: Run the equivalent of "pacman -Su" as a separate step
+ community.general.pacman:
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Syu" as a separate step
+ community.general.pacman:
+ update_cache: yes
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Rdd", force remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ force: yes
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_version(pacman_output):
+ """Take pacman -Q or pacman -S output and get the Version"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[1]
+ return None
+
+
+def get_name(module, pacman_output):
+ """Take pacman -Q or pacman -S output and get the package name"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[0]
+    module.fail_json(msg="get_name: failed to retrieve package name from pacman output")
+
+
+def query_package(module, pacman_path, name, state="present"):
+    """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second
+    boolean to indicate if the package is up-to-date, and a third boolean to indicate whether online information was available.
+    """
+ if state == "present":
+ lcmd = "%s --query %s" % (pacman_path, name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False, False
+ else:
+            # a zero exit code doesn't always mean the queried package itself is installed:
+            # the package name queried may be "provided" by another package
+ installed_name = get_name(module, lstdout)
+ if installed_name != name:
+ return False, False, False
+
+ # get the version installed locally (if any)
+ lversion = get_version(lstdout)
+
+ rcmd = "%s --sync --print-format \"%%n %%v\" %s" % (pacman_path, name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ # get the version in the repository
+ rversion = get_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally, and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion), False
+
+    # the package is installed, but the remote version could not be fetched; the last True flags the error
+ return True, True, True
+
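A short sketch of how a caller can branch on the three booleans returned by query_package() (assumes module and pacman_path are set up as in main() below; the package name is illustrative):

    installed, updated, unknown = query_package(module, pacman_path, 'openssh')
    if not installed:
        pass  # schedule the package for installation
    elif unknown:
        pass  # installed, but the repository version could not be fetched
    elif not updated:
        pass  # a newer version is available in the repositories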
+
+def update_package_db(module, pacman_path):
+ if module.params['force']:
+ module.params["update_cache_extra_args"] += " --refresh --refresh"
+
+ cmd = "%s --sync --refresh %s" % (pacman_path, module.params["update_cache_extra_args"])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def upgrade(module, pacman_path):
+ cmdupgrade = "%s --sync --sysupgrade --quiet --noconfirm %s" % (pacman_path, module.params["upgrade_extra_args"])
+ cmdneedrefresh = "%s --query --upgrades" % (pacman_path)
+ rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
+ data = stdout.split('\n')
+ data.remove('')
+ packages = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if rc == 0:
+ # Match lines of `pacman -Qu` output of the form:
+ # (package name) (before version-release) -> (after version-release)
+ # e.g., "ansible 2.7.1-1 -> 2.7.2-1"
+ regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)')
+ for p in data:
+ m = regex.search(p)
+ packages.append(m.group(1))
+ if module._diff:
+ diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
+ diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
+ if module.check_mode:
+ module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
+
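The regex used in upgrade() splits each `pacman -Qu` line into the package name, the installed version-release, and the available version-release; a standalone check with a sample line:

    import re

    regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)')
    m = regex.search('ansible 2.7.1-1 -> 2.7.2-1')
    assert m.group(1) == 'ansible'   # package name
    assert m.group(2) == '2.7.1-1'   # currently installed version-release
    assert m.group(3) == '2.7.2-1'   # version-release available in the repos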
+
+def remove_packages(module, pacman_path, packages):
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if module.params["force"]:
+ module.params["extra_args"] += " --nodeps --nodeps"
+
+ remove_c = 0
+    # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s --remove --noconfirm --noprogressbar %s %s" % (pacman_path, module.params["extra_args"], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ if module._diff:
+ d = stdout.split('\n')[2].split(' ')[2:]
+ for i, pkg in enumerate(d):
+ d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
+ diff['before'] += "%s\n" % pkg
+ data.append('\n'.join(d))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pacman_path, state, packages, package_files):
+ install_c = 0
+ package_err = []
+ message = ""
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ to_install_repos = []
+ to_install_files = []
+ for i, package in enumerate(packages):
+        # skip if the package is installed and state == present, or if state == latest and it is up-to-date
+ installed, updated, latestError = query_package(module, pacman_path, package)
+ if latestError and state == 'latest':
+ package_err.append(package)
+
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if package_files[i]:
+ to_install_files.append(package_files[i])
+ else:
+ to_install_repos.append(package)
+
+ if to_install_repos:
+ cmd = "%s --sync --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_repos))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
+
+        # Since we pass `--needed`, pacman prints a single line of ` there is nothing to do` when no change is performed.
+        # The check for > 3 is here because we pick the 4th line of output in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_repos)
+
+ if to_install_files:
+ cmd = "%s --upgrade --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_files))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
+
+        # Since we pass `--needed`, pacman prints a single line of ` there is nothing to do` when no change is performed.
+        # The check for > 3 is here because we pick the 4th line of output in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_files)
+
+ if state == 'latest' and len(package_err) > 0:
+        message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (", ".join(package_err))
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
+
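The diff handling in install_packages() takes the 4th line of pacman's transaction summary and strips the version suffix from each entry; a sketch with illustrative stdout (the exact layout of pacman's output may vary between versions):

    import re

    stdout = ("resolving dependencies...\n"
              "looking for conflicting packages...\n"
              "\n"
              "Packages (2) foo-1.0-1  bar-2.3-1\n")
    lines = stdout.split('\n')
    if len(lines) > 3:
        names = [p for p in lines[3].split(' ')[2:] if p != '']
        names = [re.sub('-[0-9].*$', '', n.split('/')[-1]) for n in names]
        assert names == ['foo', 'bar']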
+
+def check_packages(module, pacman_path, packages, state):
+ would_be_changed = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ 'before_header': '',
+ 'after_header': ''
+ }
+
+ for package in packages:
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+
+ if module._diff and (state == 'removed'):
+ diff['before_header'] = 'removed'
+ diff['before'] = '\n'.join(would_be_changed) + '\n'
+ elif module._diff and ((state == 'present') or (state == 'latest')):
+ diff['after_header'] = 'installed'
+ diff['after'] = '\n'.join(would_be_changed) + '\n'
+
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state), diff=diff)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
+
+
+def expand_package_groups(module, pacman_path, pkgs):
+ expanded = []
+
+ __, stdout, __ = module.run_command([pacman_path, "--sync", "--groups", "--quiet"], check_rc=True)
+ available_groups = stdout.splitlines()
+
+ for pkg in pkgs:
+ if pkg: # avoid empty strings
+ if pkg in available_groups:
+ # A group was found matching the package name: expand it
+ cmd = [pacman_path, "--sync", "--groups", "--quiet", pkg]
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ expanded.extend([name.strip() for name in stdout.splitlines()])
+ else:
+ expanded.append(pkg)
+
+ return expanded
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=['pkg', 'package']),
+ state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']),
+ force=dict(type='bool', default=False),
+ extra_args=dict(type='str', default=''),
+ upgrade=dict(type='bool', default=False),
+ upgrade_extra_args=dict(type='str', default=''),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ update_cache_extra_args=dict(type='str', default=''),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True,
+ )
+
+ pacman_path = module.get_bin_path('pacman', True)
+ module.run_command_environ_update = dict(LC_ALL='C')
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p["update_cache"] and not module.check_mode:
+ update_package_db(module, pacman_path)
+ if not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Updated the package master lists')
+
+ if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Would have updated the package cache')
+
+ if p['upgrade']:
+ upgrade(module, pacman_path)
+
+ if p['name']:
+ pkgs = expand_package_groups(module, pacman_path, p['name'])
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ if not pkg: # avoid empty strings
+ continue
+ elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z|zst))?$", pkg):
+ # The package given is a filename, extract the raw pkg name from
+ # it and store the filename
+ pkg_files.append(pkg)
+ pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
+ else:
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pacman_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
+ elif p['state'] == 'absent':
+ remove_packages(module, pacman_path, pkgs)
+ else:
+ module.exit_json(changed=False, msg="No package specified to work on.")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py
new file mode 100644
index 00000000..306b596b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+  - This module will let you create PagerDuty maintenance windows.
+author:
+ - "Andrew Newdigate (@suprememoocow)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+ - "Bruce Pennypacker (@bpennypacker)"
+requirements:
+ - PagerDuty API access
+options:
+ state:
+ type: str
+ description:
+ - Create a maintenance window or get a list of ongoing windows.
+ required: true
+ choices: [ "running", "started", "ongoing", "absent" ]
+ name:
+ type: str
+ description:
+      - PagerDuty unique subdomain. Obsolete. It is not used with the PagerDuty REST API v2.
+ user:
+ type: str
+ description:
+      - PagerDuty user ID. Obsolete. Please use I(token) for authorization.
+ token:
+ type: str
+ description:
+      - A PagerDuty token, generated on the PagerDuty site. It is used for authorization.
+ required: true
+ requester_id:
+ type: str
+ description:
+      - ID of the user making the request. Only needed when creating a maintenance window.
+ service:
+ type: list
+ description:
+      - A comma-separated list of PagerDuty service IDs.
+ aliases: [ services ]
+ window_id:
+ type: str
+ description:
+      - ID of the maintenance window. Only needed when I(state) is C(absent).
+ hours:
+ type: str
+ description:
+ - Length of maintenance window in hours.
+ default: '1'
+ minutes:
+ type: str
+ description:
+ - Maintenance window in minutes (this is added to the hours).
+ default: '0'
+ desc:
+ type: str
+ description:
+ - Short description of maintenance window.
+ default: Created by Ansible
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: List ongoing maintenance windows using a token
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ state: ongoing
+
+- name: Create a 1 hour maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ token: yourtoken
+ state: running
+ service: FOO123
+
+- name: Create a 5 minute maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ hours: 0
+ minutes: 5
+ state: running
+ service: FOO123
+
+
+- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment"
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: running
+ service: FOO123
+ hours: 4
+ desc: deployment
+ register: pd_window
+
+- name: Delete the previous maintenance window
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: absent
+ window_id: '{{ pd_window.result.maintenance_window.id }}'
+
+# Delete a maintenance window from a separate playbook than its creation,
+# and if it is the only existing maintenance window
+- name: Check
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: ongoing
+ register: pd_window
+
+- name: Delete
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: absent
+ window_id: "{{ pd_window.result.maintenance_windows[0].id }}"
+'''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class PagerDutyRequest(object):
+ def __init__(self, module, name, user, token):
+ self.module = module
+ self.name = name
+ self.user = user
+ self.token = token
+ self.headers = {
+ 'Content-Type': 'application/json',
+ "Authorization": self._auth_header(),
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ def ongoing(self, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing"
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, False
+
+ def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url):
+ if not requester_id:
+            self.module.fail_json(msg="requester_id is required when a maintenance window should be created")
+
+ url = 'https://api.pagerduty.com/maintenance_windows'
+
+ headers = dict(self.headers)
+ headers.update({'From': requester_id})
+
+ start, end = self._compute_start_end_time(hours, minutes)
+ services = self._create_services_payload(service)
+
+ request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}}
+
+ data = json.dumps(request_data)
+ response, info = http_call(self.module, url, data=data, headers=headers, method='POST')
+ if info['status'] != 201:
+ self.module.fail_json(msg="failed to create the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _create_services_payload(self, service):
+ if (isinstance(service, list)):
+ return [{'id': s, 'type': 'service_reference'} for s in service]
+ else:
+ return [{'id': service, 'type': 'service_reference'}]
+
+ def _compute_start_end_time(self, hours, minutes):
+ now = datetime.datetime.utcnow()
+ later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
+ start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+ end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
+ return start, end
+
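A worked example of the start/end computation above with a fixed clock (illustrative; the real method uses the current UTC time):

    import datetime

    now = datetime.datetime(2020, 1, 1, 12, 0, 0)
    later = now + datetime.timedelta(hours=1, minutes=30)
    assert now.strftime("%Y-%m-%dT%H:%M:%SZ") == '2020-01-01T12:00:00Z'
    assert later.strftime("%Y-%m-%dT%H:%M:%SZ") == '2020-01-01T13:30:00Z'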
+ def absent(self, window_id, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows/" + window_id
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers, method='DELETE')
+ if info['status'] != 204:
+ self.module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _auth_header(self):
+ return "Token token=%s" % self.token
+
+ def _read_response(self, response):
+ try:
+ return json.loads(response.read())
+ except Exception:
+ return ""
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
+ name=dict(required=False),
+ user=dict(required=False),
+ token=dict(required=True, no_log=True),
+ service=dict(required=False, type='list', aliases=["services"]),
+ window_id=dict(required=False),
+ requester_id=dict(required=False),
+ hours=dict(default='1', required=False), # @TODO change to int?
+ minutes=dict(default='0', required=False), # @TODO change to int?
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ user = module.params['user']
+ service = module.params['service']
+ window_id = module.params['window_id']
+ hours = module.params['hours']
+ minutes = module.params['minutes']
+ token = module.params['token']
+ desc = module.params['desc']
+ requester_id = module.params['requester_id']
+
+ pd = PagerDutyRequest(module, name, user, token)
+
+ if state == "running" or state == "started":
+ if not service:
+ module.fail_json(msg="service not specified")
+ (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed = True
+
+ if state == "ongoing":
+ (rc, out, changed) = pd.ongoing()
+
+ if state == "absent":
+ (rc, out, changed) = pd.absent(window_id)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
new file mode 100644
index 00000000..736ada5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+ - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
+author:
+ - "Amanpreet Singh (@ApsOps)"
+requirements:
+ - PagerDuty API access
+options:
+ name:
+ type: str
+ description:
+      - PagerDuty unique subdomain. Obsolete. It is not used with the PagerDuty REST API v2.
+ service_id:
+ type: str
+ description:
+      - ID of the PagerDuty service where incidents will be triggered, acknowledged or resolved.
+ required: true
+ service_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key).
+ integration_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services.
+      - This is the "integration key" listed on the "Integrations" tab of a PagerDuty service.
+ state:
+ type: str
+ description:
+ - Type of event to be sent.
+ required: true
+ choices:
+ - 'triggered'
+ - 'acknowledged'
+ - 'resolved'
+ api_key:
+ type: str
+ description:
+      - The PagerDuty API key (read-only access), generated on the PagerDuty site.
+ required: true
+ desc:
+ type: str
+ description:
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+ will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
+ The maximum length is 1024 characters.
+ - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ required: false
+ default: Created via Ansible
+ incident_key:
+ type: str
+ description:
+ - Identifies the incident to which this I(state) should be applied.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
+ open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
+ problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a
+ trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ required: false
+ client:
+ type: str
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ type: str
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Trigger an incident with just the basic options
+ community.general.pagerduty_alert:
+ name: companyabc
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+
+- name: Trigger an incident with more options
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+ incident_key: somekey
+ client: Sample Monitoring Service
+ client_url: http://service.example.com
+
+- name: Acknowledge an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: acknowledged
+ incident_key: somekey
+ desc: "some text for incident's log"
+
+- name: Resolve an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: resolved
+ incident_key: somekey
+ desc: "some text for incident's log"
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse
+
+
+def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url):
+ url = 'https://api.pagerduty.com/incidents'
+ headers = {
+ "Content-type": "application/json",
+ "Authorization": "Token token=%s" % api_key,
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ params = {
+ 'service_ids[]': service_id,
+ 'sort_by': 'incident_number:desc',
+ 'time_zone': 'UTC'
+ }
+ if incident_key:
+ params['incident_key'] = incident_key
+
+ url_parts = list(urlparse(url))
+ url_parts[4] = urlencode(params, True)
+
+ url = urlunparse(url_parts)
+
+ response, info = http_call(module, url, method='get', headers=headers)
+
+ if info['status'] != 200:
+        module.fail_json(msg="failed to check current incident status. "
+                             "Reason: %s" % info['msg'])
+
+ incidents = json.loads(response.read())["incidents"]
+ msg = "No corresponding incident"
+
+ if len(incidents) == 0:
+ if state in ('acknowledged', 'resolved'):
+ return msg, False
+ return msg, True
+ elif state != incidents[0]["status"]:
+ return incidents[0], True
+
+ return incidents[0], False
+
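For reference, the query-string assembly in check() produces a URL of the following shape (illustrative service ID; on Python 3.7+ the parameter order follows dict insertion order):

    from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse

    params = {'service_ids[]': 'PDservice', 'sort_by': 'incident_number:desc', 'time_zone': 'UTC'}
    url_parts = list(urlparse('https://api.pagerduty.com/incidents'))
    url_parts[4] = urlencode(params, True)
    # urlunparse(url_parts) ==
    # 'https://api.pagerduty.com/incidents?service_ids%5B%5D=PDservice&sort_by=incident_number%3Adesc&time_zone=UTC'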
+
+def send_event(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
+ url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ headers = {
+ "Content-type": "application/json"
+ }
+
+ data = {
+ "service_key": service_key,
+ "event_type": event_type,
+ "incident_key": incident_key,
+ "description": desc,
+ "client": client,
+ "client_url": client_url
+ }
+
+ response, info = fetch_url(module, url, method='post',
+ headers=headers, data=json.dumps(data))
+ if info['status'] != 200:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out
+
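Filled in with the values from the trigger example in EXAMPLES above, the body that send_event() posts looks like this:

    data = {
        'service_key': 'xxx',  # the integration key
        'event_type': 'trigger',
        'incident_key': 'somekey',
        'description': 'problem that led to this trigger',
        'client': 'Sample Monitoring Service',
        'client_url': 'http://service.example.com',
    }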
+
+def main():
+ module = AnsibleModule(
+        _fail_msg = ("If the Packet API token is not set in the environment variable {0}, "
+                     "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ service_id=dict(required=True),
+ service_key=dict(required=False, no_log=True),
+ integration_key=dict(required=False, no_log=True),
+ api_key=dict(required=True, no_log=True),
+ state=dict(required=True,
+ choices=['triggered', 'acknowledged', 'resolved']),
+ client=dict(required=False, default=None),
+ client_url=dict(required=False, default=None),
+ desc=dict(required=False, default='Created via Ansible'),
+ incident_key=dict(required=False, default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ service_id = module.params['service_id']
+ integration_key = module.params['integration_key']
+ service_key = module.params['service_key']
+ api_key = module.params['api_key']
+ state = module.params['state']
+ client = module.params['client']
+ client_url = module.params['client_url']
+ desc = module.params['desc']
+ incident_key = module.params['incident_key']
+
+ if integration_key is None:
+ if service_key is not None:
+ integration_key = service_key
+            module.warn('"service_key" is an obsolete parameter and will be removed.'
+                        ' Please use "integration_key" instead.')
+ else:
+            module.fail_json(msg="'integration_key' is a required parameter")
+
+ state_event_dict = {
+ 'triggered': 'trigger',
+ 'acknowledged': 'acknowledge',
+ 'resolved': 'resolve'
+ }
+
+ event_type = state_event_dict[state]
+
+ if event_type != 'trigger' and incident_key is None:
+ module.fail_json(msg="incident_key is required for "
+ "acknowledge or resolve events")
+
+ out, changed = check(module, name, state, service_id,
+ integration_key, api_key, incident_key)
+
+ if not module.check_mode and changed is True:
+ out = send_event(module, integration_key, event_type, desc,
+ incident_key, client, client_url)
+
+ module.exit_json(result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py
new file mode 100644
index 00000000..358a6961
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: pagerduty_change
+short_description: Track a code or infrastructure change as a PagerDuty change event
+version_added: 1.3.0
+description:
+ - This module will let you create a PagerDuty change event each time the module is run.
+ - This is not an idempotent action and a new change event will be created each time it is run.
+author:
+ - Adam Vaughan (@adamvaughan)
+requirements:
+ - PagerDuty integration key
+options:
+ integration_key:
+ description:
+ - The integration key that identifies the service the change was made to.
+ This can be found by adding an integration to a service in PagerDuty.
+ required: true
+ type: str
+ summary:
+ description:
+ - A short description of the change that occurred.
+ required: true
+ type: str
+ source:
+ description:
+ - The source of the change event.
+ default: Ansible
+ type: str
+ user:
+ description:
+ - The name of the user or process that triggered this deployment.
+ type: str
+ repo:
+ description:
+ - The URL of the project repository.
+ required: false
+ type: str
+ revision:
+ description:
+ - An identifier of the revision being deployed, typically a number or SHA from a version control system.
+ required: false
+ type: str
+ environment:
+ description:
+ - The environment name, typically C(production), C(staging), etc.
+ required: false
+ type: str
+ link_url:
+ description:
+ - A URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ link_text:
+ description:
+ - Descriptive text for a URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ url:
+ description:
+ - URL to submit the change event to.
+ required: false
+ default: https://events.pagerduty.com/v2/change/enqueue
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target URL will not be validated.
+ This should only be used on personally controlled sites using self-signed certificates.
+ required: false
+ default: yes
+ type: bool
+notes:
+  - Supports C(check_mode). Note that check mode does nothing except return C(changed=true) when the I(url) appears to be correct.
+'''
+
+EXAMPLES = '''
+- name: Track the deployment as a PagerDuty change event
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+
+- name: Track the deployment as a PagerDuty change event with more details
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+ source: Ansible Deploy
+ user: ansible
+ repo: github.com/ansible/ansible
+ revision: '4.2'
+ environment: production
+ link_url: https://github.com/ansible-collections/community.general/pull/1269
+ link_text: View changes on GitHub
+'''
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.basic import AnsibleModule
+from datetime import datetime
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ integration_key=dict(required=True, type='str', no_log=True),
+ summary=dict(required=True, type='str'),
+ source=dict(required=False, default='Ansible', type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ environment=dict(required=False, type='str'),
+ link_url=dict(required=False, type='str'),
+ link_text=dict(required=False, type='str'),
+ url=dict(required=False,
+ default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/
+
+ url = module.params['url']
+ headers = {'Content-Type': 'application/json'}
+
+ if module.check_mode:
+ _response, info = fetch_url(
+ module, url, headers=headers, method='POST')
+
+ if info['status'] == 400:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status']))
+
+ custom_details = {}
+
+ if module.params['user']:
+ custom_details['user'] = module.params['user']
+
+ if module.params['repo']:
+ custom_details['repo'] = module.params['repo']
+
+ if module.params['revision']:
+ custom_details['revision'] = module.params['revision']
+
+ if module.params['environment']:
+ custom_details['environment'] = module.params['environment']
+
+ now = datetime.utcnow()
+ timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
+ payload = {
+ 'summary': module.params['summary'],
+ 'source': module.params['source'],
+ 'timestamp': timestamp,
+ 'custom_details': custom_details
+ }
+
+ event = {
+ 'routing_key': module.params['integration_key'],
+ 'payload': payload
+ }
+
+ if module.params['link_url']:
+ link = {
+ 'href': module.params['link_url']
+ }
+
+ if module.params['link_text']:
+ link['text'] = module.params['link_text']
+
+ event['links'] = [link]
+
+ _response, info = fetch_url(
+ module, url, data=module.jsonify(event), headers=headers, method='POST')
+
+ if info['status'] == 202:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Creating PagerDuty change event failed with %d' % (info['status']))
+
+
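Assembled as above, the change event that gets POSTed to the enqueue endpoint has roughly this shape (values taken from the second example in EXAMPLES; the timestamp is illustrative):

    event = {
        'routing_key': 'abc123abc123abc123abc123abc123ab',
        'payload': {
            'summary': 'The application was deployed',
            'source': 'Ansible Deploy',
            'timestamp': '2020-11-27T15:40:00.000000Z',
            'custom_details': {'user': 'ansible', 'repo': 'github.com/ansible/ansible',
                               'revision': '4.2', 'environment': 'production'},
        },
        'links': [{'href': 'https://github.com/ansible-collections/community.general/pull/1269',
                   'text': 'View changes on GitHub'}],
    }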
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py
new file mode 100644
index 00000000..4b20a321
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: pagerduty_user
+short_description: Manage a user account on PagerDuty
+description:
+ - This module manages the creation/removal of a user account on PagerDuty.
+version_added: '1.3.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - pdpyras python module = 4.1.1
+ - PagerDuty API Access
+options:
+ access_token:
+ description:
+ - An API access token to authenticate with the PagerDuty REST API.
+ required: true
+ type: str
+ pd_user:
+ description:
+ - Name of the user in PagerDuty.
+ required: true
+ type: str
+ pd_email:
+ description:
+ - The user's email address.
+ - I(pd_email) is the unique identifier used and cannot be updated using this module.
+ required: true
+ type: str
+ pd_role:
+ description:
+ - The user's role.
+ choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']
+ default: 'responder'
+ type: str
+ state:
+ description:
+ - State of the user.
+ - On C(present), it creates a user if the user doesn't exist.
+ - On C(absent), it removes a user if the account exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ pd_teams:
+ description:
+ - The teams to which the user belongs.
+ - Required if I(state=present).
+ type: list
+ elements: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Create a user account on PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ pd_role: user_pd_role
+ pd_teams: user_pd_teams
+ state: "present"
+
+- name: Remove a user account from PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ state: "absent"
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import traceback
+from os import path
+
+try:
+ from pdpyras import APISession
+ HAS_PD_PY = True
+except ImportError:
+ HAS_PD_PY = False
+ PD_IMPORT_ERR = traceback.format_exc()
+
+try:
+ from pdpyras import PDClientError
+ HAS_PD_CLIENT_ERR = True
+except ImportError:
+ HAS_PD_CLIENT_ERR = False
+ PD_CLIENT_ERR_IMPORT_ERR = traceback.format_exc()
+
+
+class PagerDutyUser(object):
+ def __init__(self, module, session):
+ self._module = module
+ self._apisession = session
+
+ # check if the user exists
+ def does_user_exist(self, pd_email):
+ for user in self._apisession.iter_all('users'):
+ if user['email'] == pd_email:
+ return user['id']
+
+ # create a user account on PD
+ def add_pd_user(self, pd_name, pd_email, pd_role):
+ try:
+ user = self._apisession.persist('users', 'email', {
+ "name": pd_name,
+ "email": pd_email,
+ "type": "user",
+ "role": pd_role,
+ })
+ return user
+
+ except PDClientError as e:
+ if e.response.status_code == 400:
+ self._module.fail_json(
+ msg="Failed to add %s due to invalid argument" % (pd_name))
+ if e.response.status_code == 401:
+ self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name))
+ if e.response.status_code == 402:
+ self._module.fail_json(
+                    msg="Failed to add %s because the API token cannot perform that action" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+                    msg="Failed to add %s because the API token cannot access the requested resource" % (pd_name))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+                    msg="Failed to add %s because the API rate limit was reached" % (pd_name))
+
+ # delete a user account from PD
+ def delete_user(self, pd_user_id, pd_name):
+ try:
+ user_path = path.join('/users/', pd_user_id)
+ self._apisession.rdelete(user_path)
+
+ except PDClientError as e:
+ if e.response.status_code == 404:
+ self._module.fail_json(
+                    msg="Failed to remove %s because the user was not found" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+                    msg="Failed to remove %s because the API token cannot access the requested resource" % (pd_name))
+ if e.response.status_code == 401:
+ # print out the list of incidents
+ pd_incidents = self.get_incidents_assigned_to_user(pd_user_id)
+                self._module.fail_json(msg="Failed to remove %s because the user has assigned incidents %s" % (pd_name, pd_incidents))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+                    msg="Failed to remove %s because the API rate limit was reached" % (pd_name))
+
+ # get incidents assigned to a user
+ def get_incidents_assigned_to_user(self, pd_user_id):
+ incident_info = {}
+ incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]})
+
+ for incident in incidents:
+ incident_info = {
+ 'title': incident['title'],
+ 'key': incident['incident_key'],
+ 'status': incident['status']
+ }
+ return incident_info
+
+ # add a user to a team/teams
+ def add_user_to_teams(self, pd_user_id, pd_teams, pd_role):
+ updated_team = None
+ for team in pd_teams:
+ team_info = self._apisession.find('teams', team, attribute='name')
+ if team_info is not None:
+ try:
+ updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={
+ 'role': pd_role
+ })
+ except PDClientError:
+ updated_team = None
+ return updated_team
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ access_token=dict(type='str', required=True, no_log=True),
+ pd_user=dict(type='str', required=True),
+ pd_email=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ pd_role=dict(type='str', default='responder',
+ choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']),
+ pd_teams=dict(type='list', elements='str', required=False)),
+ required_if=[['state', 'present', ['pd_teams']], ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PD_PY:
+ module.fail_json(msg=missing_required_lib('pdpyras', url='https://github.com/PagerDuty/pdpyras'), exception=PD_IMPORT_ERR)
+
+ if not HAS_PD_CLIENT_ERR:
+ module.fail_json(msg=missing_required_lib('PDClientError', url='https://github.com/PagerDuty/pdpyras'), exception=PD_CLIENT_ERR_IMPORT_ERR)
+
+ access_token = module.params['access_token']
+ pd_user = module.params['pd_user']
+ pd_email = module.params['pd_email']
+ state = module.params['state']
+ pd_role = module.params['pd_role']
+ pd_teams = module.params['pd_teams']
+
+ if pd_role:
+ pd_role_gui_value = {
+ 'global_admin': 'admin',
+ 'manager': 'user',
+ 'responder': 'limited_user',
+ 'observer': 'observer',
+ 'stakeholder': 'read_only_user',
+ 'limited_stakeholder': 'read_only_limited_user',
+ 'restricted_access': 'restricted_access'
+ }
+ pd_role = pd_role_gui_value[pd_role]
+
+ # authenticate with PD API
+ try:
+ session = APISession(access_token)
+ except PDClientError as e:
+ module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e)
+
+ user = PagerDutyUser(module, session)
+
+ user_exists = user.does_user_exist(pd_email)
+
+ if user_exists:
+ if state == "absent":
+ # remove user
+ if not module.check_mode:
+ user.delete_user(user_exists, pd_user)
+ module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user)
+ else:
+ module.exit_json(changed=False, result="User %s already exists." % pd_user)
+
+ # in case that the user does not exist
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="User %s was not found." % pd_user)
+
+ else:
+ # add user, adds user with the default notification rule and contact info (email)
+ if not module.check_mode:
+ user.add_pd_user(pd_user, pd_email, pd_role)
+ # get user's id
+ pd_user_id = user.does_user_exist(pd_email)
+ # add a user to the team/s
+ user.add_user_to_teams(pd_user_id, pd_teams, pd_role)
+            module.exit_json(changed=True, result="Successfully created & added user %s to team(s) %s" % (pd_user, pd_teams))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py
new file mode 100644
index 00000000..c63493ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pam_limits
+author:
+ - "Sebastien Rohaut (@usawa)"
+short_description: Modify Linux PAM limits
+description:
+ - The C(pam_limits) module modifies PAM limits. The default file is
+ C(/etc/security/limits.conf). For the full documentation, see C(man 5
+ limits.conf).
+options:
+ domain:
+ type: str
+ description:
+      - A username, @groupname, wildcard, or uid/gid range.
+ required: true
+ limit_type:
+ type: str
+ description:
+      - Limit type; see C(man 5 limits.conf) for an explanation.
+ required: true
+ choices: [ "hard", "soft", "-" ]
+ limit_item:
+ type: str
+ description:
+      - The limit to be set.
+ required: true
+ choices:
+ - "core"
+ - "data"
+ - "fsize"
+ - "memlock"
+ - "nofile"
+ - "rss"
+ - "stack"
+ - "cpu"
+ - "nproc"
+ - "as"
+ - "maxlogins"
+ - "maxsyslogins"
+ - "priority"
+ - "locks"
+ - "sigpending"
+ - "msgqueue"
+ - "nice"
+ - "rtprio"
+ - "chroot"
+ value:
+ type: str
+ description:
+ - The value of the limit.
+ required: true
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ required: false
+ type: bool
+ default: "no"
+ use_min:
+ description:
+      - If set to C(yes), the minimal value will be used or conserved.
+        If the specified value is lower than the value in the file, the file content is replaced with the new value;
+        otherwise the content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ use_max:
+ description:
+      - If set to C(yes), the maximal value will be used or conserved.
+        If the specified value is higher than the value in the file, the file content is replaced with the new value;
+        otherwise the content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ dest:
+ type: str
+ description:
+ - Modify the limits.conf path.
+ required: false
+ default: "/etc/security/limits.conf"
+ comment:
+ type: str
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
+notes:
+  - If the C(dest) file doesn't exist, it is created.
+'''
+
+EXAMPLES = '''
+- name: Add or modify nofile soft limit for the user joe
+ community.general.pam_limits:
+ domain: joe
+ limit_type: soft
+ limit_item: nofile
+ value: 64000
+
+- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+ community.general.pam_limits:
+ domain: smith
+ limit_type: hard
+ limit_item: fsize
+ value: 1000000
+ use_max: yes
+
+- name: Add or modify memlock, both soft and hard, limit for the user james with a comment.
+ community.general.pam_limits:
+ domain: james
+ limit_type: '-'
+ limit_item: memlock
+ value: unlimited
+ comment: unlimited memory lock for james
+
+- name: Add or modify hard nofile limits for wildcard domain
+ community.general.pam_limits:
+ domain: '*'
+ limit_type: hard
+ limit_item: nofile
+ value: 39693561
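+
+# A minimal illustrative sketch (not from the original examples): with
+# use_min the smaller of the requested and existing values is kept.
+- name: Add or modify nproc soft limit for the user joe. Keep or set the minimal value.
+  community.general.pam_limits:
+    domain: joe
+    limit_type: soft
+    limit_item: nproc
+    value: 2048
+    use_min: yes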
+'''
+
+import os
+import os.path
+import tempfile
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
+ 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']
+
+ pam_types = ['soft', 'hard', '-']
+
+ limits_conf = '/etc/security/limits.conf'
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ domain=dict(required=True, type='str'),
+ limit_type=dict(required=True, type='str', choices=pam_types),
+ limit_item=dict(required=True, type='str', choices=pam_items),
+ value=dict(required=True, type='str'),
+ use_max=dict(default=False, type='bool'),
+ use_min=dict(default=False, type='bool'),
+ backup=dict(default=False, type='bool'),
+ dest=dict(default=limits_conf, type='str'),
+ comment=dict(required=False, default='', type='str')
+ )
+ )
+
+ domain = module.params['domain']
+ limit_type = module.params['limit_type']
+ limit_item = module.params['limit_item']
+ value = module.params['value']
+ use_max = module.params['use_max']
+ use_min = module.params['use_min']
+ backup = module.params['backup']
+ limits_conf = module.params['dest']
+ new_comment = module.params['comment']
+
+ changed = False
+
+ if os.path.isfile(limits_conf):
+ if not os.access(limits_conf, os.W_OK):
+ module.fail_json(msg="%s is not writable. Use sudo" % limits_conf)
+ else:
+ limits_conf_dir = os.path.dirname(limits_conf)
+ if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
+ open(limits_conf, 'a').close()
+ changed = True
+ else:
+ module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)
+
+ if use_max and use_min:
+ module.fail_json(msg="Cannot use use_min and use_max at the same time.")
+
+ if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
+ module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.")
+
+ # Backup
+ if backup:
+ backup_file = module.backup_local(limits_conf)
+
+ space_pattern = re.compile(r'\s+')
+
+ message = ''
+ f = open(limits_conf, 'rb')
+ # Tempfile
+ nf = tempfile.NamedTemporaryFile(mode='w+')
+
+ found = False
+ new_value = value
+
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ if line.startswith('#'):
+ nf.write(line)
+ continue
+
+ newline = re.sub(space_pattern, ' ', line).strip()
+ if not newline:
+ nf.write(line)
+ continue
+
+ # Remove comment in line
+ newline = newline.split('#', 1)[0]
+ try:
+ old_comment = line.split('#', 1)[1]
+ except Exception:
+ old_comment = ''
+
+ newline = newline.rstrip()
+
+ if not new_comment:
+ new_comment = old_comment
+
+ line_fields = newline.split(' ')
+
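+        # A valid limits.conf entry has exactly four fields:
+        # <domain> <type> <item> <value>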
+ if len(line_fields) != 4:
+ nf.write(line)
+ continue
+
+ line_domain = line_fields[0]
+ line_type = line_fields[1]
+ line_item = line_fields[2]
+ actual_value = line_fields[3]
+
+ if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
+ module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
+
+ # Found the line
+ if line_domain == domain and line_type == limit_type and line_item == limit_item:
+ found = True
+ if value == actual_value:
+ message = line
+ nf.write(line)
+ continue
+
+ actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
+ value_unlimited = value in ['unlimited', 'infinity', '-1']
+
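+            # use_max keeps the larger of the two values (an existing
+            # 'unlimited' is preserved); use_min keeps the smaller one
+            # (a requested 'unlimited' never overrides a finite value).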
+ if use_max:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(max(int(value), int(actual_value)))
+ elif actual_value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ if use_min:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(min(int(value), int(actual_value)))
+ elif value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ # Change line only if value has changed
+ if new_value != actual_value:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+ else:
+ message = line
+ nf.write(line)
+ else:
+ nf.write(line)
+
+ if not found:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+
+ f.close()
+ nf.flush()
+
+    # Move the tempfile over the original limits file
+    module.atomic_move(nf.name, limits_conf)
+
+ try:
+ nf.close()
+ except Exception:
+ pass
+
+ res_args = dict(
+ changed=changed, msg=message
+ )
+
+ if backup:
+ res_args['backup_file'] = backup_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py
new file mode 100644
index 00000000..45f00826
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kenneth D. Evensen <kdevensen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: pamd
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Manage PAM Modules
+description:
+ - Edit PAM service's type, control, module path and module arguments.
+ - In order for a PAM rule to be modified, the type, control and
+    module_path must match an existing rule. See C(man 5 pam.d) for details.
+options:
+ name:
+ description:
+ - The name generally refers to the PAM service file to
+ change, for example system-auth.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ control:
+ description:
+ - The control of the PAM rule being modified.
+ - This may be a complicated control with brackets. If this is the case, be
+ sure to put "[bracketed controls]" in quotes.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ module_path:
+ description:
+ - The module path of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ new_type:
+ description:
+ - The new type to assign to the new rule.
+ type: str
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ new_control:
+ description:
+ - The new control to assign to the new rule.
+ type: str
+ new_module_path:
+ description:
+ - The new module path to be assigned to the new rule.
+ type: str
+ module_arguments:
+ description:
+ - When state is C(updated), the module_arguments will replace existing module_arguments.
+ - When state is C(args_absent) args matching those listed in module_arguments will be removed.
+ - When state is C(args_present) any args listed in module_arguments are added if
+ missing from the existing rule.
+ - Furthermore, if the module argument takes a value denoted by C(=),
+ the value will be changed to that specified in module_arguments.
+ type: list
+ elements: str
+ state:
+ description:
+ - The default of C(updated) will modify an existing rule if type,
+ control and module_path all match an existing rule.
+ - With C(before), the new rule will be inserted before a rule matching type,
+ control and module_path.
+    - Similarly, with C(after), the new rule will be inserted after an existing rule matching type,
+ control and module_path.
+ - With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified.
+ - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored.
+ - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4.
+ type: str
+ choices: [ absent, before, after, args_absent, args_present, updated ]
+ default: updated
+ path:
+ description:
+ - This is the path to the PAM service files.
+ type: path
+ default: /etc/pam.d
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Update pamd rule's control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_control: sufficient
+
+- name: Update pamd rule's complex control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ new_control: '[success=2 default=ignore]'
+
+- name: Insert a new rule before an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_type: auth
+ new_control: sufficient
+ new_module_path: pam_faillock.so
+ state: before
+
+- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so
+ community.general.pamd:
+ name: su
+ type: auth
+ control: sufficient
+ module_path: pam_rootok.so
+ new_type: auth
+ new_control: required
+ new_module_path: pam_wheel.so
+ module_arguments: 'use_uid'
+ state: after
+
+- name: Remove module arguments from an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: ''
+ state: updated
+
+- name: Replace all module arguments in an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'preauth
+ silent
+ deny=3
+ unlock_time=604800
+ fail_interval=900'
+ state: updated
+
+- name: Remove specific arguments from a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_absent
+
+- name: Ensure specific arguments are present in a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_present
+
+- name: Ensure specific arguments are present in a rule (alternative)
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments:
+ - crond
+ - quiet
+ state: args_present
+
+- name: Module arguments requiring commas must be listed as a Yaml list
+ community.general.pamd:
+ name: special-module
+ type: account
+ control: required
+ module_path: pam_access.so
+ module_arguments:
+ - listsep=,
+ state: args_present
+
+- name: Update specific argument value in a rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'fail_interval=300'
+ state: args_present
+
+- name: Add pam common-auth rule for duo
+ community.general.pamd:
+ name: common-auth
+ new_type: auth
+ new_control: '[success=1 default=ignore]'
+ new_module_path: '/lib64/security/pam_duo.so'
+ state: after
+ type: auth
+ module_path: pam_sss.so
+ control: 'requisite'
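+
+# A minimal illustrative sketch (not from the original examples):
+# state=absent removes every rule matching type, control and module_path.
+- name: Remove the pam_faillock.so auth rule from /etc/pam.d/system-auth
+  community.general.pamd:
+    name: system-auth
+    type: auth
+    control: required
+    module_path: pam_faillock.so
+    state: absent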
+'''
+
+RETURN = r'''
+change_count:
+ description: How many rules were changed.
+ type: int
+ sample: 1
+ returned: success
+new_rule:
+ description: The changes to the rule. This was available in Ansible 2.4 and Ansible 2.5. It was removed in Ansible 2.6.
+ type: str
+ sample: None None None sha512 shadow try_first_pass use_authtok
+ returned: success
+updated_rule_(n):
+ description: The rule(s) that was/were changed. This is only available in
+ Ansible 2.4 and was removed in Ansible 2.5.
+ type: str
+ sample:
+ - password sufficient pam_unix.so sha512 shadow try_first_pass
+ use_authtok
+ returned: success
+action:
+ description:
+ - "That action that was taken and is one of: update_rule,
+ insert_before_rule, insert_after_rule, args_present, args_absent,
+ absent. This was available in Ansible 2.4 and removed in Ansible 2.8"
+ returned: always
+ type: str
+ sample: "update_rule"
+dest:
+ description:
+ - "Path to pam.d service that was changed. This is only available in
+ Ansible 2.3 and was removed in Ansible 2.4."
+ returned: success
+ type: str
+ sample: "/etc/pam.d/system-auth"
+backupdest:
+ description:
+ - "The file name of the backup file, if created."
+ returned: success
+ type: str
+...
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+from tempfile import NamedTemporaryFile
+from datetime import datetime
+
+
+RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
+ (?P<control>\[.*\]|\S*)\s+
+ (?P<path>\S*)\s*
+ (?P<args>.*)\s*""", re.X)
+
+RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""")
+
+VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
+
+
+class PamdLine(object):
+
+ def __init__(self, line):
+ self.line = line
+ self.prev = None
+ self.next = None
+
+ @property
+ def is_valid(self):
+ if self.line.strip() == '':
+ return True
+ return False
+
+ def validate(self):
+ if not self.is_valid:
+ return False, "Rule is not valid " + self.line
+ return True, "Rule is valid " + self.line
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ return False
+
+ def __str__(self):
+ return str(self.line)
+
+
+class PamdEmptyLine(PamdLine):
+ pass
+
+
+class PamdComment(PamdLine):
+
+ def __init__(self, line):
+ super(PamdComment, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('#'):
+ return True
+ return False
+
+
+class PamdInclude(PamdLine):
+ def __init__(self, line):
+ super(PamdInclude, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('@include'):
+ return True
+ return False
+
+
+class PamdRule(PamdLine):
+
+ valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
+ valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
+ 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
+ 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
+ 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
+ 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
+ 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
+ 'incomplete', 'default']
+ valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']
+
+ def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
+ self.prev = None
+ self.next = None
+ self._control = None
+ self._args = None
+ self.rule_type = rule_type
+ self.rule_control = rule_control
+
+ self.rule_path = rule_path
+ self.rule_args = rule_args
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ if (rule_type == self.rule_type and
+ rule_control == self.rule_control and
+ rule_path == self.rule_path):
+ return True
+ return False
+
+ @classmethod
+ def rule_from_string(cls, line):
+ rule_match = RULE_REGEX.search(line)
+ rule_args = parse_module_arguments(rule_match.group('args'))
+ return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args)
+
+ def __str__(self):
+ if self.rule_args:
+ return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args))
+ return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path)
+
+ @property
+ def rule_control(self):
+ if isinstance(self._control, list):
+ return '[' + ' '.join(self._control) + ']'
+ return self._control
+
+ @rule_control.setter
+ def rule_control(self, control):
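+        # Bracketed controls such as '[success=1 default=ignore]' are stored
+        # internally as a list of 'value=action' tokens; simple controls are
+        # kept as plain strings.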
+ if control.startswith('['):
+ control = control.replace(' = ', '=').replace('[', '').replace(']', '')
+ self._control = control.split(' ')
+ else:
+ self._control = control
+
+ @property
+ def rule_args(self):
+ if not self._args:
+ return []
+ return self._args
+
+ @rule_args.setter
+ def rule_args(self, args):
+ self._args = parse_module_arguments(args)
+
+ @property
+ def line(self):
+ return str(self)
+
+ @classmethod
+ def is_action_unsigned_int(cls, string_num):
+ number = 0
+ try:
+ number = int(string_num)
+ except ValueError:
+ return False
+
+ if number >= 0:
+ return True
+ return False
+
+ @property
+ def is_valid(self):
+ return self.validate()[0]
+
+ def validate(self):
+ # Validate the rule type
+ if self.rule_type not in VALID_TYPES:
+ return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line
+ # Validate the rule control
+ if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls:
+ return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line
+ elif isinstance(self._control, list):
+ for control in self._control:
+ value, action = control.split("=")
+ if value not in PamdRule.valid_control_values:
+ return False, "Rule control value, " + value + ", is not valid in rule " + self.line
+ if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action):
+ return False, "Rule control action, " + action + ", is not valid in rule " + self.line
+
+ # TODO: Validate path
+
+ return True, "Rule is valid " + self.line
+
+
+# PamdService encapsulates an entire service and contains one or more rules,
+# stored as a doubly linked list so rules can be inserted or removed in place.
+class PamdService(object):
+
+ def __init__(self, content):
+ self._head = None
+ self._tail = None
+ for line in content.splitlines():
+ if line.lstrip().startswith('#'):
+ pamd_line = PamdComment(line)
+ elif line.lstrip().startswith('@include'):
+ pamd_line = PamdInclude(line)
+ elif line.strip() == '':
+ pamd_line = PamdEmptyLine(line)
+ else:
+ pamd_line = PamdRule.rule_from_string(line)
+
+ self.append(pamd_line)
+
+ def append(self, pamd_line):
+ if self._head is None:
+ self._head = self._tail = pamd_line
+ else:
+ pamd_line.prev = self._tail
+ pamd_line.next = None
+ self._tail.next = pamd_line
+ self._tail = pamd_line
+
+ def remove(self, rule_type, rule_control, rule_path):
+ current_line = self._head
+ changed = 0
+
+ while current_line is not None:
+ if current_line.matches(rule_type, rule_control, rule_path):
+                if current_line.prev is not None:
+                    current_line.prev.next = current_line.next
+                    if current_line.next is not None:
+                        current_line.next.prev = current_line.prev
+                else:
+                    # the matched rule is the head of the list
+                    self._head = current_line.next
+                    if current_line.next is not None:
+                        current_line.next.prev = None
+ changed += 1
+
+ current_line = current_line.next
+ return changed
+
+ def get(self, rule_type, rule_control, rule_path):
+ lines = []
+ current_line = self._head
+ while current_line is not None:
+
+ if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path):
+ lines.append(current_line)
+
+ current_line = current_line.next
+
+ return lines
+
+ def has_rule(self, rule_type, rule_control, rule_path):
+ if self.get(rule_type, rule_control, rule_path):
+ return True
+ return False
+
+ def update_rule(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ new_args = parse_module_arguments(new_args)
+
+ changes = 0
+ for current_rule in rules_to_find:
+ rule_changed = False
+ if new_type:
+ if(current_rule.rule_type != new_type):
+ rule_changed = True
+ current_rule.rule_type = new_type
+ if new_control:
+ if(current_rule.rule_control != new_control):
+ rule_changed = True
+ current_rule.rule_control = new_control
+ if new_path:
+ if(current_rule.rule_path != new_path):
+ rule_changed = True
+ current_rule.rule_path = new_path
+ if new_args:
+ if(current_rule.rule_args != new_args):
+ rule_changed = True
+ current_rule.rule_args = new_args
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def insert_before(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist before the existing rule
+ # 2. The new rule exists
+
+ for current_rule in rules_to_find:
+ # Create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ # First we'll get the previous rule.
+ previous_rule = current_rule.prev
+
+ # Next we may have to loop backwards if the previous line is a comment. If it
+ # is, we'll get the previous "rule's" previous.
+ while previous_rule is not None and isinstance(previous_rule, (PamdComment, PamdEmptyLine)):
+ previous_rule = previous_rule.prev
+ # Next we'll see if the previous rule matches what we are trying to insert.
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path):
+ # First set the original previous rule's next to the new_rule
+ previous_rule.next = new_rule
+ # Second, set the new_rule's previous to the original previous
+ new_rule.prev = previous_rule
+ # Third, set the new rule's next to the current rule
+ new_rule.next = current_rule
+ # Fourth, set the current rule's previous to the new_rule
+ current_rule.prev = new_rule
+
+ changes += 1
+
+ # Handle the case where it is the first rule in the list.
+ elif previous_rule is None:
+ # This is the case where the current rule is not only the first rule
+ # but the first line as well. So we set the head to the new rule
+ if current_rule.prev is None:
+ self._head = new_rule
+ # This case would occur if the previous line was a comment.
+ else:
+ current_rule.prev.next = new_rule
+ new_rule.prev = current_rule.prev
+ new_rule.next = current_rule
+ current_rule.prev = new_rule
+ changes += 1
+
+ return changes
+
+ def insert_after(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist after the existing rule
+ # 2. The new rule exists
+ for current_rule in rules_to_find:
+ # First we'll get the next rule.
+ next_rule = current_rule.next
+ # Next we may have to loop forwards if the next line is a comment. If it
+ # is, we'll get the next "rule's" next.
+ while next_rule is not None and isinstance(next_rule, (PamdComment, PamdEmptyLine)):
+ next_rule = next_rule.next
+
+ # First we create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ if next_rule is not None and not next_rule.matches(new_type, new_control, new_path):
+ # If the previous rule doesn't match we'll insert our new rule.
+
+ # Second set the original next rule's previous to the new_rule
+ next_rule.prev = new_rule
+ # Third, set the new_rule's next to the original next rule
+ new_rule.next = next_rule
+ # Fourth, set the new rule's previous to the current rule
+ new_rule.prev = current_rule
+ # Fifth, set the current rule's next to the new_rule
+ current_rule.next = new_rule
+
+ changes += 1
+
+ # This is the case where the current_rule is the last in the list
+ elif next_rule is None:
+ new_rule.prev = self._tail
+ new_rule.next = None
+ self._tail.next = new_rule
+ self._tail = new_rule
+
+ current_rule.next = new_rule
+ changes += 1
+
+ return changes
+
+ def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_add = parse_module_arguments(args_to_add)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ rule_changed = False
+
+ # create some structures to evaluate the situation
+ simple_new_args = set()
+ key_value_new_args = dict()
+
+ for arg in args_to_add:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+ key, value = arg.split("=")
+ key_value_new_args[key] = value
+ else:
+ simple_new_args.add(arg)
+
+ key_value_new_args_set = set(key_value_new_args)
+
+ simple_current_args = set()
+ key_value_current_args = dict()
+
+ for arg in current_rule.rule_args:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+ key, value = arg.split("=")
+ key_value_current_args[key] = value
+ else:
+ simple_current_args.add(arg)
+
+ key_value_current_args_set = set(key_value_current_args)
+
+ new_args_to_add = list()
+
+ # Handle new simple arguments
+ if simple_new_args.difference(simple_current_args):
+ for arg in simple_new_args.difference(simple_current_args):
+ new_args_to_add.append(arg)
+
+ # Handle new key value arguments
+ if key_value_new_args_set.difference(key_value_current_args_set):
+ for key in key_value_new_args_set.difference(key_value_current_args_set):
+ new_args_to_add.append(key + '=' + key_value_new_args[key])
+
+ if new_args_to_add:
+ current_rule.rule_args += new_args_to_add
+ rule_changed = True
+
+ # Handle existing key value arguments when value is not equal
+ if key_value_new_args_set.intersection(key_value_current_args_set):
+ for key in key_value_new_args_set.intersection(key_value_current_args_set):
+ if key_value_current_args[key] != key_value_new_args[key]:
+ arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key])
+ current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key])
+ rule_changed = True
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_remove = parse_module_arguments(args_to_remove)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ if not args_to_remove:
+ args_to_remove = []
+
+ # Let's check to see if there are any args to remove by finding the intersection
+ # of the rule's current args and the args_to_remove lists
+ if not list(set(current_rule.rule_args) & set(args_to_remove)):
+ continue
+
+ # There are args to remove, so we create a list of new_args absent the args
+ # to remove.
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove]
+
+ changes += 1
+
+ return changes
+
+ def validate(self):
+ current_line = self._head
+
+ while current_line is not None:
+ if not current_line.validate()[0]:
+ return current_line.validate()
+ current_line = current_line.next
+ return True, "Module is valid"
+
+ def __str__(self):
+ lines = []
+ current_line = self._head
+
+ while current_line is not None:
+ lines.append(str(current_line))
+ current_line = current_line.next
+
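+        # Refresh the '# Updated by Ansible' stamp kept on the second line
+        # of the rendered file.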
+ if lines[1].startswith("# Updated by Ansible"):
+ lines.pop(1)
+
+ lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat())
+
+ return '\n'.join(lines) + '\n'
+
+
+def parse_module_arguments(module_arguments):
+ # Return empty list if we have no args to parse
+ if not module_arguments:
+ return []
+ elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
+ return []
+
+ if not isinstance(module_arguments, list):
+ module_arguments = [module_arguments]
+
+ parsed_args = list()
+
+ for arg in module_arguments:
+ for item in filter(None, RULE_ARG_REGEX.findall(arg)):
+ if not item.startswith("["):
+                # normalize spaces around '=' so "key = value" becomes "key=value"
+                item = re.sub(r"\s*=\s*", "=", item)
+ parsed_args.append(item)
+
+ return parsed_args
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=VALID_TYPES),
+ control=dict(type='str', required=True),
+ module_path=dict(type='str', required=True),
+ new_type=dict(type='str', choices=VALID_TYPES),
+ new_control=dict(type='str'),
+ new_module_path=dict(type='str'),
+ module_arguments=dict(type='list', elements='str'),
+ state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']),
+ path=dict(type='path', default='/etc/pam.d'),
+ backup=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ("state", "args_present", ["module_arguments"]),
+ ("state", "args_absent", ["module_arguments"]),
+ ("state", "before", ["new_control", "new_type", "new_module_path"]),
+ ("state", "after", ["new_control", "new_type", "new_module_path"]),
+ ],
+ )
+ content = str()
+ fname = os.path.join(module.params["path"], module.params["name"])
+
+ # Open the file and read the content or fail
+ try:
+ with open(fname, 'r') as service_file_obj:
+ content = service_file_obj.read()
+ except IOError as e:
+ # If unable to read the file, fail out
+ module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' % (fname, str(e)))
+
+ # Assuming we didn't fail, create the service
+ service = PamdService(content)
+ # Set the action
+ action = module.params['state']
+
+ changes = 0
+
+ # Take action
+ if action == 'updated':
+ changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'before':
+ changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'after':
+ changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_absent':
+ changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_present':
+ if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]:
+ module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.")
+
+ changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'absent':
+ changes = service.remove(module.params['type'], module.params['control'], module.params['module_path'])
+
+ valid, msg = service.validate()
+
+ # If the module is not valid (meaning one of the rules is invalid), we will fail
+ if not valid:
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=(changes > 0),
+ change_count=changes,
+ backupdest='',
+ )
+
+ # If not check mode and something changed, backup the original if necessary then write out the file or fail
+ if not module.check_mode and result['changed']:
+ # First, create a backup if desired.
+ if module.params['backup']:
+ result['backupdest'] = module.backup_local(fname)
+ try:
+ temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False)
+ with open(temp_file.name, 'w') as fd:
+ fd.write(str(service))
+
+ except IOError:
+        module.fail_json(msg='Unable to create temporary file %s' % temp_file)
+
+ module.atomic_move(temp_file.name, os.path.realpath(fname))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py
new file mode 100644
index 00000000..daf68c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py
@@ -0,0 +1,797 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Fabrizio Colonna <colofabrix@tin.it>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Fabrizio Colonna (@ColOfAbRiX)
+module: parted
+short_description: Configure block device partitions
+description:
+ - This module allows configuring block device partition using the C(parted)
+ command line tool. For a full description of the fields and the options
+ check the GNU parted manual.
+requirements:
+ - This module requires parted version 1.8.3 and above
+ - align option (except 'undefined') requires parted 2.1 and above
+  - If the version of parted is below 3.1, it requires a Linux kernel with
+    the sysfs file system mounted at C(/sys/).
+options:
+ device:
+ description: The block device (disk) where to operate.
+ type: str
+ required: True
+ align:
+    description: Set alignment for newly created partitions. Use 'undefined' for the parted default alignment.
+ type: str
+ choices: [ cylinder, minimal, none, optimal, undefined ]
+ default: optimal
+ number:
+ description:
+ - The number of the partition to work with or the number of the partition
+ that will be created.
+ - Required when performing any action on the disk, except fetching information.
+ type: int
+ unit:
+ description:
+ - Selects the current default unit that Parted will use to display
+ locations and capacities on the disk and to interpret those given by the
+      user if they are not suffixed by a unit.
+ - When fetching information about a disk, it is always recommended to specify a unit.
+ type: str
+ choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
+ default: KiB
+ label:
+ description:
+ - Disk label type to use.
+      - If C(device) already contains a different label, it will be changed to C(label) and any previous partitions will be lost.
+ type: str
+ choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
+ default: msdos
+ part_type:
+ description:
+ - May be specified only with 'msdos' or 'dvh' partition tables.
+ - A C(name) must be specified for a 'gpt' partition table.
+ - Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
+ type: str
+ choices: [ extended, logical, primary ]
+ default: primary
+ part_start:
+ description:
+ - Where the partition will start as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+ (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+ - Using negative values may require setting of C(fs_type) (see notes).
+ type: str
+ default: 0%
+ part_end:
+ description:
+ - Where the partition will end as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+ (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+ type: str
+ default: 100%
+ name:
+ description:
+ - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
+ type: str
+ flags:
+    description: A list of the flags that have to be set on the partition.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to create or delete a partition.
+ - If set to C(info) the module will only return the device information.
+ type: str
+ choices: [ absent, present, info ]
+ default: info
+ fs_type:
+ description:
+ - If specified and the partition does not exist, will set filesystem type to given partition.
+      - This parameter is optional, but see the notes below about negative C(part_start) values.
+ type: str
+ version_added: '0.2.0'
+ resize:
+ description:
+ - Call C(resizepart) on existing partitions to match the size specified by I(part_end).
+ type: bool
+ default: false
+ version_added: '1.3.0'
+
+notes:
+ - When fetching information about a new disk and when the version of parted
+ installed on the system is before version 3.1, the module queries the kernel
+ through C(/sys/) to obtain disk information. In this case the units CHS and
+ CYL are not supported.
+  - Negative C(part_start) values were rejected if C(fs_type) was not given.
+    This bug was fixed in parted 3.2.153. If you want to use negative C(part_start),
+    specify C(fs_type) as well or make sure your system runs a newer parted.
+'''
+
+RETURN = r'''
+partition_info:
+ description: Current partition information
+ returned: success
+ type: complex
+ contains:
+ disk:
+ description: Generic device information.
+ type: dict
+ partitions:
+ description: List of device partitions.
+ type: list
+ script:
+ description: parted script executed by module
+ type: str
+ sample: {
+ "disk": {
+ "dev": "/dev/sdb",
+ "logical_block": 512,
+ "model": "VMware Virtual disk",
+ "physical_block": 512,
+ "size": 5.0,
+ "table": "msdos",
+ "unit": "gib"
+ },
+ "partitions": [{
+ "begin": 0.0,
+ "end": 1.0,
+ "flags": ["boot", "lvm"],
+ "fstype": "",
+ "name": "",
+ "num": 1,
+ "size": 1.0
+ }, {
+ "begin": 1.0,
+ "end": 5.0,
+ "flags": [],
+ "fstype": "",
+ "name": "",
+ "num": 2,
+ "size": 4.0
+ }],
+ "script": "unit KiB print "
+ }
+'''
+
+EXAMPLES = r'''
+- name: Create a new ext4 primary partition
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ fs_type: ext4
+
+- name: Remove partition number 1
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: absent
+
+- name: Create a new primary partition with a size of 1GiB
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ part_end: 1GiB
+
+- name: Create a new primary partition for LVM
+ community.general.parted:
+ device: /dev/sdb
+ number: 2
+ flags: [ lvm ]
+ state: present
+ part_start: 1GiB
+
+- name: Create a new primary partition with a size of 1GiB at disk's end
+ community.general.parted:
+ device: /dev/sdb
+ number: 3
+ state: present
+ fs_type: ext3
+ part_start: -1GiB
+
+# Example on how to read info and reuse it in subsequent task
+- name: Read device information (always use unit when probing)
+ community.general.parted: device=/dev/sdb unit=MiB
+ register: sdb_info
+
+- name: Remove all partitions from disk
+ community.general.parted:
+ device: /dev/sdb
+ number: '{{ item.num }}'
+ state: absent
+ loop: '{{ sdb_info.partitions }}'
+
+- name: Extend an existing partition to fill all available space
+ community.general.parted:
+ device: /dev/sdb
+ number: "{{ sdb_info.partitions | length }}"
+ part_end: "100%"
+ resize: true
+ state: present
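+
+# A minimal illustrative sketch (not from the original examples): a 'gpt'
+# label requires a partition name.
+- name: Create a named partition on a GPT-labeled disk
+  community.general.parted:
+    device: /dev/sdb
+    number: 1
+    label: gpt
+    name: data
+    state: present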
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import math
+import re
+import os
+
+
+# Reference prefixes (International System of Units and IEC)
+units_si = ['B', 'KB', 'MB', 'GB', 'TB']
+units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
+parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
+
+
+def parse_unit(size_str, unit=''):
+ """
+ Parses a string containing a size or boundary information
+ """
+ matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str)
+ if matches is None:
+ # "<cylinder>,<head>,<sector>" format
+ matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
+ if matches is None:
+ module.fail_json(
+ msg="Error interpreting parted size output: '%s'" % size_str
+ )
+
+ size = {
+ 'cylinder': int(matches.group(1)),
+ 'head': int(matches.group(2)),
+ 'sector': int(matches.group(3))
+ }
+ unit = 'chs'
+
+ else:
+ # Normal format: "<number>[<unit>]"
+ if matches.group(2) is not None:
+ unit = matches.group(2)
+
+ size = float(matches.group(1))
+
+ return size, unit
+
+
+def parse_partition_info(parted_output, unit):
+ """
+ Parses the output of parted and transforms the data into
+ a dictionary.
+
+ Parted Machine Parseable Output:
+    See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/000573.html
+ - All lines end with a semicolon (;)
+ - The first line indicates the units in which the output is expressed.
+ CHS, CYL and BYT stands for CHS, Cylinder and Bytes respectively.
+ - The second line is made of disk information in the following format:
+ "path":"size":"transport-type":"logical-sector-size":"physical-sector-siz
+ e":"partition-table-type":"model-name";
+ - If the first line was either CYL or CHS, the next line will contain
+ information on no. of cylinders, heads, sectors and cylinder size.
+ - Partition information begins from the next line. This is of the format:
+ (for BYT)
+ "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s
+ et";
+ (for CHS/CYL)
+ "number":"begin":"end":"filesystem-type":"partition-name":"flags-set";
+ """
+ lines = [x for x in parted_output.split('\n') if x.strip() != '']
+
+ # Generic device info
+ generic_params = lines[1].rstrip(';').split(':')
+
+ # The unit is read once, because parted always returns the same unit
+ size, unit = parse_unit(generic_params[1], unit)
+
+ generic = {
+ 'dev': generic_params[0],
+ 'size': size,
+ 'unit': unit.lower(),
+ 'table': generic_params[5],
+ 'model': generic_params[6],
+ 'logical_block': int(generic_params[3]),
+ 'physical_block': int(generic_params[4])
+ }
+
+ # CYL and CHS have an additional line in the output
+ if unit in ['cyl', 'chs']:
+ chs_info = lines[2].rstrip(';').split(':')
+ cyl_size, cyl_unit = parse_unit(chs_info[3])
+ generic['chs_info'] = {
+ 'cylinders': int(chs_info[0]),
+ 'heads': int(chs_info[1]),
+ 'sectors': int(chs_info[2]),
+ 'cyl_size': cyl_size,
+ 'cyl_size_unit': cyl_unit.lower()
+ }
+ lines = lines[1:]
+
+ parts = []
+ for line in lines[2:]:
+ part_params = line.rstrip(';').split(':')
+
+        # CHS uses a different format than BYT but, contrary to what is stated
+        # by the author, CYL is the same as BYT. I've tested this undocumented
+ # behaviour down to parted version 1.8.3, which is the first version
+ # that supports the machine parseable output.
+ if unit != 'chs':
+ size = parse_unit(part_params[3])[0]
+ fstype = part_params[4]
+ name = part_params[5]
+ flags = part_params[6]
+
+ else:
+ size = ""
+ fstype = part_params[3]
+ name = part_params[4]
+ flags = part_params[5]
+
+ parts.append({
+ 'num': int(part_params[0]),
+ 'begin': parse_unit(part_params[1])[0],
+ 'end': parse_unit(part_params[2])[0],
+ 'size': size,
+ 'fstype': fstype,
+ 'name': name,
+ 'flags': [f.strip() for f in flags.split(', ') if f != ''],
+ 'unit': unit.lower(),
+ })
+
+ return {'generic': generic, 'partitions': parts}
+
+
+def format_disk_size(size_bytes, unit):
+ """
+ Formats a size in bytes into a different unit, like parted does. It doesn't
+ manage CYL and CHS formats, though.
+    This function has been adapted from https://github.com/Distrotech/parted/blob/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c
+ """
+ global units_si, units_iec
+
+ unit = unit.lower()
+
+ # Shortcut
+ if size_bytes == 0:
+ return 0.0, 'b'
+
+ # Cases where we default to 'compact'
+ if unit in ['', 'compact', 'cyl', 'chs']:
+ index = max(0, int(
+ (math.log10(size_bytes) - 1.0) / 3.0
+ ))
+ unit = 'b'
+ if index < len(units_si):
+ unit = units_si[index]
+
+ # Find the appropriate multiplier
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** units_iec.index(unit)
+
+ output = size_bytes // multiplier * (1 + 1E-16)
+
+ # Corrections to round up as per IEEE754 standard
+ if output < 10:
+ w = output + 0.005
+ elif output < 100:
+ w = output + 0.05
+ else:
+ w = output + 0.5
+
+ if w < 10:
+ precision = 2
+ elif w < 100:
+ precision = 1
+ else:
+ precision = 0
+
+ # Round and return
+ return round(output, precision), unit
+
+
+def convert_to_bytes(size_str, unit):
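+    # e.g. convert_to_bytes('1', 'GiB') -> 1 * 1024.0**3 = 1073741824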
+ size = float(size_str)
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** (units_iec.index(unit) + 1)
+ elif unit in ['', 'compact', 'cyl', 'chs']:
+ # As per format_disk_size, default to compact, which defaults to megabytes
+ multiplier = 1000.0 ** units_si.index("MB")
+
+ output = size * multiplier
+ return int(output)
+
+
+def get_unlabeled_device_info(device, unit):
+ """
+    Fetches device information directly from the kernel; it is used when
+    parted cannot work because of a missing label. It always returns an
+    'unknown' label.
+ """
+ device_name = os.path.basename(device)
+ base = "/sys/block/%s" % device_name
+
+ vendor = read_record(base + "/device/vendor", "Unknown")
+ model = read_record(base + "/device/model", "model")
+ logic_block = int(read_record(base + "/queue/logical_block_size", 0))
+ phys_block = int(read_record(base + "/queue/physical_block_size", 0))
+ size_bytes = int(read_record(base + "/size", 0)) * logic_block
+
+ size, unit = format_disk_size(size_bytes, unit)
+
+ return {
+ 'generic': {
+ 'dev': device,
+ 'table': "unknown",
+ 'size': size,
+ 'unit': unit,
+ 'logical_block': logic_block,
+ 'physical_block': phys_block,
+ 'model': "%s %s" % (vendor, model),
+ },
+ 'partitions': []
+ }
+
+
+def get_device_info(device, unit):
+ """
+ Fetches information about a disk and its partitions and it returns a
+ dictionary.
+ """
+ global module, parted_exec
+
+ # If parted complains about missing labels, it means there are no partitions.
+ # In this case only, use a custom function to fetch information and emulate
+ # parted formats for the unit.
+ label_needed = check_parted_label(device)
+ if label_needed:
+ return get_unlabeled_device_info(device, unit)
+
+ command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
+ rc, out, err = module.run_command(command)
+ if rc != 0 and 'unrecognised disk label' not in err:
+ module.fail_json(msg=(
+ "Error while getting device information with parted "
+ "script: '%s'" % command),
+ rc=rc, out=out, err=err
+ )
+
+ return parse_partition_info(out, unit)
+
+
+def check_parted_label(device):
+ """
+ Determines if parted needs a label to complete its duties. Versions prior
+ to 3.1 don't return data when there is no label. For more information see:
+ http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html
+ """
+ global parted_exec
+
+ # Check the version
+ parted_major, parted_minor, _ = parted_version()
+ if (parted_major == 3 and parted_minor >= 1) or parted_major > 3:
+ return False
+
+ # Older parted versions return a message in the stdout and RC > 0.
+ rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
+ if rc != 0 and 'unrecognised disk label' in out.lower():
+ return True
+
+ return False
+
+
+def parse_parted_version(out):
+ """
+ Returns version tuple from the output of "parted --version" command
+ """
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ return None, None, None
+
+ # Sample parted versions (see as well test unit):
+ # parted (GNU parted) 3.3
+ # parted (GNU parted) 3.4.5
+ # parted (GNU parted) 3.3.14-dfc61
+ matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip())
+
+ if matches is None:
+ return None, None, None
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+
+def parted_version():
+ """
+ Returns the major and minor version of parted installed on the system.
+ """
+ global module, parted_exec
+
+ rc, out, err = module.run_command("%s --version" % parted_exec)
+ if rc != 0:
+ module.fail_json(
+ msg="Failed to get parted version.", rc=rc, out=out, err=err
+ )
+
+ (major, minor, rev) = parse_parted_version(out)
+ if major is None:
+ module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
+
+ return major, minor, rev
+
+
+def parted(script, device, align):
+ """
+ Runs a parted script.
+ """
+ global module, parted_exec
+
+ align_option = '-a %s' % align
+ if align == 'undefined':
+ align_option = ''
+
+ if script and not module.check_mode:
+ command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script)
+ rc, out, err = module.run_command(command)
+
+ if rc != 0:
+ module.fail_json(
+ msg="Error while running parted script: %s" % command.strip(),
+ rc=rc, out=out, err=err
+ )
+
+
+def read_record(file_path, default=None):
+ """
+ Reads the first line of a file and returns it.
+ """
+ try:
+ f = open(file_path, 'r')
+ try:
+ return f.readline().strip()
+ finally:
+ f.close()
+ except IOError:
+ return default
+
+
+def part_exists(partitions, attribute, number):
+ """
+    Checks whether a partition with a specific value for a specific attribute
+    actually exists.
+ """
+ return any(
+ part[attribute] and
+ part[attribute] == number for part in partitions
+ )
+
+
+def check_size_format(size_str):
+ """
+ Checks if the input string is an allowed size
+ """
+ size, unit = parse_unit(size_str)
+ return unit in parted_units
+
+
+def main():
+ global module, units_si, units_iec, parted_exec
+
+ changed = False
+ output_script = ""
+ script = ""
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(type='str', required=True),
+ align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']),
+ number=dict(type='int'),
+
+ # unit <unit> command
+ unit=dict(type='str', default='KiB', choices=parted_units),
+
+ # mklabel <label-type> command
+ label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
+
+ # mkpart <part-type> [<fs-type>] <start> <end> command
+ part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
+ part_start=dict(type='str', default='0%'),
+ part_end=dict(type='str', default='100%'),
+ fs_type=dict(type='str'),
+
+ # name <partition> <name> command
+ name=dict(type='str'),
+
+ # set <partition> <flag> <state> command
+ flags=dict(type='list', elements='str'),
+
+ # rm/mkpart command
+ state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
+
+ # resize part
+ resize=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ['state', 'present', ['number']],
+ ['state', 'absent', ['number']],
+ ],
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
+
+ # Data extraction
+ device = module.params['device']
+ align = module.params['align']
+ number = module.params['number']
+ unit = module.params['unit']
+ label = module.params['label']
+ part_type = module.params['part_type']
+ part_start = module.params['part_start']
+ part_end = module.params['part_end']
+ name = module.params['name']
+ state = module.params['state']
+ flags = module.params['flags']
+ fs_type = module.params['fs_type']
+ resize = module.params['resize']
+
+ # Parted executable
+ parted_exec = module.get_bin_path('parted', True)
+
+ # Conditioning
+ if number is not None and number < 1:
+ module.fail_json(msg="The partition number must be greater then 0.")
+ if not check_size_format(part_start):
+ module.fail_json(
+ msg="The argument 'part_start' doesn't respect required format."
+ "The size unit is case sensitive.",
+ err=parse_unit(part_start)
+ )
+ if not check_size_format(part_end):
+ module.fail_json(
+ msg="The argument 'part_end' doesn't respect required format."
+ "The size unit is case sensitive.",
+ err=parse_unit(part_end)
+ )
+
+ # Read the current disk information
+ current_device = get_device_info(device, unit)
+ current_parts = current_device['partitions']
+
+ if state == 'present':
+
+ # Assign label if required
+ mklabel_needed = current_device['generic'].get('table', None) != label
+ if mklabel_needed:
+ script += "mklabel %s " % label
+
+ # Create partition if required
+ if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)):
+ script += "mkpart %s %s%s %s " % (
+ part_type,
+ '%s ' % fs_type if fs_type is not None else '',
+ part_start,
+ part_end
+ )
+
+ # Set the unit of the run
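+        # (e.g. with the module defaults the script ends up like
+        #  "unit KiB mklabel msdos mkpart primary 0% 100% ")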
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # If partition exists, try to resize
+ if resize and part_exists(current_parts, 'num', number):
+ # Ensure new end is different to current
+ partition = [p for p in current_parts if p['num'] == number][0]
+ current_part_end = convert_to_bytes(partition['end'], unit)
+
+ size, parsed_unit = parse_unit(part_end, unit)
+ if parsed_unit == "%":
+ size = int((int(current_device['generic']['size']) * size) / 100)
+ parsed_unit = unit
+
+ desired_part_end = convert_to_bytes(size, parsed_unit)
+
+ if current_part_end != desired_part_end:
+ script += "resizepart %s %s " % (
+ number,
+ part_end
+ )
+
+ # Execute the script and update the data structure.
+ # This will create the partition for the next steps
+ if script:
+ output_script += script
+ parted(script, device, align)
+ changed = True
+ script = ""
+
+ if not module.check_mode:
+ current_parts = get_device_info(device, unit)['partitions']
+
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ if changed and module.check_mode:
+ partition = {'flags': []} # Empty structure for the check-mode
+ else:
+ partition = [p for p in current_parts if p['num'] == number][0]
+
+ # Assign name to the partition
+ if name is not None and partition.get('name', None) != name:
+ # Wrap double quotes in single quotes so the shell doesn't strip
+ # the double quotes as those need to be included in the arg
+ # passed to parted
+ script += 'name %s \'"%s"\' ' % (number, name)
+
+ # Manage flags
+ if flags:
+ # Parted infers boot with esp, if you assign esp, boot is set
+ # and if boot is unset, esp is also unset.
+ if 'esp' in flags and 'boot' not in flags:
+ flags.append('boot')
+
+ # Compute only the changes in flags status
+ flags_off = list(set(partition['flags']) - set(flags))
+ flags_on = list(set(flags) - set(partition['flags']))
+
+ for f in flags_on:
+ script += "set %s %s on " % (number, f)
+
+ for f in flags_off:
+ script += "set %s %s off " % (number, f)
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # Execute the script
+ if script:
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'absent':
+ # Remove the partition
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ script = "rm %s " % number
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'info':
+ output_script = "unit '%s' print " % unit
+
+ # Final status of the device
+ final_device_status = get_device_info(device, unit)
+ module.exit_json(
+ changed=changed,
+ disk=final_device_status['generic'],
+ partitions=final_device_status['partitions'],
+ script=output_script.strip()
+ )
+
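+# A present-state run that labels the disk and creates one partition would emit
+# a parted script similar to (hypothetical values):
+#   "unit KiB mklabel msdos mkpart primary 0% 50%"
+# which is also returned to the caller via the 'script' result field.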
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py
new file mode 100644
index 00000000..fef04d32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Afterburn <https://github.com/afterburn>
+# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pear
+short_description: Manage pear/pecl packages
+description:
+ - Manage PHP packages with the pear package manager.
+author:
+ - Jonathan Lestrelin (@jle64) <jonathan.lestrelin@gmail.com>
+options:
+ name:
+ type: str
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: true
+ aliases: [pkg]
+ state:
+ type: str
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "installed", "latest", "absent", "removed"]
+ executable:
+ type: path
+ description:
+ - Path to the pear executable.
+ prompts:
+ description:
+ - List of regular expressions that can be used to detect prompts during pear package installation and answer the expected question.
+ - Prompts will be processed in the same order as the packages list.
+ - You can optionally specify an answer to any question in the list.
+ - If no answer is provided, the list item will only contain the regular expression.
+ - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+ - You can provide a list containing items with or without an answer.
+ - A prompt list can be shorter or longer than the packages list, but a warning will be issued.
+ - To specify that a package in the middle of a list does not need a prompt, use C(null).
+ type: list
+ elements: raw
+ version_added: 0.2.0
+'''
+
+EXAMPLES = r'''
+- name: Install pear package
+ community.general.pear:
+ name: Net_URL2
+ state: present
+
+- name: Install pecl package
+ community.general.pear:
+ name: pecl/json_post
+ state: present
+
+- name: Install pecl package with expected prompt
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]
+
+- name: Install pecl package with expected prompt and an answer
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once with prompts.
+ Prompts will be processed in the same order as the packages.
+ If there are more prompts than packages, the extra prompts will be ignored.
+ If there are more packages than prompts, the remaining packages will be installed without expecting any prompt.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+ - I am a test prompt because gnupg doesn't ask anything
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once skipping the first prompt.
+ Prompts will be processed in the same order as the packages.
+ If there are more prompts than packages, the extra prompts will be ignored.
+ If there are more packages than prompts, the remaining packages will be installed without expecting any prompt.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+ - null
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Upgrade package
+ community.general.pear:
+ name: Net_URL2
+ state: latest
+
+- name: Remove packages
+ community.general.pear:
+ name: Net_URL2,pecl/json_post
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_local_version(pear_output):
+ """Take pear remoteinfo output and get the installed version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Installed ' in line:
+ installed = line.rsplit(None, 1)[-1].strip()
+ if installed == '-':
+ continue
+ return installed
+ return None
+
+
+def _get_pear_path(module):
+ if module.params['executable'] and os.path.isfile(module.params['executable']):
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('pear', True, [module.params['executable']])
+ return result
+
+
+def get_repository_version(pear_output):
+ """Take pear remote-info output and get the latest version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Latest ' in line:
+ return line.rsplit(None, 1)[-1].strip()
+ return None
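+
+# Illustrative (hypothetical) 'pear remote-info' output consumed by the two
+# parsers above:
+#   Latest      1.0.3
+#   Installed   1.0.2
+# get_local_version() returns '1.0.2' (or None when the column shows '-'),
+# while get_repository_version() returns '1.0.3'.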
+
+
+def query_package(module, name, state="present"):
+ """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed,
+ and a second boolean to indicate if the package is up-to-date."""
+ if state == "present":
+ lcmd = "%s info %s" % (_get_pear_path(module), name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s remote-info %s" % (_get_pear_path(module), name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+
+ # get the version installed locally (if any)
+ lversion = get_local_version(rstdout)
+
+ # get the version in the repository
+ rversion = get_repository_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally,
+ # and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion)
+
+ return False, False
+
+
+def remove_packages(module, packages):
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, package)
+ if not installed:
+ continue
+
+ cmd = "%s uninstall %s" % (_get_pear_path(module), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr)))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, state, packages, prompts):
+ install_c = 0
+ has_prompt = bool(prompts)
+ default_stdin = "\n"
+
+ if has_prompt:
+ nb_prompts = len(prompts)
+ nb_packages = len(packages)
+
+ if nb_prompts > 0 and (nb_prompts != nb_packages):
+ if nb_prompts > nb_packages:
+ diff = nb_prompts - nb_packages
+ msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ else:
+ diff = nb_packages - nb_prompts
+ msg = "%s packages to install but only %s prompts to expect. %s packages won't be expected to have a prompt" \
+ % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ module.warn(msg)
+
+ # Preparing prompts answer according to item type
+ tmp_prompts = []
+ for _item in prompts:
+ # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer
+ # We also expect here that the dict only has ONE key and the first key will be taken
+ if isinstance(_item, dict):
+ key = list(_item.keys())[0]
+ answer = _item[key] + "\n"
+
+ tmp_prompts.append((key, answer))
+ elif not _item:
+ tmp_prompts.append((None, default_stdin))
+ else:
+ tmp_prompts.append((_item, default_stdin))
+ prompts = tmp_prompts
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present
+ # or state == latest and is up-to-date then skip
+ installed, updated = query_package(module, package)
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if state == 'present':
+ command = 'install'
+
+ if state == 'latest':
+ command = 'upgrade'
+
+ if has_prompt and i < len(prompts):
+ prompt_regex = prompts[i][0]
+ data = prompts[i][1]
+ else:
+ prompt_regex = None
+ data = default_stdin
+
+ cmd = "%s %s %s" % (_get_pear_path(module), command, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already installed")
+
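+# Illustrative shape of the prompt normalization performed above: a prompts
+# list such as [None, {'(.*)APCu(.*)': 'yes'}, '(.*)proceed(.*)'] becomes
+# [(None, '\n'), ('(.*)APCu(.*)', 'yes\n'), ('(.*)proceed(.*)', '\n')],
+# i.e. every item ends up as a (prompt_regex, stdin_data) tuple.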
+
+def check_packages(module, packages, state):
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state))
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+ executable=dict(default=None, required=False, type='path'),
+ prompts=dict(default=None, required=False, type='list', elements='raw'),
+ ),
+ supports_check_mode=True)
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['name']:
+ pkgs = p['name'].split(',')
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['state'], pkgs, p["prompts"])
+ elif p['state'] == 'absent':
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py
new file mode 100644
index 00000000..1bee180b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pids
+description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists."
+short_description: "Retrieves process IDs list if the process is running otherwise return empty list"
+author:
+ - Saranya Sridharan (@saranyasridharan)
+requirements:
+ - psutil (Python module)
+options:
+ name:
+ description: The name of the process for which to get the PIDs.
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+# Pass the process name
+- name: Getting process IDs of the process
+ community.general.pids:
+ name: python
+ register: pids_of_python
+
+- name: Printing the process IDs obtained
+ ansible.builtin.debug:
+ msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
+'''
+
+RETURN = '''
+pids:
+ description: Process IDs of the given process
+ returned: list of none, one, or more process IDs
+ type: list
+ sample: [100,200]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ import psutil
+ HAS_PSUTIL = True
+except ImportError:
+ HAS_PSUTIL = False
+
+
+def compare_lower(a, b):
+ if a is None or b is None:
+ # this could just be "return False" but would lead to surprising behavior if both a and b are None
+ return a == b
+
+ return a.lower() == b.lower()
+
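+# e.g. compare_lower('Python', 'python') -> True, compare_lower(None, None) -> True
+# and compare_lower('python', None) -> False.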
+
+def get_pid(name):
+ pids = []
+
+ for proc in psutil.process_iter(attrs=['name', 'cmdline']):
+ if compare_lower(proc.info['name'], name) or \
+ proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
+ pids.append(proc.pid)
+
+ return pids
+
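+# A process matches above when either its name or the first element of its
+# command line equals the requested name, compared case-insensitively.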
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type="str"),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAS_PSUTIL:
+ module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
+ name = module.params["name"]
+ response = dict(pids=get_pid(name))
+ module.exit_json(**response)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py
new file mode 100644
index 00000000..23ed2545
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+ - This module will let you pause/unpause Pingdom alerts.
+author:
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+requirements:
+ - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+options:
+ state:
+ type: str
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ choices: [ "running", "paused", "started", "stopped" ]
+ checkid:
+ type: str
+ description:
+ - Pingdom ID of the check.
+ required: true
+ uid:
+ type: str
+ description:
+ - Pingdom user ID.
+ required: true
+ passwd:
+ type: str
+ description:
+ - Pingdom user password.
+ required: true
+ key:
+ type: str
+ description:
+ - Pingdom API key.
+ required: true
+notes:
+ - This module does not yet have support to add/remove checks.
+'''
+
+EXAMPLES = '''
+- name: Pause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: paused
+
+- name: Unpause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: running
+'''
+
+import traceback
+
+PINGDOM_IMP_ERR = None
+try:
+ import pingdom
+ HAS_PINGDOM = True
+except Exception:
+ PINGDOM_IMP_ERR = traceback.format_exc()
+ HAS_PINGDOM = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def pause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=True)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "paused": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def unpause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=False)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "up": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
+ checkid=dict(required=True),
+ uid=dict(required=True),
+ passwd=dict(required=True, no_log=True),
+ key=dict(required=True, no_log=True),
+ )
+ )
+
+ if not HAS_PINGDOM:
+ module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR)
+
+ checkid = module.params['checkid']
+ state = module.params['state']
+ uid = module.params['uid']
+ passwd = module.params['passwd']
+ key = module.params['key']
+
+ if (state == "paused" or state == "stopped"):
+ (rc, name, result) = pause(checkid, uid, passwd, key)
+
+ if (state == "running" or state == "started"):
+ (rc, name, result) = unpause(checkid, uid, passwd, key)
+
+ if rc != 0:
+ module.fail_json(checkid=checkid, name=name, status=result)
+
+ module.exit_json(checkid=checkid, name=name, status=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py
new file mode 100644
index 00000000..08eb2e95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# started out with AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pip_package_info
+short_description: pip package information
+description:
+ - Return information about installed pip packages
+options:
+ clients:
+ description:
+ - A list of the pip executables that will be used to get the packages.
+ They can be supplied with the full path or just the executable name, for example C(pip3.7).
+ default: ['pip']
+ required: False
+ type: list
+requirements:
+ - The requested pip executables must be installed on the target.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Just get the list from default pip
+ community.general.pip_package_info:
+
+- name: Get the facts for default pip, pip2 and pip3.6
+ community.general.pip_package_info:
+ clients: ['pip', 'pip2', 'pip3.6']
+
+- name: Get from specific paths (virtualenvs?)
+ community.general.pip_package_info:
+ clients: '/home/me/projec42/python/pip3.5'
+'''
+
+RETURN = '''
+packages:
+ description: a dictionary of installed package data
+ returned: always
+ type: dict
+ contains:
+ python:
+ description: A dictionary keyed by pip client, each entry containing a list of dicts with Python package information
+ returned: always
+ type: dict
+ sample:
+ "packages": {
+ "pip": {
+ "Babel": [
+ {
+ "name": "Babel",
+ "source": "pip",
+ "version": "2.6.0"
+ }
+ ],
+ "Flask": [
+ {
+ "name": "Flask",
+ "source": "pip",
+ "version": "1.0.2"
+ }
+ ],
+ "Flask-SQLAlchemy": [
+ {
+ "name": "Flask-SQLAlchemy",
+ "source": "pip",
+ "version": "2.3.2"
+ }
+ ],
+ "Jinja2": [
+ {
+ "name": "Jinja2",
+ "source": "pip",
+ "version": "2.10"
+ }
+ ],
+ },
+ }
+'''
+import json
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.packages import CLIMgr
+
+
+class PIP(CLIMgr):
+
+ def __init__(self, pip):
+
+ self.CLI = pip
+
+ def list_installed(self):
+ global module
+ rc, out, err = module.run_command([self._cli, 'list', '-l', '--format=json'])
+ if rc != 0:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return json.loads(out)
+
+ def get_package_details(self, package):
+ package['source'] = self.CLI
+ return package
+
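+# 'pip list --format=json' (invoked in list_installed above) emits entries such
+# as [{"name": "Jinja2", "version": "2.10"}, ...]; get_package_details() then
+# stamps each entry with the client it came from, yielding the structure shown
+# in RETURN.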
+
+def main():
+
+ # start work
+ global module
+ module = AnsibleModule(argument_spec=dict(clients={'type': 'list', 'default': ['pip']},), supports_check_mode=True)
+ packages = {}
+ results = {'packages': {}}
+ clients = module.params['clients']
+
+ found = 0
+ for pip in clients:
+
+ if not os.path.basename(pip).startswith('pip'):
+ module.warn('Skipping invalid pip client: %s' % (pip))
+ continue
+ try:
+ pip_mgr = PIP(pip)
+ if pip_mgr.is_available():
+ found += 1
+ packages[pip] = pip_mgr.get_packages()
+ except Exception as e:
+ module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e)))
+ continue
+
+ if found == 0:
+ module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients)
+
+ # return info
+ results['packages'] = packages
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py
new file mode 100644
index 00000000..266c073f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Peter Oliver <ansible@mavit.org.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pkg5
+author:
+- Peter Oliver (@mavit)
+short_description: Manages packages with the Solaris 11 Image Packaging System
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+notes:
+ - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
+options:
+ name:
+ description:
+ - An FMRI of the package(s) to be installed/removed/updated.
+ - Multiple packages may be specified, separated by C(,).
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to install (I(present), I(latest)), or remove (I(absent)) a package.
+ choices: [ absent, latest, present, installed, removed, uninstalled ]
+ default: present
+ type: str
+ accept_licenses:
+ description:
+ - Accept any licences.
+ type: bool
+ default: no
+ aliases: [ accept, accept_licences ]
+ be_name:
+ description:
+ - Creates a new boot environment with the given name.
+ type: str
+ refresh:
+ description:
+ - Refresh publishers before execution.
+ type: bool
+ default: yes
+'''
+EXAMPLES = '''
+- name: Install Vim
+ community.general.pkg5:
+ name: editor/vim
+
+- name: Install Vim without refreshing publishers
+ community.general.pkg5:
+ name: editor/vim
+ refresh: no
+
+- name: Remove finger daemon
+ community.general.pkg5:
+ name: service/network/finger
+ state: absent
+
+- name: Install several packages at once
+ community.general.pkg5:
+ name:
+ - /file/gnu-findutils
+ - /text/gnu-grep
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']),
+ accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']),
+ be_name=dict(type='str'),
+ refresh=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ packages = []
+
+ # pkg(5) FMRIs include a comma before the release number, but
+ # AnsibleModule will have split this into multiple items for us.
+ # Try to spot where this has happened and fix it.
+ for fragment in params['name']:
+ if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
+ packages[-1] += ',' + fragment
+ else:
+ packages.append(fragment)
+
+ if params['state'] in ['present', 'installed']:
+ ensure(module, 'present', packages, params)
+ elif params['state'] in ['latest']:
+ ensure(module, 'latest', packages, params)
+ elif params['state'] in ['absent', 'uninstalled', 'removed']:
+ ensure(module, 'absent', packages, params)
+
+
+def ensure(module, state, packages, params):
+ response = {
+ 'results': [],
+ 'msg': '',
+ }
+ behaviour = {
+ 'present': {
+ 'filter': lambda p: not is_installed(module, p),
+ 'subcommand': 'install',
+ },
+ 'latest': {
+ 'filter': lambda p: (
+ not is_installed(module, p) or not is_latest(module, p)
+ ),
+ 'subcommand': 'install',
+ },
+ 'absent': {
+ 'filter': lambda p: is_installed(module, p),
+ 'subcommand': 'uninstall',
+ },
+ }
+
+ if module.check_mode:
+ dry_run = ['-n']
+ else:
+ dry_run = []
+
+ if params['accept_licenses']:
+ accept_licenses = ['--accept']
+ else:
+ accept_licenses = []
+
+ if params['be_name']:
+ beadm = ['--be-name=' + module.params['be_name']]
+ else:
+ beadm = []
+
+ if params['refresh']:
+ no_refresh = []
+ else:
+ no_refresh = ['--no-refresh']
+
+ to_modify = list(filter(behaviour[state]['filter'], packages))
+ if to_modify:
+ rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify)
+ response['rc'] = rc
+ response['results'].append(out)
+ response['msg'] += err
+ response['changed'] = True
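+ # pkg exits 4 when no changes were needed ("nothing to do"), so report "no change"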
+ if rc == 4:
+ response['changed'] = False
+ response['failed'] = False
+ elif rc != 0:
+ module.fail_json(**response)
+
+ module.exit_json(**response)
+
+
+def is_installed(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '--', package])
+ return not bool(int(rc))
+
+
+def is_latest(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
+ return bool(int(rc))
+
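+# Exit codes drive both helpers above: 'pkg list' exits 0 when the package is
+# installed, so is_installed() negates it; 'pkg list -u' exits 0 only when an
+# update is available, so a non-zero code means the package is already latest.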
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py
new file mode 100644
index 00000000..95d57765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: "Peter Oliver (@mavit)"
+short_description: Manages Solaris 11 Image Packaging System publishers
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+ - This module configures which publishers a client will download IPS
+ packages from.
+options:
+ name:
+ description:
+ - The publisher's name.
+ required: true
+ aliases: [ publisher ]
+ type: str
+ state:
+ description:
+ - Whether to ensure that a publisher is present or absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ sticky:
+ description:
+ - Packages installed from a sticky repository can only receive updates
+ from that repository.
+ type: bool
+ enabled:
+ description:
+ - Is the repository enabled or disabled?
+ type: bool
+ origin:
+ description:
+ - A path or URL to the repository.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+ mirror:
+ description:
+ - A path or URL to the repository mirror.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+'''
+EXAMPLES = '''
+- name: Fetch packages for the solaris publisher direct from Oracle
+ community.general.pkg5_publisher:
+ name: solaris
+ sticky: true
+ origin: https://pkg.oracle.com/solaris/support/
+
+- name: Configure a publisher for locally-produced packages
+ community.general.pkg5_publisher:
+ name: site
+ origin: 'https://pkg.example.com/site/'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['publisher']),
+ state=dict(default='present', choices=['present', 'absent']),
+ sticky=dict(type='bool'),
+ enabled=dict(type='bool'),
+ # search_after=dict(),
+ # search_before=dict(),
+ origin=dict(type='list', elements='str'),
+ mirror=dict(type='list', elements='str'),
+ )
+ )
+
+ for option in ['origin', 'mirror']:
+ if module.params[option] == ['']:
+ module.params[option] = []
+
+ if module.params['state'] == 'present':
+ modify_publisher(module, module.params)
+ else:
+ unset_publisher(module, module.params['name'])
+
+
+def modify_publisher(module, params):
+ name = params['name']
+ existing = get_publishers(module)
+
+ if name in existing:
+ for option in ['origin', 'mirror', 'sticky', 'enabled']:
+ if params[option] is not None:
+ if params[option] != existing[name][option]:
+ return set_publisher(module, params)
+ else:
+ return set_publisher(module, params)
+
+ module.exit_json()
+
+
+def set_publisher(module, params):
+ name = params['name']
+ args = []
+
+ if params['origin'] is not None:
+ args.append('--remove-origin=*')
+ args.extend(['--add-origin=' + u for u in params['origin']])
+ if params['mirror'] is not None:
+ args.append('--remove-mirror=*')
+ args.extend(['--add-mirror=' + u for u in params['mirror']])
+
+ if params['sticky'] is not None and params['sticky']:
+ args.append('--sticky')
+ elif params['sticky'] is not None:
+ args.append('--non-sticky')
+
+ if params['enabled'] is not None and params['enabled']:
+ args.append('--enable')
+ elif params['enabled'] is not None:
+ args.append('--disable')
+
+ rc, out, err = module.run_command(
+ ["pkg", "set-publisher"] + args + [name],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def unset_publisher(module, publisher):
+ if publisher not in get_publishers(module):
+ module.exit_json()
+
+ rc, out, err = module.run_command(
+ ["pkg", "unset-publisher", publisher],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def get_publishers(module):
+ rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
+
+ lines = out.splitlines()
+ keys = lines.pop(0).lower().split("\t")
+
+ publishers = {}
+ for line in lines:
+ values = dict(zip(keys, map(unstringify, line.split("\t"))))
+ name = values['publisher']
+
+ if name not in publishers:
+ publishers[name] = dict(
+ (k, values[k]) for k in ['sticky', 'enabled']
+ )
+ publishers[name]['origin'] = []
+ publishers[name]['mirror'] = []
+
+ if values['type'] is not None:
+ publishers[name][values['type']].append(values['uri'])
+
+ return publishers
+
+
+def unstringify(val):
+ if val == "-" or val == '':
+ return None
+ elif val == "true":
+ return True
+ elif val == "false":
+ return False
+ else:
+ return val
+
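+# Hypothetical tab-separated 'pkg publisher -Ftsv' line parsed above (columns
+# follow the lower-cased header, e.g. publisher, sticky, ..., type, uri):
+#   solaris  true  false  true  origin  online  https://pkg.oracle.com/solaris/release/  -
+# unstringify() maps 'true'/'false' to booleans and '-' or '' to None before
+# the URIs are grouped into each publisher's 'origin' and 'mirror' lists.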
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py
new file mode 100644
index 00000000..2937314f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
+# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
+# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
+#
+# Written by Shaun Zinck
+# Based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgin
+short_description: Package manager for SmartOS, NetBSD, et al.
+description:
+ - "The standard package manager for SmartOS, but also usable on NetBSD
+ or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
+author:
+ - "Larry Gilbert (@L2G)"
+ - "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
+notes:
+ - "Known bug with pkgin < 0.8.0: if a package is removed and another
+ package depends on it, the other package will be silently removed as
+ well. New to Ansible 1.9: check-mode support."
+options:
+ name:
+ description:
+ - Name of package to install/remove.
+ - Multiple names may be given, separated by commas.
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - Intended state of the package
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update repository database. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade main packages to their newer versions
+ type: bool
+ default: no
+ full_upgrade:
+ description:
+ - Upgrade all packages to their newer versions
+ type: bool
+ default: no
+ clean:
+ description:
+ - Clean packages cache
+ type: bool
+ default: no
+ force:
+ description:
+ - Force package reinstall
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgin:
+ name: foo
+ state: present
+
+- name: Install specific version of foo package
+ community.general.pkgin:
+ name: foo-2.0.1
+ state: present
+
+- name: Update cache and install foo package
+ community.general.pkgin:
+ name: foo
+ update_cache: yes
+
+- name: Remove package foo
+ community.general.pkgin:
+ name: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.pkgin:
+ name: foo,bar
+ state: absent
+
+- name: Update repositories as a separate step
+ community.general.pkgin:
+ update_cache: yes
+
+- name: Upgrade main packages (equivalent to pkgin upgrade)
+ community.general.pkgin:
+ upgrade: yes
+
+- name: Upgrade all packages (equivalent to pkgin full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+
+- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+ force: yes
+
+- name: Clean packages cache (equivalent to pkgin clean)
+ community.general.pkgin:
+ clean: yes
+'''
+
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class PackageState(object):
+ PRESENT = 1
+ NOT_INSTALLED = 2
+ OUTDATED = 4
+ NOT_FOUND = 8
+
+
+def query_package(module, name):
+ """Search for the package by name and return state of the package.
+ """
+
+ # test whether '-p' (parsable) flag is supported.
+ rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
+
+ if rc == 0:
+ pflag = '-p'
+ splitchar = ';'
+ else:
+ pflag = ''
+ splitchar = ' '
+
+ # Use "pkgin search" to find the package. The regular expression will
+ # only match on the complete name.
+ rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
+
+ # rc will not be 0 unless the search was a success
+ if rc == 0:
+
+ # Search results may contain more than one line (e.g., 'emacs'), so iterate
+ # through each line to see if we have a match.
+ packages = out.split('\n')
+
+ for package in packages:
+
+ # Break up line at spaces. The first part will be the package with its
+ # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
+ # of the package:
+ # '' - not installed
+ # '<' - installed but out of date
+ # '=' - installed and up to date
+ # '>' - installed but newer than the repository version
+ pkgname_with_version, raw_state = package.split(splitchar)[0:2]
+
+ # Search for package, stripping version
+ # (results in sth like 'gcc47-libs' or 'emacs24-nox11')
+ pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
+
+ # Do not proceed unless we have a match
+ if not pkg_search_obj:
+ continue
+
+ # Grab matched string
+ pkgname_without_version = pkg_search_obj.group(1)
+
+ if name not in (pkgname_with_version, pkgname_without_version):
+ continue
+
+ # The package was found; now return its state
+ if raw_state == '<':
+ return PackageState.OUTDATED
+ elif raw_state == '=' or raw_state == '>':
+ return PackageState.PRESENT
+ else:
+ # Package found but not installed
+ return PackageState.NOT_INSTALLED
+ # no fall-through
+
+ # No packages were matched
+ return PackageState.NOT_FOUND
+
+ # Search failed
+ return PackageState.NOT_FOUND
+
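+# Made-up parsable search line (';'-separated '-p' mode) handled above:
+#   gcc47-libs-4.7.2nb4;<;GCC 4.7 shared libraries
+# The version suffix is stripped to 'gcc47-libs', and the '<' state maps to
+# PackageState.OUTDATED.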
+
+def format_action_message(module, action, count):
+ vars = {"actioned": action,
+ "count": count}
+
+ if module.check_mode:
+ message = "would have %(actioned)s %(count)d package" % vars
+ else:
+ message = "%(actioned)s %(count)d package" % vars
+
+ if count == 1:
+ return message
+ else:
+ return message + "s"
+
+
+def format_pkgin_command(module, command, package=None):
+ # Not all commands take a package argument, so cover this up by passing
+ # an empty string. Some commands (e.g. 'update') will ignore extra
+ # arguments, however this behaviour cannot be relied on for others.
+ if package is None:
+ package = ""
+
+ if module.params["force"]:
+ force = "-F"
+ else:
+ force = ""
+
+ vars = {"pkgin": PKGIN_PATH,
+ "command": command,
+ "package": package,
+ "force": force}
+
+ if module.check_mode:
+ return "%(pkgin)s -n %(command)s %(package)s" % vars
+ else:
+ return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]:
+ continue
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "remove", package))
+
+ if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages):
+
+ install_c = 0
+
+ for package in packages:
+ query_result = query_package(module, package)
+ if query_result in [PackageState.PRESENT, PackageState.OUTDATED]:
+ continue
+ elif query_result is PackageState.NOT_FOUND:
+ module.fail_json(msg="failed to find package %s for installation" % package)
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "install", package))
+
+ if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "update"))
+
+ if rc == 0:
+ if re.search('database for.*is up-to-date\n$', out):
+ return False, "database is up-to-date"
+ else:
+ return True, "updated repository database"
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def do_upgrade_packages(module, full=False):
+ if full:
+ cmd = "full-upgrade"
+ else:
+ cmd = "upgrade"
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, cmd))
+
+ if rc == 0:
+ if re.search('^nothing to do.\n$', out):
+ module.exit_json(changed=False, msg="nothing left to upgrade")
+ else:
+ module.fail_json(msg="could not %s packages" % cmd)
+
+
+def upgrade_packages(module):
+ do_upgrade_packages(module)
+
+
+def full_upgrade_packages(module):
+ do_upgrade_packages(module, True)
+
+
+def clean_cache(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "clean"))
+
+ if rc == 0:
+ # There's no indication if 'clean' actually removed anything,
+ # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches")
+ else:
+ module.fail_json(msg="could not clean package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ full_upgrade=dict(default=False, type='bool'),
+ clean=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool')),
+ required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+ supports_check_mode=True)
+
+ global PKGIN_PATH
+ PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p["update_cache"]:
+ c, msg = update_package_db(module)
+ if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+ module.exit_json(changed=c, msg=msg)
+
+ if p["upgrade"]:
+ upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded packages')
+
+ if p["full_upgrade"]:
+ full_upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded all packages')
+
+ if p["clean"]:
+ clean_cache(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='cleaned caches')
+
+ pkgs = p["name"]
+
+ if p["state"] == "present":
+ install_packages(module, pkgs)
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py
new file mode 100644
index 00000000..d5ed4a0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+ - Manage binary packages for FreeBSD using 'pkgng', which is available in versions after 9.0.
+options:
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ - "With I(name=*), I(state: latest) will operate, but I(state: present) and I(state: absent) will be noops."
+ - >
+ Warning: In Ansible 2.9 and earlier this module had a misfeature
+ where I(name=*) with I(state: latest) or I(state: present) would
+ install every package from every package repository, filling up
+ the machine's disk. Avoid using them unless you are certain that
+ your role will only be used with newer versions.
+ required: true
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ - 'Note: "latest" added in 2.7'
+ choices: [ 'present', 'latest', 'absent' ]
+ required: false
+ default: present
+ type: str
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: no
+ annotation:
+ description:
+ - A comma-separated list of keyvalue-pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ type: str
+ pkgsite:
+ description:
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+ - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ type: str
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) or I(jail) options.
+ required: false
+ type: path
+ chroot:
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) or I(jail) options.
+ required: false
+ type: path
+ jail:
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with I(chroot) or I(rootdir) options.
+ type: str
+ autoremove:
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: no
+ ignore_osver:
+ description:
+ - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches.
+ - Defines the C(IGNORE_OSVERSION) environment variable.
+ required: false
+ type: bool
+ default: no
+ version_added: 1.3.0
+author: "bleader (@bleader)"
+notes:
+ - When using pkgsite, note that packages already in the cache will not be downloaded again.
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgng:
+ name: foo
+ state: present
+
+- name: Annotate package foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+ community.general.pkgng:
+ name: baz
+ state: latest
+
+- name: Upgrade all installed packages (see warning for the name option first!)
+ community.general.pkgng:
+ name: "*"
+ state: latest
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+ if rc == 0:
+ return True
+
+ return False
+
+
+def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
+
+ # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed
+ # rc = 1, updates available
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name))
+ else:
+ rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name))
+
+ if rc == 1:
+ return True
+
+ return False
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = [int(x) for x in re.split(r'[\._]', out)]
+
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
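+# e.g. if 'pkg -v' reports '1.0.9', pkgng_older_than(module, path, [1, 1, 4])
+# compares [1, 0, 9] with [1, 1, 4] element by element and returns True,
+# enabling the legacy PACKAGESITE code paths used below.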
+
+def upgrade_packages(module, pkgng_path, dir_arg):
+ # Run a 'pkg upgrade', updating all packages.
+ upgraded_c = 0
+
+ cmd = "%s %s upgrade -y" % (pkgng_path, dir_arg)
+ if module.check_mode:
+ cmd += " -n"
+ rc, out, err = module.run_command(cmd)
+
+ match = re.search('^Number of packages to be upgraded: ([0-9]+)', out, re.MULTILINE)
+ if match:
+ upgraded_c = int(match.group(1))
+
+ if upgraded_c > 0:
+ return (True, "updated %s package(s)" % upgraded_c, out, err)
+ return (False, "no packages need upgrades", out, err)
+
+
+def remove_packages(module, pkgng_path, packages, dir_arg):
+ remove_c = 0
+ stdout = ""
+ stderr = ""
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr)
+
+ remove_c += 1
+
+ if remove_c > 0:
+ return (True, "removed %s package(s)" % remove_c, stdout, stderr)
+
+ return (False, "package(s) already absent", stdout, stderr)
+
+
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state, ignoreosver):
+ install_c = 0
+ stdout = ""
+ stderr = ""
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+ if pkgsite != "":
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ # This environment variable skips mid-install prompts,
+ # setting them to their default values.
+ batch_var = 'env BATCH=yes'
+
+ if ignoreosver:
+ # Ignore FreeBSD OS version check,
+ # useful on -STABLE and -CURRENT branches.
+ batch_var = batch_var + ' IGNORE_OSVERSION=yes'
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s %s update" % (batch_var, pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr)
+
+ for package in packages:
+ already_installed = query_package(module, pkgng_path, package, dir_arg)
+ if already_installed and state == "present":
+ continue
+
+ update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+ if not update_available and already_installed and state == "latest":
+ continue
+
+ if not module.check_mode:
+ if already_installed:
+ action = "upgrade"
+ else:
+ action = "install"
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stdout=stdout, stderr=stderr)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c), stdout, stderr)
+
+ return (False, "package(s) already %s" % (state), stdout, stderr)
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # No such tag
+ module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
+ annotations = list(map(lambda _annotation:
+ re.match(r'(?P<operation>[+\-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict(),
+ re.split(r',', annotation)))
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+ if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
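+# e.g. the annotation string '+test1=baz,-test2,:test3=foobar' from EXAMPLES is
+# split into three operation dicts and dispatched to annotation_add,
+# annotation_delete and annotation_modify respectively.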
+
+def autoremove_packages(module, pkgng_path, dir_arg):
+ stdout = ""
+ stderr = ""
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return (False, "no package(s) to autoremove", stdout, stderr)
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+
+ return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ cached=dict(default=False, type='bool'),
+ ignore_osver=dict(default=False, required=False, type='bool'),
+ annotation=dict(default="", required=False),
+ pkgsite=dict(default="", required=False),
+ rootdir=dict(default="", required=False, type='path'),
+ chroot=dict(default="", required=False, type='path'),
+ jail=dict(default="", required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ stdout = ""
+ stderr = ""
+ dir_arg = ""
+
+ if p["rootdir"] != "":
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["ignore_osver"]:
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 11, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater")
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
+
+ if p["jail"] != "":
+ dir_arg = '--jail %s' % (p["jail"])
+
+ if pkgs == ['*'] and p["state"] == 'latest':
+ # Operate on all installed packages. Only state: latest makes sense here.
+ _changed, _msg, _stdout, _stderr = upgrade_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ # Operate on named packages
+ named_packages = [pkg for pkg in pkgs if pkg != '*']
+ if p["state"] in ("present", "latest") and named_packages:
+ _changed, _msg, _out, _err = install_packages(module, pkgng_path, named_packages,
+ p["cached"], p["pkgsite"], dir_arg,
+ p["state"], p["ignore_osver"])
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent" and named_packages:
+ _changed, _msg, _out, _err = remove_packages(module, pkgng_path, named_packages, dir_arg)
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg, _stdout, _stderr = autoremove_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py
new file mode 100644
index 00000000..9ec0ebaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
+# based on svr4pkg by
+# Boyd Adamson <boyd () boydadamson.com> (2012)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: pkgutil
+short_description: OpenCSW package management on Solaris
+description:
+- This module installs, updates and removes packages from the OpenCSW project for Solaris.
+- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
+- See U(https://www.opencsw.org/) for more information about the project.
+author:
+- Alexander Winkler (@dermute)
+- David Ponessa (@scathatheworm)
+options:
+ name:
+ description:
+ - The name of the package.
+ - When using C(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil.
+ type: list
+ required: true
+ elements: str
+ aliases: [ pkg ]
+ site:
+ description:
+ - The repository path to install the package from.
+ - Its global definition is in C(/etc/opt/csw/pkgutil.conf).
+ required: false
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages.
+ - The upgrade (C(latest)) operation will update/install the packages to the latest version available.
+ type: str
+ required: true
+ choices: [ absent, installed, latest, present, removed ]
+ update_catalog:
+ description:
+ - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(yes).
+ type: bool
+ default: no
+ force:
+ description:
+ - To allow the update process to downgrade packages to match what is present in the repository, set this to C(yes).
+ - This is useful for rolling back to stable from testing, or similar operations.
+ type: bool
+ default: no
+ version_added: 1.2.0
+notes:
+- In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode.
+'''
+
+EXAMPLES = r'''
+- name: Install a package
+ community.general.pkgutil:
+ name: CSWcommon
+ state: present
+
+- name: Install a package from a specific repository
+ community.general.pkgutil:
+ name: CSWnrpe
+ site: ftp://myinternal.repo/opencsw/kiel
+ state: latest
+
+- name: Remove a package
+ community.general.pkgutil:
+ name: CSWtop
+ state: absent
+
+- name: Install several packages
+ community.general.pkgutil:
+ name:
+ - CSWsudo
+ - CSWtop
+ state: present
+
+- name: Update all packages
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+
+- name: Update all packages and force versions to match latest in catalog
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+ force: yes
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def packages_not_installed(module, names):
+ ''' Check if each package is installed and return list of the ones absent '''
+ pkgs = []
+ for pkg in names:
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc != 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_installed(module, names):
+ ''' Check if each package is installed and return list of the ones present '''
+ pkgs = []
+ for pkg in names:
+ if not pkg.startswith('CSW'):
+ continue
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc == 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_not_latest(module, names, site, update_catalog):
+ ''' Check status of each package and return list of the ones with an upgrade available '''
+ cmd = ['pkgutil']
+ if update_catalog:
+ cmd.append('-U')
+ cmd.append('-c')
+ if site is not None:
+ cmd.extend(['-t', site])
+ if names != ['*']:
+ cmd.extend(names)
+ rc, out, err = run_command(module, cmd)
+
+ # Find packages in the catalog which are not up to date
+ packages = []
+ for line in out.split('\n')[1:-1]:
+ if 'catalog' not in line and 'SAME' not in line:
+ packages.append(line.split(' ')[0])
+
+ # Remove duplicates
+ return list(set(packages))
+
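+# For reference (an assumption based on the parsing above): 'pkgutil -c'
+# prints one line per package, roughly
+#
+#   package  installed-version  catalog-version
+#
+# plus header and catalog lines; lines containing 'SAME' mean the package
+# is already current, so only the remaining package names are collected.
+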
+
+def run_command(module, cmd, **kwargs):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+ return module.run_command(cmd, **kwargs)
+
+
+def package_install(module, state, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-iy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+ cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def package_upgrade(module, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-uy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+ cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd += pkgs
+ return run_command(module, cmd)
+
+
+def package_uninstall(module, pkgs):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-ry')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ site=dict(type='str'),
+ update_catalog=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ site = module.params['site']
+ update_catalog = module.params['update_catalog']
+ force = module.params['force']
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ if state in ['installed', 'present']:
+ # Fail with an explicit error when trying to "install" '*'
+ if name == ['*']:
+ module.fail_json(msg="Can not use 'state: present' with name: '*'")
+
+ # Build list of packages that are actually not installed from the ones requested
+ pkgs = packages_not_installed(module, name)
+
+ # If the package list is empty then all packages are already present
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['latest']:
+ # When using latest for *
+ if name == ['*']:
+ # Check for packages that are actually outdated
+ pkgs = packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list comes up empty, everything is already up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ # If there are packages to update, just empty the list and run the command without it;
+ # pkgutil's logic is to update everything when run without package names
+ pkgs = []
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+ else:
+ # Build list of packages that are either outdated or not installed
+ pkgs = packages_not_installed(module, name)
+ pkgs += packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list is empty that means all packages are installed and up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['absent', 'removed']:
+ # Build list of packages requested for removal that are actually present
+ pkgs = packages_installed(module, name)
+
+ # If the list is empty, no packages need to be removed
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_uninstall(module, pkgs)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ if rc is None:
+ # pkgutil was not executed because the package was already present/absent/up to date
+ result['changed'] = False
+ elif rc == 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ result['failed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py
new file mode 100644
index 00000000..1f0fdc68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py
@@ -0,0 +1,539 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, William L Thomson Jr
+# (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann <sokann@gmail.com>
+# Modified by William L. Thomson Jr. <wlt@o-sinc.com>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portage
+short_description: Package manager for Gentoo
+description:
+ - Manages Gentoo packages
+
+options:
+ package:
+ description:
+ - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ aliases: [name]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - State of the package atom
+ default: "present"
+ choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
+ type: str
+
+ update:
+ description:
+ - Update packages to the best version available (--update)
+ type: bool
+ default: no
+
+ deep:
+ description:
+ - Consider the entire dependency tree of packages (--deep)
+ type: bool
+ default: no
+
+ newuse:
+ description:
+ - Include installed packages where USE flags have changed (--newuse)
+ type: bool
+ default: no
+
+ changed_use:
+ description:
+ - Include installed packages where USE flags have changed, except when flags that the user has not enabled are added or removed (--changed-use)
+ type: bool
+ default: no
+
+ oneshot:
+ description:
+ - Do not add the packages to the world file (--oneshot)
+ type: bool
+ default: no
+
+ noreplace:
+ description:
+ - Do not re-emerge installed packages (--noreplace)
+ type: bool
+ default: yes
+
+ nodeps:
+ description:
+ - Only merge packages but not their dependencies (--nodeps)
+ type: bool
+ default: no
+
+ onlydeps:
+ description:
+ - Only merge packages' dependencies but not the packages (--onlydeps)
+ type: bool
+ default: no
+
+ depclean:
+ description:
+ - Remove packages not needed by explicitly merged packages (--depclean)
+ - If no package is specified, clean up the world's dependencies
+ - Otherwise, --depclean serves as a dependency-aware version of --unmerge
+ type: bool
+ default: no
+
+ quiet:
+ description:
+ - Run emerge in quiet mode (--quiet)
+ type: bool
+ default: no
+
+ verbose:
+ description:
+ - Run emerge in verbose mode (--verbose)
+ type: bool
+ default: no
+
+ sync:
+ description:
+ - Sync package repositories first
+ - If yes, perform "emerge --sync"
+ - If web, perform "emerge-webrsync"
+ choices: [ "web", "yes", "no" ]
+ type: str
+
+ getbinpkgonly:
+ description:
+ - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+ version_added: 1.3.0
+
+ getbinpkg:
+ description:
+ - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+
+ usepkgonly:
+ description:
+ - Merge only binaries (no compiling).
+ type: bool
+ default: no
+
+ usepkg:
+ description:
+ - Tries to use the binary package(s) in the locally available packages directory.
+ type: bool
+ default: no
+
+ keepgoing:
+ description:
+ - Continue as much as possible after an error.
+ type: bool
+ default: no
+
+ jobs:
+ description:
+ - Specifies the number of packages to build simultaneously.
+ - "Since version 2.6: Value of 0 or False resets any previously added"
+ - --jobs setting values
+ type: int
+
+ loadavg:
+ description:
+ - Specifies that no new builds should be started if there are other builds running and the load average is at least LOAD
+ - "Since version 2.6: Value of 0 or False resets any previously added --load-average setting values"
+ type: float
+
+ quietbuild:
+ description:
+ - Redirect all build output to logs alone, and do not display it on stdout (--quiet-build)
+ type: bool
+ default: no
+
+ quietfail:
+ description:
+ - Suppresses display of the build log on stdout (--quiet-fail)
+ - Only the die message and the path of the build log will be displayed on stdout
+ type: bool
+ default: no
+
+requirements: [ gentoolkit ]
+author:
+ - "William L Thomson Jr (@wltjr)"
+ - "Yap Sok Ann (@sayap)"
+ - "Andrew Udvare (@Tatsh)"
+'''
+
+EXAMPLES = '''
+- name: Make sure package foo is installed
+ community.general.portage:
+ package: foo
+ state: present
+
+- name: Make sure package foo is not installed
+ community.general.portage:
+ package: foo
+ state: absent
+
+- name: Update package foo to the latest version (OS-specific alternative to state=latest)
+ community.general.portage:
+ package: foo
+ update: yes
+
+- name: Install package foo using PORTAGE_BINHOST setup
+ community.general.portage:
+ package: foo
+ getbinpkg: yes
+
+- name: Re-install world from binary packages only and do not allow any compiling
+ community.general.portage:
+ package: '@world'
+ usepkgonly: yes
+
+- name: Sync repositories and update world
+ community.general.portage:
+ package: '@world'
+ update: yes
+ deep: yes
+ sync: yes
+
+- name: Remove unneeded packages
+ community.general.portage:
+ depclean: yes
+
+- name: Remove package foo if it is not explicitly needed
+ community.general.portage:
+ package: foo
+ state: absent
+ depclean: yes
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def query_package(module, package, action):
+ if package.startswith('@'):
+ return query_set(module, package, action)
+ return query_atom(module, package, action)
+
+
+def query_atom(module, atom, action):
+ cmd = '%s list %s' % (module.equery_path, atom)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def query_set(module, set, action):
+ system_sets = [
+ '@live-rebuild',
+ '@module-rebuild',
+ '@preserved-rebuild',
+ '@security',
+ '@selected',
+ '@system',
+ '@world',
+ '@x11-module-rebuild',
+ ]
+
+ if set in system_sets:
+ if action == 'unmerge':
+ module.fail_json(msg='set %s cannot be removed' % set)
+ return False
+
+ world_sets_path = '/var/lib/portage/world_sets'
+ if not os.path.exists(world_sets_path):
+ return False
+
+ cmd = 'grep %s %s' % (set, world_sets_path)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def sync_repositories(module, webrsync=False):
+ if module.check_mode:
+ module.exit_json(msg='check mode not supported by sync')
+
+ if webrsync:
+ webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
+ cmd = '%s --quiet' % webrsync_path
+ else:
+ cmd = '%s --sync --quiet --ask=n' % module.emerge_path
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg='could not sync package repositories')
+
+
+# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
+# in one go. If that is not desirable, split the packages into multiple tasks
+# instead of joining them together with comma.
+
+
+def emerge_packages(module, packages):
+ """Run emerge command against given list of atoms."""
+ p = module.params
+
+ if p['noreplace'] and not (p['update'] or p['state'] == 'latest'):
+ for package in packages:
+ if not query_package(module, package, 'emerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already present.')
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Packages would be installed.')
+
+ args = []
+ emerge_flags = {
+ 'update': '--update',
+ 'deep': '--deep',
+ 'newuse': '--newuse',
+ 'changed_use': '--changed-use',
+ 'oneshot': '--oneshot',
+ 'noreplace': '--noreplace',
+ 'nodeps': '--nodeps',
+ 'onlydeps': '--onlydeps',
+ 'quiet': '--quiet',
+ 'verbose': '--verbose',
+ 'getbinpkgonly': '--getbinpkgonly',
+ 'getbinpkg': '--getbinpkg',
+ 'usepkgonly': '--usepkgonly',
+ 'usepkg': '--usepkg',
+ 'keepgoing': '--keep-going',
+ 'quietbuild': '--quiet-build',
+ 'quietfail': '--quiet-fail',
+ }
+ for flag, arg in emerge_flags.items():
+ if p[flag]:
+ args.append(arg)
+
+ if p['state'] == 'latest':
+ args.append("--update")
+
+ emerge_flags = {
+ 'jobs': '--jobs',
+ 'loadavg': '--load-average',
+ }
+
+ for flag, arg in emerge_flags.items():
+ flag_val = p[flag]
+
+ if flag_val is None:
+ # Fall back to the default: don't use this argument at all.
+ continue
+
+ if not flag_val:
+ # If the value is 0 or 0.0: add the flag, but not the value.
+ args.append(arg)
+ continue
+
+ # Add the --flag=value pair.
+ args.extend((arg, to_native(flag_val)))
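+
+ # Illustration (not in the original): jobs=4 yields ['--jobs', '4'],
+ # while jobs=0 yields just ['--jobs'], which emerge treats as a reset
+ # of any previously configured --jobs value.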
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not installed.',
+ )
+
+ # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
+ # this error
+ if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \
+ and 'Permission denied (publickey).' in err:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+ 'and your SSH authorized_keys file',
+ )
+
+ changed = True
+ for line in out.splitlines():
+ if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+ msg = 'Packages installed.'
+ break
+ elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+ msg = 'Packages would be installed.'
+ break
+ else:
+ changed = False
+ msg = 'No packages installed.'
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg=msg,
+ )
+
+
+def unmerge_packages(module, packages):
+ p = module.params
+
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--unmerge']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not removed.',
+ )
+
+ module.exit_json(
+ changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages removed.',
+ )
+
+
+def cleanup_packages(module, packages):
+ p = module.params
+
+ if packages:
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--depclean']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ removed = 0
+ for line in out.splitlines():
+ if not line.startswith('Number removed:'):
+ continue
+ parts = line.split(':')
+ removed = int(parts[1].strip())
+ changed = removed > 0
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Depclean completed.',
+ )
+
+
+def run_emerge(module, packages, *args):
+ args = list(args)
+
+ args.append('--ask=n')
+ if module.check_mode:
+ args.append('--pretend')
+
+ cmd = [module.emerge_path] + args + packages
+ return cmd, module.run_command(cmd)
+
+
+portage_present_states = ['present', 'emerged', 'installed', 'latest']
+portage_absent_states = ['absent', 'unmerged', 'removed']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ package=dict(type='list', elements='str', default=None, aliases=['name']),
+ state=dict(
+ default=portage_present_states[0],
+ choices=portage_present_states + portage_absent_states,
+ ),
+ update=dict(default=False, type='bool'),
+ deep=dict(default=False, type='bool'),
+ newuse=dict(default=False, type='bool'),
+ changed_use=dict(default=False, type='bool'),
+ oneshot=dict(default=False, type='bool'),
+ noreplace=dict(default=True, type='bool'),
+ nodeps=dict(default=False, type='bool'),
+ onlydeps=dict(default=False, type='bool'),
+ depclean=dict(default=False, type='bool'),
+ quiet=dict(default=False, type='bool'),
+ verbose=dict(default=False, type='bool'),
+ sync=dict(default=None, choices=['yes', 'web', 'no']),
+ getbinpkgonly=dict(default=False, type='bool'),
+ getbinpkg=dict(default=False, type='bool'),
+ usepkgonly=dict(default=False, type='bool'),
+ usepkg=dict(default=False, type='bool'),
+ keepgoing=dict(default=False, type='bool'),
+ jobs=dict(default=None, type='int'),
+ loadavg=dict(default=None, type='float'),
+ quietbuild=dict(default=False, type='bool'),
+ quietfail=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['package', 'sync', 'depclean']],
+ mutually_exclusive=[
+ ['nodeps', 'onlydeps'],
+ ['quiet', 'verbose'],
+ ['quietbuild', 'verbose'],
+ ['quietfail', 'verbose'],
+ ],
+ supports_check_mode=True,
+ )
+
+ module.emerge_path = module.get_bin_path('emerge', required=True)
+ module.equery_path = module.get_bin_path('equery', required=True)
+
+ p = module.params
+
+ if p['sync'] and p['sync'].strip() != 'no':
+ sync_repositories(module, webrsync=(p['sync'] == 'web'))
+ if not p['package']:
+ module.exit_json(msg='Sync successfully finished.')
+
+ packages = []
+ if p['package']:
+ packages.extend(p['package'])
+
+ if p['depclean']:
+ if packages and p['state'] not in portage_absent_states:
+ module.fail_json(
+ msg='Depclean can only be used with package when the state is '
+ 'one of: %s' % portage_absent_states,
+ )
+
+ cleanup_packages(module, packages)
+
+ elif p['state'] in portage_present_states:
+ emerge_packages(module, packages)
+
+ elif p['state'] in portage_absent_states:
+ unmerge_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py
new file mode 100644
index 00000000..d1c33cc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, berenddeboer
+# Written by berenddeboer <berend@pobox.com>
+# Based on pkgng module written by bleader <bleader at ratonland.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portinstall
+short_description: Install packages from FreeBSD's ports system
+description:
+ - Manage packages for FreeBSD using 'portinstall'.
+options:
+ name:
+ description:
+ - name of package to install/remove
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ type: str
+ use_packages:
+ description:
+ - use packages instead of ports whenever available
+ type: bool
+ required: false
+ default: yes
+author: "berenddeboer (@berenddeboer)"
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.portinstall:
+ name: foo
+ state: present
+
+- name: Install package security/cyrus-sasl2-saslauthd
+ community.general.portinstall:
+ name: security/cyrus-sasl2-saslauthd
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.portinstall:
+ name: foo,bar
+ state: absent
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def query_package(module, name):
+
+ pkg_info_path = module.get_bin_path('pkg_info', False)
+
+ # Assume that if we have pkg_info, we haven't upgraded to pkgng
+ if pkg_info_path:
+ pkgng = False
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
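+ # pkg_glob expands the (possibly wildcard) package name; its output is
+ # substituted into 'pkg_info -e' via backticks, hence use_unsafe_shell.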
+ rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True)
+ else:
+ pkgng = True
+ pkg_info_path = module.get_bin_path('pkg', True)
+ pkg_info_path = pkg_info_path + " info"
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))
+
+ found = rc == 0
+
+ if not found:
+ # databases/mysql55-client installs as mysql-client, so try solving
+ # that the ugly way. Pity FreeBSD doesn't have a foolproof way of
+ # checking whether a package is installed
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ # Both the pkgng and legacy tools take the same query form here
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+
+ found = rc == 0
+
+ return found
+
+
+def matching_packages(module, name):
+
+ ports_glob_path = module.get_bin_path('ports_glob', True)
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+ # counts the number of packages found
+ occurrences = out.count('\n')
+ if occurrences == 0:
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+ occurrences = out.count('\n')
+ return occurrences
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+ # If pkg_delete not found, we assume pkgng
+ pkg_delete_path = module.get_bin_path('pkg_delete', False)
+ if not pkg_delete_path:
+ pkg_delete_path = module.get_bin_path('pkg', True)
+ pkg_delete_path = pkg_delete_path + " delete -y"
+
+ # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
+
+ if query_package(module, package):
+ name_without_digits = re.sub('[0-9]', '', package)
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
+ shlex_quote(name_without_digits)),
+ use_unsafe_shell=True)
+ if query_package(module, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages, use_packages):
+
+ install_c = 0
+
+ # If portinstall is not found, install it automatically (it ships with portupgrade)
+ portinstall_path = module.get_bin_path('portinstall', False)
+ if not portinstall_path:
+ pkg_path = module.get_bin_path('pkg', False)
+ if pkg_path:
+ module.run_command("pkg install -y portupgrade")
+ portinstall_path = module.get_bin_path('portinstall', True)
+
+ if use_packages:
+ portinstall_params = "--use-packages"
+ else:
+ portinstall_params = ""
+
+ for package in packages:
+ if query_package(module, package):
+ continue
+
+ # TODO: check how many match
+ matches = matching_packages(module, package)
+ if matches == 1:
+ rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+ if not query_package(module, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+ elif matches == 0:
+ module.fail_json(msg="no matches for package %s" % (package))
+ else:
+ module.fail_json(msg="%s matches found for package name %s" % (matches, package))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], required=True),
+ use_packages=dict(type='bool', default=True)))
+
+ p = module.params
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] == "present":
+ install_packages(module, pkgs, p["use_packages"])
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py
new file mode 100644
index 00000000..bf66f3d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_copy
+short_description: Copy data between a file/program and a PostgreSQL table
+description:
+- Copy data between a file/program and a PostgreSQL table.
+
+options:
+ copy_to:
+ description:
+ - Copy the contents of a table to a file.
+ - Can also copy the results of a SELECT query.
+ - Mutually exclusive with I(copy_from) and I(dst).
+ type: path
+ aliases: [ to ]
+ copy_from:
+ description:
+ - Copy data from a file to a table (appending the data to whatever is in the table already).
+ - Mutually exclusive with I(copy_to) and I(src).
+ type: path
+ aliases: [ from ]
+ src:
+ description:
+ - Copy data from the table or SELECT query given in I(src) to the file given in I(copy_to).
+ - Used with I(copy_to) only.
+ type: str
+ aliases: [ source ]
+ dst:
+ description:
+ - Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
+ - Used with I(copy_from) only.
+ type: str
+ aliases: [ destination ]
+ columns:
+ description:
+ - List of column names for the src/dst table to COPY FROM/TO.
+ type: list
+ elements: str
+ aliases: [ column ]
+ program:
+ description:
+ - Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
+ - See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: bool
+ default: no
+ options:
+ description:
+ - Options of COPY command.
+ - See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: dict
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases: [ login_db ]
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports PostgreSQL version 9.4+.
+- The COPY command is allowed only to database superusers.
+- If I(check_mode=yes), the module just checks the src/dst table availability
+ and returns the COPY query that has not actually been executed.
+- If I(check_mode=yes) and the source has been passed as SQL, the module
+ executes it and rolls the transaction back, but pay attention: this can
+ affect database performance (e.g., if the SQL collects a lot of data).
+
+seealso:
+- name: COPY command reference
+ description: Complete reference of the COPY command documentation.
+ link: https://www.postgresql.org/docs/current/sql-copy.html
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: acme
+
+- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.csv
+ dst: acme
+ columns: id,name
+ options:
+ format: csv
+
+- name: >
+ Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
+ The NULL values are specified as N
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: bar
+ options:
+ delimiter: '|'
+ null: 'N'
+
+- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
+ community.general.postgresql_copy:
+ src: acme
+ copy_to: /tmp/data.txt
+
+- name: Copy data from SELECT query to /tmp/data.csv in CSV format
+ community.general.postgresql_copy:
+ src: 'SELECT * FROM acme'
+ copy_to: /tmp/data.csv
+ options:
+ format: csv
+
+- name: Copy CSV data from my_table to gzip
+ community.general.postgresql_copy:
+ src: my_table
+ copy_to: 'gzip > /tmp/data.csv.gz'
+ program: yes
+ options:
+ format: csv
+
+- name: >
+ Copy data from columns id, name of table bar to /tmp/data.txt.
+ Output format is text, vertical-bar-separated, NULL as N
+ community.general.postgresql_copy:
+ src: bar
+ columns:
+ - id
+ - name
+ copy_to: /tmp/data.txt
+ options:
+ delimiter: '|'
+ null: 'N'
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
+src:
+ description: Data source.
+ returned: always
+ type: str
+ sample: "mytable"
+dst:
+ description: Data destination.
+ returned: always
+ type: str
+ sample: "/tmp/data.csv"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+class PgCopyData(object):
+
+ """Implements behavior of COPY FROM, COPY TO PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ changed (bool) -- whether something was changed after execution
+ executed_queries (list) -- executed queries
+ dst (str) -- data destination table (when copy_from)
+ src (str) -- data source table (when copy_to)
+ opt_need_quotes (tuple) -- values of these options must be passed
+ to SQL in quotes
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.changed = False
+ self.dst = ''
+ self.src = ''
+ self.opt_need_quotes = (
+ 'DELIMITER',
+ 'NULL',
+ 'QUOTE',
+ 'ESCAPE',
+ 'ENCODING',
+ )
+
+ def copy_from(self):
+ """Implements COPY FROM command behavior."""
+ self.src = self.module.params['copy_from']
+ self.dst = self.module.params['dst']
+
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('FROM')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.src)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.dst)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def copy_to(self):
+ """Implements COPY TO command behavior."""
+ self.src = self.module.params['src']
+ self.dst = self.module.params['copy_to']
+
+ if 'SELECT ' in self.src.upper():
+ # If src is SQL SELECT statement:
+ query_fragments = ['COPY (%s)' % self.src]
+ else:
+ # If src is a table:
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('TO')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.dst)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.src)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def __transform_options(self):
+ """Transform options dict into a suitable string."""
+ for (key, val) in iteritems(self.module.params['options']):
+ if key.upper() in self.opt_need_quotes:
+ self.module.params['options'][key] = "'%s'" % val
+
+ opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
+ return '(%s)' % ', '.join(opt)
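+
+ # Illustration only: options={'format': 'csv', 'delimiter': ','} renders
+ # as "(format csv, delimiter ',')" -- DELIMITER appears in opt_need_quotes,
+ # so its value is quoted, while the 'format' value is passed bare.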
+
+ def __check_table(self, table):
+ """Check table or SQL in transaction mode for check_mode.
+
+ Return True if it is OK.
+
+ Arguments:
+ table (str) - Table name that needs to be checked.
+ It can be SQL SELECT statement that was passed
+ instead of the table name.
+ """
+ if 'SELECT ' in table.upper():
+ # In this case table is actually SQL SELECT statement.
+ # If SQL fails, it's handled by exec_sql():
+ exec_sql(self, table, add_to_executed=False)
+ # If exec_sql did not fail, it means all is OK:
+ return True
+
+ exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
+ add_to_executed=False)
+ # If SQL was executed successfully:
+ return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ copy_to=dict(type='path', aliases=['to']),
+ copy_from=dict(type='path', aliases=['from']),
+ src=dict(type='str', aliases=['source']),
+ dst=dict(type='str', aliases=['destination']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ options=dict(type='dict'),
+ program=dict(type='bool', default=False),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['copy_from', 'copy_to'],
+ ['copy_from', 'src'],
+ ['copy_to', 'dst'],
+ ]
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ opt_list = None
+ if module.params['options']:
+ opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]
+
+ check_input(module,
+ module.params['copy_to'],
+ module.params['copy_from'],
+ module.params['src'],
+ module.params['dst'],
+ opt_list,
+ module.params['columns'],
+ module.params['session_role'])
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+ if module.params.get('copy_from') and not module.params.get('dst'):
+ module.fail_json(msg='dst param is necessary with copy_from')
+
+ elif module.params.get('copy_to') and not module.params.get('src'):
+ module.fail_json(msg='src param is necessary with copy_to')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ data = PgCopyData(module, cursor)
+
+ # Note: parameters like dst, src, etc. are read from the module object
+ # by the PgCopyData object, so there is no need to pass them to the
+ # methods below.
+ # Note: check mode is implemented inside the methods below
+ # by checking passed module.check_mode arg.
+ if module.params.get('copy_to'):
+ data.copy_to()
+
+ elif module.params.get('copy_from'):
+ data.copy_from()
+
+ # Finish:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Return some values:
+ module.exit_json(
+ changed=data.changed,
+ queries=data.executed_queries,
+ src=data.src,
+ dst=data.dst,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py
new file mode 100644
index 00000000..8fde39ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+options:
+ name:
+ description:
+ - Name of the database to add or remove
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect (if needed)
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database
+ type: str
+ template:
+ description:
+ - Template used to create the database
+ type: str
+ encoding:
+ description:
+ - Encoding of the database
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+ Note that some PostgreSQL versions of pg_dump, which is an embedded PostgreSQL utility used by the module,
+ return rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf, etc.),
+ so the module returns changed=True although the dump has not actually been done. Please make sure that
+ your version of pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+ - Supported formats for dump and restore include C(.sql) and C(.tar)
+ - "Restore program is selected by target file format: C(.tar) and C(.pgc) are handled by pg_restore, other with pgsql."
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ target_opts:
+ description:
+ - Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ maintenance_db:
+ description:
+ - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+ - Cannot be used with dump-file-format-related arguments like C(--format=d).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(owner), I(conn_limit), I(encoding),
+ I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+notes:
+- States C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ community.general.postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale # settings.
+ community.general.postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ community.general.postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+- name: Dump only table1 and table2 from the acme database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/table1_table2.sql
+ target_opts: "-t table1 -t table2"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ community.general.postgresql_db:
+ name: foo
+ tablespace: bar
+'''
+
+RETURN = r'''
+executed_commands:
+ description: List of commands which tried to run.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '0.2.0'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible_collections.community.general.plugins.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ SQLParseError,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def set_owner(cursor, db, owner):
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (db, owner)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def set_conn_limit(cursor, db, conn_limit):
+ query = 'ALTER DATABASE "%s" CONNECTION LIMIT %s' % (db, conn_limit)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def get_encoding_id(cursor, encoding):
+ query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
+ cursor.execute(query, {'encoding': encoding})
+ return cursor.fetchone()['encoding_id']
+
+
+def get_db_info(cursor, db):
+ query = """
+ SELECT rolname AS owner,
+ pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
+ datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
+ spcname AS tablespace
+ FROM pg_database
+ JOIN pg_roles ON pg_roles.oid = pg_database.datdba
+ JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
+ WHERE datname = %(db)s
+ """
+ cursor.execute(query, {'db': db})
+ return cursor.fetchone()
+
+
+def db_exists(cursor, db):
+ query = "SELECT * FROM pg_database WHERE datname=%(db)s"
+ cursor.execute(query, {'db': db})
+ return cursor.rowcount == 1
+
+
+def db_delete(cursor, db):
+ if db_exists(cursor, db):
+ query = 'DROP DATABASE "%s"' % db
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+
+def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
+ if not db_exists(cursor, db):
+ query_fragments = ['CREATE DATABASE "%s"' % db]
+ if owner:
+ query_fragments.append('OWNER "%s"' % owner)
+ if template:
+ query_fragments.append('TEMPLATE "%s"' % template)
+ if encoding:
+ query_fragments.append('ENCODING %(enc)s')
+ if lc_collate:
+ query_fragments.append('LC_COLLATE %(collate)s')
+ if lc_ctype:
+ query_fragments.append('LC_CTYPE %(ctype)s')
+ if tablespace:
+ query_fragments.append('TABLESPACE "%s"' % tablespace)
+ if conn_limit:
+ query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query = ' '.join(query_fragments)
+ executed_commands.append(cursor.mogrify(query, params))
+ cursor.execute(query, params)
+ return True
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ raise NotSupportedError(
+ 'Changing database encoding is not supported. '
+ 'Current encoding: %s' % db_info['encoding']
+ )
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ raise NotSupportedError(
+ 'Changing LC_COLLATE is not supported. '
+ 'Current LC_COLLATE: %s' % db_info['lc_collate']
+ )
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ raise NotSupportedError(
+ 'Changing LC_CTYPE is not supported. '
+ 'Current LC_CTYPE: %s' % db_info['lc_ctype']
+ )
+ else:
+ changed = False
+
+ if owner and owner != db_info['owner']:
+ changed = set_owner(cursor, db, owner)
+
+ if conn_limit and conn_limit != str(db_info['conn_limit']):
+ changed = set_conn_limit(cursor, db, conn_limit)
+
+ if tablespace and tablespace != db_info['tablespace']:
+ changed = set_tablespace(cursor, db, tablespace)
+
+ return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ if not db_exists(cursor, db):
+ return False
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ return False
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ return False
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ return False
+ elif owner and owner != db_info['owner']:
+ return False
+ elif conn_limit and conn_limit != str(db_info['conn_limit']):
+ return False
+ elif tablespace and tablespace != db_info['tablespace']:
+ return False
+ else:
+ return True
+
+
+def db_dump(module, target, target_opts="",
+ db=None,
+ dump_extra_args=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user, db_prefix=False)
+ cmd = module.get_bin_path('pg_dump', True)
+ comp_prog_path = None
+
+ if os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=t')
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=c')
+ if os.path.splitext(target)[-1] == '.gz':
+ if module.get_bin_path('pigz'):
+ comp_prog_path = module.get_bin_path('pigz', True)
+ else:
+ comp_prog_path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', True)
+
+ cmd += "".join(flags)
+
+ if dump_extra_args:
+ cmd += " {0} ".format(dump_extra_args)
+
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ # Use a fifo to be notified of an error in pg_dump
+ # Using shell pipe has no way to return the code of the first command
+ # in a portable way.
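+ # Illustratively, for target 'acme.sql.gz' the composed command has the
+ # shape: gzip </tmp/.../pg_fifo >'acme.sql.gz' & pg_dump ... >/tmp/.../pg_fifo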
+ fifo = os.path.join(module.tmpdir, 'pg_fifo')
+ os.mkfifo(fifo)
+ cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
+ else:
+ cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+ db=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user)
+ comp_prog_path = None
+ cmd = module.get_bin_path('psql', True)
+
+ if os.path.splitext(target)[-1] == '.sql':
+ flags.append(' --file={0}'.format(target))
+
+ elif os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=Tar')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=Custom')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('zcat', True)
+
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzcat', True)
+
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xzcat', True)
+
+ cmd += "".join(flags)
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+        env = os.environ.copy()
+        if password:
+            env["PGPASSWORD"] = password
+ p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1, 'cmd: ****'
+ else:
+ return p2.returncode, '', stderr2, 'cmd: ****'
+ else:
+ cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+ """
+    Return a list of connection argument strings, each prefixed
+    with a space and quoted where necessary, to later be combined
+    into a single shell string with `"".join(rv)`.
+
+    db_prefix determines whether "--dbname" is prefixed to the db argument,
+    since that argument was introduced in PostgreSQL 9.3.
+ """
+ flags = []
+ if db:
+ if db_prefix:
+ flags.append(' --dbname={0}'.format(shlex_quote(db)))
+ else:
+ flags.append(' {0}'.format(shlex_quote(db)))
+ if host:
+ flags.append(' --host={0}'.format(host))
+ if port:
+ flags.append(' --port={0}'.format(port))
+ if user:
+ flags.append(' --username={0}'.format(user))
+ return flags
+
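+# Illustration (values are examples only): login_flags('acme', 'db1', 5432,
+# 'bob') returns [' --dbname=acme', ' --host=db1', ' --port=5432',
+# ' --username=bob']; callers append "".join(flags) to the binary path to
+# build a single shell command.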
+
+def do_with_password(module, cmd, password):
+ env = {}
+ if password:
+ env = {"PGPASSWORD": password}
+ executed_commands.append(cmd)
+    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+    return rc, stdout, stderr, cmd
+
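+# Illustration: do_with_password(module, cmd, 's3cret') records cmd in
+# executed_commands and runs it with PGPASSWORD exported via environ_update;
+# the caller receives (rc, stdout, stderr, cmd).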
+
+def set_tablespace(cursor, db, tablespace):
+ query = 'ALTER DATABASE "%s" SET TABLESPACE "%s"' % (db, tablespace)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = pgutils.postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', required=True, aliases=['name']),
+ owner=dict(type='str', default=''),
+ template=dict(type='str', default=''),
+ encoding=dict(type='str', default=''),
+ lc_collate=dict(type='str', default=''),
+ lc_ctype=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
+ target=dict(type='path', default=''),
+ target_opts=dict(type='str', default=''),
+ maintenance_db=dict(type='str', default="postgres"),
+ session_role=dict(type='str'),
+ conn_limit=dict(type='str', default=''),
+ tablespace=dict(type='path', default=''),
+ dump_extra_args=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ db = module.params["db"]
+ owner = module.params["owner"]
+ template = module.params["template"]
+ encoding = module.params["encoding"]
+ lc_collate = module.params["lc_collate"]
+ lc_ctype = module.params["lc_ctype"]
+ target = module.params["target"]
+ target_opts = module.params["target_opts"]
+ state = module.params["state"]
+ changed = False
+ maintenance_db = module.params['maintenance_db']
+ session_role = module.params["session_role"]
+ conn_limit = module.params['conn_limit']
+ tablespace = module.params['tablespace']
+ dump_extra_args = module.params['dump_extra_args']
+ trust_input = module.params['trust_input']
+
+ # Check input
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)
+
+ raw_connection = state in ("dump", "restore")
+
+ if not raw_connection:
+ pgutils.ensure_required_libs(module)
+
+    # To use default values, keyword arguments must be absent, so
+    # check which values are empty and don't include them in the **kw
+    # dictionary.
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+ kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
+ if k in params_map and v != '' and v is not None)
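+
+    # Illustration: with login_host='db1' and an empty login_password, kw
+    # becomes {'host': 'db1'}; empty values are omitted so the psycopg2/libpq
+    # defaults apply instead.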
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+
+ if is_localhost and module.params["login_unix_socket"] != "":
+ kw["host"] = module.params["login_unix_socket"]
+
+ if target == "":
+ target = "{0}/{1}.sql".format(os.getcwd(), db)
+ target = os.path.expanduser(target)
+
+ if not raw_connection:
+ try:
+ db_connection = psycopg2.connect(database=maintenance_db, **kw)
+
+ # Enable autocommit so we can create databases
+ if psycopg2.__version__ >= '2.4.2':
+ db_connection.autocommit = True
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ if session_role:
+ try:
+ cursor.execute('SET ROLE "%s"' % session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = db_exists(cursor, db)
+ elif state == "present":
+ changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+ if state == "absent":
+ try:
+ changed = db_delete(cursor, db)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state in ("dump", "restore"):
+            method = db_dump if state == "dump" else db_restore
+ try:
+ if state == 'dump':
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
+ else:
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
+
+ if rc != 0:
+ module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
+ else:
+ module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
+ executed_commands=executed_commands)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py
new file mode 100644
index 00000000..3fa82dac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ext
+short_description: Add or remove PostgreSQL extensions from a database
+description:
+- Add or remove PostgreSQL extensions from a database.
+options:
+ name:
+ description:
+ - Name of the extension to add or remove.
+ required: true
+ type: str
+ aliases:
+ - ext
+ db:
+ description:
+ - Name of the database to add or remove the extension to/from.
+ required: true
+ type: str
+ aliases:
+ - login_db
+ schema:
+ description:
+ - Name of the schema to add the extension to.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database extension state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ cascade:
+ description:
+ - Automatically install/remove any extensions that this extension depends on
+ that are not already installed/removed (supported since PostgreSQL 9.6).
+ type: bool
+ default: no
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version:
+ description:
+ - Extension version to add or update to. Has effect with I(state=present) only.
+ - If not specified, the latest extension version will be created.
+ - It can't downgrade an extension version.
+      When a version downgrade is needed, remove the extension and create a new one with the appropriate version.
+ - Set I(version=latest) to update the extension to the latest available version.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(ext), I(schema),
+ I(version), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL extensions
+ description: General information about PostgreSQL extensions.
+ link: https://www.postgresql.org/docs/current/external-extensions.html
+- name: CREATE EXTENSION reference
+ description: Complete reference of the CREATE EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createextension.html
+- name: ALTER EXTENSION reference
+ description: Complete reference of the ALTER EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterextension.html
+- name: DROP EXTENSION reference
+ description: Complete reference of the DROP EXTENSION command documentation.
+  link: https://www.postgresql.org/docs/current/sql-dropextension.html
+notes:
+- The default authentication assumes that you are either logging in as
+ or sudo'ing to the C(postgres) account on the host.
+- This module uses I(psycopg2), a Python PostgreSQL database adapter.
+- You must ensure that C(psycopg2) is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case),
+ then PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
+ and C(python-psycopg2) packages on the remote host before using this module.
+- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
+requirements: [ psycopg2 ]
+author:
+- Daniel Schep (@dschep)
+- Thomas O'Donnell (@andytom)
+- Sandro Santilli (@strk)
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Adds postgis extension to the database acme in the schema foo
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ schema: foo
+
+- name: Removes postgis extension from the database acme
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ state: absent
+
+- name: Adds earthdistance extension to the database template1 cascade
+ community.general.postgresql_ext:
+ name: earthdistance
+ db: template1
+ cascade: true
+
+# In the example below, if earthdistance extension is installed,
+# it will be removed too because it depends on cube:
+- name: Removes cube extension from the database acme cascade
+ community.general.postgresql_ext:
+ name: cube
+ db: acme
+ cascade: yes
+ state: absent
+
+- name: Create extension foo of version 1.2 or update it if it's already created
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: 1.2
+
+- name: Assuming extension foo is created, update it to the latest version
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: latest
+'''
+
+RETURN = r'''
+query:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["DROP EXTENSION \"acme\""]
+
+'''
+
+import traceback
+
+from distutils.version import LooseVersion
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def ext_exists(cursor, ext):
+ query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
+ cursor.execute(query, {'ext': ext})
+ return cursor.rowcount == 1
+
+
+def ext_delete(cursor, ext, cascade):
+ if ext_exists(cursor, ext):
+ query = "DROP EXTENSION \"%s\"" % ext
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def ext_update_version(cursor, ext, version):
+ """Update extension version.
+
+    Return True if successful.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ version (str) -- extension version
+ """
+ query = "ALTER EXTENSION \"%s\" UPDATE" % ext
+ params = {}
+
+ if version != 'latest':
+ query += " TO %(ver)s"
+ params['ver'] = version
+
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+
+ return True
+
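+# Illustration: ext_update_version(cursor, 'postgis', '3.1') executes
+# ALTER EXTENSION "postgis" UPDATE TO '3.1' (the target version is passed as
+# a bound parameter); with version='latest' the TO clause is omitted and the
+# server updates to the default version from the extension's control file.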
+
+def ext_create(cursor, ext, schema, cascade, version):
+ query = "CREATE EXTENSION \"%s\"" % ext
+ params = {}
+
+ if schema:
+ query += " WITH SCHEMA \"%s\"" % schema
+ if version:
+ query += " VERSION %(ver)s"
+ params['ver'] = version
+ if cascade:
+ query += " CASCADE"
+
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+ return True
+
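+# Illustration: ext_create(cursor, 'postgis', 'topology', True, '3.1') runs
+# CREATE EXTENSION "postgis" WITH SCHEMA "topology" VERSION '3.1' CASCADE.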
+
+def ext_get_versions(cursor, ext):
+ """
+ Get the current created extension version and available versions.
+
+ Return tuple (current_version, [list of available versions]).
+
+    Note: the list of available versions contains only versions
+    that are higher than the currently created version.
+    If the extension has not been created, this list will contain all
+    available versions.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ """
+
+ # 1. Get the current extension version:
+ query = ("SELECT extversion FROM pg_catalog.pg_extension "
+ "WHERE extname = %(ext)s")
+
+ current_version = '0'
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchone()
+ if res:
+ current_version = res[0]
+
+ # 2. Get available versions:
+ query = ("SELECT version FROM pg_available_extension_versions "
+ "WHERE name = %(ext)s")
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchall()
+
+ available_versions = parse_ext_versions(current_version, res)
+
+ if current_version == '0':
+ current_version = False
+
+ return (current_version, available_versions)
+
+
+def parse_ext_versions(current_version, ext_ver_list):
+ """Parse ext versions.
+
+ Args:
+ current_version (str) -- version to compare elements of ext_ver_list with
+ ext_ver_list (list) -- list containing dicts with versions
+
+ Return a sorted list with versions that are higher than current_version.
+
+ Note: Incomparable versions (e.g., postgis version "unpackaged") are skipped.
+ """
+ available_versions = []
+
+ for line in ext_ver_list:
+ if line['version'] == 'unpackaged':
+ continue
+
+ try:
+ if LooseVersion(line['version']) > LooseVersion(current_version):
+ available_versions.append(line['version'])
+ except Exception:
+ # When a version cannot be compared, skip it
+ # (there's a note in the documentation)
+ continue
+
+ return sorted(available_versions, key=LooseVersion)
+
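+# Illustration (hypothetical input): parse_ext_versions('1.0',
+# [{'version': '0.9'}, {'version': '1.1'}, {'version': 'unpackaged'}])
+# returns ['1.1']; lower and incomparable versions are skipped.
+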
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ ext=dict(type="str", required=True, aliases=["name"]),
+ schema=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ cascade=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ version=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ ext = module.params["ext"]
+ schema = module.params["schema"]
+ state = module.params["state"]
+ cascade = module.params["cascade"]
+ version = module.params["version"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ changed = False
+
+ if not trust_input:
+ check_input(module, ext, schema, version, session_role)
+
+ if version and state == 'absent':
+ module.warn("Parameter version is ignored when state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ # Get extension info and available versions:
+ curr_version, available_versions = ext_get_versions(cursor, ext)
+
+ if state == "present":
+ if version == 'latest':
+ if available_versions:
+ version = available_versions[-1]
+ else:
+ version = ''
+
+ if version:
+ # If the specific version is passed and it is not available for update:
+ if version not in available_versions:
+ if not curr_version:
+ module.fail_json(msg="Passed version '%s' is not available" % version)
+
+ elif LooseVersion(curr_version) == LooseVersion(version):
+ changed = False
+
+ else:
+ module.fail_json(msg="Passed version '%s' is lower than "
+ "the current created version '%s' or "
+ "the passed version is not available" % (version, curr_version))
+
+            # If the specific version is passed and it is higher than the current version:
+ if curr_version:
+ if LooseVersion(curr_version) < LooseVersion(version):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_update_version(cursor, ext, version)
+
+                # If the passed version is already created:
+ if curr_version == version:
+ changed = False
+
+            # If the ext doesn't exist but is available for installation:
+ elif not curr_version and available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+ # If version is not passed:
+ else:
+ if not curr_version:
+                # If the ext doesn't exist but is available for installation:
+ if available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+                # If the ext doesn't exist and is not available:
+ else:
+ module.fail_json(msg="Extension %s is not installed" % ext)
+
+ elif state == "absent":
+ if curr_version:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_delete(cursor, ext, cascade)
+ else:
+ changed = False
+
+ except Exception as e:
+ db_connection.close()
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py
new file mode 100644
index 00000000..6ffee31d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_idx
+short_description: Create or drop indexes from a PostgreSQL database
+description:
+- Create or drop indexes from a PostgreSQL database.
+
+options:
+ idxname:
+ description:
+ - Name of the index to create or drop.
+ type: str
+ required: true
+ aliases:
+ - name
+ db:
+ description:
+ - Name of database to connect to and where the index will be created/dropped.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ schema:
+ description:
+ - Name of a database schema where the index will be created.
+ type: str
+ state:
+ description:
+ - Index state.
+ - C(present) implies the index will be created if it does not exist.
+ - C(absent) implies the index will be dropped if it exists.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ table:
+ description:
+    - Table to create the index on.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ columns:
+ description:
+    - List of columns to be covered by the index.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ aliases:
+ - column
+ cond:
+ description:
+ - Index conditions.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ idxtype:
+ description:
+ - Index type (like btree, gist, gin, etc.).
+ - Mutually exclusive with I(state=absent).
+ type: str
+ aliases:
+ - type
+ concurrent:
+ description:
+ - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
+    - Note that if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
+      For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
+    - If the building process is interrupted for any reason when I(concurrent=yes), the index becomes invalid.
+      In this case it should be dropped and created again.
+ - Mutually exclusive with I(cascade=yes).
+ type: bool
+ default: yes
+ unique:
+ description:
+ - Enable unique index.
+ - Only btree currently supports unique indexes.
+ type: bool
+ default: no
+ version_added: '0.2.0'
+ tablespace:
+ description:
+ - Set a tablespace for the index.
+ - Mutually exclusive with I(state=absent).
+ required: false
+ type: str
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the index,
+ and in turn all objects that depend on those objects.
+    - It is used only with I(state=absent).
+    - Mutually exclusive with I(concurrent=yes).
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(idxname), I(session_role),
+ I(schema), I(table), I(columns), I(tablespace), I(storage_params),
+ I(cond) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_tablespace
+- name: PostgreSQL indexes reference
+ description: General information about PostgreSQL indexes.
+ link: https://www.postgresql.org/docs/current/indexes.html
+- name: CREATE INDEX reference
+ description: Complete reference of the CREATE INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createindex.html
+- name: ALTER INDEX reference
+ description: Complete reference of the ALTER INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterindex.html
+- name: DROP INDEX reference
+ description: Complete reference of the DROP INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropindex.html
+
+notes:
+- The index building process can affect database performance.
+- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
+
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: id,name
+ name: test_idx
+
+- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns:
+ - id
+ - name
+ idxname: test_idx
+ tablespace: ssd
+ storage_params:
+ - fillfactor=90
+
+- name: Create gist index test_gist_idx concurrently on column geo_data of table map
+ community.general.postgresql_idx:
+ db: somedb
+ table: map
+ idxtype: gist
+ columns: geo_data
+ idxname: test_gist_idx
+
+# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
+- name: Create gin index gin0_idx not concurrently on column comment of table test
+ community.general.postgresql_idx:
+ idxname: gin0_idx
+ table: test
+ columns: comment gin_trgm_ops
+ concurrent: no
+ idxtype: gin
+
+- name: Drop btree test_idx concurrently
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+
+- name: Drop test_idx cascade
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+ cascade: yes
+ concurrent: no
+
+- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
+ community.general.postgresql_idx:
+ db: mydb
+ table: test
+ columns: id,comment
+ idxname: test_idx
+ cond: id > 1
+
+- name: Create unique btree index if not exists test_unique_idx on column name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: name
+ name: test_unique_idx
+ unique: yes
+ concurrent: no
+'''
+
+RETURN = r'''
+name:
+ description: Index name.
+ returned: always
+ type: str
+ sample: 'foo_idx'
+state:
+ description: Index state.
+ returned: always
+ type: str
+ sample: 'present'
+schema:
+ description: Schema where index exists.
+ returned: always
+ type: str
+ sample: 'public'
+tablespace:
+ description: Tablespace where index exists.
+ returned: always
+ type: str
+ sample: 'ssd'
+query:
+  description: Query that the module tried to execute.
+ returned: always
+ type: str
+ sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
+storage_params:
+ description: Index storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=90" ]
+valid:
+ description: Index validity.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Index(object):
+
+ """Class for working with PostgreSQL indexes.
+
+ TODO:
+ 1. Add possibility to change ownership
+ 2. Add possibility to change tablespace
+ 3. Add list called executed_queries (executed_query should be left too)
+ 4. Use self.module instead of passing arguments to the methods whenever possible
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+        exists (bool) -- flag showing whether the index exists in the DB
+        info (dict) -- dict that contains information about the index
+ executed_query (str) -- executed query
+ """
+
+ def __init__(self, module, cursor, schema, name):
+ self.name = name
+ if schema:
+ self.schema = schema
+ else:
+ self.schema = 'public'
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'name': self.name,
+ 'state': 'absent',
+ 'schema': '',
+ 'tblname': '',
+ 'tblspace': '',
+ 'valid': True,
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_query = ''
+
+ def get_info(self):
+ """Refresh index info.
+
+ Return self.info dict.
+ """
+ self.__exists_in_db()
+ return self.info
+
+ def __exists_in_db(self):
+ """Check index existence, collect info, add it to self.info dict.
+
+ Return True if the index exists, otherwise, return False.
+ """
+ query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
+ "pi.indisvalid, c.reloptions "
+ "FROM pg_catalog.pg_indexes AS i "
+ "JOIN pg_catalog.pg_class AS c "
+ "ON i.indexname = c.relname "
+ "JOIN pg_catalog.pg_index AS pi "
+ "ON c.oid = pi.indexrelid "
+ "WHERE i.indexname = %(name)s")
+
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ name=self.name,
+ state='present',
+ schema=res[0][0],
+ tblname=res[0][1],
+ tblspace=res[0][2] if res[0][2] else '',
+ valid=res[0][3],
+ storage_params=res[0][4] if res[0][4] else [],
+ )
+ return True
+
+ else:
+ self.exists = False
+ return False
+
+ def create(self, tblname, idxtype, columns, cond, tblspace,
+ storage_params, concurrent=True, unique=False):
+ """Create PostgreSQL index.
+
+        Return True if successful, otherwise return False.
+
+ Args:
+ tblname (str) -- name of a table for the index
+ idxtype (str) -- type of the index like BTREE, BRIN, etc
+ columns (str) -- string of comma-separated columns that need to be covered by index
+ tblspace (str) -- tablespace for storing the index
+ storage_params (str) -- string of comma-separated storage parameters
+
+ Kwargs:
+            concurrent (bool) -- build index in concurrent mode, default True
+            unique (bool) -- create a unique index, default False
+ """
+ if self.exists:
+ return False
+
+ if idxtype is None:
+ idxtype = "BTREE"
+
+ query = 'CREATE'
+
+ if unique:
+ query += ' UNIQUE'
+
+ query += ' INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"' % self.name
+
+ query += ' ON "%s"."%s" ' % (self.schema, tblname)
+
+ query += 'USING %s (%s)' % (idxtype, columns)
+
+ if storage_params:
+ query += ' WITH (%s)' % storage_params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if cond:
+ query += ' WHERE %s' % cond
+
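+        # Illustration (hypothetical arguments): create('products', 'btree',
+        # 'id,name', 'id > 1', 'ssd', 'fillfactor=90', unique=True) assembles:
+        #   CREATE UNIQUE INDEX CONCURRENTLY "<idxname>" ON "public"."products"
+        #   USING btree (id,name) WITH (fillfactor=90) TABLESPACE "ssd" WHERE id > 1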
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+ def drop(self, cascade=False, concurrent=True):
+ """Drop PostgreSQL index.
+
+        Return True if successful, otherwise return False.
+
+        Kwargs:
+            cascade (bool) -- automatically drop objects that depend on the index,
+                default False
+            concurrent (bool) -- drop the index in concurrent mode, default True
+ """
+ if not self.exists:
+ return False
+
+ query = 'DROP INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"."%s"' % (self.schema, self.name)
+
+ if cascade:
+ query += ' CASCADE'
+
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ idxname=dict(type='str', required=True, aliases=['name']),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ concurrent=dict(type='bool', default=True),
+ unique=dict(type='bool', default=False),
+ table=dict(type='str'),
+ idxtype=dict(type='str', aliases=['type']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ cond=dict(type='str'),
+ session_role=dict(type='str'),
+ tablespace=dict(type='str'),
+ storage_params=dict(type='list', elements='str'),
+ cascade=dict(type='bool', default=False),
+ schema=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ idxname = module.params["idxname"]
+ state = module.params["state"]
+ concurrent = module.params["concurrent"]
+ unique = module.params["unique"]
+ table = module.params["table"]
+ idxtype = module.params["idxtype"]
+ columns = module.params["columns"]
+ cond = module.params["cond"]
+ tablespace = module.params["tablespace"]
+ storage_params = module.params["storage_params"]
+ cascade = module.params["cascade"]
+ schema = module.params["schema"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, idxname, session_role, schema, table, columns,
+ tablespace, storage_params, cond)
+
+ if concurrent and cascade:
+ module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
+
+ if unique and (idxtype and idxtype != 'btree'):
+ module.fail_json(msg="Only btree currently supports unique indexes")
+
+ if state == 'present':
+ if not table:
+ module.fail_json(msg="Table must be specified")
+ if not columns:
+ module.fail_json(msg="At least one column must be specified")
+ else:
+ if table or columns or cond or idxtype or tablespace:
+ module.fail_json(msg="Index %s is going to be removed, so it does not "
+ "make sense to pass a table name, columns, conditions, "
+ "index type, or tablespace" % idxname)
+
+ if cascade and state != 'absent':
+ module.fail_json(msg="cascade parameter used only with state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Set defaults:
+ changed = False
+
+ # Do job:
+ index = Index(module, cursor, schema, idxname)
+ kw = index.get_info()
+ kw['query'] = ''
+
+ #
+ # check_mode start
+ if module.check_mode:
+ if state == 'present' and index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'present' and not index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+
+ elif state == 'absent' and not index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'absent' and index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+ # check_mode end
+ #
+
+ if state == "present":
+ if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
+ module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
+
+ columns = ','.join(columns)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)
+
+ if changed:
+ kw = index.get_info()
+ kw['state'] = 'present'
+ kw['query'] = index.executed_query
+
+ else:
+ changed = index.drop(cascade, concurrent)
+
+ if changed:
+ kw['state'] = 'absent'
+ kw['query'] = index.executed_query
+
+ if not kw['valid']:
+ db_connection.rollback()
+ module.warn("Index %s is invalid! ROLLBACK" % idxname)
+
+ if not concurrent:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py
new file mode 100644
index 00000000..aeec8651
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py
@@ -0,0 +1,1030 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_info
+short_description: Gather information about PostgreSQL servers
+description:
+- Gathers information about PostgreSQL servers.
+options:
+ filter:
+ description:
+    - Limit the collected information by a comma-separated string or a YAML list.
+ - Allowable values are C(version),
+ C(databases), C(in_recovery), C(settings), C(tablespaces), C(roles),
+ C(replications), C(repl_slots).
+ - By default, collects all subsets.
+    - You can use shell-style (fnmatch) wildcards to pass groups of values (see Examples).
+    - You can use '!' before a value (for example, C(!settings)) to exclude it from the information.
+ - If you pass including and excluding values to the filter, for example, I(filter=!settings,ver),
+ the excluding values will be ignored.
+ type: list
+ elements: str
+ db:
+ description:
+    - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_ping
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Display info from postgres hosts.
+# ansible postgres -m postgresql_info
+
+# Display only databases and roles info from all hosts using shell-style wildcards:
+# ansible all -m postgresql_info -a 'filter=dat*,rol*'
+
+# Display only replications and repl_slots info from standby hosts using shell-style wildcards:
+# ansible standby -m postgresql_info -a 'filter=repl*'
+
+# Display all info from databases hosts except settings:
+# ansible databases -m postgresql_info -a 'filter=!settings'
+
+- name: Collect PostgreSQL version and extensions
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: ver*,ext*
+
+- name: Collect all info except settings and roles
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: "!settings,!roles"
+
+# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become
+# and pass "postgres" as a database to connect to
+- name: Collect tablespaces and repl_slots info
+ become: yes
+ become_user: pgsql
+ community.general.postgresql_info:
+ db: postgres
+ filter:
+ - tablesp*
+ - repl_sl*
+
+- name: Collect all info except databases
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter:
+ - "!databases"
+'''
+
+RETURN = r'''
+version:
+ description: Database server version U(https://www.postgresql.org/support/versioning/).
+ returned: always
+ type: dict
+ sample: { "version": { "major": 10, "minor": 6 } }
+ contains:
+ major:
+ description: Major server version.
+ returned: always
+ type: int
+ sample: 11
+ minor:
+ description: Minor server version.
+ returned: always
+ type: int
+ sample: 1
+in_recovery:
+ description: Indicates if the service is in recovery mode or not.
+ returned: always
+ type: bool
+ sample: false
+databases:
+ description: Information about databases.
+ returned: always
+ type: dict
+ sample:
+ - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8",
+ "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } }
+ contains:
+ database_name:
+ description: Database name.
+ returned: always
+ type: dict
+ sample: template1
+ contains:
+ access_priv:
+ description: Database access privileges.
+ returned: always
+ type: str
+ sample: "=c/postgres_npostgres=CTc/postgres"
+ collate:
+ description:
+ - Database collation U(https://www.postgresql.org/docs/current/collation.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ ctype:
+ description:
+ - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ encoding:
+ description:
+ - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: UTF8
+ owner:
+ description:
+ - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html).
+ returned: always
+ type: str
+ sample: postgres
+ size:
+ description: Database size in bytes.
+ returned: always
+ type: str
+ sample: 8189415
+ extensions:
+ description:
+ - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html).
+ returned: always
+ type: dict
+ sample:
+ - { "plpgsql": { "description": "PL/pgSQL procedural language",
+ "extversion": { "major": 1, "minor": 0 } } }
+ contains:
+ extdescription:
+ description: Extension description.
+ returned: if existent
+ type: str
+ sample: PL/pgSQL procedural language
+ extversion:
+        description: Extension version.
+ returned: always
+ type: dict
+ contains:
+ major:
+ description: Extension major version.
+ returned: always
+ type: int
+ sample: 1
+ minor:
+ description: Extension minor version.
+ returned: always
+ type: int
+ sample: 0
+ nspname:
+ description: Namespace where the extension is.
+ returned: always
+ type: str
+ sample: pg_catalog
+ languages:
+ description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html).
+ returned: always
+ type: dict
+ sample: { "sql": { "lanacl": "", "lanowner": "postgres" } }
+ contains:
+ lanacl:
+ description:
+ - Language access privileges
+ U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ lanowner:
+ description:
+ - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: postgres
+ namespaces:
+ description:
+ - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html).
+ returned: always
+ type: dict
+ sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } }
+ contains:
+ nspacl:
+ description:
+ - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ nspowner:
+ description:
+ - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: postgres
+ publications:
+ description:
+ - Information about logical replication publications (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-publication.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } }
+ version_added: '0.2.0'
+ subscriptions:
+ description:
+ - Information about replication subscriptions (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample:
+ - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } }
+ version_added: '0.2.0'
+repl_slots:
+ description:
+ - Replication slots (available in 9.4 and later)
+ U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html).
+ returned: if existent
+ type: dict
+ sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } }
+ contains:
+ active:
+ description:
+ - True means that a receiver has connected to it, and it is currently reserving archives.
+ returned: always
+ type: bool
+ sample: true
+ database:
+ description: Database name this slot is associated with, or null.
+ returned: always
+ type: str
+ sample: acme
+ plugin:
+ description:
+ - Base name of the shared object containing the output plugin
+ this logical slot is using, or null for physical slots.
+ returned: always
+ type: str
+ sample: pgoutput
+ slot_type:
+ description: The slot type - physical or logical.
+ returned: always
+ type: str
+ sample: logical
+replications:
+ description:
+ - Information about the current replications by process PIDs
+ U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE).
+ returned: if pg_stat_replication view existent
+ type: dict
+ sample:
+ - { "76580": { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03",
+ "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } }
+ contains:
+ usename:
+ description:
+ - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view).
+ returned: always
+ type: str
+ sample: replication_user
+ app_name:
+ description: Name of the application that is connected to this WAL sender.
+ returned: if existent
+ type: str
+ sample: acme_srv
+ client_addr:
+ description:
+ - IP address of the client connected to this WAL sender.
+ - If this field is null, it indicates that the client is connected
+ via a Unix socket on the server machine.
+ returned: always
+ type: str
+ sample: 10.0.0.101
+ client_hostname:
+ description:
+ - Host name of the connected client, as reported by a reverse DNS lookup of client_addr.
+ - This field will only be non-null for IP connections, and only when log_hostname is enabled.
+ returned: always
+ type: str
+ sample: dbsrv1
+ backend_start:
+ description: Time when this process was started, i.e., when the client connected to this WAL sender.
+ returned: always
+ type: str
+ sample: "2019-02-03 00:14:33.908593+03"
+ state:
+ description: Current WAL sender state.
+ returned: always
+ type: str
+ sample: streaming
+tablespaces:
+ description:
+ - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ],
+ "spcowner": "postgres" } }
+ contains:
+ spcacl:
+ description: Tablespace access privileges.
+ returned: always
+ type: str
+ sample: "{postgres=C/postgres,andreyk=C/postgres}"
+ spcoptions:
+ description: Tablespace-level options.
+ returned: always
+ type: list
+ sample: [ "seq_page_cost=1" ]
+ spcowner:
+ description: Owner of the tablespace.
+ returned: always
+ type: str
+ sample: test_user
+roles:
+ description:
+ - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false,
+ "valid_until": "9999-12-31T23:59:59.999999+00:00" } }
+ contains:
+ canlogin:
+ description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html).
+ returned: always
+ type: bool
+ sample: true
+ member_of:
+ description:
+ - Role membership U(https://www.postgresql.org/docs/current/role-membership.html).
+ returned: always
+ type: list
+ sample: [ "read_only_users" ]
+ superuser:
+ description: User is a superuser or not.
+ returned: always
+ type: bool
+ sample: false
+ valid_until:
+ description:
+ - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html).
+ returned: always
+ type: str
+ sample: "9999-12-31T23:59:59.999999+00:00"
+pending_restart_settings:
+ description:
+  - List of settings whose changes are pending a server restart.
+ returned: always
+ type: list
+ sample: [ "shared_buffers" ]
+settings:
+ description:
+ - Information about run-time server parameters
+ U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: dict
+ sample:
+ - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647",
+ "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf",
+ "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } }
+ contains:
+ setting:
+ description: Current value of the parameter.
+ returned: always
+ type: str
+ sample: 49152
+ unit:
+ description: Implicit unit of the parameter.
+ returned: always
+ type: str
+ sample: kB
+ boot_val:
+ description:
+ - Parameter value assumed at server startup if the parameter is not otherwise set.
+ returned: always
+ type: str
+ sample: 4096
+ min_val:
+ description:
+ - Minimum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 64
+ max_val:
+ description:
+ - Maximum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 2147483647
+ sourcefile:
+ description:
+ - Configuration file the current value was set in.
+ - Null for values set from sources other than configuration files,
+      or when examined by a user who is neither a superuser nor a member of pg_read_all_settings.
+ - Helpful when using include directives in configuration files.
+ returned: always
+ type: str
+ sample: /var/lib/pgsql/10/data/postgresql.auto.conf
+ context:
+ description:
+ - Context required to set the parameter's value.
+ - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: str
+ sample: user
+ vartype:
+ description:
+ - Parameter type (bool, enum, integer, real, or string).
+ returned: always
+ type: str
+ sample: integer
+ val_in_bytes:
+ description:
+ - Current value of the parameter in bytes.
+ returned: if supported
+ type: int
+ sample: 2147483647
+ pretty_val:
+ description:
+ - Value presented in the pretty form.
+ returned: always
+ type: str
+ sample: 2MB
+ pending_restart:
+ description:
+      - True if the value has been changed in the configuration file but needs a restart; false otherwise.
+      - Returned only if C(settings) is passed.
+ returned: always
+ type: bool
+ sample: false
+'''
+
+from fnmatch import fnmatch
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgDbConn(object):
+ """Auxiliary class for working with PostgreSQL connection objects.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class that
+ contains connection parameters.
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.db_conn = None
+ self.cursor = None
+
+ def connect(self):
+ """Connect to a PostgreSQL database and return a cursor object.
+
+        Note: connection parameters are passed via the self.module object.
+ """
+ conn_params = get_conn_params(self.module, self.module.params, warn_db_default=False)
+ self.db_conn = connect_to_db(self.module, conn_params)
+ return self.db_conn.cursor(cursor_factory=DictCursor)
+
+ def reconnect(self, dbname):
+ """Reconnect to another database and return a PostgreSQL cursor object.
+
+ Arguments:
+ dbname (string): Database name to connect to.
+ """
+ self.db_conn.close()
+
+ self.module.params['database'] = dbname
+ return self.connect()
+
+
+class PgClusterInfo(object):
+ """Class for collection information about a PostgreSQL instance.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class.
+ db_conn_obj (psycopg2.connect): PostgreSQL connection object.
+ """
+
+ def __init__(self, module, db_conn_obj):
+ self.module = module
+ self.db_obj = db_conn_obj
+ self.cursor = db_conn_obj.connect()
+ self.pg_info = {
+ "version": {},
+ "in_recovery": None,
+ "tablespaces": {},
+ "databases": {},
+ "replications": {},
+ "repl_slots": {},
+ "settings": {},
+ "roles": {},
+ "pending_restart_settings": [],
+ }
+
+ def collect(self, val_list=False):
+ """Collect information based on 'filter' option."""
+ subset_map = {
+ "version": self.get_pg_version,
+ "in_recovery": self.get_recovery_state,
+ "tablespaces": self.get_tablespaces,
+ "databases": self.get_db_info,
+ "replications": self.get_repl_info,
+ "repl_slots": self.get_rslot_info,
+ "settings": self.get_settings,
+ "roles": self.get_role_info,
+ }
+
+ incl_list = []
+ excl_list = []
+ # Notice: incl_list and excl_list
+ # don't make sense together, therefore,
+ # if incl_list is not empty, we collect
+ # only values from it:
+ if val_list:
+ for i in val_list:
+ if i[0] != '!':
+ incl_list.append(i)
+ else:
+ excl_list.append(i.lstrip('!'))
+
+ if incl_list:
+ for s in subset_map:
+ for i in incl_list:
+ if fnmatch(s, i):
+ subset_map[s]()
+ break
+ elif excl_list:
+ found = False
+ # Collect info:
+ for s in subset_map:
+ for e in excl_list:
+ if fnmatch(s, e):
+ found = True
+
+ if not found:
+ subset_map[s]()
+ else:
+ found = False
+
+ # Default behaviour, if include or exclude is not passed:
+ else:
+ # Just collect info for each item:
+ for s in subset_map:
+ subset_map[s]()
+
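+        # Illustration: with filter=['repl*'] only the 'replications' and
+        # 'repl_slots' subsets match via fnmatch and are collected; with
+        # filter=['!settings', 'ver*'] the exclude entry is ignored because
+        # an include entry is present.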
+ return self.pg_info
+
+ def get_pub_info(self):
+ """Get publication statistics."""
+ query = ("SELECT p.*, r.rolname AS ownername "
+ "FROM pg_catalog.pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ publications = {}
+
+ for elem in result:
+ if not publications.get(elem['pubname']):
+ publications[elem['pubname']] = {}
+
+ for key, val in iteritems(elem):
+ if key != 'pubname':
+ publications[elem['pubname']][key] = val
+
+ return publications
+
+ def get_subscr_info(self):
+ """Get subscription statistics."""
+ query = ("SELECT s.*, r.rolname AS ownername, d.datname AS dbname "
+ "FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ subscr_info = {}
+
+ for elem in result:
+ if not subscr_info.get(elem['dbname']):
+ subscr_info[elem['dbname']] = {}
+
+ if not subscr_info[elem['dbname']].get(elem['subname']):
+ subscr_info[elem['dbname']][elem['subname']] = {}
+
+ for key, val in iteritems(elem):
+ if key not in ('subname', 'dbname'):
+ subscr_info[elem['dbname']][elem['subname']][key] = val
+
+ return subscr_info
+
+ def get_tablespaces(self):
+ """Get information about tablespaces."""
+        # Check whether the spcoptions column exists:
+ opt = self.__exec_sql("SELECT column_name "
+ "FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'")
+
+ if not opt:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+ else:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+
+ res = self.__exec_sql(query)
+ ts_dict = {}
+ for i in res:
+ ts_name = i[0]
+ ts_info = dict(
+ spcowner=i[1],
+ spcacl=i[2] if i[2] else '',
+ )
+ if opt:
+ ts_info['spcoptions'] = i[3] if i[3] else []
+
+ ts_dict[ts_name] = ts_info
+
+ self.pg_info["tablespaces"] = ts_dict
+
+ def get_ext_info(self):
+ """Get information about existing extensions."""
+ # Check that pg_extension exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_extension')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT e.extname, e.extversion, n.nspname, c.description "
+ "FROM pg_catalog.pg_extension AS e "
+ "LEFT JOIN pg_catalog.pg_namespace AS n "
+ "ON n.oid = e.extnamespace "
+ "LEFT JOIN pg_catalog.pg_description AS c "
+ "ON c.objoid = e.oid "
+ "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass")
+ res = self.__exec_sql(query)
+ ext_dict = {}
+ for i in res:
+ ext_ver = i[1].split('.')
+
+ ext_dict[i[0]] = dict(
+ extversion=dict(
+ major=int(ext_ver[0]),
+ minor=int(ext_ver[1]),
+ ),
+ nspname=i[2],
+ description=i[3],
+ )
+
+ return ext_dict
+
+ def get_role_info(self):
+ """Get information about roles (in PgSQL groups and users are roles)."""
+ query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, "
+ "r.rolvaliduntil, "
+ "ARRAY(SELECT b.rolname "
+ "FROM pg_catalog.pg_auth_members AS m "
+ "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) AS memberof "
+ "FROM pg_catalog.pg_roles AS r "
+ "WHERE r.rolname !~ '^pg_'")
+
+ res = self.__exec_sql(query)
+ rol_dict = {}
+ for i in res:
+ rol_dict[i[0]] = dict(
+ superuser=i[1],
+ canlogin=i[2],
+ valid_until=i[3] if i[3] else '',
+ member_of=i[4] if i[4] else [],
+ )
+
+ self.pg_info["roles"] = rol_dict
+
+ def get_rslot_info(self):
+ """Get information about replication slots if exist."""
+ # Check that pg_replication_slots exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_replication_slots')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT slot_name, plugin, slot_type, database, "
+ "active FROM pg_replication_slots")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ rslot_dict = {}
+ for i in res:
+ rslot_dict[i[0]] = dict(
+ plugin=i[1],
+ slot_type=i[2],
+ database=i[3],
+ active=i[4],
+ )
+
+ self.pg_info["repl_slots"] = rslot_dict
+
+ def get_settings(self):
+ """Get server settings."""
+        # Check whether the pending_restart column exists:
+ pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_settings' "
+ "AND column_name = 'pending_restart'")
+ if not pend_rest_col_exists:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile "
+ "FROM pg_settings")
+ else:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile, pending_restart "
+ "FROM pg_settings")
+
+ res = self.__exec_sql(query)
+
+ set_dict = {}
+ for i in res:
+ val_in_bytes = None
+ setting = i[1]
+ if i[2]:
+ unit = i[2]
+ else:
+ unit = ''
+
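+            # Convert memory-related settings to bytes for convenience.
+            # For example, shared_buffers with setting='16384' and unit='8kB'
+            # yields 16384 * 1024 * 8 = 134217728 bytes (i.e. 128MB).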
+ if unit == 'kB':
+ val_in_bytes = int(setting) * 1024
+
+ elif unit == '8kB':
+ val_in_bytes = int(setting) * 1024 * 8
+
+ elif unit == 'MB':
+ val_in_bytes = int(setting) * 1024 * 1024
+
+ if val_in_bytes is not None and val_in_bytes < 0:
+ val_in_bytes = 0
+
+ setting_name = i[0]
+ pretty_val = self.__get_pretty_val(setting_name)
+
+ pending_restart = None
+ if pend_rest_col_exists:
+ pending_restart = i[9]
+
+ set_dict[setting_name] = dict(
+ setting=setting,
+ unit=unit,
+ context=i[3],
+ vartype=i[4],
+ boot_val=i[5] if i[5] else '',
+ min_val=i[6] if i[6] else '',
+ max_val=i[7] if i[7] else '',
+ sourcefile=i[8] if i[8] else '',
+ pretty_val=pretty_val,
+ )
+ if val_in_bytes is not None:
+ set_dict[setting_name]['val_in_bytes'] = val_in_bytes
+
+ if pending_restart is not None:
+ set_dict[setting_name]['pending_restart'] = pending_restart
+ if pending_restart:
+ self.pg_info["pending_restart_settings"].append(setting_name)
+
+ self.pg_info["settings"] = set_dict
+
+ def get_repl_info(self):
+ """Get information about replication if the server is a master."""
+        # Check that pg_stat_replication exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_stat_replication')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, "
+ "r.client_hostname, r.backend_start::text, r.state "
+ "FROM pg_stat_replication AS r "
+ "JOIN pg_authid AS a ON r.usesysid = a.oid")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ repl_dict = {}
+ for i in res:
+ repl_dict[i[0]] = dict(
+ usename=i[1],
+ app_name=i[2] if i[2] else '',
+ client_addr=i[3],
+ client_hostname=i[4] if i[4] else '',
+ backend_start=i[5],
+ state=i[6],
+ )
+
+ self.pg_info["replications"] = repl_dict
+
+ def get_lang_info(self):
+ """Get information about current supported languages."""
+ query = ("SELECT l.lanname, a.rolname, l.lanacl "
+ "FROM pg_language AS l "
+ "JOIN pg_authid AS a ON l.lanowner = a.oid")
+ res = self.__exec_sql(query)
+ lang_dict = {}
+ for i in res:
+ lang_dict[i[0]] = dict(
+ lanowner=i[1],
+ lanacl=i[2] if i[2] else '',
+ )
+
+ return lang_dict
+
+ def get_namespaces(self):
+ """Get information about namespaces."""
+ query = ("SELECT n.nspname, a.rolname, n.nspacl "
+ "FROM pg_catalog.pg_namespace AS n "
+ "JOIN pg_authid AS a ON a.oid = n.nspowner")
+ res = self.__exec_sql(query)
+
+ nsp_dict = {}
+ for i in res:
+ nsp_dict[i[0]] = dict(
+ nspowner=i[1],
+ nspacl=i[2] if i[2] else '',
+ )
+
+ return nsp_dict
+
+ def get_pg_version(self):
+ """Get major and minor PostgreSQL server version."""
+ query = "SELECT version()"
+ raw = self.__exec_sql(query)[0][0]
+ raw = raw.split()[1].split('.')
+ self.pg_info["version"] = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
+
+ def get_recovery_state(self):
+ """Get if the service is in recovery mode."""
+ self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0]
+
+ def get_db_info(self):
+ """Get information about the current database."""
+        # The following query returns:
+        # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size, Tablespace
+ query = ("SELECT d.datname, "
+ "pg_catalog.pg_get_userbyid(d.datdba), "
+ "pg_catalog.pg_encoding_to_char(d.encoding), "
+ "d.datcollate, "
+ "d.datctype, "
+ "pg_catalog.array_to_string(d.datacl, E'\n'), "
+ "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') "
+ "THEN pg_catalog.pg_database_size(d.datname)::text "
+ "ELSE 'No Access' END, "
+ "t.spcname "
+ "FROM pg_catalog.pg_database AS d "
+ "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
+ "WHERE d.datname != 'template0'")
+
+ res = self.__exec_sql(query)
+
+ db_dict = {}
+ for i in res:
+ db_dict[i[0]] = dict(
+ owner=i[1],
+ encoding=i[2],
+ collate=i[3],
+ ctype=i[4],
+ access_priv=i[5] if i[5] else '',
+ size=i[6],
+ )
+
+ if self.cursor.connection.server_version >= 100000:
+ subscr_info = self.get_subscr_info()
+
+ for datname in db_dict:
+ self.cursor = self.db_obj.reconnect(datname)
+ db_dict[datname]['namespaces'] = self.get_namespaces()
+ db_dict[datname]['extensions'] = self.get_ext_info()
+ db_dict[datname]['languages'] = self.get_lang_info()
+ if self.cursor.connection.server_version >= 100000:
+ db_dict[datname]['publications'] = self.get_pub_info()
+ db_dict[datname]['subscriptions'] = subscr_info.get(datname, {})
+
+ self.pg_info["databases"] = db_dict
+
+ def __get_pretty_val(self, setting):
+ """Get setting's value represented by SHOW command."""
+ return self.__exec_sql("SHOW %s" % setting)[0][0]
+
+ def __exec_sql(self, query):
+ """Execute SQL and return the result."""
+ try:
+ self.cursor.execute(query)
+ res = self.cursor.fetchall()
+ if res:
+ return res
+ except Exception as e:
+ self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ self.cursor.close()
+ return False
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params['filter']
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ db_conn_obj = PgDbConn(module)
+
+ # Do job:
+ pg_info = PgClusterInfo(module, db_conn_obj)
+
+ module.exit_json(**pg_info.collect(filter_))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py
new file mode 100644
index 00000000..8b28cd9c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt <http://www.jensd.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database
+description:
+- Adds, removes or changes procedural languages with a PostgreSQL database.
+- This module allows you to add a language, remove a language or change the trust
+ relationship with a PostgreSQL database.
+- The module can be used on the machine where executed or on a remote host.
+- When removing a language from a database, it is possible that dependencies prevent
+  the language from being removed. In that case, you can specify I(cascade=yes) to
+ automatically drop objects that depend on the language (such as functions in the
+ language).
+- In case the language can't be deleted because it is required by the
+ database system, you can specify I(fail_on_drop=no) to ignore the error.
+- Be careful when marking a language as trusted since this could be a potential
+ security breach. Untrusted languages allow only users with the PostgreSQL superuser
+ privilege to use this language to create new functions.
+options:
+ lang:
+ description:
+ - Name of the procedural language to add, remove or change.
+ required: true
+ type: str
+ aliases:
+ - name
+ trust:
+ description:
+ - Make this language trusted for the selected db.
+ type: bool
+ default: 'no'
+ db:
+ description:
+ - Name of database to connect to and where the language will be added, removed or changed.
+ type: str
+ aliases:
+ - login_db
+ required: true
+ force_trust:
+ description:
+ - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+ - Use with care!
+ type: bool
+ default: 'no'
+ fail_on_drop:
+ description:
+ - If C(yes), fail when removing a language. Otherwise just log and continue.
+ - In some cases, it is not possible to remove a language (used by the db-system).
+ - When dependencies block the removal, consider using I(cascade).
+ type: bool
+ default: 'yes'
+ cascade:
+ description:
+    - When dropping a language, also delete objects that depend on this language.
+ - Only used when I(state=absent).
+ type: bool
+ default: 'no'
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified I(session_role) must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though the
+ I(session_role) were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The state of the language for the selected database.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ owner:
+ description:
+ - Set an owner for the language.
+ - Ignored when I(state=absent).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(lang), I(session_role),
+ I(owner) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL languages
+ description: General information about PostgreSQL languages.
+ link: https://www.postgresql.org/docs/current/xplang.html
+- name: CREATE LANGUAGE reference
+ description: Complete reference of the CREATE LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createlanguage.html
+- name: ALTER LANGUAGE reference
+ description: Complete reference of the ALTER LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
+- name: DROP LANGUAGE reference
+ description: Complete reference of the DROP LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droplanguage.html
+author:
+- Jens Depuydt (@jensdepuydt)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Add language pltclu to database testdb if it doesn't exist
+ community.general.postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
+# Marks the language as trusted if it exists but isn't trusted yet.
+# force_trust makes sure that the language will be marked as trusted
+- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: present
+ trust: yes
+ force_trust: yes
+
+- name: Remove language pltclu from database testdb
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+
+- name: Remove language pltclu from database testdb and remove all dependencies
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ cascade: yes
+
+- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ fail_on_drop: no
+
+- name: In testdb change owner of mylang to alice
+ community.general.postgresql_lang:
+ db: testdb
+ lang: mylang
+ owner: alice
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE LANGUAGE "acme"']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+executed_queries = []
+
+
+def lang_exists(cursor, lang):
+ """Checks if language exists for db"""
+ query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.rowcount > 0
+
+
+def lang_istrusted(cursor, lang):
+ """Checks if language is trusted for db"""
+ query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def lang_altertrust(cursor, lang, trust):
+ """Changes if language is trusted for db"""
+ query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
+ cursor.execute(query, {'trust': trust, 'lang': lang})
+ executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
+ return True
+
+
+def lang_add(cursor, lang, trust):
+ """Adds language for db"""
+ if trust:
+ query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
+ else:
+ query = 'CREATE LANGUAGE "%s"' % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def lang_drop(cursor, lang, cascade):
+ """Drops language for db"""
+ cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
+ try:
+ if cascade:
+ query = "DROP LANGUAGE \"%s\" CASCADE" % lang
+ else:
+ query = "DROP LANGUAGE \"%s\"" % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return False
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return True
+
+
+def get_lang_owner(cursor, lang):
+ """Get language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ """
+ query = ("SELECT r.rolname FROM pg_language l "
+ "JOIN pg_roles r ON l.lanowner = r.oid "
+ "WHERE l.lanname = %(lang)s")
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def set_lang_owner(cursor, lang, owner):
+ """Set language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ owner (str): name of new owner.
+ """
+ query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ lang=dict(type="str", required=True, aliases=["name"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust=dict(type="bool", default="no"),
+ force_trust=dict(type="bool", default="no"),
+ cascade=dict(type="bool", default="no"),
+ fail_on_drop=dict(type="bool", default="yes"),
+ session_role=dict(type="str"),
+ owner=dict(type="str"),
+ trust_input=dict(type="bool", default="yes")
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ db = module.params["db"]
+ lang = module.params["lang"]
+ state = module.params["state"]
+ trust = module.params["trust"]
+ force_trust = module.params["force_trust"]
+ cascade = module.params["cascade"]
+ fail_on_drop = module.params["fail_on_drop"]
+ owner = module.params["owner"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, lang, session_role, owner)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor()
+
+ changed = False
+ kw = {'db': db, 'lang': lang, 'trust': trust}
+
+ if state == "present":
+ if lang_exists(cursor, lang):
+ lang_trusted = lang_istrusted(cursor, lang)
+ if (lang_trusted and not trust) or (not lang_trusted and trust):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_altertrust(cursor, lang, trust)
+ else:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_add(cursor, lang, trust)
+ if force_trust:
+ changed = lang_altertrust(cursor, lang, trust)
+
+ else:
+ if lang_exists(cursor, lang):
+ if module.check_mode:
+ changed = True
+ kw['lang_dropped'] = True
+ else:
+ changed = lang_drop(cursor, lang, cascade)
+ if fail_on_drop and not changed:
+ msg = ("unable to drop language, use cascade "
+ "to delete dependencies or fail_on_drop=no to ignore")
+ module.fail_json(msg=msg)
+ kw['lang_dropped'] = changed
+
+ if owner and state == 'present':
+ if lang_exists(cursor, lang):
+ if owner != get_lang_owner(cursor, lang):
+ changed = set_lang_owner(cursor, lang, owner)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py
new file mode 100644
index 00000000..3292a6db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_membership
+short_description: Add or remove PostgreSQL roles from groups
+description:
+- Adds or removes PostgreSQL roles from groups (other roles).
+- Users are roles with login privilege.
+- Groups are PostgreSQL roles usually without LOGIN privilege.
+- "Common use case:"
+- 1) add a new group (groups) by M(community.general.postgresql_user) module with I(role_attr_flags=NOLOGIN)
+- 2) grant them desired privileges by M(community.general.postgresql_privs) module
+- 3) add desired PostgreSQL users to the new group (groups) by this module (see the sketch at the end of the examples below)
+options:
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to or revoked from I(target_roles).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - group
+ - source_role
+ - source_roles
+ target_roles:
+ description:
+ - The list of target roles (groups will be granted to them).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - target_role
+ - users
+ - user
+ fail_on_role:
+ description:
+ - If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
+ default: yes
+ type: bool
+ state:
+ description:
+ - Membership state.
+    - I(state=present) implies the I(groups) must be granted to I(target_roles).
+ - I(state=absent) implies the I(groups) must be revoked from I(target_roles).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(groups),
+ I(target_roles), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_owner
+- name: PostgreSQL role membership reference
+ description: Complete reference of the PostgreSQL role membership documentation.
+ link: https://www.postgresql.org/docs/current/role-membership.html
+- name: PostgreSQL role attributes reference
+ description: Complete reference of the PostgreSQL role attributes documentation.
+ link: https://www.postgresql.org/docs/current/role-attributes.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Grant role read_only to alice and bob
+ community.general.postgresql_membership:
+ group: read_only
+ target_roles:
+ - alice
+ - bob
+ state: present
+
+# you can also use target_roles: alice,bob,etc to pass the role list
+
+- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
+ community.general.postgresql_membership:
+ groups:
+ - read_only
+ - exec_func
+ target_role: bob
+ fail_on_role: no
+ state: absent
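+
+# The "common use case" from the documentation above as a minimal sketch.
+# Role, database and schema names here are hypothetical, and the granted
+# privileges are an assumption; adjust them to your environment.
+- name: Create group ro_group without LOGIN privilege
+  community.general.postgresql_user:
+    name: ro_group
+    role_attr_flags: NOLOGIN
+
+- name: Grant SELECT on all tables in schema public of database mydb to ro_group
+  community.general.postgresql_privs:
+    database: mydb
+    privs: SELECT
+    type: table
+    objs: ALL_IN_SCHEMA
+    schema: public
+    roles: ro_group
+
+- name: Add users alice and bob to ro_group
+  community.general.postgresql_membership:
+    group: ro_group
+    target_roles:
+      - alice
+      - bob
+    state: present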
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
+granted:
+ description: Dict of granted groups and roles.
+ returned: if I(state=present)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+revoked:
+ description: Dict of revoked groups and roles.
+ returned: if I(state=absent)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+state:
+  description: Membership state that the module tried to set.
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
+ target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
+ fail_on_role=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ groups = module.params['groups']
+ target_roles = module.params['target_roles']
+ fail_on_role = module.params['fail_on_role']
+ state = module.params['state']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, groups, target_roles, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+
+ pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
+
+ if state == 'present':
+ pg_membership.grant()
+
+ elif state == 'absent':
+ pg_membership.revoke()
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ return_dict = dict(
+ changed=pg_membership.changed,
+ state=state,
+ groups=pg_membership.groups,
+ target_roles=pg_membership.target_roles,
+ queries=pg_membership.executed_queries,
+ )
+
+ if state == 'present':
+ return_dict['granted'] = pg_membership.granted
+ elif state == 'absent':
+ return_dict['revoked'] = pg_membership.revoked
+
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py
new file mode 100644
index 00000000..06a09c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py
@@ -0,0 +1,453 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_owner
+short_description: Change the owner of a PostgreSQL database object
+description:
+- Change the owner of a PostgreSQL database object.
+- Also allows reassigning the ownership of database objects owned by a database role to another role.
+
+options:
+ new_owner:
+ description:
+ - Role (user/group) to set as an I(obj_name) owner.
+ type: str
+ required: yes
+ obj_name:
+ description:
+ - Name of a database object to change ownership.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ obj_type:
+ description:
+ - Type of a database object.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
+ aliases:
+ - type
+ reassign_owned_by:
+ description:
+    - The list of role names. The ownership of all the objects within the current database,
+      and of all shared objects (databases, tablespaces), owned by these roles will be reassigned to I(new_owner).
+    - Pay attention - this reassigns all objects owned by these roles in the I(db)!
+    - If the roles exist, the module always returns C(changed=true).
+ - Cannot reassign ownership of objects that are required by the database system.
+ - Mutually exclusive with C(obj_type).
+ type: list
+ elements: str
+ fail_on_role:
+ description:
+ - If C(yes), fail when I(reassign_owned_by) role does not exist.
+ Otherwise just warn and continue.
+ - Mutually exclusive with I(obj_name) and I(obj_type).
+ default: yes
+ type: bool
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(new_owner), I(obj_name),
+ I(reassign_owned_by), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- name: PostgreSQL REASSIGN OWNED command reference
+ description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
+ link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
+# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
+
+- name: The same as above by playbook
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: myfunc
+ obj_type: function
+
+- name: Set owner as bob for table acme in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: bob
+ obj_name: acme
+ obj_type: table
+
+- name: Set owner as alice for view test_view in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: test_view
+ obj_type: view
+
+- name: Set owner as bob for tablespace ssd in database foo
+ community.general.postgresql_owner:
+ db: foo
+ new_owner: bob
+ obj_name: ssd
+ obj_type: tablespace
+
+- name: Reassign all objects in database bar owned by bob to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by: bob
+
+- name: Reassign all objects in database bar owned by bob and bill to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by:
+ - bob
+ - bill
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgOwnership(object):
+
+ """Class for changing ownership of PostgreSQL objects.
+
+ Arguments:
+ module (AnsibleModule): Object of Ansible module class.
+ cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
+ role (str): Role name to set as a new owner of objects.
+
+ Important:
+ If you want to add handling of a new type of database objects:
+ 1. Add a specific method for this like self.__set_db_owner(), etc.
+ 2. Add a condition with a check of ownership for new type objects to self.__is_owner()
+ 3. Add a condition with invocation of the specific method to self.set_owner()
+ 4. Add the information to the module documentation
+ That's all.
+ """
+
+ def __init__(self, module, cursor, role):
+ self.module = module
+ self.cursor = cursor
+ self.check_role_exists(role)
+ self.role = role
+ self.changed = False
+ self.executed_queries = []
+ self.obj_name = ''
+ self.obj_type = ''
+
+ def check_role_exists(self, role, fail_on_role=True):
+ """Check the role exists or not.
+
+ Arguments:
+ role (str): Role name.
+ fail_on_role (bool): If True, fail when the role does not exist.
+ Otherwise just warn and continue.
+ """
+ if not self.__role_exists(role):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % role)
+ else:
+ self.module.warn("Role '%s' does not exist, pass" % role)
+
+ return False
+
+ else:
+ return True
+
+ def reassign(self, old_owners, fail_on_role):
+ """Implements REASSIGN OWNED BY command.
+
+ If success, set self.changed as True.
+
+ Arguments:
+ old_owners (list): The ownership of all the objects within
+ the current database, and of all shared objects (databases, tablespaces),
+ owned by these roles will be reassigned to self.role.
+ fail_on_role (bool): If True, fail when a role from old_owners does not exist.
+ Otherwise just warn and continue.
+ """
+ roles = []
+ for r in old_owners:
+ if self.check_role_exists(r, fail_on_role):
+ roles.append('"%s"' % r)
+
+ # Roles do not exist, nothing to do, exit:
+ if not roles:
+ return False
+
+ old_owners = ','.join(roles)
+
+ query = ['REASSIGN OWNED BY']
+ query.append(old_owners)
+ query.append('TO "%s"' % self.role)
+ query = ' '.join(query)
+
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, obj_type, obj_name):
+ """Change owner of a database object.
+
+ Arguments:
+ obj_type (str): Type of object (like database, table, view, etc.).
+ obj_name (str): Object name.
+ """
+ self.obj_name = obj_name
+ self.obj_type = obj_type
+
+ # if a new_owner is the object owner now,
+ # nothing to do:
+ if self.__is_owner():
+ return False
+
+ if obj_type == 'database':
+ self.__set_db_owner()
+
+ elif obj_type == 'function':
+ self.__set_func_owner()
+
+ elif obj_type == 'sequence':
+ self.__set_seq_owner()
+
+ elif obj_type == 'schema':
+ self.__set_schema_owner()
+
+ elif obj_type == 'table':
+ self.__set_table_owner()
+
+ elif obj_type == 'tablespace':
+ self.__set_tablespace_owner()
+
+ elif obj_type == 'view':
+ self.__set_view_owner()
+
+ elif obj_type == 'matview':
+ self.__set_mat_view_owner()
+
+ def __is_owner(self):
+ """Return True if self.role is the current object owner."""
+ if self.obj_type == 'table':
+ query = ("SELECT 1 FROM pg_tables "
+ "WHERE tablename = %(obj_name)s "
+ "AND tableowner = %(role)s")
+
+ elif self.obj_type == 'database':
+ query = ("SELECT 1 FROM pg_database AS d "
+ "JOIN pg_roles AS r ON d.datdba = r.oid "
+ "WHERE d.datname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'function':
+ query = ("SELECT 1 FROM pg_proc AS f "
+ "JOIN pg_roles AS r ON f.proowner = r.oid "
+ "WHERE f.proname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'sequence':
+ query = ("SELECT 1 FROM pg_class AS c "
+ "JOIN pg_roles AS r ON c.relowner = r.oid "
+ "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'schema':
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %(obj_name)s "
+ "AND schema_owner = %(role)s")
+
+ elif self.obj_type == 'tablespace':
+ query = ("SELECT 1 FROM pg_tablespace AS t "
+ "JOIN pg_roles AS r ON t.spcowner = r.oid "
+ "WHERE t.spcname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'view':
+ query = ("SELECT 1 FROM pg_views "
+ "WHERE viewname = %(obj_name)s "
+ "AND viewowner = %(role)s")
+
+ elif self.obj_type == 'matview':
+ query = ("SELECT 1 FROM pg_matviews "
+ "WHERE matviewname = %(obj_name)s "
+ "AND matviewowner = %(role)s")
+
+ query_params = {'obj_name': self.obj_name, 'role': self.role}
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+ def __set_db_owner(self):
+ """Set the database owner."""
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_func_owner(self):
+ """Set the function owner."""
+ query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_seq_owner(self):
+ """Set the sequence owner."""
+ query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_schema_owner(self):
+ """Set the schema owner."""
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_table_owner(self):
+ """Set the table owner."""
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_tablespace_owner(self):
+ """Set the tablespace owner."""
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_view_owner(self):
+ """Set the view owner."""
+ query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_mat_view_owner(self):
+ """Set the materialized view owner."""
+ query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __role_exists(self, role):
+ """Return True if role exists, otherwise return False."""
+ query_params = {'role': role}
+ query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ new_owner=dict(type='str', required=True),
+ obj_name=dict(type='str'),
+ obj_type=dict(type='str', aliases=['type'], choices=[
+ 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
+ reassign_owned_by=dict(type='list', elements='str'),
+ fail_on_role=dict(type='bool', default=True),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['obj_name', 'reassign_owned_by'],
+ ['obj_type', 'reassign_owned_by'],
+ ['obj_name', 'fail_on_role'],
+ ['obj_type', 'fail_on_role'],
+ ],
+ supports_check_mode=True,
+ )
+
+ new_owner = module.params['new_owner']
+ obj_name = module.params['obj_name']
+ obj_type = module.params['obj_type']
+ reassign_owned_by = module.params['reassign_owned_by']
+ fail_on_role = module.params['fail_on_role']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ pg_ownership = PgOwnership(module, cursor, new_owner)
+
+ # if we want to change ownership:
+ if obj_name:
+ pg_ownership.set_owner(obj_type, obj_name)
+
+ # if we want to reassign objects owned by roles:
+ elif reassign_owned_by:
+ pg_ownership.reassign(reassign_owned_by, fail_on_role)
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(
+ changed=pg_ownership.changed,
+ queries=pg_ownership.executed_queries,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py
new file mode 100644
index 00000000..1f484bcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py
@@ -0,0 +1,745 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+'''
+This module is used to manage postgres pg_hba files with Ansible.
+'''
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_pg_hba
+short_description: Add, remove or modify a rule in a pg_hba file
+description:
+ - The fundamental function of the module is to create, or delete lines in pg_hba files.
+ - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
+    If they are not unique and the key matches the rule to change, only one such rule will remain for C(state=present), or none for C(state=absent).
+extends_documentation_fragment: files
+options:
+ address:
+ description:
+ - The source address/net where the connections could come from.
+ - Will not be used for entries of I(type)=C(local).
+ - You can also use keywords C(all), C(samehost), and C(samenet).
+ default: samehost
+ type: str
+ aliases: [ source, src ]
+ backup:
+ description:
+ - If set, create a backup of the C(pg_hba) file before it is modified.
+        The location of the backup is returned in the C(backup_file) return value by this module.
+ default: false
+ type: bool
+ backup_file:
+ description:
+ - Write backup to a specific backupfile rather than a temp file.
+ type: str
+ create:
+ description:
+      - Create a C(pg_hba) file if none exists.
+ - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+ default: false
+ type: bool
+ contype:
+ description:
+ - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+ type: str
+ choices: [ local, host, hostnossl, hostssl ]
+ databases:
+ description:
+ - Databases this line applies to.
+ default: all
+ type: str
+ dest:
+ description:
+ - Path to C(pg_hba) file to modify.
+ type: path
+ required: true
+ method:
+ description:
+ - Authentication method to be used.
+ type: str
+    choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
+ default: md5
+ netmask:
+ description:
+ - The netmask of the source address.
+ type: str
+ options:
+ description:
+ - Additional options for the authentication I(method).
+ type: str
+ order:
+ description:
+ - The entries will be written out in a specific order.
+ With this option you can control by which field they are ordered first, second and last.
+ s=source, d=databases, u=users.
+ This option is deprecated since 2.9 and will be removed in community.general 3.0.0.
+ Sortorder is now hardcoded to sdu.
+ type: str
+ default: sdu
+ choices: [ sdu, sud, dsu, dus, usd, uds ]
+ state:
+ description:
+ - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ users:
+ description:
+ - Users this line applies to.
+ type: str
+ default: all
+
+notes:
+ - The default authentication assumes that on the host, you are either logging in as or
+ sudo'ing to an account with appropriate permissions to read and modify the file.
+  - This module also returns the pg_hba info. You can use this module to retrieve it by specifying only I(dest).
+    The info can be found in the returned data under the key pg_hba, as a list containing a dict per rule.
+ - This module will sort resulting C(pg_hba) files if a rule change is required.
+    This could give unexpected results with manually created hba files if they were improperly sorted.
+    For example, a rule might have been created for a network first and for an IP within that network next.
+    In that situation, the IP-specific rule never matches and is obsolete in the C(pg_hba) file.
+    After the C(pg_hba) file is rewritten by the M(community.general.postgresql_pg_hba) module, the IP-specific rule will be sorted above the range rule.
+    It will then match, which can give unexpected results.
+ - With the 'order' parameter you can control which field is used to sort first, next and last.
+ - The module supports a check mode and a diff mode.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+ description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+ link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
+
+requirements:
+ - ipaddress
+
+author: Sebastiaan Mannem (@sebasmannem)
+'''
+
+EXAMPLES = '''
+- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: joe,simon
+ source: ::1
+ databases: sales,logistics
+ method: peer
+ create: true
+
+- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: replication
+ source: 192.168.0.100/24
+ databases: replication
+ method: cert
+
+- name: Revoke access from local user mary on database mydb.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: local
+ users: mary
+ databases: mydb
+ state: absent
+'''
+
+RETURN = r'''
+msgs:
+  description: List of textual messages describing what was done
+ returned: always
+ type: list
+ sample:
+ "msgs": [
+ "Removing",
+ "Changed",
+ "Writing"
+ ]
+backup_file:
+ description: File that the original pg_hba file was backed up to
+ returned: changed
+ type: str
+ sample: /tmp/pg_hba_jxobj_p
+pg_hba:
+ description: List of the pg_hba rules as they are configured in the specified hba file
+ returned: always
+ type: list
+ sample:
+ "pg_hba": [
+ {
+ "db": "all",
+ "method": "md5",
+ "src": "samehost",
+ "type": "host",
+ "usr": "all"
+ }
+ ]
+'''
+
+import os
+import re
+import traceback
+
+IPADDRESS_IMP_ERR = None
+try:
+ import ipaddress
+except ImportError:
+ IPADDRESS_IMP_ERR = traceback.format_exc()
+
+import tempfile
+import shutil
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+# from ansible.module_utils.postgres import postgres_common_argument_spec
+
+PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
+ "ldap", "radius", "cert", "pam", "scram-sha-256"]
+PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
+PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
+PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
+
+WHITESPACES_RE = re.compile(r'\s+')
+
+
+class PgHbaError(Exception):
+ '''
+ This exception is raised when parsing the pg_hba file ends in an error.
+ '''
+
+
+class PgHbaRuleError(PgHbaError):
+ '''
+    This exception is raised when a rule in the pg_hba file is invalid.
+ '''
+
+
+class PgHbaRuleChanged(PgHbaRuleError):
+ '''
+ This exception is raised when a new parsed rule is a changed version of an existing rule.
+ '''
+
+
+class PgHbaValueError(PgHbaError):
+ '''
+    This exception is raised when a value in the pg_hba file is invalid.
+ '''
+
+
+class PgHbaRuleValueError(PgHbaRuleError):
+ '''
+    This exception is raised when a rule contains an invalid value.
+ '''
+
+
+class PgHba(object):
+ """
+ PgHba object to read/write entries to/from.
+ pg_hba_file - the pg_hba file almost always /etc/pg_hba
+ """
+ def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
+ if order not in PG_HBA_ORDERS:
+ msg = "invalid order setting {0} (should be one of '{1}')."
+ raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
+ self.pg_hba_file = pg_hba_file
+ self.rules = None
+ self.comment = None
+ self.order = order
+ self.backup = backup
+ self.last_backup = None
+ self.create = create
+ self.unchanged()
+        # self.databases will be updated by add_rule and gives some idea of the number of databases
+        # (at least those that are handled by this pg_hba)
+ self.databases = set(['postgres', 'template0', 'template1'])
+
+        # self.users will be updated by add_rule and gives some idea of the number of users
+        # (at least those that are handled by this pg_hba); since entries might also be groups
+        # with multiple users, this might be totally off, but at least it is some info...
+ self.users = set(['postgres'])
+
+ self.read()
+
+ def unchanged(self):
+ '''
+        This method resets self.diff to an empty default
+ '''
+ self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
+ 'after': {'file': self.pg_hba_file, 'pg_hba': []}}
+
+ def read(self):
+ '''
+ Read in the pg_hba from the system
+ '''
+ self.rules = {}
+ self.comment = []
+        # Read the pg_hba file
+ try:
+ with open(self.pg_hba_file, 'r') as file:
+ for line in file:
+ line = line.strip()
+                    # Split off the comment part and keep it for rendering:
+ if '#' in line:
+ line, comment = line.split('#', 1)
+ self.comment.append('#' + comment)
+ try:
+ self.add_rule(PgHbaRule(line=line))
+ except PgHbaRuleError:
+ pass
+ self.unchanged()
+ except IOError:
+ pass
+
+ def write(self, backup_file=''):
+ '''
+ This method writes the PgHba rules (back) to a file.
+ '''
+ if not self.changed():
+ return False
+
+ contents = self.render()
+ if self.pg_hba_file:
+ if not (os.path.isfile(self.pg_hba_file) or self.create):
+ raise PgHbaError("pg_hba file '{0}' doesn't exist. "
+ "Use create option to autocreate.".format(self.pg_hba_file))
+ if self.backup and os.path.isfile(self.pg_hba_file):
+ if backup_file:
+ self.last_backup = backup_file
+ else:
+ __backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
+ shutil.copy(self.pg_hba_file, self.last_backup)
+ fileh = open(self.pg_hba_file, 'w')
+ else:
+ filed, __path = tempfile.mkstemp(prefix='pg_hba')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(contents)
+ self.unchanged()
+ fileh.close()
+ return True
+
+ def add_rule(self, rule):
+ '''
+ This method can be used to add a rule to the list of rules in this PgHba object
+ '''
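+        # Flow sketch: a rule with an unknown key raises PgHbaRuleChanged via
+        # the KeyError branch; an existing rule whose non-'line' fields differ
+        # raises it explicitly. Either way, the except-branch below (re)stores
+        # the rule and records it in the diff.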
+ key = rule.key()
+ try:
+ try:
+ oldrule = self.rules[key]
+ except KeyError:
+ raise PgHbaRuleChanged
+ ekeys = set(list(oldrule.keys()) + list(rule.keys()))
+ ekeys.remove('line')
+ for k in ekeys:
+ if oldrule.get(k) != rule.get(k):
+ raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
+ except PgHbaRuleChanged:
+ self.rules[key] = rule
+ self.diff['after']['pg_hba'].append(rule.line())
+ if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
+ databases = set(rule['db'].split(','))
+ self.databases.update(databases)
+ if rule['usr'] != 'all':
+ user = rule['usr']
+ if user[0] == '+':
+ user = user[1:]
+ self.users.add(user)
+
+ def remove_rule(self, rule):
+ '''
+ This method can be used to find and remove a rule. It doesn't look for the exact rule, only
+ the rule with the same key.
+ '''
+ keys = rule.key()
+ try:
+ del self.rules[keys]
+ self.diff['before']['pg_hba'].append(rule.line())
+ except KeyError:
+ pass
+
+ def get_rules(self, with_lines=False):
+ '''
+ This method returns all the rules of the PgHba object
+ '''
+ rules = sorted(self.rules.values())
+ for rule in rules:
+ ret = {}
+ for key, value in rule.items():
+ ret[key] = value
+ if not with_lines:
+ if 'line' in ret:
+ del ret['line']
+ else:
+ ret['line'] = rule.line()
+
+ yield ret
+
+ def render(self):
+ '''
+ This method renders the content of the PgHba rules and comments.
+ The returning value can be used directly to write to a new file.
+ '''
+ comment = '\n'.join(self.comment)
+ rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
+ result = comment + '\n' + rule_lines
+ # End it properly with a linefeed (if not already).
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def changed(self):
+ '''
+ This method can be called to detect if the PgHba file has been changed.
+ '''
+ return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
+
+
+class PgHbaRule(dict):
+ '''
+ This class represents one rule as defined in a line in a PgHbaFile.
+ '''
+
+ def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
+ method=None, options=None, line=None):
+ '''
+        This rule can be built either from individual fields (contype, databases,
+        users, source, netmask, method, options) or parsed from a pg_hba line.
+ '''
+
+ super(PgHbaRule, self).__init__()
+
+ if line:
+ # Read values from line if parsed
+ self.fromline(line)
+
+ # read rule cols from parsed items
+ rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ # Some sanity checks
+ for key in ['method', 'type']:
+ if key not in self:
+ raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
+
+ if self['method'] not in PG_HBA_METHODS:
+ msg = "invalid method {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
+
+ if self['type'] not in PG_HBA_TYPES:
+ msg = "invalid connection type {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
+
+ if self['type'] == 'local':
+ self.unset('src')
+ self.unset('mask')
+ elif 'src' not in self:
+            raise PgHbaRuleError('Missing src in rule {0}'.format(self))
+ elif '/' in self['src']:
+ self.unset('mask')
+ else:
+ self['src'] = str(self.source())
+ self.unset('mask')
+
+ def unset(self, key):
+ '''
+ This method is used to unset certain columns if they exist
+ '''
+ if key in self:
+ del self[key]
+
+ def line(self):
+ '''
+ This method can be used to return (or generate) the line
+ '''
+ try:
+ return self['line']
+ except KeyError:
+ self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
+ return self['line']
+
+ def fromline(self, line):
+ '''
+ split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
+ '''
+ if WHITESPACES_RE.sub('', line) == '':
+ # empty line. skip this one...
+ return
+ cols = WHITESPACES_RE.split(line)
+ if len(cols) < 4:
+ msg = "Rule {0} has too few columns."
+ raise PgHbaValueError(msg.format(line))
+ if cols[0] not in PG_HBA_TYPES:
+ msg = "Rule {0} has unknown type: {1}."
+ raise PgHbaValueError(msg.format(line, cols[0]))
+ if cols[0] == 'local':
+ cols.insert(3, None) # No address
+ cols.insert(3, None) # No IP-mask
+ if len(cols) < 6:
+ cols.insert(4, None) # No IP-mask
+ elif cols[5] not in PG_HBA_METHODS:
+ cols.insert(4, None) # No IP-mask
+ if cols[5] not in PG_HBA_METHODS:
+ raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
+
+ if len(cols) < 7:
+ cols.insert(6, None) # No auth-options
+ else:
+ cols[6] = " ".join(cols[6:]) # combine all auth-options
+ rule = dict(zip(PG_HBA_HDR, cols[:7]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
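+ # Illustrative example (input line assumed): parsing "local all all trust"
+ # leaves cols as ['local', 'all', 'all', None, None, 'trust', None], so only
+ # the truthy values survive: {'type': 'local', 'db': 'all', 'usr': 'all',
+ # 'method': 'trust'}.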
+
+ def key(self):
+ '''
+ This method can be used to get the key from a rule.
+ '''
+ if self['type'] == 'local':
+ source = 'local'
+ else:
+ source = str(self.source())
+ return (source, self['db'], self['usr'])
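+ # For example (rule values assumed): a rule parsed from
+ # "host all all 192.0.2.0/24 md5" has the key ('192.0.2.0/24', 'all', 'all').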
+
+ def source(self):
+ '''
+ This method is used to get the source of a rule as an ipaddress object if possible.
+ '''
+ if 'mask' in self.keys():
+ try:
+ ipaddress.ip_address(u'{0}'.format(self['src']))
+ except ValueError:
+ raise PgHbaValueError('Mask was specified, but source "{0}" '
+ 'is not a valid ip'.format(self['src']))
+ # The ipaddress module cannot work with an ipv6 netmask, so let's convert it
+ # to a prefixlen. Furthermore, ipv4 with a bad netmask throws a 'Rule {0}
+ # doesn't seem to be an ip, but has a mask' error that doesn't describe what
+ # is actually going on.
+ try:
+ mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
+ except ValueError:
+ raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
+ binvalue = "{0:b}".format(int(mask_as_ip))
+ if '01' in binvalue:
+ raise PgHbaValueError('IP mask {0} seems invalid '
+ '(binary value has 1 after 0)'.format(self['mask']))
+ prefixlen = binvalue.count('1')
+ sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
+ try:
+ return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
+ except ValueError:
+ raise PgHbaValueError('{0} is not a valid address range'.format(sourcenw))
+
+ try:
+ return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
+ except ValueError:
+ return self['src']
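+ # Illustrative example of the netmask-to-prefixlen conversion above
+ # (values assumed, not from the module):
+ # >>> int(ipaddress.ip_address(u'255.255.255.0'))
+ # 4294967040
+ # >>> '{0:b}'.format(4294967040).count('1')
+ # 24
+ # so src '192.0.2.0' with mask '255.255.255.0' becomes network 192.0.2.0/24.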
+
+ def __lt__(self, other):
+ """This function helps sorted to decide how to sort.
+
+ It just checks itself against the other and decides on some key values
+ if it should be sorted higher or lower in the list.
+ The way it works:
+ For networks, every 1 in 'netmask in binary' makes the subnet more specific.
+ Therefore I chose to use prefix as the weight.
+ So a single IP (/32) should have twice the weight of a /16 network.
+ To keep everything in the same weight scale,
+ - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
+ - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
+ Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
+ which corresponds to ipv6 (0-128).
+ """
+ myweight = self.source_weight()
+ hisweight = other.source_weight()
+ if myweight != hisweight:
+ return myweight > hisweight
+
+ myweight = self.db_weight()
+ hisweight = other.db_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+
+ myweight = self.user_weight()
+ hisweight = other.user_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+ try:
+ return self['src'] < other['src']
+ except TypeError:
+ return self.source_type_weight() < other.source_type_weight()
+ except Exception:
+ # When all else fails, just compare the exact line.
+ return self.line() < other.line()
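+ # Illustrative consequence of the ordering above (example values assumed):
+ # an IPv4 /32 rule (weight 128) sorts before a /24 network (weight 96),
+ # which sorts before src 'all' (weight 0), so the most specific rules end
+ # up first in the rendered file.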
+
+ def source_weight(self):
+ """Report the weight of this source net.
+
+ Basically this is the netmask, where IPv4 is normalized to IPv6
+ (IPv4/32 has the same weight as IPv6/128).
+ """
+ if self['type'] == 'local':
+ return 130
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return sourceobj.prefixlen * 4
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return sourceobj.prefixlen
+ if isinstance(sourceobj, str):
+ # You can also write all to match any IP address,
+ # samehost to match any of the server's own IP addresses,
+ # or samenet to match any address in any subnet that the server is connected to.
+ if sourceobj == 'all':
+ # (all is considered the full range of all ips, which has a weight of 0)
+ return 0
+ if sourceobj == 'samehost':
+ # (sort samehost second after local)
+ return 129
+ if sourceobj == 'samenet':
+ # We might write some fancy code to determine all prefixes
+ # from all interfaces and find a sane value for this one.
+ # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
+ return 96
+ if sourceobj[0] == '.':
+ # suffix matching (domain name), let's assume a very large scale
+ # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
+ return 64
+ # hostname, let's assume only one host matches, which is
+ # IPv4/32 or IPv6/128 (both have weight 128)
+ return 128
+ raise PgHbaValueError('Cannot deduce the source weight of this source {0}'.format(sourceobj))
+
+ def source_type_weight(self):
+ """Give a weight on the type of this source.
+
+ Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
+ This is a 'when all else fails' solution in __lt__.
+ """
+ if self['type'] == 'local':
+ return 3
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return 2
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return 1
+ if isinstance(sourceobj, str):
+ return 0
+ raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
+
+ def db_weight(self):
+ """Report the weight of the database.
+
+ Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
+ """
+ if self['db'] == 'all':
+ return 100000
+ if self['db'] == 'replication':
+ return 0
+ if self['db'] in ['samerole', 'samegroup']:
+ return 1
+ return 1 + self['db'].count(',')
+
+ def user_weight(self):
+ """Report weight when comparing users."""
+ if self['usr'] == 'all':
+ return 1000000
+ return 1
+
+
+def main():
+ '''
+ This function is the main function of this module
+ '''
+ # argument_spec = postgres_common_argument_spec()
+ argument_spec = dict()
+ argument_spec.update(
+ address=dict(type='str', default='samehost', aliases=['source', 'src']),
+ backup=dict(type='bool', default=False),
+ backup_file=dict(type='str'),
+ contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
+ create=dict(type='bool', default=False),
+ databases=dict(type='str', default='all'),
+ dest=dict(type='path', required=True),
+ method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
+ netmask=dict(type='str'),
+ options=dict(type='str'),
+ order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ users=dict(type='str', default='all')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ if IPADDRESS_IMP_ERR is not None:
+ module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
+
+ contype = module.params["contype"]
+ create = bool(module.params["create"] or module.check_mode)
+ if module.check_mode:
+ backup = False
+ else:
+ backup = module.params['backup']
+ backup_file = module.params['backup_file']
+ databases = module.params["databases"]
+ dest = module.params["dest"]
+
+ method = module.params["method"]
+ netmask = module.params["netmask"]
+ options = module.params["options"]
+ order = module.params["order"]
+ source = module.params["address"]
+ state = module.params["state"]
+ users = module.params["users"]
+
+ ret = {'msgs': []}
+ try:
+ pg_hba = PgHba(dest, order, backup=backup, create=create)
+ except PgHbaError as error:
+ module.fail_json(msg='Error reading file:\n{0}'.format(error))
+
+ if contype:
+ try:
+ for database in databases.split(','):
+ for user in users.split(','):
+ rule = PgHbaRule(contype, database, user, source, netmask, method, options)
+ if state == "present":
+ ret['msgs'].append('Adding')
+ pg_hba.add_rule(rule)
+ else:
+ ret['msgs'].append('Removing')
+ pg_hba.remove_rule(rule)
+ except PgHbaError as error:
+ module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
+ file_args = module.load_file_common_arguments(module.params)
+ ret['changed'] = changed = pg_hba.changed()
+ if changed:
+ ret['msgs'].append('Changed')
+ ret['diff'] = pg_hba.diff
+
+ if not module.check_mode:
+ ret['msgs'].append('Writing')
+ try:
+ if pg_hba.write(backup_file):
+ module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
+ expand=False)
+ except PgHbaError as error:
+ module.fail_json(msg='Error writing file:\n{0}'.format(error))
+ if pg_hba.last_backup:
+ ret['backup_file'] = pg_hba.last_backup
+
+ ret['pg_hba'] = list(pg_hba.get_rules())
+ module.exit_json(**ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py
new file mode 100644
index 00000000..240cea57
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ping
+short_description: Check remote PostgreSQL server availability
+description:
+- Simple module to check remote PostgreSQL server availability.
+options:
+ db:
+ description:
+ - Name of a database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_info
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# PostgreSQL ping dbsrv server from the shell:
+# ansible dbsrv -m postgresql_ping
+
+# In the example below you need to generate certificates beforehand.
+# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
+- name: PostgreSQL ping dbsrv server using non-default credentials and SSL
+ community.general.postgresql_ping:
+ db: protected_db
+ login_host: dbsrv
+ login_user: secret
+ login_password: secret_pass
+ ca_cert: /root/root.crt
+ ssl_mode: verify-full
+'''
+
+RETURN = r'''
+is_available:
+ description: PostgreSQL server availability.
+ returned: always
+ type: bool
+ sample: true
+server_version:
+ description: PostgreSQL server version.
+ returned: always
+ type: dict
+ sample: { major: 10, minor: 1 }
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgPing(object):
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.is_available = False
+ self.version = {}
+
+ def do(self):
+ self.get_pg_version()
+ return (self.is_available, self.version)
+
+ def get_pg_version(self):
+ query = "SELECT version()"
+ raw = exec_sql(self, query, add_to_executed=False)[0][0]
+ if raw:
+ self.is_available = True
+ raw = raw.split()[1].split('.')
+ self.version = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
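+ # Illustrative example (server string assumed): a version() result such as
+ # "PostgreSQL 10.1 on x86_64-pc-linux-gnu, compiled by gcc ..." splits to
+ # ['10', '1'], giving {'major': 10, 'minor': 1} as in the RETURN sample.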
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ # Set some default values:
+ cursor = False
+ db_connection = False
+ result = dict(
+ changed=False,
+ is_available=False,
+ server_version=dict(),
+ )
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, fail_on_conn=False)
+
+ if db_connection is not None:
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Do job:
+ pg_ping = PgPing(module, cursor)
+ if cursor:
+ # If connection established:
+ result["is_available"], result["server_version"] = pg_ping.do()
+ db_connection.rollback()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py
new file mode 100644
index 00000000..e8d64f36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py
@@ -0,0 +1,1171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: yes
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8.
+ - The C(type) choice is available since Ansible version 2.10.
+ - The C(procedure) is supported since collection version 1.3.0 and PostgreSQL 11.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+ group, language, table, tablespace, schema, sequence, type , procedure]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence), C(function) or C(procedure),
+ the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of type I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) since Ansible 2.8)
+ - C(procedure) is supported since PostgreSQL 11 and M(community.general) collection 1.3.0.
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - If I(type) is I(function) or I(procedure), colons (":") in object names will be
+ replaced with commas (needed to specify signatures, see examples).
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(procedure), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+ - Note that for embedded types, when I(type=type),
+ I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: yes
+ aliases:
+ - role
+ fail_on_role:
+ description:
+ - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: yes
+ type: bool
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+ - A list of existing role (user/group) names to set as the
+ default permissions for database objects subsequently created by them.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ host:
+ description:
+ - Database host address. If unspecified, connect via Unix socket.
+ type: str
+ aliases:
+ - login_host
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ aliases:
+ - login_unix_socket
+ login:
+ description:
+ - The username to authenticate with.
+ type: str
+ default: postgres
+ aliases:
+ - login_user
+ password:
+ description:
+ - The password to authenticate with.
+ type: str
+ aliases:
+ - login_password
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases:
+ - ssl_rootcert
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(roles), I(target_roles), I(session_role),
+ I(schema) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ usage_on_types:
+ description:
+ - When adding default privileges, the module always implicitly adds ``USAGE ON TYPES``.
+ - To avoid this behavior, set I(usage_on_types) to C(no).
+ - Added to preserve backwards compatibility.
+ - Used only when adding default privileges, ignored otherwise.
+ type: bool
+ default: yes
+ version_added: '1.2.0'
+
+notes:
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(no) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+ access via privileges granted to any role R is a member of, including C(PUBLIC).
+- Note that when you use the C(PUBLIC) role, the module always reports that the state has been changed.
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+extends_documentation_fragment:
+- community.general.postgres
+
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ community.general.postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
+
+- name: Same as above leveraging default values
+ community.general.postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ community.general.postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ community.general.postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific object types and privileges, see the following examples.
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: yes
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific object types, set objs and privs explicitly:
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ community.general.postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ community.general.postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since community.general 0.2.0
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since collection version 1.3.0
+# Grant 'execute' permissions on all procedures in schema 'common' to role 'caller'
+# Needs PostgreSQL 11 or higher and community.general 1.3.0 or higher
+- name: GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges for new TABLES objects created by librarian as
+# default to the role reader.
+# To scope this to objects created by specific roles, use target_roles:
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# REVOKE SELECT privileges for new TABLES objects created by librarian as
+# default from the role reader.
+# Likewise scoped to objects created by specific roles via target_roles:
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since community.general 0.2.0
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ community.general.postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+ 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+ 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+ 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+ 'FUNCTIONS': ('ALL', 'EXECUTE'),
+ 'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+ pass
+
+
+def role_exists(module, cursor, rolname):
+ """Check user exists or not"""
+ query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+ try:
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+ except Exception as e:
+ module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+ return False
+
+
+# We don't have functools.partial in Python < 2.5
+def partial(f, *args, **kwargs):
+ """Partial function application"""
+
+ def g(*g_args, **g_kwargs):
+ new_kwargs = kwargs.copy()
+ new_kwargs.update(g_kwargs)
+ return f(*(args + g_args), **new_kwargs)
+
+ g.f = f
+ g.args = args
+ g.kwargs = kwargs
+ return g
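+# Illustrative usage (names assumed): partial(conn.get_table_acls, 'public')
+# returns a callable g with g(tables) == conn.get_table_acls('public', tables);
+# this is how get_status is bound to a schema qualifier in manipulate_privs().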
+
+
+class Connection(object):
+ """Wrapper around a psycopg2 connection with some convenience methods"""
+
+ def __init__(self, params, module):
+ self.database = params.database
+ self.module = module
+ # To use defaults values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "host": "host",
+ "login": "user",
+ "password": "password",
+ "port": "port",
+ "database": "database",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ kw = dict((params_map[k], getattr(params, k)) for k in params_map
+ if getattr(params, k) != '' and getattr(params, k) is not None)
+
+ # If a unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+ if is_localhost and params.unix_socket != "":
+ kw["host"] = params.unix_socket
+
+ sslrootcert = params.ca_cert
+ if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+ raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+ self.connection = psycopg2.connect(**kw)
+ self.cursor = self.connection.cursor()
+ self.pg_version = self.connection.server_version
+
+ def commit(self):
+ self.connection.commit()
+
+ def rollback(self):
+ self.connection.rollback()
+
+ @property
+ def encoding(self):
+ """Connection encoding in Python-compatible form"""
+ return psycopg2.extensions.encodings[self.connection.encoding]
+
+ # Methods for querying database objects
+
+ # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+ # phrases in GRANT or REVOKE statements, therefore alternative methods are
+ # provided here.
+
+ def schema_exists(self, schema):
+ query = """SELECT count(*)
+ FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return self.cursor.fetchone()[0] > 0
+
+ def get_all_tables_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_sequences_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S'"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_functions_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s")
+
+ if self.pg_version >= 110000:
+ query += " and p.prokind = 'f'"
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ def get_all_procedures_in_schema(self, schema):
+ if self.pg_version < 110000:
+ raise Error("PostgreSQL verion must be >= 11 for type=procedure. Exit")
+
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s and p.prokind = 'p'")
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ # Methods for getting access control lists and group membership info
+
+ # To determine whether anything has changed after granting/revoking
+ # privileges, we compare the access control lists of the specified database
+ # objects before and afterwards. Python's list/string comparison should
+ # suffice for change detection, we should not actually have to parse ACLs.
+ # The same should apply to group membership information.
+
+ def get_table_acls(self, schema, tables):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, tables))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_sequence_acls(self, schema, sequences):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, sequences))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_function_acls(self, schema, function_signatures):
+ funcnames = [f.split('(', 1)[0] for f in function_signatures]
+ query = """SELECT proacl
+ FROM pg_catalog.pg_proc p
+ JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s AND proname = ANY (%s)
+ ORDER BY proname, proargtypes"""
+ self.cursor.execute(query, (schema, funcnames))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_schema_acls(self, schemas):
+ query = """SELECT nspacl FROM pg_catalog.pg_namespace
+ WHERE nspname = ANY (%s) ORDER BY nspname"""
+ self.cursor.execute(query, (schemas,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_language_acls(self, languages):
+ query = """SELECT lanacl FROM pg_catalog.pg_language
+ WHERE lanname = ANY (%s) ORDER BY lanname"""
+ self.cursor.execute(query, (languages,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_tablespace_acls(self, tablespaces):
+ query = """SELECT spcacl FROM pg_catalog.pg_tablespace
+ WHERE spcname = ANY (%s) ORDER BY spcname"""
+ self.cursor.execute(query, (tablespaces,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_database_acls(self, databases):
+ query = """SELECT datacl FROM pg_catalog.pg_database
+ WHERE datname = ANY (%s) ORDER BY datname"""
+ self.cursor.execute(query, (databases,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_group_memberships(self, groups):
+ query = """SELECT roleid, grantor, member, admin_option
+ FROM pg_catalog.pg_auth_members am
+ JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
+ WHERE r.rolname = ANY(%s)
+ ORDER BY roleid, grantor, member"""
+ self.cursor.execute(query, (groups,))
+ return self.cursor.fetchall()
+
+ def get_default_privs(self, schema, *args):
+ query = """SELECT defaclacl
+ FROM pg_default_acl a
+ JOIN pg_namespace b ON a.defaclnamespace=b.oid
+ WHERE b.nspname = %s;"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_data_wrapper_acls(self, fdws):
+ query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (%s) ORDER BY fdwname"""
+ self.cursor.execute(query, (fdws,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_server_acls(self, fs):
+ query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (%s) ORDER BY srvname"""
+ self.cursor.execute(query, (fs,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_type_acls(self, schema, types):
+ query = """SELECT t.typacl FROM pg_catalog.pg_type t
+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
+ self.cursor.execute(query, (schema, types))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ # Manipulating privileges
+
+ def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
+ state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True):
+ """Manipulate database object privileges.
+
+ :param obj_type: Type of database object to grant/revoke
+ privileges for.
+ :param privs: Either a list of privileges to grant/revoke
+ or None if type is "group".
+ :param objs: List of database objects to grant/revoke
+ privileges for.
+ :param roles: Either a list of role names or "PUBLIC"
+ for the implicitly defined "PUBLIC" group
+ :param target_roles: List of role names to grant/revoke
+ default privileges as.
+ :param state: "present" to grant privileges, "absent" to revoke.
+ :param grant_option: Only for state "present": If True, set
+ grant/admin option. If False, revoke it.
+ If None, don't change grant option.
+ :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
+ "FUNCTION") must be qualified by schema.
+ Ignored for other Types.
+ """
+ # get_status: function to get current status
+ if obj_type == 'table':
+ get_status = partial(self.get_table_acls, schema_qualifier)
+ elif obj_type == 'sequence':
+ get_status = partial(self.get_sequence_acls, schema_qualifier)
+ elif obj_type in ('function', 'procedure'):
+ get_status = partial(self.get_function_acls, schema_qualifier)
+ elif obj_type == 'schema':
+ get_status = self.get_schema_acls
+ elif obj_type == 'language':
+ get_status = self.get_language_acls
+ elif obj_type == 'tablespace':
+ get_status = self.get_tablespace_acls
+ elif obj_type == 'database':
+ get_status = self.get_database_acls
+ elif obj_type == 'group':
+ get_status = self.get_group_memberships
+ elif obj_type == 'default_privs':
+ get_status = partial(self.get_default_privs, schema_qualifier)
+ elif obj_type == 'foreign_data_wrapper':
+ get_status = self.get_foreign_data_wrapper_acls
+ elif obj_type == 'foreign_server':
+ get_status = self.get_foreign_server_acls
+ elif obj_type == 'type':
+ get_status = partial(self.get_type_acls, schema_qualifier)
+ else:
+ raise Error('Unsupported database object type "%s".' % obj_type)
+
+ # Return False (nothing has changed) if there are no objs to work on.
+ if not objs:
+ return False
+
+ # obj_ids: quoted db object identifiers (sometimes schema-qualified)
+ if obj_type in ('function', 'procedure'):
+ obj_ids = []
+ for obj in objs:
+ try:
+ f, args = obj.split('(', 1)
+ except Exception:
+ raise Error('Illegal function / procedure signature: "%s".' % obj)
+ obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
+ elif obj_type in ['table', 'sequence', 'type']:
+ obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
+ else:
+ obj_ids = ['"%s"' % o for o in objs]
+
+ # set_what: SQL-fragment specifying what to set for the target roles:
+ # Either group membership or privileges on objects of a certain type
+ if obj_type == 'group':
+ set_what = ','.join(obj_ids)
+ elif obj_type == 'default_privs':
+ # We don't want privs to be quoted here
+ set_what = ','.join(privs)
+ else:
+ # function types are already quoted above
+ if obj_type not in ('function', 'procedure'):
+ obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
+ # Note: obj_type has been checked against a set of string literals
+ # and privs was escaped when it was parsed
+ # Note: Underscores are replaced with spaces to support multi-word obj_type
+ set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
+ ','.join(obj_ids))
+
+ # for_whom: SQL-fragment specifying for whom to set the above
+ if roles == 'PUBLIC':
+ for_whom = 'PUBLIC'
+ else:
+ for_whom = []
+ for r in roles:
+ if not role_exists(self.module, self.cursor, r):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
+
+ else:
+ self.module.warn("Role '%s' does not exist, pass it" % r.strip())
+ else:
+ for_whom.append('"%s"' % r)
+
+ if not for_whom:
+ return False
+
+ for_whom = ','.join(for_whom)
+
+ # as_who:
+ as_who = None
+ if target_roles:
+ as_who = ','.join('"%s"' % r for r in target_roles)
+
+ if schema_qualifier:
+ schema_qualifier = '"%s"' % schema_qualifier
+
+ status_before = get_status(objs)
+
+ query = QueryBuilder(state) \
+ .for_objtype(obj_type) \
+ .with_grant_option(grant_option) \
+ .for_whom(for_whom) \
+ .as_who(as_who) \
+ .for_schema(schema_qualifier) \
+ .set_what(set_what) \
+ .for_objs(objs) \
+ .usage_on_types(usage_on_types) \
+ .build()
+
+ executed_queries.append(query)
+ self.cursor.execute(query)
+ if roles == 'PUBLIC':
+ return True
+
+ status_after = get_status(objs)
+
+ def nonesorted(e):
+ # Python 3 can raise a TypeError when trying
+ # to compare NoneType elements during sorting, so map None to ''.
+ if e is None:
+ return ''
+ return e
+
+ status_before.sort(key=nonesorted)
+ status_after.sort(key=nonesorted)
+ return status_before != status_after
+
+
+class QueryBuilder(object):
+ def __init__(self, state):
+ self._grant_option = None
+ self._for_whom = None
+ self._as_who = None
+ self._set_what = None
+ self._obj_type = None
+ self._state = state
+ self._schema = None
+ self._objs = None
+ self._usage_on_types = None
+ self.query = []
+
+ def for_objs(self, objs):
+ self._objs = objs
+ return self
+
+ def for_schema(self, schema):
+ self._schema = schema
+ return self
+
+ def with_grant_option(self, option):
+ self._grant_option = option
+ return self
+
+ def for_whom(self, who):
+ self._for_whom = who
+ return self
+
+ def usage_on_types(self, usage_on_types):
+ self._usage_on_types = usage_on_types
+ return self
+
+ def as_who(self, target_roles):
+ self._as_who = target_roles
+ return self
+
+ def set_what(self, what):
+ self._set_what = what
+ return self
+
+ def for_objtype(self, objtype):
+ self._obj_type = objtype
+ return self
+
+ def build(self):
+ if self._state == 'present':
+ self.build_present()
+ else:
+ self.build_absent()
+ return '\n'.join(self.query)
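+ # Illustrative sketch (names assumed, not from the module): for a plain
+ # table grant,
+ # QueryBuilder('present').for_objtype('table').with_grant_option(None)
+ # .for_whom('"reader"').set_what('SELECT ON TABLE "books"').build()
+ # returns 'GRANT SELECT ON TABLE "books" TO "reader";'.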
+
+ def add_default_revoke(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+
+ def add_grant_option(self):
+ if self._grant_option:
+ if self._obj_type == 'group':
+ self.query[-1] += ' WITH ADMIN OPTION;'
+ else:
+ self.query[-1] += ' WITH GRANT OPTION;'
+ elif self._grant_option is False:
+ self.query[-1] += ';'
+ if self._obj_type == 'group':
+ self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ elif not self._obj_type == 'default_privs':
+ self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ else:
+ self.query[-1] += ';'
+
+ def add_default_priv(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who,
+ self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ self.add_grant_option()
+
+ if self._usage_on_types:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who,
+ self._schema,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
+ self.add_grant_option()
+
+ def build_present(self):
+ if self._obj_type == 'default_privs':
+ self.add_default_revoke()
+ self.add_default_priv()
+ else:
+ self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
+ self.add_grant_option()
+
+ def build_absent(self):
+ if self._obj_type == 'default_privs':
+ self.query = []
+ for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ database=dict(required=True, aliases=['db', 'login_db']),
+ state=dict(default='present', choices=['present', 'absent']),
+ privs=dict(required=False, aliases=['priv']),
+ type=dict(default='table',
+ choices=['table',
+ 'sequence',
+ 'function',
+ 'procedure',
+ 'database',
+ 'schema',
+ 'language',
+ 'tablespace',
+ 'group',
+ 'default_privs',
+ 'foreign_data_wrapper',
+ 'foreign_server',
+ 'type', ]),
+ objs=dict(required=False, aliases=['obj']),
+ schema=dict(required=False),
+ roles=dict(required=True, aliases=['role']),
+ session_role=dict(required=False),
+ target_roles=dict(required=False),
+ grant_option=dict(required=False, type='bool',
+ aliases=['admin_option']),
+ host=dict(default='', aliases=['login_host']),
+ unix_socket=dict(default='', aliases=['login_unix_socket']),
+ login=dict(default='postgres', aliases=['login_user']),
+ password=dict(default='', aliases=['login_password'], no_log=True),
+ fail_on_role=dict(type='bool', default=True),
+ trust_input=dict(type='bool', default=True),
+ usage_on_types=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ fail_on_role = module.params['fail_on_role']
+ usage_on_types = module.params['usage_on_types']
+
+ # Create type object as namespace for module params
+ p = type('Params', (), module.params)
+ # param "schema": default, allowed depends on param "type"
+ if p.type in ['table', 'sequence', 'function', 'procedure', 'type', 'default_privs']:
+ p.schema = p.schema or 'public'
+ elif p.schema:
+ module.fail_json(msg='Argument "schema" is not allowed '
+ 'for type "%s".' % p.type)
+
+ # param "objs": default, required depends on param "type"
+ if p.type == 'database':
+ p.objs = p.objs or p.database
+ elif not p.objs:
+ module.fail_json(msg='Argument "objs" is required '
+ 'for type "%s".' % p.type)
+
+ # param "privs": allowed, required depends on param "type"
+ if p.type == 'group':
+ if p.privs:
+ module.fail_json(msg='Argument "privs" is not allowed '
+ 'for type "group".')
+ elif not p.privs:
+ module.fail_json(msg='Argument "privs" is required '
+ 'for type "%s".' % p.type)
+
+ # Check input
+ if not p.trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, p.roles, p.target_roles, p.session_role, p.schema)
+
+ # Connect to Database
+ if not psycopg2:
+ module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
+ try:
+ conn = Connection(p, module)
+ except psycopg2.Error as e:
+ module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+ except ValueError as e:
+ # We raise this when the psycopg library is too old
+ module.fail_json(msg=to_native(e))
+
+ if p.session_role:
+ try:
+ conn.cursor.execute('SET ROLE "%s"' % p.session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ # privs
+ if p.privs:
+ privs = frozenset(pr.upper() for pr in p.privs.split(','))
+ if not privs.issubset(VALID_PRIVS):
+ module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
+ else:
+ privs = None
+ # objs:
+ if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_tables_in_schema(p.schema)
+ elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_sequences_in_schema(p.schema)
+ elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_functions_in_schema(p.schema)
+ elif p.type == 'procedure' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_procedures_in_schema(p.schema)
+ elif p.type == 'default_privs':
+ if p.objs == 'ALL_DEFAULT':
+ objs = frozenset(VALID_DEFAULT_OBJS.keys())
+ else:
+ objs = frozenset(obj.upper() for obj in p.objs.split(','))
+ if not objs.issubset(VALID_DEFAULT_OBJS):
+ module.fail_json(
+ msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
+ # Again, do we have valid privs specified for object type:
+ valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
+ if not valid_objects_for_priv == objs:
+ module.fail_json(
+ msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
+ valid_objects_for_priv, objs))
+ else:
+ objs = p.objs.split(',')
+
+ # function signatures are encoded using ':' to separate args
+ if p.type in ('function', 'procedure'):
+ objs = [obj.replace(':', ',') for obj in objs]
+
+ # roles
+ if p.roles.upper() == 'PUBLIC':
+ roles = 'PUBLIC'
+ else:
+ roles = p.roles.split(',')
+
+ if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
+ if fail_on_role:
+ module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
+ else:
+ module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
+ module.exit_json(changed=False)
+
+ # check if target_roles is set with type: default_privs
+ if p.target_roles and not p.type == 'default_privs':
+ module.warn('"target_roles" will be ignored '
+ 'Argument "type: default_privs" is required for usage of "target_roles".')
+
+ # target roles
+ if p.target_roles:
+ target_roles = p.target_roles.split(',')
+ else:
+ target_roles = None
+
+ changed = conn.manipulate_privs(
+ obj_type=p.type,
+ privs=privs,
+ objs=objs,
+ roles=roles,
+ target_roles=target_roles,
+ state=p.state,
+ grant_option=p.grant_option,
+ schema_qualifier=p.schema,
+ fail_on_role=fail_on_role,
+ usage_on_types=usage_on_types,
+ )
+
+ except Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except psycopg2.Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e))
+
+ if module.check_mode or not changed:
+ conn.rollback()
+ else:
+ conn.commit()
+ module.exit_json(changed=changed, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py
new file mode 100644
index 00000000..1db80adc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py
@@ -0,0 +1,682 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_publication
+short_description: Add, update, or remove PostgreSQL publication
+description:
+- Add, update, or remove PostgreSQL publication.
+options:
+ name:
+ description:
+ - Name of the publication to add, update, or remove.
+ required: true
+ type: str
+ db:
+ description:
+ - Name of the database to connect to and where
+ the publication state will be changed.
+ aliases: [ login_db ]
+ type: str
+ tables:
+ description:
+ - List of tables to add to the publication.
+ - If no value is set, all tables are targeted.
+ - If the publication already exists for specific tables and I(tables) is not passed,
+ nothing will be changed. If you need to add all tables to the publication with the same name,
+ drop the existing publication and create a new one without passing I(tables).
+ type: list
+ elements: str
+ state:
+ description:
+ - The publication state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ parameters:
+ description:
+ - Dictionary with optional publication parameters.
+ - Available parameters depend on PostgreSQL version.
+ type: dict
+ owner:
+ description:
+ - Publication owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ type: str
+ cascade:
+ description:
+ - Drop publication dependencies. Has effect with I(state=absent) only.
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(tables), I(owner),
+ I(session_role), I(params) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- PostgreSQL version must be 10 or greater.
+seealso:
+- name: CREATE PUBLICATION reference
+ description: Complete reference of the CREATE PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createpublication.html
+- name: ALTER PUBLICATION reference
+ description: Complete reference of the ALTER PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterpublication.html
+- name: DROP PUBLICATION reference
+ description: Complete reference of the DROP PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droppublication.html
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new publication with name "acme" targeting all tables in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+
+- name: Create publication "acme" publishing only prices and vehicles tables.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+
+- name: >
+ Create publication "acme", set user alice as an owner, targeting all tables.
+    Allowable DML operations are INSERT and UPDATE only.
+ community.general.postgresql_publication:
+ name: acme
+ owner: alice
+ parameters:
+ publish: 'insert,update'
+
+- name: >
+ Assuming publication "acme" exists and there are targeted
+ tables "prices" and "vehicles", add table "stores" to the publication.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+ - stores
+
+- name: Remove publication "acme" if exists in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+ state: absent
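+
+# Illustrative only: cascade additionally removes objects that depend on the publication
+- name: Remove publication "acme" together with dependent objects
+  community.general.postgresql_publication:
+    db: test
+    name: acme
+    state: absent
+    cascade: yes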
+'''
+
+RETURN = r'''
+exists:
+ description:
+  - Flag indicating whether the publication exists at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
+owner:
+ description: Owner of the publication at the end of runtime.
+ returned: if publication exists
+ type: str
+ sample: "alice"
+tables:
+ description:
+ - List of tables in the publication at the end of runtime.
+  - If all tables are published, returns an empty list.
+ returned: if publication exists
+ type: list
+ sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
+alltables:
+ description:
+  - Flag indicating whether all tables are published.
+ returned: if publication exists
+ type: bool
+ sample: false
+parameters:
+ description: Publication parameters at the end of runtime.
+ returned: if publication exists
+ type: dict
+ sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+# Minimum supported server version as reported by psycopg2's
+# connection.server_version (PostgreSQL 10.0 -> 100000):
+SUPPORTED_PG_VERSION = 100000
+
+
+################################
+# Module functions and classes #
+################################
+
+def transform_tables_representation(tbl_list):
+ """Add 'public.' to names of tables where a schema identifier is absent
+ and add quotes to each element.
+
+ Args:
+ tbl_list (list): List of table names.
+
+ Returns:
+ tbl_list (list): Changed list.
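+
+    Example (illustrative, assuming pg_quote_identifier double-quotes
+    each dot-separated part):
+        ['prices', 'acme.vehicles'] -> ['"public"."prices"', '"acme"."vehicles"']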
+ """
+ for i, table in enumerate(tbl_list):
+ if '.' not in table:
+ tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
+ else:
+ tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
+
+ return tbl_list
+
+
+class PgPublication():
+ """Class to work with PostgreSQL publication.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the publication.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of the publication.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with publication attributes.
+        exists (bool): Flag indicating whether the publication exists.
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.executed_queries = []
+ self.attrs = {
+ 'alltables': False,
+ 'tables': [],
+ 'parameters': {},
+ 'owner': '',
+ }
+ self.exists = self.check_pub()
+
+ def get_info(self):
+ """Refresh the publication information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_pub()
+ return self.attrs
+
+ def check_pub(self):
+ """Check the publication and refresh ``self.attrs`` publication attribute.
+
+ Returns:
+ True if the publication with ``self.name`` exists, False otherwise.
+ """
+
+ pub_info = self.__get_general_pub_info()
+
+ if not pub_info:
+ # Publication does not exist:
+ return False
+
+ self.attrs['owner'] = pub_info.get('pubowner')
+
+ # Publication DML operations:
+ self.attrs['parameters']['publish'] = {}
+ self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
+ self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
+ self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
+ if pub_info.get('pubtruncate'):
+ self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')
+
+ # If alltables flag is False, get the list of targeted tables:
+ if not pub_info.get('puballtables'):
+ table_info = self.__get_tables_pub_info()
+ # Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
+ # for better representation:
+ for i, schema_and_table in enumerate(table_info):
+ table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')
+
+ self.attrs['tables'] = table_info
+ else:
+ self.attrs['alltables'] = True
+
+ # Publication exists:
+ return True
+
+ def create(self, tables, params, owner, check_mode=True):
+ """Create the publication.
+
+ Args:
+ tables (list): List with names of the tables that need to be added to the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been created, otherwise False.
+ """
+ changed = True
+
+ query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]
+
+ if tables:
+ query_fragments.append("FOR TABLE %s" % ', '.join(tables))
+ else:
+ query_fragments.append("FOR ALL TABLES")
+
+ if params:
+ params_list = []
+ # Make list ["param = 'value'", ...] from params dict:
+ for (key, val) in iteritems(params):
+ params_list.append("%s = '%s'" % (key, val))
+
+ # Add the list to query_fragments:
+ query_fragments.append("WITH (%s)" % ', '.join(params_list))
+
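+        # The assembled statement looks like, for example:
+        #   CREATE PUBLICATION "acme" FOR TABLE "public"."prices"
+        #   WITH (publish = 'insert,update')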
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ if owner:
+ # If check_mode, just add possible SQL to
+ # executed_queries and return:
+ self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
+ def update(self, tables, params, owner, check_mode=True):
+ """Update the publication.
+
+ Args:
+            tables (list): List with names of the tables that need to be present in the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been updated, otherwise False.
+ """
+ changed = False
+
+        # Add or drop tables from the set of published tables:
+        if tables and not self.attrs['alltables']:
+
+            # 1. Add passed tables that are not in the publication yet:
+            for tbl in tables:
+                if tbl not in self.attrs['tables']:
+                    changed = self.__pub_add_table(tbl, check_mode=check_mode)
+
+            # 2. Drop published tables that are not in the passed tables:
+            for tbl in self.attrs['tables']:
+                if tbl not in tables:
+                    changed = self.__pub_drop_table(tbl, check_mode=check_mode)
+
+ elif tables and self.attrs['alltables']:
+ changed = self.__pub_set_tables(tables, check_mode=check_mode)
+
+ # Update pub parameters:
+ if params:
+ for key, val in iteritems(params):
+ if self.attrs['parameters'].get(key):
+
+                    # In PostgreSQL 10/11, 'publish' is the only optional parameter available.
+ if key == 'publish':
+ # 'publish' value can be only a string with comma-separated items
+ # of allowed DML operations like 'insert,update' or
+ # 'insert,update,delete', etc.
+ # Make dictionary to compare with current attrs later:
+ val_dict = self.attrs['parameters']['publish'].copy()
+ val_list = val.split(',')
+ for v in val_dict:
+ if v in val_list:
+ val_dict[v] = True
+ else:
+ val_dict[v] = False
+
+ # Compare val_dict and the dict with current 'publish' parameters,
+ # if they're different, set new values:
+ if val_dict != self.attrs['parameters']['publish']:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Default behavior for other cases:
+ elif self.attrs['parameters'][key] != val:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ else:
+ # If the parameter was not set before:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Update pub owner:
+ if owner:
+ if owner != self.attrs['owner']:
+ changed = self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the publication.
+
+ Kwargs:
+            cascade (bool): If True, drop the publication together
+                with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+            changed (bool): True if the publication has been dropped, otherwise False.
+ """
+ if self.exists:
+ query_fragments = []
+ query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def __get_general_pub_info(self):
+ """Get and return general publication information.
+
+ Returns:
+ Dict with publication information if successful, False otherwise.
+ """
+ # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
+ pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_publication' "
+ "AND column_name = 'pubtruncate'"), add_to_executed=False)
+
+ if pgtrunc_sup:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+ else:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+
+ result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __get_tables_pub_info(self):
+ """Get and return tables that are published by the publication.
+
+ Returns:
+ List of dicts with published tables.
+ """
+ query = ("SELECT schemaname, tablename "
+ "FROM pg_publication_tables WHERE pubname = %(pname)s")
+ return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+
+ def __pub_add_table(self, table, check_mode=False):
+ """Add a table to the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_drop_table(self, table, check_mode=False):
+ """Drop a table from the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_tables(self, tables, check_mode=False):
+ """Set a table suit that need to be published by the publication.
+
+ Args:
+ tables (list): List of tables.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
+ query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ ', '.join(quoted_tables)))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_param(self, param, value, check_mode=False):
+ """Set an optional publication parameter.
+
+ Args:
+ param (str): Name of the parameter.
+ value (str): Parameter value.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
+ param, value))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_owner(self, role, check_mode=False):
+ """Set a publication owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ('ALTER PUBLICATION %s '
+ 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+        Note: If we just need to get information from the database,
+            we use the ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(required=True),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ tables=dict(type='list', elements='str'),
+ parameters=dict(type='dict'),
+ owner=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ name = module.params['name']
+ state = module.params['state']
+ tables = module.params['tables']
+ params = module.params['parameters']
+ owner = module.params['owner']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not params:
+ params_list = None
+ else:
+ params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]
+
+ check_input(module, name, tables, owner, session_role, params_list)
+
+ if state == 'absent':
+ if tables:
+ module.warn('parameter "tables" is ignored when "state=absent"')
+ if params:
+ module.warn('parameter "parameters" is ignored when "state=absent"')
+ if owner:
+ module.warn('parameter "owner" is ignored when "state=absent"')
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when "state=present"')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+    # We check the publication state without executing DML queries, so set autocommit:
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Nothing was changed by default:
+ changed = False
+
+ ###################################
+ # Create object and do rock'n'roll:
+ publication = PgPublication(module, cursor, name)
+
+ if tables:
+ tables = transform_tables_representation(tables)
+
+ # If module.check_mode=True, nothing will be changed:
+ if state == 'present':
+ if not publication.exists:
+ changed = publication.create(tables, params, owner, check_mode=module.check_mode)
+
+ else:
+ changed = publication.update(tables, params, owner, check_mode=module.check_mode)
+
+ elif state == 'absent':
+ changed = publication.drop(cascade=cascade, check_mode=module.check_mode)
+
+ # Get final publication info:
+ pub_fin_info = {}
+ if state == 'present' or (state == 'absent' and module.check_mode):
+ pub_fin_info = publication.get_info()
+ elif state == 'absent' and not module.check_mode:
+ publication.exists = False
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Update publication info and return ret values:
+ module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py
new file mode 100644
index 00000000..e231fbd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py
@@ -0,0 +1,452 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(community.general.postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+options:
+ query:
+ description:
+ - SQL query to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: str
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - Path to a SQL script on the target machine.
+ - If the script contains several queries, they must be semicolon-separated.
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: no
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ search_path:
+ description:
+ - List of schema names to look in.
+ type: list
+ elements: str
+ version_added: '1.0.0'
+seealso:
+- module: community.general.postgresql_db
+- name: PostgreSQL Schema reference
+ description: Complete reference of the PostgreSQL schema documentation.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ community.general.postgresql_query:
+ db: acme
+ query: SELECT version()
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ community.general.postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ community.general.postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ community.general.postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ community.general.postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ community.general.postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: yes
+
+- name: >
+ Insert data to the column of array type using positional_args.
+ Note that we use quotes here, the same as for passing JSON, etc.
+ community.general.postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ ansible.builtin.set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+
+# Select from test table looking into app1 schema first, then,
+# if the schema doesn't exist or the table hasn't been found there,
+# try to find it in the schema public
+- name: Select from test using search_path
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table
+ search_path:
+ - app1
+ - public
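+
+# Illustrative only: the role name app_reader is an assumption;
+# session_role must be a role the login user is a member of
+- name: Run a query with the permissions of role app_reader
+  community.general.postgresql_query:
+    db: test_db
+    session_role: app_reader
+    query: SELECT * FROM test_table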
+'''
+
+RETURN = r'''
+query:
+ description:
+ - Executed query.
+ - When reading several queries from a file, it contains only the last one.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description:
+ - Attribute containing the message returned by the command.
+  - When reading several queries from a file, it contains the message of the last one.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+ - When running queries from a file, returns result of the last query.
+ returned: always
+ type: list
+ elements: dict
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+query_list:
+ description:
+ - List of executed queries.
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: str
+ sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
+query_all_results:
+ description:
+ - List containing results of all queries executed (one sublist for every query).
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: list
+ sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
+rowcount:
+ description:
+ - Number of produced or affected rows.
+ - When using a script with multiple queries,
+ it contains a total number of produced or affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+    # ProgrammingError is needed for checking 'no results to fetch' in main();
+    # psycopg2 availability itself is checked by connect_to_db() in
+    # ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# Module execution.
+#
+
+def list_to_pg_array(elem):
+ """Convert the passed list to PostgreSQL array
+ represented as a string.
+
+ Args:
+ elem (list): List that needs to be converted.
+
+ Returns:
+ elem (str): String representation of PostgreSQL array.
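+
+    Example:
+        list_to_pg_array([1, 2, 3]) -> '{1, 2, 3}'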
+ """
+ elem = str(elem).strip('[]')
+ elem = '{' + elem + '}'
+ return elem
+
+
+def convert_elements_to_pg_arrays(obj):
+ """Convert list elements of the passed object
+ to PostgreSQL arrays represented as strings.
+
+ Args:
+ obj (dict or list): Object whose elements need to be converted.
+
+ Returns:
+ obj (dict or list): Object with converted elements.
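+
+    Example:
+        convert_elements_to_pg_arrays({'ids': [1, 2]}) -> {'ids': '{1, 2}'}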
+ """
+ if isinstance(obj, dict):
+ for (key, elem) in iteritems(obj):
+ if isinstance(elem, list):
+ obj[key] = list_to_pg_array(elem)
+
+ elif isinstance(obj, list):
+ for i, elem in enumerate(obj):
+ if isinstance(elem, list):
+ obj[i] = list_to_pg_array(elem)
+
+ return obj
+
+
+def set_search_path(cursor, search_path):
+ """Set session's search_path.
+
+ Args:
+ cursor (Psycopg2 cursor): Database cursor object.
+ search_path (str): String containing comma-separated schema names.
+ """
+ cursor.execute('SET search_path TO %s' % search_path)
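+    # For example, set_search_path(cursor, 'app1,public') executes:
+    #   SET search_path TO app1,public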
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ query=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ positional_args=dict(type='list', elements='raw'),
+ named_args=dict(type='dict'),
+ session_role=dict(type='str'),
+ path_to_script=dict(type='path'),
+ autocommit=dict(type='bool', default=False),
+ encoding=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ search_path=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(('positional_args', 'named_args'),),
+ supports_check_mode=True,
+ )
+
+ query = module.params["query"]
+ positional_args = module.params["positional_args"]
+ named_args = module.params["named_args"]
+ path_to_script = module.params["path_to_script"]
+ autocommit = module.params["autocommit"]
+ encoding = module.params["encoding"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ search_path = module.params["search_path"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, session_role)
+
+ if autocommit and module.check_mode:
+ module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
+
+ if path_to_script and query:
+ module.fail_json(msg="path_to_script is mutually exclusive with query")
+
+ if positional_args:
+ positional_args = convert_elements_to_pg_arrays(positional_args)
+
+ elif named_args:
+ named_args = convert_elements_to_pg_arrays(named_args)
+
+ query_list = []
+ if path_to_script:
+ try:
+ with open(path_to_script, 'rb') as f:
+ query = to_native(f.read())
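+                # Naive split on ';': semicolons inside string literals
+                # would split the script too, so each chunk becomes one query.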
+ if ';' in query:
+ query_list = [q for q in query.split(';') if q != '\n']
+ else:
+ query_list.append(query)
+ except Exception as e:
+ module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
+ else:
+ query_list.append(query)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ if encoding is not None:
+ db_connection.set_client_encoding(encoding)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if search_path:
+ set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))
+
+ # Prepare args:
+ if module.params.get("positional_args"):
+ arguments = module.params["positional_args"]
+ elif module.params.get("named_args"):
+ arguments = module.params["named_args"]
+ else:
+ arguments = None
+
+ # Set defaults:
+ changed = False
+
+ query_all_results = []
+ rowcount = 0
+ statusmessage = ''
+
+ # Execute query:
+ for query in query_list:
+ try:
+ cursor.execute(query, arguments)
+ statusmessage = cursor.statusmessage
+ if cursor.rowcount > 0:
+ rowcount += cursor.rowcount
+
+ try:
+ query_result = [dict(row) for row in cursor.fetchall()]
+
+ except Psycopg2ProgrammingError as e:
+                if to_native(e) == 'no results to fetch':
+                    # The query returned no rows (e.g. INSERT or UPDATE):
+                    query_result = []
+                else:
+                    module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+ except Exception as e:
+ module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+ query_all_results.append(query_result)
+
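+            # Infer 'changed' from the status message: psycopg2 reports e.g.
+            # 'INSERT 0 1' (command, OID, row count) or 'UPDATE 3'
+            # (command, row count), so a non-zero trailing number means
+            # rows were affected.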
+ if 'SELECT' not in statusmessage:
+ if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
+ s = statusmessage.split()
+ if len(s) == 3:
+ if s[2] != '0':
+ changed = True
+
+ elif len(s) == 2:
+ if s[1] != '0':
+ changed = True
+
+ else:
+ changed = True
+
+ else:
+ changed = True
+
+ except Exception as e:
+ if not autocommit:
+ db_connection.rollback()
+
+ cursor.close()
+ db_connection.close()
+ module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, arguments, to_native(e), query_list))
+
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ if not autocommit:
+ db_connection.commit()
+
+ kw = dict(
+ changed=changed,
+ query=cursor.query,
+ query_list=query_list,
+ statusmessage=statusmessage,
+ query_result=query_result,
+ query_all_results=query_all_results,
+ rowcount=rowcount,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py
new file mode 100644
index 00000000..e7f28ecf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_schema
+short_description: Add or remove PostgreSQL schema
+description:
+- Add or remove PostgreSQL schema.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ type: str
+ aliases:
+ - schema
+ database:
+ description:
+ - Name of the database to connect to and add or remove the schema.
+ type: str
+ default: postgres
+ aliases:
+ - db
+ - login_db
+ owner:
+ description:
+ - Name of the role to set as owner of the schema.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The schema state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ cascade_drop:
+ description:
+ - Drop schema with CASCADE to remove child objects.
+ type: bool
+ default: false
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL schemas
+ description: General information about PostgreSQL schemas.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+- name: CREATE SCHEMA reference
+ description: Complete reference of the CREATE SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createschema.html
+- name: ALTER SCHEMA reference
+ description: Complete reference of the ALTER SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterschema.html
+- name: DROP SCHEMA reference
+ description: Complete reference of the DROP SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropschema.html
+author:
+- Flavien Chantelot (@Dorn-) <contact@flavien.io>
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new schema with name acme in test database
+ community.general.postgresql_schema:
+ db: test
+ name: acme
+
+- name: Create a new schema acme with a user bob who will own it
+ community.general.postgresql_schema:
+ name: acme
+ owner: bob
+
+- name: Drop schema "acme" with cascade
+ community.general.postgresql_schema:
+ name: acme
+ state: absent
+ cascade_drop: yes
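+
+# Illustrative only: the ca_cert path below is an assumption for the example
+- name: Create schema "acme" over a CA-verified SSL connection
+  community.general.postgresql_schema:
+    db: test
+    name: acme
+    ssl_mode: verify-full
+    ca_cert: /etc/ssl/certs/root.crt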
+'''
+
+RETURN = r'''
+schema:
+ description: Name of the schema.
+ returned: success, changed
+ type: str
+ sample: "acme"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["CREATE SCHEMA \"acme\""]
+'''
+
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+ SQLParseError,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def set_owner(cursor, schema, owner):
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (
+ pg_quote_identifier(schema, 'schema'), owner)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+
+
+def get_schema_info(cursor, schema):
+ query = ("SELECT schema_owner AS owner "
+ "FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.fetchone()
+
+
+def schema_exists(cursor, schema):
+ query = ("SELECT schema_name FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.rowcount == 1
+
+
+def schema_delete(cursor, schema, cascade):
+ if schema_exists(cursor, schema):
+ query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def schema_create(cursor, schema, owner):
+ if not schema_exists(cursor, schema):
+ query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
+ if owner:
+ query_fragments.append('AUTHORIZATION "%s"' % owner)
+ query = ' '.join(query_fragments)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return set_owner(cursor, schema, owner)
+ else:
+ return False
+
+
+def schema_matches(cursor, schema, owner):
+ if not schema_exists(cursor, schema):
+ return False
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return False
+ else:
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ schema=dict(type="str", required=True, aliases=['name']),
+ owner=dict(type="str", default=""),
+ database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
+ cascade_drop=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ session_role=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ schema = module.params["schema"]
+ owner = module.params["owner"]
+ state = module.params["state"]
+ cascade_drop = module.params["cascade_drop"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, schema, owner, session_role)
+
+ changed = False
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = not schema_exists(cursor, schema)
+ elif state == "present":
+ changed = not schema_matches(cursor, schema, owner)
+ module.exit_json(changed=changed, schema=schema)
+
+ if state == "absent":
+ try:
+ changed = schema_delete(cursor, schema, cascade_drop)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = schema_create(cursor, schema, owner)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, schema=schema, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py
new file mode 100644
index 00000000..50cd628a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py
@@ -0,0 +1,627 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_sequence
+short_description: Create, drop, or alter a PostgreSQL sequence
+description:
+- Allows you to create, drop, or change the definition of a sequence generator.
+options:
+ sequence:
+ description:
+ - The name of the sequence.
+ required: true
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - The sequence state.
+    - If I(state=absent), other options will be ignored except I(name) and
+      I(schema).
+ default: present
+ choices: [ absent, present ]
+ type: str
+ data_type:
+ description:
+ - Specifies the data type of the sequence. Valid types are bigint, integer,
+ and smallint. bigint is the default. The data type determines the default
+ minimum and maximum values of the sequence. For more info see the
+ documentation
+ U(https://www.postgresql.org/docs/current/sql-createsequence.html).
+ - Supported from PostgreSQL 10.
+ choices: [ bigint, integer, smallint ]
+ type: str
+ increment:
+ description:
+ - Increment specifies which value is added to the current sequence value
+ to create a new value.
+ - A positive value will make an ascending sequence, a negative one a
+ descending sequence. The default value is 1.
+ type: int
+ minvalue:
+ description:
+ - Minvalue determines the minimum value a sequence can generate. The
+ default for an ascending sequence is 1. The default for a descending
+ sequence is the minimum value of the data type.
+ type: int
+ aliases:
+ - min
+ maxvalue:
+ description:
+ - Maxvalue determines the maximum value for the sequence. The default for
+ an ascending sequence is the maximum
+ value of the data type. The default for a descending sequence is -1.
+ type: int
+ aliases:
+ - max
+ start:
+ description:
+ - Start allows the sequence to begin anywhere. The default starting value
+ is I(minvalue) for ascending sequences and I(maxvalue) for descending
+ ones.
+ type: int
+ cache:
+ description:
+ - Cache specifies how many sequence numbers are to be preallocated and
+ stored in memory for faster access. The minimum value is 1 (only one
+ value can be generated at a time, i.e., no cache), and this is also
+ the default.
+ type: int
+ cycle:
+ description:
+ - The cycle option allows the sequence to wrap around when the I(maxvalue)
+ or I(minvalue) has been reached by an ascending or descending sequence
+ respectively. If the limit is reached, the next number generated will be
+ the minvalue or maxvalue, respectively.
+ - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
+ has reached its maximum value will return an error. False (NO CYCLE) is
+ the default.
+ type: bool
+ default: no
+ cascade:
+ description:
+ - Automatically drop objects that depend on the sequence, and in turn all
+ objects that depend on those objects.
+ - Ignored if I(state=present).
+ - Only used with I(state=absent).
+ type: bool
+ default: no
+ rename_to:
+ description:
+ - The new name for the I(sequence).
+ - Works only for existing sequences.
+ type: str
+ owner:
+ description:
+ - Set the owner for the I(sequence).
+ type: str
+ schema:
+ description:
+    - The schema of the I(sequence). This is used to create and relocate
+      a I(sequence) in the given schema.
+ default: public
+ type: str
+ newschema:
+ description:
+ - The new schema for the I(sequence). Will be used for moving a
+ I(sequence) to another I(schema).
+ - Works only for existing sequences.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified I(session_role)
+ must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the I(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - database
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(sequence), I(schema), I(rename_to),
+ I(owner), I(newschema), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the I(db) parameter, the sequence will be created in the
+  database named postgres.
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_tablespace
+- name: CREATE SEQUENCE reference
+ description: Complete reference of the CREATE SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsequence.html
+- name: ALTER SEQUENCE reference
+ description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html
+- name: DROP SEQUENCE reference
+ description: Complete reference of the DROP SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsequence.html
+author:
+- Tobias Birkefeld (@tcraxs)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create an ascending bigint sequence called foobar in the default
+ database
+ community.general.postgresql_sequence:
+ name: foobar
+
+- name: Create an ascending integer sequence called foobar, starting at 101
+ community.general.postgresql_sequence:
+ name: foobar
+ data_type: integer
+ start: 101
+
+- name: Create a descending sequence called foobar, starting at 101 and
+    preallocating 10 sequence numbers in cache
+ community.general.postgresql_sequence:
+ name: foobar
+ increment: -1
+ cache: 10
+ start: 101
+
+- name: Create an ascending sequence called foobar, which cycles between 1 and 10
+ community.general.postgresql_sequence:
+ name: foobar
+ cycle: yes
+ min: 1
+ max: 10
+
+- name: Create an ascending bigint sequence called foobar in the default
+ database with owner foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Rename an existing sequence named foo to bar
+ community.general.postgresql_sequence:
+ name: foo
+ rename_to: bar
+
+- name: Change the schema of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ newschema: foobar
+
+- name: Change the owner of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Drop a sequence called foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ state: absent
+
+- name: Drop a sequence called foobar with cascade
+ community.general.postgresql_sequence:
+ name: foobar
+ cascade: yes
+ state: absent
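+
+# Illustrative only: the schema acme is an assumption for the example
+- name: Create an ascending sequence called foobar in schema acme
+  community.general.postgresql_sequence:
+    name: foobar
+    schema: acme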
+'''
+
+RETURN = r'''
+state:
+ description: Sequence state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+sequence:
+ description: Sequence name.
+ returned: always
+ type: str
+ sample: 'foobar'
+queries:
+  description: List of queries that the module tried to execute.
+  returned: always
+  type: list
+ sample: [ "CREATE SEQUENCE \"foo\"" ]
+schema:
+  description: Name of the schema of the sequence.
+ returned: always
+ type: str
+ sample: 'foo'
+data_type:
+ description: Shows the current data type of the sequence.
+ returned: always
+ type: str
+ sample: 'bigint'
+increment:
+ description: The value of increment of the sequence. A positive value will
+ make an ascending sequence, a negative one a descending
+ sequence.
+ returned: always
+ type: int
+ sample: '-1'
+minvalue:
+ description: The value of minvalue of the sequence.
+ returned: always
+ type: int
+ sample: '1'
+maxvalue:
+ description: The value of maxvalue of the sequence.
+ returned: always
+ type: int
+ sample: '9223372036854775807'
+start:
+ description: The value of start of the sequence.
+ returned: always
+ type: int
+ sample: '12'
+cycle:
+  description: Shows whether the sequence cycles or not.
+ returned: always
+ type: str
+ sample: 'NO'
+owner:
+ description: Shows the current owner of the sequence
+ after the successful run of the task.
+ returned: always
+ type: str
+ sample: 'postgres'
+newname:
+ description: Shows the new sequence name after rename.
+ returned: on success
+ type: str
+ sample: 'barfoo'
+newschema:
+ description: Shows the new schema of the sequence after schema change.
+ returned: on success
+ type: str
+ sample: 'foobar'
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class Sequence(object):
+ """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ executed_queries (list) -- executed queries
+ name (str) -- name of the sequence
+ owner (str) -- name of the owner of the sequence
+ schema (str) -- name of the schema (default: public)
+ data_type (str) -- data type of the sequence
+ start_value (int) -- value of the sequence start
+ minvalue (int) -- minimum value of the sequence
+ maxvalue (int) -- maximum value of the sequence
+ increment (int) -- increment value of the sequence
+ cycle (bool) -- sequence can cycle or not
+ new_name (str) -- name of the renamed sequence
+ new_schema (str) -- name of the new schema
+ exists (bool) -- sequence exists or not
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.name = self.module.params['sequence']
+ self.owner = ''
+ self.schema = self.module.params['schema']
+ self.data_type = ''
+ self.start_value = ''
+ self.minvalue = ''
+ self.maxvalue = ''
+ self.increment = ''
+ self.cycle = ''
+ self.new_name = ''
+ self.new_schema = ''
+ self.exists = False
+ # Collect info
+ self.get_info()
+
+ def get_info(self):
+ """Getter to refresh and get sequence info"""
+ query = ("SELECT "
+ "s.sequence_schema AS schemaname, "
+ "s.sequence_name AS sequencename, "
+ "pg_get_userbyid(c.relowner) AS sequenceowner, "
+ "s.data_type::regtype AS data_type, "
+ "s.start_value AS start_value, "
+ "s.minimum_value AS min_value, "
+ "s.maximum_value AS max_value, "
+ "s.increment AS increment_by, "
+ "s.cycle_option AS cycle "
+ "FROM information_schema.sequences s "
+ "JOIN pg_class c ON c.relname = s.sequence_name "
+ "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE NOT pg_is_other_temp_schema(n.oid) "
+ "AND c.relkind = 'S'::\"char\" "
+ "AND sequence_name = %(name)s "
+ "AND sequence_schema = %(schema)s")
+
+ res = exec_sql(self, query,
+ query_params={'name': self.name, 'schema': self.schema},
+ add_to_executed=False)
+
+        if not res:
+            self.exists = False
+            return False
+
+        self.exists = True
+        self.schema = res[0]['schemaname']
+        self.name = res[0]['sequencename']
+        self.owner = res[0]['sequenceowner']
+        self.data_type = res[0]['data_type']
+        self.start_value = res[0]['start_value']
+        self.minvalue = res[0]['min_value']
+        self.maxvalue = res[0]['max_value']
+        self.increment = res[0]['increment_by']
+        self.cycle = res[0]['cycle']
+
+ def create(self):
+ """Implements CREATE SEQUENCE command behavior."""
+ query = ['CREATE SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('data_type'):
+ query.append('AS %s' % self.module.params['data_type'])
+
+ if self.module.params.get('increment'):
+ query.append('INCREMENT BY %s' % self.module.params['increment'])
+
+ if self.module.params.get('minvalue'):
+ query.append('MINVALUE %s' % self.module.params['minvalue'])
+
+ if self.module.params.get('maxvalue'):
+ query.append('MAXVALUE %s' % self.module.params['maxvalue'])
+
+ if self.module.params.get('start'):
+ query.append('START WITH %s' % self.module.params['start'])
+
+ if self.module.params.get('cache'):
+ query.append('CACHE %s' % self.module.params['cache'])
+
+ if self.module.params.get('cycle'):
+ query.append('CYCLE')
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def drop(self):
+ """Implements DROP SEQUENCE command behavior."""
+ query = ['DROP SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('cascade'):
+ query.append('CASCADE')
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def rename(self):
+ """Implements ALTER SEQUENCE RENAME TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('RENAME TO "%s"' % self.module.params['rename_to'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_owner(self):
+ """Implements ALTER SEQUENCE OWNER TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('OWNER TO "%s"' % self.module.params['owner'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_schema(self):
+ """Implements ALTER SEQUENCE SET SCHEMA command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('SET SCHEMA "%s"' % self.module.params['newschema'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def __add_schema(self):
+ return '"%s"."%s"' % (self.schema, self.name)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ sequence=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
+ increment=dict(type='int'),
+ minvalue=dict(type='int', aliases=['min']),
+ maxvalue=dict(type='int', aliases=['max']),
+ start=dict(type='int'),
+ cache=dict(type='int'),
+ cycle=dict(type='bool', default=False),
+ schema=dict(type='str', default='public'),
+ cascade=dict(type='bool', default=False),
+ rename_to=dict(type='str'),
+ owner=dict(type='str'),
+ newschema=dict(type='str'),
+ db=dict(type='str', default='', aliases=['login_db', 'database']),
+ session_role=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['rename_to', 'data_type'],
+ ['rename_to', 'increment'],
+ ['rename_to', 'minvalue'],
+ ['rename_to', 'maxvalue'],
+ ['rename_to', 'start'],
+ ['rename_to', 'cache'],
+ ['rename_to', 'cycle'],
+ ['rename_to', 'cascade'],
+ ['rename_to', 'owner'],
+ ['rename_to', 'newschema'],
+ ['cascade', 'data_type'],
+ ['cascade', 'increment'],
+ ['cascade', 'minvalue'],
+ ['cascade', 'maxvalue'],
+ ['cascade', 'start'],
+ ['cascade', 'cache'],
+ ['cascade', 'cycle'],
+ ['cascade', 'owner'],
+ ['cascade', 'newschema'],
+ ]
+ )
+
+ if not module.params["trust_input"]:
+ check_input(
+ module,
+ module.params['sequence'],
+ module.params['schema'],
+ module.params['rename_to'],
+ module.params['owner'],
+ module.params['newschema'],
+ module.params['session_role'],
+ )
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+
+ # Change autocommit to False if check_mode:
+ autocommit = not module.check_mode
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ data = Sequence(module, cursor)
+
+ # Set defaults:
+ changed = False
+
+ # Create new sequence
+ if not data.exists and module.params['state'] == 'present':
+ if module.params.get('rename_to'):
+ module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
+ if module.params.get('newschema'):
+ module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
+
+ changed = data.create()
+
+ # Sequence to drop does not exist
+ elif not data.exists and module.params['state'] == 'absent':
+ # Nothing to do
+ changed = False
+
+ # Drop existing sequence
+ elif data.exists and module.params['state'] == 'absent':
+ changed = data.drop()
+
+ # Rename sequence
+ if data.exists and module.params.get('rename_to'):
+ if data.name != module.params['rename_to']:
+ changed = data.rename()
+ if changed:
+ data.new_name = module.params['rename_to']
+
+ # Refresh information
+ if module.params['state'] == 'present':
+ data.get_info()
+
+ # Change owner, schema and settings
+ if module.params['state'] == 'present' and data.exists:
+ # change owner
+ if module.params.get('owner'):
+ if data.owner != module.params['owner']:
+ changed = data.set_owner()
+
+ # Set schema
+ if module.params.get('newschema'):
+ if data.schema != module.params['newschema']:
+ changed = data.set_schema()
+ if changed:
+ data.new_schema = module.params['newschema']
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ sequence=data.name,
+ queries=data.executed_queries,
+ schema=data.schema,
+ data_type=data.data_type,
+ increment=data.increment,
+ minvalue=data.minvalue,
+ maxvalue=data.maxvalue,
+ start=data.start_value,
+ cycle=data.cycle,
+ owner=data.owner,
+ )
+
+ if module.params['state'] == 'present':
+ if data.new_name:
+ kw['newname'] = data.new_name
+ if data.new_schema:
+ kw['newschema'] = data.new_schema
+
+ elif module.params['state'] == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py
new file mode 100644
index 00000000..737bded5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py
@@ -0,0 +1,447 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+ - Allows changing a PostgreSQL server configuration parameter.
+ - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
+ - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
+ - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
+ - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+ which is read in addition to postgresql.conf.
+ - The module allows resetting a parameter to boot_val (the cluster's initial value) with I(reset=yes), or removing the parameter
+ string from postgresql.auto.conf and reloading the configuration with I(value=default) (for settings with the postmaster context a restart is required).
+ - After a change you can inspect the previous and the new parameter values and other
+ information in the Ansible output using the returned values and the M(ansible.builtin.debug) module.
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter.
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+ - To remove a parameter string from postgresql.auto.conf and
+ reload the server configuration you must pass I(value=default).
+ With I(value=default) the task always returns C(changed=true).
+ type: str
+ reset:
+ description:
+ - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- PostgreSQL version 9.4 and later is supported.
+- Note that changing a setting with the 'postmaster' context can report changed=true
+ when actually nothing changes, because the same value may be presented in
+ several different forms, for example 1024MB, 1GB, etc. However, in the pg_settings
+ system view it can be stored as 131072, a number of 8kB pages.
+ The final check cannot compare the parameter value because the server was
+ not restarted and the value in pg_settings is not updated yet.
+- Some parameters require a restart of the PostgreSQL server.
+ See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: community.general.postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ community.general.postgresql_set:
+ name: wal_keep_segments
+ reset: yes
+
+# Set the work_mem parameter to 32MB and show what has been changed and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ community.general.postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- ansible.builtin.debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Note that some parameters require a restart of the PostgreSQL server.
+# In this situation you will see the same value in prev_val_pretty and value_pretty, but 'changed=True'
+# (if you passed a value different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ community.general.postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ community.general.postgresql_set:
+ name: wal_log_hints
+ value: default
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+ description: Whether the PostgreSQL server must be restarted for the change to take effect.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+ description: The parameter value before the change, in a human-readable format.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+ description: The parameter value after the change, in a human-readable format.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+ - Dictionary that contains the current parameter value (at the time of playbook finish).
+ - Note that some parameters require a restart of the PostgreSQL server for the change to take effect.
+ - The current value is returned even in check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# To allow setting values like 1mb instead of 1MB, etc.:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
+ query = ("SELECT name, setting, unit, context, boot_val "
+ "FROM pg_settings WHERE name = %(name)s")
+ try:
+ cursor.execute(query, {'name': name})
+ info = cursor.fetchall()
+ cursor.execute("SHOW %s" % name)
+ val = cursor.fetchone()
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ raw_val = info[0][1]
+ unit = info[0][2]
+ context = info[0][3]
+ boot_val = info[0][4]
+
+ if val[0] == 'True':
+ val[0] = 'on'
+ elif val[0] == 'False':
+ val[0] = 'off'
+
+ if unit == 'kB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024
+
+ unit = 'b'
+
+ elif unit == 'MB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024 * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024 * 1024
+
+ unit = 'b'
+
+ return (val[0], raw_val, unit, boot_val, context)
+
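+# A usage sketch (illustrative only; assumes an open psycopg2 cursor and an
+# AnsibleModule object, with hypothetical values):
+#   val, raw_val, unit, boot_val, context = param_get(cursor, module, 'work_mem')
+#   # -> ('4MB', 4194304, 'b', 4194304, 'user')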
+
+def pretty_to_bytes(pretty_val):
+ # The function returns a value in bytes
+ # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
+ # Otherwise it returns the passed argument.
+
+ val_in_bytes = None
+
+ if 'kB' in pretty_val:
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024
+
+ elif 'MB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024
+
+ elif 'GB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024
+
+ elif 'TB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+ elif 'B' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part
+
+ else:
+ return pretty_val
+
+ return val_in_bytes
+
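+# Illustrative conversions (hypothetical inputs, not part of the module logic):
+#   pretty_to_bytes('1024kB') -> 1048576
+#   pretty_to_bytes('4MB')    -> 4194304
+#   pretty_to_bytes('1GB')    -> 1073741824
+#   pretty_to_bytes('100')    -> '100' (no recognized unit, returned unchanged)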
+
+def param_set(cursor, module, name, value, context):
+ try:
+ if str(value).lower() == 'default':
+ query = "ALTER SYSTEM SET %s = DEFAULT" % name
+ else:
+ query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+ cursor.execute(query)
+
+ if context != 'postmaster':
+ cursor.execute("SELECT pg_reload_conf()")
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ return True
+
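+# Called as, for example (hypothetical values):
+#   param_set(cursor, module, 'work_mem', '64MB', 'user')
+# which runs ALTER SYSTEM SET and reloads the configuration for
+# any context other than 'postmaster'.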
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', aliases=['login_db']),
+ value=dict(type='str'),
+ reset=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ reset = module.params['reset']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, name, value, session_role)
+
+ # Allow passing values like 1mb instead of 1MB, etc.:
+ if value:
+ for unit in POSSIBLE_SIZE_UNITS:
+ if value[:-2].isdigit() and unit in value[-2:]:
+ value = value.upper()
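+ # e.g. '32mb' -> '32MB'; values without a recognized size suffix are left as passed.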
+
+ if value is not None and reset:
+ module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
+
+ if value is None and not reset:
+ module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ kw = {}
+ # Check server version (needs 9.4 or later):
+ ver = db_connection.server_version
+ if ver < PG_REQ_VER:
+ module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
+ kw = dict(
+ changed=False,
+ restart_required=False,
+ value_pretty="",
+ prev_val_pretty="",
+ value={"value": "", "unit": ""},
+ )
+ kw['name'] = name
+ db_connection.close()
+ module.exit_json(**kw)
+
+ # Set default returned values:
+ restart_required = False
+ changed = False
+ kw['name'] = name
+ kw['restart_required'] = False
+
+ # Get info about param state:
+ res = param_get(cursor, module, name)
+ current_value = res[0]
+ raw_val = res[1]
+ unit = res[2]
+ boot_val = res[3]
+ context = res[4]
+
+ if value == 'True':
+ value = 'on'
+ elif value == 'False':
+ value = 'off'
+
+ kw['prev_val_pretty'] = current_value
+ kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
+ kw['context'] = context
+
+ # Do job
+ if context == "internal":
+ module.fail_json(msg="%s: cannot be changed (internal context). See "
+ "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
+
+ if context == "postmaster":
+ restart_required = True
+
+ # If check_mode, just compare and exit:
+ if module.check_mode:
+ if pretty_to_bytes(value) == pretty_to_bytes(current_value):
+ kw['changed'] = False
+
+ else:
+ kw['value_pretty'] = value
+ kw['changed'] = True
+
+ # The current raw value is returned in check_mode anyway:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+ # Set param (value can be an empty string):
+ if value is not None and value != current_value:
+ changed = param_set(cursor, module, name, value, context)
+
+ kw['value_pretty'] = value
+
+ # Reset param:
+ elif reset:
+ if raw_val == boot_val:
+ # nothing to change, exit:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ module.exit_json(**kw)
+
+ changed = param_set(cursor, module, name, boot_val, context)
+
+ cursor.close()
+ db_connection.close()
+
+ # Reconnect and recheck current value:
+ if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ res = param_get(cursor, module, name)
+ # f_ means 'final'
+ f_value = res[0]
+ f_raw_val = res[1]
+
+ if raw_val == f_raw_val:
+ changed = False
+
+ else:
+ changed = True
+
+ kw['value_pretty'] = f_value
+ kw['value'] = dict(
+ value=f_raw_val,
+ unit=unit,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ kw['changed'] = changed
+ kw['restart_required'] = restart_required
+
+ if restart_required and changed:
+ module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py
new file mode 100644
index 00000000..435a6c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: postgresql_slot
+short_description: Add or remove replication slots from a PostgreSQL database
+description:
+- Add or remove physical or logical replication slots from a PostgreSQL database.
+
+options:
+ name:
+ description:
+ - Name of the replication slot to add or remove.
+ type: str
+ required: yes
+ aliases:
+ - slot_name
+ slot_type:
+ description:
+ - Slot type.
+ type: str
+ default: physical
+ choices: [ logical, physical ]
+ state:
+ description:
+ - The slot state.
+ - I(state=present) implies the slot must be present in the system.
+ - I(state=absent) implies the slot must be removed from the system.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ immediately_reserve:
+ description:
+ - Optional parameter that, when C(yes), specifies that the LSN for this replication slot is reserved
+ immediately; otherwise, the default C(no) specifies that the LSN is reserved on the first connection
+ from a streaming replication client.
+ - Available from PostgreSQL version 9.6.
+ - Used only with I(slot_type=physical).
+ - Mutually exclusive with I(slot_type=logical).
+ type: bool
+ default: no
+ output_plugin:
+ description:
+ - All logical slots must indicate which output plugin decoder they're using.
+ - This parameter does not apply to physical slots.
+ - It will be ignored with I(slot_type=physical).
+ type: str
+ default: "test_decoding"
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- Physical replication slots were introduced to PostgreSQL with version 9.4,
+ while logical replication slots were added beginning with version 10.0.
+
+seealso:
+- name: PostgreSQL pg_replication_slots view reference
+ description: Complete reference of the PostgreSQL pg_replication_slots view.
+ link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
+- name: PostgreSQL streaming replication protocol reference
+ description: Complete reference of the PostgreSQL streaming replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-replication.html
+- name: PostgreSQL logical replication protocol reference
+ description: Complete reference of the PostgreSQL logical replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
+
+author:
+- John Scalia (@jscalia)
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create physical_one physical slot if it doesn't exist
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+
+- name: Remove physical_one slot if it exists
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+ state: absent
+
+- name: Create logical_slot_one logical slot in the acme database if it doesn't exist
+ community.general.postgresql_slot:
+ name: logical_slot_one
+ slot_type: logical
+ state: present
+ output_plugin: custom_decoder_one
+ db: "acme"
+
+- name: Remove logical_one slot, if it exists, from a cluster running on another host with a non-standard port
+ community.general.postgresql_slot:
+ name: logical_one
+ login_host: mydatabase.example.org
+ port: 5433
+ login_user: ourSuperuser
+ login_password: thePassword
+ state: absent
+'''
+
+RETURN = r'''
+name:
+ description: Name of the slot.
+ returned: always
+ type: str
+ sample: "physical_one"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgSlot(object):
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.kind = ''
+ self.__slot_exists()
+ self.changed = False
+ self.executed_queries = []
+
+ def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
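+ # Create a physical or logical replication slot; a no-op if a slot with this name already exists.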
+ if self.exists:
+ if self.kind == kind:
+ return False
+ else:
+ self.module.warn("slot with name '%s' already exists "
+ "but has another type '%s'" % (self.name, self.kind))
+ return False
+
+ if just_check:
+ return None
+
+ if kind == 'physical':
+ # Check the server version (immediately_reserve needs 9.6+):
+ if self.cursor.connection.server_version < 96000:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s)"
+
+ else:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
+
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'i_reserve': immediately_reserve},
+ return_bool=True)
+
+ elif kind == 'logical':
+ query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True)
+
+ def drop(self):
+ if not self.exists:
+ return False
+
+ query = "SELECT pg_drop_replication_slot(%(name)s)"
+ self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)
+
+ def __slot_exists(self):
+ query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.kind = res[0][0]
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", aliases=["login_db"]),
+ name=dict(type="str", required=True, aliases=["slot_name"]),
+ slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
+ immediately_reserve=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ output_plugin=dict(type="str", default="test_decoding"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ slot_type = module.params["slot_type"]
+ immediately_reserve = module.params["immediately_reserve"]
+ state = module.params["state"]
+ output_plugin = module.params["output_plugin"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ if immediately_reserve and slot_type == 'logical':
+ module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
+
+ # When slot_type is logical and parameter db is not passed,
+ # the default database will be used to create the slot and
+ # the user should know about this.
+ # When the slot type is physical,
+ # it doesn't matter which database will be used
+ # because physical slots are global objects.
+ if slot_type == 'logical':
+ warn_db_default = True
+ else:
+ warn_db_default = False
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##################################
+ # Create an object and do main job
+ pg_slot = PgSlot(module, cursor, name)
+
+ changed = False
+
+ if module.check_mode:
+ if state == "present":
+ if not pg_slot.exists:
+ changed = True
+
+ pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
+
+ elif state == "absent":
+ if pg_slot.exists:
+ changed = True
+ else:
+ if state == "absent":
+ pg_slot.drop()
+
+ elif state == "present":
+ pg_slot.create(slot_type, immediately_reserve, output_plugin)
+
+ changed = pg_slot.changed
+
+ db_connection.close()
+ module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py
new file mode 100644
index 00000000..0e2b3612
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_subscription
+short_description: Add, update, or remove PostgreSQL subscription
+description:
+- Add, update, or remove PostgreSQL subscription.
+version_added: '0.2.0'
+
+options:
+ name:
+ description:
+ - Name of the subscription to add, update, or remove.
+ type: str
+ required: yes
+ db:
+ description:
+ - Name of the database to connect to and where
+ the subscription state will be changed.
+ aliases: [ login_db ]
+ type: str
+ required: yes
+ state:
+ description:
+ - The subscription state.
+ - C(present) implies that if I(name) subscription doesn't exist, it will be created.
+ - C(absent) implies that if I(name) subscription exists, it will be removed.
+ - C(refresh) implies that if I(name) subscription exists, it will be refreshed.
+ Fetches missing table information from the publisher. Always returns ``changed=True``.
+ This will start replication of tables that were added to the subscribed-to publications
+ since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
+ The existing data in the publications that are being subscribed to
+ should be copied once the replication starts.
+ - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
+ type: str
+ choices: [ absent, present, refresh ]
+ default: present
+ owner:
+ description:
+ - Subscription owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ - Ignored when I(state) is not C(present).
+ type: str
+ publications:
+ description:
+ - The publication names on the publisher to use for the subscription.
+ - Ignored when I(state) is not C(present).
+ type: list
+ elements: str
+ connparams:
+ description:
+ - The connection parameters, as a dict of param-value pairs, used to connect to the publisher.
+ - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ cascade:
+ description:
+ - Drop subscription dependencies. Has effect with I(state=absent) only.
+ - Ignored when I(state) is not C(absent).
+ type: bool
+ default: false
+ subsparams:
+ description:
+ - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
+ - For updating the subscription, the allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
+ - See available parameters to create a new subscription
+ on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(publications), I(owner),
+ I(session_role), I(connparams), I(subsparams) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- PostgreSQL version must be 10 or greater.
+
+seealso:
+- module: community.general.postgresql_publication
+- module: community.general.postgresql_info
+- name: CREATE SUBSCRIPTION reference
+ description: Complete reference of the CREATE SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsubscription.html
+- name: ALTER SUBSCRIPTION reference
+ description: Complete reference of the ALTER SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersubscription.html
+- name: DROP SUBSCRIPTION reference
+ description: Complete reference of the DROP SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
+
+author:
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: >
+ Create acme subscription in mydb database using acme_publication and
+ the following connection parameters to connect to the publisher.
+ Set the subscription owner as alice.
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ publications: acme_publication
+ owner: alice
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ dbname: mydb
+
+- name: Assuming that acme subscription exists, try to change conn parameters
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ connect_timeout: 100
+
+- name: Refresh acme publication
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: refresh
+
+- name: Drop acme subscription from mydb with dependencies (cascade=yes)
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: absent
+ cascade: yes
+
+- name: Assuming that acme subscription exists and enabled, disable the subscription
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ subsparams:
+ enabled: no
+'''
+
+RETURN = r'''
+name:
+ description:
+ - Name of the subscription.
+ returned: always
+ type: str
+ sample: acme
+exists:
+ description:
+ - Flag indicating whether the subscription exists at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
+initial_state:
+ description: Subscription configuration at the beginning of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+final_state:
+ description: Subscription configuration at the end of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+'''
+
+from copy import deepcopy
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
+
+
+################################
+# Module functions and classes #
+################################
+
+def convert_conn_params(conn_dict):
+ """Converts the passed connection dictionary to string.
+
+ Args:
+ conn_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Connection string.
+ """
+ conn_list = []
+ for (param, val) in iteritems(conn_dict):
+ conn_list.append('%s=%s' % (param, val))
+
+ return ' '.join(conn_list)
+
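+# For example (hypothetical input):
+#   convert_conn_params({'host': '127.0.0.1', 'port': 5432}) -> 'host=127.0.0.1 port=5432'
+# (the key order depends on dict iteration order)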
+
+def convert_subscr_params(params_dict):
+ """Converts the passed params dictionary to string.
+
+ Args:
+ params_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Parameters string.
+ """
+ params_list = []
+ for (param, val) in iteritems(params_dict):
+ if val is False:
+ val = 'false'
+ elif val is True:
+ val = 'true'
+
+ params_list.append('%s = %s' % (param, val))
+
+ return ', '.join(params_list)
+
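+# For example (hypothetical input):
+#   convert_subscr_params({'enabled': False, 'copy_data': True}) -> 'enabled = false, copy_data = true'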
+
+class PgSubscription():
+ """Class to work with PostgreSQL subscription.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the subscription.
+ db (str): The database name the subscription will be associated with.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of subscription.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with subscription attributes.
+ exists (bool): Flag indicating whether the subscription exists.
+ """
+
+ def __init__(self, module, cursor, name, db):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.db = db
+ self.executed_queries = []
+ self.attrs = {
+ 'owner': None,
+ 'enabled': None,
+ 'synccommit': None,
+ 'conninfo': {},
+ 'slotname': None,
+ 'publications': [],
+ }
+ self.empty_attrs = deepcopy(self.attrs)
+ self.exists = self.check_subscr()
+
+ def get_info(self):
+ """Refresh the subscription information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_subscr()
+ return self.attrs
+
+ def check_subscr(self):
+ """Check the subscription and refresh ``self.attrs`` subscription attribute.
+
+ Returns:
+ True if the subscription with ``self.name`` exists, False otherwise.
+ """
+
+ subscr_info = self.__get_general_subscr_info()
+
+ if not subscr_info:
+ # The subscription does not exist:
+ self.attrs = deepcopy(self.empty_attrs)
+ return False
+
+ self.attrs['owner'] = subscr_info.get('rolname')
+ self.attrs['enabled'] = subscr_info.get('subenabled')
+ self.attrs['synccommit'] = subscr_info.get('subsynccommit')
+ self.attrs['slotname'] = subscr_info.get('subslotname')
+ self.attrs['publications'] = subscr_info.get('subpublications')
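+ # subconninfo is a libpq-style string such as 'host=127.0.0.1 port=5432';
+ # split it back into a dict, casting numeric values to int where possible: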
+ if subscr_info.get('subconninfo'):
+ for param in subscr_info['subconninfo'].split(' '):
+ tmp = param.split('=')
+ try:
+ self.attrs['conninfo'][tmp[0]] = int(tmp[1])
+ except ValueError:
+ self.attrs['conninfo'][tmp[0]] = tmp[1]
+
+ return True
+
+ def create(self, connparams, publications, subsparams, check_mode=True):
+ """Create the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (str): Parameters string in WITH () clause style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been created, otherwise False.
+ """
+ query_fragments = []
+ query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
+ "PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))
+
+ if subsparams:
+ query_fragments.append("WITH (%s)" % subsparams)
+
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ return changed
+
+ def update(self, connparams, publications, subsparams, check_mode=True):
+ """Update the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (dict): Dictionary of optional parameters.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if subscription has been updated, otherwise False.
+ """
+ changed = False
+
+ if connparams:
+ if connparams != self.attrs['conninfo']:
+ changed = self.__set_conn_params(convert_conn_params(connparams),
+ check_mode=check_mode)
+
+ if publications:
+ if sorted(self.attrs['publications']) != sorted(publications):
+ changed = self.__set_publications(publications, check_mode=check_mode)
+
+ if subsparams:
+ params_to_update = []
+
+ for (param, value) in iteritems(subsparams):
+ if param == 'enabled':
+ if self.attrs['enabled'] and value is False:
+ changed = self.enable(enabled=False, check_mode=check_mode)
+ elif not self.attrs['enabled'] and value is True:
+ changed = self.enable(enabled=True, check_mode=check_mode)
+
+ elif param == 'synchronous_commit':
+ if self.attrs['synccommit'] is True and value is False:
+ params_to_update.append("%s = false" % param)
+ elif self.attrs['synccommit'] is False and value is True:
+ params_to_update.append("%s = true" % param)
+
+ elif param == 'slot_name':
+ if self.attrs['slotname'] and self.attrs['slotname'] != value:
+ params_to_update.append("%s = %s" % (param, value))
+
+ else:
+ self.module.warn("Parameter '%s' is not in params supported "
+ "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))
+
+ if params_to_update:
+ changed = self.__set_params(params_to_update, check_mode=check_mode)
+
+ return changed
+
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the subscription.
+
+ Kwargs:
+ cascade (bool): Flag indicates that the subscription needs to be deleted
+ with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been removed, otherwise False.
+ """
+ if self.exists:
+ query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def set_owner(self, role, check_mode=True):
+ """Set a subscription owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a subscription owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def refresh(self, check_mode=True):
+ """Refresh publication.
+
+ Fetches missing table info from publisher.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_params(self, params_to_update, check_mode=True):
+ """Update optional subscription parameters.
+
+ Args:
+ params_to_update (list): Parameters with values to update.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_conn_params(self, connparams, check_mode=True):
+ """Update connection parameters.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_publications(self, publications, check_mode=True):
+ """Update publications.
+
+ Args:
+ publications (list): Publications on the master to use.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def enable(self, enabled=True, check_mode=True):
+ """Enable or disable the subscription.
+
+ Kwargs:
+ enabled (bool): Flag indicating whether the subscription needs
+ to be enabled or disabled.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if enabled:
+ query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
+ else:
+ query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name
+
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __get_general_subscr_info(self):
+ """Get and return general subscription information.
+
+ Returns:
+ Dict with subscription information if successful, False otherwise.
+ """
+ query = ("SELECT d.datname, r.rolname, s.subenabled, "
+ "s.subconninfo, s.subslotname, s.subsynccommit, "
+ "s.subpublications FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid "
+ "WHERE s.subname = %(name)s AND d.datname = %(db)s")
+
+ result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+ Note: If we need just to get information from the database,
+ we use ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', required=True, aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
+ publications=dict(type='list', elements='str'),
+ connparams=dict(type='dict'),
+ cascade=dict(type='bool', default=False),
+ owner=dict(type='str'),
+ subsparams=dict(type='dict'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ db = module.params['db']
+ name = module.params['name']
+ state = module.params['state']
+ publications = module.params['publications']
+ cascade = module.params['cascade']
+ owner = module.params['owner']
+ subsparams = module.params['subsparams']
+ connparams = module.params['connparams']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not subsparams:
+ subsparams_str = None
+ else:
+ subsparams_str = convert_subscr_params(subsparams)
+
+ if not connparams:
+ connparams_str = None
+ else:
+ connparams_str = convert_conn_params(connparams)
+
+ check_input(module, name, publications, owner, session_role,
+ connparams_str, subsparams_str)
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when state is not absent')
+
+ if state != 'present':
+ if owner:
+ module.warn("parameter 'owner' is ignored when state is not 'present'")
+ if publications:
+ module.warn("parameter 'publications' is ignored when state is not 'present'")
+ if connparams:
+ module.warn("parameter 'connparams' is ignored when state is not 'present'")
+ if subsparams:
+ module.warn("parameter 'subsparams' is ignored when state is not 'present'")
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We check subscription state without DML queries execution, so set autocommit:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Set defaults:
+ changed = False
+ initial_state = {}
+ final_state = {}
+
+ ###################################
+ # Create object and do rock'n'roll:
+ subscription = PgSubscription(module, cursor, name, db)
+
+ if subscription.exists:
+ initial_state = deepcopy(subscription.attrs)
+ final_state = deepcopy(initial_state)
+
+ if state == 'present':
+ if not subscription.exists:
+ if subsparams:
+ subsparams = convert_subscr_params(subsparams)
+
+ if connparams:
+ connparams = convert_conn_params(connparams)
+
+ changed = subscription.create(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ else:
+ changed = subscription.update(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ if owner and subscription.attrs['owner'] != owner:
+ changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
+
+ elif state == 'absent':
+ changed = subscription.drop(cascade, check_mode=module.check_mode)
+
+ elif state == 'refresh':
+ if not subscription.exists:
+ module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
+
+ # Always returns True:
+ changed = subscription.refresh(check_mode=module.check_mode)
+
+ # Get final subscription info:
+ final_state = subscription.get_info()
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Return ret values and exit:
+ module.exit_json(changed=changed,
+ name=name,
+ exists=subscription.exists,
+ queries=subscription.executed_queries,
+ initial_state=initial_state,
+ final_state=final_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py
new file mode 100644
index 00000000..5260853d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows creating, dropping, renaming, truncating a table, or changing some table attributes.
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+ - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ required: false
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: no
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+ - Keywords that are used with the like parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS, or ALL.
+ Needs I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+ - Columns that are needed, as a list of column definitions (for example, C(id bigserial primary key)).
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: no
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of the database to connect to and where the table will be created.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the db parameter, tables will be created in the database
+ named postgres.
+- PostgreSQL allows creating columnless tables, so the columns parameter is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: community.general.postgresql_sequence
+- module: community.general.postgresql_idx
+- module: community.general.postgresql_info
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ community.general.postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ community.general.postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ community.general.postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ community.general.postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ community.general.postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ community.general.postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ community.general.postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ community.general.postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ community.general.postgresql_table:
+ name: foo
+ truncate: yes
+
+- name: Drop table foo from schema acme
+ community.general.postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ community.general.postgresql_table:
+ name: bar
+ state: absent
+ cascade: yes
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Table(object):
+ def __init__(self, name, module, cursor):
+ self.name = name
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'owner': '',
+ 'tblspace': '',
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_queries = []
+
+ def get_info(self):
+ """Getter to refresh and get table info"""
+ self.__exists_in_db()
+
+ def __exists_in_db(self):
+ """Check table exists and refresh info"""
+ if "." in self.name:
+ schema = self.name.split('.')[-2]
+ tblname = self.name.split('.')[-1]
+ else:
+ schema = 'public'
+ tblname = self.name
+
+ query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
+ "FROM pg_tables AS t "
+ "INNER JOIN pg_class AS c ON c.relname = t.tablename "
+ "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
+ "WHERE t.tablename = %(tblname)s "
+ "AND n.nspname = %(schema)s")
+ res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
+ add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ owner=res[0][0],
+ tblspace=res[0][1] if res[0][1] else '',
+ storage_params=res[0][2] if res[0][2] else [],
+ )
+
+ return True
+ else:
+ self.exists = False
+ return False
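+
+ # Editorial note: the dotted name is split from the right, so only the two
+ # trailing components are used (a minimal illustration, plain Python):
+ #
+ # 'acme.foo'.split('.')[-2:] -> ['acme', 'foo'] (schema, table)
+ # 'db.acme.foo'.split('.')[-2:] -> ['acme', 'foo'] (leading parts are ignored)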
+
+ def create(self, columns='', params='', tblspace='',
+ unlogged=False, owner=''):
+ """
+ Create table.
+ If table exists, check passed args (params, tblspace, owner) and,
+ if they're different from current, change them.
+ Arguments:
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ columns - column string (comma separated).
+ """
+ name = pg_quote_identifier(self.name, 'table')
+
+ changed = False
+
+ if self.exists:
+ if tblspace == 'pg_default' and not self.info['tblspace']:
+ pass # Because an empty tblspace and pg_default mean the same
+ elif tblspace and self.info['tblspace'] != tblspace:
+ self.set_tblspace(tblspace)
+ changed = True
+
+ if owner and self.info['owner'] != owner:
+ self.set_owner(owner)
+ changed = True
+
+ if params:
+ param_list = [p.strip(' ') for p in params.split(',')]
+
+ new_param = False
+ for p in param_list:
+ if p not in self.info['storage_params']:
+ new_param = True
+
+ if new_param:
+ self.set_stor_params(params)
+ changed = True
+
+ if changed:
+ return True
+ return False
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ if columns:
+ query += " (%s)" % columns
+ else:
+ query += " ()"
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
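+
+ # Editorial sketch: for the documented test_table example above, the query
+ # built by this method would be (assuming that task's columns, storage_params
+ # and tablespace values):
+ #
+ # CREATE TABLE "test_table" (id bigserial primary key,num bigint,stories text)
+ # WITH (fillfactor=10,autovacuum_analyze_threshold=1) TABLESPACE "ssd"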
+
+ def create_like(self, src_table, including='', tblspace='',
+ unlogged=False, params='', owner=''):
+ """
+ Create table like another table (with similar DDL).
+ Arguments:
+ src_table - source table.
+ including - corresponds to optional INCLUDING expression
+ in CREATE TABLE ... LIKE statement.
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ """
+ changed = False
+
+ name = pg_quote_identifier(self.name, 'table')
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
+
+ if including:
+ including = including.split(',')
+ for i in including:
+ query += " INCLUDING %s" % i
+
+ query += ')'
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
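+
+ # Editorial sketch: for the tbl2 example in EXAMPLES, the statement built by
+ # this method is roughly:
+ #
+ # CREATE TABLE "tbl2" (LIKE "tbl1" INCLUDING comments INCLUDING indexes) TABLESPACE "ssd"
+ #
+ # The comma-separated INCLUDING items are interpolated verbatim (they are not
+ # stripped of whitespace); PostgreSQL tolerates the extra spaces.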
+
+ def truncate(self):
+ query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(newname, 'table'))
+ return exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, username):
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self, cascade=False):
+ if not self.exists:
+ return False
+
+ query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
+ if cascade:
+ query += " CASCADE"
+ return exec_sql(self, query, return_bool=True)
+
+ def set_tblspace(self, tblspace):
+ query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
+ return exec_sql(self, query, return_bool=True)
+
+ def set_stor_params(self, params):
+ query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ table=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', default='', aliases=['login_db']),
+ tablespace=dict(type='str'),
+ owner=dict(type='str'),
+ unlogged=dict(type='bool', default=False),
+ like=dict(type='str'),
+ including=dict(type='str'),
+ rename=dict(type='str'),
+ truncate=dict(type='bool', default=False),
+ columns=dict(type='list', elements='str'),
+ storage_params=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ table = module.params['table']
+ state = module.params['state']
+ tablespace = module.params['tablespace']
+ owner = module.params['owner']
+ unlogged = module.params['unlogged']
+ like = module.params['like']
+ including = module.params['including']
+ newname = module.params['rename']
+ storage_params = module.params['storage_params']
+ truncate = module.params['truncate']
+ columns = module.params['columns']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, table, tablespace, owner, like, including,
+ newname, storage_params, columns, session_role)
+
+ if state == 'present' and cascade:
+ module.warn("cascade=true is ignored when state=present")
+
+ # Check mutual exclusive parameters:
+ if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
+ module.fail_json(msg="%s: state=absent is mutually exclusive with: "
+ "truncate, rename, columns, tablespace, "
+ "including, like, storage_params, unlogged, owner" % table)
+
+ if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: truncate is mutually exclusive with: "
+ "rename, columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: rename is mutually exclusive with: "
+ "columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if like and columns:
+ module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
+ if including and not like:
+ module.fail_json(msg="%s: including param needs like param specified" % table)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ if columns:
+ columns = ','.join(columns)
+
+ ##############
+ # Do main job:
+ table_obj = Table(table, module, cursor)
+
+ # Set default returned values:
+ changed = False
+ kw = {}
+ kw['table'] = table
+ kw['state'] = ''
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+
+ if state == 'absent':
+ changed = table_obj.drop(cascade=cascade)
+
+ elif truncate:
+ changed = table_obj.truncate()
+
+ elif newname:
+ changed = table_obj.rename(newname)
+ q = table_obj.executed_queries
+ table_obj = Table(newname, module, cursor)
+ table_obj.executed_queries = q
+
+ elif state == 'present' and not like:
+ changed = table_obj.create(columns, storage_params,
+ tablespace, unlogged, owner)
+
+ elif state == 'present' and like:
+ changed = table_obj.create_like(like, including, tablespace,
+ unlogged, storage_params, owner)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ # Refresh table info for RETURN.
+ # Note: if the table has been renamed, the info is fetched by the new name:
+ table_obj.get_info()
+ db_connection.commit()
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+ else:
+ # We just change the table state here
+ # to keep other information about the dropped table:
+ kw['state'] = 'absent'
+
+ kw['queries'] = table_obj.executed_queries
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py
new file mode 100644
index 00000000..2062e6a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py
@@ -0,0 +1,541 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
+# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_tablespace
+short_description: Add or remove PostgreSQL tablespaces from remote hosts
+description:
+- Adds or removes PostgreSQL tablespaces from remote hosts.
+options:
+ tablespace:
+ description:
+ - Name of the tablespace to add or remove.
+ required: true
+ type: str
+ aliases:
+ - name
+ location:
+ description:
+ - Path to the tablespace directory in the file system.
+ - Ensure that the location exists and has the right privileges.
+ type: path
+ aliases:
+ - path
+ state:
+ description:
+ - Tablespace state.
+ - I(state=present) implies the tablespace must be created if it doesn't exist.
+ - I(state=absent) implies the tablespace must be removed if present.
+ I(state=absent) is mutually exclusive with I(location), I(owner), and I(set).
+ - See the Notes section for information about check mode restrictions.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ owner:
+ description:
+ - Name of the role to set as an owner of the tablespace.
+ - If this option is not specified, the tablespace owner is a role that creates the tablespace.
+ type: str
+ set:
+ description:
+ - Dict of tablespace options to set. Supported from PostgreSQL 9.0.
+ - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
+ - When C(reset) is passed as an option's value, the option is removed if it was previously set.
+ type: dict
+ rename_to:
+ description:
+ - New name of the tablespace.
+ - The new name cannot begin with pg_, as such names are reserved for system tablespaces.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(tablespace), I(location), I(owner),
+ I(rename_to), I(session_role), I(set) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- I(state=absent) and I(state=present) (the latter if the tablespace doesn't exist) do not
+ support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
+ cannot be run inside a transaction block.
+
+seealso:
+- name: PostgreSQL tablespaces
+ description: General information about PostgreSQL tablespaces.
+ link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
+- name: CREATE TABLESPACE reference
+ description: Complete reference of the CREATE TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtablespace.html
+- name: ALTER TABLESPACE reference
+ description: Complete reference of the ALTER TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertablespace.html
+- name: DROP TABLESPACE reference
+ description: Complete reference of the DROP TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptablespace.html
+
+author:
+- Flavien Chantelot (@Dorn-)
+- Antoine Levy-Lambert (@antoinell)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new tablespace called acme and set bob as its owner
+ community.general.postgresql_tablespace:
+ name: acme
+ owner: bob
+ location: /data/foo
+
+- name: Create a new tablespace called bar with tablespace options
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: 1
+ seq_page_cost: 1
+
+- name: Reset random_page_cost option
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: reset
+
+- name: Rename the tablespace from bar to pcie_ssd
+ community.general.postgresql_tablespace:
+ name: bar
+ rename_to: pcie_ssd
+
+- name: Drop tablespace called bloat
+ community.general.postgresql_tablespace:
+ name: bloat
+ state: absent
+'''
+
+RETURN = r'''
+queries:
+ description: List of queries that the module tried to execute.
+ returned: always
+ type: list
+ sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
+tablespace:
+ description: Tablespace name.
+ returned: always
+ type: str
+ sample: 'ssd'
+owner:
+ description: Tablespace owner.
+ returned: always
+ type: str
+ sample: 'Bob'
+options:
+ description: Tablespace options.
+ returned: always
+ type: dict
+ sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
+location:
+ description: Path to the tablespace in the file system.
+ returned: always
+ type: str
+ sample: '/incredible/fast/ssd'
+newname:
+ description: New tablespace name.
+ returned: if the tablespace has been renamed
+ type: str
+ sample: new_ssd
+state:
+ description: Tablespace state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+'''
+
+try:
+ from psycopg2 import __version__ as PSYCOPG2_VERSION
+ from psycopg2.extras import DictCursor
+ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
+ from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgTablespace(object):
+
+ """Class for working with PostgreSQL tablespaces.
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+ exists (bool) -- flag indicating whether the tablespace exists in the DB
+ owner (str) -- tablespace owner
+ location (str) -- path to the tablespace directory in the file system
+ executed_queries (list) -- list of executed queries
+ new_name (str) -- new name for the tablespace
+ opt_not_supported (bool) -- flag indicating whether tablespace options are supported
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.owner = ''
+ self.settings = {}
+ self.location = ''
+ self.executed_queries = []
+ self.new_name = ''
+ self.opt_not_supported = False
+ # Collect info:
+ self.get_info()
+
+ def get_info(self):
+ """Get tablespace information."""
+ # Check that spcoptions exists:
+ opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'", add_to_executed=False)
+
+ # For 9.1 version and earlier:
+ location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spclocation'", add_to_executed=False)
+ if location:
+ location = 'spclocation'
+ else:
+ location = 'pg_tablespace_location(t.oid)'
+
+ if not opt:
+ self.opt_not_supported = True
+ query = ("SELECT r.rolname, (SELECT Null), %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+ else:
+ query = ("SELECT r.rolname, t.spcoptions, %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+
+ res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
+ query_params={'name': self.name}, add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ if res[0][0]:
+ self.exists = True
+ self.owner = res[0][0]
+
+ if res[0][1]:
+ # Options exist:
+ for i in res[0][1]:
+ i = i.split('=')
+ self.settings[i[0]] = i[1]
+
+ if res[0][2]:
+ # Location exists:
+ self.location = res[0][2]
+
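+ # Editorial note: spcoptions come back as a list of 'key=value' strings.
+ # A minimal illustration of the parsing above:
+ #
+ # ['random_page_cost=1', 'seq_page_cost=1']
+ # -> self.settings == {'random_page_cost': '1', 'seq_page_cost': '1'}
+ #
+ # Values stay strings, which is why set_settings() compares with str().
+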
+ def create(self, location):
+ """Create tablespace.
+
+ Return True on success, otherwise return False.
+
+ args:
+ location (str) -- tablespace directory path in the FS
+ """
+ query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self):
+ """Drop tablespace.
+
+ Return True on success, otherwise return False.
+ """
+ return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)
+
+ def set_owner(self, new_owner):
+ """Set tablespace owner.
+
+ Return True on success, otherwise return False.
+
+ args:
+ new_owner (str) -- name of the new owner for the tablespace
+ """
+ if new_owner == self.owner:
+ return False
+
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ """Rename tablespace.
+
+ Return True on success, otherwise return False.
+
+ args:
+ newname (str) -- new name for the tablespace
+ """
+ query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
+ self.new_name = newname
+ return exec_sql(self, query, return_bool=True)
+
+ def set_settings(self, new_settings):
+ """Set tablespace settings (options).
+
+ If any setting has been changed, set changed = True.
+ After the whole settings list has been handled, return changed.
+
+ args:
+ new_settings (list) -- list of new settings
+ """
+ # settings must be a dict {'key': 'value'}
+ if self.opt_not_supported:
+ return False
+
+ changed = False
+
+ # Apply new settings:
+ for i in new_settings:
+ if new_settings[i] == 'reset':
+ if i in self.settings:
+ changed = self.__reset_setting(i)
+ self.settings[i] = None
+
+ elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
+ changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
+
+ return changed
+
+ def __reset_setting(self, setting):
+ """Reset tablespace setting.
+
+ Return True on success, otherwise return False.
+
+ args:
+ setting (str) -- name of the setting to reset
+ """
+ query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+ def __set_setting(self, setting):
+ """Set tablespace setting.
+
+ Return True on success, otherwise return False.
+
+ args:
+ setting (str) -- string in format "setting_name = 'setting_value'"
+ """
+ query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ tablespace=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ location=dict(type='path', aliases=['path']),
+ owner=dict(type='str'),
+ set=dict(type='dict'),
+ rename_to=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ tablespace = module.params["tablespace"]
+ state = module.params["state"]
+ location = module.params["location"]
+ owner = module.params["owner"]
+ rename_to = module.params["rename_to"]
+ settings = module.params["set"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if state == 'absent' and (location or owner or rename_to or settings):
+ module.fail_json(msg="state=absent is mutually exclusive location, "
+ "owner, rename_to, and set")
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not settings:
+ settings_list = None
+ else:
+ settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]
+
+ check_input(module, tablespace, location, owner,
+ rename_to, session_role, settings_list)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Change autocommit to False if check_mode:
+ if module.check_mode:
+ # Compare real versions; plain string comparison mis-orders e.g. '2.10':
+ if LooseVersion(PSYCOPG2_VERSION.split(' ')[0]) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=False)
+ else:
+ db_connection.set_isolation_level(READ_COMMITTED)
+
+ # Set defaults:
+ autocommit = False
+ changed = False
+
+ ##############
+ # Create PgTablespace object and do main job:
+ tblspace = PgTablespace(module, cursor, tablespace)
+
+ # If tablespace exists with different location, exit:
+ if tblspace.exists and location and location != tblspace.location:
+ module.fail_json(msg="Tablespace '%s' exists with "
+ "different location '%s'" % (tblspace.name, tblspace.location))
+
+ # Create new tablespace:
+ if not tblspace.exists and state == 'present':
+ if rename_to:
+ module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
+
+ if not location:
+ module.fail_json(msg="'location' parameter must be passed with "
+ "state=present if the tablespace doesn't exist")
+
+ # Because CREATE TABLESPACE cannot be run inside a transaction block:
+ autocommit = True
+ if LooseVersion(PSYCOPG2_VERSION.split(' ')[0]) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.create(location)
+
+ # Drop non-existing tablespace:
+ elif not tblspace.exists and state == 'absent':
+ # Nothing to do:
+ module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
+
+ # Drop existing tablespace:
+ elif tblspace.exists and state == 'absent':
+ # Because DROP TABLESPACE cannot be run inside a transaction block:
+ autocommit = True
+ if LooseVersion(PSYCOPG2_VERSION.split(' ')[0]) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.drop()
+
+ # Rename tablespace:
+ elif tblspace.exists and rename_to:
+ if tblspace.name != rename_to:
+ changed = tblspace.rename(rename_to)
+
+ if state == 'present':
+ # Refresh information:
+ tblspace.get_info()
+
+ # Change owner and settings:
+ if state == 'present' and tblspace.exists:
+ if owner:
+ changed = tblspace.set_owner(owner) or changed
+
+ if settings:
+ changed = tblspace.set_settings(settings) or changed
+
+ tblspace.get_info()
+
+ # Rollback if it's possible and check_mode:
+ if not autocommit:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ tablespace=tblspace.name,
+ owner=tblspace.owner,
+ queries=tblspace.executed_queries,
+ options=tblspace.settings,
+ location=tblspace.location,
+ )
+
+ if state == 'present':
+ kw['state'] = 'present'
+
+ if tblspace.new_name:
+ kw['newname'] = tblspace.new_name
+
+ elif state == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py
new file mode 100644
index 00000000..79c987a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py
@@ -0,0 +1,993 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance
+description:
+- Creates, alters, or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- You can also use it to grant or revoke user's privileges in a particular database.
+- You cannot remove a user while it still has any privileges granted to it in any database.
+- Set I(fail_on_user) to C(no) to make the module ignore failures when trying to remove a user.
+ In this case, the module reports if changes happened as usual and separately reports
+ whether the user has been removed or not.
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+ - Set the user's password. Before Ansible 1.4 this was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password is automatically hashed when saved into the
+ database if I(encrypted) is set, otherwise it is saved in
+ plain text format.
+ - When passing an MD5-hashed password, you must generate it with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is
+ C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`").
+ - Note that if the provided password string is already in MD5-hashed
+ format, then it is used as-is, regardless of I(encrypted) option.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and where user's permissions are granted.
+ type: str
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+ - If C(yes), fails when the user (role) cannot be removed. Otherwise just log and continue.
+ default: yes
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ you can define the user's privileges for the database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+ - To create a simple role to be used like a group, use the C(NOLOGIN) flag.
+ type: str
+ choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+ '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+ session_role:
+ description:
+ - Switch to session role after connecting.
+ - The specified session role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - You can specify an unhashed password, and PostgreSQL ensures
+ the stored password is hashed when I(encrypted=yes) is set.
+ If you specify a hashed password, the module uses it as-is,
+ regardless of the setting of I(encrypted).
+ - "Note: Postgresql 10 and newer does not support unhashed passwords."
+ - Previous to Ansible 2.6, this was C(no) by default.
+ default: yes
+ type: bool
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+ - If set to C('infinity'), user's password never expires.
+ - Note that this value must be a valid SQL date and time type.
+ type: str
+ no_password_changes:
+ description:
+ - If C(yes), does not inspect the database for password changes.
+ Useful when C(pg_authid) is not accessible (such as in AWS RDS).
+ Otherwise, makes password changes as necessary.
+ default: no
+ type: bool
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ ssl_mode:
+ description:
+ - Determines how an SSL session is negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, verifies that the server's certificate is signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ groups:
+ description:
+ - The list of groups (roles) that you want to grant to the user.
+ type: list
+ elements: str
+ comment:
+ description:
+ - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), checks whether values of options I(name), I(password), I(priv), I(expires),
+ I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections through the options are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- The module creates a user (role) with login privilege by default.
+ Use C(NOLOGIN) I(role_attr_flags) to change this behaviour.
+- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles).
+ You may not specify password or role_attr_flags when the C(PUBLIC) user is specified.
+- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer.
+ On earlier versions the whole hashed string is used as a password.
+- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable
+ C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
+- Supports C(check_mode).
+seealso:
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- module: community.general.postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+- name: PostgreSQL SASL Authentication
+ description: Complete reference of the PostgreSQL SASL Authentication.
+ link: https://www.postgresql.org/docs/current/sasl-authentication.html
+author:
+- Ansible Core Team
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Connect to acme database, create django user, and grant access to database and products table
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5-hashed),
+# and grant privilege to create other databases and demote rails from super user status if user exists
+- name: Create rails user, set MD5-hashed password, grant privs
+ community.general.postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ role_attr_flags: CREATEDB,NOSUPERUSER
+
+- name: Connect to acme database and remove test user privileges from there
+ community.general.postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
+
+- name: Connect to test database, remove test user from cluster
+ community.general.postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+- name: Connect to acme database and set user's password with no expire date
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
+
+- name: Connect to test database and remove an existing user's password
+ community.general.postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+- name: Create user test and grant group user_ro and user_rw to it
+ community.general.postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
+
+# Create user with a cleartext password if it does not exist or update its password.
+# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10)
+- name: Create appclient user with SCRAM-hashed password
+ community.general.postgresql_user:
+ name: appclient
+ password: "secret123"
+ environment:
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+'''
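+
+# Editorial sketch (not part of the module): the MD5 password format described
+# in the password option can also be produced in Python, following the same
+# 'md5' + md5(password + username) construction:
+#
+# from hashlib import md5
+#
+# def md5_password(password, username):
+#     return 'md5' + md5((password + username).encode('utf-8')).hexdigest()
+#
+# md5_password('verysecretpassword', 'JOE')  # -> 35-character 'md5...' string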
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5, sha256
+import hmac
+from base64 import b64decode
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ SQLParseError,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import iteritems
+import ansible_collections.community.general.plugins.module_utils.saslprep as saslprep
+
+try:
+ # pbkdf2_hmac is missing on Python 2.6; we can safely assume
+ # that a PostgreSQL 10 capable instance has at least Python 2.7 installed
+ from hashlib import pbkdf2_hmac
+ pbkdf2_found = True
+except ImportError:
+ pbkdf2_found = False
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$'
+
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
+# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+
+class InvalidFlagsError(Exception):
+ pass
+
+
+class InvalidPrivsError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def user_exists(cursor, user):
+ # The PUBLIC user is a special case that is always there
+ if user == 'PUBLIC':
+ return True
+ query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
+ cursor.execute(query, {'user': user})
+ return cursor.rowcount > 0
+
+
+def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
+ """Create a new database user (role)."""
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ query_password_data = dict(password=password, expires=expires)
+ query = ['CREATE USER "%(user)s"' %
+ {"user": user}]
+ if password is not None and password != '':
+ query.append("WITH %(crypt)s" % {"crypt": encrypted})
+ query.append("PASSWORD %(password)s")
+ if expires is not None:
+ query.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query.append(role_attr_flags)
+ query = ' '.join(query)
+ executed_queries.append(query)
+ cursor.execute(query, query_password_data)
+ return True
+
+
+def user_should_we_change_password(current_role_attrs, user, password, encrypted):
+ """Check if we should change the user's password.
+
+ Compare the proposed password with the existing one, comparing
+ hashes if encrypted. If we can't access it assume yes.
+ """
+
+ if current_role_attrs is None:
+ # on some databases, e.g. AWS RDS instances, there is no access to
+ # the pg_authid relation to check the pre-existing password, so we
+ # just assume password is different
+ return True
+
+ # Do we actually need to do anything?
+ pwchanging = False
+ if password is not None:
+ # Empty password means that the role shouldn't have a password, which
+ # means we need to check if the current password is None.
+ if password == '':
+ if current_role_attrs['rolpassword'] is not None:
+ pwchanging = True
+
+ # SCRAM hashes are represented as a special object, containing hash data:
+ # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>`
+ # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html
+ elif current_role_attrs['rolpassword'] is not None \
+ and pbkdf2_found \
+ and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']):
+
+ r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword'])
+ try:
+ # extract SCRAM params from rolpassword
+ it = int(r.group(1))
+ salt = b64decode(r.group(2))
+ server_key = b64decode(r.group(4))
+ # StoredKey is not needed here: it verifies the client's proof during
+ # SCRAM authentication, while this check recomputes ServerKey instead
+ # storedKey = b64decode(r.group(3))
+
+ # from RFC5802 https://tools.ietf.org/html/rfc5802#section-3
+ # SaltedPassword := Hi(Normalize(password), salt, i)
+ # ServerKey := HMAC(SaltedPassword, "Server Key")
+ normalized_password = saslprep.saslprep(to_text(password))
+ salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it)
+
+ server_key_verifier = hmac.new(salted_password, digestmod=sha256)
+ server_key_verifier.update(b'Server Key')
+
+ if server_key_verifier.digest() != server_key:
+ pwchanging = True
+ except Exception:
+ # We assume the password is not scram encrypted
+ # or we cannot check it properly, e.g. due to missing dependencies
+ pwchanging = True
+
+ # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
+ # 3: The size of the 'md5' prefix
+ # When the provided password looks like a MD5-hash, value of
+ # 'encrypted' is ignored.
+ elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
+ if password != current_role_attrs['rolpassword']:
+ pwchanging = True
+ elif encrypted == 'ENCRYPTED':
+ hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
+ if hashed_password != current_role_attrs['rolpassword']:
+ pwchanging = True
+
+ return pwchanging
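+
+
+# Editorial sketch (not called by the module): the SCRAM verification above,
+# condensed into a standalone helper. Unlike the code above, SASLprep
+# normalization of the password is skipped here for brevity.
+def scram_sha256_matches(password, rolpassword):
+ """Verify a cleartext password against a stored SCRAM-SHA-256 verifier.
+
+ Recomputes ServerKey = HMAC(PBKDF2(password, salt, iterations), 'Server Key')
+ and compares it with the ServerKey part of
+ 'SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey>'.
+ """
+ if not pbkdf2_found:
+ return False
+ m = re.match(SCRAM_SHA256_REGEX, rolpassword)
+ if not m:
+ return False
+ iterations = int(m.group(1))
+ salt = b64decode(m.group(2))
+ server_key = b64decode(m.group(4))
+ salted_password = pbkdf2_hmac('sha256', to_bytes(password), salt, iterations)
+ verifier = hmac.new(salted_password, b'Server Key', sha256)
+ return hmac.compare_digest(verifier.digest(), server_key)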
+
+
+def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
+ """Change user password and/or attributes. Return True if changed, False otherwise."""
+ changed = False
+
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ if user == 'PUBLIC':
+ if password is not None:
+ module.fail_json(msg="cannot change the password for PUBLIC user")
+ elif role_attr_flags != '':
+ module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
+ else:
+ return False
+
+ # Handle passwords.
+ if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
+ # Select password and all flag-like columns in order to verify changes.
+ try:
+ select = "SELECT * FROM pg_authid where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError:
+ current_role_attrs = None
+ db_connection.rollback()
+
+ pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
+
+ if current_role_attrs is None:
+ try:
+ # AWS RDS instances do not allow the user to access pg_authid,
+ # so try to get current_role_attrs from the pg_roles table
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes from pg_roles
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError as e:
+ db_connection.rollback()
+ module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
+
+ role_attr_flags_changing = False
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if expires is not None:
+ cursor.execute("SELECT %s::timestamptz;", (expires,))
+ expires_with_tz = cursor.fetchone()[0]
+ expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
+ else:
+ expires_changing = False
+
+ conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
+
+ if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' % {"user": user}]
+ if pwchanging:
+ if password != '':
+ alter.append("WITH %(crypt)s" % {"crypt": encrypted})
+ alter.append("PASSWORD %(password)s")
+ else:
+ alter.append("WITH PASSWORD NULL")
+ alter.append(role_attr_flags)
+ elif role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+ if expires is not None:
+ alter.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+
+ query_password_data = dict(password=password, expires=expires)
+ try:
+ cursor.execute(' '.join(alter), query_password_data)
+ changed = True
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+ except psycopg2.NotSupportedError as e:
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+
+ elif no_password_changes and role_attr_flags != '':
+ # Grab role information from pg_roles instead of pg_authid
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+
+ role_attr_flags_changing = False
+
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if not role_attr_flags_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' %
+ {"user": user}]
+ if role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+
+ try:
+ cursor.execute(' '.join(alter))
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+
+ # Grab new role attributes.
+ cursor.execute(select, {"user": user})
+ new_role_attrs = cursor.fetchone()
+
+ # Detect any differences between current_ and new_role_attrs.
+ changed = current_role_attrs != new_role_attrs
+
+ return changed
+
+
+def user_delete(cursor, user):
+ """Try to remove a user. Returns True if successful otherwise False"""
+ cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
+ try:
+ query = 'DROP USER "%s"' % user
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return False
+
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return True
+
+
+def has_table_privileges(cursor, user, table, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_table_privileges(cursor, user, table)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def get_table_privileges(cursor, user, table):
+ if '.' in table:
+ schema, table = table.split('.', 1)
+ else:
+ schema = 'public'
+ query = ("SELECT privilege_type FROM information_schema.role_table_grants "
+ "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
+ cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
+ return frozenset([x[0] for x in cursor.fetchall()])
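+
+
+# Editorial note: table names may be schema-qualified, with 'public' assumed
+# otherwise. E.g. get_table_privileges(cursor, 'django', 'acme.products')
+# queries table_schema='acme', table_name='products' and returns something
+# like frozenset({'SELECT', 'INSERT'}).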
+
+
+def grant_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'GRANT %s ON TABLE %s TO "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def get_database_privileges(cursor, user, db):
+ priv_map = {
+ 'C': 'CREATE',
+ 'T': 'TEMPORARY',
+ 'c': 'CONNECT',
+ }
+ query = 'SELECT datacl FROM pg_database WHERE datname = %s'
+ cursor.execute(query, (db,))
+ datacl = cursor.fetchone()[0]
+ if datacl is None:
+ return set()
+ r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
+ if r is None:
+ return set()
+ o = set()
+ for v in r.group(1):
+ o.add(priv_map[v])
+ return normalize_privileges(o, 'database')
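+
+
+# Editorial note: datacl is a PostgreSQL ACL string. For example, with
+# user='django' and datacl='{postgres=CTc/postgres,django=CTc/postgres}',
+# the regex above captures 'CTc', which priv_map expands to
+# {'CREATE', 'TEMPORARY', 'CONNECT'} before normalization.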
+
+
+def has_database_privileges(cursor, user, db, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_database_privileges(cursor, user, db)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def grant_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'GRANT %s ON DATABASE %s TO "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ revoke_funcs = dict(table=revoke_table_privileges,
+ database=revoke_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested to be removed are
+ # currently granted to the user
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[0]:
+ revoke_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def grant_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ grant_funcs = dict(table=grant_table_privileges,
+ database=grant_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested for the user are
+ # currently missing
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[2]:
+ grant_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def parse_role_attrs(cursor, role_attr_flags):
+ """
+ Parse role attributes string for user creation.
+ Format:
+
+ attributes[,attributes,...]
+
+ Where:
+
+ attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
+ [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
+ "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
+ "[NO]BYPASSRLS" ]
+
+ Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
+ Note: "[NO]CREATEUSER" role attribute is deprecated.
+
+ """
+ flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
+
+ valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
+ valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
+
+ if not flags.issubset(valid_flags):
+ raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
+ ' '.join(flags.difference(valid_flags)))
+
+ return ' '.join(flags)
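+
+
+# Editorial sketch of parse_role_attrs() behaviour (result order is
+# unspecified because a frozenset is used internally):
+#
+# parse_role_attrs(cursor, 'createdb,nologin') -> 'CREATEDB NOLOGIN'
+# parse_role_attrs(cursor, 'CREATEUSER') -> raises InvalidFlagsError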
+
+
+def normalize_privileges(privs, type_):
+ new_privs = set(privs)
+ if 'ALL' in new_privs:
+ new_privs.update(VALID_PRIVS[type_])
+ new_privs.remove('ALL')
+ if 'TEMP' in new_privs:
+ new_privs.add('TEMPORARY')
+ new_privs.remove('TEMP')
+
+ return new_privs
+
+
+def parse_privs(privs, db):
+ """
+ Parse privilege string to determine permissions for database db.
+ Format:
+
+ privileges[/privileges/...]
+
+ Where:
+
+ privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
+ TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
+ """
+ if privs is None:
+ return privs
+
+ o_privs = {
+ 'database': {},
+ 'table': {}
+ }
+ for token in privs.split('/'):
+ if ':' not in token:
+ type_ = 'database'
+ name = db
+ priv_set = frozenset(x.strip().upper()
+ for x in token.split(',') if x.strip())
+ else:
+ type_ = 'table'
+ name, privileges = token.split(':', 1)
+ priv_set = frozenset(x.strip().upper()
+ for x in privileges.split(',') if x.strip())
+
+ if not priv_set.issubset(VALID_PRIVS[type_]):
+ raise InvalidPrivsError('Invalid privs specified for %s: %s' %
+ (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
+
+ priv_set = normalize_privileges(priv_set, type_)
+ o_privs[type_][name] = priv_set
+
+ return o_privs
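+
+
+# Editorial sketch of parse_privs() on the documented format:
+#
+# parse_privs('CONNECT/products:SELECT,INSERT', 'acme') ->
+#     {'database': {'acme': {'CONNECT'}},
+#      'table': {'products': {'SELECT', 'INSERT'}}}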
+
+
+def get_valid_flags_by_version(cursor):
+ """
+ Some role attributes were introduced after certain versions. We want to
+ compile a list of valid flags against the current Postgres version.
+ """
+ current_version = cursor.connection.server_version
+
+ return [
+ flag
+ for flag, version_introduced in FLAGS_BY_VERSION.items()
+ if current_version >= version_introduced
+ ]
+
+
+def get_comment(cursor, user):
+ """Get user's comment."""
+ query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(user)s")
+ cursor.execute(query, {'user': user})
+ return cursor.fetchone()[0]
+
+
+def add_comment(cursor, user, comment):
+ """Add comment on user."""
+ if comment != get_comment(cursor, user):
+ query = 'COMMENT ON ROLE "%s" IS ' % user
+ cursor.execute(query + '%(comment)s', {'comment': comment})
+ executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
+ return True
+ else:
+ return False
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', default=None, no_log=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='str', default=None),
+ db=dict(type='str', default='', aliases=['login_db']),
+ fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']),
+ role_attr_flags=dict(type='str', default=''),
+ encrypted=dict(type='bool', default=True),
+ no_password_changes=dict(type='bool', default=False, no_log=False),
+ expires=dict(type='str', default=None),
+ conn_limit=dict(type='int', default=None),
+ session_role=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ user = module.params["user"]
+ password = module.params["password"]
+ state = module.params["state"]
+ fail_on_user = module.params["fail_on_user"]
+ if module.params['db'] == '' and module.params["priv"] is not None:
+ module.fail_json(msg="privileges require a database to be specified")
+ privs = parse_privs(module.params["priv"], module.params["db"])
+ no_password_changes = module.params["no_password_changes"]
+ if module.params["encrypted"]:
+ encrypted = "ENCRYPTED"
+ else:
+ encrypted = "UNENCRYPTED"
+ expires = module.params["expires"]
+ conn_limit = module.params["conn_limit"]
+ role_attr_flags = module.params["role_attr_flags"]
+ groups = module.params["groups"]
+ if groups:
+ groups = [e.strip() for e in groups]
+ comment = module.params["comment"]
+ session_role = module.params['session_role']
+
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, user, password, privs, expires,
+ role_attr_flags, groups, comment, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
+ except InvalidFlagsError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ kw = dict(user=user)
+ changed = False
+ user_removed = False
+
+ if state == "present":
+ if user_exists(cursor, user):
+ try:
+ changed = user_alter(db_connection, module, user, password,
+ role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ changed = user_add(cursor, user, password,
+ role_attr_flags, encrypted, expires, conn_limit)
+ except psycopg2.ProgrammingError as e:
+ module.fail_json(msg="Unable to add user with given requirement "
+ "due to : %s" % to_native(e),
+ exception=traceback.format_exc())
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ try:
+ changed = grant_privileges(cursor, user, privs) or changed
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    if groups:
+        target_roles = [user]
+ pg_membership = PgMembership(module, cursor, groups, target_roles)
+ changed = pg_membership.grant() or changed
+ executed_queries.extend(pg_membership.executed_queries)
+
+ if comment is not None:
+ try:
+ changed = add_comment(cursor, user, comment) or changed
+ except Exception as e:
+ module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ else:
+ if user_exists(cursor, user):
+ if module.check_mode:
+ changed = True
+ kw['user_removed'] = True
+ else:
+ try:
+ changed = revoke_privileges(cursor, user, privs)
+ user_removed = user_delete(cursor, user)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ changed = changed or user_removed
+ if fail_on_user and not user_removed:
+ msg = "Unable to remove user"
+ module.fail_json(msg=msg)
+ kw['user_removed'] = user_removed
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py
new file mode 100644
index 00000000..9d03408e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user_obj_stat_info
+short_description: Gather statistics about PostgreSQL user objects
+description:
+- Gathers statistics about PostgreSQL user objects.
+version_added: '0.2.0'
+options:
+ filter:
+ description:
+    - Limit the collected information with a comma-separated string or a YAML list.
+ - Allowable values are C(functions), C(indexes), C(tables).
+ - By default, collects all subsets.
+ - Unsupported values are ignored.
+ type: list
+ elements: str
+ schema:
+ description:
+    - Restrict the output to a certain schema.
+ type: str
+ db:
+ description:
+    - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+    - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- The returned C(size) and C(total_size) values are presented in bytes.
+- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
+ See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
+seealso:
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+- name: PostgreSQL statistics collector reference
+ description: Complete reference of the PostgreSQL statistics collector documentation.
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Collect information about all supported user objects of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+
+- name: Collect information about all supported user objects in the custom schema of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ schema: custom
+
+- name: Collect information about user tables and indexes in the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ filter: tables, indexes
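+
+# Function statistics additionally require the PostgreSQL track_functions
+# parameter to be enabled (see the notes section above).
+- name: Collect information about user functions in the acme database
+  community.general.postgresql_user_obj_stat_info:
+    db: acme
+    filter: functions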
+'''
+
+RETURN = r'''
+indexes:
+  description: User index statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
+tables:
+ description: User table statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
+functions:
+ description: User function statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgUserObjStatInfo():
+ """Class to collect information about PostgreSQL user objects.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ executed_queries (list): List of executed queries.
+ info (dict): Statistics dictionary.
+ obj_func_mapping (dict): Mapping of object types to corresponding functions.
+        schema (str): Name of a schema to restrict stats collection to.
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'functions': {},
+ 'indexes': {},
+ 'tables': {},
+ }
+ self.obj_func_mapping = {
+ 'functions': self.get_func_stat,
+ 'indexes': self.get_idx_stat,
+ 'tables': self.get_tbl_stat,
+ }
+ self.schema = None
+
+ def collect(self, filter_=None, schema=None):
+ """Collect statistics information of user objects.
+
+ Kwargs:
+ filter_ (list): List of subsets which need to be collected.
+        schema (str): Restrict stats collection to a certain schema.
+
+ Returns:
+ ``self.info``.
+ """
+ if schema:
+ self.set_schema(schema)
+
+ if filter_:
+ for obj_type in filter_:
+ obj_type = obj_type.strip()
+ obj_func = self.obj_func_mapping.get(obj_type)
+
+ if obj_func is not None:
+ obj_func()
+ else:
+ self.module.warn("Unknown filter option '%s'" % obj_type)
+
+ else:
+ for obj_func in self.obj_func_mapping.values():
+ obj_func()
+
+ return self.info
+
+ def get_func_stat(self):
+ """Get function statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_functions"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='functions',
+ schema_key='schemaname',
+ name_key='funcname')
+
+ def get_idx_stat(self):
+ """Get index statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_indexes"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='indexes',
+ schema_key='schemaname',
+ name_key='indexrelname')
+
+ def get_tbl_stat(self):
+ """Get table statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_tables"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='tables',
+ schema_key='schemaname',
+ name_key='relname')
+
+ def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
+ # Convert result to list of dicts to handle it easier:
+ result = [dict(row) for row in result]
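+        # Illustration: a row like {'schemaname': 'public', 'relname': 'test',
+        # 'seq_scan': 2} ends up as
+        # self.info['tables']['public']['test'] = {'seq_scan': 2, ...}.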
+
+ for elem in result:
+            # Add schema name as a key if not present:
+ if not self.info[info_key].get(elem[schema_key]):
+ self.info[info_key][elem[schema_key]] = {}
+
+            # Add object name key as a subkey
+            # (names are unique within a schema, so no additional checks are needed):
+ self.info[info_key][elem[schema_key]][elem[name_key]] = {}
+
+            # Add the remaining attributes to the object entry:
+ for key, val in iteritems(elem):
+ if key not in (schema_key, name_key):
+ self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
+
+ if info_key in ('tables', 'indexes'):
+ schemaname = elem[schema_key]
+ if self.schema:
+ schemaname = self.schema
+
+ relname = '%s.%s' % (schemaname, elem[name_key])
+
+ result = exec_sql(self, "SELECT pg_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
+
+ if info_key == 'tables':
+ result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
+
+ def set_schema(self, schema):
+ """If schema exists, sets self.schema, otherwise fails."""
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %s")
+ result = exec_sql(self, query, query_params=(schema,),
+ add_to_executed=False)
+
+ if result and result[0][0]:
+ self.schema = schema
+ else:
+ self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ schema=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params["filter"]
+ schema = module.params["schema"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We don't need to commit anything, so, set it to False:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ############################
+ # Create object and do work:
+ pg_obj_info = PgUserObjStatInfo(module, cursor)
+
+ info_dict = pg_obj_info.collect(filter_, schema)
+
+ # Clean up:
+ cursor.close()
+ db_connection.close()
+
+ # Return information:
+ module.exit_json(**info_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py
new file mode 100644
index 00000000..90798672
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py
@@ -0,0 +1,654 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks
+short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
+description:
+  - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created, it can optionally wait
+    for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0.
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The name of the virtual machine.
+ type: str
+ image:
+ description:
+ - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ datacenter:
+ description:
+ - The datacenter to provision this virtual machine.
+ type: str
+ cores:
+ description:
+ - The number of CPU cores to allocate to the virtual machine.
+ default: 2
+ type: int
+ ram:
+ description:
+ - The amount of memory to allocate to the virtual machine.
+ default: 2048
+ type: int
+ cpu_family:
+ description:
+ - The CPU family type to allocate to the virtual machine.
+ type: str
+ default: AMD_OPTERON
+ choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+ volume_size:
+ description:
+ - The size in GB of the boot volume.
+ type: int
+ default: 10
+ bus:
+ description:
+ - The bus type for the volume.
+ type: str
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ type: list
+ count:
+ description:
+ - The number of virtual machines to create.
+ type: int
+ default: 1
+ location:
+ description:
+ - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
+ type: str
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ assign_public_ip:
+ description:
+ - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
+ type: bool
+ default: 'no'
+ lan:
+ description:
+ - The ID of the LAN you wish to add the servers to.
+ type: int
+ default: 1
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ remove_boot_volume:
+ description:
+ - remove the bootVolume of the virtual machine you're destroying.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - create or terminate instances
+ - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
+ type: str
+ default: 'present'
+ disk_type:
+ description:
+ - the type of disk to be allocated.
+ type: str
+ choices: [SSD, HDD]
+ default: HDD
+
+requirements:
+ - "profitbricks"
+ - "python >= 2.6"
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details; see the subscription_user and subscription_password options.
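+#
+# A minimal authentication sketch (placeholder values):
+#
+#   community.general.profitbricks:
+#     subscription_user: user@example.com
+#     subscription_password: secret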
+
+# Provisioning example
+- name: Create three servers and enumerate their names
+ community.general.profitbricks:
+ datacenter: Tardis One
+ name: web%02d.stackpointcloud.com
+ cores: 4
+ ram: 2048
+ volume_size: 50
+ cpu_family: INTEL_XEON
+ image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+ location: us/las
+ count: 3
+ assign_public_ip: true
+
+- name: Remove virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: absent
+
+- name: Start virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: running
+
+- name: Stop virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: stopped
+'''
+
+import re
+import uuid
+import time
+import traceback
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
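+
+# Illustration: this pattern matches UUID-shaped identifiers such as
+# a3eae284-a2fe-11e4-b187-5f1f641608c8 (case-insensitive).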
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+ cores = module.params.get('cores')
+ ram = module.params.get('ram')
+ cpu_family = module.params.get('cpu_family')
+ volume_size = module.params.get('volume_size')
+ disk_type = module.params.get('disk_type')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ bus = module.params.get('bus')
+ lan = module.params.get('lan')
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+ location = module.params.get('location')
+ image = module.params.get('image')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if assign_public_ip:
+ public_found = False
+
+ lans = profitbricks.list_lans(datacenter)
+        for lan in lans['items']:
+            if lan['properties']['public']:
+                public_found = True
+                lan = lan['id']
+                break
+
+ if not public_found:
+ i = LAN(
+ name='public',
+ public=True)
+
+ lan_response = profitbricks.create_lan(datacenter, i)
+ _wait_for_completion(profitbricks, lan_response,
+ wait_timeout, "_create_machine")
+ lan = lan_response['id']
+
+ v = Volume(
+ name=str(uuid.uuid4()).replace('-', '')[:10],
+ size=volume_size,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ bus=bus)
+
+ n = NIC(
+ lan=int(lan)
+ )
+
+ s = Server(
+ name=name,
+ ram=ram,
+ cores=cores,
+ cpu_family=cpu_family,
+ create_volumes=[v],
+ nics=[n],
+ )
+
+ try:
+ create_server_response = profitbricks.create_server(
+ datacenter_id=datacenter, server=s)
+
+ _wait_for_completion(profitbricks, create_server_response,
+ wait_timeout, "create_virtual_machine")
+
+ server_response = profitbricks.get_server(
+ datacenter_id=datacenter,
+ server_id=create_server_response['id'],
+ depth=3
+ )
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server: %s" % str(e))
+ else:
+ return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+ state = module.params.get('state')
+
+ try:
+ if state == 'running':
+ profitbricks.start_server(datacenter_id, server_id)
+ else:
+ profitbricks.stop_server(datacenter_id, server_id)
+
+ return True
+ except Exception as e:
+ module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+ datacenter = module.params.get('datacenter')
+ location = module.params.get('location')
+ wait_timeout = module.params.get('wait_timeout')
+
+ i = Datacenter(
+ name=datacenter,
+ location=location
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ return datacenter_response
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server(s): %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+ """
+ Create new virtual machine
+
+ module : AnsibleModule object
+ community.general.profitbricks: authenticated profitbricks object
+
+ Returns:
+        A results dict describing the created virtual machines.
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ lan = module.params.get('lan')
+ wait_timeout = module.params.get('wait_timeout')
+ failed = True
+ datacenter_found = False
+
+ virtual_machines = []
+ virtual_machine_ids = []
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if datacenter_id:
+ datacenter_found = True
+
+ if not datacenter_found:
+ datacenter_response = _create_datacenter(module, profitbricks)
+ datacenter_id = datacenter_response['id']
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "create_virtual_machine")
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+            if to_native(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name]
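+
+    # Illustration: with name 'web%02d' and count 3 this yields
+    # ['web01', 'web02', 'web03']; a plain name such as 'web' is first
+    # rewritten to 'web%d' by the TypeError handler above.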
+
+ # Prefetch a list of servers for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for name in names:
+ # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name):
+ continue
+
+ create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
+ nics = profitbricks.list_nics(datacenter_id, create_response['id'])
+ for n in nics['items']:
+ if lan == n['properties']['lan']:
+ create_response.update({'public_ip': n['properties']['ips'][0]})
+
+ virtual_machines.append(create_response)
+
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'machines': virtual_machines,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in virtual_machines],
+ }
+ }
+
+ return results
+
+
+def remove_virtual_machine(module, profitbricks):
+ """
+ Removes a virtual machine.
+
+ This will remove the virtual machine along with the bootVolume.
+
+ module : AnsibleModule object
+ community.general.profitbricks: authenticated profitbricks object.
+
+ Not yet supported: handle deletion of attached data disks.
+
+ Returns:
+        True if the virtual server was deleted, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+ remove_boot_volume = module.params.get('remove_boot_volume')
+ changed = False
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID for server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ # Remove the server's boot volume
+ if remove_boot_volume:
+ _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
+
+ # Remove the server
+ try:
+ server_response = profitbricks.delete_server(datacenter_id, server_id)
+ except Exception as e:
+ module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
+ else:
+ changed = True
+
+ return changed
+
+
+def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
+ """
+ Remove the boot volume from the server
+ """
+ try:
+ server = profitbricks.get_server(datacenter_id, server_id)
+ volume_id = server['properties']['bootVolume']['id']
+ volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
+
+
+def startstop_machine(module, profitbricks, state):
+ """
+ Starts or Stops a virtual machine.
+
+ module : AnsibleModule object
+ community.general.profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True when the servers process the action successfully, false otherwise.
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ changed = False
+
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID of server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ _startstop_machine(module, profitbricks, datacenter_id, server_id)
+ changed = True
+
+ if wait:
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ matched_instances = []
+ for res in profitbricks.list_servers(datacenter_id)['items']:
+ if state == 'running':
+ if res['properties']['vmState'].lower() == state:
+ matched_instances.append(res)
+ elif state == 'stopped':
+ if res['properties']['vmState'].lower() == 'shutoff':
+ matched_instances.append(res)
+
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
+
+    return changed
+
+
+def _get_datacenter_id(datacenters, identity):
+ """
+ Fetch and return datacenter UUID by datacenter name if found.
+ """
+ for datacenter in datacenters['items']:
+ if identity in (datacenter['properties']['name'], datacenter['id']):
+ return datacenter['id']
+ return None
+
+
+def _get_server_id(servers, identity):
+ """
+ Fetch and return server UUID by server name if found.
+ """
+ for server in servers['items']:
+ if identity in (server['properties']['name'], server['id']):
+ return server['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ name=dict(),
+ image=dict(),
+ cores=dict(type='int', default=2),
+ ram=dict(type='int', default=2048),
+ cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
+ default='AMD_OPTERON'),
+ volume_size=dict(type='int', default=10),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ lan=dict(type='int', default=1),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ assign_public_ip=dict(type='bool', default=False),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ remove_boot_volume=dict(type='bool', default=True),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+            module.fail_json(msg='datacenter parameter is required '
+                                 'for removing machines.')
+
+ try:
+            changed = remove_virtual_machine(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for ' +
+ 'running or stopping machines.')
+ try:
+            changed = startstop_machine(module, profitbricks, state)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is ' +
+ 'required for new instance')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is ' +
+ 'required for new instance')
+
+ try:
+            machine_dict_array = create_virtual_machine(module, profitbricks)
+ module.exit_json(**machine_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
new file mode 100644
index 00000000..e3ba1d49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter.
+description:
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
+    on profitbricks >= 1.0.0.
+options:
+ name:
+ description:
+ - The name of the virtual datacenter.
+ type: str
+ description:
+ description:
+ - The description of the virtual datacenter.
+ type: str
+ required: false
+ location:
+ description:
+ - The datacenter location.
+ type: str
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Create or terminate datacenters.
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a datacenter
+ community.general.profitbricks_datacenter:
+    name: Tardis One
+ wait_timeout: 500
+
+- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
+ community.general.profitbricks_datacenter:
+    name: Tardis One
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Datacenter
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _remove_datacenter(module, profitbricks, datacenter):
+ try:
+ profitbricks.delete_datacenter(datacenter)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
+
+def create_datacenter(module, profitbricks):
+ """
+ Creates a Datacenter
+
+ This will create a new Datacenter in the specified location.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if a new datacenter was created, false otherwise
+ """
+ name = module.params.get('name')
+ location = module.params.get('location')
+ description = module.params.get('description')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ i = Datacenter(
+ name=name,
+ location=location,
+ description=description
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ if wait:
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ results = {
+ 'datacenter_id': datacenter_response['id']
+ }
+
+ return results
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+
+def remove_datacenter(module, profitbricks):
+ """
+ Removes a Datacenter.
+
+ This will remove a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the datacenter was deleted, false otherwise
+ """
+ name = module.params.get('name')
+ changed = False
+
+    if uuid_match.match(name):
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+ else:
+ datacenters = profitbricks.list_datacenters()
+
+ for d in datacenters['items']:
+ vdc = profitbricks.get_datacenter(d['id'])
+
+ if name == vdc['properties']['name']:
+ name = d['id']
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(),
+ description=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=600, type='int'),
+ state=dict(default='present'), # @TODO add choices
+ )
+ )
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+            module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
+
+ try:
+            changed = remove_datacenter(module, profitbricks)
+            module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new datacenter')
+ if not module.params.get('location'):
+ module.fail_json(msg='location parameter is required for a new datacenter')
+
+ try:
+            datacenter_dict_array = create_datacenter(module, profitbricks)
+ module.exit_json(**datacenter_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
new file mode 100644
index 00000000..49941241
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or Remove a NIC.
+description:
+  - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0.
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ required: true
+ server:
+ description:
+ - The server name or ID.
+ type: str
+ required: true
+ name:
+ description:
+ - The name or ID of the NIC. This is only required on deletes, but not on create.
+ - If not specified, it defaults to a value based on UUID4.
+ type: str
+ lan:
+ description:
+ - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: true
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: true
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ wait_timeout: 500
+ state: present
+
+- name: Remove a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ name: 7341c2454f
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, NIC
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _make_default_name():
+ return str(uuid.uuid4()).replace('-', '')[:10]
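+
+
+# Illustration: _make_default_name() returns the first 10 characters of a
+# dash-stripped UUID4, e.g. a value like '7341c2454f' (the EXAMPLES section
+# above uses a name of this shape).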
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def create_nic(module, profitbricks):
+ """
+ Creates a NIC.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+        The NIC creation response from the API.
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ lan = module.params.get('lan')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+ try:
+ n = NIC(
+ name=name,
+ lan=lan
+ )
+
+ nic_response = profitbricks.create_nic(datacenter, server, n)
+
+ if wait:
+ _wait_for_completion(profitbricks, nic_response,
+ wait_timeout, "create_nic")
+
+ return nic_response
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
+
+def delete_nic(module, profitbricks):
+ """
+ Removes a NIC
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was removed, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ server_found = False
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server_found = True
+ server = s['id']
+ break
+
+ if not server_found:
+ return False
+
+ # Locate UUID for NIC
+ nic_found = False
+ if not (uuid_match.match(name)):
+ nic_list = profitbricks.list_nics(datacenter, server)
+ for n in nic_list['items']:
+ if name == n['properties']['name']:
+ nic_found = True
+ name = n['id']
+ break
+
+ if not nic_found:
+ return False
+
+ try:
+ nic_response = profitbricks.delete_nic(datacenter, server, name)
+ return nic_response
+ except Exception as e:
+ module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(required=True),
+ server=dict(required=True),
+ name=dict(),
+ lan=dict(),
+ subscription_user=dict(required=True),
+ subscription_password=dict(required=True, no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ ),
+ required_if=(
+ ('state', 'absent', ['name']),
+ ('state', 'present', ['lan']),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+            changed = delete_nic(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+ elif state == 'present':
+ try:
+            nic_dict = create_nic(module, profitbricks)
+ module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
new file mode 100644
index 00000000..a63cbcdd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume
+short_description: Create or destroy a volume.
+description:
+  - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0.
+options:
+ datacenter:
+ description:
+ - The datacenter in which to create the volumes.
+ type: str
+ name:
+ description:
+ - The name of the volumes. You can enumerate the names using auto_increment.
+ type: str
+ size:
+ description:
+ - The size of the volume.
+ type: int
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type.
+ type: str
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ image:
+ description:
+ - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ required: false
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ required: false
+ disk_type:
+ description:
+ - The disk type of the volume.
+ type: str
+ required: false
+ default: HDD
+ choices: [ "HDD", "SSD" ]
+ licence_type:
+ description:
+ - The licence type for the volume. This is used when the image is non-standard.
+ - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
+ type: str
+ required: false
+ default: UNKNOWN
+ count:
+ description:
+ - The number of volumes you wish to create.
+ type: int
+ required: false
+ default: 1
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ default: yes
+ type: bool
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ type: list
+ required: false
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+      - wait for the volume to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+      - create or terminate volumes
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create multiple volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ name: vol%02d
+ count: 5
+ auto_increment: yes
+ wait_timeout: 500
+ state: present
+
+- name: Remove Volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ instance_ids:
+ - 'vol01'
+ - 'vol02'
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+import traceback
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_volume(module, profitbricks, datacenter, name):
+ size = module.params.get('size')
+ bus = module.params.get('bus')
+ image = module.params.get('image')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ disk_type = module.params.get('disk_type')
+ licence_type = module.params.get('licence_type')
+ wait_timeout = module.params.get('wait_timeout')
+ wait = module.params.get('wait')
+
+ try:
+ v = Volume(
+ name=name,
+ size=size,
+ bus=bus,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ licence_type=licence_type
+ )
+
+ volume_response = profitbricks.create_volume(datacenter, v)
+
+ if wait:
+ _wait_for_completion(profitbricks, volume_response,
+ wait_timeout, "_create_volume")
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the volume: %s" % str(e))
+
+ return volume_response
+
+
+def _delete_volume(module, profitbricks, datacenter, volume):
+ try:
+ profitbricks.delete_volume(datacenter, volume)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the volume: %s" % str(e))
+
+
+def create_volume(module, profitbricks):
+ """
+ Creates a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+        A results dict describing the created volumes.
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+
+ datacenter_found = False
+ failed = True
+ volumes = []
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ datacenter_found = True
+ break
+
+ if not datacenter_found:
+ module.fail_json(msg='datacenter could not be found.')
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+            if to_native(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name] * count
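+
+    # Illustration: with name 'vol%02d' and count 5 this yields
+    # ['vol01', 'vol02', 'vol03', 'vol04', 'vol05']; without auto_increment
+    # the same name is reused count times.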
+
+ for name in names:
+ create_response = _create_volume(module, profitbricks, str(datacenter), name)
+ volumes.append(create_response)
+ _attach_volume(module, profitbricks, datacenter, create_response['id'])
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'volumes': volumes,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in volumes],
+ }
+ }
+
+ return results
+
+
+def delete_volume(module, profitbricks):
+ """
+ Removes a volume.
+
+    This will remove a volume from a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was removed, false otherwise
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ datacenter = module.params.get('datacenter')
+ changed = False
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ for n in instance_ids:
+        if uuid_match.match(n):
+ _delete_volume(module, profitbricks, datacenter, n)
+ changed = True
+ else:
+ volumes = profitbricks.list_volumes(datacenter)
+ for v in volumes['items']:
+ if n == v['properties']['name']:
+ volume_id = v['id']
+ _delete_volume(module, profitbricks, datacenter, volume_id)
+ changed = True
+
+ return changed
+
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+        The volume attachment response from the API.
+ """
+ server = module.params.get('server')
+
+ # Locate UUID for Server
+ if server:
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ try:
+ return profitbricks.attach_volume(datacenter, server, volume)
+ except Exception as e:
+ module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(),
+ size=dict(type='int', default=10),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ image=dict(),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ licence_type=dict(default='UNKNOWN'),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for deleting a volume.')
+
+ try:
+ changed = delete_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for new instance')
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+
+ try:
+ volume_dict_array = create_volume(module, profitbricks)
+ module.exit_json(**volume_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
new file mode 100644
index 00000000..72f03e67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume_attachments
+short_description: Attach or detach a volume.
+description:
+ - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ server:
+ description:
+ - The name of the server to attach the volume to or detach it from.
+ type: str
+ volume:
+ description:
+ - The volume name or ID.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Attach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: present
+
+- name: Detach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
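Parameters in this module may be given either as resource names or as UUIDs; the uuid_match regex above decides which lookup path each value takes. A quick illustration (the sample values are invented):

    # Sketch: name-or-UUID dispatch driven by the regex above.
    print(bool(uuid_match.match('d2696461-7166-4f22-ac08-d78f7f31b462')))  # True -> use as ID
    print(bool(uuid_match.match('vol01')))                                 # False -> resolve by name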
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request failed to complete ' + msg + ' "' + str(
+ promise['requestId']) + '" to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
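A hedged usage sketch for the polling helper above (the attach_and_wait wrapper, client handle, and IDs are placeholders, not part of this module): the SDK call is assumed to return a promise dict carrying a 'requestId', which _wait_for_completion polls until DONE, FAILED, or timeout.

    # Sketch (assumed SDK behavior): await an attach request end-to-end.
    def attach_and_wait(client, dc_id, srv_id, vol_id, timeout=600):
        promise = client.attach_volume(dc_id, srv_id, vol_id)  # dict with 'requestId'
        _wait_for_completion(client, promise, timeout, 'attach_volume')
        return promise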
+
+def attach_volume(module, profitbricks):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.attach_volume(datacenter, server, volume)
+
+
+def detach_volume(module, profitbricks):
+ """
+ Detaches a volume.
+
+ This will remove a volume from the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was detached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.detach_volume(datacenter, server, volume)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ volume=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+ if not module.params.get('volume'):
+ module.fail_json(msg='volume parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ changed = detach_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+ elif state == 'present':
+ try:
+ attach_volume(module, profitbricks)
+ module.exit_json()
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py
new file mode 100644
index 00000000..140d56f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py
@@ -0,0 +1,735 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxmox
+short_description: management of instances in Proxmox VE cluster
+description:
+ - allows you to create/delete/stop instances in Proxmox VE cluster
+ - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
+ - From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use the C(PROXMOX_PASSWORD) environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ vmid:
+ description:
+ - the instance id
+ - if not set, the next available VM ID will be fetched from ProxmoxAPI.
+ - if not set, it will be fetched from ProxmoxAPI based on the hostname
+ type: str
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ type: bool
+ default: 'no'
+ node:
+ description:
+ - Proxmox VE node on which the new VM will be created
+ - required only for C(state=present)
+ - for other states it will be autodiscovered
+ type: str
+ pool:
+ description:
+ - Proxmox VE resource pool
+ type: str
+ password:
+ description:
+ - the instance root password
+ - required only for C(state=present)
+ type: str
+ hostname:
+ description:
+ - the instance hostname
+ - required only for C(state=present)
+ - must be unique if vmid is not passed
+ type: str
+ ostemplate:
+ description:
+ - the template for creating the VM
+ - required only for C(state=present)
+ type: str
+ disk:
+ description:
+ - hard disk size in GB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(3). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpus:
+ description:
+ - numbers of allocated cpus for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ memory:
+ description:
+ - memory size in MB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ swap:
+ description:
+ - swap memory size in MB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ netif:
+ description:
+ - specifies network interfaces for the container, as a hash/dictionary defining the interfaces.
+ type: dict
+ mounts:
+ description:
+ - specifies additional mounts (separate disks) for the container, as a hash/dictionary defining the mount points
+ type: dict
+ ip_address:
+ description:
+ - specifies the address the container will be assigned
+ type: str
+ onboot:
+ description:
+ - specifies whether a VM will be started during system bootup
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ cpuunits:
+ description:
+ - CPU weight for a VM
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ nameserver:
+ description:
+ - sets DNS server IP address for a container
+ type: str
+ searchdomain:
+ description:
+ - sets DNS search domain for a container
+ type: str
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - forcing operations
+ - can be used only with states C(present), C(stopped), C(restarted)
+ - with C(state=present) the force option allows overwriting an existing container
+ - with states C(stopped), C(restarted) it allows force stopping the instance
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicate desired state of the instance
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ default: present
+ pubkey:
+ description:
+ - Public key to add to /root/.ssh/authorized_keys. This was added in Proxmox 4.2; it is ignored for earlier versions
+ type: str
+ unprivileged:
+ description:
+ - Indicate if the container should be unprivileged
+ type: bool
+ default: 'no'
+ description:
+ description:
+ - Specify the description for the container. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ version_added: '0.2.0'
+ hookscript:
+ description:
+ - Script that will be executed during various steps in the container's lifetime.
+ type: str
+ version_added: '0.2.0'
+ proxmox_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems when
+ a user expects different behavior from Proxmox by default, or when the filled-in
+ defaults cause problems once they have been set.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value.
+ - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+notes:
+ - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = r'''
+- name: Create new container with minimal options
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with hookscript and description
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ hookscript: 'local:snippets/vm_hook.sh'
+ description: created with ansible
+
+- name: Create new container automatically selecting the next available vmid.
+ community.general.proxmox:
+ node: 'uk-mc02'
+ api_user: 'root@pam'
+ api_password: '1q2w3e'
+ api_host: 'node1'
+ password: '123456'
+ hostname: 'example.org'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+ - name: Create new container with minimal options with force (it will rewrite the existing container)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ force: yes
+
+ - name: Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options defining network interface with dhcp
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining network interface with static ip
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining a mount with 8GB
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
+
+- name: Create new container with minimal options defining a cpu core limit
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ cores: 2
+
+- name: Start container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+
+- name: >
+ Start container with mount. You should enter a 90-second timeout because servers
+ with additional disks take longer to boot
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+ timeout: 90
+
+- name: Stop container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: stopped
+
+- name: Stop container with force
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ force: yes
+ state: stopped
+
+ - name: Restart container (you cannot restart a stopped or mounted container)
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: restarted
+
+- name: Remove container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: absent
+'''
+
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VZ_TYPE = None
+
+
+def get_nextvmid(module, proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, hostname):
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname]
+
+
+def get_instance(proxmox, vmid):
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+
+def content_check(proxmox, node, ostemplate, template_store):
+ return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
+
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def proxmox_version(proxmox):
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
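proxmox_version() feeds the VZ_TYPE switch in main() below: PVE releases before 4.0 use OpenVZ containers, later releases use LXC. A minimal sketch of that dispatch (the vz_type_for helper is for illustration only):

    # Sketch: the containerization-type switch used in main().
    from distutils.version import LooseVersion

    def vz_type_for(version_string):
        return 'openvz' if LooseVersion(version_string) < LooseVersion('4.0') else 'lxc'

    print(vz_type_for('3.4'))  # openvz
    print(vz_type_for('6.2'))  # lxc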
+
+def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if VZ_TYPE == 'lxc':
+ kwargs['cpulimit'] = cpus
+ kwargs['rootfs'] = disk
+ if 'netif' in kwargs:
+ kwargs.update(kwargs['netif'])
+ del kwargs['netif']
+ if 'mounts' in kwargs:
+ kwargs.update(kwargs['mounts'])
+ del kwargs['mounts']
+ if 'pubkey' in kwargs:
+ if proxmox_version(proxmox) >= LooseVersion('4.2'):
+ kwargs['ssh-public-keys'] = kwargs['pubkey']
+ del kwargs['pubkey']
+ else:
+ kwargs['cpus'] = cpus
+ kwargs['disk'] = disk
+
+ taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+
+ while timeout:
+ if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for VM to be created. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def start_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def stop_instance(module, proxmox, vm, vmid, timeout, force):
+ if force:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+ else:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def umount_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for VM to be unmounted. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
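start_instance, stop_instance, and umount_instance above all repeat the same task-polling loop. A hedged refactor sketch of the shared shape (wait_for_task is hypothetical and not part of the module):

    # Sketch: the common task-wait loop factored into one helper.
    def wait_for_task(module, proxmox, node, taskid, timeout, action):
        while timeout:
            status = proxmox.nodes(node).tasks(taskid).status.get()
            if status['status'] == 'stopped' and status['exitstatus'] == 'OK':
                return True
            timeout -= 1
            if timeout == 0:
                module.fail_json(msg='Reached timeout while waiting for %s. Last line in task before timeout: %s'
                                 % (action, proxmox.nodes(node).tasks(taskid).log.get()[:1]))
            time.sleep(1)
        return False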
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ vmid=dict(required=False),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ pool=dict(),
+ password=dict(no_log=True),
+ hostname=dict(),
+ ostemplate=dict(),
+ disk=dict(type='str'),
+ cores=dict(type='int'),
+ cpus=dict(type='int'),
+ memory=dict(type='int'),
+ swap=dict(type='int'),
+ netif=dict(type='dict'),
+ mounts=dict(type='dict'),
+ ip_address=dict(),
+ onboot=dict(type='bool'),
+ storage=dict(default='local'),
+ cpuunits=dict(type='int'),
+ nameserver=dict(),
+ searchdomain=dict(),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ pubkey=dict(type='str', default=None),
+ unprivileged=dict(type='bool', default=False),
+ description=dict(type='str'),
+ hookscript=dict(type='str'),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ disk = module.params['disk']
+ cpus = module.params['cpus']
+ memory = module.params['memory']
+ swap = module.params['swap']
+ storage = module.params['storage']
+ hostname = module.params['hostname']
+ if module.params['ostemplate'] is not None:
+ template_store = module.params['ostemplate'].split(":")[0]
+ timeout = module.params['timeout']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ disk="3",
+ cores=1,
+ cpus=1,
+ memory=512,
+ swap=0,
+ onboot=False,
+ cpuunits=1000,
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError as e:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global VZ_TYPE
+ VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc'
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid not set get the Next VM id from ProxmoxAPI
+ # If hostname is set get the VM id from ProxmoxAPI
+ if not vmid and state == 'present':
+ vmid = get_nextvmid(module, proxmox)
+ elif not vmid and hostname:
+ hosts = get_vmid(proxmox, hostname)
+ if len(hosts) == 0:
+ module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state)
+ vmid = hosts[0]
+ elif not vmid:
+ module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+ if state == 'present':
+ try:
+ if get_instance(proxmox, vmid) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
+ # If no vmid was passed, there cannot be another VM named 'hostname'
+ if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
+ elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
+ module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' not exists in cluster" % node)
+ elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
+ module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
+ % (module.params['ostemplate'], node, template_store))
+
+ create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
+ cores=module.params['cores'],
+ pool=module.params['pool'],
+ password=module.params['password'],
+ hostname=module.params['hostname'],
+ ostemplate=module.params['ostemplate'],
+ netif=module.params['netif'],
+ mounts=module.params['mounts'],
+ ip_address=module.params['ip_address'],
+ onboot=int(module.params['onboot']),
+ cpuunits=module.params['cpuunits'],
+ nameserver=module.params['nameserver'],
+ searchdomain=module.params['searchdomain'],
+ force=int(module.params['force']),
+ pubkey=module.params['pubkey'],
+ unprivileged=int(module.params['unprivileged']),
+ description=module.params['description'],
+ hookscript=module.params['hookscript'])
+
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ except Exception as e:
+ module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+ elif state == 'started':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ if module.params['force']:
+ if umount_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ else:
+ module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
+ "You can use force option to umount it.") % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
+
+ if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
+ getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and
+ start_instance(module, proxmox, vm, vmid, timeout)):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for VM to be removed. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
new file mode 100644
index 00000000..fc7c37c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_domain_info
+short_description: Retrieve information about one or more Proxmox VE domains
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE domains.
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing domains
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_domains
+
+- name: Retrieve information about the pve domain
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_domain_pve
+'''
+
+
+RETURN = '''
+proxmox_domains:
+ description: List of authentication domains.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the realm.
+ returned: on success
+ type: str
+ realm:
+ description: Realm name.
+ returned: on success
+ type: str
+ type:
+ description: Realm type.
+ returned: on success
+ type: str
+ digest:
+ description: Realm hash.
+ returned: on success, can be absent
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
+ def get_domain(self, realm):
+ try:
+ domain = self.proxmox_api.access.domains.get(realm)
+ except Exception:
+ self.module.fail_json(msg="Domain '%s' does not exist" % realm)
+ domain['realm'] = realm
+ return domain
+
+ def get_domains(self):
+ domains = self.proxmox_api.access.domains.get()
+ return domains
+
+
+def proxmox_domain_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ domain_info_args = proxmox_domain_info_argument_spec()
+ module_args.update(domain_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxDomainInfoAnsible(module)
+ domain = module.params['domain']
+
+ if domain:
+ domains = [proxmox.get_domain(realm=domain)]
+ else:
+ domains = proxmox.get_domains()
+ result['proxmox_domains'] = domains
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
new file mode 100644
index 00000000..063d28e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_group_info
+short_description: Retrieve information about one or more Proxmox VE groups
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE groups
+options:
+ group:
+ description:
+ - Restrict results to a specific group.
+ aliases: ['groupid', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing groups
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_groups
+
+- name: Retrieve information about the admin group
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ group: admin
+ register: proxmox_group_admin
+'''
+
+
+RETURN = '''
+proxmox_groups:
+ description: List of groups.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the group.
+ returned: on success, can be absent
+ type: str
+ groupid:
+ description: Group name.
+ returned: on success
+ type: str
+ users:
+ description: List of users in the group.
+ returned: on success
+ type: list
+ elements: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
+ def get_group(self, groupid):
+ try:
+ group = self.proxmox_api.access.groups.get(groupid)
+ except Exception:
+ self.module.fail_json(msg="Group '%s' does not exist" % groupid)
+ group['groupid'] = groupid
+ return ProxmoxGroup(group)
+
+ def get_groups(self):
+ groups = self.proxmox_api.access.groups.get()
+ return [ProxmoxGroup(group) for group in groups]
+
+
+class ProxmoxGroup:
+ def __init__(self, group):
+ self.group = dict()
+ # Data representation is not the same depending on API calls
+ for k, v in group.items():
+ if k == 'users' and isinstance(v, str):
+ self.group['users'] = v.split(',')
+ elif k == 'members':
+ self.group['users'] = group['members']
+ else:
+ self.group[k] = v
+
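ProxmoxGroup tolerates the two payload shapes noted in the comment above: 'users' as a comma-separated string, or a 'members' list; both normalize to the same 'users' list. A quick illustration with invented payloads:

    # Sketch: both payload shapes normalize to the same 'users' list.
    listing_shape = {'groupid': 'admin', 'users': 'root@pam,alice@pve'}
    detail_shape = {'groupid': 'admin', 'members': ['root@pam', 'alice@pve']}
    print(ProxmoxGroup(listing_shape).group['users'])  # ['root@pam', 'alice@pve']
    print(ProxmoxGroup(detail_shape).group['users'])   # ['root@pam', 'alice@pve']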
+
+def proxmox_group_info_argument_spec():
+ return dict(
+ group=dict(type='str', aliases=['groupid', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ group_info_args = proxmox_group_info_argument_spec()
+ module_args.update(group_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxGroupInfoAnsible(module)
+ group = module.params['group']
+
+ if group:
+ groups = [proxmox.get_group(groupid=group)]
+ else:
+ groups = proxmox.get_groups()
+ result['proxmox_groups'] = [group.group for group in groups]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
new file mode 100644
index 00000000..0161fefc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
@@ -0,0 +1,1449 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Abdoul Bah (@helldorado) <bahabdoul at gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_kvm
+short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+description:
+ - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+ - From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
+author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
+options:
+ acpi:
+ description:
+ - Specify if ACPI should be enabled/disabled.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ agent:
+ description:
+ - Specify if the QEMU Guest Agent should be enabled/disabled.
+ type: bool
+ args:
+ description:
+ - Pass arbitrary arguments to kvm.
+ - This option is for experts only!
+ type: str
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ type: str
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ type: str
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can use the C(PROXMOX_PASSWORD) environment variable.
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ autostart:
+ description:
+ - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ balloon:
+ description:
+ - Specify the amount of RAM for the VM in MB.
+ - Using zero disables the balloon driver.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ bios:
+ description:
+ - Specify the BIOS implementation.
+ type: str
+ choices: ['seabios', 'ovmf']
+ boot:
+ description:
+ - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+ - You can combine these to set the boot order.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(cnd). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ bootdisk:
+ description:
+ - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
+ type: str
+ cicustom:
+ description:
+ - 'cloud-init: Specify custom files to replace the automatically generated ones at start.'
+ type: str
+ version_added: 1.3.0
+ cipassword:
+ description:
+ - 'cloud-init: password of default user to create.'
+ type: str
+ version_added: 1.3.0
+ citype:
+ description:
+ - 'cloud-init: Specifies the cloud-init configuration format.'
+ - The default depends on the configured operating system type (C(ostype)).
+ - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows.
+ type: str
+ choices: ['nocloud', 'configdrive2']
+ version_added: 1.3.0
+ ciuser:
+ description:
+ - 'cloud-init: username of default user to create.'
+ type: str
+ version_added: 1.3.0
+ clone:
+ description:
+ - Name of VM to be cloned. If C(vmid) is set, C(clone) can take an arbitrary value but is required for initiating the clone.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpu:
+ description:
+ - Specify emulated CPU type.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(kvm64). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cpulimit:
+ description:
+ - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
+ - If the computer has 2 CPUs, it has a total of '2' CPU time
+ type: int
+ cpuunits:
+ description:
+ - Specify CPU weight for a VM.
+ - You can disable fair-scheduler configuration by setting this to 0
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ delete:
+ description:
+ - Specify a list of settings you want to delete.
+ type: str
+ description:
+ description:
+ - Specify the description for the VM. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ digest:
+ description:
+ - Prevent changes if the current configuration file has a different SHA1 digest.
+ - This can be used to prevent concurrent modifications.
+ type: str
+ force:
+ description:
+ - Allow to force stop VM.
+ - Can be used with states C(stopped) and C(restarted).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ format:
+ description:
+ - Target drive's backing file's data format.
+ - Used only with clone
+ - Use I(format=unspecified) and I(full=false) for a linked clone.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(qcow2). If I(proxmox_default_behavior) is set to C(no_defaults),
+ not specifying this option is equivalent to setting it to C(unspecified).
+ Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
+ type: str
+ choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
+ freeze:
+ description:
+ - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
+ type: bool
+ full:
+ description:
+ - Create a full copy of all disks. This is always done when you clone a normal VM.
+ - For VM templates, we try to create a linked clone by default.
+ - Used only with clone
+ type: bool
+ default: 'yes'
+ hostpci:
+ description:
+ - Specify a hash/dictionary mapping host PCI devices into the guest. C(hostpci='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
+ - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
+ - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
+ - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
+ - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ hotplug:
+ description:
+ - Selectively enable hotplug features.
+ - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
+ - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
+ type: str
+ hugepages:
+ description:
+ - Enable/disable hugepages memory.
+ type: str
+ choices: ['any', '2', '1024']
+ ide:
+ description:
+ - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ ipconfig:
+ description:
+ - 'cloud-init: Set the IP configuration.'
+ - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
+ - Values allowed are - C("[gw=<GatewayIPv4>] [,gw6=<GatewayIPv6>] [,ip=<IPv4Format/CIDR>] [,ip6=<IPv6Format/CIDR>]").
+ - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
+ - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address.
+ - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
+ - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.
+ - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
+ type: dict
+ version_added: 1.3.0
+ keyboard:
+ description:
+ - Sets the keyboard layout for VNC server.
+ type: str
+ kvm:
+ description:
+ - Enable/disable KVM hardware virtualization.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ localtime:
+ description:
+ - Sets the real time clock to local time.
+ - This is enabled by default if ostype indicates a Microsoft OS.
+ type: bool
+ lock:
+ description:
+ - Lock/unlock the VM.
+ type: str
+ choices: ['migrate', 'backup', 'snapshot', 'rollback']
+ machine:
+ description:
+ - Specifies the Qemu machine type.
+ - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
+ type: str
+ memory:
+ description:
+ - Memory size in MB for instance.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ migrate_downtime:
+ description:
+ - Sets maximum tolerated downtime (in seconds) for migrations.
+ type: int
+ migrate_speed:
+ description:
+ - Sets maximum speed (in MB/s) for migrations.
+ - A value of 0 is no limit.
+ type: int
+ name:
+ description:
+ - Specifies the VM name. Only used on the configuration web interface.
+ - Required only for C(state=present).
+ type: str
+ nameservers:
+ description:
+ - 'cloud-init: DNS server IP address(es).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ net:
+ description:
+ - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
+ - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
+ - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
+ - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
+ - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
+ - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
+ type: dict
+ newid:
+ description:
+ - VMID for the clone. Used only with clone.
+ - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ node:
+ description:
+ - Proxmox VE node, where the new VM will be created.
+ - Only required for C(state=present).
+ - For other states, it will be autodiscovered.
+ type: str
+ numa:
+ description:
+ - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
+ - C(cpus) CPUs accessing this NUMA node.
+ - C(hostnodes) Host NUMA nodes to use.
+ - C(memory) Amount of memory this NUMA node provides.
+ - C(policy) NUMA allocation policy.
+ type: dict
+ numa_enabled:
+ description:
+ - Enables NUMA.
+ type: bool
+ onboot:
+ description:
+ - Specifies whether a VM will be started during system bootup.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ ostype:
+ description:
+ - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
+ - The l26 is Linux 2.6/3.X Kernel.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(l26). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
+ parallel:
+ description:
+ - A hash/dictionary of host parallel devices to map. C(parallel='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(parallel[n]) where 0 ≤ n ≤ 2.
+ - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
+ type: dict
+ pool:
+ description:
+ - Add the new VM to the specified pool.
+ type: str
+ protection:
+ description:
+ - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
+ type: bool
+ reboot:
+ description:
+ - Allow reboot. If set to C(yes), the VM exits on reboot.
+ type: bool
+ revert:
+ description:
+ - Revert a pending change.
+ type: str
+ sata:
+ description:
+ - A hash/dictionary of volumes used as SATA hard disks or CD-ROMs. C(sata='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsi:
+ description:
+ - A hash/dictionary of volumes used as SCSI hard disks or CD-ROMs. C(scsi='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsihw:
+ description:
+ - Specifies the SCSI controller model.
+ type: str
+ choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+ searchdomains:
+ description:
+ - 'cloud-init: Sets DNS search domain(s).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ serial:
+ description:
+ - A hash/dictionary of serial devices to create inside the VM. C('{"key":"value", "key":"value"}').
+ - Keys allowed are - C(serial[n]) (str; required) where 0 ≤ n ≤ 3.
+ - Values allowed are - C((/dev/.+|socket)).
+ - Caution - if you pass through a host serial device, it is no longer possible to migrate such machines; use with special care.
+ type: dict
+ shares:
+ description:
+ - Sets the amount of memory shares for auto-ballooning. (0 - 50000).
+ - The larger the number is, the more memory this VM gets.
+ - The number is relative to weights of all other running VMs.
+ - Using 0 disables auto-ballooning, this means no limit.
+ type: int
+ skiplock:
+ description:
+ - Ignore locks.
+ - Only root is allowed to use this option.
+ type: bool
+ smbios:
+ description:
+ - Specifies SMBIOS type 1 fields.
+ type: str
+ snapname:
+ description:
+ - The name of the snapshot. Used only with clone.
+ type: str
+ sockets:
+ description:
+ - Sets the number of CPU sockets. (1 - N).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ sshkeys:
+ description:
+ - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
+ type: str
+ version_added: 1.3.0
+ startdate:
+ description:
+ - Sets the initial date of the real time clock.
+ - Valid formats for the date are C('now'), C('2016-09-25T16:01:21'), or C('2016-09-25').
+ type: str
+ startup:
+ description:
+ - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+ - Order is a non-negative number defining the general startup order.
+ - Shutdown is done in reverse order.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the instance.
+ - If C(current), the current state of the VM will be fetched. You can access it with C(results.status).
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
+ default: present
+ storage:
+ description:
+ - Target storage for full clone.
+ type: str
+ tablet:
+ description:
+ - Enables/disables the USB tablet device.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ target:
+ description:
+ - Target node. Only allowed if the original VM is on shared storage.
+ - Used only with clone.
+ type: str
+ tdf:
+ description:
+ - Enables/disables time drift fix.
+ type: bool
+ template:
+ description:
+ - Enables/disables the template.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ timeout:
+ description:
+ - Timeout for operations.
+ type: int
+ default: 30
+ update:
+ description:
+ - If C(yes), the VM will be updated with new values.
+ - Because of how the API operates and for security reasons, updating the following parameters is disabled -
+ - C(net, virtio, ide, sata, scsi). For example, updating C(net) updates the MAC address and C(virtio) always creates a new disk.
+ - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'no'
+ vcpus:
+ description:
+ - Sets number of hotplugged vcpus.
+ type: int
+ vga:
+ description:
+ - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(std). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+ virtio:
+ description:
+ - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ vmid:
+ description:
+ - Specifies the VM ID. Alternatively, the I(name) parameter can be used to identify the VM.
+ - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ watchdog:
+ description:
+ - Creates a virtual hardware watchdog device.
+ type: str
+ proxmox_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems when
+ a user expects different behavior from Proxmox by default, or when the filled-in options
+ cause problems once they have been set.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value.
+ - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
+ I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
+ I(tablet), I(template), and I(vga) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+EXAMPLES = '''
+- name: Create new VM with minimal options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+
+- name: Create new VM with minimal options and given vmid
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ vmid: 100
+
+- name: Create new VM with two network interface options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ net1: 'e1000,bridge=vmbr2'
+
+- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ virtio:
+ virtio0: 'VMs_LVM:10'
+ virtio1: 'VMs:2,format=qcow2'
+ virtio2: 'VMs:5,format=raw'
+ cores: 4
+ vcpus: 2
+
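+# A hypothetical example (storage names assumed): SATA disks use the same
+# 'storage:size,format=value' value syntax as virtio/scsi/ide.
+- name: Create new VM with two SATA hard disks
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ sata:
+ sata0: 'VMs_LVM:10,format=raw'
+ sata1: 'VMs:5,format=qcow2'
+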
+- name: >
+ Clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ format: qcow2
+ timeout: 500
+
+- name: >
+ Create linked clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ full: no
+ format: unspecified
+ timeout: 500
+
+- name: Clone VM with source vmid and target newid and raw format
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: arbitrary_name
+ vmid: 108
+ newid: 152
+ name: zavala
+ node: sabrewulf
+ storage: LVM_STO
+ format: raw
+ timeout: 300
+
+- name: Create new VM and lock it for snapshot
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ lock: snapshot
+
+- name: Create new VM and set protection to disable the remove VM and remove disk operations
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ protection: yes
+
+- name: Create new VM using cloud-init with a username and password
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ ciuser: mylinuxuser
+ cipassword: supersecret
+ searchdomains: 'mydomain.internal'
+ nameservers: 1.1.1.1
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1'
+
+- name: Create new VM using Cloud-Init with an ssh key
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+'
+ searchdomains: 'mydomain.internal'
+ nameservers:
+ - '1.1.1.1'
+ - '8.8.8.8'
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24'
+
+- name: Start VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: started
+
+- name: Stop VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+
+- name: Stop VM with force
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+ force: yes
+
+- name: Restart VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: restarted
+
+- name: Remove VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: absent
+
+- name: Get VM current state
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: current
+
+- name: Update VM configuration
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ cores: 8
+ memory: 16384
+ update: yes
+
+- name: Delete QEMU parameters
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ delete: 'args,template,cpulimit'
+
+- name: Revert a pending change
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ revert: 'template,cpulimit'
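+
+# A hypothetical example: setting proxmox_default_behavior explicitly
+# (documented above) avoids the deprecation warning.
+- name: Create new VM without the legacy option defaults
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ proxmox_default_behavior: no_defaults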
+'''
+
+RETURN = '''
+devices:
+ description: Dictionary of devices created or used, indexed by device name.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "ide0": "VMS_LVM:vm-115-disk-1",
+ "ide1": "VMs:115/vm-115-disk-3.raw",
+ "virtio0": "VMS_LVM:vm-115-disk-2",
+ "virtio1": "VMs:115/vm-115-disk-1.qcow2",
+ "virtio2": "VMs:115/vm-115-disk-2.raw"
+ }'
+mac:
+ description: Dictionary of MAC addresses created, indexed by net[n]. Useful when you want to use provisioning systems like Foreman via PXE.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "net0": "3E:6E:97:D2:31:9F",
+ "net1": "B6:A1:FC:EF:78:A4"
+ }'
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 115
+status:
+ description:
+ - The current virtual machine status.
+ - Returned only when C(state=current).
+ returned: success
+ type: dict
+ sample: '{
+ "changed": false,
+ "msg": "VM kropta with vmid = 110 is running",
+ "status": "running"
+ }'
+'''
+
+import os
+import re
+import time
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_nextvmid(module, proxmox):
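+ """Return the next free VM id reported by the cluster, or fail the module."""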
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, name):
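+ """Return the list of vmids of cluster VMs whose name matches (possibly empty)."""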
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name]
+
+
+def get_vm(proxmox, vmid):
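+ """Return the cluster resource entries for vmid (empty list if it does not exist)."""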
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def get_vminfo(module, proxmox, node, vmid, **kwargs):
+ global results
+ results = {}
+ mac = {}
+ devices = {}
+ try:
+ vm = proxmox.nodes(node).qemu(vmid).config.get()
+ except Exception as e:
+ module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
+
+ # Sanitize kwargs. Remove undefined args and make sure True and False are converted to int.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Split information by type
+ for k, v in kwargs.items():
+ if re.match(r'net[0-9]', k) is not None:
+ interface = k
+ k = vm[k]
+ k = re.search('=(.*?),', k).group(1)
+ mac[interface] = k
+ if (re.match(r'virtio[0-9]', k) is not None or
+ re.match(r'ide[0-9]', k) is not None or
+ re.match(r'scsi[0-9]', k) is not None or
+ re.match(r'sata[0-9]', k) is not None):
+ device = k
+ k = vm[k]
+ k = re.search('(.*?),', k).group(1)
+ devices[device] = k
+
+ results['mac'] = mac
+ results['devices'] = devices
+ results['vmid'] = int(vmid)
+
+
+def settings(module, proxmox, vmid, node, name, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove undefined args and make sure True and False are converted to int.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if proxmox_node.qemu(vmid).config.set(**kwargs) is None:
+ return True
+ else:
+ return False
+
+
+def wait_for_task(module, proxmox, node, taskid):
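+ """Poll the task once per second until it stops with exitstatus OK or the
+ module's timeout (in seconds) runs out. Return True on success."""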
+ timeout = module.params['timeout']
+
+ while timeout:
+ task = proxmox.nodes(node).tasks(taskid).status.get()
+ if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
+ # Wait an extra second as the API can be ahead of the hypervisor
+ time.sleep(1)
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ break
+ time.sleep(1)
+ return False
+
+
+def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
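+ """Create, update or clone a VM depending on the update/clone parameters,
+ after filtering kwargs for the detected PVE version. Return True on success."""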
+ # Available only in PVE 4
+ only_v4 = ['force', 'protection', 'skiplock']
+ only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig']
+
+ # Valid clone parameters
+ valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
+ clone_params = {}
+ # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
+ vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
+
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove undefined args and make sure True and False are converted to int.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+ kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
+
+ # The features work only on PVE 4+
+ if PVE_MAJOR_VERSION < 4:
+ for p in only_v4:
+ if p in kwargs:
+ del kwargs[p]
+
+ # The features work only on PVE 6
+ if PVE_MAJOR_VERSION < 6:
+ for p in only_v6:
+ if p in kwargs:
+ del kwargs[p]
+
+ # 'sshkeys' param expects an urlencoded string
+ if 'sshkeys' in kwargs:
+ urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
+ kwargs['sshkeys'] = str(urlencoded_ssh_keys)
+
+ # If update, don't update disk (virtio, ide, sata, scsi) and network interface
+ # pool parameter not supported by qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
+ if update:
+ if 'virtio' in kwargs:
+ del kwargs['virtio']
+ if 'sata' in kwargs:
+ del kwargs['sata']
+ if 'scsi' in kwargs:
+ del kwargs['scsi']
+ if 'ide' in kwargs:
+ del kwargs['ide']
+ if 'net' in kwargs:
+ del kwargs['net']
+ if 'force' in kwargs:
+ del kwargs['force']
+ if 'pool' in kwargs:
+ del kwargs['pool']
+
+ # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Rename numa_enabled to numa, according to the API documentation.
+ if 'numa_enabled' in kwargs:
+ kwargs['numa'] = kwargs['numa_enabled']
+ del kwargs['numa_enabled']
+
+ # PVE api expects strings for the following params
+ if 'nameservers' in module.params:
+ nameservers = module.params.pop('nameservers')
+ if nameservers:
+ kwargs['nameserver'] = ' '.join(nameservers)
+ if 'searchdomains' in module.params:
+ searchdomains = module.params.pop('searchdomains')
+ if searchdomains:
+ kwargs['searchdomain'] = ' '.join(searchdomains)
+
+ # -args and skiplock require root@pam user
+ if module.params['api_user'] == "root@pam" and module.params['args'] is None:
+ if not update:
+ kwargs['args'] = vm_args
+ elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
+ kwargs['args'] = module.params['args']
+ elif module.params['api_user'] != "root@pam" and module.params['args'] is not None:
+ module.fail_json(msg='args parameter requires root@pam user.')
+
+ if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None:
+ module.fail_json(msg='skiplock parameter requires root@pam user.')
+
+ if update:
+ if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
+ return True
+ else:
+ return False
+ elif module.params['clone'] is not None:
+ for param in valid_clone_params:
+ if module.params[param] is not None:
+ clone_params[param] = module.params[param]
+ clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
+ taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
+ else:
+ taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
+
+ if not wait_for_task(module, proxmox, node, taskid):
+ module.fail_json(msg='Reached timeout while waiting for VM creation. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def start_vm(module, proxmox, vm):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.start.post()
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def stop_vm(module, proxmox, vm, force):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def proxmox_version(proxmox):
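+ """Return the PVE version reported by the API as a LooseVersion."""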
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ acpi=dict(type='bool'),
+ agent=dict(type='bool'),
+ args=dict(type='str'),
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ autostart=dict(type='bool'),
+ balloon=dict(type='int'),
+ bios=dict(choices=['seabios', 'ovmf']),
+ boot=dict(type='str'),
+ bootdisk=dict(type='str'),
+ cicustom=dict(type='str'),
+ cipassword=dict(type='str', no_log=True),
+ citype=dict(type='str', choices=['nocloud', 'configdrive2']),
+ ciuser=dict(type='str'),
+ clone=dict(type='str', default=None),
+ cores=dict(type='int'),
+ cpu=dict(type='str'),
+ cpulimit=dict(type='int'),
+ cpuunits=dict(type='int'),
+ delete=dict(type='str', default=None),
+ description=dict(type='str'),
+ digest=dict(type='str'),
+ force=dict(type='bool'),
+ format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
+ freeze=dict(type='bool'),
+ full=dict(type='bool', default=True),
+ hostpci=dict(type='dict'),
+ hotplug=dict(type='str'),
+ hugepages=dict(choices=['any', '2', '1024']),
+ ide=dict(type='dict'),
+ ipconfig=dict(type='dict'),
+ keyboard=dict(type='str'),
+ kvm=dict(type='bool'),
+ localtime=dict(type='bool'),
+ lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
+ machine=dict(type='str'),
+ memory=dict(type='int'),
+ migrate_downtime=dict(type='int'),
+ migrate_speed=dict(type='int'),
+ name=dict(type='str'),
+ nameservers=dict(type='list', elements='str'),
+ net=dict(type='dict'),
+ newid=dict(type='int', default=None),
+ node=dict(),
+ numa=dict(type='dict'),
+ numa_enabled=dict(type='bool'),
+ onboot=dict(type='bool'),
+ ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']),
+ parallel=dict(type='dict'),
+ pool=dict(type='str'),
+ protection=dict(type='bool'),
+ reboot=dict(type='bool'),
+ revert=dict(type='str'),
+ sata=dict(type='dict'),
+ scsi=dict(type='dict'),
+ scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
+ serial=dict(type='dict'),
+ searchdomains=dict(type='list', elements='str'),
+ shares=dict(type='int'),
+ skiplock=dict(type='bool'),
+ smbios=dict(type='str'),
+ snapname=dict(type='str'),
+ sockets=dict(type='int'),
+ sshkeys=dict(type='str'),
+ startdate=dict(type='str'),
+ startup=dict(),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
+ storage=dict(type='str'),
+ tablet=dict(type='bool'),
+ target=dict(type='str'),
+ tdf=dict(type='bool'),
+ template=dict(type='bool'),
+ timeout=dict(type='int', default=30),
+ update=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=False),
+ vcpus=dict(type='int'),
+ vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
+ virtio=dict(type='dict'),
+ vmid=dict(type='int', default=None),
+ watchdog=dict(),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ ),
+ mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
+ required_one_of=[('name', 'vmid',)],
+ required_if=[('state', 'present', ['node'])]
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ clone = module.params['clone']
+ cpu = module.params['cpu']
+ cores = module.params['cores']
+ delete = module.params['delete']
+ memory = module.params['memory']
+ name = module.params['name']
+ newid = module.params['newid']
+ node = module.params['node']
+ revert = module.params['revert']
+ sockets = module.params['sockets']
+ state = module.params['state']
+ update = bool(module.params['update'])
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ acpi=True,
+ autostart=False,
+ balloon=0,
+ boot='cnd',
+ cores=1,
+ cpu='kvm64',
+ cpuunits=1000,
+ force=False,
+ format='qcow2',
+ kvm=True,
+ memory=512,
+ ostype='l26',
+ sockets=1,
+ tablet=False,
+ template=False,
+ vga='std',
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ if module.params['format'] == 'unspecified':
+ module.params['format'] = None
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global PVE_MAJOR_VERSION
+ version = proxmox_version(proxmox)
+ PVE_MAJOR_VERSION = 3 if version < LooseVersion('4.0') else version.version[0]
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid is not defined then retrieve its value from the vm name,
+ # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
+ if not vmid:
+ if state == 'present' and not update and not clone and not delete and not revert:
+ try:
+ vmid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+ else:
+ clone_target = clone or name
+ try:
+ vmid = get_vmid(proxmox, clone_target)[0]
+ except Exception:
+ vmid = -1
+
+ if clone is not None:
+ # If newid is not defined then retrieve the next free id from ProxmoxAPI
+ if not newid:
+ try:
+ newid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+
+ # Ensure source VM name exists when cloning
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
+
+ # Ensure source VM id exists when cloning
+ if not get_vm(proxmox, vmid):
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ # Ensure the chosen VM name doesn't already exist when cloning
+ if get_vmid(proxmox, name):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+
+ # Ensure the chosen VM id doesn't already exist when cloning
+ if get_vm(proxmox, newid):
+ module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))
+
+ if delete is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, delete=delete)
+ module.exit_json(changed=True, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
+
+ if revert is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, revert=revert)
+ module.exit_json(changed=True, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... '.format(name, vmid) + str(e))
+
+ if state == 'present':
+ try:
+ if get_vm(proxmox, vmid) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
+ elif get_vmid(proxmox, name) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+ elif not node or not name:
+ module.fail_json(msg='node and name are mandatory for creating/updating a VM')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
+ create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update,
+ acpi=module.params['acpi'],
+ agent=module.params['agent'],
+ autostart=module.params['autostart'],
+ balloon=module.params['balloon'],
+ bios=module.params['bios'],
+ boot=module.params['boot'],
+ bootdisk=module.params['bootdisk'],
+ cicustom=module.params['cicustom'],
+ cipassword=module.params['cipassword'],
+ citype=module.params['citype'],
+ ciuser=module.params['ciuser'],
+ cpulimit=module.params['cpulimit'],
+ cpuunits=module.params['cpuunits'],
+ description=module.params['description'],
+ digest=module.params['digest'],
+ force=module.params['force'],
+ freeze=module.params['freeze'],
+ hostpci=module.params['hostpci'],
+ hotplug=module.params['hotplug'],
+ hugepages=module.params['hugepages'],
+ ide=module.params['ide'],
+ ipconfig=module.params['ipconfig'],
+ keyboard=module.params['keyboard'],
+ kvm=module.params['kvm'],
+ localtime=module.params['localtime'],
+ lock=module.params['lock'],
+ machine=module.params['machine'],
+ migrate_downtime=module.params['migrate_downtime'],
+ migrate_speed=module.params['migrate_speed'],
+ net=module.params['net'],
+ numa=module.params['numa'],
+ numa_enabled=module.params['numa_enabled'],
+ onboot=module.params['onboot'],
+ ostype=module.params['ostype'],
+ parallel=module.params['parallel'],
+ pool=module.params['pool'],
+ protection=module.params['protection'],
+ reboot=module.params['reboot'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ scsihw=module.params['scsihw'],
+ serial=module.params['serial'],
+ shares=module.params['shares'],
+ skiplock=module.params['skiplock'],
+ smbios1=module.params['smbios'],
+ snapname=module.params['snapname'],
+ sshkeys=module.params['sshkeys'],
+ startdate=module.params['startdate'],
+ startup=module.params['startup'],
+ tablet=module.params['tablet'],
+ target=module.params['target'],
+ tdf=module.params['tdf'],
+ template=module.params['template'],
+ vcpus=module.params['vcpus'],
+ vga=module.params['vga'],
+ virtio=module.params['virtio'],
+ watchdog=module.params['watchdog'])
+
+ if not clone:
+ get_vminfo(module, proxmox, node, vmid,
+ ide=module.params['ide'],
+ net=module.params['net'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ virtio=module.params['virtio'])
+ if update:
+ module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid))
+ elif clone is not None:
+ module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
+ else:
+ module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+ except Exception as e:
+ if update:
+ module.fail_json(msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
+ elif clone is not None:
+ module.fail_json(msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
+ else:
+ module.fail_json(msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
+
+ elif state == 'started':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']) and start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False)
+
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+ taskid = proxmox_node.qemu.delete(vmid)
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for VM removal. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ else:
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'current':
+ status = {}
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if status:
+ module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py
new file mode 100644
index 00000000..541dc28e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: management of OS templates in Proxmox VE cluster
+description:
+ - allows you to upload/delete templates in Proxmox VE cluster
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use the PROXMOX_PASSWORD environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: 'no'
+ type: bool
+ node:
+ description:
+ - Proxmox VE node on which to operate on the template
+ type: str
+ src:
+ description:
+ - path to the file to upload
+ - required only for C(state=present)
+ type: path
+ template:
+ description:
+ - the template name
+ - Required for state C(absent) to delete a template.
+ - Required for state C(present) to download an appliance container template (pveam).
+ type: str
+ content_type:
+ description:
+ - content type
+ - required only for C(state=present)
+ type: str
+ default: 'vztmpl'
+ choices: ['vztmpl', 'iso']
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - can be used only with C(state=present); an existing template will be overwritten
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicate desired state of the template
+ type: str
+ choices: ['present', 'absent']
+ default: present
+notes:
+ - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = '''
+- name: Upload new openvz template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: >
+ Upload new openvz template with minimal options, using the PROXMOX_PASSWORD
+ environment variable (you should export it beforehand)
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: Upload new openvz template with all options and force overwrite
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+ force: yes
+
+- name: Delete template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ template: ubuntu-14.04-x86_64.tar.gz
+ state: absent
+
+- name: Download proxmox appliance container template
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
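+
+# A hypothetical example (ISO filename assumed): ISO images are uploaded the
+# same way, with content_type set to iso.
+- name: Upload an ISO image
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: iso
+ src: ~/debian-10.10.0-amd64-netinst.iso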
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_template(proxmox, node, storage, content_type, template):
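+ """Return a non-empty list if a volume storage:content_type/template exists."""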
+ return [True for tmpl in proxmox.nodes(node).storage(storage).content.get()
+ if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
+
+
+def task_status(module, proxmox, node, taskid, timeout):
+ """
+ Check the task status and wait until the task is completed or the timeout is reached.
+ """
+ while timeout:
+ task_status = proxmox.nodes(node).tasks(taskid).status.get()
+ if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for uploading/downloading template. Last line in task before timeout: %s'
+ % proxmox.nodes(node).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def upload_template(module, proxmox, node, storage, content_type, realpath, timeout):
+ taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def download_template(module, proxmox, node, storage, template, timeout):
+ taskid = proxmox.nodes(node).aplinfo.post(storage=storage, template=template)
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ volid = '%s:%s/%s' % (storage, content_type, template)
+ proxmox.nodes(node).storage(storage).content.delete(volid)
+ while timeout:
+ if not get_template(proxmox, node, storage, content_type, template):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for deleting template.')
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ src=dict(type='path'),
+ template=dict(),
+ content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
+ storage=dict(default='local'),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ storage = module.params['storage']
+ timeout = module.params['timeout']
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ # Used to test the validity of the token if given
+ proxmox.version.get()
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ if state == 'present':
+ try:
+ content_type = module.params['content_type']
+ src = module.params['src']
+
+ # download appliance template
+ if content_type == 'vztmpl' and not src:
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param for downloading appliance template is mandatory')
+
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+
+ if download_template(module, proxmox, node, storage, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
+
+ template = os.path.basename(src)
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+ elif not src:
+ module.fail_json(msg='src param for uploading template file is mandatory')
+ elif not (os.path.exists(src) and os.path.isfile(src)):
+ module.fail_json(msg='template file at path %s does not exist' % src)
+
+ if upload_template(module, proxmox, node, storage, content_type, src, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
+
+ elif state == 'absent':
+ try:
+ content_type = module.params['content_type']
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param is mandatory')
+ elif not get_template(proxmox, node, storage, content_type, template):
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
+
+ if delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
new file mode 100644
index 00000000..1de93e60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_user_info
+short_description: Retrieve information about one or more Proxmox VE users
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE users.
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm']
+ type: str
+ user:
+ description:
+ - Restrict results to a specific user.
+ aliases: ['name']
+ type: str
+ userid:
+ description:
+ - Restrict results to a specific user ID, which is a concatenation of the user and domain parts.
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+EXAMPLES = '''
+- name: List existing users
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_users
+
+- name: List existing users in the pve authentication realm
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_users_pve
+
+- name: Retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ userid: admin@pve
+ register: proxmox_user_admin
+
+- name: Alternative way to retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ user: admin
+ domain: pve
+ register: proxmox_user_admin
+'''
+
+
+RETURN = '''
+proxmox_users:
+ description: List of users.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the user.
+ returned: on success
+ type: str
+ domain:
+ description: User's authentication realm, also the right part of the user ID.
+ returned: on success
+ type: str
+ email:
+ description: User's email address.
+ returned: on success
+ type: str
+ enabled:
+ description: User's account state.
+ returned: on success
+ type: bool
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ firstname:
+ description: User's first name.
+ returned: on success
+ type: str
+ groups:
+ description: List of groups which the user is a member of.
+ returned: on success
+ type: list
+ elements: str
+ keys:
+ description: User's two factor authentication keys.
+ returned: on success
+ type: str
+ lastname:
+ description: User's last name.
+ returned: on success
+ type: str
+ tokens:
+ description: List of API tokens associated with the user.
+ returned: on success
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the token.
+ returned: on success
+ type: str
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ privsep:
+ description: Describe if the API token is further restricted with ACLs or is fully privileged.
+ returned: on success
+ type: bool
+ tokenid:
+ description: Token name.
+ returned: on success
+ type: str
+ user:
+ description: User's login name, also the left part of the user ID.
+ returned: on success
+ type: str
+ userid:
+ description: Proxmox user ID, represented as user@realm.
+ returned: on success
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxUserInfoAnsible(ProxmoxAnsible):
+ def get_user(self, userid):
+ try:
+ user = self.proxmox_api.access.users.get(userid)
+ except Exception:
+ self.module.fail_json(msg="User '%s' does not exist" % userid)
+ user['userid'] = userid
+ return ProxmoxUser(user)
+
+ def get_users(self, domain=None):
+ users = self.proxmox_api.access.users.get(full=1)
+ users = [ProxmoxUser(user) for user in users]
+ if domain:
+ return [user for user in users if user.user['domain'] == domain]
+ return users
+
+
+class ProxmoxUser:
+ def __init__(self, user):
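+ """Normalize a user dict returned by the API: booleans are converted,
+ userid is split into user and domain, and groups/tokens get a stable shape."""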
+ self.user = dict()
+ # Data representation differs depending on the API call
+ for k, v in user.items():
+ if k == 'enable':
+ self.user['enabled'] = proxmox_to_ansible_bool(user['enable'])
+ elif k == 'userid':
+ self.user['user'] = user['userid'].split('@')[0]
+ self.user['domain'] = user['userid'].split('@')[1]
+ self.user[k] = v
+ elif k in ['groups', 'tokens'] and (v == '' or v is None):
+ self.user[k] = []
+ elif k == 'groups' and isinstance(v, str):
+ self.user['groups'] = v.split(',')
+ elif k == 'tokens' and isinstance(v, list):
+ for token in v:
+ if 'privsep' in token:
+ token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
+ self.user['tokens'] = v
+ elif k == 'tokens' and isinstance(v, dict):
+ self.user['tokens'] = list()
+ for tokenid, tokenvalues in v.items():
+ t = tokenvalues
+ t['tokenid'] = tokenid
+ if 'privsep' in tokenvalues:
+ t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep'])
+ self.user['tokens'].append(t)
+ else:
+ self.user[k] = v
+
+
+def proxmox_user_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm']),
+ user=dict(type='str', aliases=['name']),
+ userid=dict(type='str'),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ user_info_args = proxmox_user_info_argument_spec()
+ module_args.update(user_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ mutually_exclusive=[('user', 'userid'), ('domain', 'userid')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxUserInfoAnsible(module)
+ domain = module.params['domain']
+ user = module.params['user']
+ if user and domain:
+ userid = user + '@' + domain
+ else:
+ userid = module.params['userid']
+
+ if userid:
+ users = [proxmox.get_user(userid=userid)]
+ else:
+ users = proxmox.get_users(domain=domain)
+ result['proxmox_users'] = [user.user for user in users]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
new file mode 100644
index 00000000..8d9374a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
@@ -0,0 +1,626 @@
+#!/usr/bin/python
+#
+# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
+# Frameworks
+# Copyright (C) 2016 PubNub Inc.
+# http://www.pubnub.com/
+# http://www.pubnub.com/terms
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pubnub_blocks
+short_description: PubNub blocks management module.
+description:
+ - "This module allows Ansible to interface with the PubNub BLOCKS
+ infrastructure by providing the following operations: create / remove,
+ start / stop and rename for blocks and create / modify / remove for event
+ handlers"
+author:
+ - PubNub <support@pubnub.com> (@pubnub)
+ - Sergey Mamontov <sergey@pubnub.com> (@parfeon)
+requirements:
+ - "python >= 2.7"
+ - "pubnub_blocks_client >= 1.0"
+options:
+ email:
+ description:
+ - Email of the account for which a new session should be started.
+ - "Not required if C(cache) contains the result of a previous module call (in
+ the same play)."
+ required: false
+ type: str
+ password:
+ description:
+ - Password which matches the account to which the specified C(email) belongs.
+ - "Not required if C(cache) contains the result of a previous module call (in
+ the same play)."
+ required: false
+ type: str
+ cache:
+ description: >
+ In case a single play uses the blocks management module a few times, it
+ is preferred to enable 'caching' by making the previous module share
+ gathered artifacts and passing them to this parameter.
+ required: false
+ type: dict
+ default: {}
+ account:
+ description:
+ - "Name of PubNub account for from which C(application) will be used to
+ manage blocks."
+ - "User's account will be used if value not set or empty."
+ type: str
+ required: false
+ application:
+ description:
+ - "Name of target PubNub application for which blocks configuration on
+ specific C(keyset) will be done."
+ type: str
+ required: true
+ keyset:
+ description:
+ - Name of the application's key set which is bound to the managed blocks.
+ type: str
+ required: true
+ state:
+ description:
+ - "Intended block state after event handlers creation / update process
+ will be completed."
+ required: false
+ default: 'present'
+ choices: ['started', 'stopped', 'present', 'absent']
+ type: str
+ name:
+ description:
+ - Name of the managed block, which will later be visible on admin.pubnub.com.
+ required: true
+ type: str
+ description:
+ description:
+ - Short block description which will later be visible on
+ admin.pubnub.com. Used only if the block doesn't exist yet; it won't
+ change the description of an existing block.
+ required: false
+ type: str
+ event_handlers:
+ description:
+ - "List of event handlers which should be updated for specified block
+ C(name)."
+ - "Each entry for new event handler should contain: C(name), C(src),
+ C(channels), C(event). C(name) used as event handler name which can be
+ used later to make changes to it."
+ - C(src) is full path to file with event handler code.
+ - "C(channels) is name of channel from which event handler is waiting
+ for events."
+ - "C(event) is type of event which is able to trigger event handler:
+ I(js-before-publish), I(js-after-publish), I(js-after-presence)."
+ - "Each entry for existing handlers should contain C(name) (so target
+ handler can be identified). Rest parameters (C(src), C(channels) and
+ C(event)) can be added if changes required for them."
+ - "It is possible to rename event handler by adding C(changes) key to
+ event handler payload and pass dictionary, which will contain single key
+ C(name), where new name should be passed."
+ - "To remove particular event handler it is possible to set C(state) for
+ it to C(absent) and it will be removed."
+ required: false
+ default: []
+ type: list
+ changes:
+ description:
+ - "List of fields which should be changed by block itself (doesn't
+ affect any event handlers)."
+ - "Possible options for change is: C(name)."
+ required: false
+ default: {}
+ type: dict
+ validate_certs:
+ description:
+ - "This key allow to try skip certificates check when performing REST API
+ calls. Sometimes host may have issues with certificates on it and this
+ will cause problems to call PubNub REST API."
+ - If check should be ignored C(False) should be passed to this parameter.
+ required: false
+ default: true
+ type: bool
+'''
+
+EXAMPLES = '''
+# Event handler create example.
+- name: Create single event handler
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ src: '{{ path_to_handler_source }}'
+ name: '{{ handler_name }}'
+ event: 'js-before-publish'
+ channels: '{{ handler_channel }}'
+
+# Change event handler trigger event type.
+- name: Change event handler 'event'
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ name: '{{ handler_name }}'
+ event: 'js-after-publish'
+
+# Stop block and event handlers.
+- name: Stopping block
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: stopped
+
+# Multiple module calls with cached result passing
+- name: Create '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_1_source }}'
+ name: '{{ event_handler_1_name }}'
+ channels: '{{ event_handler_1_channel }}'
+ event: 'js-before-publish'
+- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_2_source }}'
+ name: '{{ event_handler_2_name }}'
+ channels: '{{ event_handler_2_channel }}'
+ event: 'js-before-publish'
+- name: Start '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: started
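+
+# A minimal sketch of the rename / removal flows described in the
+# 'event_handlers' documentation above; all values are illustrative.
+- name: Rename one event handler and remove another
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ name: '{{ handler_name }}'
+ changes:
+ name: '{{ new_handler_name }}'
+ -
+ name: '{{ obsolete_handler_name }}'
+ state: absent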
+'''
+
+RETURN = '''
+module_cache:
+ description: "Cached account information. In case if with single play module
+ used few times it is better to pass cached data to next module calls to speed
+ up process."
+ type: dict
+ returned: always
+'''
+import copy
+import os
+
+try:
+ # Import PubNub BLOCKS client.
+ from pubnub_blocks_client import User, Account, Owner, Application, Keyset
+ from pubnub_blocks_client import Block, EventHandler
+ from pubnub_blocks_client import exceptions
+ HAS_PUBNUB_BLOCKS_CLIENT = True
+except ImportError:
+ HAS_PUBNUB_BLOCKS_CLIENT = False
+ User = None
+ Account = None
+ Owner = None
+ Application = None
+ Keyset = None
+ Block = None
+ EventHandler = None
+ exceptions = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+def pubnub_user(module):
+ """Create and configure user model if it possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+
+ :rtype: User
+ :return: Reference on initialized and ready to use user or 'None' in
+ case if not all required information has been passed to block.
+ """
+ user = None
+ params = module.params
+
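+ # Prefer restoring the user model from a previous run's cached data;
+ # otherwise authorize with the provided email/password credentials.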
+ if params.get('cache') and params['cache'].get('module_cache'):
+ cache = params['cache']['module_cache']
+ user = User()
+ user.restore(cache=copy.deepcopy(cache['pnm_user']))
+ elif params.get('email') and params.get('password'):
+ user = User(email=params.get('email'), password=params.get('password'))
+ else:
+ err_msg = 'It looks like no account credentials have been passed or ' \
+ 'the \'cache\' field doesn\'t contain the result of a ' \
+ 'previous module call.'
+ module.fail_json(msg='Missing account credentials.',
+ description=err_msg, changed=False)
+
+ return user
+
+
+def pubnub_account(module, user):
+ """Create and configure account if it is possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type user: User
+ :param user: Reference on authorized user for which one of accounts
+ should be used during manipulations with block.
+
+ :rtype: Account
+ :return: Reference on initialized and ready to use account or 'None' in
+ case if not all required information has been passed to block.
+ """
+ params = module.params
+ if params.get('account'):
+ account_name = params.get('account')
+ account = user.account(name=params.get('account'))
+ if account is None:
+ err_frmt = 'It looks like there is no \'{0}\' account for the ' \
+ 'authorized user. Please make sure that the correct ' \
+ 'name has been passed during module configuration.'
+ module.fail_json(msg='Missing account.',
+ description=err_frmt.format(account_name),
+ changed=False)
+ else:
+ account = user.accounts()[0]
+
+ return account
+
+
+def pubnub_application(module, account):
+ """Retrieve reference on target application from account model.
+
+ NOTE: In case if account authorization will fail or there is no
+ application with specified name, module will exit with error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model from which reference
+ on application should be fetched.
+
+ :rtype: Application
+ :return: Reference on initialized and ready to use application model.
+ """
+ application = None
+ params = module.params
+ try:
+ application = account.application(params['application'])
+ except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=dict(account))
+
+ if application is None:
+ err_fmt = 'There is no \'{0}\' application for {1}. Make sure that ' \
+ 'the correct application name has been passed. If the ' \
+ 'application doesn\'t exist you can create it on admin.pubnub.com.'
+ email = account.owner.email
+ module.fail_json(msg=err_fmt.format(params['application'], email),
+ changed=account.changed, module_cache=dict(account))
+
+ return application
+
+
+def pubnub_keyset(module, account, application):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: In case if there is no keyset with specified name, module will
+ exit with error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be
+ used in case of error to export cached data.
+ :type application: Application
+ :param application: Reference on PubNub application model from which
+ reference on keyset should be fetched.
+
+ :rtype: Keyset
+ :return: Reference on initialized and ready to use keyset model.
+ """
+ params = module.params
+ keyset = application.keyset(params['keyset'])
+ if keyset is None:
+ err_fmt = 'There is no \'{0}\' keyset for the \'{1}\' application. ' \
+ 'Make sure that the correct keyset name has been passed. If ' \
+ 'the keyset doesn\'t exist you can create it on admin.pubnub.com.'
+ module.fail_json(msg=err_fmt.format(params['keyset'],
+ application.name),
+ changed=account.changed, module_cache=dict(account))
+
+ return keyset
+
+
+def pubnub_block(module, account, keyset):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: In case if there is no block with specified name and module
+ configured to start/stop it, module will exit with error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be used in
+ case of error to export cached data.
+ :type keyset: Keyset
+ :param keyset: Reference on keyset model from which reference on block
+ should be fetched.
+
+ :rtype: Block
+ :return: Reference on initialized and ready to use keyset model.
+ """
+ block = None
+ params = module.params
+ try:
+ block = keyset.block(params['name'])
+ except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed, module_cache=dict(account))
+
+ # Report an error if the block doesn't exist but a start/stop has
+ # been requested at the same time.
+ if block is None and params['state'] in ['started', 'stopped']:
+ block_name = params.get('name')
+ module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
+ changed=account.changed, module_cache=dict(account))
+
+ if block is None and params['state'] == 'present':
+ block = Block(name=params.get('name'),
+ description=params.get('description'))
+ keyset.add_block(block)
+
+ if block:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+ if params.get('description'):
+ block.description = params.get('description')
+
+ return block
+
+
+def pubnub_event_handler(block, data):
+ """Retrieve reference on target event handler from application model.
+
+ :type block: Block
+ :param block: Reference on block model from which reference on event
+ handlers should be fetched.
+ :type data: dict
+ :param data: Reference on dictionary which contain information about
+ event handler and whether it should be created or not.
+
+ :rtype: EventHandler
+ :return: Reference on initialized and ready to use event handler model.
+ 'None' will be returned in case if there is no handler with
+ specified name and no request to create it.
+ """
+ event_handler = block.event_handler(data['name'])
+
+ # Prepare payload for event handler update.
+ changed_name = (data.pop('changes').get('name')
+ if 'changes' in data else None)
+ name = data.get('name') or changed_name
+ channels = data.get('channels')
+ event = data.get('event')
+ code = _content_of_file_at_path(data.get('src'))
+ state = data.get('state') or 'present'
+
+ # Create event handler if required.
+ if event_handler is None and state == 'present':
+ event_handler = EventHandler(name=name, channels=channels, event=event,
+ code=code)
+ block.add_event_handler(event_handler)
+
+ # Update event handler if required.
+ if event_handler is not None and state == 'present':
+ if name is not None:
+ event_handler.name = name
+ if channels is not None:
+ event_handler.channels = channels
+ if event is not None:
+ event_handler.event = event
+ if code is not None:
+ event_handler.code = code
+
+ return event_handler
+
+
+def _failure_title_from_exception(exception):
+ """Compose human-readable title for module error title.
+
+ Title will be based on status codes if they has been provided.
+ :type exception: exceptions.GeneralPubNubError
+ :param exception: Reference on exception for which title should be
+ composed.
+
+ :rtype: str
+ :return: Reference on error tile which should be shown on module
+ failure.
+ """
+ title = 'General REST API access error.'
+ if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
+ title = 'Authorization error: missing credentials.'
+ elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
+ title = 'Authorization error: wrong credentials.'
+ elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
+ title = 'API access error: insufficient access rights.'
+ elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
+ title = 'API access error: time token expired.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
+ title = 'Block creation did fail: a block with the same name already exists.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
+ title = 'Unable to fetch the list of blocks for the keyset.'
+ elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
+ title = 'Block creation did fail.'
+ elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
+ title = 'Block update did fail.'
+ elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
+ title = 'Block removal did fail.'
+ elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
+ title = 'Block start/stop did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
+ title = 'Event handler creation did fail: missing fields.'
+ elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
+ title = 'Event handler creation did fail: a handler with the same name already exists.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
+ title = 'Event handler creation did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
+ title = 'Event handler update did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
+ title = 'Event handler removal did fail.'
+
+ return title
+
+
+def _content_of_file_at_path(path):
+ """Read file content.
+
+ Try read content of file at specified path.
+ :type path: str
+ :param path: Full path to location of file which should be read'ed.
+ :rtype: content
+ :return: File content or 'None'
+ """
+ content = None
+ if path and os.path.exists(path):
+ with open(path, mode="rt") as opened_file:
+ b_content = opened_file.read()
+ try:
+ content = to_text(b_content, errors='surrogate_or_strict')
+ except UnicodeError:
+ pass
+
+ return content
+
+
+def main():
+ fields = dict(
+ email=dict(default='', required=False, type='str'),
+ password=dict(default='', required=False, type='str', no_log=True),
+ account=dict(default='', required=False, type='str'),
+ application=dict(required=True, type='str'),
+ keyset=dict(required=True, type='str'),
+ state=dict(default='present', type='str',
+ choices=['started', 'stopped', 'present', 'absent']),
+ name=dict(required=True, type='str'),
+ description=dict(type='str'),
+ event_handlers=dict(default=list(), type='list'),
+ changes=dict(default=dict(), type='dict'),
+ cache=dict(default=dict(), type='dict'),
+ validate_certs=dict(default=True, type='bool'))
+ module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+
+ if not HAS_PUBNUB_BLOCKS_CLIENT:
+ module.fail_json(msg='pubnub_blocks_client required for this module.')
+
+ params = module.params
+
+ # Authorize user.
+ user = pubnub_user(module)
+ # Initialize PubNub account instance.
+ account = pubnub_account(module, user=user)
+ # Try fetch application with which module should work.
+ application = pubnub_application(module, account=account)
+ # Try fetch keyset with which module should work.
+ keyset = pubnub_keyset(module, account=account, application=application)
+ # Try fetch block with which module should work.
+ block = pubnub_block(module, account=account, keyset=keyset)
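+ # A uid of -1 identifies a block created locally in this run that has
+ # not been saved server-side yet; start/stop is skipped for such blocks.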
+ is_new_block = block is not None and block.uid == -1
+
+ # Check whether block should be removed or not.
+ if block is not None and params['state'] == 'absent':
+ keyset.remove_block(block)
+ block = None
+
+ if block is not None:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+
+ # Process event changes to event handlers.
+ for event_handler_data in params.get('event_handlers') or list():
+ state = event_handler_data.get('state') or 'present'
+ event_handler = pubnub_event_handler(data=event_handler_data,
+ block=block)
+ if state == 'absent' and event_handler:
+ block.delete_event_handler(event_handler)
+
+ # Update block operation state if required.
+ if block and not is_new_block:
+ if params['state'] == 'started':
+ block.start()
+ elif params['state'] == 'stopped':
+ block.stop()
+
+ # Save current account state.
+ if not module.check_mode:
+ try:
+ account.save()
+ except (exceptions.APIAccessError, exceptions.KeysetError,
+ exceptions.BlockError, exceptions.EventHandlerError,
+ exceptions.GeneralPubNubError) as exc:
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=module_cache)
+
+ # Report module execution results.
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ changed_will_change = account.changed or account.will_change
+ module.exit_json(changed=changed_will_change, module_cache=module_cache)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py
new file mode 100644
index 00000000..8dbc6b9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py
@@ -0,0 +1,754 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Joe Adams <@sysadmind>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pulp_repo
+author: "Joe Adams (@sysadmind)"
+short_description: Add or remove Pulp repos from a remote host.
+description:
+ - Add or remove Pulp repos from a remote host.
+options:
+ add_export_distributor:
+ description:
+ - Whether or not to add the export distributor to new C(rpm) repositories.
+ type: bool
+ default: no
+ feed:
+ description:
+ - Upstream feed URL to receive updates from.
+ type: str
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the M(ansible.builtin.uri) module, only
+ sends authentication information when a webservice responds to an initial
+ request with a 401 status. Since some basic auth services do not
+ properly send a 401, logins will fail. This option forces the sending of
+ the Basic authentication header upon initial request.
+ type: bool
+ default: no
+ generate_sqlite:
+ description:
+ - Boolean flag to indicate whether sqlite files should be generated during
+ a repository publish.
+ required: false
+ type: bool
+ default: no
+ feed_ca_cert:
+ description:
+ - CA certificate string used to validate the feed source SSL certificate.
+ This can be the file content or the path to the file.
+ The ca_cert alias will be removed in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_ca_cert, ca_cert ]
+ feed_client_cert:
+ description:
+ - Certificate used as the client certificate when synchronizing the
+ repository. This is used to communicate authentication information to
+ the feed source. The value to this option must be the full path to the
+ certificate. The specified file may be the certificate itself or a
+ single file containing both the certificate and private key. This can be
+ the file content or the path to the file.
+ - If not specified the default value will come from client_cert. Which will
+ change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_cert ]
+ feed_client_key:
+ description:
+ - Private key to the certificate specified in I(importer_ssl_client_cert),
+ assuming it is not included in the certificate file itself. This can be
+ the file content or the path to the file.
+ - If not specified the default value will come from client_key. Which will
+ change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_key ]
+ name:
+ description:
+ - Name of the repo to add or remove. This correlates to repo-id in Pulp.
+ required: true
+ type: str
+ aliases: [ repo ]
+ proxy_host:
+ description:
+ - Proxy url setting for the pulp repository importer. This is in the
+ format scheme://host.
+ required: false
+ default: null
+ type: str
+ proxy_port:
+ description:
+ - Proxy port setting for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_username:
+ description:
+ - Proxy username for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_password:
+ description:
+ - Proxy password for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ publish_distributor:
+ description:
+ - Distributor to use when state is C(publish). The default is to
+ publish all distributors.
+ type: str
+ pulp_host:
+ description:
+ - URL of the pulp server to connect to.
+ default: https://127.0.0.1
+ type: str
+ relative_url:
+ description:
+ - Relative URL for the local repository. It is required when C(state) is C(present).
+ type: str
+ repo_type:
+ description:
+ - Repo plugin type to use (i.e. C(rpm), C(docker)).
+ default: rpm
+ type: str
+ repoview:
+ description:
+ - Whether to generate repoview files for a published repository. Setting
+ this to "yes" automatically activates `generate_sqlite`.
+ required: false
+ type: bool
+ default: no
+ serve_http:
+ description:
+ - Make the repo available over HTTP.
+ type: bool
+ default: no
+ serve_https:
+ description:
+ - Make the repo available over HTTPS.
+ type: bool
+ default: yes
+ state:
+ description:
+ - The repo state. A state of C(sync) will queue a sync of the repo.
+ This is asynchronous but not delayed like a scheduled sync. A state of
+ C(publish) will use the repository's distributor to publish the content.
+ default: present
+ choices: [ "present", "absent", "sync", "publish" ]
+ type: str
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication to the pulp API.
+ If the I(url_username) parameter is not specified, the I(url_password)
+ parameter will not be used.
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication to the pulp API.
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ wait_for_completion:
+ description:
+ - Wait for asynchronous tasks to complete before returning.
+ type: bool
+ default: no
+notes:
+ - This module can currently only create distributors and importers on rpm
+ repositories. Contributions to support other repo types are welcome.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Create a new repo with name 'my_repo'
+ community.general.pulp_repo:
+ name: my_repo
+ relative_url: my/repo
+ state: present
+
+- name: Create a repo with a feed and a relative URL
+ community.general.pulp_repo:
+ name: my_centos_updates
+ repo_type: rpm
+ feed: http://mirror.centos.org/centos/6/updates/x86_64/
+ relative_url: centos/6/updates
+ url_username: admin
+ url_password: admin
+ force_basic_auth: yes
+ state: present
+
+- name: Remove a repo from the pulp server
+ community.general.pulp_repo:
+ name: my_old_repo
+ repo_type: rpm
+ state: absent
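+
+# A minimal sketch of the C(sync) and C(publish) states described above;
+# the repository name is illustrative.
+- name: Queue a sync of my_repo and wait for the spawned tasks to finish
+ community.general.pulp_repo:
+ name: my_repo
+ state: sync
+ wait_for_completion: yes
+
+- name: Publish my_repo using all of its distributors
+ community.general.pulp_repo:
+ name: my_repo
+ state: publish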
+'''
+
+RETURN = '''
+repo:
+ description: Name of the repo that the action was performed on.
+ returned: success
+ type: str
+ sample: my_repo
+'''
+
+import json
+import os
+from time import sleep
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.urls import url_argument_spec
+
+
+class pulp_server(object):
+ """
+ Class to interact with a Pulp server
+ """
+
+ def __init__(self, module, pulp_host, repo_type, wait_for_completion=False):
+ self.module = module
+ self.host = pulp_host
+ self.repo_type = repo_type
+ self.repo_cache = dict()
+ self.wait_for_completion = wait_for_completion
+
+ def check_repo_exists(self, repo_id):
+ try:
+ self.get_repo_config_by_id(repo_id)
+ except IndexError:
+ return False
+ else:
+ return True
+
+ def compare_repo_distributor_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ for key, value in kwargs.items():
+ if key not in distributor['config'].keys():
+ return False
+
+ if not distributor['config'][key] == value:
+ return False
+
+ return True
+
+ def compare_repo_importer_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for importer in repo_config['importers']:
+ for key, value in kwargs.items():
+ if value is not None:
+ if key not in importer['config'].keys():
+ return False
+
+ if not importer['config'][key] == value:
+ return False
+
+ return True
+
+ def create_repo(
+ self,
+ repo_id,
+ relative_url,
+ feed=None,
+ generate_sqlite=False,
+ serve_http=False,
+ serve_https=True,
+ proxy_host=None,
+ proxy_port=None,
+ proxy_username=None,
+ proxy_password=None,
+ repoview=False,
+ ssl_ca_cert=None,
+ ssl_client_cert=None,
+ ssl_client_key=None,
+ add_export_distributor=False
+ ):
+ url = "%s/pulp/api/v2/repositories/" % self.host
+ data = dict()
+ data['id'] = repo_id
+ data['distributors'] = []
+
+ if self.repo_type == 'rpm':
+ yum_distributor = dict()
+ yum_distributor['distributor_id'] = "yum_distributor"
+ yum_distributor['distributor_type_id'] = "yum_distributor"
+ yum_distributor['auto_publish'] = True
+ yum_distributor['distributor_config'] = dict()
+ yum_distributor['distributor_config']['http'] = serve_http
+ yum_distributor['distributor_config']['https'] = serve_https
+ yum_distributor['distributor_config']['relative_url'] = relative_url
+ yum_distributor['distributor_config']['repoview'] = repoview
+ yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(yum_distributor)
+
+ if add_export_distributor:
+ export_distributor = dict()
+ export_distributor['distributor_id'] = "export_distributor"
+ export_distributor['distributor_type_id'] = "export_distributor"
+ export_distributor['auto_publish'] = False
+ export_distributor['distributor_config'] = dict()
+ export_distributor['distributor_config']['http'] = serve_http
+ export_distributor['distributor_config']['https'] = serve_https
+ export_distributor['distributor_config']['relative_url'] = relative_url
+ export_distributor['distributor_config']['repoview'] = repoview
+ export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(export_distributor)
+
+ data['importer_type_id'] = "yum_importer"
+ data['importer_config'] = dict()
+
+ if feed:
+ data['importer_config']['feed'] = feed
+
+ if proxy_host:
+ data['importer_config']['proxy_host'] = proxy_host
+
+ if proxy_port:
+ data['importer_config']['proxy_port'] = proxy_port
+
+ if proxy_username:
+ data['importer_config']['proxy_username'] = proxy_username
+
+ if proxy_password:
+ data['importer_config']['proxy_password'] = proxy_password
+
+ if ssl_ca_cert:
+ data['importer_config']['ssl_ca_cert'] = ssl_ca_cert
+
+ if ssl_client_cert:
+ data['importer_config']['ssl_client_cert'] = ssl_client_cert
+
+ if ssl_client_key:
+ data['importer_config']['ssl_client_key'] = ssl_client_key
+
+ data['notes'] = {
+ "_repo-type": "rpm-repo"
+ }
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 201:
+ self.module.fail_json(
+ msg="Failed to create repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+ else:
+ return True
+
+ def delete_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='DELETE')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to delete repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def get_repo_config_by_id(self, repo_id):
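+ # Lazily cache each repository's configuration from the full repo list
+ # so repeated lookups don't rescan the list.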
+ if repo_id not in self.repo_cache.keys():
+ repo_array = [x for x in self.repo_list if x['id'] == repo_id]
+ self.repo_cache[repo_id] = repo_array[0]
+
+ return self.repo_cache[repo_id]
+
+ def publish_repo(self, repo_id, publish_distributor):
+ url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id)
+
+ # If there's no distributor specified, we will publish them all
+ if publish_distributor is None:
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ data = dict()
+ data['id'] = distributor['id']
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=distributor['id'])
+ else:
+ data = dict()
+ data['id'] = publish_distributor
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=publish_distributor)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def sync_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to schedule a sync of the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def update_repo_distributor_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id)
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ distributor_url = "%s%s/" % (url, distributor['id'])
+ data = dict()
+ data['distributor_config'] = dict()
+
+ for key, value in kwargs.items():
+ data['distributor_config'][key] = value
+
+ response, info = fetch_url(
+ self.module,
+ distributor_url,
+ data=json.dumps(data),
+ method='PUT')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the relative url for the repository.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ def update_repo_importer_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id)
+ data = dict()
+ importer_config = dict()
+
+ for key, value in kwargs.items():
+ if value is not None:
+ importer_config[key] = value
+
+ data['importer_config'] = importer_config
+
+ if self.repo_type == 'rpm':
+ data['importer_type_id'] = "yum_importer"
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the repo importer configuration",
+ status_code=info['status'],
+ response=info['msg'],
+ importer_config=importer_config,
+ url=url)
+
+ def set_repo_list(self):
+ url = "%s/pulp/api/v2/repositories/?details=true" % self.host
+ response, info = fetch_url(self.module, url, method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Request failed",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ self.repo_list = json.load(response)
+
+ def verify_tasks_completed(self, response_dict):
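+ # Poll each spawned task until it reports 'finished', failing the
+ # module as soon as a task reports 'error'.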
+ for task in response_dict['spawned_tasks']:
+ task_url = "%s%s" % (self.host, task['_href'])
+
+ while True:
+ response, info = fetch_url(
+ self.module,
+ task_url,
+ data='',
+ method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Failed to check async task status.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=task_url)
+
+ task_dict = json.load(response)
+
+ if task_dict['state'] == 'finished':
+ return True
+
+ if task_dict['state'] == 'error':
+ self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error'])
+
+ sleep(2)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ add_export_distributor=dict(default=False, type='bool'),
+ feed=dict(),
+ generate_sqlite=dict(default=False, type='bool'),
+ feed_ca_cert=dict(aliases=['importer_ssl_ca_cert', 'ca_cert'],
+ deprecated_aliases=[dict(name='ca_cert', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ feed_client_cert=dict(aliases=['importer_ssl_client_cert']),
+ feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True),
+ name=dict(required=True, aliases=['repo']),
+ proxy_host=dict(),
+ proxy_port=dict(),
+ proxy_username=dict(),
+ proxy_password=dict(no_log=True),
+ publish_distributor=dict(),
+ pulp_host=dict(default="https://127.0.0.1"),
+ relative_url=dict(),
+ repo_type=dict(default="rpm"),
+ repoview=dict(default=False, type='bool'),
+ serve_http=dict(default=False, type='bool'),
+ serve_https=dict(default=True, type='bool'),
+ state=dict(
+ default="present",
+ choices=['absent', 'present', 'sync', 'publish']),
+ wait_for_completion=dict(default=False, type="bool"))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ add_export_distributor = module.params['add_export_distributor']
+ feed = module.params['feed']
+ generate_sqlite = module.params['generate_sqlite']
+ importer_ssl_ca_cert = module.params['feed_ca_cert']
+ importer_ssl_client_cert = module.params['feed_client_cert']
+ if importer_ssl_client_cert is None and module.params['client_cert'] is not None:
+ importer_ssl_client_cert = module.params['client_cert']
+ module.deprecate("To specify client certificates to be used with the repo to sync, and not for communication with the "
+ "Pulp instance, use the new options `feed_client_cert` and `feed_client_key` (available since "
+ "Ansible 2.9.2). Until community.general 3.0.0, the default value for `feed_client_cert` will be "
+ "taken from `client_cert` if only the latter is specified",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ importer_ssl_client_key = module.params['feed_client_key']
+ if importer_ssl_client_key is None and module.params['client_key'] is not None:
+ importer_ssl_client_key = module.params['client_key']
+ module.deprecate("In Ansible 2.9.2 `feed_client_key` option was added. Until community.general 3.0.0 the default "
+ "value will come from client_key option",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ proxy_host = module.params['proxy_host']
+ proxy_port = module.params['proxy_port']
+ proxy_username = module.params['proxy_username']
+ proxy_password = module.params['proxy_password']
+ publish_distributor = module.params['publish_distributor']
+ pulp_host = module.params['pulp_host']
+ relative_url = module.params['relative_url']
+ repo = module.params['name']
+ repo_type = module.params['repo_type']
+ repoview = module.params['repoview']
+ serve_http = module.params['serve_http']
+ serve_https = module.params['serve_https']
+ state = module.params['state']
+ wait_for_completion = module.params['wait_for_completion']
+
+ if (state == 'present') and (not relative_url):
+ module.fail_json(msg="When state is present, relative_url is required.")
+
+ # Ensure that the importer_ssl_* is the content and not a file path
+ if importer_ssl_ca_cert is not None:
+ importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert)
+ if os.path.isfile(importer_ssl_ca_cert_file_path):
+ importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r')
+ try:
+ importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read()
+ finally:
+ importer_ssl_ca_cert_file_object.close()
+
+ if importer_ssl_client_cert is not None:
+ importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert)
+ if os.path.isfile(importer_ssl_client_cert_file_path):
+ importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r')
+ try:
+ importer_ssl_client_cert = importer_ssl_client_cert_file_object.read()
+ finally:
+ importer_ssl_client_cert_file_object.close()
+
+ if importer_ssl_client_key is not None:
+ importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key)
+ if os.path.isfile(importer_ssl_client_key_file_path):
+ importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r')
+ try:
+ importer_ssl_client_key = importer_ssl_client_key_file_object.read()
+ finally:
+ importer_ssl_client_key_file_object.close()
+
+ server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion)
+ server.set_repo_list()
+ repo_exists = server.check_repo_exists(repo)
+
+ changed = False
+
+ if state == 'absent' and repo_exists:
+ if not module.check_mode:
+ server.delete_repo(repo)
+
+ changed = True
+
+ if state == 'sync':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be synced.")
+
+ if not module.check_mode:
+ server.sync_repo(repo)
+
+ changed = True
+
+ if state == 'publish':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be published.")
+
+ if not module.check_mode:
+ server.publish_repo(repo, publish_distributor)
+
+ changed = True
+
+ if state == 'present':
+ if not repo_exists:
+ if not module.check_mode:
+ server.create_repo(
+ repo_id=repo,
+ relative_url=relative_url,
+ feed=feed,
+ generate_sqlite=generate_sqlite,
+ serve_http=serve_http,
+ serve_https=serve_https,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ repoview=repoview,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key,
+ add_export_distributor=add_export_distributor)
+
+ changed = True
+
+ else:
+ # Check to make sure all the settings are correct
+ # The importer config gets overwritten on set and not updated, so
+ # we set the whole config at the same time.
+ if not server.compare_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key
+ ):
+ if not module.check_mode:
+ server.update_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key)
+
+ changed = True
+
+ if relative_url is not None:
+ if not server.compare_repo_distributor_config(
+ repo,
+ relative_url=relative_url
+ ):
+ if not module.check_mode:
+ server.update_repo_distributor_config(
+ repo,
+ relative_url=relative_url)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, repoview=repoview):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, repoview=repoview)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, http=serve_http):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, http=serve_http)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, https=serve_https):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, https=serve_https)
+
+ changed = True
+
+ module.exit_json(changed=changed, repo=repo)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py
new file mode 100644
index 00000000..db8c0ec8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: puppet
+short_description: Runs puppet
+description:
+ - Runs I(puppet) agent or apply in a reliable manner.
+options:
+ timeout:
+ description:
+ - How long to wait for I(puppet) to finish.
+ type: str
+ default: 30m
+ puppetmaster:
+ description:
+ - The hostname of the puppetmaster to contact.
+ type: str
+ modulepath:
+ description:
+ - Path to an alternate location for puppet modules.
+ type: str
+ manifest:
+ description:
+ - Path to the manifest file to run puppet apply on.
+ type: str
+ noop:
+ description:
+ - Override puppet.conf noop mode.
+ - When C(yes), run Puppet agent with C(--noop) switch set.
+ - When C(no), run Puppet agent with C(--no-noop) switch set.
+ - When unset (default), use default or puppet.conf value if defined.
+ type: bool
+ facts:
+ description:
+ - A dict of values to pass in as persistent external facter facts.
+ type: dict
+ facter_basename:
+ description:
+ - Basename of the facter output file.
+ type: str
+ default: ansible
+ environment:
+ description:
+ - Puppet environment to be used.
+ type: str
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used.
+ - C(all) will go to both C(stdout) and C(syslog).
+ type: str
+ choices: [ all, stdout, syslog ]
+ default: stdout
+ certname:
+ description:
+ - The name to use when handling certificates.
+ type: str
+ tags:
+ description:
+ - A list of puppet tags to be used.
+ type: list
+ elements: str
+ execute:
+ description:
+ - Execute a specific piece of Puppet code.
+ - It has no effect with a puppetmaster.
+ type: str
+ use_srv_records:
+ description:
+ - Toggles the C(use_srv_records) flag.
+ type: bool
+ summarize:
+ description:
+ - Whether to print a transaction summary.
+ type: bool
+ default: false
+ verbose:
+ description:
+ - Print extra information.
+ type: bool
+ default: false
+ debug:
+ description:
+ - Enable full debugging.
+ type: bool
+ default: false
+requirements:
+- puppet
+author:
+- Monty Taylor (@emonty)
+'''
+
+EXAMPLES = r'''
+- name: Run puppet agent and fail if anything goes wrong
+ community.general.puppet:
+
+- name: Run puppet and timeout in 5 minutes
+ community.general.puppet:
+ timeout: 5m
+
+- name: Run puppet using a different environment
+ community.general.puppet:
+ environment: testing
+
+- name: Run puppet using a specific certname
+ community.general.puppet:
+ certname: agent01.example.com
+
+- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster
+ community.general.puppet:
+ execute: include ::mymodule
+
+- name: Run puppet using specific tags
+ community.general.puppet:
+ tags:
+ - update
+ - nginx
+
+- name: Run puppet agent in noop mode
+ community.general.puppet:
+ noop: yes
+
+- name: Run a manifest with debug, log to both syslog and stdout, specify module path
+ community.general.puppet:
+ modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+ logdest: all
+ manifest: /var/lib/example/puppet_step_config.pp
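+
+# A minimal sketch combining the reporting options documented above;
+# the timeout value is illustrative.
+- name: Run puppet agent with a transaction summary and verbose output
+ community.general.puppet:
+ summarize: yes
+ verbose: yes
+ timeout: 10m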
+'''
+
+import json
+import os
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def _get_facter_dir():
+ if os.getuid() == 0:
+ return '/etc/facter/facts.d'
+ else:
+ return os.path.expanduser('~/.facter/facts.d')
+
+
+def _write_structured_data(basedir, basename, data):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ file_path = os.path.join(basedir, "{0}.json".format(basename))
+ # This is more complex than you might normally expect because we want to
+ # open the file with only u+rw set. Also, we use the stat constants
+ # because ansible still supports python 2.4 and the octal syntax changed
+ out_file = os.fdopen(
+ os.open(
+ file_path, os.O_CREAT | os.O_WRONLY,
+ stat.S_IRUSR | stat.S_IWUSR), 'w')
+ out_file.write(json.dumps(data))
+ out_file.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ timeout=dict(type='str', default='30m'),
+ puppetmaster=dict(type='str'),
+ modulepath=dict(type='str'),
+ manifest=dict(type='str'),
+ noop=dict(required=False, type='bool'),
+ logdest=dict(type='str', default='stdout', choices=['all',
+ 'stdout',
+ 'syslog']),
+ # internal code to work with --diff, do not use
+ show_diff=dict(type='bool', default=False, aliases=['show-diff']),
+ facts=dict(type='dict'),
+ facter_basename=dict(type='str', default='ansible'),
+ environment=dict(type='str'),
+ certname=dict(type='str'),
+ tags=dict(type='list', elements='str'),
+ execute=dict(type='str'),
+ summarize=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ use_srv_records=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('puppetmaster', 'manifest'),
+ ('puppetmaster', 'manifest', 'execute'),
+ ('puppetmaster', 'modulepath'),
+ ],
+ )
+ p = module.params
+
+ global PUPPET_CMD
+ PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
+
+ if not PUPPET_CMD:
+ module.fail_json(
+ msg="Could not find puppet. Please ensure it is installed.")
+
+ global TIMEOUT_CMD
+ TIMEOUT_CMD = module.get_bin_path("timeout", False)
+
+ if p['manifest']:
+ if not os.path.exists(p['manifest']):
+ module.fail_json(
+ msg="Manifest file %(manifest)s not found." % dict(
+ manifest=p['manifest']))
+
+ # Check if puppet is disabled here
+ if not p['manifest']:
+ rc, stdout, stderr = module.run_command(
+ PUPPET_CMD + " config print agent_disabled_lockfile")
+ if os.path.exists(stdout.strip()):
+ module.fail_json(
+ msg="Puppet agent is administratively disabled.",
+ disabled=True)
+ elif rc != 0:
+ module.fail_json(
+ msg="Puppet agent state could not be determined.")
+
+ if module.params['facts'] and not module.check_mode:
+ _write_structured_data(
+ _get_facter_dir(),
+ module.params['facter_basename'],
+ module.params['facts'])
+
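+ # Wrap the puppet invocation in coreutils 'timeout' (sending SIGKILL
+ # after the limit) when the binary is available.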
+ if TIMEOUT_CMD:
+ base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
+ timeout_cmd=TIMEOUT_CMD,
+ timeout=shlex_quote(p['timeout']),
+ puppet_cmd=PUPPET_CMD)
+ else:
+ base_cmd = PUPPET_CMD
+
+ if not p['manifest'] and not p['execute']:
+ cmd = ("%(base_cmd)s agent --onetime"
+ " --no-daemonize --no-usecacheonfailure --no-splay"
+ " --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd)
+ if p['puppetmaster']:
+ cmd += " --server %s" % shlex_quote(p['puppetmaster'])
+ if p['show_diff']:
+ cmd += " --show_diff"
+ if p['environment']:
+ cmd += " --environment '%s'" % p['environment']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if module.check_mode:
+ cmd += " --noop"
+ elif p['noop'] is not None:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['use_srv_records'] is not None:
+ if not p['use_srv_records']:
+ cmd += " --no-use_srv_records"
+ else:
+ cmd += " --use_srv_records"
+ else:
+ cmd = "%s apply --detailed-exitcodes " % base_cmd
+ if p['logdest'] == 'syslog':
+ cmd += "--logdest syslog "
+ if p['logdest'] == 'all':
+ cmd += " --logdest syslog --logdest stdout"
+ if p['modulepath']:
+ cmd += "--modulepath='%s'" % p['modulepath']
+ if p['environment']:
+ cmd += "--environment '%s' " % p['environment']
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if module.check_mode:
+ cmd += "--noop "
+ elif 'noop' in p:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['execute']:
+ cmd += " --execute '%s'" % p['execute']
+ else:
+ cmd += " %s" % shlex_quote(p['manifest'])
+ if p['summarize']:
+ cmd += " --summarize"
+ if p['debug']:
+ cmd += " --debug"
+ if p['verbose']:
+ cmd += " --verbose"
+ rc, stdout, stderr = module.run_command(cmd)
+
+ if rc == 0:
+ # success
+ module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
+ elif rc == 1:
+ # rc==1 could be because it's disabled
+ # rc==1 could also mean there was a compilation failure
+ disabled = "administratively disabled" in stdout
+ if disabled:
+ msg = "puppet is disabled"
+ else:
+ msg = "puppet did not run"
+ module.exit_json(
+ rc=rc, disabled=disabled, msg=msg,
+ error=True, stdout=stdout, stderr=stderr)
+ elif rc == 2:
+ # success with changes
+ module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
+ elif rc == 124:
+ # timeout
+ module.exit_json(
+ rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ # failure
+ module.fail_json(
+ rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
+ stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py
new file mode 100644
index 00000000..5e8b5932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py
@@ -0,0 +1,858 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefa_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flasharray.purefa_info) instead.
+short_description: Collect facts from Pure Storage FlashArray
+description:
+ - Collect facts information from a Pure Storage Flasharray running the
+ Purity//FA operating system. By default, the module will collect basic
+ fact information including hosts, host groups, protection
+ groups and volume counts. Additional fact information can be collected
+ based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
+ admins, volumes, snapshots, pods, vgroups, offload, apps and arrays.
+ type: list
+ required: false
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fa
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefa_facts:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect configuration and capacity facts
+ community.general.purefa_facts:
+ gather_subset:
+ - config
+ - capacity
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect all facts
+ community.general.purefa_facts:
+ gather_subset:
+ - all
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
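+
+# A minimal sketch collecting only the networking-related subsets listed
+# in the 'gather_subset' documentation; the URL and token are illustrative.
+- name: Collect network and interface facts
+ community.general.purefa_facts:
+ gather_subset:
+ - network
+ - interfaces
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592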
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashArray
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {},
+ "config": {
+ "directory_service": {
+ "array_admin_group": null,
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "check_peer": false,
+ "enabled": false,
+ "group_base": null,
+ "readonly_group": null,
+ "storage_admin_group": null,
+ "uri": []
+ },
+ "dns": {
+ "domain": "domain.com",
+ "nameservers": [
+ "8.8.8.8",
+ "8.8.4.4"
+ ]
+ },
+ "ntp": [
+ "0.ntp.pool.org",
+ "1.ntp.pool.org",
+ "2.ntp.pool.org",
+ "3.ntp.pool.org"
+ ],
+ "smtp": [
+ {
+ "enabled": true,
+ "name": "alerts@acme.com"
+ },
+ {
+ "enabled": true,
+ "name": "user@acme.com"
+ }
+ ],
+ "snmp": [
+ {
+ "auth_passphrase": null,
+ "auth_protocol": null,
+ "community": null,
+ "host": "localhost",
+ "name": "localhost",
+ "privacy_passphrase": null,
+ "privacy_protocol": null,
+ "user": null,
+ "version": "v2c"
+ }
+ ],
+ "ssl_certs": {
+ "country": null,
+ "email": null,
+ "issued_by": "",
+ "issued_to": "",
+ "key_size": 2048,
+ "locality": null,
+ "organization": "Acme Storage, Inc.",
+ "organizational_unit": "Acme Storage, Inc.",
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "2017-08-11T23:09:06Z",
+ "valid_to": "2027-08-09T23:09:06Z"
+ },
+ "syslog": []
+ },
+ "default": {
+ "array_name": "flasharray1",
+ "connected_arrays": 1,
+ "hostgroups": 0,
+ "hosts": 10,
+ "pods": 3,
+ "protection_groups": 1,
+ "purity_version": "5.0.4",
+ "snapshots": 1,
+ "volume_groups": 2
+ },
+ "hgroups": {},
+ "hosts": {
+ "host1": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:2f6f5715a533"
+ ],
+ "wwn": []
+ },
+ "host2": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:d17fb13fe0b"
+ ],
+ "wwn": []
+ },
+ "host3": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:97b1351bfb2"
+ ],
+ "wwn": []
+ },
+ "host4": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:dd84e9a7b2cb"
+ ],
+ "wwn": [
+ "10000000C96C48D1",
+ "10000000C96C48D2"
+ ]
+ }
+ },
+ "interfaces": {
+ "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
+ },
+ "network": {
+ "ct0.eth0": {
+ "address": "10.10.10.10",
+ "gateway": "10.10.10.1",
+ "hwaddr": "ec:f4:bb:c8:8a:04",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ },
+ "ct0.eth2": {
+ "address": "10.10.10.11",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:00",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth3": {
+ "address": "10.10.10.12",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:02",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth4": {
+ "address": "10.10.10.13",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0c",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth5": {
+ "address": "10.10.10.14",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0d",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "vir0": {
+ "address": "10.10.10.20",
+ "gateway": "10.10.10.1",
+ "hwaddr": "fe:ba:e9:e7:6b:0f",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ }
+ },
+ "offload": {
+ "nfstarget": {
+ "address": "10.0.2.53",
+ "mount_options": null,
+ "mount_point": "/offload",
+ "protocol": "nfs",
+ "status": "scanning"
+ }
+ },
+ "performance": {
+ "input_per_sec": 8191,
+ "output_per_sec": 0,
+ "queue_depth": 1,
+ "reads_per_sec": 0,
+ "san_usec_per_write_op": 15,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 642,
+ "writes_per_sec": 2
+ },
+ "pgroups": {
+ "consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": {
+ "hgroups": null,
+ "hosts": null,
+ "source": "host1",
+ "targets": null,
+ "volumes": [
+ "volume-1"
+ ]
+ }
+ },
+ "pods": {
+ "srm-pod": {
+ "arrays": [
+ {
+ "array_id": "52595f7e-b460-4b46-8851-a5defd2ac192",
+ "mediator_status": "online",
+ "name": "sn1-405-c09-37",
+ "status": "online"
+ },
+ {
+ "array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca",
+ "mediator_status": "online",
+ "name": "sn1-420-c11-31",
+ "status": "online"
+ }
+ ],
+ "source": null
+ }
+ },
+ "snapshots": {
+ "consisgroup.cgsnapshot": {
+ "created": "2018-03-28T09:34:02Z",
+ "size": 13958643712,
+ "source": "volume-1"
+ }
+ },
+ "subnet": {},
+ "vgroups": {
+ "vvol--vSphere-HA-0ffc7dd1-vg": {
+ "volumes": [
+ "vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6"
+ ]
+ }
+ },
+ "volumes": {
+ "ansible_data": {
+ "bandwidth": null,
+ "hosts": [
+ [
+ "host1",
+ 1
+ ]
+ ],
+ "serial": "43BE47C12334399B000114A6",
+ "size": 1099511627776,
+ "source": null
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec
+
+
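+# Each of the following constants names the REST API version that must appear
+# in the array's supported-version list before the matching facts are gathered.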
+ADMIN_API_VERSION = '1.14'
+S3_REQUIRED_API_VERSION = '1.16'
+LATENCY_REQUIRED_API_VERSION = '1.16'
+AC_REQUIRED_API_VERSION = '1.14'
+CAP_REQUIRED_API_VERSION = '1.6'
+SAN_REQUIRED_API_VERSION = '1.10'
+NVME_API_VERSION = '1.16'
+PREFERRED_API_VERSION = '1.15'
+CONN_STATUS_API_VERSION = '1.17'
+
+
+def generate_default_dict(array):
+ default_facts = {}
+ defaults = array.get()
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ default_facts['volume_groups'] = len(array.list_vgroups())
+ default_facts['connected_arrays'] = len(array.list_array_connections())
+ default_facts['pods'] = len(array.list_pods())
+ default_facts['connection_key'] = array.get(connection_key=True)['connection_key']
+ hosts = array.list_hosts()
+ admins = array.list_admins()
+ snaps = array.list_volumes(snap=True, pending=True)
+ pgroups = array.list_pgroups(pending=True)
+ hgroups = array.list_hgroups()
+ # Old FA arrays only report model from the primary controller
+ ct0_model = array.get_hardware('CT0')['model']
+ if ct0_model:
+ model = ct0_model
+ else:
+ ct1_model = array.get_hardware('CT1')['model']
+ model = ct1_model
+ default_facts['array_model'] = model
+ default_facts['array_name'] = defaults['array_name']
+ default_facts['purity_version'] = defaults['version']
+ default_facts['hosts'] = len(hosts)
+ default_facts['snapshots'] = len(snaps)
+ default_facts['protection_groups'] = len(pgroups)
+ default_facts['hostgroups'] = len(hgroups)
+ default_facts['admins'] = len(admins)
+ return default_facts
+
+
+def generate_perf_dict(array):
+ perf_facts = {}
+ api_version = array._list_available_rest_versions()
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ latency_info = array.get(action='monitor', latency=True)[0]
+ perf_info = array.get(action='monitor')[0]
+ # IOPS
+ perf_facts['writes_per_sec'] = perf_info['writes_per_sec']
+ perf_facts['reads_per_sec'] = perf_info['reads_per_sec']
+
+ # Bandwidth
+ perf_facts['input_per_sec'] = perf_info['input_per_sec']
+ perf_facts['output_per_sec'] = perf_info['output_per_sec']
+
+ # Latency
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op']
+ perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op']
+ perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op']
+ perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op']
+ perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op']
+ perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op']
+ perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op']
+ perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op']
+ perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op']
+ perf_facts['queue_depth'] = perf_info['queue_depth']
+ return perf_facts
+
+
+def generate_config_dict(array):
+ config_facts = {}
+ api_version = array._list_available_rest_versions()
+ # DNS
+ config_facts['dns'] = array.get_dns()
+ # SMTP
+ config_facts['smtp'] = array.list_alert_recipients()
+ # SNMP
+ config_facts['snmp'] = array.list_snmp_managers()
+ config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
+ # DS
+ config_facts['directory_service'] = array.get_directory_service()
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['directory_service_roles'] = {}
+ roles = array.list_directory_service_roles()
+ for role in range(0, len(roles)):
+ role_name = roles[role]['name']
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles[role]['group'],
+ 'group_base': roles[role]['group_base'],
+ }
+ else:
+ config_facts['directory_service'].update(array.get_directory_service(groups=True))
+ # NTP
+ config_facts['ntp'] = array.get(ntpserver=True)['ntpserver']
+ # SYSLOG
+ config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
+ # Phonehome
+ config_facts['phonehome'] = array.get(phonehome=True)['phonehome']
+ # Proxy
+ config_facts['proxy'] = array.get(proxy=True)['proxy']
+ # Relay Host
+ config_facts['relayhost'] = array.get(relayhost=True)['relayhost']
+ # Sender Domain
+ config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain']
+ # Idle Timeout
+ config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
+ # SCSI Timeout
+ config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
+ # SSL
+ config_facts['ssl_certs'] = array.get_certificate()
+ # Global Admin settings
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['global_admin'] = array.get_global_admin_attributes()
+ return config_facts
+
+
+def generate_admin_dict(array):
+ api_version = array._list_available_rest_versions()
+ admin_facts = {}
+ if ADMIN_API_VERSION in api_version:
+ admins = array.list_admins()
+ for admin in range(0, len(admins)):
+ admin_name = admins[admin]['name']
+ admin_facts[admin_name] = {
+ 'type': admins[admin]['type'],
+ 'role': admins[admin]['role'],
+ }
+ return admin_facts
+
+
+def generate_subnet_dict(array):
+ sub_facts = {}
+ subnets = array.list_subnets()
+ for sub in range(0, len(subnets)):
+ sub_name = subnets[sub]['name']
+ if subnets[sub]['enabled']:
+ sub_facts[sub_name] = {
+ 'gateway': subnets[sub]['gateway'],
+ 'mtu': subnets[sub]['mtu'],
+ 'vlan': subnets[sub]['vlan'],
+ 'prefix': subnets[sub]['prefix'],
+ 'interfaces': subnets[sub]['interfaces'],
+ 'services': subnets[sub]['services'],
+ }
+ return sub_facts
+
+
+def generate_network_dict(array):
+ net_facts = {}
+ ports = array.list_network_interfaces()
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
+ net_facts[int_name] = {
+ 'hwaddr': ports[port]['hwaddr'],
+ 'mtu': ports[port]['mtu'],
+ 'enabled': ports[port]['enabled'],
+ 'speed': ports[port]['speed'],
+ 'address': ports[port]['address'],
+ 'slaves': ports[port]['slaves'],
+ 'services': ports[port]['services'],
+ 'gateway': ports[port]['gateway'],
+ 'netmask': ports[port]['netmask'],
+ }
+ if ports[port]['subnet']:
+ subnets = array.get_subnet(ports[port]['subnet'])
+ if subnets['enabled']:
+ net_facts[int_name]['subnet'] = {
+ 'name': subnets['name'],
+ 'prefix': subnets['prefix'],
+ 'vlan': subnets['vlan'],
+ }
+ return net_facts
+
+
+def generate_capacity_dict(array):
+ capacity_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CAP_REQUIRED_API_VERSION in api_version:
+ volumes = array.list_volumes(pending=True)
+ capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes)
+ capacity = array.get(space=True)
+ total_capacity = capacity[0]['capacity']
+ used_space = capacity[0]['total']
+ capacity_facts['free_space'] = total_capacity - used_space
+ capacity_facts['total_capacity'] = total_capacity
+ capacity_facts['data_reduction'] = capacity[0]['data_reduction']
+ capacity_facts['system_space'] = capacity[0]['system']
+ capacity_facts['volume_space'] = capacity[0]['volumes']
+ capacity_facts['shared_space'] = capacity[0]['shared_space']
+ capacity_facts['snapshot_space'] = capacity[0]['snapshots']
+ capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning']
+ capacity_facts['total_reduction'] = capacity[0]['total_reduction']
+
+ return capacity_facts
+
+
+def generate_snap_dict(array):
+ snap_facts = {}
+ snaps = array.list_volumes(snap=True)
+ for snap in range(0, len(snaps)):
+ snapshot = snaps[snap]['name']
+ snap_facts[snapshot] = {
+ 'size': snaps[snap]['size'],
+ 'source': snaps[snap]['source'],
+ 'created': snaps[snap]['created'],
+ }
+ return snap_facts
+
+
+def generate_vol_dict(array):
+ volume_facts = {}
+ vols = array.list_volumes()
+ for vol in range(0, len(vols)):
+ volume = vols[vol]['name']
+ volume_facts[volume] = {
+ 'source': vols[vol]['source'],
+ 'size': vols[vol]['size'],
+ 'serial': vols[vol]['serial'],
+ 'hosts': [],
+ 'bandwidth': ""
+ }
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ qvols = array.list_volumes(qos=True)
+ for qvol in range(0, len(qvols)):
+ volume = qvols[qvol]['name']
+ qos = qvols[qvol]['bandwidth_limit']
+ volume_facts[volume]['bandwidth'] = qos
+ vvols = array.list_volumes(protocol_endpoint=True)
+ for vvol in range(0, len(vvols)):
+ volume = vvols[vvol]['name']
+ volume_facts[volume] = {
+ 'source': vvols[vvol]['source'],
+ 'serial': vvols[vvol]['serial'],
+ 'hosts': []
+ }
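+ # Each connected host is recorded as a [host, lun] pair, matching the
+ # RETURN sample above (e.g. ['host1', 1]).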
+ cvols = array.list_volumes(connect=True)
+ for cvol in range(0, len(cvols)):
+ volume = cvols[cvol]['name']
+ voldict = [cvols[cvol]['host'], cvols[cvol]['lun']]
+ volume_facts[volume]['hosts'].append(voldict)
+ return volume_facts
+
+
+def generate_host_dict(array):
+ api_version = array._list_available_rest_versions()
+ host_facts = {}
+ hosts = array.list_hosts()
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ tports = []
+ host_all_info = array.get_host(hostname, all=True)
+ if host_all_info:
+ tports = host_all_info[0]['target_port']
+ host_facts[hostname] = {
+ 'hgroup': hosts[host]['hgroup'],
+ 'iqn': hosts[host]['iqn'],
+ 'wwn': hosts[host]['wwn'],
+ 'personality': array.get_host(hostname,
+ personality=True)['personality'],
+ 'target_port': tports
+ }
+ if NVME_API_VERSION in api_version:
+ host_facts[hostname]['nqn'] = hosts[host]['nqn']
+ if PREFERRED_API_VERSION in api_version:
+ hosts = array.list_hosts(preferred_array=True)
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array']
+ return host_facts
+
+
+def generate_pgroups_dict(array):
+ pgroups_facts = {}
+ pgroups = array.list_pgroups()
+ for pgroup in range(0, len(pgroups)):
+ protgroup = pgroups[pgroup]['name']
+ pgroups_facts[protgroup] = {
+ 'hgroups': pgroups[pgroup]['hgroups'],
+ 'hosts': pgroups[pgroup]['hosts'],
+ 'source': pgroups[pgroup]['source'],
+ 'targets': pgroups[pgroup]['targets'],
+ 'volumes': pgroups[pgroup]['volumes'],
+ }
+ prot_sched = array.get_pgroup(protgroup, schedule=True)
+ prot_reten = array.get_pgroup(protgroup, retention=True)
+ if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
+ pgroups_facts[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
+ pgroups_facts[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
+ pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
+ pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
+ pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at']
+ pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at']
+ pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
+ pgroups_facts[protgroup]['per_day'] = prot_reten['per_day']
+ pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day']
+ pgroups_facts[protgroup]['target_days'] = prot_reten['target_days']
+ pgroups_facts[protgroup]['days'] = prot_reten['days']
+ pgroups_facts[protgroup]['all_for'] = prot_reten['all_for']
+ pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for']
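+ # Protection group names containing ':' identify groups replicated from a
+ # remote array (e.g. 'array1:pgroup1'), so snapshot transfer details are
+ # only collected for those.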
+ if ":" in protgroup:
+ snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
+ pgroups_facts[protgroup]['snaps'] = {}
+ for snap_transfer in range(0, len(snap_transfers)):
+ snap = snap_transfers[snap_transfer]['name']
+ pgroups_facts[protgroup]['snaps'][snap] = {
+ 'created': snap_transfers[snap_transfer]['created'],
+ 'started': snap_transfers[snap_transfer]['started'],
+ 'completed': snap_transfers[snap_transfer]['completed'],
+ 'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
+ 'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
+ 'progress': snap_transfers[snap_transfer]['progress'],
+ }
+ return pgroups_facts
+
+
+def generate_pods_dict(array):
+ pods_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ pods = array.list_pods()
+ for pod in range(0, len(pods)):
+ acpod = pods[pod]['name']
+ pods_facts[acpod] = {
+ 'source': pods[pod]['source'],
+ 'arrays': pods[pod]['arrays'],
+ }
+ return pods_facts
+
+
+def generate_conn_array_dict(array):
+ conn_array_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CONN_STATUS_API_VERSION in api_version:
+ carrays = array.list_connected_arrays()
+ for carray in range(0, len(carrays)):
+ arrayname = carrays[carray]['array_name']
+ conn_array_facts[arrayname] = {
+ 'array_id': carrays[carray]['id'],
+ 'throttled': carrays[carray]['throtled'],
+ 'version': carrays[carray]['version'],
+ 'type': carrays[carray]['type'],
+ 'mgmt_ip': carrays[carray]['management_address'],
+ 'repl_ip': carrays[carray]['replication_address'],
+ }
+ if CONN_STATUS_API_VERSION in api_version:
+ conn_array_facts[arrayname]['status'] = carrays[carray]['status']
+ return conn_array_facts
+
+
+def generate_apps_dict(array):
+ apps_facts = {}
+ api_version = array._list_available_rest_versions()
+ if SAN_REQUIRED_API_VERSION in api_version:
+ apps = array.list_apps()
+ for app in range(0, len(apps)):
+ appname = apps[app]['name']
+ apps_facts[appname] = {
+ 'version': apps[app]['version'],
+ 'status': apps[app]['status'],
+ 'description': apps[app]['description'],
+ }
+ return apps_facts
+
+
+def generate_vgroups_dict(array):
+ vgroups_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ vgroups = array.list_vgroups()
+ for vgroup in range(0, len(vgroups)):
+ virtgroup = vgroups[vgroup]['name']
+ vgroups_facts[virtgroup] = {
+ 'volumes': vgroups[vgroup]['volumes'],
+ }
+ return vgroups_facts
+
+
+def generate_nfs_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ offload = array.list_nfs_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'mount_point': offload[target]['mount_point'],
+ 'protocol': offload[target]['protocol'],
+ 'mount_options': offload[target]['mount_options'],
+ 'address': offload[target]['address'],
+ }
+ return offload_facts
+
+
+def generate_s3_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if S3_REQUIRED_API_VERSION in api_version:
+ offload = array.list_s3_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'bucket': offload[target]['bucket'],
+ 'protocol': offload[target]['protocol'],
+ 'access_key_id': offload[target]['access_key_id'],
+ }
+ return offload_facts
+
+
+def generate_hgroups_dict(array):
+ hgroups_facts = {}
+ hgroups = array.list_hgroups()
+ for hgroup in range(0, len(hgroups)):
+ hostgroup = hgroups[hgroup]['name']
+ hgroups_facts[hostgroup] = {
+ 'hosts': hgroups[hgroup]['hosts'],
+ 'pgs': [],
+ 'vols': [],
+ }
+ pghgroups = array.list_hgroups(protect=True)
+ for pghg in range(0, len(pghgroups)):
+ pgname = pghgroups[pghg]['name']
+ hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group'])
+ volhgroups = array.list_hgroups(connect=True)
+ for pgvol in range(0, len(volhgroups)):
+ pgname = volhgroups[pgvol]['name']
+ volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']]
+ hgroups_facts[pgname]['vols'].append(volpgdict)
+ return hgroups_facts
+
+
+def generate_interfaces_dict(array):
+ api_version = array._list_available_rest_versions()
+ int_facts = {}
+ ports = array.list_ports()
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
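+ # Whichever identifier the port reports (WWN, IQN, or, on newer API
+ # versions, NQN) becomes the fact value; later checks overwrite earlier ones.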
+ if ports[port]['wwn']:
+ int_facts[int_name] = ports[port]['wwn']
+ if ports[port]['iqn']:
+ int_facts[int_name] = ports[port]['iqn']
+ if NVME_API_VERSION in api_version:
+ if ports[port]['nqn']:
+ int_facts[int_name] = ports[port]['nqn']
+ return int_facts
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ array = get_system(module)
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
+ 'hosts', 'admins', 'volumes', 'snapshots', 'pods',
+ 'vgroups', 'offload', 'apps', 'arrays')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(array)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(array)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(array)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(array)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(array)
+ if 'subnet' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(array)
+ if 'interfaces' in subset or 'all' in subset:
+ facts['interfaces'] = generate_interfaces_dict(array)
+ if 'hosts' in subset or 'all' in subset:
+ facts['hosts'] = generate_host_dict(array)
+ if 'volumes' in subset or 'all' in subset:
+ facts['volumes'] = generate_vol_dict(array)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(array)
+ if 'hgroups' in subset or 'all' in subset:
+ facts['hgroups'] = generate_hgroups_dict(array)
+ if 'pgroups' in subset or 'all' in subset:
+ facts['pgroups'] = generate_pgroups_dict(array)
+ if 'pods' in subset or 'all' in subset:
+ facts['pods'] = generate_pods_dict(array)
+ if 'admins' in subset or 'all' in subset:
+ facts['admins'] = generate_admin_dict(array)
+ if 'vgroups' in subset or 'all' in subset:
+ facts['vgroups'] = generate_vgroups_dict(array)
+ if 'offload' in subset or 'all' in subset:
+ facts['nfs_offload'] = generate_nfs_offload_dict(array)
+ facts['s3_offload'] = generate_s3_offload_dict(array)
+ if 'apps' in subset or 'all' in subset:
+ facts['apps'] = generate_apps_dict(array)
+ if 'arrays' in subset or 'all' in subset:
+ facts['arrays'] = generate_conn_array_dict(array)
+
+ module.exit_json(ansible_facts={'ansible_purefa_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py
new file mode 100644
index 00000000..8c5a40c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py
@@ -0,0 +1,652 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefb_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flashblade.purefb_info) instead.
+short_description: Collect facts from Pure Storage FlashBlade
+description:
+ - Collect facts information from a Pure Storage FlashBlade running the
+ Purity//FB operating system. By default, the module will collect basic
+ fact information including hosts, host groups, protection
+ groups and volume counts. Additional fact information can be collected
+ based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnets, lags, filesystems and snapshots.
+ required: false
+ type: list
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fb
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefb_facts:
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect configuration and capacity facts
+ community.general.purefb_facts:
+ gather_subset:
+ - config
+ - capacity
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect all facts
+ community.general.purefb_facts:
+ gather_subset:
+ - all
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
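+
+# A hypothetical follow-up task (not part of the module's official examples)
+# showing how the collected filesystem facts might be inspected afterwards:
+- name: List the names of all collected filesystems
+ ansible.builtin.debug:
+ msg: "{{ ansible_purefb_facts.filesystems.keys() | list }}"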
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {
+ "aggregate": {
+ "data_reduction": 1.1179228,
+ "snapshots": 0,
+ "total_physical": 17519748439,
+ "unique": 17519748439,
+ "virtual": 19585726464
+ },
+ "file-system": {
+ "data_reduction": 1.3642412,
+ "snapshots": 0,
+ "total_physical": 4748219708,
+ "unique": 4748219708,
+ "virtual": 6477716992
+ },
+ "object-store": {
+ "data_reduction": 1.0263462,
+ "snapshots": 0,
+ "total_physical": 12771528731,
+ "unique": 12771528731,
+ "virtual": 6477716992
+ },
+ "total": 83359896948925
+ },
+ "config": {
+ "alert_watchers": {
+ "enabled": true,
+ "name": "notify@acmestorage.com"
+ },
+ "array_management": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "management",
+ "services": [
+ "management"
+ ],
+ "uris": []
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "demo.acmestorage.com",
+ "name": "demo-fb-1",
+ "nameservers": [
+ "8.8.8.8"
+ ],
+ "search": [
+ "demo.acmestorage.com"
+ ]
+ },
+ "nfs_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "nfs",
+ "services": [
+ "nfs"
+ ],
+ "uris": []
+ },
+ "ntp": [
+ "0.ntp.pool.org"
+ ],
+ "smb_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "smb",
+ "services": [
+ "smb"
+ ],
+ "uris": []
+ },
+ "smtp": {
+ "name": "demo-fb-1",
+ "relay_host": null,
+ "sender_domain": "acmestorage.com"
+ },
+ "ssl_certs": {
+ "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
+ "common_name": "Acme Storage",
+ "country": "US",
+ "email": null,
+ "intermediate_certificate": null,
+ "issued_by": "Acme Storage",
+ "issued_to": "Acme Storage",
+ "key_size": 4096,
+ "locality": null,
+ "name": "global",
+ "organization": "Acme Storage",
+ "organizational_unit": "Acme Storage",
+ "passphrase": null,
+ "private_key": null,
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "1508433967000",
+ "valid_to": "2458833967000"
+ }
+ },
+ "default": {
+ "blades": 15,
+ "buckets": 7,
+ "filesystems": 2,
+ "flashblade_name": "demo-fb-1",
+ "object_store_accounts": 1,
+ "object_store_users": 1,
+ "purity_version": "2.2.0",
+ "snapshots": 1,
+ "total_capacity": 83359896948925
+ },
+ "filesystems": {
+ "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": true,
+ "nfs_rules": "*(rw,no_root_squash)",
+ "provisioned": 21474836480,
+ "snapshot_enabled": false
+ },
+ "z": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": false,
+ "provisioned": 1073741824,
+ "snapshot_enabled": false
+ }
+ },
+ "lag": {
+ "uplink": {
+ "lag_speed": 0,
+ "port_speed": 40000000000,
+ "ports": [
+ {
+ "name": "CH1.FM1.ETH1.1"
+ },
+ {
+ "name": "CH1.FM1.ETH1.2"
+ }
+ ],
+ "status": "healthy"
+ }
+ },
+ "network": {
+ "fm1.admin0": {
+ "address": "10.10.100.6",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "fm2.admin0": {
+ "address": "10.10.100.7",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "nfs1": {
+ "address": "10.10.100.4",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "data"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "vir0": {
+ "address": "10.10.100.5",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ }
+ },
+ "performance": {
+ "aggregate": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "http": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "nfs": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "s3": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ }
+ },
+ "snapshots": {
+ "z.188": {
+ "destroyed": false,
+ "source": "z",
+ "source_destroyed": false,
+ "suffix": "188"
+ }
+ },
+ "subnet": {
+ "new-mgmt": {
+ "gateway": "10.10.100.1",
+ "interfaces": [
+ {
+ "name": "fm1.admin0"
+ },
+ {
+ "name": "fm2.admin0"
+ },
+ {
+ "name": "nfs1"
+ },
+ {
+ "name": "vir0"
+ }
+ ],
+ "lag": "uplink",
+ "mtu": 1500,
+ "prefix": "10.10.100.0/24",
+ "services": [
+ "data",
+ "management",
+ "support"
+ ],
+ "vlan": 2200
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+HARD_LIMIT_API_VERSION = '1.4'
+
+
+def generate_default_dict(blade):
+ default_facts = {}
+ defaults = blade.arrays.list_arrays().items[0]
+ default_facts['flashblade_name'] = defaults.name
+ default_facts['purity_version'] = defaults.version
+ default_facts['filesystems'] = \
+ len(blade.file_systems.list_file_systems().items)
+ default_facts['snapshots'] = \
+ len(blade.file_system_snapshots.list_file_system_snapshots().items)
+ default_facts['buckets'] = len(blade.buckets.list_buckets().items)
+ default_facts['object_store_users'] = \
+ len(blade.object_store_users.list_object_store_users().items)
+ default_facts['object_store_accounts'] = \
+ len(blade.object_store_accounts.list_object_store_accounts().items)
+ default_facts['blades'] = len(blade.blade.list_blades().items)
+ default_facts['total_capacity'] = \
+ blade.arrays.list_arrays_space().items[0].capacity
+ return default_facts
+
+
+def generate_perf_dict(blade):
+ perf_facts = {}
+ total_perf = blade.arrays.list_arrays_performance()
+ http_perf = blade.arrays.list_arrays_performance(protocol='http')
+ s3_perf = blade.arrays.list_arrays_performance(protocol='s3')
+ nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs')
+ perf_facts['aggregate'] = {
+ 'bytes_per_op': total_perf.items[0].bytes_per_op,
+ 'bytes_per_read': total_perf.items[0].bytes_per_read,
+ 'bytes_per_write': total_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': total_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': total_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': total_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': total_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': total_perf.items[0].writes_per_sec,
+ }
+ perf_facts['http'] = {
+ 'bytes_per_op': http_perf.items[0].bytes_per_op,
+ 'bytes_per_read': http_perf.items[0].bytes_per_read,
+ 'bytes_per_write': http_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': http_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': http_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': http_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': http_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': http_perf.items[0].writes_per_sec,
+ }
+ perf_facts['s3'] = {
+ 'bytes_per_op': s3_perf.items[0].bytes_per_op,
+ 'bytes_per_read': s3_perf.items[0].bytes_per_read,
+ 'bytes_per_write': s3_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': s3_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': s3_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': s3_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': s3_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': s3_perf.items[0].writes_per_sec,
+ }
+ perf_facts['nfs'] = {
+ 'bytes_per_op': nfs_perf.items[0].bytes_per_op,
+ 'bytes_per_read': nfs_perf.items[0].bytes_per_read,
+ 'bytes_per_write': nfs_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': nfs_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': nfs_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': nfs_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': nfs_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': nfs_perf.items[0].writes_per_sec,
+ }
+
+ return perf_facts
+
+
+def generate_config_dict(blade):
+ config_facts = {}
+ config_facts['dns'] = blade.dns.list_dns().items[0].to_dict()
+ config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
+ config_facts['alert_watchers'] = \
+ blade.alert_watchers.list_alert_watchers().items[0].to_dict()
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ config_facts['array_management'] = \
+ blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
+ config_facts['directory_service_roles'] = {}
+ roles = blade.directory_services.list_directory_services_roles()
+ for role in range(0, len(roles.items)):
+ role_name = roles.items[role].name
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles.items[role].group,
+ 'group_base': roles.items[role].group_base
+ }
+ config_facts['nfs_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
+ config_facts['smb_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
+ config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
+ config_facts['ssl_certs'] = \
+ blade.certificates.list_certificates().items[0].to_dict()
+ return config_facts
+
+
+def generate_subnet_dict(blade):
+ sub_facts = {}
+ subnets = blade.subnets.list_subnets()
+ for sub in range(0, len(subnets.items)):
+ sub_name = subnets.items[sub].name
+ if subnets.items[sub].enabled:
+ sub_facts[sub_name] = {
+ 'gateway': subnets.items[sub].gateway,
+ 'mtu': subnets.items[sub].mtu,
+ 'vlan': subnets.items[sub].vlan,
+ 'prefix': subnets.items[sub].prefix,
+ 'services': subnets.items[sub].services,
+ }
+ sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
+ sub_facts[sub_name]['interfaces'] = []
+ for iface in range(0, len(subnets.items[sub].interfaces)):
+ sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
+ return sub_facts
+
+
+def generate_lag_dict(blade):
+ lag_facts = {}
+ groups = blade.link_aggregation_groups.list_link_aggregation_groups()
+ for groupcnt in range(0, len(groups.items)):
+ lag_name = groups.items[groupcnt].name
+ lag_facts[lag_name] = {
+ 'lag_speed': groups.items[groupcnt].lag_speed,
+ 'port_speed': groups.items[groupcnt].port_speed,
+ 'status': groups.items[groupcnt].status,
+ }
+ lag_facts[lag_name]['ports'] = []
+ for port in range(0, len(groups.items[groupcnt].ports)):
+ lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
+ return lag_facts
+
+
+def generate_network_dict(blade):
+ net_facts = {}
+ ports = blade.network_interfaces.list_network_interfaces()
+ for portcnt in range(0, len(ports.items)):
+ int_name = ports.items[portcnt].name
+ if ports.items[portcnt].enabled:
+ net_facts[int_name] = {
+ 'type': ports.items[portcnt].type,
+ 'mtu': ports.items[portcnt].mtu,
+ 'vlan': ports.items[portcnt].vlan,
+ 'address': ports.items[portcnt].address,
+ 'services': ports.items[portcnt].services,
+ 'gateway': ports.items[portcnt].gateway,
+ 'netmask': ports.items[portcnt].netmask,
+ }
+ return net_facts
+
+
+def generate_capacity_dict(blade):
+ capacity_facts = {}
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type='file-system')
+ object_cap = blade.arrays.list_arrays_space(type='object-store')
+ capacity_facts['total'] = total_cap.items[0].capacity
+ capacity_facts['aggregate'] = {
+ 'data_reduction': total_cap.items[0].space.data_reduction,
+ 'snapshots': total_cap.items[0].space.snapshots,
+ 'total_physical': total_cap.items[0].space.total_physical,
+ 'unique': total_cap.items[0].space.unique,
+ 'virtual': total_cap.items[0].space.virtual,
+ }
+ capacity_facts['file-system'] = {
+ 'data_reduction': file_cap.items[0].space.data_reduction,
+ 'snapshots': file_cap.items[0].space.snapshots,
+ 'total_physical': file_cap.items[0].space.total_physical,
+ 'unique': file_cap.items[0].space.unique,
+ 'virtual': file_cap.items[0].space.virtual,
+ }
+ capacity_facts['object-store'] = {
+ 'data_reduction': object_cap.items[0].space.data_reduction,
+ 'snapshots': object_cap.items[0].space.snapshots,
+ 'total_physical': object_cap.items[0].space.total_physical,
+ 'unique': object_cap.items[0].space.unique,
+ 'virtual': object_cap.items[0].space.virtual,
+ }
+
+ return capacity_facts
+
+
+def generate_snap_dict(blade):
+ snap_facts = {}
+ snaps = blade.file_system_snapshots.list_file_system_snapshots()
+ for snap in range(0, len(snaps.items)):
+ snapshot = snaps.items[snap].name
+ snap_facts[snapshot] = {
+ 'destroyed': snaps.items[snap].destroyed,
+ 'source': snaps.items[snap].source,
+ 'suffix': snaps.items[snap].suffix,
+ 'source_destroyed': snaps.items[snap].source_destroyed,
+ }
+ return snap_facts
+
+
+def generate_fs_dict(blade):
+ fs_facts = {}
+ fsys = blade.file_systems.list_file_systems()
+ for fsystem in range(0, len(fsys.items)):
+ share = fsys.items[fsystem].name
+ fs_facts[share] = {
+ 'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
+ 'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
+ 'provisioned': fsys.items[fsystem].provisioned,
+ 'destroyed': fsys.items[fsystem].destroyed,
+ }
+ if fsys.items[fsystem].http.enabled:
+ fs_facts[share]['http'] = fsys.items[fsystem].http.enabled
+ if fsys.items[fsystem].smb.enabled:
+ fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
+ if fsys.items[fsystem].nfs.enabled:
+ fs_facts[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
+
+ return fs_facts
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnets', 'lags',
+ 'filesystems', 'snapshots')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(blade)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(blade)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(blade)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(blade)
+ if 'lags' in subset or 'all' in subset:
+ facts['lag'] = generate_lag_dict(blade)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(blade)
+ if 'subnets' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(blade)
+ if 'filesystems' in subset or 'all' in subset:
+ facts['filesystems'] = generate_fs_dict(blade)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(blade)
+
+ module.exit_json(ansible_facts={'ansible_purefb_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py
new file mode 100644
index 00000000..ab27fd5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: "Willy Barro (@willybarro)"
+requirements: [ pushbullet.py ]
+module: pushbullet
+short_description: Sends notifications to Pushbullet
+description:
+ - This module sends push notifications via Pushbullet to channels or devices.
+options:
+ api_key:
+ type: str
+ description:
+ - Pushbullet API token.
+ required: true
+ channel:
+ type: str
+ description:
+ - The channel TAG to which you wish to broadcast a push notification,
+ as seen on the "My Channels" > "Edit your channel" page
+ at Pushbullet.
+ device:
+ type: str
+ description:
+ - The device NAME to which you wish to send a push notification,
+ as seen on the Pushbullet main page.
+ push_type:
+ type: str
+ description:
+ - The type of item you wish to push.
+ default: note
+ choices: [ "note", "link" ]
+ title:
+ type: str
+ description:
+ - Title of the notification.
+ required: true
+ body:
+ type: str
+ description:
+ - Body of the notification, for example details of the fault you are alerting about.
+ url:
+ type: str
+ description:
+ - URL field, used when I(push_type) is C(link).
+
+notes:
+ - Requires the pushbullet.py Python package on the remote host.
+ You can install it via pip with C(pip install pushbullet.py).
+ See U(https://github.com/randomchars/pushbullet.py).
+'''
+
+EXAMPLES = '''
+- name: Sends a push notification to a device
+ community.general.pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ device: "Chrome"
+ title: "You may see this on Google Chrome"
+
+- name: Sends a link to a device
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ device: Chrome
+ push_type: link
+ title: Ansible Documentation
+ body: https://docs.ansible.com/
+
+- name: Sends a push notification to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+ title: Broadcasting a message to the #my-awesome-channel folks
+
+- name: Sends a push notification with title and body to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+ title: ALERT! Signup service is down
+ body: Error rate on signup service is over 90% for more than 2 minutes
+'''
+
+import traceback
+
+PUSHBULLET_IMP_ERR = None
+try:
+ from pushbullet import PushBullet
+ from pushbullet.errors import InvalidKeyError, PushError
+except ImportError:
+ PUSHBULLET_IMP_ERR = traceback.format_exc()
+ pushbullet_found = False
+else:
+ pushbullet_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(type='str', required=True, no_log=True),
+ channel=dict(type='str', default=None),
+ device=dict(type='str', default=None),
+ push_type=dict(type='str', default="note", choices=['note', 'link']),
+ title=dict(type='str', required=True),
+ body=dict(type='str', default=None),
+ url=dict(type='str', default=None),
+ ),
+ mutually_exclusive=(
+ ['channel', 'device'],
+ ),
+ supports_check_mode=True
+ )
+
+ api_key = module.params['api_key']
+ channel = module.params['channel']
+ device = module.params['device']
+ push_type = module.params['push_type']
+ title = module.params['title']
+ body = module.params['body']
+ url = module.params['url']
+
+ if not pushbullet_found:
+ module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR)
+
+ # Init pushbullet
+ try:
+ pb = PushBullet(api_key)
+ target = None
+ except InvalidKeyError:
+ module.fail_json(msg="Invalid api_key")
+
+ # Checks for channel/device
+ if device is None and channel is None:
+ module.fail_json(msg="You need to provide a channel or a device.")
+
+ # Search for given device
+ if device is not None:
+ devices_by_nickname = {}
+ for d in pb.devices:
+ devices_by_nickname[d.nickname] = d
+
+ if device in devices_by_nickname:
+ target = devices_by_nickname[device]
+ else:
+ module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))
+
+ # Search for given channel
+ if channel is not None:
+ channels_by_tag = {}
+ for c in pb.channels:
+ channels_by_tag[c.channel_tag] = c
+
+ if channel in channels_by_tag:
+ target = channels_by_tag[channel]
+ else:
+ module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))
+
+ # If in check mode, exit saying that we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False, msg="OK")
+
+ # Send push notification
+ try:
+ if push_type == "link":
+ target.push_link(title, url, body)
+ else:
+ target.push_note(title, body)
+ module.exit_json(changed=False, msg="OK")
+ except PushError as e:
+ module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
+
+ module.fail_json(msg="An unknown error has occurred")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py
new file mode 100644
index 00000000..7f73592a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
+# Copyright (c) 2019, Bernd Arnold <wopfel@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pushover
+short_description: Send notifications via U(https://pushover.net)
+description:
+ - Send notifications via Pushover to a subscriber list of devices and
+ email addresses. Requires the Pushover app on devices.
+notes:
+ - You will require a pushover.net account to use this module. However, no
+ account is required to receive messages.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ app_token:
+ type: str
+ description:
+ - Pushover-issued token identifying your Pushover app.
+ required: true
+ user_key:
+ type: str
+ description:
+ - Pushover-issued authentication key for your user.
+ required: true
+ title:
+ type: str
+ description:
+ - Message title.
+ required: false
+ pri:
+ type: str
+ description:
+ - Message priority (see U(https://pushover.net) for details).
+ required: false
+ default: '0'
+ choices: [ '-2', '-1', '0', '1', '2' ]
+ device:
+ type: str
+ description:
+ - A device the message should be sent to. Multiple devices can be specified, separated by a comma.
+ required: false
+ version_added: 1.2.0
+
+author:
+ - "Jim Richardson (@weaselkeeper)"
+ - "Bernd Arnold (@wopfel)"
+'''
+
+EXAMPLES = '''
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} is acting strange ...'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ title: 'Alert!'
+ msg: '{{ inventory_hostname }} has exploded in flames. It is now time to panic'
+ pri: 1
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net to a specific device
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} has been lost somewhere'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ device: admins-iPhone
+ delegate_to: localhost
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class Pushover(object):
+ ''' Instantiates a pushover object, use it to send notifications '''
+ base_uri = 'https://api.pushover.net'
+
+ def __init__(self, module, user, token):
+ self.module = module
+ self.user = user
+ self.token = token
+
+ def run(self, priority, msg, title, device):
+ ''' Build the request and send the notification to the Pushover API. '''
+
+ url = '%s/1/messages.json' % (self.base_uri)
+
+ # parse config
+ options = dict(user=self.user,
+ token=self.token,
+ priority=priority,
+ message=msg)
+
+ if title is not None:
+ options = dict(options,
+ title=title)
+
+ if device is not None:
+ options = dict(options,
+ device=device)
+
+ data = urlencode(options)
+
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
+ if info['status'] != 200:
+ raise Exception(info)
+
+ return r.read()
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ title=dict(type='str'),
+ msg=dict(required=True),
+ app_token=dict(required=True, no_log=True),
+ user_key=dict(required=True, no_log=True),
+ pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
+ device=dict(type='str'),
+ ),
+ )
+
+ msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
+ try:
+ response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device'])
+ except Exception:
+ module.fail_json(msg='Unable to send msg via pushover')
+
+ module.exit_json(msg='message sent successfully: %s' % response, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
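+
+# A hypothetical follow-up (not part of the module's official examples)
+# showing how the returned lists might gate a play:
+- name: Check for modern boto3 and register the result
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ register: py_reqs
+
+- name: Fail early when required libraries are missing or too old
+ ansible.builtin.fail:
+ msg: "Problems: {{ py_reqs.not_found + (py_reqs.mismatched.keys() | list) }}"
+ when: (py_reqs.not_found | length > 0) or (py_reqs.mismatched | length > 0)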
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
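+# For illustration: a parsed requirement such as 'botocore<2' is evaluated
+# below roughly as operations['<'](LooseVersion('1.10.60'), LooseVersion('2')),
+# which is True when the installed version satisfies the constraint.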
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
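+ # Illustrative parses with this pattern (hypothetical inputs):
+ # 'boto3' -> ('boto3', None, None)
+ # 'boto3==1.6.1' -> ('boto3', '==', '1.6.1')
+ # 'requests>2' -> ('requests', '>', '2')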
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+ results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. A bare module name like
+ I(ansible), a module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
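+
+# A hedged sketch, not among the original examples: the three supported
+# specifier styles side by side; the package names are illustrative only.
+- name: Check a bare name, an exact pin, and a partial version
+ community.general.python_requirements_info:
+ dependencies:
+ - ansible
+ - boto3==1.6.1
+ - requests>2
+ register: py_req_info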
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
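+ # Groups: name, optional operator, optional version; for example
+ # 'boto3==1.6.1' parses to ('boto3', '==', '1.6.1') and a bare
+ # 'ansible' parses to ('ansible', None, None)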
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
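+ # LooseVersion compares numerically component by component, so for
+ # example LooseVersion('1.10.60') >= LooseVersion('1.6') holds
+ # (10 > 6), unlike a plain string comparison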
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+ results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py
new file mode 100644
index 00000000..9f7df5c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py
@@ -0,0 +1,897 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax
+short_description: create / delete an instance in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud instance and optionally
+ waits for it to be 'running'.
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number with the name of the
+ created servers. Only applicable when used with the I(group) attribute
+ or meta key.
+ type: bool
+ default: 'yes'
+ boot_from_volume:
+ description:
+ - Whether or not to boot the instance from a Cloud Block Storage volume.
+ If C(yes) and I(image) is specified a new volume will be created at
+ boot time. I(boot_volume_size) is required with I(image) to create a
+ new volume at boot time.
+ type: bool
+ default: 'no'
+ boot_volume:
+ type: str
+ description:
+ - Cloud Block Storage ID or Name to use as the boot volume of the
+ instance
+ boot_volume_size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes. This is only required with
+ I(image) and I(boot_from_volume).
+ default: 100
+ boot_volume_terminate:
+ description:
+ - Whether the I(boot_volume) or newly created volume from I(image) will
+ be terminated when the server is terminated
+ type: bool
+ default: 'no'
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ count:
+ type: int
+ description:
+ - number of instances to launch
+ default: 1
+ count_offset:
+ type: int
+ description:
+ - the number to start counting at
+ default: 1
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+ - If not specified it will assume the value C(auto).
+ choices:
+ - auto
+ - manual
+ exact_count:
+ description:
+ - Explicitly ensure an exact count of instances, used with
+ state=present. If specified as C(yes) and I(count) is less than
+ the servers matched, servers will be deleted to match the count. If
+ the number of matched servers is fewer than specified in I(count)
+ additional servers will be added.
+ type: bool
+ default: 'no'
+ extra_client_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating the cloudservers
+ client. This is considered an advanced option, use it wisely and
+ with caution.
+ extra_create_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating a new server.
+ This is considered an advanced option, use it wisely and with caution.
+ files:
+ type: dict
+ description:
+ - Files to insert into the instance, as a hash of C(remotepath: localpath); the contents of each local file are uploaded to the remote path
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ group:
+ type: str
+ description:
+ - host group to assign to server, is also used for idempotent operations
+ to ensure a specific number of instances
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name).
+ With I(boot_from_volume), a Cloud Block Storage volume will be created
+ with this image
+ instance_ids:
+ type: list
+ description:
+ - list of instance ids, currently only used when state='absent' to
+ remove instances
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ aliases:
+ - keypair
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the instance
+ networks:
+ type: list
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+ - Data to be uploaded to the servers config drive. This option implies
+ I(config_drive). Can be a file path or a string
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Jesse Keating (@omgjlk)"
+ - "Matt Martz (@sivel)"
+notes:
+ - I(exact_count) can be "destructive" if the number of running servers in
+ the I(group) is larger than that specified in I(count). In such a case, the
+ I(state) is effectively set to C(absent) and the extra servers are deleted.
+ In the case of deletion, the returned data structure will have C(action)
+ set to C(delete), and the oldest servers in the group will be deleted.
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Server
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: rax-test1
+ flavor: 5
+ image: b11d9567-e412-4255-96b9-bd63ab23bcfe
+ key_name: my_rackspace_key
+ files:
+ /root/test.txt: /home/localuser/test.txt
+ wait: yes
+ state: present
+ networks:
+ - private
+ - public
+ register: rax
+
+- name: Build an exact count of cloud servers with incremented names
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build requests
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: test%03d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ state: present
+ count: 10
+ count_offset: 10
+ exact_count: yes
+ group: test
+ wait: yes
+ register: rax
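+
+# A hedged sketch, not among the original examples: re-running the same
+# group with a smaller count and exact_count deletes the oldest extra
+# servers, as described in the notes above.
+- name: Scale the group back down to five servers
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server remove requests
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: test%03d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ state: present
+ count: 5
+ count_offset: 10
+ exact_count: yes
+ group: test
+ wait: yes
+ register: rax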
+'''
+
+import json
+import os
+import re
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume,
+ rax_find_image, rax_find_network, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.six import string_types
+
+
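+# A server booted from a volume carries no image of its own, so the
+# matching image id must be recovered from the volume's image metadata;
+# a return value of None means the server does not match.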
+def rax_find_server_image(module, server, image, boot_volume):
+ if not image and boot_volume:
+ vol = rax_find_bootable_volume(module, pyrax, server,
+ exit=False)
+ if not vol:
+ return None
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if vol_image_id:
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if server_image:
+ server.image = dict(id=server_image)
+
+ # Match image IDs taking care of boot from volume
+ if image and not server.image:
+ vol = rax_find_bootable_volume(module, pyrax, server)
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if not vol_image_id:
+ return None
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if image != server_image:
+ return None
+
+ server.image = dict(id=server_image)
+ elif image and server.image['id'] != image:
+ return None
+
+ return server.image
+
+
+def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
+ files=None, wait=True, wait_timeout=300, disk_config=None,
+ group=None, nics=None, extra_create_args=None, user_data=None,
+ config_drive=False, existing=None, block_device_mapping_v2=None):
+ names = [] if names is None else names
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ nics = [] if nics is None else nics
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+ existing = [] if existing is None else existing
+ block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2
+
+ cs = pyrax.cloudservers
+ changed = False
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(os.path.expanduser(user_data)):
+ try:
+ user_data = os.path.expanduser(user_data)
+ f = open(user_data)
+ user_data = f.read()
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % user_data)
+
+ # Handle the file contents
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ fileobj = open(lpath, 'r')
+ files[rpath] = fileobj.read()
+ fileobj.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % lpath)
+ try:
+ servers = []
+ bdmv2 = block_device_mapping_v2
+ for name in names:
+ servers.append(cs.servers.create(name=name, image=image,
+ flavor=flavor, meta=meta,
+ key_name=key_name,
+ files=files, nics=nics,
+ disk_config=disk_config,
+ config_drive=config_drive,
+ userdata=user_data,
+ block_device_mapping_v2=bdmv2,
+ **extra_create_args))
+ except Exception as e:
+ msg = str(e) or repr(e)
+ module.fail_json(msg=msg)
+ else:
+ changed = True
+
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+
+ if all(s.status in FINAL_STATUSES for s in servers):
+ break
+ time.sleep(5)
+
+ success = []
+ error = []
+ timeout = []
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+ instance = rax_to_dict(server, 'server')
+ if server.status == 'ACTIVE' or not wait:
+ success.append(instance)
+ elif server.status == 'ERROR':
+ error.append(instance)
+ elif wait:
+ timeout.append(instance)
+
+ untouched = [rax_to_dict(s, 'server') for s in existing]
+ instances = success + untouched
+
+ results = {
+ 'changed': changed,
+ 'action': 'create',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to build'
+ elif error:
+ results['msg'] = 'Failed to build all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
+ instance_ids = [] if instance_ids is None else instance_ids
+ kept = [] if kept is None else kept
+
+ cs = pyrax.cloudservers
+
+ changed = False
+ instances = {}
+ servers = []
+
+ for instance_id in instance_ids:
+ servers.append(cs.servers.get(instance_id))
+
+ for server in servers:
+ try:
+ server.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ else:
+ changed = True
+
+ instance = rax_to_dict(server, 'server')
+ instances[instance['id']] = instance
+
+ # If requested, wait for server deletion
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ instance_id = server.id
+ try:
+ server.get()
+ except Exception:
+ instances[instance_id]['status'] = 'DELETED'
+ instances[instance_id]['rax_status'] = 'DELETED'
+
+ if all(s['status'] in ('', 'DELETED', 'ERROR')
+ for s in instances.values()):
+ break
+
+ time.sleep(5)
+
+ timeout = [s for s in instances.values()
+ if s['status'] not in ('', 'DELETED', 'ERROR')]
+ error = [s for s in instances.values()
+ if s['status'] == 'ERROR']
+ success = [s for s in instances.values()
+ if s['status'] in ('', 'DELETED')]
+
+ instances = [rax_to_dict(s, 'server') for s in kept]
+
+ results = {
+ 'changed': changed,
+ 'action': 'delete',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to delete'
+ elif error:
+ results['msg'] = 'Failed to delete all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def cloudservers(module, state=None, name=None, flavor=None, image=None,
+ meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
+ disk_config=None, count=1, group=None, instance_ids=None,
+ exact_count=False, networks=None, count_offset=0,
+ auto_increment=False, extra_create_args=None, user_data=None,
+ config_drive=False, boot_from_volume=False,
+ boot_volume=None, boot_volume_size=None,
+ boot_volume_terminate=False):
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ instance_ids = [] if instance_ids is None else instance_ids
+ networks = [] if networks is None else networks
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+
+ cs = pyrax.cloudservers
+ cnw = pyrax.cloud_networks
+ if not cnw:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present' or (state == 'absent' and not instance_ids):
+ if not boot_from_volume and not boot_volume and not image:
+ module.fail_json(msg='image is required for the "rax" module')
+
+ for arg, value in dict(name=name, flavor=flavor).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax" module' %
+ arg)
+
+ if boot_from_volume and not image and not boot_volume:
+ module.fail_json(msg='image or boot_volume is required for the '
+ '"rax" module with boot_from_volume')
+
+ if boot_from_volume and image and not boot_volume_size:
+ module.fail_json(msg='boot_volume_size is required for the "rax" '
+ 'module with boot_from_volume and image')
+
+ if boot_from_volume and image and boot_volume:
+ image = None
+
+ servers = []
+
+ # Add the group meta key
+ if group and 'group' not in meta:
+ meta['group'] = group
+ elif 'group' in meta and group is None:
+ group = meta['group']
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ # When using state=absent with group, the absent block won't match the
+ # names properly. Use the exact_count functionality to decrease the count
+ # to the desired level
+ was_absent = False
+ if group is not None and state == 'absent':
+ exact_count = True
+ state = 'present'
+ was_absent = True
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ # act on the state
+ if state == 'present':
+ # Idempotently ensure a specific count of servers
+ if exact_count is not False:
+ # See if we can find servers that match our options
+ if group is None:
+ module.fail_json(msg='"group" must be provided when using '
+ '"exact_count"')
+
+ if auto_increment:
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+ if str(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg='%s' % e)
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
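+ # e.g. 'test%03d.example.org' becomes 'test(\d+).example.org',
+ # capturing the sequence numbers already used by group members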
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset, count_offset + count)
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ else: # Not auto incrementing
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ # available_numbers not needed here, we inspect auto_increment
+ # again later
+
+ # If state was absent but the count was changed,
+ # assume we only wanted to remove that number of instances
+ if was_absent:
+ diff = len(servers) - count
+ if diff < 0:
+ count = 0
+ else:
+ count = diff
+
+ if len(servers) > count:
+ # We have more servers than we need, set state='absent'
+ # and delete the extras, this should delete the oldest
+ state = 'absent'
+ kept = servers[:count]
+ del servers[:count]
+ instance_ids = []
+ for server in servers:
+ instance_ids.append(server.id)
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout, kept=kept)
+ elif len(servers) < count:
+ # we have fewer servers than we need
+ if auto_increment:
+ # auto incrementing server numbers
+ names = []
+ name_slice = count - len(servers)
+ numbers_to_use = available_numbers[:name_slice]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # We are not auto incrementing server numbers,
+ # create a list of 'name' that matches how many we need
+ names = [name] * (count - len(servers))
+ else:
+ # we have the right number of servers, just return info
+ # about all of the matched servers
+ instances = []
+ instance_ids = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+ instance_ids.append(server.id)
+ module.exit_json(changed=False, action=None,
+ instances=instances,
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+ else: # not called with exact_count=True
+ if group is not None:
+ if auto_increment:
+ # we are auto incrementing server numbers, but not with
+ # exact_count
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+ if str(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg='%s' % e)
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset,
+ count_offset + count + len(numbers))
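+ # the range is widened by len(numbers) so that, after removing
+ # numbers already in use, at least 'count' fresh ones remain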
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # Not auto incrementing
+ names = [name] * count
+ else:
+ # No group was specified, and not using exact_count
+ # Perform more simplistic matching
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ servers = []
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if server.metadata != meta:
+ continue
+ servers.append(server)
+
+ if len(servers) >= count:
+ # We have more servers than were requested, don't do
+ # anything. Not running with exact_count=True, so we assume
+ # more is OK
+ instances = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+
+ instance_ids = [i['id'] for i in instances]
+ module.exit_json(changed=False, action=None,
+ instances=instances, success=[], error=[],
+ timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ # We need more servers to reach our target, create names for
+ # them, we aren't performing auto_increment here
+ names = [name] * (count - len(servers))
+
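+ # Boot-from-volume servers pass the image or source volume through a
+ # v2 block device mapping instead of the plain image argument, which
+ # is why image is cleared once it has been folded into the mapping.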
+ block_device_mapping_v2 = []
+ if boot_from_volume:
+ mapping = {
+ 'boot_index': '0',
+ 'delete_on_termination': boot_volume_terminate,
+ 'destination_type': 'volume',
+ }
+ if image:
+ mapping.update({
+ 'uuid': image,
+ 'source_type': 'image',
+ 'volume_size': boot_volume_size,
+ })
+ image = None
+ elif boot_volume:
+ volume = rax_find_volume(module, pyrax, boot_volume)
+ mapping.update({
+ 'uuid': pyrax.utils.get_id(volume),
+ 'source_type': 'volume',
+ })
+ block_device_mapping_v2.append(mapping)
+
+ create(module, names=names, flavor=flavor, image=image,
+ meta=meta, key_name=key_name, files=files, wait=wait,
+ wait_timeout=wait_timeout, disk_config=disk_config, group=group,
+ nics=nics, extra_create_args=extra_create_args,
+ user_data=user_data, config_drive=config_drive,
+ existing=servers,
+ block_device_mapping_v2=block_device_mapping_v2)
+
+ elif state == 'absent':
+ if not instance_ids:
+ # We weren't given an explicit list of server IDs to delete
+ # Let's match instead
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if meta != server.metadata:
+ continue
+
+ servers.append(server)
+
+ # Build a list of server IDs to delete
+ instance_ids = []
+ for server in servers:
+ if len(instance_ids) < count:
+ instance_ids.append(server.id)
+ else:
+ break
+
+ if not instance_ids:
+ # No server IDs were matched for deletion, or no IDs were
+ # explicitly provided, just exit and don't do anything
+ module.exit_json(changed=False, action=None, instances=[],
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': [],
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ auto_increment=dict(default=True, type='bool'),
+ boot_from_volume=dict(default=False, type='bool'),
+ boot_volume=dict(type='str'),
+ boot_volume_size=dict(type='int', default=100),
+ boot_volume_terminate=dict(type='bool', default=False),
+ config_drive=dict(default=False, type='bool'),
+ count=dict(default=1, type='int'),
+ count_offset=dict(default=1, type='int'),
+ disk_config=dict(choices=['auto', 'manual']),
+ exact_count=dict(default=False, type='bool'),
+ extra_client_args=dict(type='dict', default={}),
+ extra_create_args=dict(type='dict', default={}),
+ files=dict(type='dict', default={}),
+ flavor=dict(),
+ group=dict(),
+ image=dict(),
+ instance_ids=dict(type='list'),
+ key_name=dict(aliases=['keypair']),
+ meta=dict(type='dict', default={}),
+ name=dict(),
+ networks=dict(type='list', default=['public', 'private']),
+ service=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ service = module.params.get('service')
+
+ if service is not None:
+ module.fail_json(msg='The "service" attribute has been deprecated, '
+ 'please remove "service: cloudservers" from your '
+ 'playbook pertaining to the "rax" module')
+
+ auto_increment = module.params.get('auto_increment')
+ boot_from_volume = module.params.get('boot_from_volume')
+ boot_volume = module.params.get('boot_volume')
+ boot_volume_size = module.params.get('boot_volume_size')
+ boot_volume_terminate = module.params.get('boot_volume_terminate')
+ config_drive = module.params.get('config_drive')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ exact_count = module.params.get('exact_count', False)
+ extra_client_args = module.params.get('extra_client_args')
+ extra_create_args = module.params.get('extra_create_args')
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ group = module.params.get('group')
+ image = module.params.get('image')
+ instance_ids = module.params.get('instance_ids')
+ key_name = module.params.get('key_name')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ if extra_client_args:
+ pyrax.cloudservers = pyrax.connect_to_cloudservers(
+ region=pyrax.cloudservers.client.region_name,
+ **extra_client_args)
+ client = pyrax.cloudservers.client
+ if 'bypass_url' in extra_client_args:
+ client.management_url = extra_client_args['bypass_url']
+
+ if pyrax.cloudservers is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloudservers(module, state=state, name=name, flavor=flavor,
+ image=image, meta=meta, key_name=key_name, files=files,
+ wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
+ count=count, group=group, instance_ids=instance_ids,
+ exact_count=exact_count, networks=networks,
+ count_offset=count_offset, auto_increment=auto_increment,
+ extra_create_args=extra_create_args, user_data=user_data,
+ config_drive=config_drive, boot_from_volume=boot_from_volume,
+ boot_volume=boot_volume, boot_volume_size=boot_volume_size,
+ boot_volume_terminate=boot_volume_terminate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py
new file mode 100644
index 00000000..a681feff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs
+short_description: Manipulate Rackspace Cloud Block Storage Volumes
+description:
+ - Manipulate Rackspace Cloud Block Storage Volumes
+options:
+ description:
+ type: str
+ description:
+ - Description to give the volume being created
+ image:
+ type: str
+ description:
+ - image to use for bootable volumes. Can be an C(id), C(human_id) or
+ C(name). This option requires C(pyrax>=1.9.3)
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the volume
+ name:
+ type: str
+ description:
+ - Name to give the volume being created
+ required: true
+ size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes
+ default: 100
+ snapshot_id:
+ type: str
+ description:
+ - The id of the snapshot to create the volume from
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ volume_type:
+ type: str
+ description:
+ - Type of the volume being created
+ choices:
+ - SATA
+ - SSD
+ default: SATA
+ wait:
+ description:
+ - wait for the volume to be in state 'available' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume create request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-volume
+ description: My Volume
+ volume_type: SSD
+ size: 150
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_volume
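+
+# A hedged sketch, not among the original examples: a bootable volume
+# built from an image, which per the option docs requires pyrax>=1.9.3;
+# the image name is illustrative only.
+- name: Build a bootable Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Bootable volume create request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-boot-volume
+ size: 100
+ image: ubuntu-1204-lts-precise-pangolin
+ region: DFW
+ wait: yes
+ state: present
+ register: my_boot_volume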
+'''
+
+from distutils.version import LooseVersion
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+
+
+def cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image):
+ changed = False
+ volume = None
+ instance = {}
+
+ cbs = pyrax.cloud_blockstorage
+
+ if cbs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if image:
+ # pyrax<1.9.3 did not have support for specifying an image when
+ # creating a volume which is required for bootable volumes
+ if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
+ module.fail_json(msg='Creating a bootable volume requires '
+ 'pyrax>=1.9.3')
+ image = rax_find_image(module, pyrax, image)
+
+ volume = rax_find_volume(module, pyrax, name)
+
+ if state == 'present':
+ if not volume:
+ kwargs = dict()
+ if image:
+ kwargs['image'] = image
+ try:
+ volume = cbs.create(name, size=size, volume_type=volume_type,
+ description=description,
+ metadata=meta,
+ snapshot_id=snapshot_id, **kwargs)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ else:
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(volume, interval=5,
+ attempts=attempts)
+
+ volume.get()
+ instance = rax_to_dict(volume)
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait and volume.status not in VOLUME_STATUS:
+ result['msg'] = 'Timeout waiting on %s' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if volume:
+ instance = rax_to_dict(volume)
+ try:
+ volume.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ description=dict(type='str'),
+ image=dict(type='str'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ size=dict(type='int', default=100),
+ snapshot_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ description = module.params.get('description')
+ image = module.params.get('image')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ size = module.params.get('size')
+ snapshot_id = module.params.get('snapshot_id')
+ state = module.params.get('state')
+ volume_type = module.params.get('volume_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
new file mode 100644
index 00000000..71d01620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs_attachments
+short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
+description:
+ - Manipulate Rackspace Cloud Block Storage Volume Attachments
+options:
+ device:
+ type: str
+ description:
+ - The device path to attach the volume to, e.g. /dev/xvde.
+ - Before 2.4 this was a required field. Now it can be left as null to auto-assign the device name.
+ volume:
+ type: str
+ description:
+ - Name or id of the volume to attach/detach
+ required: true
+ server:
+ type: str
+ description:
+ - Name or id of the server to attach/detach
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ wait:
+ description:
+ - wait for the volume to be in 'in-use'/'available' state before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Attach a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume attach request
+ local_action:
+ module: rax_cbs_attachments
+ credentials: ~/.raxpub
+ volume: my-volume
+ server: my-server
+ device: /dev/xvdd
+ region: DFW
+ wait: yes
+ state: present
+ register: my_volume
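+
+# A hedged sketch, not among the original examples: detaching the same
+# volume again; device is omitted, which the option docs allow for
+# auto-assignment on attach and which a detach does not need at all.
+- name: Detach a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume detach request
+ local_action:
+ module: rax_cbs_attachments
+ credentials: ~/.raxpub
+ volume: my-volume
+ server: my-server
+ region: DFW
+ wait: yes
+ state: absent
+ register: my_volume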
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES,
+ rax_argument_spec,
+ rax_find_server,
+ rax_find_volume,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout):
+ cbs = pyrax.cloud_blockstorage
+ cs = pyrax.cloudservers
+
+ if cbs is None or cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ changed = False
+ instance = {}
+
+ volume = rax_find_volume(module, pyrax, volume)
+
+ if not volume:
+ module.fail_json(msg='No matching storage volumes were found')
+
+ if state == 'present':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ changed = False
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+ else:
+ try:
+ volume.attach_to_instance(server, mountpoint=device)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ volume.get()
+
+ for key, value in vars(volume).items():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(volume, 'status', 'in-use',
+ interval=5, attempts=attempts)
+
+ volume.get()
+ result['volume'] = rax_to_dict(volume)
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ try:
+ volume.detach()
+ if wait:
+ pyrax.utils.wait_until(volume, 'status', 'available',
+ interval=3, attempts=0,
+ verbose=False)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ volume.get()
+ changed = True
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+
+ result = dict(changed=changed, volume=rax_to_dict(volume))
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ device=dict(required=False),
+ volume=dict(required=True),
+ server=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ device = module.params.get('device')
+ volume = module.params.get('volume')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py
new file mode 100644
index 00000000..5b9996cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb
+short_description: create/delete or resize a Rackspace Cloud Databases instance
+description:
+ - creates / deletes or resizes a Rackspace Cloud Databases instance
+ and optionally waits for it to be 'running'. The name option needs to be
+ unique since it's used to identify the instance.
+options:
+ name:
+ type: str
+ description:
+ - Name of the databases server instance
+ required: yes
+ flavor:
+ type: int
+ description:
+ - flavor to use for the instance, from 1 to 6 (i.e. 512MB to 16GB)
+ default: 1
+ volume:
+ type: int
+ description:
+ - Volume size of the database 1-150GB
+ default: 2
+ cdb_type:
+ type: str
+ description:
+ - type of instance (e.g. MySQL, MariaDB or Percona)
+ default: MySQL
+ aliases: ['type']
+ cdb_version:
+ type: str
+ description:
+ - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
+ - "The available choices are: C(5.1), C(5.6) and C(10)."
+ default: 5.6
+ aliases: ['version']
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Databases instance
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax_cdb
+ credentials: ~/.raxpub
+ region: IAD
+ name: db-server1
+ flavor: 1
+ volume: 2
+ cdb_type: MySQL
+ cdb_version: 5.6
+ wait: yes
+ state: present
+ register: rax_db_server
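+
+# A hedged sketch, not among the original examples: re-running with a
+# larger flavor and volume resizes the existing instance (the name
+# identifies it); the module rejects shrinking the volume.
+- name: Resize the Cloud Databases instance
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server resize request
+ local_action:
+ module: rax_cdb
+ credentials: ~/.raxpub
+ region: IAD
+ name: db-server1
+ flavor: 2
+ volume: 4
+ wait: yes
+ state: present
+ register: rax_db_server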
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_instance(name):
+
+ cdb = pyrax.cloud_databases
+ instances = cdb.list()
+ if instances:
+ for instance in instances:
+ if instance.name == name:
+ return instance
+ return False
+
+
+def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ for arg, value in dict(name=name, flavor=flavor,
+ volume=volume, type=cdb_type, version=cdb_version
+ ).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb"'
+ ' module' % arg)
+
+ if not (volume >= 1 and volume <= 150):
+ module.fail_json(msg='volume is required to be between 1 and 150')
+
+ cdb = pyrax.cloud_databases
+
+ flavors = []
+ for item in cdb.list_flavors():
+ flavors.append(item.id)
+
+ if flavor not in flavors:
+ module.fail_json(msg='nonexistent flavor reference "%s"' % str(flavor))
+
+ changed = False
+
+ instance = find_instance(name)
+
+ if not instance:
+ action = 'create'
+ try:
+ instance = cdb.create(name=name, flavor=flavor, volume=volume,
+ type=cdb_type, version=cdb_version)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ else:
+ changed = True
+
+ else:
+ action = None
+
+ if instance.volume.size != volume:
+ action = 'resize'
+ if instance.volume.size > volume:
+ module.fail_json(changed=False, action=action,
+ msg='The new volume size must be larger than '
+ 'the current volume size',
+ cdb=rax_to_dict(instance))
+ instance.resize_volume(volume)
+ changed = True
+
+ if int(instance.flavor.id) != flavor:
+ action = 'resize'
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+ instance.resize(flavor)
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'ACTIVE':
+ module.fail_json(changed=changed, action=action,
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be created' % name)
+
+ module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
+
+
+def delete_instance(module, name, wait, wait_timeout):
+
+ if not name:
+ module.fail_json(msg='name is required for the "rax_cdb" module')
+
+ changed = False
+
+ instance = find_instance(name)
+ if not instance:
+ module.exit_json(changed=False, action='delete')
+
+ try:
+ instance.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ else:
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'SHUTDOWN':
+ module.fail_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be deleted' % name)
+
+ module.exit_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance))
+
+
+def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ # act on the state
+ if state == 'present':
+ save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout)
+ elif state == 'absent':
+ delete_instance(module, name, wait, wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ flavor=dict(type='int', default=1),
+ volume=dict(type='int', default=2),
+ cdb_type=dict(type='str', default='MySQL', aliases=['type']),
+ cdb_version=dict(type='str', default='5.6', aliases=['version']),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ flavor = module.params.get('flavor')
+ volume = module.params.get('volume')
+ cdb_type = module.params.get('cdb_type')
+ cdb_version = module.params.get('cdb_version')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
new file mode 100644
index 00000000..6d3435e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_cdb_database
+short_description: 'create / delete a database in the Cloud Databases'
+description:
+ - create / delete a database in the Cloud Databases.
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: yes
+ name:
+ type: str
+ description:
+ - Name to give to the database
+ required: yes
+ character_set:
+ type: str
+ description:
+ - Set of symbols and encodings
+ default: 'utf8'
+ collate:
+ type: str
+ description:
+ - Set of rules for comparing characters in a character set
+ default: 'utf8_general_ci'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a database in Cloud Databases
+ hosts: local
+ tasks:
+ - name: Database build request
+ local_action:
+ module: rax_cdb_database
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ name: db1
+ state: present
+ register: rax_db_database
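+
+ # A hedged sketch, not among the original examples: character set and
+ # collation default to utf8/utf8_general_ci but can be set explicitly.
+ - name: Database build request with explicit collation
+ local_action:
+ module: rax_cdb_database
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ name: db2
+ character_set: latin1
+ collate: latin1_general_ci
+ state: present
+ register: rax_db_database2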
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_database(instance, name):
+ try:
+ database = instance.get_database(name)
+ except Exception:
+ return False
+
+ return database
+
+
+def save_database(module, cdb_id, name, character_set, collate):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if not database:
+ try:
+ database = instance.create_database(name=name,
+ character_set=character_set,
+ collate=collate)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='create',
+ database=rax_to_dict(database))
+
+
+def delete_database(module, cdb_id, name):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if database:
+ try:
+ database.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete',
+ database=rax_to_dict(database))
+
+
+def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
+
+ # act on the state
+ if state == 'present':
+ save_database(module, cdb_id, name, character_set, collate)
+ elif state == 'absent':
+ delete_database(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ character_set=dict(type='str', default='utf8'),
+ collate=dict(type='str', default='utf8_general_ci'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('name')
+ character_set = module.params.get('character_set')
+ collate = module.params.get('collate')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_database(module, state, cdb_id, name, character_set, collate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
new file mode 100644
index 00000000..34be49d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb_user
+short_description: create / delete a Rackspace Cloud Database user
+description:
+ - create / delete a user in the Cloud Databases.
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: yes
+ db_username:
+ type: str
+ description:
+ - Name of the database user
+ required: yes
+ db_password:
+ type: str
+ description:
+ - Database user password
+ required: yes
+ databases:
+ type: list
+ description:
+ - Name of the databases that the user can access
+ default: []
+ host:
+ type: str
+ description:
+ - Specifies the host from which a user is allowed to connect to
+ the database. Possible values are a string containing an IPv4 address
+ or "%" to allow connecting from any host
+ default: '%'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a user in Cloud Databases
+ hosts: local
+ tasks:
+ - name: User build request
+ local_action:
+ module: rax_cdb_user
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ db_username: user1
+ db_password: user1
+ databases: ['db1']
+ state: present
+ register: rax_db_user
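+
+ # A hedged sketch, not among the original examples: restricting the user
+ # to one IPv4 address instead of the default '%' wildcard host.
+ - name: User build request with a restricted host
+ local_action:
+ module: rax_cdb_user
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ db_username: user2
+ db_password: user2
+ databases: ['db1']
+ host: 10.0.0.12
+ state: present
+ register: rax_db_user2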
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_user(instance, name):
+ try:
+ user = instance.get_user(name)
+ except Exception:
+ return False
+
+ return user
+
+
+def save_user(module, cdb_id, name, password, databases, host):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user" '
+ 'module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if not user:
+ action = 'create'
+ try:
+ user = instance.create_user(name=name,
+ password=password,
+ database_names=databases,
+ host=host)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ else:
+ changed = True
+ else:
+ action = 'update'
+
+ if user.host != host:
+ changed = True
+
+ user.update(password=password, host=host)
+
+ former_dbs = set([item.name for item in user.list_user_access()])
+ databases = set(databases)
+
+ if databases != former_dbs:
+ try:
+ revoke_dbs = [db for db in former_dbs if db not in databases]
+ user.revoke_user_access(db_names=revoke_dbs)
+
+ new_dbs = [db for db in databases if db not in former_dbs]
+ user.grant_user_access(db_names=new_dbs)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
+
+
+def delete_user(module, cdb_id, name):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user"'
+ ' module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if user:
+ try:
+ user.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete')
+
+
+def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
+
+ # act on the state
+ if state == 'present':
+ save_user(module, cdb_id, name, password, databases, host)
+ elif state == 'absent':
+ delete_user(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ db_username=dict(type='str', required=True),
+ db_password=dict(type='str', required=True, no_log=True),
+ databases=dict(type='list', default=[]),
+ host=dict(type='str', default='%'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('db_username')
+ password = module.params.get('db_password')
+ databases = module.params.get('databases')
+ host = to_text(module.params.get('host'), errors='surrogate_or_strict')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_user(module, state, cdb_id, name, password, databases, host)
+
+
+if __name__ == '__main__':
+ main()
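
The grant/revoke reconciliation in save_user() reduces to set arithmetic: whatever is in the old access list but not the requested one is revoked, and the reverse is granted. The same computation in isolation, with illustrative names:

    def reconcile_grants(current, desired):
        """Return (revoke, grant) lists, mirroring save_user's set logic."""
        current, desired = set(current), set(desired)
        revoke = sorted(current - desired)  # passed to revoke_user_access()
        grant = sorted(desired - current)   # passed to grant_user_access()
        return revoke, grant


    # User currently has db1/db2; the play asks for db2/db3.
    print(reconcile_grants(['db1', 'db2'], ['db2', 'db3']))
    # -> (['db1'], ['db3'])
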
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py
new file mode 100644
index 00000000..5ff1e314
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb
+short_description: create / delete a load balancer in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud load balancer.
+options:
+ algorithm:
+ type: str
+ description:
+ - algorithm for the balancer being created
+ choices:
+ - RANDOM
+ - LEAST_CONNECTIONS
+ - ROUND_ROBIN
+ - WEIGHTED_LEAST_CONNECTIONS
+ - WEIGHTED_ROUND_ROBIN
+ default: LEAST_CONNECTIONS
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the load balancer
+ required: yes
+ port:
+ type: int
+ description:
+ - Port for the balancer being created
+ default: 80
+ protocol:
+ type: str
+ description:
+ - Protocol for the balancer being created
+ choices:
+ - DNS_TCP
+ - DNS_UDP
+ - FTP
+ - HTTP
+ - HTTPS
+ - IMAPS
+ - IMAPv4
+ - LDAP
+ - LDAPS
+ - MYSQL
+ - POP3
+ - POP3S
+ - SMTP
+ - TCP
+ - TCP_CLIENT_FIRST
+ - UDP
+ - UDP_STREAM
+ - SFTP
+ default: HTTP
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ timeout:
+ type: int
+ description:
+ - timeout for communication between the balancer and the node
+ default: 30
+ type:
+ type: str
+ description:
+ - type of interface for the balancer being created
+ choices:
+ - PUBLIC
+ - SERVICENET
+ default: PUBLIC
+ vip_id:
+ type: str
+ description:
+ - Virtual IP ID to use when creating the load balancer for purposes of
+ sharing an IP with another load balancer of another protocol
+ wait:
+ description:
+ - wait for the balancer to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Load Balancer
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Balancer create request
+ local_action:
+ module: rax_clb
+ credentials: ~/.raxpub
+ name: my-lb
+ port: 8080
+ protocol: HTTP
+ type: SERVICENET
+ timeout: 30
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_lb
+'''
+
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS,
+ CLB_PROTOCOLS,
+ rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id):
+ if int(timeout) < 30:
+ module.fail_json(msg='"timeout" must be greater than or equal to 30')
+
+ changed = False
+ balancers = []
+
+ clb = pyrax.cloud_loadbalancers
+ if not clb:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ balancer_list = clb.list()
+ while balancer_list:
+ retrieved = clb.list(marker=balancer_list.pop().id)
+ balancer_list.extend(retrieved)
+ if len(retrieved) < 2:
+ break
+
+ for balancer in balancer_list:
+ if name != balancer.name and name != balancer.id:
+ continue
+
+ balancers.append(balancer)
+
+ if len(balancers) > 1:
+ module.fail_json(msg='Multiple Load Balancers were matched by name, '
+ 'try using the Load Balancer ID instead')
+
+ if state == 'present':
+ if isinstance(meta, dict):
+ metadata = [dict(key=k, value=v) for k, v in meta.items()]
+
+ if not balancers:
+ try:
+ virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
+ balancer = clb.create(name, metadata=metadata, port=port,
+ algorithm=algorithm, protocol=protocol,
+ timeout=timeout, virtual_ips=virtual_ips)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ balancer = balancers[0]
+ setattr(balancer, 'metadata',
+ [dict(key=k, value=v) for k, v in
+ balancer.get_metadata().items()])
+ atts = {
+ 'name': name,
+ 'algorithm': algorithm,
+ 'port': port,
+ 'protocol': protocol,
+ 'timeout': timeout
+ }
+ for att, value in atts.items():
+ current = getattr(balancer, att)
+ if current != value:
+ changed = True
+
+ if changed:
+ balancer.update(**atts)
+
+ if balancer.metadata != metadata:
+ balancer.set_metadata(meta)
+ changed = True
+
+ virtual_ips = [clb.VirtualIP(type=vip_type)]
+ current_vip_types = set([v.type for v in balancer.virtual_ips])
+ vip_types = set([v.type for v in virtual_ips])
+ if current_vip_types != vip_types:
+ module.fail_json(msg='Load balancer Virtual IP type cannot '
+ 'be changed')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ instance = rax_to_dict(balancer, 'clb')
+
+ result = dict(changed=changed, balancer=instance)
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if balancers:
+ balancer = balancers[0]
+ try:
+ balancer.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ instance = rax_to_dict(balancer, 'clb')
+
+ if wait:
+ attempts = wait_timeout // 5
+                    # ('DELETED',) must be a tuple; ('DELETED') is just a string
+                    pyrax.utils.wait_until(balancer, 'status', ('DELETED',),
+ interval=5, attempts=attempts)
+ else:
+ instance = {}
+
+ module.exit_json(changed=changed, balancer=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ algorithm=dict(choices=CLB_ALGORITHMS,
+ default='LEAST_CONNECTIONS'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ port=dict(type='int', default=80),
+ protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
+ state=dict(default='present', choices=['present', 'absent']),
+ timeout=dict(type='int', default=30),
+ type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
+ vip_id=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ algorithm = module.params.get('algorithm')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ port = module.params.get('port')
+ protocol = module.params.get('protocol')
+ state = module.params.get('state')
+ timeout = int(module.params.get('timeout'))
+ vip_id = module.params.get('vip_id')
+ vip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id)
+
+
+if __name__ == '__main__':
+ main()
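
cloud_load_balancer() pages through clb.list() by re-querying with the last returned ID as a marker and stopping once a page comes back with fewer than two entries. A more conventional shape for that loop, under the assumption that a pyrax-style list(marker=...) returns the items after the marker (the module's version pops the marker item and re-adds whatever the next page contains):

    def list_all(client, page_size=2):
        """Collect every balancer via marker-based paging (sketch)."""
        items = client.list()
        page = items[:]
        while len(page) >= page_size:
            page = client.list(marker=items[-1].id)
            items.extend(page)
        return items
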
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
new file mode 100644
index 00000000..c066ab66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb_nodes
+short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
+description:
+ - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
+options:
+ address:
+ type: str
+ required: false
+ description:
+ - IP address or domain name of the node
+ condition:
+ type: str
+ required: false
+ choices:
+ - enabled
+ - disabled
+ - draining
+ description:
+ - Condition for the node, which determines its role within the load
+ balancer
+ load_balancer_id:
+ type: int
+ required: true
+ description:
+ - Load balancer id
+ node_id:
+ type: int
+ required: false
+ description:
+ - Node id
+ port:
+ type: int
+ required: false
+ description:
+ - Port number of the load balanced service on the node
+ state:
+ type: str
+ required: false
+ default: "present"
+ choices:
+ - present
+ - absent
+ description:
+ - Indicate desired state of the node
+ type:
+ type: str
+ required: false
+ choices:
+ - primary
+ - secondary
+ description:
+ - Type of node
+ wait:
+ required: false
+ default: "no"
+ type: bool
+ description:
+ - Wait for the load balancer to become active before returning
+ wait_timeout:
+ type: int
+ required: false
+ default: 30
+ description:
+ - How long to wait before giving up and returning an error
+ weight:
+ type: int
+ required: false
+ description:
+ - Weight of node
+ virtualenv:
+ type: path
+ description:
+ - Virtualenv to execute this module in
+author: "Lukasz Kawczynski (@neuroid)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Add a new node to the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ address: 10.2.2.3
+ port: 80
+ condition: enabled
+ type: primary
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Drain connections from a node
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ condition: draining
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Remove a node from the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ state: absent
+ wait: yes
+ credentials: /path/to/credentials
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
+
+
+def _activate_virtualenv(path):
+ activate_this = os.path.join(path, 'bin', 'activate_this.py')
+ with open(activate_this) as f:
+ code = compile(f.read(), activate_this, 'exec')
+ exec(code)
+
+
+def _get_node(lb, node_id=None, address=None, port=None):
+ """Return a matching node"""
+ for node in getattr(lb, 'nodes', []):
+ match_list = []
+ if node_id is not None:
+ match_list.append(getattr(node, 'id', None) == node_id)
+ if address is not None:
+ match_list.append(getattr(node, 'address', None) == address)
+ if port is not None:
+ match_list.append(getattr(node, 'port', None) == port)
+
+ if match_list and all(match_list):
+ return node
+
+ return None
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ condition=dict(choices=['enabled', 'disabled', 'draining']),
+ load_balancer_id=dict(required=True, type='int'),
+ node_id=dict(type='int'),
+ port=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ type=dict(choices=['primary', 'secondary']),
+ virtualenv=dict(type='path'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=30, type='int'),
+ weight=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params['address']
+ condition = (module.params['condition'] and
+ module.params['condition'].upper())
+ load_balancer_id = module.params['load_balancer_id']
+ node_id = module.params['node_id']
+ port = module.params['port']
+ state = module.params['state']
+ typ = module.params['type'] and module.params['type'].upper()
+ virtualenv = module.params['virtualenv']
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout'] or 1
+ weight = module.params['weight']
+
+ if virtualenv:
+ try:
+ _activate_virtualenv(virtualenv)
+ except IOError as e:
+ module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
+ virtualenv, e))
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.cloud_loadbalancers:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ node = _get_node(lb, node_id, address, port)
+
+ result = rax_clb_node_to_dict(node)
+
+ if state == 'absent':
+ if not node: # Removing a non-existent node
+ module.exit_json(changed=False, state=state)
+ try:
+ lb.delete_node(node)
+ result = {}
+ except pyrax.exc.NotFound:
+ module.exit_json(changed=False, state=state)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # present
+ if not node:
+ if node_id: # Updating a non-existent node
+ msg = 'Node %d not found' % node_id
+ if lb.nodes:
+ msg += (' (available nodes: %s)' %
+ ', '.join([str(x.id) for x in lb.nodes]))
+ module.fail_json(msg=msg)
+ else: # Creating a new node
+ try:
+ node = pyrax.cloudloadbalancers.Node(
+ address=address, port=port, condition=condition,
+ weight=weight, type=typ)
+ resp, body = lb.add_nodes([node])
+ result.update(body['nodes'][0])
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # Updating an existing node
+ mutable = {
+ 'condition': condition,
+ 'type': typ,
+ 'weight': weight,
+ }
+
+            # Iterate over a snapshot: popping keys while iterating the
+            # dict itself raises RuntimeError on Python 3.
+            for name, value in list(mutable.items()):
+                if value is None or value == getattr(node, name):
+                    mutable.pop(name)
+
+ if not mutable:
+ module.exit_json(changed=False, state=state, node=result)
+
+ try:
+ # The diff has to be set explicitly to update node's weight and
+ # type; this should probably be fixed in pyrax
+ lb.update_node(node, diff=mutable)
+ result.update(mutable)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if wait:
+ pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
+ attempts=wait_timeout)
+ if lb.status != 'ACTIVE':
+ module.fail_json(
+ msg='Load balancer not active after %ds (current status: %s)' %
+ (wait_timeout, lb.status.lower()))
+
+ kwargs = {'node': result} if result else {}
+ module.exit_json(changed=True, state=state, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
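
_get_node() treats every supplied criterion as a conjunct: it collects one comparison per non-None field and returns a node only when all() of them hold, so an empty criteria set matches nothing. The same pattern over plain dicts instead of pyrax node objects:

    def match(node, **criteria):
        """True when every supplied (non-None) criterion equals the field."""
        checks = [node.get(field) == value
                  for field, value in criteria.items() if value is not None]
        return bool(checks) and all(checks)


    nodes = [{'id': 410, 'address': '10.2.2.3', 'port': 80}]
    print(any(match(n, address='10.2.2.3', port=80) for n in nodes))  # True
    print(any(match(n, id=411) for n in nodes))                       # False
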
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
new file mode 100644
index 00000000..114128e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
+description:
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
+options:
+ loadbalancer:
+ type: str
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ type: str
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+ - If set to "false", temporarily disable SSL termination without discarding
+ - existing credentials.
+ default: true
+ type: bool
+ private_key:
+ type: str
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ type: str
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ type: str
+ description:
+    - One or more intermediate certificate authorities as a string in PEM
+      format, concatenated into a single string.
+ secure_port:
+ type: int
+ description:
+    - The port on which to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ type: bool
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+ - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
+ - termination is also applied or removed.
+ type: bool
+ wait:
+ description:
+    - Wait for the balancer to be in state "running" before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ type: int
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ community.general.rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ community.general.rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
+ certificate, intermediate_certificate, secure_port,
+ secure_traffic_only, https_redirect,
+ wait, wait_timeout):
+ # Validate arguments.
+
+ if state == 'present':
+ if not private_key:
+ module.fail_json(msg="private_key must be provided.")
+ else:
+ private_key = private_key.strip()
+
+ if not certificate:
+ module.fail_json(msg="certificate must be provided.")
+ else:
+ certificate = certificate.strip()
+
+ attempts = wait_timeout // 5
+
+ # Locate the load balancer.
+
+ balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ existing_ssl = balancer.get_ssl_termination()
+
+ changed = False
+
+ if state == 'present':
+ # Apply or reconfigure SSL termination on the load balancer.
+ ssl_attrs = dict(
+ securePort=secure_port,
+ privatekey=private_key,
+ certificate=certificate,
+ intermediateCertificate=intermediate_certificate,
+ enabled=enabled,
+ secureTrafficOnly=secure_traffic_only
+ )
+
+ needs_change = False
+
+ if existing_ssl:
+ for ssl_attr, value in ssl_attrs.items():
+ if ssl_attr == 'privatekey':
+ # The private key is not included in get_ssl_termination's
+ # output (as it shouldn't be). Also, if you're changing the
+ # private key, you'll also be changing the certificate,
+ # so we don't lose anything by not checking it.
+ continue
+
+ if value is not None and existing_ssl.get(ssl_attr) != value:
+ # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr))
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ try:
+ balancer.add_ssl_termination(**ssl_attrs)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+ elif state == 'absent':
+ # Remove SSL termination if it's already configured.
+ if existing_ssl:
+ try:
+ balancer.delete_ssl_termination()
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if https_redirect is not None and balancer.httpsRedirect != https_redirect:
+ if changed:
+ # This wait is unavoidable because load balancers are immutable
+ # while the SSL termination changes above are being applied.
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ try:
+ balancer.update(httpsRedirect=https_redirect)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if changed and wait:
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ new_ssl_termination = balancer.get_ssl_termination()
+
+ # Intentionally omit the private key from the module output, so you don't
+ # accidentally echo it with `ansible-playbook -v` or `debug`, and the
+ # certificate, which is just long. Convert other attributes to snake_case
+ # and include https_redirect at the top-level.
+ if new_ssl_termination:
+ new_ssl = dict(
+ enabled=new_ssl_termination['enabled'],
+ secure_port=new_ssl_termination['securePort'],
+ secure_traffic_only=new_ssl_termination['secureTrafficOnly']
+ )
+ else:
+ new_ssl = None
+
+ result = dict(
+ changed=changed,
+ https_redirect=balancer.httpsRedirect,
+ ssl_termination=new_ssl,
+ balancer=rax_to_dict(balancer, 'clb')
+ )
+ success = True
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ success = False
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+ success = False
+
+ if success:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(dict(
+ loadbalancer=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(type='bool', default=True),
+ private_key=dict(no_log=True),
+ certificate=dict(),
+ intermediate_certificate=dict(),
+ secure_port=dict(type='int', default=443),
+ secure_traffic_only=dict(type='bool', default=False),
+ https_redirect=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module.')
+
+ loadbalancer = module.params.get('loadbalancer')
+ state = module.params.get('state')
+ enabled = module.boolean(module.params.get('enabled'))
+ private_key = module.params.get('private_key')
+ certificate = module.params.get('certificate')
+ intermediate_certificate = module.params.get('intermediate_certificate')
+ secure_port = module.params.get('secure_port')
+ secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
+ https_redirect = module.boolean(module.params.get('https_redirect'))
+ wait = module.boolean(module.params.get('wait'))
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer_ssl(
+ module, loadbalancer, state, enabled, private_key, certificate,
+ intermediate_certificate, secure_port, secure_traffic_only,
+ https_redirect, wait, wait_timeout
+ )
+
+
+if __name__ == '__main__':
+ main()
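
The needs_change loop in cloud_load_balancer_ssl() is a one-sided dict diff: each desired attribute is compared against get_ssl_termination()'s output, and privatekey is skipped because the API never echoes it back. As a standalone sketch:

    SKIP = {'privatekey'}  # never present in get_ssl_termination() output

    def needs_change(existing, desired):
        """True when any desired attribute differs from the existing config."""
        if not existing:
            return True  # no SSL termination configured yet
        return any(value is not None and existing.get(attr) != value
                   for attr, value in desired.items() if attr not in SKIP)


    print(needs_change({'securePort': 443, 'enabled': True},
                       {'securePort': 8443, 'privatekey': 'PEM...', 'enabled': True}))
    # -> True (443 != 8443); the private key is ignored
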
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py
new file mode 100644
index 00000000..e9b7e2be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns
+short_description: Manage domains on Rackspace Cloud DNS
+description:
+ - Manage domains on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+ - Brief description of the domain. Maximum length of 160 characters
+ email:
+ type: str
+ description:
+ - Email address of the domain administrator
+ name:
+ type: str
+ description:
+ - Domain name to create
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of domain in seconds
+ default: 3600
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create domain
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Domain create request
+ local_action:
+ module: rax_dns
+ credentials: ~/.raxpub
+ name: example.org
+ email: admin@example.org
+ register: rax_dns
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns(module, comment, email, name, state, ttl):
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not email:
+ module.fail_json(msg='An "email" attribute is required for '
+ 'creating a domain')
+
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ domain = dns.create(name=name, emailAddress=email, ttl=ttl,
+ comment=comment)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(domain, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(domain, 'ttl', None):
+ update['ttl'] = ttl
+ if email != getattr(domain, 'emailAddress', None):
+ update['emailAddress'] = email
+
+ if update:
+ try:
+ domain.update(**update)
+ changed = True
+ domain.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NotFound:
+ domain = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if domain:
+ try:
+ domain.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, domain=rax_to_dict(domain))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ email=dict(),
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ email = module.params.get('email')
+ name = module.params.get('name')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+
+ setup_rax_module(module, pyrax, False)
+
+ rax_dns(module, comment, email, name, state, ttl)
+
+
+if __name__ == '__main__':
+ main()
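
rax_dns() only calls domain.update() with the attributes that actually differ, which keeps the module idempotent and avoids needless calls against the rate-limited CloudDNS API. The sparse-update construction in isolation (getattr falls back to None for unset attributes, as in the module):

    def sparse_update(obj, **desired):
        """Build update() kwargs containing only the changed attributes."""
        return {attr: value for attr, value in desired.items()
                if value != getattr(obj, attr, None)}


    class Domain(object):
        ttl = 3600
        emailAddress = 'admin@example.org'

    print(sparse_update(Domain(), ttl=7200, emailAddress='admin@example.org'))
    # -> {'ttl': 7200}
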
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py
new file mode 100644
index 00000000..0b60120a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns_record
+short_description: Manage DNS records on Rackspace Cloud DNS
+description:
+ - Manage DNS records on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+      - Brief description of the record. Maximum length of 160 characters
+ data:
+ type: str
+ description:
+ - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
+ SRV/TXT
+ required: True
+ domain:
+ type: str
+ description:
+ - Domain name to create the record in. This is an invalid option when
+ type=PTR
+ loadbalancer:
+ type: str
+ description:
+ - Load Balancer ID to create a PTR record for. Only used with type=PTR
+ name:
+ type: str
+ description:
+ - FQDN record name to create
+ required: True
+ overwrite:
+ description:
+ - Add new records if data doesn't match, instead of updating existing
+ record with matching name. If there are already multiple records with
+ matching name and overwrite=true, this module will fail.
+ default: true
+ type: bool
+ priority:
+ type: int
+ description:
+ - Required for MX and SRV records, but forbidden for other record types.
+ If specified, must be an integer from 0 to 65535.
+ server:
+ type: str
+ description:
+ - Server ID to create a PTR record for. Only used with type=PTR
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of record in seconds
+ default: 3600
+ type:
+ type: str
+ description:
+ - DNS record type
+ choices:
+ - A
+ - AAAA
+ - CNAME
+ - MX
+ - NS
+ - SRV
+ - TXT
+ - PTR
+ required: true
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+ - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
+ supplied
+ - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
+ - C(PTR) record support was added in version 1.7
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create DNS Records
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Create A record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ domain: example.org
+ name: www.example.org
+ data: "{{ rax_accessipv4 }}"
+ type: A
+ register: a_record
+
+  - name: Create PTR record
+    local_action:
+      module: rax_dns_record
+      credentials: ~/.raxpub
+      server: "{{ rax_id }}"
+      name: "{{ inventory_hostname }}"
+      data: "{{ rax_accessipv4 }}"
+      type: PTR
+      region: DFW
+    register: ptr_record
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_find_server,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
+ name=None, server=None, state='present', ttl=7200):
+ changed = False
+ results = []
+
+ dns = pyrax.cloud_dns
+
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if loadbalancer:
+ item = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ elif server:
+ item = rax_find_server(module, pyrax, server)
+
+ if state == 'present':
+ current = dns.list_ptr_records(item)
+ for record in current:
+ if record.data == data:
+ if record.ttl != ttl or record.name != name:
+ try:
+ dns.update_ptr_record(item, record, name, data, ttl)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ record.ttl = ttl
+ record.name = name
+ results.append(rax_to_dict(record))
+ break
+ else:
+ results.append(rax_to_dict(record))
+ break
+
+ if not results:
+ record = dict(name=name, type='PTR', data=data, ttl=ttl,
+ comment=comment)
+ try:
+ results = dns.add_ptr_records(item, [record])
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+ elif state == 'absent':
+ current = dns.list_ptr_records(item)
+ for record in current:
+ if record.data == data:
+ results.append(rax_to_dict(record))
+ break
+
+ if results:
+ try:
+ dns.delete_ptr_records(item, data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+
+def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
+ overwrite=True, priority=None, record_type='A',
+ state='present', ttl=7200):
+ """Function for manipulating record types other than PTR"""
+
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not priority and record_type in ['MX', 'SRV']:
+ module.fail_json(msg='A "priority" attribute is required for '
+ 'creating a MX or SRV record')
+
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ if overwrite:
+ record = domain.find_record(record_type, name=name)
+ else:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='overwrite=true and there are multiple matching records')
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ try:
+ record_data = {
+ 'type': record_type,
+ 'name': name,
+ 'data': data,
+ 'ttl': ttl
+ }
+ if comment:
+ record_data.update(dict(comment=comment))
+ if priority and record_type.upper() in ['MX', 'SRV']:
+ record_data.update(dict(priority=priority))
+
+ record = domain.add_records([record_data])[0]
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(record, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(record, 'ttl', None):
+ update['ttl'] = ttl
+ if priority != getattr(record, 'priority', None):
+ update['priority'] = priority
+ if data != getattr(record, 'data', None):
+ update['data'] = data
+
+ if update:
+ try:
+ record.update(**update)
+ changed = True
+ record.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ record = {}
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if record:
+ try:
+ record.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, record=rax_to_dict(record))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ data=dict(required=True),
+ domain=dict(),
+ loadbalancer=dict(),
+ name=dict(required=True),
+ overwrite=dict(type='bool', default=True),
+ priority=dict(type='int'),
+ server=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
+ 'SRV', 'TXT', 'PTR'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ required_one_of=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ data = module.params.get('data')
+ domain = module.params.get('domain')
+ loadbalancer = module.params.get('loadbalancer')
+ name = module.params.get('name')
+ overwrite = module.params.get('overwrite')
+ priority = module.params.get('priority')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+ record_type = module.params.get('type')
+
+ setup_rax_module(module, pyrax, False)
+
+ if record_type.upper() == 'PTR':
+ if not server and not loadbalancer:
+ module.fail_json(msg='one of the following is required: '
+ 'server,loadbalancer')
+ rax_dns_record_ptr(module, data=data, comment=comment,
+ loadbalancer=loadbalancer, name=name, server=server,
+ state=state, ttl=ttl)
+ else:
+ rax_dns_record(module, comment=comment, data=data, domain=domain,
+ name=name, overwrite=overwrite, priority=priority,
+ record_type=record_type, state=state, ttl=ttl)
+
+
+if __name__ == '__main__':
+ main()
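
rax_dns_record() assembles the add_records() payload incrementally: comment is attached only when supplied, and priority only for MX/SRV records, mirroring the validation at the top of the function. A standalone sketch of that payload builder:

    def build_record(record_type, name, data, ttl=3600, comment=None,
                     priority=None):
        """Mirror the module's record_data construction for add_records()."""
        record = {'type': record_type, 'name': name, 'data': data, 'ttl': ttl}
        if comment:
            record['comment'] = comment
        if priority and record_type.upper() in ('MX', 'SRV'):
            record['priority'] = priority
        return record


    print(build_record('MX', 'example.org', 'mail.example.org', priority=10))
    # -> {'type': 'MX', 'name': 'example.org', 'data': 'mail.example.org',
    #     'ttl': 3600, 'priority': 10}
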
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py
new file mode 100644
index 00000000..386ca7cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_facts
+short_description: Gather facts for Rackspace Cloud Servers
+description:
+ - Gather facts for Rackspace Cloud Servers.
+options:
+ address:
+ type: str
+ description:
+ - Server IP address to retrieve facts for, will match any IP assigned to
+ the server
+ id:
+ type: str
+ description:
+ - Server ID to retrieve facts for
+ name:
+ type: str
+ description:
+ - Server name to retrieve facts for
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Gather info about servers
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Get facts about servers
+ local_action:
+ module: rax_facts
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ - name: Map some facts
+ ansible.builtin.set_fact:
+ ansible_ssh_host: "{{ rax_accessipv4 }}"
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_facts(module, address, name, server_id):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ ansible_facts = {}
+
+ search_opts = {}
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+        except Exception:
+            # An unknown or deleted server ID simply yields no facts
+            pass
+
+ servers[:] = [server for server in servers if server.status != "DELETED"]
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif len(servers) == 1:
+ ansible_facts = rax_to_dict(servers[0], 'server')
+
+ module.exit_json(changed=changed, ansible_facts=ansible_facts)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+
+ setup_rax_module(module, pyrax)
+
+ rax_facts(module, address, name, server_id)
+
+
+if __name__ == '__main__':
+ main()
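
rax_facts() anchors the name filter as ^name$ so the compute API performs an exact match rather than the default substring match, then discards anything whose status is DELETED. Equivalent filtering over plain data (re.escape is added here for safety; the module interpolates the name directly):

    import re

    def exact_name_matches(servers, name):
        """Anchored match, as in search_opts = dict(name='^%s$' % name)."""
        pattern = re.compile('^%s$' % re.escape(name))
        return [s for s in servers
                if pattern.match(s['name']) and s['status'] != 'DELETED']


    servers = [{'name': 'web1', 'status': 'ACTIVE'},
               {'name': 'web10', 'status': 'ACTIVE'},
               {'name': 'web1', 'status': 'DELETED'}]
    print(exact_name_matches(servers, 'web1'))  # only the ACTIVE 'web1'
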
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py
new file mode 100644
index 00000000..7080cc2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files
+short_description: Manipulate Rackspace Cloud Files Containers
+description:
+ - Manipulate Rackspace Cloud Files Containers
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing containers.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: "no"
+ container:
+ type: str
+ description:
+ - The container to use for container or metadata operations.
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on a container
+ private:
+ description:
+ - Used to set a container as private, removing it from the CDN. B(Warning!)
+ Private containers, if previously made public, can have live objects
+ available until the TTL on cached objects expires
+ type: bool
+ default: false
+ public:
+ description:
+ - Used to set a container as public, available via the Cloud Files CDN
+ type: bool
+ default: false
+ region:
+ type: str
+ description:
+ - Region to create an instance in
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent', 'list']
+ default: present
+ ttl:
+ type: int
+ description:
+ - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
+ Setting a TTL is only appropriate for containers that are public
+ type:
+ type: str
+ description:
+ - Type of object to do work on, i.e. metadata object or a container object
+ choices:
+ - container
+ - meta
+ default: container
+ web_error:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP error page when accessed by the CDN URL
+ web_index:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Containers"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "List all containers"
+ community.general.rax_files:
+ state: list
+
+ - name: "Create container called 'mycontainer'"
+ community.general.rax_files:
+ container: mycontainer
+
+ - name: "Create container 'mycontainer2' with metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ meta:
+ key: value
+ file_for: someuser@example.com
+
+ - name: "Set a container's web index page"
+ community.general.rax_files:
+ container: mycontainer
+ web_index: index.html
+
+ - name: "Set a container's web error page"
+ community.general.rax_files:
+ container: mycontainer
+ web_error: error.html
+
+ - name: "Make container public"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+
+ - name: "Make container public with a 24 hour TTL"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+ ttl: 86400
+
+ - name: "Make container private"
+ community.general.rax_files:
+ container: mycontainer
+ private: yes
+
+- name: "Test Cloud Files Containers Metadata Storage"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "Get mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+
+ - name: "Set mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+ meta:
+ uploaded_by: someuser@example.com
+
+ - name: "Remove mycontainer2 metadata"
+ community.general.rax_files:
+ container: "mycontainer2"
+ type: meta
+ state: absent
+ meta:
+ key: ""
+ file_for: ""
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError as e:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=True)
+META_PREFIX = 'x-container-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+ module.fail_json(msg=e.message)
+
+
+def _fetch_meta(module, container):
+ EXIT_DICT['meta'] = dict()
+ try:
+ for k, v in container.get_metadata().items():
+ split_key = k.split(META_PREFIX)[-1]
+ EXIT_DICT['meta'][split_key] = v
+ except Exception as e:
+ module.fail_json(msg=e.message)
+
+
+def meta(cf, module, container_, state, meta_, clear_meta):
+ c = _get_container(module, cf, container_)
+
+ if meta_ and state == 'present':
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ elif meta_ and state == 'absent':
+ remove_results = []
+ for k, v in meta_.items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+ elif state == 'absent':
+ remove_results = []
+ for k, v in c.get_metadata().items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+
+ _fetch_meta(module, c)
+ _locals = locals().keys()
+
+ EXIT_DICT['container'] = c.name
+ if 'meta_set' in _locals or 'remove_results' in _locals:
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
+ private, web_index, web_error):
+ if public and private:
+ module.fail_json(msg='container cannot be simultaneously '
+ 'set to public and private')
+
+ if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
+        module.fail_json(msg='container attributes cannot be set or '
+                             'removed when state is absent')
+
+ if state == 'list':
+ # We don't care if attributes are specified, let's list containers
+ EXIT_DICT['containers'] = cf.list_containers()
+ module.exit_json(**EXIT_DICT)
+
+ try:
+ c = cf.get_container(container_)
+ except pyrax.exc.NoSuchContainer as e:
+ # Make the container if state=present, otherwise bomb out
+ if state == 'present':
+ try:
+ c = cf.create_container(container_)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['created'] = True
+ else:
+ module.fail_json(msg=e.message)
+ else:
+ # Successfully grabbed a container object
+ # Delete if state is absent
+ if state == 'absent':
+ try:
+ cont_deleted = c.delete()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['deleted'] = True
+
+ if meta_:
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ finally:
+ _fetch_meta(module, c)
+
+ if ttl:
+ try:
+ c.cdn_ttl = ttl
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['ttl'] = c.cdn_ttl
+
+ if public:
+ try:
+ cont_public = c.make_public()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
+ ssl_url=c.cdn_ssl_uri,
+ streaming_url=c.cdn_streaming_uri,
+ ios_uri=c.cdn_ios_uri)
+
+ if private:
+ try:
+ cont_private = c.make_private()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_private'] = True
+
+ if web_index:
+ try:
+ cont_web_index = c.set_web_index_page(web_index)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_index'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ if web_error:
+ try:
+ cont_err_index = c.set_web_error_page(web_error)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_error'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['objs_in_container'] = c.object_count
+ EXIT_DICT['total_bytes'] = c.total_bytes
+
+ _locals = locals().keys()
+ if ('cont_deleted' in _locals
+ or 'meta_set' in _locals
+ or 'cont_public' in _locals
+ or 'cont_private' in _locals
+ or 'cont_web_index' in _locals
+ or 'cont_err_index' in _locals):
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "container":
+ container(cf, module, container_, state, meta_, clear_meta, ttl,
+ public, private, web_index, web_error)
+ else:
+ meta(cf, module, container_, state, meta_, clear_meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(),
+ state=dict(choices=['present', 'absent', 'list'],
+ default='present'),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ type=dict(choices=['container', 'meta'], default='container'),
+ ttl=dict(type='int'),
+ public=dict(default=False, type='bool'),
+ private=dict(default=False, type='bool'),
+ web_index=dict(),
+ web_error=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container_ = module.params.get('container')
+ state = module.params.get('state')
+ meta_ = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ typ = module.params.get('type')
+ ttl = module.params.get('ttl')
+ public = module.params.get('public')
+ private = module.params.get('private')
+ web_index = module.params.get('web_index')
+ web_error = module.params.get('web_error')
+
+ if state in ['present', 'absent'] and not container_:
+ module.fail_json(msg='please specify a container name')
+    if clear_meta and typ != 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting '
+ 'metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error)
+
+
+if __name__ == '__main__':
+ main()
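
Cloud Files returns container metadata keys prefixed with x-container-meta-; _fetch_meta() strips that prefix with k.split(META_PREFIX)[-1] before reporting. The same normalization standalone:

    META_PREFIX = 'x-container-meta-'

    def strip_meta_prefix(headers):
        """Return user-facing metadata keys, as _fetch_meta() reports them."""
        return {k.split(META_PREFIX)[-1]: v for k, v in headers.items()}


    print(strip_meta_prefix({'x-container-meta-file_for': 'someuser@example.com',
                             'x-container-meta-key': 'value'}))
    # -> {'file_for': 'someuser@example.com', 'key': 'value'}
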
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py
new file mode 100644
index 00000000..dc445554
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py
@@ -0,0 +1,608 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files_objects
+short_description: Upload, download, and delete objects in Rackspace Cloud Files
+description:
+ - Upload, download, and delete objects in Rackspace Cloud Files
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing objects.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: 'no'
+ container:
+ type: str
+ description:
+ - The container to use for file object operations.
+ required: true
+ dest:
+ type: str
+ description:
+      - The destination of a "get" operation; e.g. a local directory, "/home/user/myfolder".
+        Used to specify the destination of an operation on a remote object; e.g. a file name,
+        "file1", or a comma-separated list of remote objects, "file1,file2,file17"
+ expires:
+ type: int
+ description:
+ - Used to set an expiration on a file or folder uploaded to Cloud Files.
+ Requires an integer, specifying expiration in seconds
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on an uploaded file or folder
+ method:
+ type: str
+ description:
+ - The method of operation to be performed. For example, put to upload files
+ to Cloud Files, get to download files from Cloud Files or delete to delete
+ remote objects in Cloud Files
+ choices:
+ - get
+ - put
+ - delete
+ default: get
+ src:
+ type: str
+ description:
+      - Source from which to upload files. Used to specify a remote object as a source for
+        an operation, e.g. a file name, "file1", or a comma-separated list of remote objects,
+        "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
+ structure:
+ description:
+ - Used to specify whether to maintain nested directory structure when downloading objects
+ from Cloud Files. Setting to false downloads the contents of a container to a single,
+ flat directory
+ type: bool
+ default: 'yes'
+ type:
+ type: str
+ description:
+      - Type of object to operate on.
+      - Metadata object or a file object.
+ choices:
+ - file
+ - meta
+ default: file
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Objects"
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: "Get objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ dest: ~/Downloads/testcont
+
+ - name: "Get single object from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1
+ dest: ~/Downloads/testcont
+
+ - name: "Get several objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1,file2,file3
+ dest: ~/Downloads/testcont
+
+ - name: "Delete one object in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file1
+
+ - name: "Delete several objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file2,file3,file4
+
+ - name: "Delete all objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+
+ - name: "Upload all files to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/onehundred
+
+ - name: "Upload one file to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file1
+
+ - name: "Upload one file to test container with metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ src: ~/Downloads/testcont/file2
+ method: put
+ meta:
+ testkey: testdata
+ who_uploaded_this: someuser@example.com
+
+ - name: "Upload one file to test container with TTL of 60 seconds"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file3
+ expires: 60
+
+ - name: "Attempt to get remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: get
+ src: FileThatDoesNotExist.jpg
+ dest: ~/Downloads/testcont
+ ignore_errors: yes
+
+ - name: "Attempt to delete remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: FileThatDoesNotExist.jpg
+ ignore_errors: yes
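+
+    # A sketch using only options documented above: setting structure to
+    # "no" downloads everything into a single flat directory.
+    - name: "Get all objects without preserving directory structure"
+      community.general.rax_files_objects:
+        container: testcont
+        dest: ~/Downloads/flat
+        structure: no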
+
+- name: "Test Cloud Files Objects Metadata"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "Get metadata on one object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file2
+
+ - name: "Get metadata on several objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file2,file1
+
+ - name: "Set metadata on an object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: put
+ meta:
+ key1: value1
+ key2: value2
+ clear_meta: true
+
+ - name: "Verify metadata is set"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file17
+
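+  # On delete, the metadata values are ignored; only the key names matter,
+  # as each listed key is removed individually.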
+ - name: "Delete metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: delete
+ meta:
+ key1: ''
+ key2: ''
+
+ - name: "Get metadata on all objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=False)
+META_PREFIX = 'x-object-meta-'
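+# Cloud Files returns object metadata as HTTP headers of the form
+# "x-object-meta-<key>"; get_meta() strips this prefix so results contain
+# only the bare key names.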
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+        # Exception.message does not exist on Python 3; use str(e)
+        module.fail_json(msg=str(e))
+
+
+def _upload_folder(cf, folder, container, ttl=None, headers=None):
+ """ Uploads a folder to Cloud Files.
+ """
+ total_bytes = 0
+ for root, dirs, files in os.walk(folder):
+ for fname in files:
+ full_path = os.path.join(root, fname)
+ obj_name = os.path.relpath(full_path, folder)
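+            # e.g. folder "/data/site" and file "/data/site/css/app.css"
+            # yield obj_name "css/app.css", preserving the nested layout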
+ obj_size = os.path.getsize(full_path)
+ cf.upload_file(container, full_path,
+ obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
+ total_bytes += obj_size
+ return total_bytes
+
+
+def upload(module, cf, container, src, dest, meta, expires):
+ """ Uploads a single object or a folder to Cloud Files Optionally sets an
+ metadata, TTL value (expires), or Content-Disposition and Content-Encoding
+ headers.
+ """
+ if not src:
+ module.fail_json(msg='src must be specified when uploading')
+
+ c = _get_container(module, cf, container)
+ src = os.path.abspath(os.path.expanduser(src))
+ is_dir = os.path.isdir(src)
+
+    if (not is_dir and not os.path.isfile(src)) or not os.path.exists(src):
+ module.fail_json(msg='src must be a file or a directory')
+ if dest and is_dir:
+ module.fail_json(msg='dest cannot be set when whole '
+ 'directories are uploaded')
+
+ cont_obj = None
+ total_bytes = 0
+ if dest and not is_dir:
+ try:
+ cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
+ except Exception as e:
+            module.fail_json(msg=str(e))
+ elif is_dir:
+ try:
+ total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
+ except Exception as e:
+            module.fail_json(msg=str(e))
+ else:
+ try:
+ cont_obj = c.upload_file(src, ttl=expires, headers=meta)
+ except Exception as e:
+            module.fail_json(msg=str(e))
+
+ EXIT_DICT['success'] = True
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
+ if cont_obj or total_bytes > 0:
+ EXIT_DICT['changed'] = True
+ if meta:
+ EXIT_DICT['meta'] = dict(updated=True)
+
+ if cont_obj:
+ EXIT_DICT['bytes'] = cont_obj.total_bytes
+ EXIT_DICT['etag'] = cont_obj.etag
+ else:
+ EXIT_DICT['bytes'] = total_bytes
+
+ module.exit_json(**EXIT_DICT)
+
+
+def download(module, cf, container, src, dest, structure):
+ """ Download objects from Cloud Files to a local path specified by "dest".
+    Optionally disable maintaining a directory structure by passing a
+ false value to "structure".
+ """
+ # Looking for an explicit destination
+ if not dest:
+ module.fail_json(msg='dest is a required argument when '
+ 'downloading from Cloud Files')
+
+ # Attempt to fetch the container by name
+ c = _get_container(module, cf, container)
+
+ # Accept a single object name or a comma-separated list of objs
+ # If not specified, get the entire container
+ if src:
+        # a real list, not a map iterator, so len(objs) below works on
+        # Python 3
+        objs = [obj.strip() for obj in src.split(',')]
+ else:
+ objs = c.get_object_names()
+
+ dest = os.path.abspath(os.path.expanduser(dest))
+ is_dir = os.path.isdir(dest)
+
+ if not is_dir:
+ module.fail_json(msg='dest must be a directory')
+
+ results = []
+ for obj in objs:
+ try:
+ c.download_object(obj, dest, structure=structure)
+ except Exception as e:
+            module.fail_json(msg=str(e))
+ else:
+ results.append(obj)
+
+ len_results = len(results)
+ len_objs = len(objs)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['requested_downloaded'] = results
+ if results:
+ EXIT_DICT['changed'] = True
+ if len_results == len_objs:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
+ else:
+ EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
+ "downloaded" % (len_results, len_objs)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete(module, cf, container, src, dest):
+ """ Delete specific objects by proving a single file name or a
+ comma-separated list to src OR dest (but not both). Omitting file name(s)
+ assumes the entire container is to be deleted.
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ c = _get_container(module, cf, container)
+
+ if objs:
+        # list() so that len(objs) below works on Python 3
+        objs = [obj.strip() for obj in objs.split(',')]
+ else:
+ objs = c.get_object_names()
+
+ num_objs = len(objs)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.delete_object(obj)
+ except Exception as e:
+            module.fail_json(msg=str(e))
+ else:
+ results.append(result)
+
+ num_deleted = results.count(True)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['deleted'] = num_deleted
+ EXIT_DICT['requested_deleted'] = objs
+
+ if num_deleted:
+ EXIT_DICT['changed'] = True
+
+ if num_objs == num_deleted:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
+ else:
+ EXIT_DICT['msg'] = ("Error: only %s of %s objects "
+ "deleted" % (num_deleted, num_objs))
+ module.exit_json(**EXIT_DICT)
+
+
+def get_meta(module, cf, container, src, dest):
+ """ Get metadata for a single file, comma-separated list, or entire
+ container
+ """
+ c = _get_container(module, cf, container)
+
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ if objs:
+        objs = [obj.strip() for obj in objs.split(',')]
+ else:
+ objs = c.get_object_names()
+
+ results = dict()
+ for obj in objs:
+ try:
+ meta = c.get_object(obj).get_metadata()
+ except Exception as e:
+            module.fail_json(msg=str(e))
+ else:
+ results[obj] = dict()
+ for k, v in meta.items():
+ meta_key = k.split(META_PREFIX)[-1]
+ results[obj][meta_key] = v
+
+ EXIT_DICT['container'] = c.name
+ if results:
+ EXIT_DICT['meta_results'] = results
+ EXIT_DICT['success'] = True
+ module.exit_json(**EXIT_DICT)
+
+
+def put_meta(module, cf, container, src, dest, meta, clear_meta):
+ """ Set metadata on a container, single file, or comma-separated list.
+ Passing a true value to clear_meta clears the metadata stored in Cloud
+ Files before setting the new metadata to the value of "meta".
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to set meta"
+ " have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+    objs = [obj.strip() for obj in objs.split(',')]
+
+ c = _get_container(module, cf, container)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
+ except Exception as e:
+            module.fail_json(msg=str(e))
+ else:
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+        EXIT_DICT['num_changed'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete_meta(module, cf, container, src, dest, meta):
+ """ Removes metadata keys and values specified in meta, if any. Deletes on
+ all objects specified by src or dest (but not both), if any; otherwise it
+ deletes keys on all objects in the container
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
+ "deleted have been specified on both src and dest"
+ " args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+    objs = [obj.strip() for obj in objs.split(',')]
+
+ c = _get_container(module, cf, container)
+
+ results = [] # Num of metadata keys removed, not objects affected
+ for obj in objs:
+ if meta:
+ for k, v in meta.items():
+ try:
+ result = c.get_object(obj).remove_metadata_key(k)
+ except Exception as e:
+                    module.fail_json(msg=str(e))
+ else:
+ results.append(result)
+ else:
+ try:
+ o = c.get_object(obj)
+ except pyrax.exc.NoSuchObject as e:
+                module.fail_json(msg=str(e))
+
+ for k, v in o.get_metadata().items():
+ try:
+ result = o.remove_metadata_key(k)
+ except Exception as e:
+                    module.fail_json(msg=str(e))
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['num_deleted'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
+ structure, expires):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "file":
+ if method == 'put':
+ upload(module, cf, container, src, dest, meta, expires)
+
+ elif method == 'get':
+ download(module, cf, container, src, dest, structure)
+
+ elif method == 'delete':
+ delete(module, cf, container, src, dest)
+
+ else:
+ if method == 'get':
+ get_meta(module, cf, container, src, dest)
+
+ if method == 'put':
+ put_meta(module, cf, container, src, dest, meta, clear_meta)
+
+ if method == 'delete':
+ delete_meta(module, cf, container, src, dest, meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(required=True),
+ src=dict(),
+ dest=dict(),
+ method=dict(default='get', choices=['put', 'get', 'delete']),
+ type=dict(default='file', choices=['file', 'meta']),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ structure=dict(default=True, type='bool'),
+ expires=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container = module.params.get('container')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ method = module.params.get('method')
+ typ = module.params.get('type')
+ meta = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ structure = module.params.get('structure')
+ expires = module.params.get('expires')
+
+    if clear_meta and typ != 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py
new file mode 100644
index 00000000..330c510d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_identity
+short_description: Load Rackspace Cloud Identity
+description:
+ - Verifies Rackspace Cloud credentials and returns identity information
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present']
+ default: present
+ required: false
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Load Rackspace Cloud Identity
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Identity
+ local_action:
+ module: rax_identity
+ credentials: ~/.raxpub
+ region: DFW
+ register: rackspace_identity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def cloud_identity(module, state, identity):
+ instance = dict(
+ authenticated=identity.authenticated,
+ credentials=identity._creds_file
+ )
+ changed = False
+
+ instance.update(rax_to_dict(identity))
+    # list() so the result is JSON-serializable on Python 3
+    instance['services'] = list(instance.get('services', {}).keys())
+
+ if state == 'present':
+ if not identity.authenticated:
+ module.fail_json(msg='Credentials could not be verified!')
+
+ module.exit_json(changed=changed, identity=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.identity:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloud_identity(module, state, pyrax.identity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py
new file mode 100644
index 00000000..0314883f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_keypair
+short_description: Create a keypair for use with Rackspace Cloud Servers
+description:
+ - Create a keypair for use with Rackspace Cloud Servers
+options:
+ name:
+ type: str
+ description:
+ - Name of keypair
+ required: true
+ public_key:
+ type: str
+ description:
+ - Public Key string to upload. Can be a file path or string
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+notes:
+ - Keypairs cannot be manipulated, only created and deleted. To "update" a
+ keypair you must first delete and then recreate.
+ - The ability to specify a file path for the public key was added in 1.7
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ region: DFW
+ register: keypair
+ - name: Create local public key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.public_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
+ - name: Create local private key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.private_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
+
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
+ region: DFW
+ register: keypair
+'''
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_keypair(module, name, public_key, state):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ keypair = {}
+
+ if state == 'present':
+ if public_key and os.path.isfile(public_key):
+ try:
+                with open(public_key) as f:
+                    public_key = f.read()
+            except Exception as e:
+                module.fail_json(msg='Failed to load %s: %s' % (public_key, e))
+
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except cs.exceptions.NotFound:
+ try:
+ keypair = cs.keypairs.create(name, public_key)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ elif state == 'absent':
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except Exception:
+ pass
+
+ if keypair:
+ try:
+ keypair.delete()
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ public_key=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ public_key = module.params.get('public_key')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ rax_keypair(module, name, public_key, state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py
new file mode 100644
index 00000000..b7d172d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_meta
+short_description: Manipulate metadata for Rackspace Cloud Servers
+description:
+ - Manipulate metadata for Rackspace Cloud Servers
+options:
+ address:
+ type: str
+ description:
+ - Server IP address to modify metadata for, will match any IP assigned to
+ the server
+ id:
+ type: str
+ description:
+ - Server ID to modify metadata for
+ name:
+ type: str
+ description:
+ - Server name to modify metadata for
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Set metadata for a server
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Set metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ meta:
+ group: primary_group
+ groups:
+ - group_two
+ - group_three
+ app: my_app
+
+ - name: Clear metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+'''
+
+import json
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+from ansible.module_utils.six import string_types
+
+
+def rax_meta(module, address, name, server_id, meta):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ search_opts = {}
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+        module.fail_json(msg='%s' % e)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+        except Exception:
+ pass
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif not servers:
+ module.fail_json(msg='Failed to find a server matching provided '
+ 'search parameters')
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
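+    # e.g. {'groups': ['a', 'b']} becomes 'a,b' and {'cfg': {'x': 1}}
+    # becomes '{"x": 1}'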
+
+ server = servers[0]
+ if server.metadata == meta:
+ changed = False
+ else:
+ changed = True
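+        # Keys present on the server but absent from the desired "meta" are
+        # deleted first, then the full desired set is written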
+ removed = set(server.metadata.keys()).difference(meta.keys())
+ cs.servers.delete_meta(server, list(removed))
+ cs.servers.set_meta(server, meta)
+ server.get()
+
+ module.exit_json(changed=changed, meta=server.metadata)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ meta=dict(type='dict', default=dict()),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+ meta = module.params.get('meta')
+
+ setup_rax_module(module, pyrax)
+
+ rax_meta(module, address, name, server_id, meta)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
new file mode 100644
index 00000000..8de26609
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm.
+description:
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ type: str
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a String
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ type: str
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ type: str
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ type: str
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ type: str
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+ - If yes, create this alarm, but leave it in an inactive state. Defaults to
+ no.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ community.general.rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+ disabled, metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+ module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+ # Coerce attributes.
+
+ changed = False
+ alarm = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+ if existing:
+ alarm = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_update = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing alarms have the label %s.' %
+ (len(existing), label))
+
+ if alarm:
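+            # check_id and notification_plan_id are treated as immutable
+            # here: a change to either forces a delete-and-recreate below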
+ if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+ should_delete = should_create = True
+
+ should_update = (disabled and disabled != alarm.disabled) or \
+ (metadata and metadata != alarm.metadata) or \
+ (criteria and criteria != alarm.criteria)
+
+ if should_update and not should_delete:
+ cm.update_alarm(entity=entity_id, alarm=alarm,
+ criteria=criteria, disabled=disabled,
+ label=label, metadata=metadata)
+ changed = True
+
+ if should_delete:
+ alarm.delete()
+ changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ alarm = cm.create_alarm(entity=entity_id, check=check_id,
+ notification_plan=notification_plan_id,
+ criteria=criteria, disabled=disabled, label=label,
+ metadata=metadata)
+ changed = True
+ else:
+ for a in existing:
+ a.delete()
+ changed = True
+
+ if alarm:
+ alarm_dict = {
+ "id": alarm.id,
+ "label": alarm.label,
+ "check_id": alarm.check_id,
+ "notification_plan_id": alarm.notification_plan_id,
+ "criteria": alarm.criteria,
+ "disabled": alarm.disabled,
+ "metadata": alarm.metadata
+ }
+ module.exit_json(changed=changed, alarm=alarm_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ entity_id=dict(required=True),
+ check_id=dict(required=True),
+ notification_plan_id=dict(required=True),
+ criteria=dict(),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ entity_id = module.params.get('entity_id')
+ check_id = module.params.get('check_id')
+ notification_plan_id = module.params.get('notification_plan_id')
+ criteria = module.params.get('criteria')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ alarm(module, state, label, entity_id, check_id, notification_plan_id,
+ criteria, disabled, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py
new file mode 100644
index 00000000..e04dfc74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ entity_id:
+ type: str
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ type: str
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ type: str
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ - |
+ Choices for this option are:
+ - C(remote.dns)
+ - C(remote.ftp-banner)
+ - C(remote.http)
+ - C(remote.imap-banner)
+ - C(remote.mssql-banner)
+ - C(remote.mysql-banner)
+ - C(remote.ping)
+ - C(remote.pop3-banner)
+ - C(remote.postgresql-banner)
+ - C(remote.smtp-banner)
+ - C(remote.smtp)
+ - C(remote.ssh)
+ - C(remote.tcp)
+ - C(remote.telnet-banner)
+ - C(agent.filesystem)
+ - C(agent.memory)
+ - C(agent.load_average)
+ - C(agent.cpu)
+ - C(agent.disk)
+ - C(agent.network)
+ - C(agent.plugin)
+ required: true
+ monitoring_zones_poll:
+ type: str
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ type: str
+ description:
+      - One of C(target_hostname) and C(target_alias) is required for remote.* checks,
+        but prohibited for agent.* checks. The hostname this check should target.
+        Must be a valid IPv4 address, IPv6 address, or FQDN.
+ target_alias:
+ type: str
+ description:
+      - One of C(target_alias) and C(target_hostname) is required for remote.* checks,
+        but prohibited for agent.* checks. Use the corresponding key in the entity's
+        C(ip_addresses) hash to resolve an IP address to target.
+ details:
+ type: dict
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+ - If "yes", ensure the check is created, but don't actually use it yet.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ type: int
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ type: int
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ community.general.rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+ meta:
+ hurf: durf
+ register: the_check
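+
+  # A hedged sketch of an agent.* check: per the option docs above, it
+  # takes no monitoring zones or target and needs an entity with an
+  # agent_id.
+  - name: Collect memory statistics via the monitoring agent.
+    community.general.rax_mon_check:
+      credentials: ~/.rax_pub
+      state: present
+      entity_id: "{{ the_entity['entity']['id'] }}"
+      label: agent_memory
+      check_type: agent.memory
+    register: memory_check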
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout):
+
+ # Coerce attributes.
+
+ if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
+ monitoring_zones_poll = [monitoring_zones_poll]
+
+ if period:
+ period = int(period)
+
+ if timeout:
+ timeout = int(timeout)
+
+ changed = False
+ check = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ entity = cm.get_entity(entity_id)
+ if not entity:
+ module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
+ ' a valid entity id.' % entity_id)
+
+ existing = [e for e in entity.list_checks() if e.label == label]
+
+ if existing:
+ check = existing[0]
+
+ if state == 'present':
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing checks have a label of %s.' %
+ (len(existing), label))
+
+ should_delete = False
+ should_create = False
+ should_update = False
+
+ if check:
+ # Details may include keys set to default values that are not
+ # included in the initial creation.
+ #
+ # Only force a recreation of the check if one of the *specified*
+ # keys is missing or has a different value.
+ if details:
+ for (key, value) in details.items():
+ if key not in check.details:
+ should_delete = should_create = True
+ elif value != check.details[key]:
+ should_delete = should_create = True
+
+ should_update = label != check.label or \
+ (target_hostname and target_hostname != check.target_hostname) or \
+ (target_alias and target_alias != check.target_alias) or \
+ (disabled != check.disabled) or \
+ (metadata and metadata != check.metadata) or \
+ (period and period != check.period) or \
+ (timeout and timeout != check.timeout) or \
+ (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
+
+ if should_update and not should_delete:
+ check.update(label=label,
+ disabled=disabled,
+ metadata=metadata,
+ monitoring_zones_poll=monitoring_zones_poll,
+ timeout=timeout,
+ period=period,
+ target_alias=target_alias,
+ target_hostname=target_hostname)
+ changed = True
+ else:
+ # The check doesn't exist yet.
+ should_create = True
+
+ if should_delete:
+ check.delete()
+
+ if should_create:
+ check = cm.create_check(entity,
+ label=label,
+ check_type=check_type,
+ target_hostname=target_hostname,
+ target_alias=target_alias,
+ monitoring_zones_poll=monitoring_zones_poll,
+ details=details,
+ disabled=disabled,
+ metadata=metadata,
+ period=period,
+ timeout=timeout)
+ changed = True
+ elif state == 'absent':
+ if check:
+ check.delete()
+ changed = True
+ else:
+ module.fail_json(msg='state must be either present or absent.')
+
+ if check:
+ check_dict = {
+ "id": check.id,
+ "label": check.label,
+ "type": check.type,
+ "target_hostname": check.target_hostname,
+ "target_alias": check.target_alias,
+ "monitoring_zones_poll": check.monitoring_zones_poll,
+ "details": check.details,
+ "disabled": check.disabled,
+ "metadata": check.metadata,
+ "period": check.period,
+ "timeout": check.timeout
+ }
+ module.exit_json(changed=changed, check=check_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ entity_id=dict(required=True),
+ label=dict(required=True),
+ check_type=dict(required=True),
+ monitoring_zones_poll=dict(),
+ target_hostname=dict(),
+ target_alias=dict(),
+ details=dict(type='dict', default={}),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict', default={}),
+ period=dict(type='int'),
+ timeout=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ entity_id = module.params.get('entity_id')
+ label = module.params.get('label')
+ check_type = module.params.get('check_type')
+ monitoring_zones_poll = module.params.get('monitoring_zones_poll')
+ target_hostname = module.params.get('target_hostname')
+ target_alias = module.params.get('target_alias')
+ details = module.params.get('details')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+ period = module.params.get('period')
+ timeout = module.params.get('timeout')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
new file mode 100644
index 00000000..69f49cd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ label:
+ type: str
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ type: str
+ description:
+ - Ensure that an entity with this C(name) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ agent_id:
+ type: str
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ type: dict
+ description:
+ - Hash of IP addresses that may be referenced by name by rax_mon_checks
+ added to this entity. Must be a dictionary of with keys that are names
+ between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+ addresses.
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ community.general.rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.0.2.4
+ db_box: 192.0.2.5
+ meta:
+ hurf: durf
+ register: the_entity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
+ metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for entity in cm.list_entities():
+ if label == entity.label:
+ existing.append(entity)
+
+ entity = None
+
+ if existing:
+ entity = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing entities have the label %s.' %
+ (len(existing), label))
+
+ if entity:
+ if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
+ should_delete = should_create = True
+
+ # Change an existing Entity, unless there's nothing to do.
+            should_update = (agent_id and agent_id != entity.agent_id) or \
+ (metadata and metadata != entity.metadata)
+
+ if should_update and not should_delete:
+ entity.update(agent_id, metadata)
+ changed = True
+
+ if should_delete:
+ entity.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ # Create a new Entity.
+ entity = cm.create_entity(label=label, agent=agent_id,
+ ip_addresses=named_ip_addresses,
+ metadata=metadata)
+ changed = True
+ else:
+ # Delete the existing Entities.
+ for e in existing:
+ e.delete()
+ changed = True
+
+ if entity:
+ entity_dict = {
+ "id": entity.id,
+ "name": entity.name,
+ "agent_id": entity.agent_id,
+ }
+ module.exit_json(changed=changed, entity=entity_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ agent_id=dict(),
+ named_ip_addresses=dict(type='dict', default={}),
+ metadata=dict(type='dict', default={})
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ agent_id = module.params.get('agent_id')
+ named_ip_addresses = module.params.get('named_ip_addresses')
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
new file mode 100644
index 00000000..416d03ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ type: str
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ type: dict
+ description:
+ - Dictionary of key-value pairs used to initialize the notification.
+ Required keys and meanings vary with notification type. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+ service-notification-types-crud.html for details.
+ required: true
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+ rax_mon_entity:
+ credentials: ~/.rax_pub
+ label: omg
+ type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification(module, state, label, notification_type, details):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notifications():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing notifications are labelled %s.' %
+ (len(existing), label))
+
+ if notification:
+ should_delete = (notification_type != notification.type)
+
+ should_update = (details != notification.details)
+
+ if should_update and not should_delete:
+                # Pass the desired details, not the current ones, or the
+                # update is a no-op
+                notification.update(details=details)
+ changed = True
+
+ if should_delete:
+ notification.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ notification = cm.create_notification(notification_type,
+ label=label, details=details)
+ changed = True
+ else:
+ for n in existing:
+ n.delete()
+ changed = True
+
+ if notification:
+ notification_dict = {
+ "id": notification.id,
+ "type": notification.type,
+ "label": notification.label,
+ "details": notification.details
+ }
+ module.exit_json(changed=changed, notification=notification_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+ details=dict(required=True, type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ notification_type = module.params.get('notification_type')
+ details = module.params.get('details')
+
+ setup_rax_module(module, pyrax)
+
+ notification(module, state, label, notification_type, details)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
new file mode 100644
index 00000000..a4b8920d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+ plan.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ community.general.rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification_plan(module, state, label, critical_state, warning_state, ok_state):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification_plan = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notification_plans():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification_plan = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s notification plans are labelled %s.' %
+ (len(existing), label))
+
+ if notification_plan:
+ should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
+ (warning_state and warning_state != notification_plan.warning_state) or \
+ (ok_state and ok_state != notification_plan.ok_state)
+
+ if should_delete:
+ notification_plan.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification_plan = cm.create_notification_plan(label=label,
+ critical_state=critical_state,
+ warning_state=warning_state,
+ ok_state=ok_state)
+ changed = True
+ else:
+ for np in existing:
+ np.delete()
+ changed = True
+
+ if notification_plan:
+ notification_plan_dict = {
+ "id": notification_plan.id,
+ "critical_state": notification_plan.critical_state,
+ "warning_state": notification_plan.warning_state,
+ "ok_state": notification_plan.ok_state,
+ "metadata": notification_plan.metadata
+ }
+ module.exit_json(changed=changed, notification_plan=notification_plan_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ critical_state=dict(type='list'),
+ warning_state=dict(type='list'),
+ ok_state=dict(type='list')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ critical_state = module.params.get('critical_state')
+ warning_state = module.params.get('warning_state')
+ ok_state = module.params.get('ok_state')
+
+ setup_rax_module(module, pyrax)
+
+ notification_plan(module, state, label, critical_state, warning_state, ok_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py
new file mode 100644
index 00000000..27a793b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_network
+short_description: create / delete an isolated network in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud isolated network.
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ label:
+ type: str
+ description:
+ - Label (name) to give the network
+ required: yes
+ cidr:
+ type: str
+ description:
+ - cidr of the network being created
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Jesse Keating (@omgjlk)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build an Isolated Network
+  hosts: localhost
+  connection: local
+  gather_facts: False
+
+ tasks:
+ - name: Network create request
+ local_action:
+ module: rax_network
+ credentials: ~/.raxpub
+ label: my-net
+ cidr: 192.168.3.0/24
+ state: present
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_network(module, state, label, cidr):
+ changed = False
+ network = None
+ networks = []
+
+ if not pyrax.cloud_networks:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not cidr:
+ module.fail_json(msg='missing required arguments: cidr')
+
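+        # Idempotency: look the network up by label first and only create it
+        # when pyrax raises NetworkNotFound.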
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ except pyrax.exceptions.NetworkNotFound:
+ try:
+ network = pyrax.cloud_networks.create(label, cidr=cidr)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ network.delete()
+ changed = True
+ except pyrax.exceptions.NetworkNotFound:
+ pass
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if network:
+ instance = dict(id=network.id,
+ label=network.label,
+ cidr=network.cidr)
+ networks.append(instance)
+
+ module.exit_json(changed=changed, networks=networks)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present',
+ choices=['present', 'absent']),
+ label=dict(required=True),
+ cidr=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ cidr = module.params.get('cidr')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_network(module, state, label, cidr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py
new file mode 100644
index 00000000..dca006da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_queue
+short_description: create / delete a queue in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud queue.
+options:
+ name:
+ type: str
+ description:
+ - Name to give the queue
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Queue
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Queue create request
+ local_action:
+ module: rax_queue
+ credentials: ~/.raxpub
+ name: my-queue
+ region: DFW
+ state: present
+ register: my_queue
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_queue(module, state, name):
+    for arg, value in dict(state=state, name=name).items():
+        if not value:
+            module.fail_json(msg='%s is required for rax_queue' % arg)
+
+ changed = False
+ queues = []
+ instance = {}
+
+ cq = pyrax.queues
+ if not cq:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
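+    # Collect every queue whose name matches exactly; more than one match is
+    # treated as an error below.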
+ for queue in cq.list():
+ if name != queue.name:
+ continue
+
+ queues.append(queue)
+
+ if len(queues) > 1:
+ module.fail_json(msg='Multiple Queues were matched by name')
+
+ if state == 'present':
+ if not queues:
+ try:
+ queue = cq.create(name)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ queue = queues[0]
+
+ instance = dict(name=queue.name)
+ result = dict(changed=changed, queue=instance)
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if queues:
+ queue = queues[0]
+ try:
+ queue.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, queue=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_queue(module, state, name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
new file mode 100644
index 00000000..7b2b6ace
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_group
+short_description: Manipulate Rackspace Cloud Autoscale Groups
+description:
+ - Manipulate Rackspace Cloud Autoscale Groups
+options:
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+      - If not specified, it will fall back to C(auto).
+ choices:
+ - auto
+ - manual
+ files:
+ type: dict
+ description:
+ - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ required: true
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name)
+ required: true
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ loadbalancers:
+ type: list
+ description:
+ - List of load balancer C(id) and C(port) hashes
+ max_entities:
+ type: int
+ description:
+ - The maximum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ min_entities:
+ type: int
+ description:
+ - The minimum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ name:
+ type: str
+ description:
+ - Name to give the scaling group
+ required: true
+ networks:
+ type: list
+ description:
+      - The networks to attach to the instances. If specified, you must include
+        ALL networks, including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ server_name:
+ type: str
+ description:
+ - The base name for servers created by Autoscale
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+ - Data to be uploaded to the servers config drive. This option implies
+ I(config_drive). Can be a file path or a string
+ wait:
+ description:
+ - wait for the scaling group to finish provisioning the minimum amount of
+ servers
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_group:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ flavor: performance1-1
+ image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+ min_entities: 5
+ max_entities: 10
+ name: ASG Test
+ server_name: asgtest
+ loadbalancers:
+ - id: 228385
+ port: 80
+ register: asg
+'''
+
+import base64
+import json
+import os
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six import string_types
+
+
+def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
+ image=None, key_name=None, loadbalancers=None, meta=None,
+ min_entities=0, max_entities=0, name=None, networks=None,
+ server_name=None, state='present', user_data=None,
+ config_drive=False, wait=True, wait_timeout=300):
+ files = {} if files is None else files
+ loadbalancers = [] if loadbalancers is None else loadbalancers
+ meta = {} if meta is None else meta
+ networks = [] if networks is None else networks
+
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate clients. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if user_data:
+ config_drive = True
+
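+    # user_data may be passed inline or as a path to a local file; when the
+    # path exists, the file contents replace the value.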
+    if user_data and os.path.isfile(user_data):
+        try:
+            with open(user_data) as f:
+                user_data = f.read()
+        except Exception as e:
+            module.fail_json(msg='Failed to load %s: %s' % (user_data, e))
+
+ if state == 'present':
+ # Normalize and ensure all metadata values are strings
+ if meta:
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ for nic in nics:
+ # pyrax is currently returning net-id, but we need uuid
+ # this check makes this forward compatible for a time when
+ # pyrax uses uuid instead
+ if nic.get('net-id'):
+ nic.update(uuid=nic['net-id'])
+ del nic['net-id']
+
+ # Handle the file contents
+ personality = []
+ if files:
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+            try:
+                with open(lpath, 'r') as f:
+                    personality.append({
+                        'path': rpath,
+                        'contents': f.read()
+                    })
+            except Exception as e:
+                module.fail_json(msg='Failed to load %s: %s' % (lpath, e))
+
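+        # Normalize the loadbalancers option into (id, port) integer tuples,
+        # failing fast on anything non-numeric.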
+ lbs = []
+ if loadbalancers:
+ for lb in loadbalancers:
+ try:
+ lb_id = int(lb.get('id'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer ID is not an integer: '
+ '%s' % lb.get('id'))
+ try:
+ port = int(lb.get('port'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer port is not an '
+ 'integer: %s' % lb.get('port'))
+ if not lb_id or not port:
+ continue
+ lbs.append((lb_id, port))
+
+ try:
+ sg = au.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ sg = au.create(name, cooldown=cooldown,
+ min_entities=min_entities,
+ max_entities=max_entities,
+ launch_config_type='launch_server',
+ server_name=server_name, image=image,
+ flavor=flavor, disk_config=disk_config,
+ metadata=meta, personality=personality,
+ networks=nics, load_balancers=lbs,
+ key_name=key_name, config_drive=config_drive,
+ user_data=user_data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if not changed:
+ # Scaling Group Updates
+ group_args = {}
+ if cooldown != sg.cooldown:
+ group_args['cooldown'] = cooldown
+
+ if min_entities != sg.min_entities:
+ group_args['min_entities'] = min_entities
+
+ if max_entities != sg.max_entities:
+ group_args['max_entities'] = max_entities
+
+ if group_args:
+ changed = True
+ sg.update(**group_args)
+
+ # Launch Configuration Updates
+ lc = sg.get_launch_config()
+ lc_args = {}
+ if server_name != lc.get('name'):
+ lc_args['server_name'] = server_name
+
+ if image != lc.get('image'):
+ lc_args['image'] = image
+
+ if flavor != lc.get('flavor'):
+ lc_args['flavor'] = flavor
+
+ disk_config = disk_config or 'AUTO'
+ if ((disk_config or lc.get('disk_config')) and
+ disk_config != lc.get('disk_config', 'AUTO')):
+ lc_args['disk_config'] = disk_config
+
+ if (meta or lc.get('meta')) and meta != lc.get('metadata'):
+ lc_args['metadata'] = meta
+
+ test_personality = []
+ for p in personality:
+ test_personality.append({
+ 'path': p['path'],
+ 'contents': base64.b64encode(p['contents'])
+ })
+ if ((test_personality or lc.get('personality')) and
+ test_personality != lc.get('personality')):
+ lc_args['personality'] = personality
+
+ if nics != lc.get('networks'):
+ lc_args['networks'] = nics
+
+ if lbs != lc.get('load_balancers'):
+ # Work around for https://github.com/rackspace/pyrax/pull/393
+ lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
+
+ if key_name != lc.get('key_name'):
+ lc_args['key_name'] = key_name
+
+ if config_drive != lc.get('config_drive', False):
+ lc_args['config_drive'] = config_drive
+
+ if (user_data and
+ base64.b64encode(user_data) != lc.get('user_data')):
+ lc_args['user_data'] = user_data
+
+ if lc_args:
+ # Work around for https://github.com/rackspace/pyrax/pull/389
+ if 'flavor' not in lc_args:
+ lc_args['flavor'] = lc.get('flavor')
+ changed = True
+ sg.update_launch_config(**lc_args)
+
+ sg.get()
+
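+        # Poll the group state until Autoscale reports no pending servers, or
+        # until wait_timeout expires (wait_timeout=0 means poll forever).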
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ state = sg.get_state()
+ if state["pending_capacity"] == 0:
+ break
+
+ time.sleep(5)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+ else:
+ try:
+ sg = au.find(name=name)
+ sg.delete()
+ changed = True
+ except pyrax.exceptions.NotFound as e:
+ sg = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ config_drive=dict(default=False, type='bool'),
+ cooldown=dict(type='int', default=300),
+ disk_config=dict(choices=['auto', 'manual']),
+ files=dict(type='dict', default={}),
+ flavor=dict(required=True),
+ image=dict(required=True),
+ key_name=dict(),
+ loadbalancers=dict(type='list'),
+ meta=dict(type='dict', default={}),
+ min_entities=dict(type='int', required=True),
+ max_entities=dict(type='int', required=True),
+ name=dict(required=True),
+ networks=dict(type='list', default=['public', 'private']),
+ server_name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ config_drive = module.params.get('config_drive')
+ cooldown = module.params.get('cooldown')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ image = module.params.get('image')
+ key_name = module.params.get('key_name')
+ loadbalancers = module.params.get('loadbalancers')
+ meta = module.params.get('meta')
+ min_entities = module.params.get('min_entities')
+ max_entities = module.params.get('max_entities')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ server_name = module.params.get('server_name')
+ state = module.params.get('state')
+    user_data = module.params.get('user_data')
+    wait = module.params.get('wait')
+    wait_timeout = module.params.get('wait_timeout')
+
+ if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
+ module.fail_json(msg='min_entities and max_entities must be an '
+ 'integer between 0 and 1000')
+
+ if not 0 <= cooldown <= 86400:
+ module.fail_json(msg='cooldown must be an integer between 0 and 86400')
+
+ setup_rax_module(module, pyrax)
+
+    rax_asg(module, cooldown=cooldown, disk_config=disk_config,
+            files=files, flavor=flavor, image=image, meta=meta,
+            key_name=key_name, loadbalancers=loadbalancers,
+            min_entities=min_entities, max_entities=max_entities,
+            name=name, networks=networks, server_name=server_name,
+            state=state, config_drive=config_drive, user_data=user_data,
+            wait=wait, wait_timeout=wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
new file mode 100644
index 00000000..384825f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_policy
+short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
+description:
+ - Manipulate Rackspace Cloud Autoscale Scaling Policy
+options:
+ at:
+ type: str
+ description:
+ - The UTC time when this policy will be executed. The time must be
+ formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
+ C(2013-05-19T08:07:08Z)
+ change:
+ type: int
+ description:
+ - The change, either as a number of servers or as a percentage, to make
+ in the scaling group. If this is a percentage, you must set
+ I(is_percent) to C(true) also.
+ cron:
+ type: str
+ description:
+      - The time when the policy will be executed, as a cron entry. For
+        example, setting this parameter to C(1 0 * * *) runs the policy
+        daily at 00:01.
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ desired_capacity:
+ type: int
+ description:
+      - The desired server capacity of the scaling group; that is, how
+ many servers should be in the scaling group.
+ is_percent:
+ description:
+ - Whether the value in I(change) is a percent value
+ default: false
+ type: bool
+ name:
+ type: str
+ description:
+ - Name to give the policy
+ required: true
+ policy_type:
+ type: str
+ description:
+      - The type of policy that will be executed.
+ choices:
+ - webhook
+ - schedule
+ required: true
+ scaling_group:
+ type: str
+ description:
+ - Name of the scaling group that this policy will be added to
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ at: '2013-05-19T08:07:08Z'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - at
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asps_at
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cron: '1 0 * * *'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - cron
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asp_cron
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ desired_capacity: 5
+ name: ASG Test Policy - webhook
+ policy_type: webhook
+ scaling_group: ASG Test
+ register: asp_webhook
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
+ desired_capacity=0, is_percent=False, name=None,
+ policy_type=None, scaling_group=None, state='present'):
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
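+    # scaling_group may be given as a UUID or as a group name; try parsing it
+    # as a UUID first and fall back to a lookup by name.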
+ try:
+ UUID(scaling_group)
+ except ValueError:
+ try:
+ sg = au.find(name=scaling_group)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ try:
+ sg = au.get(scaling_group)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if state == 'present':
+        policies = [p for p in sg.list_policies() if name == p.name]
+ if len(policies) > 1:
+ module.fail_json(msg='No unique policy match found by name')
+ if at:
+ args = dict(at=at)
+ elif cron:
+ args = dict(cron=cron)
+ else:
+ args = None
+
+ if not policies:
+ try:
+ policy = sg.add_policy(name, policy_type=policy_type,
+ cooldown=cooldown, change=change,
+ is_percent=is_percent,
+ desired_capacity=desired_capacity,
+ args=args)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ else:
+ policy = policies[0]
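+            # Build a minimal update payload containing only the fields that
+            # differ from the existing policy.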
+ kwargs = {}
+ if policy_type != policy.type:
+ kwargs['policy_type'] = policy_type
+
+ if cooldown != policy.cooldown:
+ kwargs['cooldown'] = cooldown
+
+ if hasattr(policy, 'change') and change != policy.change:
+ kwargs['change'] = change
+
+ if hasattr(policy, 'changePercent') and is_percent is False:
+ kwargs['change'] = change
+ kwargs['is_percent'] = False
+ elif hasattr(policy, 'change') and is_percent is True:
+ kwargs['change'] = change
+ kwargs['is_percent'] = True
+
+ if hasattr(policy, 'desiredCapacity') and change:
+ kwargs['change'] = change
+ elif ((hasattr(policy, 'change') or
+ hasattr(policy, 'changePercent')) and desired_capacity):
+ kwargs['desired_capacity'] = desired_capacity
+
+ if hasattr(policy, 'args') and args != policy.args:
+ kwargs['args'] = args
+
+ if kwargs:
+ policy.update(**kwargs)
+ changed = True
+
+ policy.get()
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+ else:
+ try:
+            policies = [p for p in sg.list_policies() if name == p.name]
+            if len(policies) > 1:
+                module.fail_json(msg='No unique policy match found by name')
+            elif not policies:
+                policy = {}
+            else:
+                policy = policies[0]
+                policy.delete()
+                changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ at=dict(),
+ change=dict(type='int'),
+ cron=dict(),
+ cooldown=dict(type='int', default=300),
+ desired_capacity=dict(type='int'),
+ is_percent=dict(type='bool', default=False),
+ name=dict(required=True),
+ policy_type=dict(required=True, choices=['webhook', 'schedule']),
+ scaling_group=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['cron', 'at'],
+ ['change', 'desired_capacity'],
+ ]
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ at = module.params.get('at')
+ change = module.params.get('change')
+ cron = module.params.get('cron')
+ cooldown = module.params.get('cooldown')
+ desired_capacity = module.params.get('desired_capacity')
+ is_percent = module.params.get('is_percent')
+ name = module.params.get('name')
+ policy_type = module.params.get('policy_type')
+ scaling_group = module.params.get('scaling_group')
+ state = module.params.get('state')
+
+ if (at or cron) and policy_type == 'webhook':
+        module.fail_json(msg='policy_type=schedule is required for a '
+                             'time-based policy')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
+ desired_capacity=desired_capacity, is_percent=is_percent,
+ name=name, policy_type=policy_type, scaling_group=scaling_group,
+ state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py
new file mode 100644
index 00000000..7100d378
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: read_csv
+short_description: Read a CSV file
+description:
+- Read a CSV file and return a list or a dictionary, containing one dictionary per row.
+author:
+- Dag Wieers (@dagwieers)
+options:
+ path:
+ description:
+ - The CSV filename to read data from.
+ type: path
+ required: yes
+ aliases: [ filename ]
+ key:
+ description:
+ - The column name used as a key for the resulting dictionary.
+ - If C(key) is unset, the module returns a list of dictionaries,
+ where each dictionary is a row in the CSV file.
+ type: str
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include C(excel), C(excel-tab) or C(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ unique:
+ description:
+ - Whether the C(key) used is expected to be unique.
+ type: bool
+ default: yes
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+ - Whether to ignore any whitespaces immediately following the delimiter.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+notes:
+- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja.
+'''
+
+EXAMPLES = r'''
+# Example CSV file with header
+#
+# name,uid,gid
+# dag,500,500
+# jeroen,501,500
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ community.general.read_csv:
+ path: users.csv
+ key: name
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}'
+
+# Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+    msg: 'User {{ users.list.0.name }} has UID {{ users.list.0.uid }} and GID {{ users.list.0.gid }}'
+
+# Example CSV file without header and semi-colon delimiter
+#
+# dag;500;500
+# jeroen;501;500
+
+# Read a CSV file without headers
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ fieldnames: name,uid,gid
+ delimiter: ';'
+ register: users
+ delegate_to: localhost
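+
+# Illustrative addition (not from the original examples): with unique=no,
+# duplicate key values do not fail the task; later rows overwrite earlier
+# ones in the returned dictionary.
+- name: Read users from CSV file, allowing duplicate names
+  community.general.read_csv:
+    path: users.csv
+    key: name
+    unique: no
+  register: users
+  delegate_to: localhost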
+'''
+
+RETURN = r'''
+dict:
+ description: The CSV content as a dictionary.
+ returned: success
+ type: dict
+ sample:
+ dag:
+ name: dag
+ uid: 500
+ gid: 500
+ jeroen:
+ name: jeroen
+ uid: 501
+ gid: 500
+list:
+ description: The CSV content as a list.
+ returned: success
+ type: list
+ sample:
+ - name: dag
+ uid: 500
+ gid: 500
+ - name: jeroen
+ uid: 501
+ gid: 500
+'''
+
+import csv
+from io import BytesIO, StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import PY3
+
+
+# Add Unix dialect from Python 3
+class unix_dialect(csv.Dialect):
+ """Describe the usual properties of Unix-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = csv.QUOTE_ALL
+
+
+csv.register_dialect("unix", unix_dialect)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['filename']),
+ dialect=dict(type='str', default='excel'),
+ key=dict(type='str'),
+ fieldnames=dict(type='list', elements='str'),
+ unique=dict(type='bool', default=True),
+ delimiter=dict(type='str'),
+ skipinitialspace=dict(type='bool'),
+ strict=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ dialect = module.params['dialect']
+ key = module.params['key']
+ fieldnames = module.params['fieldnames']
+ unique = module.params['unique']
+
+ if dialect not in csv.list_dialects():
+ module.fail_json(msg="Dialect '%s' is not supported by your version of python." % dialect)
+
+ dialect_options = dict(
+ delimiter=module.params['delimiter'],
+ skipinitialspace=module.params['skipinitialspace'],
+ strict=module.params['strict'],
+ )
+
+ # Create a dictionary from only set options
+ dialect_params = dict((k, v) for k, v in dialect_options.items() if v is not None)
+ if dialect_params:
+ try:
+ csv.register_dialect('custom', dialect, **dialect_params)
+ except TypeError as e:
+ module.fail_json(msg="Unable to create custom dialect: %s" % to_text(e))
+ dialect = 'custom'
+
+ try:
+ with open(path, 'rb') as f:
+ data = f.read()
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unable to open file: %s" % to_text(e))
+
+ if PY3:
+ # Manually decode on Python3 so that we can use the surrogateescape error handler
+ data = to_text(data, errors='surrogate_or_strict')
+ fake_fh = StringIO(data)
+ else:
+ fake_fh = BytesIO(data)
+
+ reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
+
+ if key and key not in reader.fieldnames:
+ module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
+
+ data_dict = dict()
+ data_list = list()
+
+ if key is None:
+ try:
+ for row in reader:
+ data_list.append(row)
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+ else:
+ try:
+ for row in reader:
+ if unique and row[key] in data_dict:
+ module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
+ data_dict[row[key]] = row
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+
+ module.exit_json(dict=data_dict, list=data_list)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py
new file mode 100644
index 00000000..78007f1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py
@@ -0,0 +1,756 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_command
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+  - Manages OOB controller, e.g. reboot, log management.
+  - Manages OOB controller users, e.g. add, remove, update.
+  - Manages system power, e.g. on, off, graceful and forced reboot.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - Username for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ id:
+ required: false
+ aliases: [ account_id ]
+ description:
+ - ID of account to delete/modify
+ type: str
+ new_username:
+ required: false
+ aliases: [ account_username ]
+ description:
+ - Username of account to add/delete/modify
+ type: str
+ new_password:
+ required: false
+ aliases: [ account_password ]
+ description:
+ - New password of account to add/modify
+ type: str
+ roleid:
+ required: false
+ aliases: [ account_roleid ]
+ description:
+ - Role of account to add/modify
+ type: str
+ bootdevice:
+ required: false
+ description:
+ - bootdevice when setting boot configuration
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ uefi_target:
+ required: false
+ description:
+ - UEFI target when bootdevice is "UefiTarget"
+ type: str
+ boot_next:
+ required: false
+ description:
+ - BootNext target when bootdevice is "UefiBootNext"
+ type: str
+ update_username:
+ required: false
+ aliases: [ account_updatename ]
+ description:
+      - New username for the account specified by account_username
+ type: str
+ version_added: '0.2.0'
+ account_properties:
+ required: false
+ description:
+ - properties of account service to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ update_image_uri:
+ required: false
+ description:
+ - The URI of the image for the update
+ type: str
+ version_added: '0.2.0'
+ update_protocol:
+ required: false
+ description:
+ - The protocol for the update
+ type: str
+ version_added: '0.2.0'
+ update_targets:
+ required: false
+ description:
+ - The list of target resource URIs to apply the update to
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ update_creds:
+ required: false
+ description:
+ - The credentials for retrieving the update image
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ username:
+ required: false
+ description:
+ - The username for retrieving the update image
+ type: str
+ password:
+ required: false
+ description:
+ - The password for retrieving the update image
+ type: str
+ virtual_media:
+ required: false
+ description:
+ - The options for VirtualMedia commands
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ media_types:
+ required: false
+ description:
+ - The list of media types appropriate for the image
+ type: list
+ elements: str
+ image_url:
+ required: false
+ description:
+        - The URL of the image to insert or eject
+ type: str
+ inserted:
+ required: false
+ description:
+ - Indicates if the image is treated as inserted on command completion
+ type: bool
+ default: True
+ write_protected:
+ required: false
+ description:
+ - Indicates if the media is treated as write-protected
+ type: bool
+ default: True
+ username:
+ required: false
+ description:
+ - The username for accessing the image URL
+ type: str
+ password:
+ required: false
+ description:
+ - The password for accessing the image URL
+ type: str
+ transfer_protocol_type:
+ required: false
+ description:
+ - The network protocol to use with the image
+ type: str
+ transfer_method:
+ required: false
+ description:
+ - The transfer method to use with the image
+ type: str
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Restart system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulRestart
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Turn system power off
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceOff
+ resource_id: 437XR1138R2
+
+ - name: Restart system power forcefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceRestart
+ resource_id: 437XR1138R2
+
+ - name: Shutdown system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulShutdown
+ resource_id: 437XR1138R2
+
+ - name: Turn system power on
+ community.general.redfish_command:
+ category: Systems
+ command: PowerOn
+ resource_id: 437XR1138R2
+
+ - name: Reboot system power
+ community.general.redfish_command:
+ category: Systems
+ command: PowerReboot
+ resource_id: 437XR1138R2
+
+ - name: Set one-time boot device to {{ bootdevice }}
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiTarget"
+ uefi_target: "/0x31/0x33/0x01/0x01"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to BootNext target of "Boot0001"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiBootNext"
+ boot_next: "Boot0001"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: EnableContinuousBootOverride
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Disable persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: DisableBootOverride
+
+ - name: Set chassis indicator LED to blink
+ community.general.redfish_command:
+ category: Chassis
+ command: IndicatorLedBlink
+ resource_id: 1U
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Add user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Add user using new option aliases
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+ account_roleid: "{{ account_roleid }}"
+
+ - name: Delete user
+ community.general.redfish_command:
+ category: Accounts
+ command: DeleteUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Disable user
+ community.general.redfish_command:
+ category: Accounts
+ command: DisableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Add and enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser,EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user password
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserPassword
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+
+ - name: Update user role
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserRole
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_updatename: "{{ account_updatename }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ update_username: "{{ update_username }}"
+
+ - name: Update AccountService properties
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateAccountServiceProperties
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_properties:
+ AccountLockoutThreshold: 5
+ AccountLockoutDuration: 600
+
+ - name: Clear Manager Logs with a timeout of 20 seconds
+ community.general.redfish_command:
+ category: Manager
+ command: ClearLogs
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Clear Sessions
+ community.general.redfish_command:
+ category: Sessions
+ command: ClearSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Simple update
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: https://example.com/myupdate.img
+
+ - name: Simple update with additional options
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: //example.com/myupdate.img
+ update_protocol: FTP
+ update_targets:
+ - /redfish/v1/UpdateService/FirmwareInventory/BMC
+ update_creds:
+ username: operator
+ password: supersecretpwd
+
+ - name: Insert Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaInsert
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ media_types:
+ - CD
+ - DVD
+ resource_id: BMC
+
+ - name: Eject Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ resource_id: BMC
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: GracefulRestart
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulRestart
+ resource_id: BMC
+
+ - name: Turn manager power off
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceOff
+ resource_id: BMC
+
+ - name: Restart manager power forcefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceRestart
+ resource_id: BMC
+
+ - name: Shutdown manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulShutdown
+ resource_id: BMC
+
+ - name: Turn manager power on
+ community.general.redfish_command:
+ category: Manager
+ command: PowerOn
+ resource_id: BMC
+
+ - name: Reboot manager power
+ community.general.redfish_command:
+ category: Manager
+ command: PowerReboot
+ resource_id: BMC
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart",
+ "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride"],
+ "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"],
+ "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser",
+ "UpdateUserRole", "UpdateUserPassword", "UpdateUserName",
+ "UpdateAccountServiceProperties"],
+ "Sessions": ["ClearSessions"],
+ "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert",
+ "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart",
+ "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"],
+ "Update": ["SimpleUpdate"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ id=dict(aliases=["account_id"]),
+ new_username=dict(aliases=["account_username"]),
+ new_password=dict(aliases=["account_password"], no_log=True),
+ roleid=dict(aliases=["account_roleid"]),
+ update_username=dict(type='str', aliases=["account_updatename"]),
+ account_properties=dict(type='dict', default={}),
+ bootdevice=dict(),
+ timeout=dict(type='int', default=10),
+ uefi_target=dict(),
+ boot_next=dict(),
+ resource_id=dict(),
+ update_image_uri=dict(),
+ update_protocol=dict(),
+ update_targets=dict(type='list', elements='str', default=[]),
+ update_creds=dict(
+ type='dict',
+ options=dict(
+ username=dict(),
+ password=dict(no_log=True)
+ )
+ ),
+ virtual_media=dict(
+ type='dict',
+ options=dict(
+ media_types=dict(type='list', elements='str', default=[]),
+ image_url=dict(),
+ inserted=dict(type='bool', default=True),
+ write_protected=dict(type='bool', default=True),
+ username=dict(),
+ password=dict(no_log=True),
+ transfer_protocol_type=dict(),
+ transfer_method=dict(),
+ )
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # user to add/modify/delete
+ user = {'account_id': module.params['id'],
+ 'account_username': module.params['new_username'],
+ 'account_password': module.params['new_password'],
+ 'account_roleid': module.params['roleid'],
+ 'account_updatename': module.params['update_username'],
+ 'account_properties': module.params['account_properties']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # update options
+ update_opts = {
+ 'update_image_uri': module.params['update_image_uri'],
+ 'update_protocol': module.params['update_protocol'],
+ 'update_targets': module.params['update_targets'],
+ 'update_creds': module.params['update_creds']
+ }
+
+ # Boot override options
+ boot_opts = {
+ 'bootdevice': module.params['bootdevice'],
+ 'uefi_target': module.params['uefi_target'],
+ 'boot_next': module.params['boot_next']
+ }
+
+ # VirtualMedia options
+ virtual_media = module.params['virtual_media']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Accounts":
+ ACCOUNTS_COMMANDS = {
+ "AddUser": rf_utils.add_user,
+ "EnableUser": rf_utils.enable_user,
+ "DeleteUser": rf_utils.delete_user,
+ "DisableUser": rf_utils.disable_user,
+ "UpdateUserRole": rf_utils.update_user_role,
+ "UpdateUserPassword": rf_utils.update_user_password,
+ "UpdateUserName": rf_utils.update_user_name,
+ "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties
+ }
+
+ # execute only if we find an Account service resource
+ result = rf_utils._find_accountservice_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ result = ACCOUNTS_COMMANDS[command](user)
+
+ elif category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command.startswith('Power'):
+ result = rf_utils.manage_system_power(command)
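+            # The boot override commands below map onto the Redfish
+            # BootSourceOverrideEnabled values: Once, Continuous and Disabled.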
+ elif command == "SetOneTimeBoot":
+ boot_opts['override_enabled'] = 'Once'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "EnableContinuousBootOverride":
+ boot_opts['override_enabled'] = 'Continuous'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "DisableBootOverride":
+ boot_opts['override_enabled'] = 'Disabled'
+ result = rf_utils.set_boot_override(boot_opts)
+
+ elif category == "Chassis":
+ result = rf_utils._find_chassis_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"]
+
+ # Check if more than one led_command is present
+ num_led_commands = sum([command in led_commands for command in command_list])
+ if num_led_commands > 1:
+ result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
+ else:
+ for command in command_list:
+ if command in led_commands:
+ result = rf_utils.manage_indicator_led(command)
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ClearSessions":
+ result = rf_utils.clear_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+            # standardize on the Power* commands, but allow the legacy
+ # GracefulRestart command
+ if command == 'GracefulRestart':
+ command = 'PowerGracefulRestart'
+
+ if command.startswith('Power'):
+ result = rf_utils.manage_manager_power(command)
+ elif command == 'ClearLogs':
+ result = rf_utils.clear_logs()
+ elif command == 'VirtualMediaInsert':
+ result = rf_utils.virtual_media_insert(virtual_media)
+ elif command == 'VirtualMediaEject':
+ result = rf_utils.virtual_media_eject(virtual_media)
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "SimpleUpdate":
+ result = rf_utils.simple_update(update_opts)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ changed = result.get('changed', True)
+ module.exit_json(changed=changed, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py
new file mode 100644
index 00000000..26b692a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_config
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ set or update a configuration attribute.
+ - Manages BIOS configuration settings.
+ - Manages OOB controller configuration settings.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ bios_attribute_name:
+ required: false
+ description:
+ - name of BIOS attr to update (deprecated - use bios_attributes instead)
+ default: 'null'
+ type: str
+ bios_attribute_value:
+ required: false
+ description:
+ - value of BIOS attr to update (deprecated - use bios_attributes instead)
+ default: 'null'
+ type: raw
+ bios_attributes:
+ required: false
+ description:
+ - dictionary of BIOS attributes to update
+ default: {}
+ type: dict
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ boot_order:
+ required: false
+ description:
+ - list of BootOptionReference strings specifying the BootOrder
+ default: []
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ network_protocols:
+ required: false
+ description:
+ - setting dict of manager services to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ nic_addr:
+ required: false
+ description:
+ - EthernetInterface Address string on OOB controller
+ default: 'null'
+ type: str
+ version_added: '0.2.0'
+ nic_config:
+ required: false
+ description:
+ - setting dict of EthernetInterface on OOB controller
+ type: dict
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Set BootMode to UEFI
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Uefi"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set multiple BootMode attributes
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable PXE Boot for NIC1 using deprecated options
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attribute_name: PxeDev1EnDis
+ bios_attribute_value: Enabled
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set BIOS default settings with a timeout of 20 seconds
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosDefaultSettings
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Set boot order
+ community.general.redfish_config:
+ category: Systems
+ command: SetBootOrder
+ boot_order:
+ - Boot0002
+ - Boot0001
+ - Boot0000
+ - Boot0003
+ - Boot0004
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set boot order to the default
+ community.general.redfish_config:
+ category: Systems
+ command: SetDefaultBootOrder
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager Network Protocols
+ community.general.redfish_config:
+ category: Manager
+ command: SetNetworkProtocols
+ network_protocols:
+ SNMP:
+ ProtocolEnabled: True
+ Port: 161
+ HTTP:
+ ProtocolEnabled: False
+ Port: 8080
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager NIC
+ community.general.redfish_config:
+ category: Manager
+ command: SetManagerNic
+ nic_config:
+ DHCPv4:
+ DHCPEnabled: False
+ IPv4StaticAddresses:
+ Address: 192.168.1.3
+ Gateway: 192.168.1.1
+ SubnetMask: 255.255.255.0
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
+ "SetDefaultBootOrder"],
+ "Manager": ["SetNetworkProtocols", "SetManagerNic"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ bios_attribute_name=dict(default='null'),
+ bios_attribute_value=dict(default='null', type='raw'),
+ bios_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ boot_order=dict(type='list', elements='str', default=[]),
+ network_protocols=dict(
+ type='dict',
+ default={}
+ ),
+ resource_id=dict(),
+ nic_addr=dict(default='null'),
+ nic_config=dict(
+ type='dict',
+ default={}
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # BIOS attributes to update
+ bios_attributes = module.params['bios_attributes']
+ if module.params['bios_attribute_name'] != 'null':
+ bios_attributes[module.params['bios_attribute_name']] = module.params[
+ 'bios_attribute_value']
+ module.deprecate(msg='The bios_attribute_name/bios_attribute_value '
+ 'options are deprecated. Use bios_attributes instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
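+        # e.g. the deprecated pair bios_attribute_name='BootMode' /
+        # bios_attribute_value='Uefi' (hypothetical values) is folded into
+        # bios_attributes={'BootMode': 'Uefi'} above.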
+
+ # boot order
+ boot_order = module.params['boot_order']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # manager nic
+ nic_addr = module.params['nic_addr']
+ nic_config = module.params['nic_config']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetBiosDefaultSettings":
+ result = rf_utils.set_bios_default_settings()
+ elif command == "SetBiosAttributes":
+ result = rf_utils.set_bios_attributes(bios_attributes)
+ elif command == "SetBootOrder":
+ result = rf_utils.set_boot_order(boot_order)
+ elif command == "SetDefaultBootOrder":
+ result = rf_utils.set_default_boot_order()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetNetworkProtocols":
+ result = rf_utils.set_network_protocols(module.params['network_protocols'])
+ elif command == "SetManagerNic":
+ result = rf_utils.set_manager_nic(nic_addr, nic_config)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
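+    # Detect invocation under the deprecated 'redfish_facts' alias so the
+    # legacy ansible_facts return shape can be preserved when exiting.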
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+    if "all" in module.params['category']:
+        category_list = list(CATEGORY_COMMANDS_ALL)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+            elif "all" in module.params['command']:
+                command_list = list(CATEGORY_COMMANDS_ALL[category])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
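+    # Detect invocation under the deprecated 'redfish_facts' alias so the
+    # legacy ansible_facts return shape can be preserved when exiting.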
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+    if "all" in module.params['category']:
+        category_list = list(CATEGORY_COMMANDS_ALL)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+            elif "all" in module.params['command']:
+                command_list = list(CATEGORY_COMMANDS_ALL[category])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py
new file mode 100644
index 00000000..a4599588
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py
@@ -0,0 +1,930 @@
+#!/usr/bin/python
+
+# James Laska (jlaska@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redhat_subscription
+short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
+description:
+ - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
+author: "Barnaby Court (@barnabycourt)"
+notes:
+ - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
+ - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
+ I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
+ I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
+ config file and default to None.
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+ - whether to register and subscribe (C(present)), or unregister (C(absent)) a system
+ choices: [ "present", "absent" ]
+ default: "present"
+ type: str
+ username:
+ description:
+ - access.redhat.com or Sat6 username
+ type: str
+ password:
+ description:
+ - access.redhat.com or Sat6 password
+ type: str
+ server_hostname:
+ description:
+ - Specify an alternative Red Hat Subscription Management or Sat6 server
+ type: str
+ server_insecure:
+ description:
+ - Enable or disable https server certificate verification when connecting to C(server_hostname)
+ type: str
+ rhsm_baseurl:
+ description:
+ - Specify CDN baseurl
+ type: str
+ rhsm_repo_ca_cert:
+ description:
+ - Specify an alternative location for a CA certificate for CDN
+ type: str
+ server_proxy_hostname:
+ description:
+      - Specify an HTTP proxy hostname
+ type: str
+ server_proxy_port:
+ description:
+      - Specify an HTTP proxy port
+ type: str
+ server_proxy_user:
+ description:
+ - Specify a user for HTTP proxy with basic authentication
+ type: str
+ server_proxy_password:
+ description:
+ - Specify a password for HTTP proxy with basic authentication
+ type: str
+ auto_attach:
+ description:
+ - Upon successful registration, auto-consume available subscriptions
+ - Added in favor of deprecated autosubscribe in 2.5.
+ type: bool
+ aliases: [autosubscribe]
+ activationkey:
+ description:
+ - supply an activation key for use with registration
+ type: str
+ org_id:
+ description:
+ - Organization ID to use in conjunction with activationkey
+ type: str
+ environment:
+ description:
+ - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
+ type: str
+ pool:
+ description:
+ - |
+ Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
+ possible, as it is much faster. Mutually exclusive with I(pool_ids).
+ default: '^$'
+ type: str
+ pool_ids:
+ description:
+ - |
+ Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
+ A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
+ or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
+        C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
+ entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
+ default: []
+ type: list
+ consumer_type:
+ description:
+ - The type of unit to register, defaults to system
+ type: str
+ consumer_name:
+ description:
+ - Name of the system to register, defaults to the hostname
+ type: str
+ consumer_id:
+ description:
+ - |
+ References an existing consumer ID to resume using a previous registration
+ for this system. If the system's identity certificate is lost or corrupted,
+ this option allows it to resume using its previous identity and subscriptions.
+ The default is to not specify a consumer ID so a new ID is created.
+ type: str
+ force_register:
+ description:
+ - Register the system even if it is already registered
+ type: bool
+ default: no
+ release:
+ description:
+ - Set a release version
+ type: str
+ syspurpose:
+ description:
+      - Set syspurpose attributes in the file C(/etc/rhsm/syspurpose/syspurpose.json)
+        and synchronize these attributes with the RHSM server. Syspurpose attributes help attach
+        the most appropriate subscriptions to the system automatically. When the C(syspurpose.json)
+        file already contains attributes, new values overwrite the existing ones, and any existing
+        attribute missing from the new list is removed from the file. Unknown attributes are ignored.
+ type: dict
+ default: {}
+ suboptions:
+ usage:
+ description: Syspurpose attribute usage
+ type: str
+ role:
+ description: Syspurpose attribute role
+ type: str
+ service_level_agreement:
+ description: Syspurpose attribute service_level_agreement
+ type: str
+ addons:
+ description: Syspurpose attribute addons
+ type: list
+ sync:
+ description:
+          - When this option is true, syspurpose attributes are synchronized with the
+            RHSM server immediately. When it is false, they are synchronized later by
+            the rhsmcertd daemon.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+
+- name: Same as above but subscribe to a specific pool by ID.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids: 0123456789abcdef0123456789abcdef
+
+- name: Register and subscribe to multiple pools.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+- name: Same as above but consume multiple entitlements.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef: 2
+ - 1123456789abcdef0123456789abcdef: 4
+
+- name: Register and pull existing system data.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
+
+- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^Red Hat Enterprise Server$'
+
+- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ environment: Library
+ auto_attach: true
+
+- name: Register as user (joe_user) with password (somepass) and a specific release
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ release: 7.4
+
+- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+ syspurpose:
+ usage: "Production"
+ role: "Red Hat Enterprise Server"
+ service_level_agreement: "Premium"
+ addons:
+ - addon1
+ - addon2
+ sync: true
+'''
+
+RETURN = '''
+subscribed_pool_ids:
+  description: List of pool IDs to which the system is now subscribed
+ returned: success
+ type: complex
+ sample: {
+ "8a85f9815ab905d3015ab928c7005de4": "1"
+ }
+'''
+
+from os.path import isfile
+from os import unlink
+import re
+import shutil
+import tempfile
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves import configparser
+
+
+SUBMAN_CMD = None
+
+
+class RegistrationBase(object):
+
+ REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
+
+ def __init__(self, module, username=None, password=None):
+ self.module = module
+ self.username = username
+ self.password = password
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ if isfile(self.REDHAT_REPO):
+ unlink(self.REDHAT_REPO)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
+ if isfile(plugin_conf):
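+            # Edit a temporary copy and atomically move it back into place so
+            # a failed write cannot leave a truncated plugin config behind.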
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ if enabled:
+ cfg.set('main', 'enabled', '1')
+ else:
+ cfg.set('main', 'enabled', '0')
+
+ fd = open(tmpfile, 'w+')
+ cfg.write(fd)
+ fd.close()
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
+ self.module = module
+
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHSM
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ args = [SUBMAN_CMD, 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'server_hostname' becomes '--server.hostname'.
+ options = []
+ for k, v in sorted(kwargs.items()):
+ if re.search(r'^(server|rhsm)_', k) and v is not None:
+ options.append('--%s=%s' % (k.replace('_', '.', 1), v))
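+        # For example, hypothetical kwargs {'server_hostname': 'rhsm.example.com',
+        # 'rhsm_baseurl': 'https://cdn.example.com'} would yield the options
+        # ['--server.hostname=rhsm.example.com', '--rhsm.baseurl=https://cdn.example.com'].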
+
+        # When there is nothing to configure, skip running the config
+        # command: invoked without options it only prints the current
+        # contents of the configuration file.
+ if len(options) == 0:
+ return
+
+ args.extend(options)
+
+ self.module.run_command(args, check_rc=True)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHSM.
+ '''
+
+ args = [SUBMAN_CMD, 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def register(self, username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register, environment,
+ rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
+ server_proxy_port, server_proxy_user, server_proxy_password, release):
+ '''
+ Register the current system to the provided RHSM or Sat6 server
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'register']
+
+ # Generate command arguments
+ if force_register:
+ args.extend(['--force'])
+
+ if rhsm_baseurl:
+ args.extend(['--baseurl', rhsm_baseurl])
+
+ if server_insecure:
+ args.extend(['--insecure'])
+
+ if server_hostname:
+ args.extend(['--serverurl', server_hostname])
+
+ if org_id:
+ args.extend(['--org', org_id])
+
+ if server_proxy_hostname and server_proxy_port:
+ args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
+
+ if server_proxy_user:
+ args.extend(['--proxyuser', server_proxy_user])
+
+ if server_proxy_password:
+ args.extend(['--proxypassword', server_proxy_password])
+
+ if activationkey:
+ args.extend(['--activationkey', activationkey])
+ else:
+ if auto_attach:
+ args.append('--auto-attach')
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+ if consumer_type:
+ args.extend(['--type', consumer_type])
+ if consumer_name:
+ args.extend(['--name', consumer_name])
+ if consumer_id:
+ args.extend(['--consumerid', consumer_id])
+ if environment:
+ args.extend(['--environment', environment])
+
+ if release:
+ args.extend(['--release', release])
+
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+ def unsubscribe(self, serials=None):
+ '''
+ Unsubscribe a system from subscribed channels
+ Args:
+ serials(list or None): list of serials to unsubscribe. If
+ serials is none or an empty list, then
+ all subscribed channels will be removed.
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ items = []
+ if serials is not None and serials:
+ items = ["--serial=%s" % s for s in serials]
+ if serials is None:
+ items = ["--all"]
+
+ if items:
+ args = [SUBMAN_CMD, 'unsubscribe'] + items
+            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ return serials
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'unregister']
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression. It matches regexp against available pool ids first.
+ If any pool ids match, subscribe to those pools and return.
+
+ If no pool ids match, then match regexp against available pool product
+        names. Note this can still easily match many pools. Then subscribe
+ to those pools.
+
+        Since a pool id is a more specific match, we only fall back to matching
+ against names if we didn't match pool ids.
+
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ # See https://github.com/ansible/ansible/issues/19466
+
+ # subscribe to pools whose pool id matches regexp (and only the pool id)
+ subscribed_pool_ids = self.subscribe_pool(regexp)
+
+ # If we found any matches, we are done
+ # Don't attempt to match pools by product name
+ if subscribed_pool_ids:
+ return subscribed_pool_ids
+
+ # We didn't match any pool ids.
+ # Now try subscribing to pools based on product name match
+ # Note: This can match lots of product names.
+ subscribed_by_product_pool_ids = self.subscribe_product(regexp)
+ if subscribed_by_product_pool_ids:
+ return subscribed_by_product_pool_ids
+
+ # no matches
+ return []
+
+ def subscribe_by_pool_ids(self, pool_ids):
+ """
+ Try to subscribe to the list of pool IDs
+ """
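+        # pool_ids maps pool ID -> quantity to consume (None for the default),
+        # e.g. {'0123456789abcdef0123456789abcdef': 2}.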
+ available_pools = RhsmPools(self.module)
+
+ available_pool_ids = [p.get_pool_id() for p in available_pools]
+
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if pool_id in available_pool_ids:
+ args = [SUBMAN_CMD, 'attach', '--pool', pool_id]
+ if quantity is not None:
+ args.extend(['--quantity', to_native(quantity)])
+                rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ else:
+ self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
+ return pool_ids
+
+ def subscribe_pool(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_pools(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def subscribe_product(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_products(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def update_subscriptions(self, regexp):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+ pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
+ pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
+
+ serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ subscribed_pool_ids = self.subscribe(regexp)
+
+ if subscribed_pool_ids or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
+ 'unsubscribed_serials': serials}
+
+ def update_subscriptions_by_pool_ids(self, pool_ids):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+
+ existing_pools = {}
+ for p in consumed_pools:
+ existing_pools[p.get_pool_id()] = p.QuantityUsed
+
+ serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ missing_pools = {}
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if existing_pools.get(pool_id, 0) != quantity:
+ missing_pools[pool_id] = quantity
+
+ self.subscribe_by_pool_ids(missing_pools)
+
+ if missing_pools or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()),
+ 'unsubscribed_serials': serials}
+
+ def sync_syspurpose(self):
+ """
+ Try to synchronize syspurpose attributes with server
+ """
+ args = [SUBMAN_CMD, 'status']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self._name)
+
+ def get_pool_id(self):
+ # The attribute is 'PoolId' in newer subscription-manager output and
+ # 'PoolID' in older releases; getattr's default argument would be
+ # evaluated eagerly, so try the two names in turn instead.
+ try:
+ return getattr(self, 'PoolId')
+ except AttributeError:
+ return getattr(self, 'PoolID')
+
+ def subscribe(self):
+ args = "subscription-manager attach --pool %s" % self.get_pool_id()
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pool subscriptions with RHSM
+ """
+
+ def __init__(self, module, consumed=False):
+ self.module = module
+ self.products = self._load_product_list(consumed)
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self, consumed=False):
+ """
+ Load the list of all available or consumed pools for the system into a data structure
+
+ Args:
+ consumed(bool): if True list consumed pools, else list available pools (default False)
+ """
+ args = "subscription-manager list"
+ if consumed:
+ args += " --consumed"
+ else:
+ args += " --available"
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
+
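+ # Example 'subscription-manager list --available' output being parsed:
+ #
+ # Subscription Name: Red Hat Enterprise Linux Server
+ # Pool ID: ff8080816ea20fb9016ea21283ab03a3
+ # Quantity: 1
+ #
+ # Blank lines separate entries; each 'Key: Value' pair is attached to
+ # the most recently seen product, with spaces stripped from key names.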
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "") # e.g. 'Pool ID' -> 'PoolID'
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter_pools(self, regexp='^$'):
+ '''
+ Yield the RhsmPools whose pool id matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product.get_pool_id()):
+ yield product
+
+ def filter_products(self, regexp='^$'):
+ '''
+ Yield the RhsmPools whose product name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
+
+
+class SysPurpose(object):
+ """
+ This class is used for reading and writing the syspurpose.json file
+ """
+
+ SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
+
+ ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
+
+ def __init__(self, path=None):
+ """
+ Initialize class used for reading syspurpose json file
+ """
+ self.path = path or self.SYSPURPOSE_FILE_PATH
+
+ def update_syspurpose(self, new_syspurpose):
+ """
+ Try to update current syspurpose with new attributes from new_syspurpose
+ """
+ syspurpose = {}
+ syspurpose_changed = False
+ for key, value in new_syspurpose.items():
+ if key in self.ALLOWED_ATTRIBUTES:
+ if value is not None:
+ syspurpose[key] = value
+ elif key == 'sync':
+ pass
+ else:
+ raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
+ (key, self.ALLOWED_ATTRIBUTES))
+ current_syspurpose = self._read_syspurpose()
+ if current_syspurpose != syspurpose:
+ syspurpose_changed = True
+ # Update current syspurpose with new values
+ current_syspurpose.update(syspurpose)
+ # When some key is not listed in new syspurpose, then delete it from current syspurpose
+ # and ignore custom attributes created by user (e.g. "foo": "bar")
+ for key in list(current_syspurpose):
+ if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
+ del current_syspurpose[key]
+ self._write_syspurpose(current_syspurpose)
+ return syspurpose_changed
+
+ def _write_syspurpose(self, new_syspurpose):
+ """
+ Write the new syspurpose attributes to the JSON file.
+ """
+ with open(self.path, "w") as fp:
+ fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
+
+ def _read_syspurpose(self):
+ """
+ Read the current syspurpose from the JSON file.
+ """
+ current_syspurpose = {}
+ try:
+ with open(self.path, "r") as fp:
+ content = fp.read()
+ except IOError:
+ pass
+ else:
+ current_syspurpose = json.loads(content)
+ return current_syspurpose
+
+
+def main():
+
+ # Load RHSM configuration from file
+ rhsm = Rhsm(None)
+
+ # Note: the default values for parameters are:
+ # 'type': 'str', 'default': None, 'required': False
+ # So there is no need to repeat these values for each parameter.
+ module = AnsibleModule(
+ argument_spec={
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'server_hostname': {},
+ 'server_insecure': {},
+ 'rhsm_baseurl': {},
+ 'rhsm_repo_ca_cert': {},
+ 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},
+ 'activationkey': {'no_log': True},
+ 'org_id': {},
+ 'environment': {},
+ 'pool': {'default': '^$'},
+ 'pool_ids': {'default': [], 'type': 'list'},
+ 'consumer_type': {},
+ 'consumer_name': {},
+ 'consumer_id': {},
+ 'force_register': {'default': False, 'type': 'bool'},
+ 'server_proxy_hostname': {},
+ 'server_proxy_port': {},
+ 'server_proxy_user': {},
+ 'server_proxy_password': {'no_log': True},
+ 'release': {},
+ 'syspurpose': {
+ 'type': 'dict',
+ 'options': {
+ 'role': {},
+ 'usage': {},
+ 'service_level_agreement': {},
+ 'addons': {'type': 'list'},
+ 'sync': {'type': 'bool', 'default': False}
+ }
+ }
+ },
+ required_together=[['username', 'password'],
+ ['server_proxy_hostname', 'server_proxy_port'],
+ ['server_proxy_user', 'server_proxy_password']],
+ mutually_exclusive=[['activationkey', 'username'],
+ ['activationkey', 'consumer_id'],
+ ['activationkey', 'environment'],
+ ['activationkey', 'autosubscribe'],
+ ['pool', 'pool_ids']],
+ required_if=[['state', 'present', ['username', 'activationkey'], True]],
+ )
+
+ rhsm.module = module
+ state = module.params['state']
+ username = module.params['username']
+ password = module.params['password']
+ server_hostname = module.params['server_hostname']
+ server_insecure = module.params['server_insecure']
+ rhsm_baseurl = module.params['rhsm_baseurl']
+ rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']
+ auto_attach = module.params['auto_attach']
+ activationkey = module.params['activationkey']
+ org_id = module.params['org_id']
+ if activationkey and not org_id:
+ module.fail_json(msg='org_id is required when using activationkey')
+ environment = module.params['environment']
+ pool = module.params['pool']
+ pool_ids = {}
+ for value in module.params['pool_ids']:
+ if isinstance(value, dict):
+ if len(value) != 1:
+ module.fail_json(msg='Unable to parse pool_ids option.')
+ pool_id, quantity = list(value.items())[0]
+ else:
+ pool_id, quantity = value, None
+ pool_ids[pool_id] = quantity
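+ # pool_ids accepts plain ids as well as single-entry {id: quantity} dicts;
+ # e.g. ['0123456789abcdef0123456789abcdef', {'fedcba9876543210': 2}]
+ # is normalized to {'0123456789abcdef0123456789abcdef': None, 'fedcba9876543210': 2}.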
+ consumer_type = module.params["consumer_type"]
+ consumer_name = module.params["consumer_name"]
+ consumer_id = module.params["consumer_id"]
+ force_register = module.params["force_register"]
+ server_proxy_hostname = module.params['server_proxy_hostname']
+ server_proxy_port = module.params['server_proxy_port']
+ server_proxy_user = module.params['server_proxy_user']
+ server_proxy_password = module.params['server_proxy_password']
+ release = module.params['release']
+ syspurpose = module.params['syspurpose']
+
+ global SUBMAN_CMD
+ SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
+
+ syspurpose_changed = False
+ if syspurpose is not None:
+ try:
+ syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)
+ except Exception as err:
+ module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err))
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Register system
+ if rhsm.is_registered and not force_register:
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ try:
+ rhsm.sync_syspurpose()
+ except Exception as e:
+ module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e))
+ if pool != '^$' or pool_ids:
+ try:
+ if pool_ids:
+ result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
+ else:
+ result = rhsm.update_subscriptions(pool)
+ except Exception as e:
+ module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(**result)
+ else:
+ if syspurpose_changed is True:
+ module.exit_json(changed=True, msg="Syspurpose attributes changed.")
+ else:
+ module.exit_json(changed=False, msg="System already registered.")
+ else:
+ try:
+ rhsm.enable()
+ rhsm.configure(**module.params)
+ rhsm.register(username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register,
+ environment, rhsm_baseurl, server_insecure, server_hostname,
+ server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release)
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ rhsm.sync_syspurpose()
+ if pool_ids:
+ subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
+ elif pool != '^$':
+ subscribed_pool_ids = rhsm.subscribe(pool)
+ else:
+ subscribed_pool_ids = []
+ except Exception as e:
+ module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(changed=True,
+ msg="System successfully registered to '%s'." % server_hostname,
+ subscribed_pool_ids=subscribed_pool_ids)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhsm.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+ else:
+ try:
+ rhsm.unsubscribe()
+ rhsm.unregister()
+ except Exception as e:
+ module.fail_json(msg="Failed to unregister: %s" % to_native(e))
+ else:
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py
new file mode 100644
index 00000000..5ffbd7db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis
+short_description: Various redis commands, slave and flush
+description:
+ - Unified utility to interact with redis instances.
+options:
+ command:
+ description:
+ - The selected redis command
+ - C(config) (new in 1.6), ensures a configuration setting on an instance.
+ - C(flush) flushes all the instance or a specified db.
+ - C(slave) sets a redis instance in slave or master mode.
+ choices: [ config, flush, slave ]
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with (usually not used)
+ type: str
+ login_host:
+ description:
+ - The host running the database
+ default: localhost
+ type: str
+ login_port:
+ description:
+ - The port to connect to
+ default: 6379
+ type: int
+ master_host:
+ description:
+ - The host of the master instance [slave command]
+ type: str
+ master_port:
+ description:
+ - The port of the master instance [slave command]
+ type: int
+ slave_mode:
+ description:
+ - The mode of the redis instance [slave command]
+ default: slave
+ choices: [ master, slave ]
+ type: str
+ db:
+ description:
+ - The database to flush (used in db mode) [flush command]
+ type: int
+ flush_mode:
+ description:
+ - Type of flush (all the dbs in a redis instance or a specific one)
+ [flush command]
+ default: all
+ choices: [ all, db ]
+ type: str
+ name:
+ description:
+ - A redis config key.
+ type: str
+ value:
+ description:
+ - A redis config value. When memory size is needed, it is possible
+ to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
+ Units are case insensitive, i.e. 1m = 1mb = 1M = 1MB.
+ type: str
+
+notes:
+ - Requires the redis-py Python package on the remote host. You can
+ install it with pip (C(pip install redis)) or with a package manager.
+ U(https://github.com/andymccurdy/redis-py)
+ - If the redis master instance that we are making a slave of is password
+ protected, this needs to be in the redis.conf in the masterauth variable.
+
+seealso:
+ - module: community.general.redis_info
+requirements: [ redis ]
+author: "Xabier Larrakoetxea (@slok)"
+'''
+
+EXAMPLES = '''
+- name: Set local redis instance to be slave of melee.island on port 6377
+ community.general.redis:
+ command: slave
+ master_host: melee.island
+ master_port: 6377
+
+- name: Deactivate slave mode
+ community.general.redis:
+ command: slave
+ slave_mode: master
+
+- name: Flush all the redis db
+ community.general.redis:
+ command: flush
+ flush_mode: all
+
+- name: Flush only one db in a redis instance
+ community.general.redis:
+ command: flush
+ db: 1
+ flush_mode: db
+
+- name: Configure local redis to have 10000 max clients
+ community.general.redis:
+ command: config
+ name: maxclients
+ value: 10000
+
+- name: Configure local redis maxmemory to 4GB
+ community.general.redis:
+ command: config
+ name: maxmemory
+ value: 4GB
+
+- name: Configure local redis to have lua time limit of 100 ms
+ community.general.redis:
+ command: config
+ name: lua-time-limit
+ value: 100
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ import redis
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ redis_found = False
+else:
+ redis_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils._text import to_native
+import re
+
+
+# Redis module specific support methods.
+def set_slave_mode(client, master_host, master_port):
+ try:
+ return client.slaveof(master_host, master_port)
+ except Exception:
+ return False
+
+
+def set_master_mode(client):
+ try:
+ return client.slaveof()
+ except Exception:
+ return False
+
+
+def flush(client, db=None):
+ try:
+ if db is None:
+ return client.flushall()
+ else:
+ # The passed client has been connected to the database already
+ return client.flushdb()
+ except Exception:
+ return False
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(type='str', choices=['config', 'flush', 'slave']),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ master_host=dict(type='str'),
+ master_port=dict(type='int'),
+ slave_mode=dict(type='str', default='slave', choices=['master', 'slave']),
+ db=dict(type='int'),
+ flush_mode=dict(type='str', default='all', choices=['all', 'db']),
+ name=dict(type='str'),
+ value=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ if not redis_found:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ command = module.params['command']
+
+ # Slave Command section -----------
+ if command == "slave":
+ master_host = module.params['master_host']
+ master_port = module.params['master_port']
+ mode = module.params['slave_mode']
+
+ # Check if we have all the data
+ if mode == "slave": # Only need data if we want to be slave
+ if not master_host:
+ module.fail_json(msg='In slave mode master host must be provided')
+
+ if not master_port:
+ module.fail_json(msg='In slave mode master port must be provided')
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Check if we are already in the mode that we want
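+ # (r.info() exposes the replication state: info['role'], plus
+ # info['master_host'] and info['master_port'] when acting as a slave)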
+ info = r.info()
+ if mode == "master" and info["role"] == "master":
+ module.exit_json(changed=False, mode=mode)
+
+ elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
+ status = dict(
+ status=mode,
+ master_host=master_host,
+ master_port=master_port,
+ )
+ module.exit_json(changed=False, mode=status)
+ else:
+ # Do the stuff
+ # (check check_mode before running the commands so they are not
+ # executed if not necessary)
+ if mode == "slave":
+ if module.check_mode or set_slave_mode(r, master_host, master_port):
+ info = r.info()
+ status = {
+ 'status': mode,
+ 'master_host': master_host,
+ 'master_port': master_port,
+ }
+ module.exit_json(changed=True, mode=status)
+ else:
+ module.fail_json(msg='Unable to set slave mode')
+
+ else:
+ if module.check_mode or set_master_mode(r):
+ module.exit_json(changed=True, mode=mode)
+ else:
+ module.fail_json(msg='Unable to set master mode')
+
+ # flush Command section -----------
+ elif command == "flush":
+ db = module.params['db']
+ mode = module.params['flush_mode']
+
+ # Check if we have all the data
+ if mode == "db":
+ if db is None:
+ module.fail_json(msg="In db mode the db number must be provided")
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Do the stuff
+ # (check check_mode before running the commands so they are not
+ # executed if not necessary)
+ if mode == "all":
+ if module.check_mode or flush(r):
+ module.exit_json(changed=True, flushed=True)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush all databases")
+
+ else:
+ if module.check_mode or flush(r, db):
+ module.exit_json(changed=True, flushed=True, db=db)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush '%d' database" % db)
+ elif command == 'config':
+ name = module.params['name']
+
+ try: # try to parse the value as if it were the memory size
+ if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()):
+ value = str(human_to_bytes(module.params['value'].upper()))
+ else:
+ value = module.params['value']
+ except ValueError:
+ value = module.params['value']
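+ # e.g. a value of '4GB' is normalized to '4294967296' above, while plain
+ # numbers such as '10000' keep their numeric value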
+
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ old_value = r.config_get(name)[name]
+ except Exception as e:
+ module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
+ changed = old_value != value
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ try:
+ r.config_set(name, value)
+ except Exception as e:
+ module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ module.fail_json(msg='A valid command must be provided')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py
new file mode 100644
index 00000000..b615addb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: redis_info
+short_description: Gather information about Redis servers
+version_added: '0.2.0'
+description:
+- Gathers information and statistics about Redis servers.
+options:
+ login_host:
+ description:
+ - The host running the database.
+ type: str
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to.
+ type: int
+ default: 6379
+ login_password:
+ description:
+ - The password used to authenticate with, when authentication is enabled for the Redis server.
+ type: str
+notes:
+- Requires the redis-py Python package on the remote host. You can
+ install it with pip (C(pip install redis)) or with a package manager.
+ U(https://github.com/andymccurdy/redis-py)
+seealso:
+- module: community.general.redis
+requirements: [ redis ]
+author: "Pavlo Bashynskyi (@levonet)"
+'''
+
+EXAMPLES = r'''
+- name: Get server information
+ community.general.redis_info:
+ register: result
+
+- name: Print server information
+ ansible.builtin.debug:
+ var: result.info
+'''
+
+RETURN = r'''
+info:
+ description: The default set of server information sections U(https://redis.io/commands/info).
+ returned: success
+ type: dict
+ sample: {
+ "active_defrag_hits": 0,
+ "active_defrag_key_hits": 0,
+ "active_defrag_key_misses": 0,
+ "active_defrag_misses": 0,
+ "active_defrag_running": 0,
+ "allocator_active": 932409344,
+ "allocator_allocated": 932062792,
+ "allocator_frag_bytes": 346552,
+ "allocator_frag_ratio": 1.0,
+ "allocator_resident": 947253248,
+ "allocator_rss_bytes": 14843904,
+ "allocator_rss_ratio": 1.02,
+ "aof_current_rewrite_time_sec": -1,
+ "aof_enabled": 0,
+ "aof_last_bgrewrite_status": "ok",
+ "aof_last_cow_size": 0,
+ "aof_last_rewrite_time_sec": -1,
+ "aof_last_write_status": "ok",
+ "aof_rewrite_in_progress": 0,
+ "aof_rewrite_scheduled": 0,
+ "arch_bits": 64,
+ "atomicvar_api": "atomic-builtin",
+ "blocked_clients": 0,
+ "client_recent_max_input_buffer": 4,
+ "client_recent_max_output_buffer": 0,
+ "cluster_enabled": 0,
+ "config_file": "",
+ "configured_hz": 10,
+ "connected_clients": 4,
+ "connected_slaves": 0,
+ "db0": {
+ "avg_ttl": 1945628530,
+ "expires": 16,
+ "keys": 3341411
+ },
+ "evicted_keys": 0,
+ "executable": "/data/redis-server",
+ "expired_keys": 9,
+ "expired_stale_perc": 1.72,
+ "expired_time_cap_reached_count": 0,
+ "gcc_version": "9.2.0",
+ "hz": 10,
+ "instantaneous_input_kbps": 0.0,
+ "instantaneous_ops_per_sec": 0,
+ "instantaneous_output_kbps": 0.0,
+ "keyspace_hits": 0,
+ "keyspace_misses": 0,
+ "latest_fork_usec": 0,
+ "lazyfree_pending_objects": 0,
+ "loading": 0,
+ "lru_clock": 11603632,
+ "master_repl_offset": 118831417,
+ "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e",
+ "master_replid2": "0000000000000000000000000000000000000000",
+ "maxmemory": 0,
+ "maxmemory_human": "0B",
+ "maxmemory_policy": "noeviction",
+ "mem_allocator": "jemalloc-5.1.0",
+ "mem_aof_buffer": 0,
+ "mem_clients_normal": 49694,
+ "mem_clients_slaves": 0,
+ "mem_fragmentation_bytes": 12355480,
+ "mem_fragmentation_ratio": 1.01,
+ "mem_not_counted_for_evict": 0,
+ "mem_replication_backlog": 1048576,
+ "migrate_cached_sockets": 0,
+ "multiplexing_api": "epoll",
+ "number_of_cached_scripts": 0,
+ "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64",
+ "process_id": 1,
+ "pubsub_channels": 0,
+ "pubsub_patterns": 0,
+ "rdb_bgsave_in_progress": 0,
+ "rdb_changes_since_last_save": 671,
+ "rdb_current_bgsave_time_sec": -1,
+ "rdb_last_bgsave_status": "ok",
+ "rdb_last_bgsave_time_sec": -1,
+ "rdb_last_cow_size": 0,
+ "rdb_last_save_time": 1588702236,
+ "redis_build_id": "a31260535f820267",
+ "redis_git_dirty": 0,
+ "redis_git_sha1": 0,
+ "redis_mode": "standalone",
+ "redis_version": "999.999.999",
+ "rejected_connections": 0,
+ "repl_backlog_active": 1,
+ "repl_backlog_first_byte_offset": 118707937,
+ "repl_backlog_histlen": 123481,
+ "repl_backlog_size": 1048576,
+ "role": "master",
+ "rss_overhead_bytes": -3051520,
+ "rss_overhead_ratio": 1.0,
+ "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4",
+ "second_repl_offset": 118830003,
+ "slave_expires_tracked_keys": 0,
+ "sync_full": 0,
+ "sync_partial_err": 0,
+ "sync_partial_ok": 0,
+ "tcp_port": 6379,
+ "total_commands_processed": 885,
+ "total_connections_received": 10,
+ "total_net_input_bytes": 802709255,
+ "total_net_output_bytes": 31754,
+ "total_system_memory": 135029538816,
+ "total_system_memory_human": "125.76G",
+ "uptime_in_days": 53,
+ "uptime_in_seconds": 4631778,
+ "used_cpu_sys": 4.668282,
+ "used_cpu_sys_children": 0.002191,
+ "used_cpu_user": 4.21088,
+ "used_cpu_user_children": 0.0,
+ "used_memory": 931908760,
+ "used_memory_dataset": 910774306,
+ "used_memory_dataset_perc": "97.82%",
+ "used_memory_human": "888.74M",
+ "used_memory_lua": 37888,
+ "used_memory_lua_human": "37.00K",
+ "used_memory_overhead": 21134454,
+ "used_memory_peak": 932015216,
+ "used_memory_peak_human": "888.84M",
+ "used_memory_peak_perc": "99.99%",
+ "used_memory_rss": 944201728,
+ "used_memory_rss_human": "900.46M",
+ "used_memory_scripts": 0,
+ "used_memory_scripts_human": "0B",
+ "used_memory_startup": 791264
+ }
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ from redis import StrictRedis
+ HAS_REDIS_PACKAGE = True
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ HAS_REDIS_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def redis_client(**client_params):
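+ # Thin construction wrapper; also a convenient single point to patch in tests.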
+ return StrictRedis(**client_params)
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ login_password=dict(type='str', no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_REDIS_PACKAGE:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_password = module.params['login_password']
+
+ # Connect and check
+ client = redis_client(host=login_host, port=login_port, password=login_password)
+ try:
+ client.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ info = client.info()
+ module.exit_json(changed=False, info=info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py
new file mode 100644
index 00000000..2e5f080d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_sync
+short_description: Sync Cobbler
+description:
+- Sync Cobbler to commit changes.
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Commit Cobbler changes
+ community.general.cobbler_sync:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ run_once: yes
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=True,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Legacy Python (2.7.8 and older) does not verify HTTPS certificates by default
+ pass
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
+
+ if not module.check_mode:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py
new file mode 100644
index 00000000..ecabcc8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_system
+short_description: Manage system objects in Cobbler
+description:
+- Add, modify or remove systems in Cobbler
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The system name to manage.
+ type: str
+ properties:
+ description:
+ - A dictionary with system properties.
+ type: dict
+ interfaces:
+ description:
+ - A dictionary of dictionaries containing interface options, keyed by interface name (for example C(eth0)).
+ type: dict
+ sync:
+ description:
+ - Sync on changes.
+ - Concurrently syncing Cobbler is bound to fail.
+ type: bool
+ default: no
+ state:
+ description:
+ - Whether the system should be present, absent or a query is made.
+ choices: [ absent, present, query ]
+ default: present
+ type: str
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Ensure the system exists in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ properties:
+ profile: CentOS6-x86_64
+ name_servers: [ 2.3.4.5, 3.4.5.6 ]
+ name_servers_search: foo.com, bar.com
+ interfaces:
+ eth0:
+ macaddress: 00:01:02:03:04:05
+ ipaddress: 1.2.3.4
+ delegate_to: localhost
+
+- name: Enable network boot in Cobbler
+ community.general.cobbler_system:
+ host: bdsol-aci-cobbler-01
+ username: cobbler
+ password: ins3965!
+ name: bdsol-aci51-apic1.cisco.com
+ properties:
+ netboot_enabled: yes
+ state: present
+ delegate_to: localhost
+
+- name: Query all systems in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ state: query
+ register: cobbler_systems
+ delegate_to: localhost
+
+- name: Query a specific system in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: '{{ inventory_hostname }}'
+ state: query
+ register: cobbler_properties
+ delegate_to: localhost
+
+- name: Ensure the system does not exist in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+systems:
+ description: List of systems
+ returned: when C(state=query) and C(name) is not provided
+ type: list
+system:
+ description: (Resulting) information about the system we are working with
+ returned: when C(name) is provided
+ type: dict
+'''
+
+import copy
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+IFPROPS_MAPPING = dict(
+ bondingopts='bonding_opts',
+ bridgeopts='bridge_opts',
+ connected_mode='connected_mode',
+ cnames='cnames',
+ dhcptag='dhcp_tag',
+ dnsname='dns_name',
+ ifgateway='if_gateway',
+ interfacetype='interface_type',
+ interfacemaster='interface_master',
+ ipaddress='ip_address',
+ ipv6address='ipv6_address',
+ ipv6defaultgateway='ipv6_default_gateway',
+ ipv6mtu='ipv6_mtu',
+ ipv6prefix='ipv6_prefix',
+ ipv6secondaries='ipv6_secondaries',
+ ipv6staticroutes='ipv6_static_routes',
+ macaddress='mac_address',
+ management='management',
+ mtu='mtu',
+ netmask='netmask',
+ static='static',
+ staticroutes='static_routes',
+ virtbridge='virt_bridge',
+)
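+# The keys above are the compact spellings accepted in the playbook
+# 'interfaces' option; the values are the corresponding attribute names in
+# the system records Cobbler returns, used below for change detection.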
+
+
+def getsystem(conn, name, token):
+ system = dict()
+ if name:
+ # system = conn.get_system(name, token)
+ systems = conn.find_system(dict(name=name), token)
+ if systems:
+ system = systems[0]
+ return system
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ name=dict(type='str'),
+ interfaces=dict(type='dict'),
+ properties=dict(type='dict'),
+ sync=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ name = module.params['name']
+ state = module.params['state']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=False,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Legacy Python (2.7.8 and older) does not verify HTTPS certificates by default
+ pass
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))
+
+ system = getsystem(conn, name, token)
+ # result['system'] = system
+
+ if state == 'query':
+ if name:
+ result['system'] = system
+ else:
+ # Turn it into a dictionary of dictionaries
+ # all_systems = conn.get_systems()
+ # result['systems'] = { system['name']: system for system in all_systems }
+
+ # Return a list of dictionaries
+ result['systems'] = conn.get_systems()
+
+ elif state == 'present':
+
+ if system:
+ # Update existing entry
+ system_id = conn.get_system_handle(name, token)
+
+ if module.params['properties']:
+ for key, value in iteritems(module.params['properties']):
+ if key not in system:
+ module.warn("Property '{0}' is not a valid system property.".format(key))
+ continue
+ if system[key] != value:
+ try:
+ conn.modify_system(system_id, key, value, token)
+ result['changed'] = True
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ else:
+ # Create a new entry
+ system_id = conn.new_system(token)
+ conn.modify_system(system_id, 'name', name, token)
+ result['changed'] = True
+
+ if module.params['properties']:
+ for key, value in iteritems(module.params['properties']):
+ try:
+ conn.modify_system(system_id, key, value, token)
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ # Add interface properties
+ interface_properties = dict()
+ if module.params['interfaces']:
+ for device, values in iteritems(module.params['interfaces']):
+ for key, value in iteritems(values):
+ if key == 'name':
+ continue
+ if key not in IFPROPS_MAPPING:
+ module.warn("Property '{0}' is not a valid interface property.".format(key))
+ continue
+ if not system or system['interfaces'].get(device, {}).get(IFPROPS_MAPPING[key]) != value:
+ result['changed'] = True
+ interface_properties['{0}-{1}'.format(key, device)] = value
+
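+ # interface_properties now holds Cobbler's flattened per-NIC payload,
+ # e.g. {'macaddress-eth0': '00:01:02:03:04:05', 'ipaddress-eth0': '1.2.3.4'}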
+ if result['changed'] is True:
+ conn.modify_system(system_id, "modify_interface", interface_properties, token)
+
+ # Only save when the entry was changed
+ if not module.check_mode and result['changed']:
+ conn.save_system(system_id, token)
+
+ elif state == 'absent':
+
+ if system:
+ if not module.check_mode:
+ conn.remove_system(name, token)
+ result['changed'] = True
+
+ if not module.check_mode and module.params['sync'] and result['changed']:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))
+
+ if state in ('absent', 'present'):
+ result['system'] = getsystem(conn, name, token)
+
+ if module._diff:
+ result['diff'] = dict(before=system, after=result['system'])
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py
new file mode 100644
index 00000000..fa8ac66c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_firmware
+short_description: Firmware update from a repository on a network share (CIFS, NFS).
+description:
+ - Update the Firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of
+ available updates.
+ - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs.
+ - All applicable updates contained in the repository are applied to the system.
+ - This feature is available only with iDRAC Enterprise License.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ share_name:
+ description: CIFS or NFS Network share.
+ type: str
+ required: True
+ share_user:
+ description: Network share user in the format C(user@domain) or C(domain\\user) if the user is
+ part of a domain, else C(user). This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ share_mnt:
+ description: Local mount path of the network share with read-write permission for the ansible user.
+ This option is mandatory for Network Share.
+ type: str
+ required: True
+ reboot:
+ description: Whether to reboot the system after applying the updates or not.
+ type: bool
+ default: false
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ default: true
+ catalog_file_name:
+ required: False
+ description: Catalog file name relative to the I(share_name).
+ type: str
+ default: 'Catalog.xml'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+'''
+
+EXAMPLES = """
+---
+- name: Update firmware from repository on a Network Share
+ community.general.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.0:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_pwd"
+ share_mnt: "/mnt/share"
+ reboot: True
+ job_wait: True
+ catalog_file_name: "Catalog.xml"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall firmware update status.
+ returned: always
+ sample: "Successfully updated the firmware."
+update_status:
+ type: dict
+ description: Firmware Update job and progress details from the iDRAC.
+ returned: success
+ sample: {
+ 'InstanceID': 'JID_XXXXXXXXXXXX',
+ 'JobState': 'Completed',
+ 'Message': 'Job completed successfully.',
+ 'MessageId': 'REDXXX',
+ 'Name': 'Repository Update',
+ 'JobStartTime': 'NA',
+ 'Status': 'Success',
+ }
+"""
+
+
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+def _validate_catalog_file(catalog_file_name):
+ normalized_file_name = catalog_file_name.lower()
+ if not normalized_file_name:
+ raise ValueError('catalog_file_name should be a non-empty string.')
+ elif not normalized_file_name.endswith(".xml"):
+ raise ValueError('catalog_file_name should be an XML file.')
+
+
+def update_firmware(idrac, module):
+ """Update firmware from a network share and return the job details."""
+ msg = {}
+ msg['changed'] = False
+ msg['update_status'] = {}
+
+ try:
+ upd_share = FileOnShare(remote=module.params['share_name'] + "/" + module.params['catalog_file_name'],
+ mount_point=module.params['share_mnt'],
+ isFolder=False,
+ creds=UserCredentials(
+ module.params['share_user'],
+ module.params['share_password'])
+ )
+
+ idrac.use_redfish = True
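+ # 12th and 13th generation servers do not support this update path over
+ # Redfish, so omsdk's WS-Man transport is used for those generations.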
+ if '12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration:
+ idrac.use_redfish = False
+
+ apply_update = True
+ msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share,
+ apply_update,
+ module.params['reboot'],
+ module.params['job_wait'])
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+
+ if "Status" in msg['update_status']:
+ if msg['update_status']['Status'] == "Success":
+ if module.params['job_wait']:
+ msg['changed'] = True
+ else:
+ module.fail_json(msg='Failed to update firmware.', update_status=msg['update_status'])
+ return msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_mnt": {"required": True, "type": 'str'},
+
+ "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
+ "reboot": {"required": False, "type": 'bool', "default": False},
+ "job_wait": {"required": False, "type": 'bool', "default": True},
+ },
+
+ supports_check_mode=False)
+
+ try:
+ # Validate the catalog file
+ _validate_catalog_file(module.params['catalog_file_name'])
+ # Connect to iDRAC and update firmware
+ with iDRACConnection(module.params) as idrac:
+ update_status = update_firmware(idrac, module)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(msg='Successfully updated the firmware.', update_status=update_status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py
new file mode 100644
index 00000000..39857fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_server_config_profile
+short_description: Export or Import iDRAC Server Configuration Profile (SCP).
+description:
+ - Export the Server Configuration Profile (SCP) from the iDRAC or Import from a network share or a local file.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ command:
+ description:
+ - If C(import), will perform SCP import operations.
+ - If C(export), will perform SCP export operations.
+ choices: ['import', 'export']
+ default: 'export'
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ required: True
+ share_name:
+ description: CIFS or NFS Network Share or a local path.
+ type: str
+ required: True
+ share_user:
+ description: Network share user in the format C(user@domain) or C(domain\\user) if the user is
+ part of a domain, else C(user). This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ scp_file:
+ description: Server Configuration Profile file name. This option is mandatory for C(import) command.
+ type: str
+ scp_components:
+ description:
+ - If C(ALL), this module will import all components configurations from SCP file.
+ - If C(IDRAC), this module will import iDRAC configuration from SCP file.
+ - If C(BIOS), this module will import BIOS configuration from SCP file.
+ - If C(NIC), this module will import NIC configuration from SCP file.
+ - If C(RAID), this module will import RAID configuration from SCP file.
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ default: 'ALL'
+ shutdown_type:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(Graceful), it gracefully shuts down the server.
+ - If C(Forced), it forcefully shuts down the server.
+ - If C(NoReboot), it does not reboot the server.
+ choices: ['Graceful', 'Forced', 'NoReboot']
+ default: 'Graceful'
+ end_host_power_state:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(On), End host power state is on.
+ - If C(Off), End host power state is off.
+ choices: ['On' ,'Off']
+ default: 'On'
+ export_format:
+ description: Specify the output file format. This option is applicable for C(export) command.
+ choices: ['JSON', 'XML']
+ default: 'XML'
+ export_use:
+ description: Specify the type of server configuration profile (SCP) to be exported.
+ This option is applicable for C(export) command.
+ choices: ['Default', 'Clone', 'Replace']
+ default: 'Default'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Jagadeesh N V(@jagadeeshnv)"
+
+'''
+
+EXAMPLES = r'''
+---
+- name: Import Server Configuration Profile from a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Import Server Configuration Profile from a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Export Server Configuration Profile to a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+
+- name: Export Server Configuration Profile to a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the import or export SCP job.
+ returned: always
+ sample: "Successfully imported the Server Configuration Profile"
+scp_status:
+ type: dict
+ description: SCP operation job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "XXX123",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+'''
+
+import os
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+ from omdrivers.enums.iDRAC.iDRACEnums import (SCPTargetEnum, EndHostPowerStateEnum,
+ ShutdownTypeEnum, ExportFormatEnum, ExportUseEnum)
+except ImportError:
+ pass
+
+
+def run_import_server_config_profile(idrac, module):
+ """Import Server Configuration Profile from a network share."""
+ target = SCPTargetEnum[module.params['scp_components']]
+ job_wait = module.params['job_wait']
+ end_host_power_state = EndHostPowerStateEnum[module.params['end_host_power_state']]
+ shutdown_type = ShutdownTypeEnum[module.params['shutdown_type']]
+ idrac.use_redfish = True
+
+ try:
+ myshare = file_share_manager.create_share_obj(
+ share_path="{0}{1}{2}".format(module.params['share_name'], os.sep, module.params['scp_file']),
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']), isFolder=False)
+ import_status = idrac.config_mgr.scp_import(myshare,
+ target=target, shutdown_type=shutdown_type,
+ end_host_power_state=end_host_power_state,
+ job_wait=job_wait)
+ if not import_status or import_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to import scp.', scp_status=import_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return import_status
+
+
+def run_export_server_config_profile(idrac, module):
+ """Export Server Configuration Profile to a network share."""
+ export_format = ExportFormatEnum[module.params['export_format']]
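+ # The %ip and strftime-style tokens in the template below are presumably
+ # expanded by omsdk when new_file() creates the target file (assumption).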
+ scp_file_name_format = "%ip_%Y%m%d_%H%M%S_scp.{0}".format(module.params['export_format'].lower())
+ target = SCPTargetEnum[module.params['scp_components']]
+ export_use = ExportUseEnum[module.params['export_use']]
+ idrac.use_redfish = True
+
+ try:
+ myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'],
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']),
+ isFolder=True)
+ scp_file_name = myshare.new_file(scp_file_name_format)
+ export_status = idrac.config_mgr.scp_export(scp_file_name,
+ target=target,
+ export_format=export_format,
+ export_use=export_use,
+ job_wait=module.params['job_wait'])
+ if not export_status or export_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to export scp.', scp_status=export_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return export_status
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str',
+ "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "command": {"required": False, "type": 'str',
+ "choices": ['export', 'import'], "default": 'export'},
+ "job_wait": {"required": True, "type": 'bool'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str',
+ "aliases": ['share_pwd'], "no_log": True},
+ "scp_components": {"required": False,
+ "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
+ "default": 'ALL'},
+
+ "scp_file": {"required": False, "type": 'str'},
+ "shutdown_type": {"required": False,
+ "choices": ['Graceful', 'Forced', 'NoReboot'],
+ "default": 'Graceful'},
+ "end_host_power_state": {"required": False,
+ "choices": ['On', 'Off'],
+ "default": 'On'},
+
+ "export_format": {"required": False, "type": 'str',
+ "choices": ['JSON', 'XML'], "default": 'XML'},
+ "export_use": {"required": False, "type": 'str',
+ "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
+ },
+ required_if=[
+ ["command", "import", ["scp_file"]]
+ ],
+ supports_check_mode=False)
+
+ try:
+ changed = False
+ with iDRACConnection(module.params) as idrac:
+ command = module.params['command']
+ if command == 'import':
+ scp_status = run_import_server_config_profile(idrac, module)
+ if "No changes were applied" not in scp_status.get('Message', ""):
+ changed = True
+ else:
+ scp_status = run_export_server_config_profile(idrac, module)
+ module.exit_json(changed=changed, msg="Successfully {0}ed the Server Configuration Profile.".format(command),
+ scp_status=scp_status)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py
new file mode 100644
index 00000000..68fbb1e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 1.2
+# Copyright (C) 2019 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_device_info
+short_description: Retrieves information about devices.
+description:
+ - This module retrieves the list of all devices, along with the exhaustive inventory details of
+ each device.
+options:
+ hostname:
+ description:
+ - Target IP Address or hostname.
+ type: str
+ required: True
+ username:
+ description:
+ - Target username.
+ type: str
+ required: True
+ password:
+ description:
+ - Target user password.
+ type: str
+ required: True
+ port:
+ description:
+ - Target HTTPS port.
+ type: int
+ default: 443
+ fact_subset:
+ description:
+ - C(basic_inventory) returns the list of the devices.
+ - C(detailed_inventory) returns the inventory details of specified devices.
+ - C(subsystem_health) returns the health status of specified devices.
+ type: str
+ choices: [basic_inventory, detailed_inventory, subsystem_health]
+ default: basic_inventory
+ system_query_options:
+ description:
+ - I(system_query_options) is applicable for the choices of I(fact_subset). Either I(device_id) or
+ I(device_service_tag) (or both) is mandatory for C(detailed_inventory) and C(subsystem_health).
+ type: dict
+ suboptions:
+ device_id:
+ description:
+ - A list of unique device identifiers, applicable
+ for C(detailed_inventory) and C(subsystem_health).
+ type: list
+ device_service_tag:
+ description:
+ - A list of device service tags, applicable for C(detailed_inventory)
+ and C(subsystem_health).
+ type: list
+ inventory_type:
+ description:
+ - For C(detailed_inventory), it returns details of the specified inventory type.
+ type: str
+ filter:
+ description:
+ - For C(basic_inventory), it filters the collection of devices.
+ The I(filter) query format should be aligned with OData standards.
+ type: str
+
+requirements:
+ - "python >= 2.7.5"
+author: "Sajna Shetty(@Sajna-Shetty)"
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve basic inventory of all devices.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+
+- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "basic_inventory"
+ system_query_options:
+ filter: "Id eq 33333 or Id eq 11111"
+
+- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ - 22222
+
+- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+ inventory_type: "serverDeviceCards"
+
+- name: Retrieve subsystem health of specified devices identified by service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "subsystem_health"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Overall device information status.
+ returned: on error
+ sample: "Failed to fetch the device information"
+device_info:
+ type: dict
+ description: Returns the information collected from the Device.
+ returned: success
+ sample: {
+ "value": [
+ {
+ "Actions": null,
+ "AssetTag": null,
+ "ChassisServiceTag": null,
+ "ConnectionState": true,
+ "DeviceManagement": [
+ {
+ "DnsName": "dnsname.host.com",
+ "InstrumentationName": "MX-12345",
+ "MacAddress": "11:10:11:10:11:10",
+ "ManagementId": 12345,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 12345,
+ "ManagementProfileId": 12345,
+ "ManagementURL": "https://192.168.0.1:443",
+ "Status": 1000,
+ "StatusDateTime": "2019-01-21 06:30:08.501"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "192.168.0.1"
+ }
+ ],
+ "DeviceName": "MX-0003I",
+ "DeviceServiceTag": "MXL1234",
+ "DeviceSubscription": null,
+ "LastInventoryTime": "2019-01-21 06:30:08.501",
+ "LastStatusTime": "2019-01-21 06:30:02.492",
+ "ManagedState": 3000,
+ "Model": "PowerEdge MX7000",
+ "PowerState": 17,
+ "SlotConfiguration": {},
+ "Status": 4000,
+ "SystemId": 2031,
+ "Type": 2000
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+DEVICES_INVENTORY_DETAILS = "detailed_inventory"
+DEVICES_SUBSYSTEM_HEALTH = "subsystem_health"
+DEVICES_INVENTORY_TYPE = "inventory_type"
+DEVICE_LIST = "basic_inventory"
+DESC_HTTP_ERROR = "HTTP Error 404: Not Found"
+device_fact_error_report = {}
+
+DEVICE_RESOURCE_COLLECTION = {
+ DEVICE_LIST: {"resource": "DeviceService/Devices"},
+ DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"},
+ DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"},
+ DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"},
+}
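+# Example: DEVICE_RESOURCE_COLLECTION[DEVICES_INVENTORY_DETAILS]["resource"].format(Id=1234)
+# yields "DeviceService/Devices(1234)/InventoryDetails".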
+
+
+def _get_device_id_from_service_tags(service_tags, rest_obj):
+ """
+ Get device IDs from device service tags.
+ :arg service_tags: list of service tags
+ :arg rest_obj: RestOME class object, in case of a request with session.
+ :returns: dict mapping device ID to service tag, e.g. {1345: "MXL1245"}
+ """
+ try:
+ path = DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]
+ resp = rest_obj.invoke_request('GET', path)
+ if resp.success:
+ devices_list = resp.json_data["value"]
+ service_tag_dict = {}
+ for item in devices_list:
+ if item["DeviceServiceTag"] in service_tags:
+ service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]})
+ available_service_tags = service_tag_dict.values()
+ not_available_service_tag = list(set(service_tags) - set(available_service_tags))
+ device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in not_available_service_tag))
+ else:
+ raise ValueError(resp.json_data)
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+ return service_tag_dict
+
+
+def is_int(val):
+ """check when device_id numeric represented value is int"""
+ try:
+ int(val)
+ return True
+ except ValueError:
+ return False
+
+
+def _check_duplicate_device_id(device_id_list, service_tag_dict):
+ """If service_tag is duplicate of device_id, then updates the message as Duplicate report
+ :arg1: device_id_list : list of device_id
+ :arg2: service_tag_id_dict: dictionary of device_id to service tag map"""
+ if device_id_list:
+ device_id_represents_int = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)]
+ common_val = list(set(device_id_represents_int) & set(service_tag_dict.keys()))
+ for device_id in common_val:
+ device_fact_error_report.update(
+ {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)})
+ del service_tag_dict[device_id]
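+ # Example: device_id_list=["1345"] with service_tag_dict={1345: "MXL1245"} flags
+ # "MXL1245" as a duplicate report and removes it from service_tag_dict.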
+
+
+def _get_device_identifier_map(module_params, rest_obj):
+ """
+ Builds the identifier mapping.
+ :returns: dict of device_id to service_tag maps,
+ e.g. {"device_id": {1234: None}, "device_service_tag": {1345: "MXL1234"}}"""
+ system_query_options_param = module_params.get("system_query_options")
+ device_id_service_tag_dict = {}
+ if system_query_options_param is not None:
+ device_id_list = system_query_options_param.get("device_id")
+ device_service_tag_list = system_query_options_param.get("device_service_tag")
+ if device_id_list:
+ device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list)))
+ device_id_service_tag_dict["device_id"] = device_id_dict
+ if device_service_tag_list:
+ service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list,
+ rest_obj)
+
+ _check_duplicate_device_id(device_id_list, service_tag_dict)
+ device_id_service_tag_dict["device_service_tag"] = service_tag_dict
+ return device_id_service_tag_dict
+
+
+def _get_query_parameters(module_params):
+ """
+ Builds the query parameter.
+ :returns: dictionary in the OData query format,
+ e.g. {"$filter": "Type eq 2000"}
+ """
+ system_query_options_param = module_params.get("system_query_options")
+ query_parameter = None
+ if system_query_options_param:
+ filter_by_val = system_query_options_param.get("filter")
+ if filter_by_val:
+ query_parameter = {"$filter": filter_by_val}
+ return query_parameter
+
+
+def _get_resource_parameters(module_params, rest_obj):
+ """
+ Identifies the resource path for each requested identifier.
+ :returns: dictionary mapping each identifier to its respective resource path,
+ e.g. {"device_id": {1234: "DeviceService/Devices(1234)/InventoryDetails"},
+ "device_service_tag": {"MXL1234": "DeviceService/Devices(1345)/InventoryDetails"}}
+ """
+ fact_subset = module_params["fact_subset"]
+ path_dict = {}
+ if fact_subset != DEVICE_LIST:
+ inventory_type = None
+ device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj)
+ if fact_subset == DEVICES_INVENTORY_DETAILS:
+ system_query_options = module_params.get("system_query_options")
+ inventory_type = system_query_options.get(DEVICES_INVENTORY_TYPE)
+ path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset
+ for identifier_type, identifier_dict in device_id_service_tag_dict.items():
+ path_dict[identifier_type] = {}
+ for device_id, service_tag in identifier_dict.items():
+ key_identifier = service_tag if identifier_type == "device_service_tag" else device_id
+ path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id,
+ InventoryType=inventory_type)
+ path_dict[identifier_type].update({key_identifier: path})
+ else:
+ path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]})
+ return path_dict
+
+
+def _check_mutually_inclusive_arguments(val, module_params, required_args):
+ """"
+ Throws error if arguments detailed_inventory, subsystem_health
+ not exists with qualifier device_id or device_service_tag"""
+ system_query_options_param = module_params.get("system_query_options")
+ if system_query_options_param is None or (system_query_options_param is not None and not any(
+ system_query_options_param.get(qualifier) for qualifier in required_args)):
+ raise ValueError("One of the following {0} is required for {1}".format(required_args, val))
+
+
+def _validate_inputs(module_params):
+ """validates input parameters"""
+ fact_subset = module_params["fact_subset"]
+ if fact_subset != "basic_inventory":
+ _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"])
+
+
+def main():
+ system_query_options = {"type": 'dict', "required": False, "options": {
+ "device_id": {"type": 'list'},
+ "device_service_tag": {"type": 'list'},
+ "inventory_type": {"type": 'str'},
+ "filter": {"type": 'str', "required": False},
+ }}
+
+ module = AnsibleModule(
+ argument_spec={
+ "hostname": {"required": True, "type": 'str'},
+ "username": {"required": True, "type": 'str'},
+ "password": {"required": True, "type": 'str', "no_log": True},
+ "port": {"required": False, "default": 443, "type": 'int'},
+ "fact_subset": {"required": False, "default": "basic_inventory",
+ "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']},
+ "system_query_options": system_query_options,
+ },
+ required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']],
+ ['fact_subset', 'subsystem_health', ['system_query_options']], ],
+ supports_check_mode=False)
+
+ try:
+ _validate_inputs(module.params)
+ with RestOME(module.params, req_session=True) as rest_obj:
+ device_facts = _get_resource_parameters(module.params, rest_obj)
+ resp_status = []
+ if device_facts.get("basic_inventory"):
+ query_param = _get_query_parameters(module.params)
+ resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
+ device_facts = resp.json_data
+ resp_status.append(resp.status_code)
+ else:
+ for identifier_type, path_dict_map in device_facts.items():
+ for identifier, path in path_dict_map.items():
+ try:
+ resp = rest_obj.invoke_request('GET', path)
+ data = resp.json_data
+ resp_status.append(resp.status_code)
+ except HTTPError as err:
+ data = str(err)
+ path_dict_map[identifier] = data
+ if device_fact_error_report:
+ if "device_service_tag" in device_facts:
+ device_facts["device_service_tag"].update(device_fact_error_report)
+ else:
+ device_facts["device_service_tag"] = device_fact_error_report
+ if 200 in resp_status:
+ module.exit_json(device_info=device_facts)
+ else:
+ module.fail_json(msg="Failed to fetch the device information")
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py
new file mode 100644
index 00000000..b209b05a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: foreman
+short_description: Manage Foreman Resources
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Foreman resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ verify_ssl:
+ description:
+ - Whether to verify an SSL connection to Foreman server.
+ type: bool
+ default: False
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ required: true
+ params:
+ description:
+ - Parameters associated with the entity resource, to set or edit, in dictionary format (e.g. name, description).
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create CI Organization
+ community.general.foreman:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: organization
+ params:
+ name: My Cool New Organization
+ delegate_to: localhost
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+try:
+ from nailgun import entities
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module):
+ self._server = server
+ self._entities = entities
+ self._module = module
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+
+ return None
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True),
+ verify_ssl=dict(type='bool', default=False),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module)
+
+ # Let's make a connection to the server with the username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % to_native(e),
+ exception=traceback.format_exc())
+
+ if entity == 'organization':
+ ng.organization(params)
+ module.exit_json(changed=True, result="%s updated" % entity)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py
new file mode 100644
index 00000000..732c4723
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py
@@ -0,0 +1,615 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: katello
+short_description: Manage Katello Resources
+deprecated:
+ removed_in: '2.0.0' # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Katello resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ choices:
+ - repository
+ - manifest
+ - repository_set
+ - sync_plan
+ - content_view
+ - lifecycle_environment
+ - activation_key
+ - product
+ required: true
+ action:
+ description:
+ - The action associated with the entity resource, to set or edit, in dictionary format.
+ - Possible actions in relation to entities:
+ - "sync (available when entity=product or entity=repository)"
+ - "publish (available when entity=content_view)"
+ - "promote (available when entity=content_view)"
+ choices:
+ - sync
+ - publish
+ - promote
+ required: false
+ params:
+ description:
+ - Parameters associated with the entity resource and action, to set or edit, in dictionary format.
+ - Each choice may only be available with specific entities and actions.
+ - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,..],...)."
+ - The action "None" means no action specified.
+ - Possible Params in relation to entity and action.
+ - "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "content ([manifest,None])"
+ - "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
+ - "basearch ([repository_set,None])"
+ - "releaserver ([repository_set,None])"
+ - "sync_date ([sync_plan,None])"
+ - "interval ([sync_plan,None])"
+ - "repositories ([content_view,None])"
+ - "from_environment ([content_view,promote])"
+ - "to_environment([content_view,promote])"
+ - "prior ([lifecycle_environment,None])"
+ - "content_view ([activation_key,None])"
+ - "lifecycle_environment ([activation_key,None])"
+ required: true
+ task_timeout:
+ description:
+ - The timeout in seconds to wait for the started Foreman action to finish.
+ - If the timeout is reached and the Foreman action did not complete, the Ansible task fails. However, the Foreman action does not get canceled.
+ default: 1000
+ required: false
+ verify_ssl:
+ description:
+ - Verify the SSL/HTTPS connection (e.g. for a valid certificate).
+ default: false
+ type: bool
+ required: false
+'''
+
+EXAMPLES = '''
+---
+# Simple Example:
+
+- name: Create Product
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: product
+ params:
+ name: Centos 7
+ delegate_to: localhost
+
+# Abstraction Example:
+# katello.yml
+---
+- name: "{{ name }}"
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: "{{ entity }}"
+ params: "{{ params }}"
+ delegate_to: localhost
+
+# tasks.yml
+---
+- include: katello.yml
+ vars:
+ name: Create Dev Environment
+ entity: lifecycle_environment
+ params:
+ name: Dev
+ prior: Library
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create Centos Product
+ entity: product
+ params:
+ name: Centos 7
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create 7.2 Repository
+ entity: repository
+ params:
+ name: Centos 7.2
+ product: Centos 7
+ organization: Default Organization
+ content_type: yum
+ url: http://mirror.centos.org/centos/7/os/x86_64/
+
+- include: katello.yml
+ vars:
+ name: Create Centos 7 View
+ entity: content_view
+ params:
+ name: Centos 7 View
+ organization: Default Organization
+ repositories:
+ - name: Centos 7.2
+ product: Centos 7
+
+- include: katello.yml
+ vars:
+ name: Enable RHEL Product
+ entity: repository_set
+ params:
+ name: Red Hat Enterprise Linux 7 Server (RPMs)
+ product: Red Hat Enterprise Linux Server
+ organization: Default Organization
+ basearch: x86_64
+ releasever: 7
+
+- include: katello.yml
+ vars:
+ name: Promote Contentview Environment with longer timeout
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+
+# Best Practices
+
+# In Foreman, things can be done in parallel.
+# When a conflicting action is already running,
+# the task will fail instantly instead of waiting for the already running action to complete.
+# So you should use a "until success" loop to catch this.
+
+- name: Promote Contentview Environment with increased Timeout
+ community.general.katello:
+ username: ansibleuser
+ password: supersecret
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+ register: task_result
+ until: task_result is success
+ retries: 9
+ delay: 120
+
+'''
+
+RETURN = '''# '''
+
+import datetime
+import os
+import traceback
+
+try:
+ from nailgun import entities, entity_fields, entity_mixins
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module, task_timeout):
+ self._server = server
+ self._entities = entities
+ self._module = module
+ entity_mixins.TASK_TIMEOUT = task_timeout
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No organization found for %s" % name)
+
+ def find_lifecycle_environment(self, name, organization):
+ org = self.find_organization(organization)
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
+
+ def find_product(self, name, organization):
+ org = self.find_organization(organization)
+
+ product = self._entities.Product(self._server, name=name, organization=org)
+ response = product.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Product found for %s" % name)
+
+ def find_repository(self, name, product, organization):
+ product = self.find_product(product, organization)
+
+ repository = self._entities.Repository(self._server, name=name, product=product)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Repository found for %s" % name)
+
+ def find_content_view(self, name, organization):
+ org = self.find_organization(organization)
+
+ content_view = self._entities.ContentView(self._server, name=name, organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View found for %s" % name)
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+ def manifest(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
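+ # NOTE: params['content'] is concatenated directly onto os.getcwd(), so it
+ # is expected to begin with a path separator.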
+ with open(os.getcwd() + params['content'], 'r') as content_file:
+ content = content_file.read()
+
+ manifest = self._entities.Subscription(self._server)
+
+ try:
+ manifest.upload(
+ data={'organization_id': org.id},
+ files={'content': content}
+ )
+ return True
+ except Exception as e:
+
+ if "Import is the same as existing data" in e.message:
+ return False
+ else:
+ self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def product(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+ product = self._entities.Product(self._server, **params)
+ response = product.search()
+
+ if len(response) == 1:
+ product.id = response[0].id
+ product.update()
+ else:
+ product.create()
+
+ return True
+
+ def sync_product(self, params):
+ org = self.find_organization(params['organization'])
+ product = self.find_product(params['name'], org.name)
+
+ return product.sync()
+
+ def repository(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ params['product'] = product.id
+ del params['organization']
+
+ repository = self._entities.Repository(self._server, **params)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ repository.id = response[0].id
+ repository.update()
+ else:
+ repository.create()
+
+ return True
+
+ def sync_repository(self, params):
+ org = self.find_organization(params['organization'])
+ repository = self.find_repository(params['name'], params['product'], org.name)
+
+ return repository.sync()
+
+ def repository_set(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ del params['product']
+ del params['organization']
+
+ if not product:
+ return False
+ else:
+ reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
+ reposet = reposet.search()[0]
+
+ formatted_name = [params['name'].replace('(', '').replace(')', '')]
+ formatted_name.append(params['basearch'])
+
+ if 'releasever' in params:
+ formatted_name.append(params['releasever'])
+
+ formatted_name = ' '.join(formatted_name)
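+ # e.g. name='Red Hat Enterprise Linux 7 Server (RPMs)', basearch='x86_64',
+ # releasever='7' gives 'Red Hat Enterprise Linux 7 Server RPMs x86_64 7'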
+
+ repository = self._entities.Repository(self._server, product=product, name=formatted_name)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ repository = repository.search()
+
+ if len(repository) == 0:
+ if 'releasever' in params:
+ reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
+ else:
+ reposet.enable(data={'basearch': params['basearch']})
+
+ return True
+
+ def sync_plan(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
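+ # sync_date is expected in 'HH:MM' format; strptime fills the unspecified
+ # date fields with the default of 1900-01-01.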
+ params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
+
+ products = params['products']
+ del params['products']
+
+ sync_plan = self._entities.SyncPlan(
+ self._server,
+ name=params['name'],
+ organization=org
+ )
+ response = sync_plan.search()
+
+ sync_plan.sync_date = params['sync_date']
+ sync_plan.interval = params['interval']
+
+ if len(response) == 1:
+ sync_plan.id = response[0].id
+ sync_plan.update()
+ else:
+ response = sync_plan.create()
+ sync_plan.id = response.id
+
+ if products:
+ ids = []
+
+ for name in products:
+ product = self.find_product(name, org.name)
+ ids.append(product.id)
+
+ sync_plan.add_products(data={'product_ids': ids})
+
+ return True
+
+ def content_view(self, params):
+ org = self.find_organization(params['organization'])
+
+ content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ content_view.id = response[0].id
+ content_view.update()
+ else:
+ content_view = content_view.create()
+
+ if params['repositories']:
+ repos = []
+
+ for repository in params['repositories']:
+ repository = self.find_repository(repository['name'], repository['product'], org.name)
+ repos.append(repository)
+
+ content_view.repository = repos
+ content_view.update(['repository'])
+
+ def find_content_view_version(self, name, organization, environment):
+ env = self.find_lifecycle_environment(environment, organization)
+ content_view = self.find_content_view(name, organization)
+
+ content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
+ response = content_view_version.search(['content_view'], {'environment_id': env.id})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View version found for %s" % response)
+
+ def publish(self, params):
+ content_view = self.find_content_view(params['name'], params['organization'])
+
+ return content_view.publish()
+
+ def promote(self, params):
+ to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
+ version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
+
+ data = {'environment_id': to_environment.id}
+ return version.promote(data=data)
+
+ def lifecycle_environment(self, params):
+ org = self.find_organization(params['organization'])
+ prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ lifecycle_env.id = response[0].id
+ lifecycle_env.update()
+ else:
+ lifecycle_env.create()
+
+ return True
+
+ def activation_key(self, params):
+ org = self.find_organization(params['organization'])
+
+ activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
+ response = activation_key.search()
+
+ if len(response) == 1:
+ activation_key.id = response[0].id
+ activation_key.update()
+ else:
+ activation_key.create()
+
+ if params['content_view']:
+ content_view = self.find_content_view(params['content_view'], params['organization'])
+ lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
+
+ activation_key.content_view = content_view
+ activation_key.environment = lifecycle_environment
+ activation_key.update()
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True,
+ choices=['repository', 'manifest', 'repository_set', 'sync_plan',
+ 'content_view', 'lifecycle_environment', 'activation_key', 'product']),
+ action=dict(type='str', choices=['sync', 'publish', 'promote']),
+ verify_ssl=dict(type='bool', default=False),
+ task_timeout=dict(type='int', default=1000),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ action = module.params['action']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+ task_timeout = module.params['task_timeout']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module, task_timeout)
+
+ # Let's make a connection to the server with the username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
+
+ result = False
+
+ if entity == 'product':
+ if action == 'sync':
+ result = ng.sync_product(params)
+ else:
+ result = ng.product(params)
+ elif entity == 'repository':
+ if action == 'sync':
+ result = ng.sync_repository(params)
+ else:
+ result = ng.repository(params)
+ elif entity == 'manifest':
+ result = ng.manifest(params)
+ elif entity == 'repository_set':
+ result = ng.repository_set(params)
+ elif entity == 'sync_plan':
+ result = ng.sync_plan(params)
+ elif entity == 'content_view':
+ if action == 'publish':
+ result = ng.publish(params)
+ elif action == 'promote':
+ result = ng.promote(params)
+ else:
+ result = ng.content_view(params)
+ elif entity == 'lifecycle_environment':
+ result = ng.lifecycle_environment(params)
+ elif entity == 'activation_key':
+ result = ng.activation_key(params)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+ module.exit_json(changed=result, result="%s updated" % entity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py
new file mode 100644
index 00000000..1e37aee3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hpilo_boot
+author: Dag Wieers (@dagwieers)
+short_description: Boot system using specific media through HP iLO interface
+description:
+- "This module boots a system through its HP iLO interface. The boot media
+ can be one of: cdrom, floppy, hdd, network or usb."
+- This module requires the hpilo python module.
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ media:
+ description:
+ - The boot media to boot the system from.
+ choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ]
+ image:
+ description:
+ - The URL of a cdrom, floppy or usb boot media image,
+ in the form C(protocol://username:password@hostname:port/filename).
+ - protocol is either C(http) or C(https).
+ - username:password is optional.
+ - port is optional.
+ state:
+ description:
+ - The state of the boot media.
+ - "no_boot: Do not boot from the device"
+ - "boot_once: Boot from the device once and then notthereafter"
+ - "boot_always: Boot from the device each time the server is rebooted"
+ - "connect: Connect the virtual media device and set to boot_always"
+ - "disconnect: Disconnects the virtual media device and set to no_boot"
+ - "poweroff: Power off the server"
+ default: boot_once
+ choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
+ force:
+ description:
+ - Whether to force a reboot (even when the system is already booted).
+ - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+ default: no
+ type: bool
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- python-hpilo
+notes:
+- To use a USB key image you need to specify floppy as boot media.
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ media: cdrom
+ image: http://some-web-server/iso/boot.iso
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+
+- name: Power off a server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_HOST
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ state: poweroff
+ delegate_to: localhost
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+import time
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
+ image=dict(type='str'),
+ state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
+ force=dict(type='bool', default=False),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ )
+ )
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ media = module.params['media']
+ image = module.params['image']
+ state = module.params['state']
+ force = module.params['force']
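+ # Build the ssl PROTOCOL_* constant name from the module choice, e.g.
+ # 'TLSv1_1' -> hpilo.ssl.PROTOCOL_TLSv1_1 (upper() then 'V' -> 'v').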
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+ changed = False
+ status = {}
+ power_status = 'UNKNOWN'
+
+ if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
+
+ # Workaround for: Error communicating with iLO: Problem manipulating EV
+ try:
+ ilo.set_one_time_boot(media)
+ except hpilo.IloError:
+ time.sleep(60)
+ ilo.set_one_time_boot(media)
+
+ # TODO: Verify if image URL exists/works
+ if image:
+ ilo.insert_virtual_media(media, image)
+ changed = True
+
+ if media == 'cdrom':
+ ilo.set_vm_status('cdrom', state, True)
+ status = ilo.get_vm_status()
+ changed = True
+ elif media in ('floppy', 'usb'):
+ ilo.set_vf_status(state, True)
+ status = ilo.get_vf_status()
+ changed = True
+
+ # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
+ if state in ('boot_once', 'boot_always') or force:
+
+ power_status = ilo.get_host_power_status()
+
+ if not force and power_status == 'ON':
+ module.fail_json(msg='HP iLO (%s) reports that the server is already powered on!' % host)
+
+ if power_status == 'ON':
+ ilo.warm_boot_server()
+# ilo.cold_boot_server()
+ changed = True
+ else:
+ ilo.press_pwr_btn()
+# ilo.reset_server()
+# ilo.set_host_power(host_power=True)
+ changed = True
+
+ elif state == 'poweroff':
+
+ power_status = ilo.get_host_power_status()
+
+ if not power_status == 'OFF':
+ ilo.hold_pwr_btn()
+# ilo.set_host_power(host_power=False)
+ changed = True
+
+ module.exit_json(changed=changed, power=power_status, **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+ This information includes hardware- and network-related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }})!'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+ - macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
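+ # NOTE: the following 'type == 209' branch duplicates the check above, so in
+ # this elif chain it can never be reached as written.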
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
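+ # e.g. total_memory_size '32 GB' -> ram.group(1) == '32', ram.group(2) == 'GB'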
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+ This information includes hardware- and network-related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from a HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
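+
+# A minimal sketch (not from the original docs): the ssl_version option can
+# be pinned to one of its documented choices, for example TLSv1_2, for iLO
+# firmwares that reject the default TLSv1.
+- name: Gather information from an HP iLO that requires TLS 1.2
+  community.general.hpilo_info:
+    host: YOUR_ILO_ADDRESS
+    login: YOUR_ILO_LOGIN
+    password: YOUR_ILO_PASSWORD
+    ssl_version: TLSv1_2
+  delegate_to: localhost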
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+      macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
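+
+# Illustrative sketch of the flat entry format this helper expects
+# (the sample values are assumptions, not real iLO output):
+#   parse_flat_interface({'Port': '1', 'MAC': '00-11-22-33-44-55'})
+#   -> ('hw_eth0', {'macaddress': '00:11:22:33:44:55',
+#                   'macaddress_dash': '00-11-22-33-44-55'})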
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
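+        # NOTE: the next test repeats 'type == 209' from the branch above,
+        # so the iSCSI branch below is unreachable as written.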
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py
new file mode 100644
index 00000000..451e4b06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hponcfg
+author: Dag Wieers (@dagwieers)
+short_description: Configure HP iLO interface using hponcfg
+description:
+- This module configures the HP iLO interface using hponcfg.
+options:
+ path:
+ description:
+ - The XML file as accepted by hponcfg.
+ required: true
+ aliases: ['src']
+ minfw:
+ description:
+ - The minimum firmware level needed.
+ required: false
+ executable:
+ description:
+    - Path to the hponcfg executable (C(hponcfg) which uses $PATH).
+ default: hponcfg
+ verbose:
+ description:
+ - Run hponcfg in verbose mode (-v).
+ default: no
+ type: bool
+requirements:
+- hponcfg tool
+notes:
+- You need a working hponcfg on the target system.
+'''
+
+EXAMPLES = r'''
+- name: Example hponcfg configuration XML
+ ansible.builtin.copy:
+ content: |
+ <ribcl VERSION="2.0">
+ <login USER_LOGIN="user" PASSWORD="password">
+ <rib_info MODE="WRITE">
+ <mod_global_settings>
+ <session_timeout value="0"/>
+ <ssh_status value="Y"/>
+ <ssh_port value="22"/>
+ <serial_cli_status value="3"/>
+ <serial_cli_speed value="5"/>
+ </mod_global_settings>
+ </rib_info>
+ </login>
+ </ribcl>
+ dest: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO using enable-ssh.xml
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO on VMware ESXi hypervisor
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+ executable: /opt/hp/tools/hponcfg
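+
+# A minimal sketch (not from the original docs): minfw and verbose map to
+# hponcfg's -m and -v flags; the firmware level shown is an assumed value.
+- name: Configure HP iLO, requiring a minimum firmware level, with verbose output
+  community.general.hponcfg:
+    src: /tmp/enable-ssh.xml
+    minfw: '2.30'
+    verbose: yes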
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ minfw=dict(type='str'),
+ executable=dict(default='hponcfg', type='str'),
+ verbose=dict(default=False, type='bool'),
+ )
+ )
+
+ # Consider every action a change (not idempotent yet!)
+ changed = True
+
+ src = module.params['src']
+ minfw = module.params['minfw']
+ executable = module.params['executable']
+ verbose = module.params['verbose']
+
+ options = ' -f %s' % src
+
+ if verbose:
+ options += ' -v'
+
+ if minfw:
+ options += ' -m %s' % minfw
+
+ rc, stdout, stderr = module.run_command('%s %s' % (executable, options))
+
+ if rc != 0:
+ module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=changed, stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py
new file mode 100644
index 00000000..ca318b4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: imc_rest
+short_description: Manage Cisco IMC hardware through its REST API
+description:
+- Provides direct access to the Cisco IMC REST API.
+- Perform any configuration changes and actions that the Cisco IMC supports.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- lxml
+- xmljson >= 0.1.8
+options:
+ hostname:
+ description:
+ - IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
+ required: true
+ aliases: [ host, ip ]
+ username:
+ description:
+    - Username used to login to the IMC.
+ default: admin
+ aliases: [ user ]
+ password:
+ description:
+ - The password to use for authentication.
+ default: password
+ path:
+ description:
+ - Name of the absolute path of the filename that includes the body
+ of the http request being sent to the Cisco IMC REST API.
+    - Parameter C(path) is mutually exclusive with parameter C(content).
+ aliases: [ 'src', 'config_file' ]
+ content:
+ description:
+ - When used instead of C(path), sets the content of the API requests directly.
+ - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module.
+    - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream;
+      the Cisco IMC output is subsequently merged.
+    - Parameter C(content) is mutually exclusive with parameter C(path).
+ protocol:
+ description:
+ - Connection protocol to use.
+ default: https
+ choices: [ http, https ]
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+ - This is the time that every single connection (every fragment) can spend.
+ If this C(timeout) is reached, the module will fail with a
+ C(Connection failure) indicating that C(The read operation timed out).
+ default: 60
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+notes:
+- The XML fragments don't need an authentication cookie; this is injected by the module automatically.
+- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
+- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
+ from the previous configuration. As a result, this module will always report a change on subsequent runs.
+ In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
+- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout)
+ parameter. Some XML fragments can take longer than the default timeout.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+'''
+
+EXAMPLES = r'''
+- name: Power down server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Configure IMC using multiple XML fragments
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <!-- Configure Serial-on-LAN -->
+ <configConfMo><inConfig>
+      <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed="115200" comport="com0"/>
+ </inConfig></configConfMo>
+
+ <!-- Configure Console Redirection -->
+ <configConfMo><inConfig>
+ <biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
+ vpBaudRate="115200"
+ vpConsoleRedirection="com-0"
+ vpFlowControl="none"
+ vpTerminalType="vt100"
+ vpPuttyKeyPad="LINUX"
+ vpRedirectionAfterPOST="Always Enable"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Enable PXE boot and power-cycle server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <!-- Configure PXE boot -->
+ <configConfMo><inConfig>
+ <lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
+ </inConfig></configConfMo>
+
+ <!-- Power cycle server -->
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Reconfigure IMC to boot from storage
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Add customer description to server
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Disable HTTP and increase session timeout to max value 10800 secs
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <configConfMo><inConfig>
+ <commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
+ </inConfig></configConfMo>
+
+ <configConfMo><inConfig>
+ <commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
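+
+# A minimal sketch (not from the original docs): the same request can be read
+# from a file via the path parameter (aliases src, config_file); the filename
+# is an assumed example.
+- name: Power down server using an XML file instead of inline content
+  community.general.imc_rest:
+    hostname: '{{ imc_hostname }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: no
+    path: /tmp/power-down.xml
+  delegate_to: localhost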
+'''
+
+RETURN = r'''
+aaLogin:
+ description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
+ "outPriv": "admin",
+ "outRefreshPeriod": "600",
+ "outSessionId": "114",
+ "outVersion": "2.0(13e)",
+ "response": "yes"
+ }
+configConfMo:
+ description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+elapsed:
+ description: Elapsed time in seconds
+ returned: always
+ type: int
+ sample: 31
+response:
+ description: HTTP response message, including content length
+ returned: always
+ type: str
+ sample: OK (729 bytes)
+status:
+ description: The HTTP response status code
+ returned: always
+  type: int
+ sample: 200
+error:
+ description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
+ returned: failed
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "errorCode": "ERR-xml-parse-error",
+ "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
+ "invocationResult": "594",
+ "response": "yes"
+ }
+error_code:
+ description: Cisco IMC error code
+ returned: failed
+ type: str
+ sample: ERR-xml-parse-error
+error_text:
+ description: Cisco IMC error message
+ returned: failed
+ type: str
+ sample: |
+ XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
+input:
+ description: RAW XML input sent to the Cisco IMC, causing the error
+ returned: failed
+ type: str
+ sample: |
+ <configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
+output:
+ description: RAW XML output received from the Cisco IMC, with error details
+ returned: failed
+ type: str
+ sample: >
+ <error cookie=""
+ response="yes"
+ errorCode="ERR-xml-parse-error"
+ invocationResult="594"
+ errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
+'''
+
+import atexit
+import datetime
+try:
+    from itertools import zip_longest  # Python 3
+except ImportError:
+    from itertools import izip_longest as zip_longest  # Python 2
+import os
+import traceback
+
+LXML_ETREE_IMP_ERR = None
+try:
+ import lxml.etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+XMLJSON_COBRA_IMP_ERR = None
+try:
+ from xmljson import cobra
+ HAS_XMLJSON_COBRA = True
+except ImportError:
+ XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
+ HAS_XMLJSON_COBRA = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+
+def imc_response(module, rawoutput, rawinput=''):
+ ''' Handle IMC returned data '''
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ result = cobra.data(xmloutput)
+
+ # Handle errors
+ if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
+ if rawinput:
+ result['input'] = rawinput
+ result['output'] = rawoutput
+ result['error_code'] = xmloutput.get('errorCode')
+ result['error_text'] = xmloutput.get('errorDescr')
+ module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
+
+ return result
+
+
+def logout(module, url, cookie, timeout):
+ ''' Perform a logout, if needed '''
+ data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
+ resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
+
+
+def merge(one, two):
+ ''' Merge two complex nested datastructures into one'''
+ if isinstance(one, dict) and isinstance(two, dict):
+ copy = dict(one)
+ # copy.update({key: merge(one.get(key, None), two[key]) for key in two})
+ copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
+ return copy
+
+ elif isinstance(one, list) and isinstance(two, list):
+        return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
+
+ return one if two is None else two
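+
+# Illustrative sketch of merge() on two nested structures (assumed values):
+#   merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
+#   -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}
+# Dicts are merged key-wise; lists element-wise, padding the shorter list
+# with None; scalars from the second argument win unless they are None.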
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ hostname=dict(type='str', required=True, aliases=['host', 'ip']),
+ username=dict(type='str', default='admin', aliases=['user']),
+ password=dict(type='str', default='password', no_log=True),
+ content=dict(type='str'),
+ path=dict(type='path', aliases=['config_file', 'src']),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ timeout=dict(type='int', default=60),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['content', 'path']],
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if not HAS_XMLJSON_COBRA:
+ module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ content = module.params['content']
+ path = module.params['path']
+
+ protocol = module.params['protocol']
+ timeout = module.params['timeout']
+
+ result = dict(
+ failed=False,
+ changed=False,
+ )
+
+ # Report missing file
+ file_exists = False
+ if path:
+ if os.path.isfile(path):
+ file_exists = True
+ else:
+ module.fail_json(msg='Cannot find/access path:\n%s' % path)
+
+ start = datetime.datetime.utcnow()
+
+ # Perform login first
+ url = '%s://%s/nuova' % (protocol, hostname)
+ data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
+ resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or auth['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
+ result.update(imc_response(module, resp.read()))
+
+ # Store cookie for future requests
+ try:
+ cookie = result['aaaLogin']['attributes']['outCookie']
+ except Exception:
+ module.fail_json(msg='Could not find cookie in output', **result)
+
+    # If we do not log out properly, we run out of sessions quickly
+ atexit.register(logout, module, url, cookie, timeout)
+
+ # Prepare request data
+ if content:
+ rawdata = content
+ elif file_exists:
+ with open(path, 'r') as config_object:
+ rawdata = config_object.read()
+
+ # Wrap the XML documents in a <root> element
+ xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
+
+ # Handle each XML document separately in the same session
+ for xmldoc in list(xmldata):
+ if xmldoc.tag is lxml.etree.Comment:
+ continue
+ # Add cookie to XML
+ xmldoc.set('cookie', cookie)
+ data = lxml.etree.tostring(xmldoc)
+
+ # Perform actual request
+ resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or info['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
+
+ # Merge results with previous results
+ rawoutput = resp.read()
+ result = merge(result, imc_response(module, rawoutput, rawinput=data))
+ result['response'] = info['msg']
+ result['status'] = info['status']
+
+ # Check for any changes
+ # NOTE: Unfortunately IMC API always report status as 'modified'
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
+ result['changed'] = ('modified' in results)
+
+ # Report success
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py
new file mode 100644
index 00000000..6509ca21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of order of boot devices
+description:
+ - Use this module to manage order of boot devices
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ bootdev:
+ description:
+ - Set boot device to use on next reboot
+ - "The choices for the device are:
+ - network -- Request network boot
+ - floppy -- Boot from floppy
+ - hd -- Boot from hard drive
+ - safe -- Boot from hard drive, requesting 'safe mode'
+          - optical -- Boot from CD/DVD/BD drive
+ - setup -- Boot into setup utility
+ - default -- remove any IPMI directed boot device request"
+ required: true
+ choices:
+ - network
+ - floppy
+ - hd
+ - safe
+ - optical
+ - setup
+ - default
+ state:
+ description:
+      - Whether to ensure that the given boot device is the desired one.
+      - "The choices for the state are:
+          - present -- Ensure the given boot device is requested for the next boot
+          - absent -- Ensure the given boot device is not requested (reverts to default)"
+ default: present
+ choices: [ present, absent ]
+ persistent:
+ description:
+ - If set, ask that system firmware uses this device beyond next boot.
+ Be aware many systems do not honor this.
+ type: bool
+ default: 'no'
+ uefiboot:
+ description:
+ - If set, request UEFI boot explicitly.
+ Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+ In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ type: bool
+ default: 'no'
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+bootdev:
+ description: The boot device name which will be used beyond next boot.
+ returned: success
+ type: str
+ sample: default
+persistent:
+ description: If True, system firmware will use this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+uefimode:
+ description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+'''
+
+EXAMPLES = '''
+- name: Ensure bootdevice is HD
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: hd
+
+- name: Ensure bootdevice is not Network
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: network
+ state: absent
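+
+# A minimal sketch (not from the original docs): persistent and uefiboot are
+# the module's boolean options; many BMCs ignore the persistent request.
+- name: Ensure bootdevice is network, persistently and via UEFI
+  community.general.ipmi_boot:
+    name: test.testdomain.com
+    user: admin
+    password: password
+    bootdev: network
+    persistent: yes
+    uefiboot: yes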
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']),
+ persistent=dict(default=False, type='bool'),
+ uefiboot=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ bootdev = module.params['bootdev']
+ persistent = module.params['persistent']
+ uefiboot = module.params['uefiboot']
+ request = dict()
+
+ if state == 'absent' and bootdev == 'default':
+ module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+ current = ipmi_cmd.get_bootdev()
+        # uefimode may not be supported by the BMC, so use desired value as default
+ current.setdefault('uefimode', uefiboot)
+ if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
+ request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
+ elif state == 'absent' and current['bootdev'] == bootdev:
+ request = dict(bootdev='default')
+ else:
+ module.exit_json(changed=False, **current)
+
+ if module.check_mode:
+ response = dict(bootdev=request['bootdev'])
+ else:
+ response = ipmi_cmd.set_bootdev(**request)
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ if 'persist' in request:
+ response['persistent'] = request['persist']
+ if 'uefiboot' in request:
+ response['uefimode'] = request['uefiboot']
+
+ module.exit_json(changed=True, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py
new file mode 100644
index 00000000..47840154
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_power
+short_description: Power management for machine
+description:
+ - Use this module for power management
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ state:
+ description:
+      - Whether to ensure that the machine is in the desired state.
+ - "The choices for state are:
+ - on -- Request system turn on
+ - off -- Request system turn off without waiting for OS to shutdown
+ - shutdown -- Have system request OS proper shutdown
+ - reset -- Request system reset without waiting for OS
+ - boot -- If system is off, then 'on', else 'reset'"
+    choices: ['on', 'off', 'shutdown', 'reset', 'boot']
+ required: true
+ timeout:
+ description:
+ - Maximum number of seconds before interrupt request.
+ default: 300
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+powerstate:
+ description: The current power state of the machine.
+ returned: success
+ type: str
+  sample: 'on'
+'''
+
+EXAMPLES = '''
+- name: Ensure machine is powered on
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ state: on
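+
+# A minimal sketch (not from the original docs): request a graceful OS
+# shutdown; the timeout value is an assumed example (default is 300 seconds).
+- name: Request OS shutdown, waiting up to 60 seconds
+  community.general.ipmi_power:
+    name: test.testdomain.com
+    user: admin
+    password: password
+    state: shutdown
+    timeout: 60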
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(default=300, type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+
+ current = ipmi_cmd.get_power()
+ if current['powerstate'] != state:
+ response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout)
+ changed = True
+ else:
+ response = current
+ changed = False
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ module.exit_json(changed=changed, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py
new file mode 100644
index 00000000..7bd7b9ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_cmms
+short_description: Custom module for lxca cmms inventory utility
+description:
+  - This module returns/displays inventory details of CMMs.
+
+options:
+ uuid:
+ description:
+      UUID of device, this is a string with length greater than 16.
+
+ command_options:
+ description:
+      Options to filter CMMs information.
+ default: cmms
+ choices:
+ - cmms
+ - cmms_by_uuid
+ - cmms_by_chassis_uuid
+
+ chassis:
+ description:
+      UUID of chassis, this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all cmms info
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+
+# get specific cmms info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_uuid
+
+# get specific cmms info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_chassis_uuid
+
+'''
+
+RETURN = r'''
+result:
+ description: cmms detail from lxca
+ returned: success
+ type: dict
+ sample:
+ cmmList:
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple cmms details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import cmms
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _cmms(module, lxca_con):
+ return cmms(lxca_con)
+
+
+def _cmms_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return cmms(lxca_con, module.params['uuid'])
+
+
+def _cmms_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return cmms(lxca_con, chassis=module.params['chassis'])
+
+
+def setup_module_object():
+ """
+    this function merges the argument spec and creates the ansible module object
+ :return:
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
+FUNC_DICT = {
+ 'cmms': _cmms,
+ 'cmms_by_uuid': _cmms_by_uuid,
+ 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+ 'cmms_by_chassis_uuid']),
+ uuid=dict(default=None),
+ chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+    This function invokes commands
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+        error_msg = '; '.join(str(e) for e in exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py
new file mode 100644
index 00000000..febe2fd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_nodes
+short_description: Custom module for lxca nodes inventory utility
+description:
+  - This module returns/displays inventory details of nodes.
+
+options:
+ uuid:
+ description:
+      UUID of device, this is a string with length greater than 16.
+
+ command_options:
+ description:
+      Options to filter nodes information.
+ default: nodes
+ choices:
+ - nodes
+ - nodes_by_uuid
+ - nodes_by_chassis_uuid
+ - nodes_status_managed
+ - nodes_status_unmanaged
+
+ chassis:
+ description:
+      UUID of chassis, this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all nodes info
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes
+
+# get specific nodes info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_uuid
+
+# get specific nodes info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_chassis_uuid
+
+# get managed nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_managed
+
+# get unmanaged nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_unmanaged
+
+'''
+
+RETURN = r'''
+result:
+ description: nodes detail from lxca
+ returned: always
+ type: dict
+ sample:
+ nodeList:
+ - machineType: '6241'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: '8871'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple nodes details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import nodes
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _nodes(module, lxca_con):
+ return nodes(lxca_con)
+
+
+def _nodes_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return nodes(lxca_con, module.params['uuid'])
+
+
+def _nodes_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return nodes(lxca_con, chassis=module.params['chassis'])
+
+
+def _nodes_status_managed(module, lxca_con):
+ return nodes(lxca_con, status='managed')
+
+
+def _nodes_status_unmanaged(module, lxca_con):
+ return nodes(lxca_con, status='unmanaged')
+
+
+def setup_module_object():
+ """
+    this function merges the argument spec and creates the ansible module object
+ :return:
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
+FUNC_DICT = {
+ 'nodes': _nodes,
+ 'nodes_by_uuid': _nodes_by_uuid,
+ 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid,
+ 'nodes_status_managed': _nodes_status_managed,
+ 'nodes_status_unmanaged': _nodes_status_unmanaged,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
+ 'nodes_by_chassis_uuid',
+ 'nodes_status_managed',
+ 'nodes_status_unmanaged']),
+ uuid=dict(default=None), chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+    This function invokes commands
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+        error_msg = '; '.join(str(e) for e in exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py
new file mode 100644
index 00000000..d40a8ca0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alert_profiles
+
+short_description: Configuration of alert profiles for ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert profile should not exist,
+ - present - alert profile should exist,
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The unique alert profile name in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The resource type for the alert profile in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ alerts:
+ type: list
+ description:
+ - List of alert descriptions to assign to this profile.
+ - Required if state is "present"
+ notes:
+ type: str
+ description:
+ - Optional notes for this profile
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert profile to ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: present
+ name: Test profile
+ resource_type: ContainerNode
+ alerts:
+ - Test Alert 01
+ - Test Alert 02
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert profile from ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: absent
+ name: Test profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
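+
+# A minimal sketch (not from the original docs): re-running with state=present
+# and a different alerts list updates the existing profile in place, assigning
+# and unassigning alerts as needed.
+- name: Update the alert list of an existing profile
+  community.general.manageiq_alert_profiles:
+    state: present
+    name: Test profile
+    resource_type: ContainerNode
+    alerts:
+      - Test Alert 02
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False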
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlertProfiles(object):
+ """ Object to execute alert profile management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url)
+
+ def get_profiles(self):
+ """ Get all alert profiles from ManageIQ
+ """
+ try:
+ response = self.client.get(self.url + '?expand=alert_definitions,resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e))
+ return response.get('resources') or []
+
+ def get_alerts(self, alert_descriptions):
+ """ Get a list of alert hrefs from a list of alert descriptions
+ """
+ alerts = []
+ for alert_description in alert_descriptions:
+ alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
+ description=alert_description)
+ alerts.append(alert['href'])
+
+ return alerts
+
+ def add_profile(self, profile):
+ """ Add a new alert profile to ManageIQ
+ """
+ # find all alerts to add to the profile
+ # we do this first to fail early if one is missing.
+ alerts = self.get_alerts(profile['alerts'])
+
+ # build the profile dict to send to the server
+
+ profile_dict = dict(name=profile['name'],
+ description=profile['name'],
+ mode=profile['resource_type'])
+ if profile['notes']:
+ profile_dict['set_data'] = dict(notes=profile['notes'])
+
+ # send it to the server
+ try:
+ result = self.client.post(self.url, resource=profile_dict, action="create")
+ except Exception as e:
+ self.module.fail_json(msg="Creating profile failed {error}".format(error=e))
+
+ # now that it has been created, we can assign the alerts
+ self.assign_or_unassign(result['results'][0], alerts, "assign")
+
+ msg = "Profile {name} created successfully"
+ msg = msg.format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def delete_profile(self, profile):
+ """ Delete an alert profile from ManageIQ
+ """
+ try:
+ self.client.post(profile['href'], action="delete")
+ except Exception as e:
+ self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))
+
+ msg = "Successfully deleted profile {name}".format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def get_alert_href(self, alert):
+ """ Get an absolute href for an alert
+ """
+ return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])
+
+ def assign_or_unassign(self, profile, resources, action):
+ """ Assign or unassign alerts to profile, and validate the result.
+ """
+ alerts = [dict(href=href) for href in resources]
+
+ subcollection_url = profile['href'] + '/alert_definitions'
+ try:
+ result = self.client.post(subcollection_url, resources=alerts, action=action)
+ if len(result['results']) != len(alerts):
+ msg = "Failed to {action} alerts to profile '{name}'," +\
+ "expected {expected} alerts to be {action}ed," +\
+ "but only {changed} were {action}ed"
+ msg = msg.format(action=action,
+ name=profile['name'],
+ expected=len(alerts),
+ changed=result['results'])
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to {action} alerts to profile '{name}': {error}"
+ msg = msg.format(action=action, name=profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ return result['results']
+
+ def update_profile(self, old_profile, desired_profile):
+ """ Update alert profile in ManageIQ
+ """
+ changed = False
+ # we need to use client.get to query the alert definitions
+ old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')
+
+ # figure out which alerts we need to assign / unassign
+ # alerts listed by the user:
+ desired_alerts = set(self.get_alerts(desired_profile['alerts']))
+
+ # alert which currently exist in the profile
+ if 'alert_definitions' in old_profile:
+ # we use get_alert_href to have a direct href to the alert
+ existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']])
+ else:
+ # no alerts in this profile
+ existing_alerts = set()
+
+ to_add = list(desired_alerts - existing_alerts)
+ to_remove = list(existing_alerts - desired_alerts)
+
+ # assign / unassign the alerts, if needed
+
+ if to_remove:
+ self.assign_or_unassign(old_profile, to_remove, "unassign")
+ changed = True
+ if to_add:
+ self.assign_or_unassign(old_profile, to_add, "assign")
+ changed = True
+
+ # update other properties
+ profile_dict = dict()
+
+ if old_profile['mode'] != desired_profile['resource_type']:
+ # mode needs to be updated
+ profile_dict['mode'] = desired_profile['resource_type']
+
+ # check if notes need to be updated
+ old_notes = old_profile.get('set_data', {}).get('notes')
+
+ if desired_profile['notes'] != old_notes:
+ profile_dict['set_data'] = dict(notes=desired_profile['notes'])
+
+ if profile_dict:
+ # if we have any updated values
+ changed = True
+ try:
+ result = self.client.post(old_profile['href'],
+ resource=profile_dict,
+ action="edit")
+ except Exception as e:
+ msg = "Updating profile '{name}' failed: {error}"
+ msg = msg.format(name=old_profile['name'], error=e)
+                # result is unbound when client.post() raised, so only pass msg
+                self.module.fail_json(msg=msg)
+
+ if changed:
+ msg = "Profile {name} updated successfully".format(name=desired_profile['name'])
+ else:
+ msg = "No update needed for profile {name}".format(name=desired_profile['name'])
+ return dict(changed=changed, msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ alerts=dict(type='list'),
+ notes=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['name', 'resource_type']),
+ ('state', 'absent', ['name'])])
+
+ state = module.params['state']
+ name = module.params['name']
+
+ manageiq = ManageIQ(module)
+ manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
+
+ existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
+ name=name)
+
+ # we need to add or update the alert profile
+ if state == "present":
+ if not existing_profile:
+ # a profile with this name doesn't exist yet, let's create it
+ res_args = manageiq_alert_profiles.add_profile(module.params)
+ else:
+ # a profile with this name exists, we might need to update it
+ res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params)
+
+ # this alert profile should not exist
+ if state == "absent":
+ # if we have an alert profile with this name, delete it
+ if existing_profile:
+ res_args = manageiq_alert_profiles.delete_profile(existing_profile)
+ else:
+ # This alert profile does not exist in ManageIQ, and that's okay
+ msg = "Alert profile '{name}' does not exist in ManageIQ"
+ msg = msg.format(name=name)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py
new file mode 100644
index 00000000..4f818a3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alerts
+
+short_description: Configuration of alerts in ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert should not exist,
+ - present - alert should exist,
+ required: False
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The unique alert description in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The entity type for the alert in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ expression_type:
+ type: str
+ description:
+ - Expression type.
+ default: hash
+ choices: ["hash", "miq"]
+ expression:
+ type: dict
+ description:
+ - The alert expression for ManageIQ.
+      - Can either be in the "Miq Expression" format or the "Hash Expression" format.
+ - Required if state is "present".
+ enabled:
+ description:
+ - Enable or disable the alert. Required if state is "present".
+ type: bool
+ options:
+ type: dict
+ description:
+ - Additional alert options, such as notification type and frequency
+
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert with a "hash expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 01
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: ContainerNode
+ expression:
+ eval_method: hostd_log_threshold
+ mode: internal
+ options: {}
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Add an alert with a "miq expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 02
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: Vm
+ expression_type: miq
+ expression:
+ and:
+ - CONTAINS:
+ tag: Vm.managed-environment
+ value: prod
+ - not:
+ CONTAINS:
+ tag: Vm.host.managed-environment
+ value: prod
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert from ManageIQ
+ community.general.manageiq_alerts:
+ state: absent
+ description: Test Alert 01
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlert(object):
+ """ Represent a ManageIQ alert. Can be initialized with both the format
+ we receive from the server and the format we get from the user.
+ """
+ def __init__(self, alert):
+ self.description = alert['description']
+ self.db = alert['db']
+ self.enabled = alert['enabled']
+ self.options = alert['options']
+ self.hash_expression = None
+ self.miq_expression = None
+
+ if 'hash_expression' in alert:
+ self.hash_expression = alert['hash_expression']
+ if 'miq_expression' in alert:
+ self.miq_expression = alert['miq_expression']
+ if 'exp' in self.miq_expression:
+ # miq_expression is a field that needs a special case, because
+ # it's returned surrounded by a dict named exp even though we don't
+ # send it with that dict.
+ self.miq_expression = self.miq_expression['exp']
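+ # Illustrative shape (hypothetical values): the server may return
+ # {'exp': {'and': [...]}} for an expression we submitted as {'and': [...]}.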
+
+ def __eq__(self, other):
+ """ Compare two ManageIQAlert objects
+ """
+ return self.__dict__ == other.__dict__
+
+
+class ManageIQAlerts(object):
+ """ Object to execute alert management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
+
+ def get_alerts(self):
+ """ Get all alerts from ManageIQ
+ """
+ try:
+ response = self.client.get(self.alerts_url + '?expand=resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
+ return response.get('resources', [])
+
+ def validate_hash_expression(self, expression):
+ """ Validate a 'hash expression' alert definition
+ """
+ # hash expressions must have the following fields
+ for key in ['options', 'eval_method', 'mode']:
+ if key not in expression:
+ msg = "Hash expression is missing required field {key}".format(key=key)
+ self.module.fail_json(msg=msg)
+
+ def create_alert_dict(self, params):
+ """ Create a dict representing an alert
+ """
+ if params['expression_type'] == 'hash':
+ # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
+ self.validate_hash_expression(params['expression'])
+ expression_type = 'hash_expression'
+ else:
+ # actually miq_expression, but we call it "expression" for backwards-compatibility
+ expression_type = 'expression'
+
+ # build the alert
+ alert = dict(description=params['description'],
+ db=params['resource_type'],
+ options=params['options'],
+ enabled=params['enabled'])
+
+ # add the actual expression.
+ alert.update({expression_type: params['expression']})
+
+ return alert
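+ # A sketch of the result for a hash expression, using values from the EXAMPLES above:
+ # {'description': 'Test Alert 01', 'db': 'ContainerNode', 'options': {...}, 'enabled': True,
+ # 'hash_expression': {'eval_method': 'hostd_log_threshold', 'mode': 'internal', 'options': {}}}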
+
+ def add_alert(self, alert):
+ """ Add a new alert to ManageIQ
+ """
+ try:
+ result = self.client.post(self.alerts_url, action='create', resource=alert)
+
+ msg = "Alert {description} created successfully: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Creating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to create a hash expression
+ msg = msg.format(description=alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def delete_alert(self, alert):
+ """ Delete an alert
+ """
+ try:
+ result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
+ id=alert['id']),
+ action="delete")
+ msg = "Alert {description} deleted: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Deleting alert {description} failed: {error}"
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def update_alert(self, existing_alert, new_alert):
+ """ Update an existing alert with the values from `new_alert`
+ """
+ new_alert_obj = ManageIQAlert(new_alert)
+ if new_alert_obj == ManageIQAlert(existing_alert):
+ # no change needed - alerts are identical
+ return dict(changed=False, msg="No update needed")
+ else:
+ try:
+ url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
+ result = self.client.post(url, action="edit", resource=new_alert)
+
+ # make sure that the update was indeed successful by comparing
+ # the result to the expected result.
+ if new_alert_obj == ManageIQAlert(result):
+ # success!
+ msg = "Alert {description} updated successfully: {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ return dict(changed=True, msg=msg)
+ else:
+ # unexpected result
+ msg = "Updating alert {description} failed, unexpected result {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = "Updating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to update a hash expression
+ msg = msg.format(description=existing_alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=existing_alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ description=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
+ expression=dict(type='dict'),
+ options=dict(type='dict'),
+ enabled=dict(type='bool'),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['description',
+ 'resource_type',
+ 'expression',
+ 'enabled',
+ 'options']),
+ ('state', 'absent', ['description'])])
+
+ state = module.params['state']
+ description = module.params['description']
+
+ manageiq = ManageIQ(module)
+ manageiq_alerts = ManageIQAlerts(manageiq)
+
+ existing_alert = manageiq.find_collection_resource_by("alert_definitions",
+ description=description)
+
+ # we need to add or update the alert
+ if state == "present":
+ alert = manageiq_alerts.create_alert_dict(module.params)
+
+ if not existing_alert:
+ # an alert with this description doesn't exist yet, let's create it
+ res_args = manageiq_alerts.add_alert(alert)
+ else:
+ # an alert with this description exists, we might need to update it
+ res_args = manageiq_alerts.update_alert(existing_alert, alert)
+
+ # this alert should not exist
+ elif state == "absent":
+ # if we have an alert with this description, delete it
+ if existing_alert:
+ res_args = manageiq_alerts.delete_alert(existing_alert)
+ else:
+ # it doesn't exist, and that's okay
+ msg = "Alert '{description}' does not exist in ManageIQ"
+ msg = msg.format(description=description)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py
new file mode 100644
index 00000000..2050eb63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py
@@ -0,0 +1,648 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder <evertmulder@gmail.com> (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_group
+
+short_description: Management of groups in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
+requirements:
+- manageiq-client
+
+options:
+ state:
+ type: str
+ description:
+ - absent - group should not exist, present - group should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The group description.
+ required: true
+ role_id:
+ type: int
+ description:
+ - The group role id.
+ required: false
+ default: null
+ role:
+ type: str
+ description:
+ - The group role name.
+ - The C(role_id) has precedence over the C(role) when supplied.
+ required: false
+ default: null
+ tenant_id:
+ type: int
+ description:
+ - The tenant for the group identified by the tenant id.
+ required: false
+ default: null
+ tenant:
+ type: str
+ description:
+ - The tenant for the group identified by the tenant name.
+ - The C(tenant_id) has precedence over the C(tenant) when supplied.
+ - Tenant names are case sensitive.
+ required: false
+ default: null
+ managed_filters:
+ description: The tag values per category
+ type: dict
+ required: false
+ default: null
+ managed_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing categories are kept or updated, new categories are added.
+ - In replace mode all categories will be replaced with the supplied C(managed_filters).
+ choices: [ merge, replace ]
+ default: replace
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ type: list
+ elements: str
+ required: false
+ default: null
+ belongsto_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing settings are merged with the supplied C(belongsto_filters).
+ - In replace mode current values are replaced with the supplied C(belongsto_filters).
+ choices: [ merge, replace ]
+ default: replace
+'''
+
+EXAMPLES = '''
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: 'my_tenant'
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant_id: 4
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: >-
+ Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant,
+ apply 3 prov_max_cpu and 2 department tags to the group, and limit access to a cluster for the group
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: my_tenant
+ managed_filters:
+ prov_max_cpu:
+ - '1'
+ - '2'
+ - '4'
+ department:
+ - defense
+ - engineering
+ managed_filters_merge_mode: replace
+ belongsto_filters:
+ - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
+ belongsto_filters_merge_mode: merge
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a group in ManageIQ
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+
+- name: Delete a group in ManageIQ using a token
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+'''
+
+RETURN = '''
+group:
+ description: The group.
+ returned: success
+ type: complex
+ contains:
+ description:
+ description: The group description
+ returned: success
+ type: str
+ id:
+ description: The group id
+ returned: success
+ type: int
+ group_type:
+ description: The group type, system or user
+ returned: success
+ type: str
+ role:
+ description: The group role name
+ returned: success
+ type: str
+ tenant:
+ description: The group tenant name
+ returned: success
+ type: str
+ managed_filters:
+ description: The tag values per category
+ returned: success
+ type: dict
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ returned: success
+ type: list
+ created_on:
+ description: Group creation date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+ updated_on:
+ description: Group update date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQgroup(object):
+ """
+ Object to execute group management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group(self, description):
+ """ Search for group object by description.
+ Returns:
+ the group, or None if group was not found.
+ """
+ groups = self.client.collections.groups.find_by(description=description)
+ if len(groups) == 0:
+ return None
+ else:
+ return groups[0]
+
+ def tenant(self, tenant_id, tenant_name):
+ """ Search for tenant entity by name or id
+ Returns:
+ the tenant entity, None if no id or name was supplied
+ """
+
+ if tenant_id:
+ tenant = self.client.get_entity('tenants', tenant_id)
+ if not tenant:
+ self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
+ return tenant
+ else:
+ if tenant_name:
+ tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
+ if not tenant_res:
+ self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
+ if len(tenant_res) > 1:
+ self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name)
+ tenant = tenant_res[0]
+ return tenant
+ else:
+ # No tenant name or tenant id supplied
+ return None
+
+ def role(self, role_id, role_name):
+ """ Search for a role object by name or id.
+ Returns:
+ the role entity, None no id or name was supplied
+
+ the role, or send a module Fail signal if role not found.
+ """
+ if role_id:
+ role = self.client.get_entity('roles', role_id)
+ if not role:
+ self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
+ return role
+ else:
+ if role_name:
+ role_res = self.client.collections.roles.find_by(name=role_name)
+ if not role_res:
+ self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
+ if len(role_res) > 1:
+ self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name)
+ return role_res[0]
+ else:
+ # No role name or role id supplied
+ return None
+
+ @staticmethod
+ def merge_dict_values(norm_current_values, norm_updated_values):
+ """ Create an merged update object for manageiq group filters.
+
+ The input dict contain the tag values per category.
+ If the new values contain the category, all tags for that category are replaced
+ If the new values do not contain the category, the existing tags are kept
+
+ Returns:
+ the nested array with the merged values, used in the update post body
+ """
+
+ # If no updated values are supplied, in merge mode, the original values must be returned
+ # otherwise the existing tag filters will be removed.
+ if norm_current_values and (not norm_updated_values):
+ return norm_current_values
+
+ # If no existing tag filters exist, use the user supplied values
+ if (not norm_current_values) and norm_updated_values:
+ return norm_updated_values
+
+ # start with norm_current_values's keys and values
+ res = norm_current_values.copy()
+ # replace res with norm_updated_values's keys and values
+ res.update(norm_updated_values)
+ return res
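+ # Merge semantics as a sketch (hypothetical categories):
+ # merge_dict_values({'department': ['/managed/department/eng']},
+ # {'environment': ['/managed/environment/prod']})
+ # == {'department': ['/managed/department/eng'], 'environment': ['/managed/environment/prod']}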
+
+ def delete_group(self, group):
+ """ Deletes a group from manageiq.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+ try:
+ url = '%s/groups/%s' % (self.api_url, group['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(
+ changed=True,
+ msg="deleted group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+
+ if role or norm_managed_filters or belongsto_filters:
+ group.reload(attributes=['miq_user_role_name', 'entitlement'])
+
+ try:
+ current_role = group['miq_user_role_name']
+ except AttributeError:
+ current_role = None
+
+ changed = False
+ resource = {}
+
+ if description and group['description'] != description:
+ resource['description'] = description
+ changed = True
+
+ if tenant and group['tenant_id'] != tenant['id']:
+ resource['tenant'] = dict(id=tenant['id'])
+ changed = True
+
+ if role and current_role != role['name']:
+ resource['role'] = dict(id=role['id'])
+ changed = True
+
+ if norm_managed_filters or belongsto_filters:
+
+ # Only compare if filters are supplied
+ entitlement = group['entitlement']
+
+ if 'filters' not in entitlement:
+ # No existing filters exist, use supplied filters
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+ changed = True
+ else:
+ current_filters = entitlement['filters']
+ new_filters = self.edit_group_edit_filters(current_filters,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+ if new_filters:
+ resource['filters'] = new_filters
+ changed = True
+
+ if not changed:
+ return dict(
+ changed=False,
+ msg="group %s is not changed." % group['description'])
+
+ # try to update group
+ try:
+ self.client.post(group['href'], action='edit', resource=resource)
+ changed = True
+ except Exception as e:
+ self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e)))
+
+ return dict(
+ changed=changed,
+ msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group filters.
+
+ Returns:
+ None if no the group was not updated
+ If the group was updated the post body part for updating the group
+ """
+ filters_updated = False
+ new_filters_resource = {}
+
+ current_belongsto_set = set(current_filters.get('belongsto', []))
+
+ if belongsto_filters:
+ new_belongsto_set = set(belongsto_filters)
+ else:
+ new_belongsto_set = set()
+
+ if current_belongsto_set == new_belongsto_set:
+ new_filters_resource['belongsto'] = current_filters.get('belongsto', [])
+ else:
+ if belongsto_filters_merge_mode == 'merge':
+ current_belongsto_set.update(new_belongsto_set)
+ new_filters_resource['belongsto'] = list(current_belongsto_set)
+ else:
+ new_filters_resource['belongsto'] = list(new_belongsto_set)
+ filters_updated = True
+
+ # Process the managed filter tags
+ # The user input is a dict whose keys are the categories and whose values are arrays of tag strings
+ # ManageIQ (current_managed) uses an array of arrays: one array per category
+ # We normalize the user input from a dict of arrays to a dict of sorted arrays,
+ # and the current manageiq array of arrays also to a dict of sorted arrays, so the two can be compared
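+ # e.g. ManageIQ: [['/managed/department/defense', '/managed/department/engineering']]
+ # normalized: {'department': ['/managed/department/defense', '/managed/department/engineering']}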
+ norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
+
+ if norm_current_filters == norm_managed_filters:
+ if 'managed' in current_filters:
+ new_filters_resource['managed'] = current_filters['managed']
+ else:
+ if managed_filters_merge_mode == 'merge':
+ merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
+ else:
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ filters_updated = True
+
+ if not filters_updated:
+ return None
+
+ return new_filters_resource
+
+ def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
+ """ Creates the group in manageiq.
+
+ Returns:
+ the created group id, name, created_on timestamp,
+ updated_on timestamp.
+ """
+ # check for required arguments
+ for key, value in dict(description=description).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/groups' % self.api_url
+
+ resource = {'description': description}
+
+ if role is not None:
+ resource['role'] = dict(id=role['id'])
+
+ if tenant is not None:
+ resource['tenant'] = dict(id=tenant['id'])
+
+ if norm_managed_filters or belongsto_filters:
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created group %s" % description,
+ group_id=result['results'][0]['id']
+ )
+
+ @staticmethod
+ def normalized_managed_tag_filters_to_miq(norm_managed_filters):
+ if not norm_managed_filters:
+ return None
+
+ return list(norm_managed_filters.values())
+
+ @staticmethod
+ def manageiq_filters_to_sorted_dict(current_filters):
+ current_managed_filters = current_filters.get('managed')
+ if not current_managed_filters:
+ return None
+
+ res = {}
+ for tag_list in current_managed_filters:
+ tag_list.sort()
+ key = tag_list[0].split('/')[2]
+ res[key] = tag_list
+
+ return res
+
+ @staticmethod
+ def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
+ if not managed_filters:
+ return None
+
+ res = {}
+ for cat_key in managed_filters:
+ cat_array = []
+ if not isinstance(managed_filters[cat_key], list):
+ module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
+ for tags in managed_filters[cat_key]:
+ miq_managed_tag = "/managed/" + cat_key + "/" + tags
+ cat_array.append(miq_managed_tag)
+ # Do not add empty categories. ManageIQ will remove all categories that are not supplied
+ if cat_array:
+ cat_array.sort()
+ res[cat_key] = cat_array
+ return res
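+ # Sketch, using the department tags from the EXAMPLES above:
+ # {'department': ['defense', 'engineering']} normalizes to
+ # {'department': ['/managed/department/defense', '/managed/department/engineering']}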
+
+ @staticmethod
+ def create_result_group(group):
+ """ Creates the ansible result object from a manageiq group entity
+
+ Returns:
+ a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
+ """
+ try:
+ role_name = group['miq_user_role_name']
+ except AttributeError:
+ role_name = None
+
+ managed_filters = None
+ belongsto_filters = None
+ if 'filters' in group['entitlement']:
+ filters = group['entitlement']['filters']
+ belongsto_filters = filters.get('belongsto')
+ group_managed_filters = filters.get('managed')
+ if group_managed_filters:
+ managed_filters = {}
+ for tag_list in group_managed_filters:
+ key = tag_list[0].split('/')[2]
+ tags = []
+ for t in tag_list:
+ tags.append(t.split('/')[3])
+ managed_filters[key] = tags
+
+ return dict(
+ id=group['id'],
+ description=group['description'],
+ role=role_name,
+ tenant=group['tenant']['name'],
+ managed_filters=managed_filters,
+ belongsto_filters=belongsto_filters,
+ group_type=group['group_type'],
+ created_on=group['created_on'],
+ updated_on=group['updated_on'],
+ )
+
+
+def main():
+ argument_spec = dict(
+ description=dict(required=True, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ role_id=dict(required=False, type='int'),
+ role=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='int'),
+ tenant=dict(required=False, type='str'),
+ managed_filters=dict(required=False, type='dict'),
+ managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ belongsto_filters=dict(required=False, type='list', elements='str'),
+ belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ description = module.params['description']
+ state = module.params['state']
+ role_id = module.params['role_id']
+ role_name = module.params['role']
+ tenant_id = module.params['tenant_id']
+ tenant_name = module.params['tenant']
+ managed_filters = module.params['managed_filters']
+ managed_filters_merge_mode = module.params['managed_filters_merge_mode']
+ belongsto_filters = module.params['belongsto_filters']
+ belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode']
+
+ manageiq = ManageIQ(module)
+ manageiq_group = ManageIQgroup(manageiq)
+
+ group = manageiq_group.group(description)
+
+ # group should not exist
+ if state == "absent":
+ # if we have a group, delete it
+ if group:
+ res_args = manageiq_group.delete_group(group)
+ # if we do not have a group, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="group '%s' does not exist in manageiq" % description)
+
+ # group should exist
+ if state == "present":
+
+ tenant = manageiq_group.tenant(tenant_id, tenant_name)
+ role = manageiq_group.role(role_id, role_name)
+ norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module)
+ # if we have a group, edit it
+ if group:
+ res_args = manageiq_group.edit_group(group, description, role, tenant,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+
+ # if we do not have a group, create it
+ else:
+ res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters)
+ group = manageiq.client.get_entity('groups', res_args['group_id'])
+
+ group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
+ res_args['group'] = manageiq_group.create_result_group(group)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py
new file mode 100644
index 00000000..600c0bff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_policies
+
+short_description: Management of resource policy_profiles in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - policy_profiles should not exist,
+ - present - policy_profiles should exist,
+ - list - list current policy_profiles and policies.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ policy_profiles:
+ type: list
+ description:
+ - list of dictionaries, each includes the policy_profile 'name' key.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the type of the resource to which the profile should be [un]assigned
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the name of the resource to which the profile should be [un]assigned
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Assign new policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Unassign a policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current policy_profile and policies for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+manageiq_policies:
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ
+ returned: always
+ type: dict
+ sample: '{
+ "changed": false,
+ "profiles": [
+ {
+ "policies": [
+ {
+ "active": true,
+ "description": "OpenSCAP",
+ "name": "openscap policy"
+ },
+ {
+ "active": true,
+ "description": "Analyse incoming container images",
+ "name": "analyse incoming container images"
+ },
+ {
+ "active": true,
+ "description": "Schedule compliance after smart state analysis",
+ "name": "schedule compliance after smart state analysis"
+ }
+ ],
+ "profile_description": "OpenSCAP profile",
+ "profile_name": "openscap profile"
+ }
+ ]
+ }'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+class ManageIQPolicies(object):
+ """
+ Object to execute policies management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def query_profile_href(self, profile):
+ """ Add or Update the policy_profile href field
+
+ Example:
+ {name: STR, ...} => {name: STR, href: STR}
+ """
+ resource = self.manageiq.find_collection_resource_or_fail(
+ "policy_profiles", **profile)
+ return dict(name=profile['name'], href=resource['href'])
+
+ def query_resource_profiles(self):
+ """ Returns a set of the profile objects objects assigned to the resource
+ """
+ url = '{resource_url}/policy_profiles?expand=resources'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api profile object to look like:
+ # {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
+ profiles = [self.clean_profile_object(profile) for profile in resources]
+
+ return profiles
+
+ def query_profile_policies(self, profile_id):
+ """ Returns a set of the policy objects assigned to the resource
+ """
+ url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
+ try:
+ response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('policies', [])
+
+ # clean the returned rest api policy object to look like:
+ # {name: STR, description: STR, active: BOOL}
+ policies = [self.clean_policy_object(policy) for policy in resources]
+
+ return policies
+
+ def clean_policy_object(self, policy):
+ """ Clean a policy object to have human readable form of:
+ {
+ name: STR,
+ description: STR,
+ active: BOOL
+ }
+ """
+ name = policy.get('name')
+ description = policy.get('description')
+ active = policy.get('active')
+
+ return dict(
+ name=name,
+ description=description,
+ active=active)
+
+ def clean_profile_object(self, profile):
+ """ Clean a profile object to have human readable form of:
+ {
+ profile_name: STR,
+ profile_description: STR,
+ policies: ARR<POLICIES>
+ }
+ """
+ profile_id = profile['id']
+ name = profile.get('name')
+ description = profile.get('description')
+ policies = self.query_profile_policies(profile_id)
+
+ return dict(
+ profile_name=name,
+ profile_description=description,
+ policies=policies)
+
+ def profiles_to_update(self, profiles, action):
+ """ Create a list of policies we need to update in ManageIQ.
+
+ Returns:
+ Whether or not a change took place and a message describing the
+ operation executed.
+ """
+ profiles_to_post = []
+ assigned_profiles = self.query_resource_profiles()
+
+ # make a list of assigned full profile names strings
+ # e.g. ['openscap profile', ...]
+ assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
+
+ for profile in profiles:
+ assigned = profile.get('name') in assigned_profiles_set
+
+ if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
+ # add/update the policy profile href field
+ # {name: STR, ...} => {name: STR, href: STR}
+ profile = self.query_profile_href(profile)
+ profiles_to_post.append(profile)
+
+ return profiles_to_post
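+ # Sketch for action='assign' (hypothetical href): with profiles=[{'name': 'openscap profile'}]
+ # and nothing assigned yet, this returns
+ # [{'name': 'openscap profile', 'href': 'http://127.0.0.1:3000/api/policy_profiles/1'}]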
+
+ def assign_or_unassign_profiles(self, profiles, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of profiles needed to be changed
+ profiles_to_post = self.profiles_to_update(profiles, action)
+ if not profiles_to_post:
+ return dict(
+ changed=False,
+ msg="Profiles {profiles} already {action}ed, nothing to do".format(
+ action=action,
+ profiles=profiles))
+
+ # try to assign or unassign profiles to resource
+ url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=profiles_to_post)
+ except Exception as e:
+ msg = "Failed to {action} profile: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed profiles
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed profiles: {profiles}".format(
+ action=action,
+ profiles=profiles))
+
+
+def main():
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ policy_profiles=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['policy_profiles']),
+ ('state', 'absent', ['policy_profiles'])
+ ],
+ )
+
+ policy_profiles = module.params['policy_profiles']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
+
+ manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current profiles for this object
+ current_profiles = manageiq_policies.query_resource_profiles()
+ res_args = dict(changed=False, profiles=current_profiles)
+ else:
+ # assign or unassign the profiles
+ res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py
new file mode 100644
index 00000000..7f55b55b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py
@@ -0,0 +1,928 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: manageiq_provider
+short_description: Management of providers in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed
+ choices: ['absent', 'present', 'refresh']
+ default: 'present'
+ name:
+ type: str
+ description: The provider's name.
+ required: true
+ type:
+ type: str
+ description: The provider's type.
+ choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
+ zone:
+ type: str
+ description: The ManageIQ zone name that will manage the provider.
+ default: 'default'
+ provider_region:
+ type: str
+ description: The provider region name to connect to (e.g. AWS region for Amazon).
+ host_default_vnc_port_start:
+ type: str
+ description: The first port in the host VNC range. Defaults to None.
+ host_default_vnc_port_end:
+ type: str
+ description: The last port in the host VNC range. Defaults to None.
+ subscription:
+ type: str
+ description: Microsoft Azure subscription ID. Defaults to None.
+ project:
+ type: str
+ description: Google Compute Engine Project ID. Defaults to None.
+ azure_tenant_id:
+ type: str
+ description: Tenant ID. Defaults to None.
+ aliases: [ keystone_v3_domain_id ]
+ tenant_mapping_enabled:
+ type: bool
+ default: 'no'
+ description: Whether to enable mapping of existing tenants. Defaults to False.
+ api_version:
+ type: str
+ description: The OpenStack Keystone API version. Defaults to None.
+ choices: ['v2', 'v3']
+
+ provider:
+ description: Default endpoint connection information, required if state is C(present).
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. Defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. Defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. Defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ description: How SSL certificates should be used for HTTPS requests. Defaults to None.
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. Defaults to None.
+
+ metrics:
+ description: Metrics endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. Defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. Defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. Defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. Defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. Defaults to None.
+ path:
+ type: str
+ description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history).
+
+ alerts:
+ description: Alerts endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. Defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. Defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. Defaults to None.
+ validate_certs:
+ type: bool
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to True.
+ default: true
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. Defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. Defaults to None.
+
+ ssh_keypair:
+ description: SSH key pair used for SSH connections to all hosts in this provider.
+ suboptions:
+ hostname:
+ type: str
+ description: Director hostname.
+ required: true
+ userid:
+ type: str
+ description: SSH username.
+ auth_key:
+ type: str
+ description: SSH private key.
+ validate_certs:
+ description:
+ - Whether certificates should be verified for connections.
+ type: bool
+ default: yes
+ aliases: [ verify_ssl ]
+'''
+
+EXAMPLES = '''
+- name: Create a new provider in ManageIQ ('Hawkular' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'OpenShift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ role: 'hawkular'
+ hostname: 'example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1:80'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Delete a provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'absent'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Create a new Amazon provider in ManageIQ using token authentication
+ community.general.manageiq_provider:
+ name: 'EngAmazon'
+ type: 'Amazon'
+ state: 'present'
+ provider:
+ hostname: 'amazon.example.com'
+ userid: 'hello'
+ password: 'world'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+
+- name: Create a new oVirt provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'RHEV'
+ type: 'oVirt'
+ state: 'present'
+ provider:
+ hostname: 'rhev01.example.com'
+ userid: 'admin@internal'
+ password: 'password'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ hostname: 'metrics.example.com'
+ path: 'ovirt_engine_history'
+ userid: 'user_id_metrics'
+ password: 'password_metrics'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+- name: Create a new VMware provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngVMware'
+ type: 'VMware'
+ state: 'present'
+ provider:
+ hostname: 'vcenter.example.com'
+ host_default_vnc_port_start: 5800
+ host_default_vnc_port_end: 5801
+ userid: 'root'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+- name: Create a new Azure provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngAzure'
+ type: 'Azure'
+ provider_region: 'northeurope'
+ subscription: 'e272bd74-f661-484f-b223-88dd128a4049'
+ azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048'
+ state: 'present'
+ provider:
+ hostname: 'azure.example.com'
+ userid: 'e272bd74-f661-484f-b223-88dd128a4049'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://cf-6af0.rhpds.opentlc.com'
+ username: 'admin'
+ password: 'password'
+ validate_certs: false
+
+- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
+ community.general.manageiq_provider:
+ name: 'EngDirector'
+ type: 'Director'
+ api_version: 'v3'
+ state: 'present'
+ provider:
+ hostname: 'director.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ ssh_keypair:
+ hostname: director.example.com
+ userid: heat-admin
+ auth_key: 'SecretSSHPrivateKey'
+
+- name: Create a new OpenStack provider in ManageIQ with amqp metrics
+ community.general.manageiq_provider:
+ name: 'EngOpenStack'
+ type: 'OpenStack'
+ api_version: 'v3'
+ state: 'present'
+ provider_region: 'europe'
+ tenant_mapping_enabled: 'False'
+ keystone_v3_domain_id: 'mydomain'
+ provider:
+ hostname: 'openstack.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ role: amqp
+ hostname: 'amqp.example.com'
+ security_protocol: 'non-ssl'
+ port: 5666
+ userid: admin
+ password: password
+
+
+- name: Create a new GCE provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngGoogle'
+ type: 'GCE'
+ provider_region: 'europe-west1'
+ project: 'project1'
+ state: 'present'
+ provider:
+ hostname: 'gce.example.com'
+ auth_key: 'google_json_key'
+ validate_certs: 'false'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+def supported_providers():
+ return dict(
+ Openshift=dict(
+ class_name='ManageIQ::Providers::Openshift::ContainerManager',
+ authtype='bearer',
+ default_role='default',
+ metrics_role='prometheus',
+ alerts_role='prometheus_alerts',
+ ),
+ Amazon=dict(
+ class_name='ManageIQ::Providers::Amazon::CloudManager',
+ ),
+ oVirt=dict(
+ class_name='ManageIQ::Providers::Redhat::InfraManager',
+ default_role='default',
+ metrics_role='metrics',
+ ),
+ VMware=dict(
+ class_name='ManageIQ::Providers::Vmware::InfraManager',
+ ),
+ Azure=dict(
+ class_name='ManageIQ::Providers::Azure::CloudManager',
+ ),
+ Director=dict(
+ class_name='ManageIQ::Providers::Openstack::InfraManager',
+ ssh_keypair_role="ssh_keypair"
+ ),
+ OpenStack=dict(
+ class_name='ManageIQ::Providers::Openstack::CloudManager',
+ ),
+ GCE=dict(
+ class_name='ManageIQ::Providers::Google::CloudManager',
+ ),
+ )
+
+
+def endpoint_list_spec():
+ return dict(
+ provider=dict(type='dict', options=endpoint_argument_spec()),
+ metrics=dict(type='dict', options=endpoint_argument_spec()),
+ alerts=dict(type='dict', options=endpoint_argument_spec()),
+ ssh_keypair=dict(type='dict', options=endpoint_argument_spec()),
+ )
+
+
+def endpoint_argument_spec():
+ return dict(
+ role=dict(),
+ hostname=dict(required=True),
+ port=dict(type='int'),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ certificate_authority=dict(),
+ security_protocol=dict(
+ choices=[
+ 'ssl-with-validation',
+ 'ssl-with-validation-custom-ca',
+ 'ssl-without-validation',
+ 'non-ssl',
+ ],
+ ),
+ userid=dict(),
+ password=dict(no_log=True),
+ auth_key=dict(no_log=True),
+ subscription=dict(no_log=True),
+ project=dict(),
+ uid_ems=dict(),
+ path=dict(),
+ )
+
+
+def delete_nulls(h):
+ """ Remove null entries from a hash
+
+ Returns:
+ a hash without nulls
+ """
+ if isinstance(h, list):
+ return [delete_nulls(i) for i in h]
+ if isinstance(h, dict):
+ return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None)
+
+ return h
+
+
+class ManageIQProvider(object):
+ """
+ Object to execute provider management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def class_name_to_type(self, class_name):
+ """ Convert class_name to type
+
+ Returns:
+ the type
+ """
+ out = [k for k, v in supported_providers().items() if v['class_name'] == class_name]
+ if len(out) == 1:
+ return out[0]
+
+ return None
+
+ def zone_id(self, name):
+ """ Search for zone id by zone name.
+
+ Returns:
+ the zone id; fails the module if the zone is not found.
+ """
+ zone = self.manageiq.find_collection_resource_by('zones', name=name)
+ if not zone: # zone doesn't exist
+ self.module.fail_json(
+ msg="zone %s does not exist in manageiq" % (name))
+
+ return zone['id']
+
+ def provider(self, name):
+ """ Search for provider object by name.
+
+ Returns:
+ the provider, or None if provider not found.
+ """
+ return self.manageiq.find_collection_resource_by('providers', name=name)
+
+ def build_connection_configurations(self, provider_type, endpoints):
+ """ Build "connection_configurations" objects from
+ requested endpoints provided by user
+
+ Returns:
+ the user requested provider endpoints list
+ """
+ connection_configurations = []
+ endpoint_keys = endpoint_list_spec().keys()
+ provider_defaults = supported_providers().get(provider_type, {})
+
+ # get endpoint defaults
+ endpoint = endpoints.get('provider')
+ default_auth_key = endpoint.get('auth_key')
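+ # (main() only calls this method when the 'provider' endpoint is present,
+ # so endpoint can not be None here)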
+
+ # build a connection_configuration object for each endpoint
+ for endpoint_key in endpoint_keys:
+ endpoint = endpoints.get(endpoint_key)
+ if endpoint:
+ # get role and authtype
+ role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default')
+ if role == 'default':
+ authtype = provider_defaults.get('authtype') or role
+ else:
+ authtype = role
+
+ # set a connection_configuration
+ connection_configurations.append({
+ 'endpoint': {
+ 'role': role,
+ 'hostname': endpoint.get('hostname'),
+ 'port': endpoint.get('port'),
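+ # the [0, 1][...] indexing converts the validate_certs boolean to 0 or 1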
+ 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)],
+ 'security_protocol': endpoint.get('security_protocol'),
+ 'certificate_authority': endpoint.get('certificate_authority'),
+ 'path': endpoint.get('path'),
+ },
+ 'authentication': {
+ 'authtype': authtype,
+ 'userid': endpoint.get('userid'),
+ 'password': endpoint.get('password'),
+ 'auth_key': endpoint.get('auth_key') or default_auth_key,
+ }
+ })
+
+ return connection_configurations
+
+ def delete_provider(self, provider):
+ """ Deletes a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Edit a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ connection_configurations=endpoints,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ )
+
+ # NOTE: we do not check for diffs between the requested and the current
+ # provider. Since we can not read back the current password or auth_key
+ # for comparison, we always submit the endpoints, and every edit
+ # request is sent to the ManageIQ API regardless of the current state.
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to update provider
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the provider %s: %s" % (provider['name'], result))
+
+ def create_provider(self, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Creates the provider in manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ connection_configurations=endpoints,
+ )
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to create a new provider
+ try:
+ url = '%s/providers' % (self.api_url)
+ result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the provider %s: %s" % (name, result['results']))
+
+ def refresh(self, provider, name):
+ """ Trigger provider refresh.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='refresh')
+ except Exception as e:
+ self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="refreshing provider %s" % name)
+
+
+def main():
+ zone_id = None
+ endpoints = []
+ argument_spec = dict(
+ state=dict(choices=['absent', 'present', 'refresh'], default='present'),
+ name=dict(required=True),
+ zone=dict(default='default'),
+ provider_region=dict(),
+ host_default_vnc_port_start=dict(),
+ host_default_vnc_port_end=dict(),
+ subscription=dict(),
+ project=dict(),
+ azure_tenant_id=dict(aliases=['keystone_v3_domain_id']),
+ tenant_mapping_enabled=dict(default=False, type='bool'),
+ api_version=dict(choices=['v2', 'v3']),
+ type=dict(choices=supported_providers().keys()),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+ # add the endpoint arguments to the arguments
+ argument_spec.update(endpoint_list_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['provider']),
+ ('state', 'refresh', ['name'])],
+ required_together=[
+ ['host_default_vnc_port_start', 'host_default_vnc_port_end']
+ ],
+ )
+
+ name = module.params['name']
+ zone_name = module.params['zone']
+ provider_type = module.params['type']
+ raw_endpoints = module.params
+ provider_region = module.params['provider_region']
+ host_default_vnc_port_start = module.params['host_default_vnc_port_start']
+ host_default_vnc_port_end = module.params['host_default_vnc_port_end']
+ subscription = module.params['subscription']
+ uid_ems = module.params['azure_tenant_id']
+ project = module.params['project']
+ tenant_mapping_enabled = module.params['tenant_mapping_enabled']
+ api_version = module.params['api_version']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_provider = ManageIQProvider(manageiq)
+
+ provider = manageiq_provider.provider(name)
+
+ # provider should not exist
+ if state == "absent":
+ # if we have a provider, delete it
+ if provider:
+ res_args = manageiq_provider.delete_provider(provider)
+ # if we do not have a provider, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ # provider should exist
+ if state == "present":
+ # get data user did not explicitly give
+ if zone_name:
+ zone_id = manageiq_provider.zone_id(zone_name)
+
+ # if we do not have a provider_type, use the current provider_type
+ if provider and not provider_type:
+ provider_type = manageiq_provider.class_name_to_type(provider['type'])
+
+ # fail if we could not determine a provider type
+ if not provider_type:
+ manageiq_provider.module.fail_json(
+ msg="missing required argument: provider_type")
+
+ # check supported_providers types
+ if provider_type not in supported_providers().keys():
+ manageiq_provider.module.fail_json(
+ msg="provider_type %s is not supported" % (provider_type))
+
+ # build "connection_configurations" objects from user requested endpoints
+ # "provider" is a required endpoint, if we have it, we have endpoints
+ if raw_endpoints.get("provider"):
+ endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints)
+
+ # if we have a provider, edit it
+ if provider:
+ res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+ # if we do not have a provider, create it
+ else:
+ res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+
+ # refresh provider (trigger sync)
+ if state == "refresh":
+ if provider:
+ res_args = manageiq_provider.refresh(provider, name)
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py
new file mode 100644
index 00000000..68de2324
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_tags
+
+short_description: Management of resource tags in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - tags should not exist,
+ - present - tags should exist,
+ - list - list current tags.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ tags:
+ type: list
+ description:
+ - tags - list of dictionaries, each includes 'name' and 'category' keys.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the relevant resource type in manageiq
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the relevant resource name in manageiq
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create new tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Remove tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def query_resource_id(manageiq, resource_type, resource_name):
+ """ Query the resource name in ManageIQ.
+
+ Returns:
+ the resource id if it exists in manageiq; fails the module otherwise.
+ """
+ resource = manageiq.find_collection_resource_by(resource_type, name=resource_name)
+ if resource:
+ return resource["id"]
+ else:
+ msg = "{resource_name} {resource_type} does not exist in manageiq".format(
+ resource_name=resource_name, resource_type=resource_type)
+ manageiq.module.fail_json(msg=msg)
+
+
+class ManageIQTags(object):
+ """
+ Object to execute tags management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def full_tag_name(self, tag):
+ """ Returns the full tag name in manageiq
+ """
+ return '/managed/{tag_category}/{tag_name}'.format(
+ tag_category=tag['category'],
+ tag_name=tag['name'])
+
+ def clean_tag_object(self, tag):
+ """ Clean a tag object to have human readable form of:
+ {
+ full_name: STR,
+ name: STR,
+ display_name: STR,
+ category: STR
+ }
+ """
+ full_name = tag.get('name')
+ categorization = tag.get('categorization', {})
+
+ return dict(
+ full_name=full_name,
+ name=categorization.get('name'),
+ display_name=categorization.get('display_name'),
+ category=categorization.get('category', {}).get('name'))
+
+ def query_resource_tags(self):
+ """ Returns a set of the tag objects assigned to the resource
+ """
+ url = '{resource_url}/tags?expand=resources&attributes=categorization'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} tags: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api tag object to look like:
+ # {full_name: STR, name: STR, display_name: STR, category: STR}
+ tags = [self.clean_tag_object(tag) for tag in resources]
+
+ return tags
+
+ def tags_to_update(self, tags, action):
+ """ Create a list of tags we need to update in ManageIQ.
+
+ Returns:
+ the subset of the requested tags that still needs to be posted to
+ the API for the given action.
+ """
+ tags_to_post = []
+ assigned_tags = self.query_resource_tags()
+
+ # make a list of assigned full tag names strings
+ # e.g. ['/managed/environment/prod', ...]
+ assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
+
+ for tag in tags:
+ assigned = self.full_tag_name(tag) in assigned_tags_set
+
+ if assigned and action == 'unassign':
+ tags_to_post.append(tag)
+ elif (not assigned) and action == 'assign':
+ tags_to_post.append(tag)
+
+ return tags_to_post
+
+ def assign_or_unassign_tags(self, tags, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of tags needed to be changed
+ tags_to_post = self.tags_to_update(tags, action)
+ if not tags_to_post:
+ return dict(
+ changed=False,
+ msg="Tags already {action}ed, nothing to do".format(action=action))
+
+ # try to assign or unassign tags to resource
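+ # note: the full requested tag list is posted here; tags_to_post is only
+ # used above to detect the no-op case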
+ url = '{resource_url}/tags'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=tags)
+ except Exception as e:
+ msg = "Failed to {action} tag: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed tags
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed tags".format(action=action))
+
+
+def main():
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ tags=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['tags']),
+ ('state', 'absent', ['tags'])
+ ],
+ )
+
+ tags = module.params['tags']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = query_resource_id(manageiq, resource_type, resource_name)
+
+ manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current tags for this object
+ current_tags = manageiq_tags.query_resource_tags()
+ res_args = dict(changed=False, tags=current_tags)
+ else:
+ # assign or unassign the tags
+ res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py
new file mode 100644
index 00000000..3ec174cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py
@@ -0,0 +1,557 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_tenant
+
+short_description: Management of tenants in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
+requirements:
+- manageiq-client
+options:
+ state:
+ type: str
+ description:
+ - absent - tenant should not exist, present - tenant should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The tenant name.
+ required: true
+ description:
+ type: str
+ description:
+ - The tenant description.
+ required: true
+ parent_id:
+ type: int
+ description:
+ - The id of the parent tenant. If not supplied the root tenant is used.
+ - The C(parent_id) takes precedence over C(parent) when supplied.
+ required: false
+ default: null
+ parent:
+ type: str
+ description:
+ - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
+ required: false
+ default: null
+ quotas:
+ type: dict
+ description:
+ - The tenant quotas.
+ - All parameters case sensitive.
+ - 'Valid attributes are:'
+ - ' - C(cpu_allocated) (int): use null to remove the quota.'
+ - ' - C(mem_allocated) (GB): use null to remove the quota.'
+ - ' - C(storage_allocated) (GB): use null to remove the quota.'
+ - ' - C(vms_allocated) (int): use null to remove the quota.'
+ - ' - C(templates_allocated) (int): use null to remove the quota.'
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- name: Update the root tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'My Company'
+ description: 'My company name'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ description: 'Manufacturing department'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ parent_id: 1
+ quotas:
+ cpu_allocated: 100
+ mem_allocated: 50
+ vms_allocated: null
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+
+- name: Delete a tenant in ManageIQ using a token
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+'''
+
+RETURN = '''
+tenant:
+ description: The tenant.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The tenant id
+ returned: success
+ type: int
+ name:
+ description: The tenant name
+ returned: success
+ type: str
+ description:
+ description: The tenant description
+ returned: success
+ type: str
+ parent_id:
+ description: The id of the parent tenant
+ returned: success
+ type: int
+ quotas:
+ description: List of tenant quotas
+ returned: success
+ type: list
+ sample:
+ cpu_allocated: 100
+ mem_allocated: 50
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQTenant(object):
+ """
+ Object to execute tenant management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def tenant(self, name, parent_id, parent):
+ """ Search for tenant object by name and parent_id or parent
+ or the root tenant if no parent or parent_id is supplied.
+ Returns:
+ a (parent_tenant, tenant) tuple; parent_tenant is None for the
+ root tenant, and tenant is None if the tenant was not found.
+ """
+
+ if parent_id:
+ parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
+ parent_tenant = parent_tenant_res[0]
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
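+ # ancestry is a '/'-separated path of ancestor ids (e.g. '1/2');
+ # the last segment is the id of the direct parent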
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if tenant_parent_id == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ if parent:
+ parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
+
+ if len(parent_tenant_res) > 1:
+ self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)
+
+ parent_tenant = parent_tenant_res[0]
+ parent_id = int(parent_tenant['id'])
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if tenant_parent_id == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ # No parent or parent_id supplied; select the root tenant
+ return None, self.client.collections.tenants.find_by(ancestry=None)[0]
+
+ def compare_tenant(self, tenant, name, description):
+ """ Compare tenant fields with new field values.
+
+ Returns:
+ false if the tenant fields differ from the new fields, true otherwise.
+ """
+ found_difference = (
+ (name and tenant['name'] != name) or
+ (description and tenant['description'] != description)
+ )
+
+ return not found_difference
+
+ def delete_tenant(self, tenant):
+ """ Deletes a tenant from manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ try:
+ url = '%s/tenants/%s' % (self.api_url, tenant['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_tenant(self, tenant, name, description):
+ """ Edit a manageiq tenant.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ resource = dict(name=name, description=description, use_config_for_attributes=False)
+
+ # check if we need to update (compare_tenant is true if no difference is found)
+ if self.compare_tenant(tenant, name, description):
+ return dict(
+ changed=False,
+ msg="tenant %s is not changed." % tenant['name'],
+ tenant=tenant['_data'])
+
+ # try to update tenant
+ try:
+ result = self.client.post(tenant['href'], action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the tenant with id %s" % (tenant['id']))
+
+ def create_tenant(self, name, description, parent_tenant):
+ """ Creates the tenant in manageiq.
+
+ Returns:
+ dict with `msg`, `changed` and `tenant_id`
+ """
+ parent_id = parent_tenant['id']
+ # check for required arguments
+ for key, value in dict(name=name, description=description, parent_id=parent_id).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/tenants' % self.api_url
+
+ resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ tenant_id = result['results'][0]['id']
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
+ tenant_id=tenant_id)
+
+ def tenant_quota(self, tenant, quota_key):
+ """ Search for tenant quota object by tenant and quota_key.
+ Returns:
+ a list with the matching quota for the tenant (empty if not found).
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
+
+ return tenant_quotas['resources']
+
+ def tenant_quotas(self, tenant):
+ """ Search for tenant quotas object by tenant.
+ Returns:
+ a list of the quotas for the tenant (empty if none exist).
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
+
+ return tenant_quotas['resources']
+
+ def update_tenant_quotas(self, tenant, quotas):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+
+ changed = False
+ messages = []
+ for quota_key, quota_value in quotas.items():
+ current_quota_filtered = self.tenant_quota(tenant, quota_key)
+ if current_quota_filtered:
+ current_quota = current_quota_filtered[0]
+ else:
+ current_quota = None
+
+ if quota_value:
+ # Convert the user-supplied GB values to bytes
+ if quota_key in ['storage_allocated', 'mem_allocated']:
+ quota_value_int = int(quota_value) * 1024 * 1024 * 1024
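+ # (e.g. mem_allocated: 50 is sent as 50 * 1024**3 = 53687091200 bytes)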
+ else:
+ quota_value_int = int(quota_value)
+ if current_quota:
+ res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
+ else:
+ res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
+ else:
+ if current_quota:
+ res = self.delete_tenant_quota(tenant, current_quota)
+ else:
+ res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
+
+ if res['changed']:
+ changed = True
+
+ messages.append(res['msg'])
+
+ return dict(
+ changed=changed,
+ msg=', '.join(messages))
+
+ def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
+ """ Update the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+
+ if current_quota['value'] == quota_value:
+ return dict(
+ changed=False,
+ msg="tenant quota %s already has value %s" % (quota_key, quota_value))
+ else:
+
+ url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
+ resource = {'value': quota_value}
+ try:
+ self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated tenant quota %s" % quota_key)
+
+ def create_tenant_quota(self, tenant, quota_key, quota_value):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ url = '%s/quotas' % (tenant['href'])
+ resource = {'name': quota_key, 'value': quota_value}
+ try:
+ self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant quota %s" % quota_key)
+
+ def delete_tenant_quota(self, tenant, quota):
+ """ deletes the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ try:
+ result = self.client.post(quota['href'], action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def create_tenant_response(self, tenant, parent_tenant):
+ """ Creates the ansible result object from a manageiq tenant entity
+
+ Returns:
+ a dict with the tenant id, name, description, parent id,
+ quotas
+ """
+ tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
+
+ try:
+ ancestry = tenant['ancestry']
+ tenant_parent_id = ancestry.split("/")[-1]
+ except AttributeError:
+ # The root tenant does not return the ancestry attribute
+ tenant_parent_id = None
+
+ return dict(
+ id=tenant['id'],
+ name=tenant['name'],
+ description=tenant['description'],
+ parent_id=tenant_parent_id,
+ quotas=tenant_quotas
+ )
+
+ @staticmethod
+ def create_tenant_quotas_response(tenant_quotas):
+ """ Creates the ansible result object from a manageiq tenant_quotas entity
+
+ Returns:
+ a dict with the applied quotas, name and value
+ """
+
+ if not tenant_quotas:
+ return {}
+
+ result = {}
+ for quota in tenant_quotas:
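+ # byte-based quotas are reported back in GB, mirroring the module input units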
+ if quota['unit'] == 'bytes':
+ value = float(quota['value']) / (1024 * 1024 * 1024)
+ else:
+ value = quota['value']
+ result[quota['name']] = value
+ return result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ description=dict(required=True, type='str'),
+ parent_id=dict(required=False, type='int'),
+ parent=dict(required=False, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ quotas=dict(type='dict', default={})
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ name = module.params['name']
+ description = module.params['description']
+ parent_id = module.params['parent_id']
+ parent = module.params['parent']
+ state = module.params['state']
+ quotas = module.params['quotas']
+
+ manageiq = ManageIQ(module)
+ manageiq_tenant = ManageIQTenant(manageiq)
+
+ parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
+
+ # tenant should not exist
+ if state == "absent":
+ # if we have a tenant, delete it
+ if tenant:
+ res_args = manageiq_tenant.delete_tenant(tenant)
+ # if we do not have a tenant, nothing to do
+ else:
+ if parent_id:
+ msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
+ else:
+ msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
+
+ res_args = dict(
+ changed=False,
+ msg=msg)
+
+ # tenant should exist
+ if state == "present":
+ # if we have a tenant, edit it
+ if tenant:
+ res_args = manageiq_tenant.edit_tenant(tenant, name, description)
+
+ # if we do not have a tenant, create it
+ else:
+ res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
+ tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
+
+ # quotas are supplied and we have a tenant
+ if quotas:
+ tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+ if tenant_quotas_res['changed']:
+ res_args['changed'] = True
+ res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
+ tenant.reload(expand='resources', attributes=['tenant_quotas'])
+ res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py
new file mode 100644
index 00000000..8905dde2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+#
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_user
+
+short_description: Management of users in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - user should not exist, present - user should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ userid:
+ type: str
+ description:
+ - The unique userid in manageiq, often mentioned as username.
+ required: true
+ name:
+ type: str
+ description:
+ - The user's full name.
+ password:
+ type: str
+ description:
+ - The user's password.
+ group:
+ type: str
+ description:
+ - The name of the group to which the user belongs.
+ email:
+ type: str
+ description:
+ - The user's E-mail address.
+ update_password:
+ type: str
+ default: always
+ choices: ['always', 'on_create']
+ description:
+ - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
+'''
+
+EXAMPLES = '''
+- name: Create a new user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a new user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ using a token
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQUser(object):
+ """
+ Object to execute user management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group_id(self, description):
+ """ Search for group id by group description.
+
+ Returns:
+ the group id; fails the module if the group is not found.
+ """
+ group = self.manageiq.find_collection_resource_by('groups', description=description)
+ if not group: # group doesn't exist
+ self.module.fail_json(
+ msg="group %s does not exist in manageiq" % (description))
+
+ return group['id']
+
+ def user(self, userid):
+ """ Search for user object by userid.
+
+ Returns:
+ the user, or None if user not found.
+ """
+ return self.manageiq.find_collection_resource_by('users', userid=userid)
+
+ def compare_user(self, user, name, group_id, password, email):
+ """ Compare user fields with new field values.
+
+ Returns:
+ false if the user fields differ from the new fields, true otherwise.
+ """
+ found_difference = (
+ (name and user['name'] != name) or
+ (password is not None) or
+ (email and user['email'] != email) or
+ (group_id and user['current_group_id'] != group_id)
+ )
+
+ return not found_difference
+
+ def delete_user(self, user):
+ """ Deletes a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/users/%s' % (self.api_url, user['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_user(self, user, name, group, password, email):
+ """ Edit a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ group_id = None
+ url = '%s/users/%s' % (self.api_url, user['id'])
+
+ resource = dict(userid=user['userid'])
+ if group is not None:
+ group_id = self.group_id(group)
+ resource['group'] = dict(id=group_id)
+ if name is not None:
+ resource['name'] = name
+ if email is not None:
+ resource['email'] = email
+
+ # if there is a password param, but 'update_password' is 'on_create'
+ # then discard the password (since we're editing an existing user)
+ if self.module.params['update_password'] == 'on_create':
+ password = None
+ if password is not None:
+ resource['password'] = password
+
+ # check if we need to update (compare_user is true if no difference is found)
+ if self.compare_user(user, name, group_id, password, email):
+ return dict(
+ changed=False,
+ msg="user %s is not changed." % (user['userid']))
+
+ # try to update user
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the user %s: %s" % (user['userid'], result))
+
+ def create_user(self, userid, name, group, password, email):
+ """ Creates the user in manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ # check for required arguments
+ for key, value in dict(name=name, group=group, password=password).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % (key))
+
+ group_id = self.group_id(group)
+ url = '%s/users' % (self.api_url)
+
+ resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
+ if email is not None:
+ resource['email'] = email
+
+ # try to create a new user
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the user %s: %s" % (userid, result['results']))
+
+
+def main():
+ argument_spec = dict(
+ userid=dict(required=True, type='str'),
+ name=dict(),
+ password=dict(no_log=True),
+ group=dict(),
+ email=dict(),
+ state=dict(choices=['absent', 'present'], default='present'),
+ update_password=dict(choices=['always', 'on_create'],
+ default='always'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ userid = module.params['userid']
+ name = module.params['name']
+ password = module.params['password']
+ group = module.params['group']
+ email = module.params['email']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_user = ManageIQUser(manageiq)
+
+ user = manageiq_user.user(userid)
+
+ # user should not exist
+ if state == "absent":
+ # if we have a user, delete it
+ if user:
+ res_args = manageiq_user.delete_user(user)
+ # if we do not have a user, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="user %s: does not exist in manageiq" % (userid))
+
+ # user should exist
+ if state == "present":
+ # if we have a user, edit it
+ if user:
+ res_args = manageiq_user.edit_user(user, name, group, password, email)
+ # if we do not have a user, create it
+ else:
+ res_args = manageiq_user.create_user(userid, name, group, password, email)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
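+ # the deprecated *_facts alias wraps the result in ansible_facts;
+ # the renamed *_info module returns the keys at the top level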
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: list
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
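
client.get_by always returns a list, so execute_module above only dereferences datacenters[0] when the name lookup actually matched. A toy reproduction of that branching; FakeDatacenters and its records are invented for illustration, standing in for oneview_client.datacenters:

# Illustrative only: FakeDatacenters mimics the hpOneView datacenters client.
class FakeDatacenters(object):
    _data = [{'name': 'My Data Center', 'uri': '/rest/datacenters/1'}]

    def get_by(self, field, value):
        return [d for d in self._data if d.get(field) == value]

    def get_visual_content(self, uri):
        return {'datacenterUri': uri, 'content': []}


def gather(client, name, options):
    info = {}
    datacenters = client.get_by('name', name)
    if 'visualContent' in options:
        # Guard against an empty match: get_by returns a list, never None.
        info['datacenter_visual_content'] = (
            client.get_visual_content(datacenters[0]['uri']) if datacenters else None)
    info['datacenters'] = datacenters
    return info


print(gather(FakeDatacenters(), 'My Data Center', ['visualContent']))
print(gather(FakeDatacenters(), 'missing', ['visualContent']))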
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: list
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
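
The utilization option can arrive either as a bare entry in the options list or as a dict carrying fields/view/refresh/filter, which is why _get_utilization above defaults everything first and only unpacks when it was handed a dict. A standalone sketch of that normalization (pure Python; utilization_kwargs is an invented name):

# Sketch of the parameter normalization performed by _get_utilization above.
def utilization_kwargs(params):
    """Build the keyword arguments for get_utilization(), tolerating both
    a bare '- utilization' entry and a dict of query settings."""
    fields = view = refresh = filter_ = ''
    if isinstance(params, dict):
        fields = params.get('fields')
        view = params.get('view')
        refresh = params.get('refresh')
        filter_ = params.get('filter')
    return dict(fields=fields, view=view, refresh=refresh, filter=filter_)


print(utilization_kwargs(True))  # bare option: all defaults
print(utilization_kwargs({'fields': 'AmbientTemperature', 'view': 'day'}))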
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: list
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
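
For the options.get() calls above to work, the base class evidently converts the user-supplied options list (a mix of strings like 'script' and one-key dicts like {'utilization': {...}}) into a single dict. The helper below is a hedged re-creation of that conversion under that assumption, mirroring only the observable behaviour, not the collection's actual helper:

# Hedged re-creation: how ['script', {'utilization': {'view': 'day'}}]
# becomes a dict usable with options.get('script') in the modules above.
def options_to_dict(options):
    result = {}
    for entry in options or []:
        if isinstance(entry, dict):
            result.update(entry)   # keep the nested settings dict as the value
        else:
            result[str(entry)] = True
    return result


print(options_to_dict(['script', {'utilization': {'view': 'day'}}]))
# {'script': True, 'utilization': {'view': 'day'}}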
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py
new file mode 100644
index 00000000..a81e144a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network
+short_description: Manage OneView Ethernet Network resources
+description:
+ - Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Ethernet Network resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ - C(default_bandwidth_reset) will reset the network connection template to the default.
+ default: present
+ choices: [present, absent, default_bandwidth_reset]
+ data:
+ description:
+ - Dictionary with Ethernet Network properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Ethernet Network is present using the default configuration
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ vlanId: '201'
+ delegate_to: localhost
+
+- name: Update the Ethernet Network changing bandwidth and purpose
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ purpose: Management
+ bandwidth:
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ newName: 'Renamed Ethernet Network'
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is absent
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: 'New Ethernet Network'
+ delegate_to: localhost
+
+- name: Create Ethernet networks in bulk
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ vlanIdRange: '1-10,15,17'
+ purpose: General
+ namePrefix: TestNetwork
+ smartLink: false
+ privateNetwork: false
+ bandwidth:
+ maximumBandwidth: 10000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Reset to the default network connection template
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: default_bandwidth_reset
+ data:
+ name: 'Test Ethernet Network'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+ethernet_network:
+ description: Has the facts about the Ethernet Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+
+ethernet_network_bulk:
+ description: Has the facts about the Ethernet Networks affected by the bulk insert.
+ returned: When 'vlanIdRange' attribute is in data argument. Can be null.
+ type: dict
+
+ethernet_network_connection_template:
+ description: Has the facts about the Ethernet Network Connection Template.
+ returned: On state 'default_bandwidth_reset'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class EthernetNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'Ethernet Network created successfully.'
+ MSG_UPDATED = 'Ethernet Network updated successfully.'
+ MSG_DELETED = 'Ethernet Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
+ MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
+
+ MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
+ MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
+ MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
+ MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
+
+ RESOURCE_FACT_NAME = 'ethernet_network'
+
+ def __init__(self):
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
+ data=dict(type='dict', required=True),
+ )
+
+ super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+
+ changed, msg, ansible_facts, resource = False, '', {}, None
+
+ if self.data.get('name'):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ if self.data.get('vlanIdRange'):
+ return self._bulk_present()
+ else:
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+ elif self.state == 'default_bandwidth_reset':
+ changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)
+
+ def _present(self, resource):
+
+ bandwidth = self.data.pop('bandwidth', None)
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if bandwidth:
+ if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
+ result['changed'] = True
+ result['msg'] = self.MSG_UPDATED
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
+
+ return result
+
+ def _bulk_present(self):
+ vlan_id_range = self.data['vlanIdRange']
+ result = dict(ansible_facts={})
+ ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ if not ethernet_networks:
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_BULK_CREATED
+
+ else:
+ vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
+ for net in ethernet_networks[:]:
+ vlan_ids.remove(net['vlanId'])
+
+ if len(vlan_ids) == 0:
+ result['msg'] = self.MSG_BULK_ALREADY_EXIST
+ result['changed'] = False
+ else:
+ if len(vlan_ids) == 1:
+ self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
+ else:
+ self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
+
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_MISSING_BULK_CREATED
+ result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ return result
+
+ def _update_connection_template(self, ethernet_network, bandwidth):
+
+ if 'connectionTemplateUri' not in ethernet_network:
+ return False, None
+
+ connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
+
+ merged_data = connection_template.copy()
+ merged_data.update({'bandwidth': bandwidth})
+
+ if not self.compare(connection_template, merged_data):
+ connection_template = self.oneview_client.connection_templates.update(merged_data)
+ return True, connection_template
+ else:
+ return False, None
+
+ def _default_bandwidth_reset(self, resource):
+
+ if not resource:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
+
+ default_connection_template = self.oneview_client.connection_templates.get_default()
+
+ changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
+
+ return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
+ ethernet_network_connection_template=connection_template)
+
+
+def main():
+ EthernetNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
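
_bulk_present above only creates the VLANs missing from an already-existing range, relying on the client's dissociate_values_or_ranges helper to expand a spec like '1-10,15,17' into individual ids. A standalone sketch of that arithmetic; expand_vlan_range is a hedged stand-in for the hpOneView helper:

# Standalone sketch of the bulk-present arithmetic in _bulk_present above.
def expand_vlan_range(spec):
    """Expand '1-10,15,17' into [1, 2, ..., 10, 15, 17]."""
    ids = []
    for part in spec.split(','):
        if '-' in part:
            lo, hi = part.split('-')
            ids.extend(range(int(lo), int(hi) + 1))
        else:
            ids.append(int(part))
    return ids


existing = [{'vlanId': v} for v in (1, 2, 3, 15)]
wanted = expand_vlan_range('1-10,15,17')
missing = [v for v in wanted if v not in set(net['vlanId'] for net in existing)]
print(missing)  # [4, 5, 6, 7, 8, 9, 10, 17] -> re-encoded and sent to create_bulk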
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: list
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: list
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: list
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
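
get_associated_profiles returns bare URIs, so the module dereferences each one with a follow-up GET; on a heavily used network that is one request per profile. A toy illustration of the pattern (FakeProfiles and its records are invented):

# Toy illustration of the URI-dereferencing pattern in
# __get_associated_profiles / __get_associated_uplink_groups above.
class FakeProfiles(object):
    _db = {'/rest/server-profiles/1': {'name': 'profile-1'},
           '/rest/server-profiles/2': {'name': 'profile-2'}}

    def get(self, uri):
        return self._db[uri]


associated_uris = ['/rest/server-profiles/1', '/rest/server-profiles/2']
profiles = FakeProfiles()
# One GET per URI, exactly like the list comprehensions in the module.
print([profiles.get(uri) for uri in associated_uris])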
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: list
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: list
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: list
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
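
Both spellings of the old name have to be recognized because a playbook may invoke the module by its short legacy name or by its fully qualified collection name, and self.module._name carries whichever was used. A condensed sketch of that alias check:

# Sketch of the alias check that drives the deprecation warning above.
OLD_NAMES = ('oneview_ethernet_network_facts',
             'community.general.oneview_ethernet_network_facts')


def is_old_facts(invoked_name):
    """True when the module was reached through the deprecated alias,
    whether short or fully qualified."""
    return invoked_name in OLD_NAMES


print(is_old_facts('community.general.oneview_ethernet_network_facts'))  # True
print(is_old_facts('community.general.oneview_ethernet_network_info'))   # False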
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py
new file mode 100644
index 00000000..45fa035c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network
+short_description: Manage OneView Fibre Channel Network resources
+description:
+ - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
+requirements:
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the Fibre Channel Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ choices: ['present', 'absent']
+ required: true
+ data:
+ description:
+ - Dictionary with the Fibre Channel Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Fibre Channel Network is present using the default configuration
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+
+- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach'
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ fabricType: 'DirectAttach'
+
+- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+
+- name: Ensure that the Fibre Channel Network is absent
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: absent
+ data:
+ name: 'New FC Network'
+'''
+
+RETURN = '''
+fc_network:
+ description: Has the facts about the managed OneView FC Network.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FC Network created successfully.'
+ MSG_UPDATED = 'FC Network updated successfully.'
+ MSG_DELETED = 'FC Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FC Network is already present.'
+ MSG_ALREADY_ABSENT = 'FC Network is already absent.'
+ RESOURCE_FACT_NAME = 'fc_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(
+ required=True,
+ choices=['present', 'absent']))
+
+ super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fc_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self._present(resource)
+ else:
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fc_network', scope_uris)
+ return result
+
+
+def main():
+ FcNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
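
_present above pops scopeUris out of data before the idempotent create-or-update, so the comparison against the existing resource is not polluted by a field that is managed through a separate scopes call. A hedged sketch of that split; ensure_present and set_scopes are invented stand-ins for resource_present and resource_scopes_set:

# Hedged sketch of the scopeUris split performed by _present above.
def present(data):
    scope_uris = data.pop('scopeUris', None)   # keep scopes out of the data diff
    result = ensure_present(data)              # create/update on the rest
    if scope_uris is not None:
        result = set_scopes(result, scope_uris)
    return result


def ensure_present(data):
    # Stand-in: pretend the resource was created as requested.
    return {'changed': True, 'ansible_facts': {'fc_network': dict(data)}}


def set_scopes(result, scope_uris):
    result['ansible_facts']['fc_network']['scopeUris'] = scope_uris
    return result


print(present({'name': 'New FC Network', 'scopeUris': ['/rest/scopes/00SC123456']}))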
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: list
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
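
The params dict is passed straight through to get_all as keyword arguments, so start/count/sort/filter behave exactly like the corresponding REST query parameters. A minimal sketch with a fake get_all that just echoes the query it received:

# Minimal sketch: how the 'params' option becomes get_all(**facts_params).
def fake_get_all(start=0, count=-1, sort='', filter=''):
    # Stand-in for oneview_client.fc_networks.get_all; echoes the query.
    return {'start': start, 'count': count, 'sort': sort, 'filter': filter}


facts_params = {'start': 1, 'count': 3,
                'sort': 'name:descending', 'filter': 'fabricType=FabricAttach'}
print(fake_get_all(**facts_params))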
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: list
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py
new file mode 100644
index 00000000..79d8ae21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network
+short_description: Manage OneView FCoE Network resources
+description:
+ - Provides an interface to manage FCoE Network resources. Can create, update, or delete.
+requirements:
+ - "python >= 2.7.9"
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the FCoE Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - Dictionary with FCoE Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that FCoE Network is present using the default configuration
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: Test FCoE Network
+ vlanId: 201
+ delegate_to: localhost
+
+- name: Update the FCOE network scopes
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: New FCoE Network
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+ delegate_to: localhost
+
+- name: Ensure that FCoE Network is absent
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: New FCoE Network
+ delegate_to: localhost
+'''
+
+RETURN = '''
+fcoe_network:
+ description: Has the facts about the OneView FCoE Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FCoE Network created successfully.'
+ MSG_UPDATED = 'FCoE Network updated successfully.'
+ MSG_DELETED = 'FCoE Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FCoE Network is already present.'
+ MSG_ALREADY_ABSENT = 'FCoE Network is already absent.'
+ RESOURCE_FACT_NAME = 'fcoe_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent']))
+
+ super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fcoe_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fcoe_network', scope_uris)
+ return result
+
+
+def main():
+ FcoeNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
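
resource_present in the base class follows the same merge-then-compare idempotency rule seen in _update_connection_template earlier in this set: merge the desired data over the current resource and only issue an update when the merge changed something. A hedged sketch of the idea (the real compare is more tolerant of ordering and case; this flat dict comparison is a simplification):

# Hedged sketch of merge-then-compare idempotency behind resource_present.
def needs_update(current, desired):
    merged = dict(current)
    merged.update(desired)
    return merged != current, merged


current = {'name': 'Test FCoE Network', 'vlanId': 201}
print(needs_update(current, {'vlanId': 201}))   # (False, ...) -> no API call
print(needs_update(current, {'vlanId': 202}))   # (True, ...)  -> update issued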
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about a FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: list
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
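
Every *_info module in this set reduces to the same two-way fetch: a name means get_by('name', ...), otherwise get_all with the pagination params. One last condensed sketch of that dispatch (FakeFcoeNetworks is invented for illustration):

# Condensed sketch of the fetch dispatch shared by the *_info modules above.
class FakeFcoeNetworks(object):
    _data = [{'name': 'Test FCoE Network Information', 'vlanId': 2}]

    def get_by(self, field, value):
        return [n for n in self._data if n.get(field) == value]

    def get_all(self, **params):
        return list(self._data)


def fetch(client, name=None, facts_params=None):
    # Name wins; otherwise fall back to a (possibly filtered) get_all.
    if name:
        return client.get_by('name', name)
    return client.get_all(**(facts_params or {}))


client = FakeFcoeNetworks()
print(fetch(client, name='Test FCoE Network Information'))
print(fetch(client, facts_params={'count': 3}))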
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve information about one or more of the OneView FCoE Networks
+description:
+    - Retrieve information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about a FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
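+        # Detect whether the module was invoked via the deprecated *_facts alias; this changes the return shape below.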
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
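+        # Per the hpOneView SDK, get_by('name', ...) returns a list of matches, while
+        # get_all accepts the common facts params (start, count, sort, filter).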
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py
new file mode 100644
index 00000000..8ca49e21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group
+short_description: Manage OneView Logical Interconnect Group resources
+description:
+ - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Logical Interconnect Group resource.
+ C(absent) will remove the resource from OneView, if it exists.
+ C(present) will ensure data properties are compliant with OneView.
+ choices: [absent, present]
+ default: present
+ data:
+ description:
+            - Dictionary with the Logical Interconnect Group properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Logical Interconnect Group is present
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ uplinkSets: []
+ enclosureType: C7000
+ interconnectMapTemplate:
+ interconnectMapEntryTemplates:
+ - logicalDownlinkUri: ~
+ logicalLocation:
+ locationEntries:
+ - relativeValue: 1
+ type: Bay
+ - relativeValue: 1
+ type: Enclosure
+ permittedInterconnectTypeName: HP VC Flex-10/10D Module
+            # Alternatively you can specify the permittedInterconnectTypeUri
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group has the specified scopes
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ scopeUris:
+ - /rest/scopes/00SC123456
+ - /rest/scopes/01SC123456
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is present with name 'Test'
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: New Logical Interconnect Group
+ newName: Test
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is absent
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: New Logical Interconnect Group
+ delegate_to: localhost
+'''
+
+RETURN = '''
+logical_interconnect_group:
+ description: Has the facts about the OneView Logical Interconnect Group.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class LogicalInterconnectGroupModule(OneViewModuleBase):
+ MSG_CREATED = 'Logical Interconnect Group created successfully.'
+ MSG_UPDATED = 'Logical Interconnect Group updated successfully.'
+ MSG_DELETED = 'Logical Interconnect Group deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.'
+ MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.'
+ MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.'
+
+ RESOURCE_FACT_NAME = 'logical_interconnect_group'
+
+ def __init__(self):
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict')
+ )
+
+ super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.logical_interconnect_groups
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
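+        # scopeUris are applied in a second step: the resource is ensured present first, then the scopes are set.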
+ scope_uris = self.data.pop('scopeUris', None)
+
+ self.__replace_name_by_uris(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris)
+
+ return result
+
+ def __replace_name_by_uris(self, data):
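+        # OneView expects URIs; resolve any permittedInterconnectTypeName entries to their URI equivalents.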
+ map_template = data.get('interconnectMapTemplate')
+
+ if map_template:
+ map_entry_templates = map_template.get('interconnectMapEntryTemplates')
+ if map_entry_templates:
+ for value in map_entry_templates:
+ permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None)
+ if permitted_interconnect_type_name:
+ value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name(
+ permitted_interconnect_type_name).get('uri')
+
+ def __get_interconnect_type_by_name(self, name):
+ i_type = self.oneview_client.interconnect_types.get_by('name', name)
+ if i_type:
+ return i_type[0]
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND)
+
+
+def main():
+ LogicalInterconnectGroupModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+    - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
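+
+# As an alternative to the inline credentials above, these modules also accept a
+# JSON configuration file (see the community.general.oneview documentation
+# fragment), for example: config: /etc/oneview/oneview_config.json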
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+    - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
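+
+# As an alternative to the inline credentials above, these modules also accept a
+# JSON configuration file (see the community.general.oneview documentation
+# fragment), for example: config: /etc/oneview/oneview_config.json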
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py
new file mode 100644
index 00000000..cc70d5e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set
+short_description: Manage HPE OneView Network Set resources
+description:
+ - Provides an interface to manage Network Set resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Network Set resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+            - Dictionary with the Network Set properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Create a Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ networkUris:
+ - Test Ethernet Network_1 # can be a name
+ - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
+ delegate_to: localhost
+
+- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ newName: OneViewSDK Test Network Set - Renamed
+ networkUris:
+ - Test Ethernet Network_1
+ delegate_to: localhost
+
+- name: Delete the Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: OneViewSDK Test Network Set - Renamed
+ delegate_to: localhost
+
+- name: Update the Network set with two scopes
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ scopeUris:
+ - /rest/scopes/01SC123456
+ - /rest/scopes/02SC123456
+ delegate_to: localhost
+'''
+
+RETURN = '''
+network_set:
+ description: Has the facts about the Network Set.
+ returned: On state 'present', but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class NetworkSetModule(OneViewModuleBase):
+ MSG_CREATED = 'Network Set created successfully.'
+ MSG_UPDATED = 'Network Set updated successfully.'
+ MSG_DELETED = 'Network Set deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Network Set is already present.'
+ MSG_ALREADY_ABSENT = 'Network Set is already absent.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: '
+ RESOURCE_FACT_NAME = 'network_set'
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict'))
+
+ def __init__(self):
+ super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.network_sets
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ self._replace_network_name_by_uri(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris)
+ return result
+
+ def _get_ethernet_network_by_name(self, name):
+ result = self.oneview_client.ethernet_networks.get_by('name', name)
+ return result[0] if result else None
+
+ def _get_network_uri(self, network_name_or_uri):
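+        # Entries may be given as a full REST URI or as an Ethernet network name; names are resolved to URIs below.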
+ if network_name_or_uri.startswith('/rest/ethernet-networks'):
+ return network_name_or_uri
+ else:
+ enet_network = self._get_ethernet_network_by_name(network_name_or_uri)
+ if enet_network:
+ return enet_network['uri']
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri)
+
+ def _replace_network_name_by_uri(self, data):
+ if 'networkUris' in data:
+ data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']]
+
+
+def main():
+ NetworkSetModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
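+        # The without-Ethernet listing takes a string filter, so build a quoted name filter only when a name was given.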
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
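+        # The without-Ethernet listing takes a string filter, so build a quoted name filter only when a name was given.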
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py
new file mode 100644
index 00000000..57e93475
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager
+short_description: Manage OneView SAN Manager resources
+description:
+ - Provides an interface to manage SAN Manager resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+            - Indicates the desired state for the SAN Manager resource.
+ - C(present) ensures data properties are compliant with OneView.
+ - C(absent) removes the resource from OneView, if it exists.
+ - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+ default: present
+ choices: [present, absent, connection_information_set]
+ data:
+ description:
+            - Dictionary with SAN Manager properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ providerDisplayName: Brocade Network Advisor
+ connectionInfo:
+ - name: Host
+ value: 172.18.15.1
+ - name: Port
+ value: 5989
+ - name: Username
+ value: username
+ - name: Password
+ value: password
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Ensure a Device Manager for the Cisco SAN Provider is present
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.20.1
+ providerDisplayName: Cisco
+ connectionInfo:
+ - name: Host
+ value: 172.18.20.1
+ - name: SnmpPort
+ value: 161
+ - name: SnmpUserName
+ value: admin
+ - name: SnmpAuthLevel
+ value: authnopriv
+ - name: SnmpAuthProtocol
+ value: sha
+ - name: SnmpAuthString
+ value: password
+ delegate_to: localhost
+
+- name: Sets the SAN Manager connection information
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: connection_information_set
+ data:
+ connectionInfo:
+ - name: Host
+ value: '172.18.15.1'
+ - name: Port
+ value: '5989'
+ - name: Username
+ value: 'username'
+ - name: Password
+ value: 'password'
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Refreshes the SAN Manager
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.15.1
+ refreshState: RefreshPending
+ delegate_to: localhost
+
+- name: Delete the SAN Manager recently created
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: '172.18.15.1'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+san_manager:
+ description: Has the OneView facts about the SAN Manager.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
+
+
+class SanManagerModule(OneViewModuleBase):
+ MSG_CREATED = 'SAN Manager created successfully.'
+ MSG_UPDATED = 'SAN Manager updated successfully.'
+ MSG_DELETED = 'SAN Manager deleted successfully.'
+ MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
+ MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
+ MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
+ data=dict(type='dict', required=True)
+ )
+
+ def __init__(self):
+ super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
+ self.resource_client = self.oneview_client.san_managers
+
+ def execute_module(self):
+ if self.data.get('connectionInfo'):
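+            # Assumes one of the connectionInfo entries is named 'Host'; its value is used as the resource name.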
+ for connection_hash in self.data.get('connectionInfo'):
+ if connection_hash.get('name') == 'Host':
+ resource_name = connection_hash.get('value')
+ elif self.data.get('name'):
+ resource_name = self.data.get('name')
+ else:
+ msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
+ msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
+            raise OneViewModuleValueError(msg)
+
+ resource = self.resource_client.get_by_name(resource_name)
+
+ if self.state == 'present':
+ changed, msg, san_manager = self._present(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ elif self.state == 'absent':
+ return self.resource_absent(resource, method='remove')
+
+ elif self.state == 'connection_information_set':
+ changed, msg, san_manager = self._connection_information_set(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ def _present(self, resource):
+ if not resource:
+ provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+ return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+ resource.pop('connectionInfo', None)
+ merged_data.pop('connectionInfo', None)
+
+ if self.compare(resource, merged_data):
+ return False, self.MSG_ALREADY_PRESENT, resource
+ else:
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _connection_information_set(self, resource):
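+        # refreshState is removed from the payload so that only the connection information is updated here.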
+ if not resource:
+ return self._present(resource)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+ merged_data.pop('refreshState', None)
+ if not self.data.get('connectionInfo', None):
+ raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _get_provider_uri_by_display_name(self, data):
+ display_name = data.get('providerDisplayName')
+ provider_uri = self.resource_client.get_provider_uri(display_name)
+
+ if not provider_uri:
+ raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+ return provider_uri
+
+
+def main():
+ SanManagerModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+    - Retrieve information about one or more of the SAN Managers from OneView.
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
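+            # get_by_provider_display_name returns a single resource (or None); normalize it to a list.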
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+    - Retrieve information about one or more of the SAN Managers from OneView.
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
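+            # get_by_provider_display_name returns a single resource (or None); normalize it to a list.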
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py
new file mode 100644
index 00000000..ea97ecdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_command
+short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+ - For use with Dell iDRAC operations that require Redfish OEM extensions
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Create BIOS configuration job (schedule BIOS setting update)
+ community.general.idrac_redfish_command:
+ category: Systems
+ command: CreateBiosConfigJob
+ resource_id: System.Embedded.1
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
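+
+# A variant of the task above (a usage sketch following the register/debug
+# pattern used elsewhere in this collection): capture the module result so the
+# returned msg can be inspected.
+  - name: Create BIOS configuration job and inspect the result
+    community.general.idrac_redfish_command:
+      category: Systems
+      command: CreateBiosConfigJob
+      resource_id: System.Embedded.1
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+    register: bios_job
+
+  - ansible.builtin.debug:
+      msg: "{{ bios_job.msg }}"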
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def create_bios_config_job(self):
+ result = {}
+ key = "Bios"
+ jobs = "Jobs"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uris[0])
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
+ "@odata.id"]
+
+ payload = {"TargetSettingsURI": set_bios_attr_uri}
+ response = self.post_request(
+ self.root_uri + self.manager_uri + "/" + jobs, payload)
+ if response['ret'] is False:
+ return response
+
+ response_output = response['resp'].__dict__
+ job_id = response_output["headers"]["Location"]
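+        # The Location header ends in the job ID (JID_...); this assumes the header is present and matches the pattern.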
+ job_id = re.search("JID_.+", job_id).group()
+ # Currently not passing job_id back to user but patch is coming
+ return {'ret': True, 'msg': "Config job %s created" % job_id}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["CreateBiosConfigJob"],
+ "Accounts": [],
+ "Manager": []
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "CreateBiosConfigJob":
+ # execute only if we find a Managers resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ result = rf_utils.create_bios_config_job()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ module.exit_json(changed=True, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py
new file mode 100644
index 00000000..485d54cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_config
+short_description: Manages servers through iDRAC using Dell Redfish APIs
+description:
+ - For use with Dell iDRAC operations that require Redfish OEM extensions.
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ set or update a configuration attribute.
+options:
+ category:
+ required: true
+ type: str
+ description:
+ - Category to execute on iDRAC
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC
+ - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
+ I(SetSystemAttributes) are mutually exclusive commands when C(category)
+ is I(Manager)
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC
+ type: str
+ manager_attribute_name:
+ required: false
+ description:
+ - (deprecated) name of iDRAC attribute to update
+ type: str
+ manager_attribute_value:
+ required: false
+ description:
+ - (deprecated) value of iDRAC attribute to update
+ type: str
+ manager_attributes:
+ required: false
+ description:
+ - dictionary of iDRAC attribute name and value pairs to update
+ default: {}
+ type: dict
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to iDRAC controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Enable NTP and set NTP server and Time zone attributes in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ NTPConfigGroup.1.NTPEnable: "Enabled"
+ NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
+ Time.1.Timezone: "{{ timezone }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable Syslog and set Syslog servers in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SysLog.1.SysLogEnable: "Enabled"
+ SysLog.1.Server1: "{{ syslog_server1 }}"
+ SysLog.1.Server2: "{{ syslog_server2 }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Configure SNMP community string, port, protocol and trap format
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SNMP.1.AgentEnable: "Enabled"
+ SNMP.1.AgentCommunity: "public_community_string"
+ SNMP.1.TrapFormat: "SNMPv1"
+ SNMP.1.SNMPProtocol: "All"
+ SNMP.1.DiscoveryPort: 161
+ SNMP.1.AlertPort: 162
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable CSIOR
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetLifecycleControllerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Set Power Supply Redundancy Policy to A/B Grid Redundant
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetSystemAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments
+)
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def set_manager_attributes(self, command):
+
+ result = {}
+ required_arg_spec = {'manager_attributes': {'required': True}}
+
+ try:
+ check_required_arguments(required_arg_spec, self.module.params)
+
+ except TypeError as e:
+ msg = to_native(e)
+ self.module.fail_json(msg=msg)
+
+ key = "Attributes"
+ command_manager_attributes_uri_map = {
+ "SetManagerAttributes": self.manager_uri,
+ "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1",
+ "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1"
+ }
+ manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri)
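+ # SetLifecycleControllerAttributes and SetSystemAttributes target fixed
+ # Dell OEM manager URIs from the map above, while SetManagerAttributes
+ # (and any unmapped command) falls back to the discovered manager_uri.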
+
+ attributes = self.module.params['manager_attributes']
+ manager_attr_name = self.module.params.get('manager_attribute_name')
+ manager_attr_value = self.module.params.get('manager_attribute_value')
+
+ # manager attributes to update
+ if manager_attr_name:
+ attributes.update({manager_attr_name: manager_attr_value})
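+ # A task still using the deprecated manager_attribute_name /
+ # manager_attribute_value pair is merged into the same attributes dict
+ # here, so it is validated and patched together with manager_attributes.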
+
+ attrs_to_patch = {}
+ attrs_skipped = {}
+
+ # Search for key entry and extract URI from it
+ response = self.get_request(self.root_uri + manager_uri + "/" + key)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False,
+ 'msg': "%s: Key %s not found" % (command, key)}
+
+ for attr_name, attr_value in attributes.items():
+ # Check if attribute exists
+ if attr_name not in data[u'Attributes']:
+ return {'ret': False,
+ 'msg': "%s: Manager attribute %s not found" % (command, attr_name)}
+
+ # Find out if value is already set to what we want. If yes, exclude
+ # those attributes
+ if data[u'Attributes'][attr_name] == attr_value:
+ attrs_skipped.update({attr_name: attr_value})
+ else:
+ attrs_to_patch.update({attr_name: attr_value})
+
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "Manager attributes already set"}
+
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]
+}
+
+# list of mutually exclusive commands for a category
+CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = {
+ "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ manager_attribute_name=dict(default=None),
+ manager_attribute_value=dict(default=None),
+ manager_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # check for mutually exclusive commands
+ try:
+ # check_mutually_exclusive accepts a single list or list of lists that
+ # are groups of terms that should be mutually exclusive with one another
+ # and checks that against a dictionary
+ check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category],
+ dict.fromkeys(command_list, True))
+
+ except TypeError as e:
+ module.fail_json(msg=to_native(e))
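+ # For example, a task passing command: SetManagerAttributes,SetSystemAttributes
+ # fails here because both commands belong to the same exclusion group.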
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]:
+ result = rf_utils.set_manager_attributes(command)
+
+ if any((module.params['manager_attribute_name'], module.params['manager_attribute_value'])):
+ module.deprecate(msg='Arguments `manager_attribute_name` and '
+ '`manager_attribute_value` are deprecated. '
+ 'Use `manager_attributes` instead for passing in '
+ 'the manager attribute name and value pairs',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+ - For use with Dell EMC iDRAC operations that require Redfish OEM extensions.
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns a list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get Manager attributes with a timeout of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
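+ # On success the returned dict resembles (hypothetical sample; the Id
+ # values match the ones referenced in the EXAMPLES above):
+ #   {'ret': True, 'entries': [
+ #       {'Id': 'iDRACAttributes', 'Attributes': {...}},
+ #       {'Id': 'LCAttributes', 'Attributes': {...}},
+ #       {'Id': 'SystemAttributes', 'Attributes': {...}}]}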
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+ - For use with Dell EMC iDRAC operations that require Redfish OEM extensions.
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns a list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get Manager attributes with a timeout of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py
new file mode 100644
index 00000000..78007f1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py
@@ -0,0 +1,756 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_command
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+ - Manages OOB controller, for example reboot and log management.
+ - Manages OOB controller users, for example add, remove, and update.
+ - Manages system power, for example on, off, graceful and forced reboot.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - Username for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ id:
+ required: false
+ aliases: [ account_id ]
+ description:
+ - ID of account to delete/modify
+ type: str
+ new_username:
+ required: false
+ aliases: [ account_username ]
+ description:
+ - Username of account to add/delete/modify
+ type: str
+ new_password:
+ required: false
+ aliases: [ account_password ]
+ description:
+ - New password of account to add/modify
+ type: str
+ roleid:
+ required: false
+ aliases: [ account_roleid ]
+ description:
+ - Role of account to add/modify
+ type: str
+ bootdevice:
+ required: false
+ description:
+ - Boot device to use when setting boot configuration
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ uefi_target:
+ required: false
+ description:
+ - UEFI target when bootdevice is "UefiTarget"
+ type: str
+ boot_next:
+ required: false
+ description:
+ - BootNext target when bootdevice is "UefiBootNext"
+ type: str
+ update_username:
+ required: false
+ aliases: [ account_updatename ]
+ description:
+ - New user name to assign to the account named in account_username
+ type: str
+ version_added: '0.2.0'
+ account_properties:
+ required: false
+ description:
+ - properties of account service to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ update_image_uri:
+ required: false
+ description:
+ - The URI of the image for the update
+ type: str
+ version_added: '0.2.0'
+ update_protocol:
+ required: false
+ description:
+ - The protocol for the update
+ type: str
+ version_added: '0.2.0'
+ update_targets:
+ required: false
+ description:
+ - The list of target resource URIs to apply the update to
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ update_creds:
+ required: false
+ description:
+ - The credentials for retrieving the update image
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ username:
+ required: false
+ description:
+ - The username for retrieving the update image
+ type: str
+ password:
+ required: false
+ description:
+ - The password for retrieving the update image
+ type: str
+ virtual_media:
+ required: false
+ description:
+ - The options for VirtualMedia commands
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ media_types:
+ required: false
+ description:
+ - The list of media types appropriate for the image
+ type: list
+ elements: str
+ image_url:
+ required: false
+ description:
+ - The URL of the image to insert or eject
+ type: str
+ inserted:
+ required: false
+ description:
+ - Indicates if the image is treated as inserted on command completion
+ type: bool
+ default: True
+ write_protected:
+ required: false
+ description:
+ - Indicates if the media is treated as write-protected
+ type: bool
+ default: True
+ username:
+ required: false
+ description:
+ - The username for accessing the image URL
+ type: str
+ password:
+ required: false
+ description:
+ - The password for accessing the image URL
+ type: str
+ transfer_protocol_type:
+ required: false
+ description:
+ - The network protocol to use with the image
+ type: str
+ transfer_method:
+ required: false
+ description:
+ - The transfer method to use with the image
+ type: str
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Restart system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulRestart
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Turn system power off
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceOff
+ resource_id: 437XR1138R2
+
+ - name: Restart system power forcefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceRestart
+ resource_id: 437XR1138R2
+
+ - name: Shutdown system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulShutdown
+ resource_id: 437XR1138R2
+
+ - name: Turn system power on
+ community.general.redfish_command:
+ category: Systems
+ command: PowerOn
+ resource_id: 437XR1138R2
+
+ - name: Reboot system power
+ community.general.redfish_command:
+ category: Systems
+ command: PowerReboot
+ resource_id: 437XR1138R2
+
+ - name: Set one-time boot device to {{ bootdevice }}
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiTarget"
+ uefi_target: "/0x31/0x33/0x01/0x01"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to BootNext target of "Boot0001"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiBootNext"
+ boot_next: "Boot0001"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: EnableContinuousBootOverride
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Disable persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: DisableBootOverride
+
+ - name: Set chassis indicator LED to blink
+ community.general.redfish_command:
+ category: Chassis
+ command: IndicatorLedBlink
+ resource_id: 1U
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Add user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Add user using new option aliases
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+ account_roleid: "{{ account_roleid }}"
+
+ - name: Delete user
+ community.general.redfish_command:
+ category: Accounts
+ command: DeleteUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Disable user
+ community.general.redfish_command:
+ category: Accounts
+ command: DisableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Add and enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser,EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user password
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserPassword
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+
+ - name: Update user role
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserRole
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_updatename: "{{ account_updatename }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ update_username: "{{ update_username }}"
+
+ - name: Update AccountService properties
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateAccountServiceProperties
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_properties:
+ AccountLockoutThreshold: 5
+ AccountLockoutDuration: 600
+
+ - name: Clear Manager Logs with a timeout of 20 seconds
+ community.general.redfish_command:
+ category: Manager
+ command: ClearLogs
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Clear Sessions
+ community.general.redfish_command:
+ category: Sessions
+ command: ClearSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Simple update
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: https://example.com/myupdate.img
+
+ - name: Simple update with additional options
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: //example.com/myupdate.img
+ update_protocol: FTP
+ update_targets:
+ - /redfish/v1/UpdateService/FirmwareInventory/BMC
+ update_creds:
+ username: operator
+ password: supersecretpwd
+
+ - name: Insert Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaInsert
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ media_types:
+ - CD
+ - DVD
+ resource_id: BMC
+
+ - name: Eject Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ resource_id: BMC
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: GracefulRestart
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulRestart
+ resource_id: BMC
+
+ - name: Turn manager power off
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceOff
+ resource_id: BMC
+
+ - name: Restart manager power forcefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceRestart
+ resource_id: BMC
+
+ - name: Shutdown manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulShutdown
+ resource_id: BMC
+
+ - name: Turn manager power on
+ community.general.redfish_command:
+ category: Manager
+ command: PowerOn
+ resource_id: BMC
+
+ - name: Reboot manager power
+ community.general.redfish_command:
+ category: Manager
+ command: PowerReboot
+ resource_id: BMC
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart",
+ "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride"],
+ "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"],
+ "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser",
+ "UpdateUserRole", "UpdateUserPassword", "UpdateUserName",
+ "UpdateAccountServiceProperties"],
+ "Sessions": ["ClearSessions"],
+ "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert",
+ "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart",
+ "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"],
+ "Update": ["SimpleUpdate"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ id=dict(aliases=["account_id"]),
+ new_username=dict(aliases=["account_username"]),
+ new_password=dict(aliases=["account_password"], no_log=True),
+ roleid=dict(aliases=["account_roleid"]),
+ update_username=dict(type='str', aliases=["account_updatename"]),
+ account_properties=dict(type='dict', default={}),
+ bootdevice=dict(),
+ timeout=dict(type='int', default=10),
+ uefi_target=dict(),
+ boot_next=dict(),
+ resource_id=dict(),
+ update_image_uri=dict(),
+ update_protocol=dict(),
+ update_targets=dict(type='list', elements='str', default=[]),
+ update_creds=dict(
+ type='dict',
+ options=dict(
+ username=dict(),
+ password=dict(no_log=True)
+ )
+ ),
+ virtual_media=dict(
+ type='dict',
+ options=dict(
+ media_types=dict(type='list', elements='str', default=[]),
+ image_url=dict(),
+ inserted=dict(type='bool', default=True),
+ write_protected=dict(type='bool', default=True),
+ username=dict(),
+ password=dict(no_log=True),
+ transfer_protocol_type=dict(),
+ transfer_method=dict(),
+ )
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # user to add/modify/delete
+ user = {'account_id': module.params['id'],
+ 'account_username': module.params['new_username'],
+ 'account_password': module.params['new_password'],
+ 'account_roleid': module.params['roleid'],
+ 'account_updatename': module.params['update_username'],
+ 'account_properties': module.params['account_properties']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # update options
+ update_opts = {
+ 'update_image_uri': module.params['update_image_uri'],
+ 'update_protocol': module.params['update_protocol'],
+ 'update_targets': module.params['update_targets'],
+ 'update_creds': module.params['update_creds']
+ }
+
+ # Boot override options
+ boot_opts = {
+ 'bootdevice': module.params['bootdevice'],
+ 'uefi_target': module.params['uefi_target'],
+ 'boot_next': module.params['boot_next']
+ }
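+ # boot_opts['override_enabled'] is filled in per command further below
+ # (Once / Continuous / Disabled) before set_boot_override() is called.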
+
+ # VirtualMedia options
+ virtual_media = module.params['virtual_media']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Accounts":
+ ACCOUNTS_COMMANDS = {
+ "AddUser": rf_utils.add_user,
+ "EnableUser": rf_utils.enable_user,
+ "DeleteUser": rf_utils.delete_user,
+ "DisableUser": rf_utils.disable_user,
+ "UpdateUserRole": rf_utils.update_user_role,
+ "UpdateUserPassword": rf_utils.update_user_password,
+ "UpdateUserName": rf_utils.update_user_name,
+ "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties
+ }
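+ # Each handler above is invoked with the consolidated `user` dict built
+ # earlier in main(), regardless of which account fields the command
+ # actually needs.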
+
+ # execute only if we find an Account service resource
+ result = rf_utils._find_accountservice_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ result = ACCOUNTS_COMMANDS[command](user)
+
+ elif category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command.startswith('Power'):
+ result = rf_utils.manage_system_power(command)
+ elif command == "SetOneTimeBoot":
+ boot_opts['override_enabled'] = 'Once'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "EnableContinuousBootOverride":
+ boot_opts['override_enabled'] = 'Continuous'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "DisableBootOverride":
+ boot_opts['override_enabled'] = 'Disabled'
+ result = rf_utils.set_boot_override(boot_opts)
+
+ elif category == "Chassis":
+ result = rf_utils._find_chassis_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"]
+
+ # Check if more than one led_command is present
+ num_led_commands = sum([command in led_commands for command in command_list])
+ if num_led_commands > 1:
+ result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
+ else:
+ for command in command_list:
+ if command in led_commands:
+ result = rf_utils.manage_indicator_led(command)
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ClearSessions":
+ result = rf_utils.clear_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ # standardize on the Power* commands, but allow the legacy
+ # GracefulRestart command
+ if command == 'GracefulRestart':
+ command = 'PowerGracefulRestart'
+
+ if command.startswith('Power'):
+ result = rf_utils.manage_manager_power(command)
+ elif command == 'ClearLogs':
+ result = rf_utils.clear_logs()
+ elif command == 'VirtualMediaInsert':
+ result = rf_utils.virtual_media_insert(virtual_media)
+ elif command == 'VirtualMediaEject':
+ result = rf_utils.virtual_media_eject(virtual_media)
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "SimpleUpdate":
+ result = rf_utils.simple_update(update_opts)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ changed = result.get('changed', True)
+ module.exit_json(changed=changed, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py
new file mode 100644
index 00000000..26b692a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_config
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ set or update a configuration attribute.
+ - Manages BIOS configuration settings.
+ - Manages OOB controller configuration settings.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ bios_attribute_name:
+ required: false
+ description:
+ - Name of BIOS attribute to update (deprecated; use bios_attributes instead)
+ default: 'null'
+ type: str
+ bios_attribute_value:
+ required: false
+ description:
+ - Value of BIOS attribute to update (deprecated; use bios_attributes instead)
+ default: 'null'
+ type: raw
+ bios_attributes:
+ required: false
+ description:
+ - dictionary of BIOS attributes to update
+ default: {}
+ type: dict
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ boot_order:
+ required: false
+ description:
+ - list of BootOptionReference strings specifying the BootOrder
+ default: []
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ network_protocols:
+ required: false
+ description:
+ - Dict of manager network protocol services and their settings to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ nic_addr:
+ required: false
+ description:
+ - EthernetInterface Address string on OOB controller
+ default: 'null'
+ type: str
+ version_added: '0.2.0'
+ nic_config:
+ required: false
+ description:
+ - Dict of EthernetInterface settings to apply on the OOB controller
+ type: dict
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Set BootMode to UEFI
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Uefi"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set multiple BootMode attributes
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable PXE Boot for NIC1 using deprecated options
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attribute_name: PxeDev1EnDis
+ bios_attribute_value: Enabled
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set BIOS default settings with a timeout of 20 seconds
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosDefaultSettings
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Set boot order
+ community.general.redfish_config:
+ category: Systems
+ command: SetBootOrder
+ boot_order:
+ - Boot0002
+ - Boot0001
+ - Boot0000
+ - Boot0003
+ - Boot0004
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set boot order to the default
+ community.general.redfish_config:
+ category: Systems
+ command: SetDefaultBootOrder
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager Network Protocols
+ community.general.redfish_config:
+ category: Manager
+ command: SetNetworkProtocols
+ network_protocols:
+ SNMP:
+ ProtocolEnabled: True
+ Port: 161
+ HTTP:
+ ProtocolEnabled: False
+ Port: 8080
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager NIC
+ community.general.redfish_config:
+ category: Manager
+ command: SetManagerNic
+ nic_config:
+ DHCPv4:
+ DHCPEnabled: False
+ IPv4StaticAddresses:
+ Address: 192.168.1.3
+ Gateway: 192.168.1.1
+ SubnetMask: 255.255.255.0
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
+ "SetDefaultBootOrder"],
+ "Manager": ["SetNetworkProtocols", "SetManagerNic"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ bios_attribute_name=dict(default='null'),
+ bios_attribute_value=dict(default='null', type='raw'),
+ bios_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ boot_order=dict(type='list', elements='str', default=[]),
+ network_protocols=dict(
+ type='dict',
+ default={}
+ ),
+ resource_id=dict(),
+ nic_addr=dict(default='null'),
+ nic_config=dict(
+ type='dict',
+ default={}
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # BIOS attributes to update
+ bios_attributes = module.params['bios_attributes']
+ if module.params['bios_attribute_name'] != 'null':
+ bios_attributes[module.params['bios_attribute_name']] = module.params[
+ 'bios_attribute_value']
+ module.deprecate(msg='The bios_attribute_name/bios_attribute_value '
+ 'options are deprecated. Use bios_attributes instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
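+ # e.g. the deprecated pair bios_attribute_name: PxeDev1EnDis /
+ # bios_attribute_value: Enabled (see EXAMPLES above) ends up as
+ # {'PxeDev1EnDis': 'Enabled'} inside bios_attributes before dispatch.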
+
+ # boot order
+ boot_order = module.params['boot_order']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # manager nic
+ nic_addr = module.params['nic_addr']
+ nic_config = module.params['nic_config']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetBiosDefaultSettings":
+ result = rf_utils.set_bios_default_settings()
+ elif command == "SetBiosAttributes":
+ result = rf_utils.set_bios_attributes(bios_attributes)
+ elif command == "SetBootOrder":
+ result = rf_utils.set_boot_order(boot_order)
+ elif command == "SetDefaultBootOrder":
+ result = rf_utils.set_default_boot_order()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetNetworkProtocols":
+ result = rf_utils.set_network_protocols(module.params['network_protocols'])
+ elif command == "SetManagerNic":
+ result = rf_utils.set_manager_nic(nic_addr, nic_config)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Gathers information about Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+  description: Different results depending on task.
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+ for entry in CATEGORY_COMMANDS_ALL:
+ category_list.append(entry)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+            command_list = list(CATEGORY_COMMANDS_ALL[category])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
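
main() above expands the special value 'all' in both the category and command lists before dispatching, falling back to CATEGORY_COMMANDS_DEFAULT when no command is given. A compact sketch of that expansion logic, with the lookup tables abbreviated (their full contents are in the module above):

CATEGORY_COMMANDS_ALL = {
    "Systems": ["GetSystemInventory", "GetCpuInventory"],  # abbreviated
    "Chassis": ["GetFanInventory"],
}
CATEGORY_COMMANDS_DEFAULT = {
    "Systems": "GetSystemInventory",
    "Chassis": "GetFanInventory",
}

def expand(categories, commands):
    """Yield (category, command_list) pairs after 'all' expansion."""
    cats = list(CATEGORY_COMMANDS_ALL) if "all" in categories else categories
    for category in cats:
        if category not in CATEGORY_COMMANDS_ALL:
            raise ValueError("Invalid Category: %s" % category)
        if not commands:
            cmds = [CATEGORY_COMMANDS_DEFAULT[category]]   # default command
        elif "all" in commands:
            cmds = list(CATEGORY_COMMANDS_ALL[category])   # every known command
        else:
            cmds = commands
        for cmd in cmds:
            if cmd not in CATEGORY_COMMANDS_ALL[category]:
                raise ValueError("Invalid Command: %s" % cmd)
        yield category, cmds

# category=all, command=all walks every command in every category.
for cat, cmds in expand(["all"], ["all"]):
    print(cat, cmds)
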
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Gathers information about Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+  description: Different results depending on task.
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+ for entry in CATEGORY_COMMANDS_ALL:
+ category_list.append(entry)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+            command_list = list(CATEGORY_COMMANDS_ALL[category])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
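
The if/elif chains in main() above map each command string to a RedfishUtils method and a key in the result dict. The same mapping can be expressed as a dispatch table; a sketch (the table and helper below are illustrative only, not part of the module):

# Hypothetical dispatch table for the Systems category: command -> (result key, method name).
SYSTEMS_DISPATCH = {
    "GetSystemInventory": ("system", "get_multi_system_inventory"),
    "GetCpuInventory": ("cpu", "get_multi_cpu_inventory"),
    "GetMemoryInventory": ("memory", "get_multi_memory_inventory"),
}

def run_systems_commands(rf_utils, command_list, result):
    """Resolve each command through the table instead of an if/elif chain."""
    for command in command_list:
        key, method_name = SYSTEMS_DISPATCH[command]
        result[key] = getattr(rf_utils, method_name)()
    return result

# Minimal usage with a stand-in utils object:
class FakeUtils(object):
    def get_multi_cpu_inventory(self):
        return {"entries": []}

print(run_systems_commands(FakeUtils(), ["GetCpuInventory"], {}))  # {'cpu': {'entries': []}}
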
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py
new file mode 100644
index 00000000..372ba2df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hugh Ma <Hugh.Ma@flextronics.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: stacki_host
+short_description: Add or remove a host on a Stacki front-end
+description:
+  - Use this module to add hosts to or remove hosts from a Stacki front-end via its API.
+ - U(https://github.com/StackIQ/stacki)
+options:
+ name:
+ description:
+ - Name of the host to be added to Stacki.
+ required: True
+ type: str
+ stacki_user:
+ description:
+      - Username for authenticating with the Stacki API. If not
+        specified, the environment variable C(stacki_user) is used instead.
+ required: True
+ type: str
+ stacki_password:
+ description:
+      - Password for authenticating with the Stacki API. If not
+        specified, the environment variable C(stacki_password) is used instead.
+ required: True
+ type: str
+ stacki_endpoint:
+ description:
+ - URL for the Stacki API Endpoint.
+ required: True
+ type: str
+ prim_intf_mac:
+ description:
+ - MAC Address for the primary PXE boot network interface.
+ type: str
+ prim_intf_ip:
+ description:
+ - IP Address for the primary network interface.
+ type: str
+ prim_intf:
+ description:
+ - Name of the primary network interface.
+ type: str
+ force_install:
+ description:
+      - Set this to C(true) to force the node into the install state if it already exists in Stacki.
+ type: bool
+ state:
+ description:
+ - Set value to the desired state for the specified host.
+ type: str
+ choices: [ absent, present ]
+author:
+- Hugh Ma (@bbyhuy) <Hugh.Ma@flextronics.com>
+'''
+
+EXAMPLES = '''
+- name: Add a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ prim_intf_mac: mac_addr
+ prim_intf_ip: x.x.x.x
+ prim_intf: eth0
+
+- name: Remove a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ state: absent
+'''
+
+RETURN = '''
+changed:
+  description: Whether or not the API call completed successfully.
+ returned: always
+ type: bool
+ sample: true
+
+stdout:
+ description: the set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: the value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+class StackiHost(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.hostname = module.params['name']
+ self.rack = module.params['rack']
+ self.rank = module.params['rank']
+ self.appliance = module.params['appliance']
+ self.prim_intf = module.params['prim_intf']
+ self.prim_intf_ip = module.params['prim_intf_ip']
+ self.network = module.params['network']
+ self.prim_intf_mac = module.params['prim_intf_mac']
+ self.endpoint = module.params['stacki_endpoint']
+
+ auth_creds = {'USERNAME': module.params['stacki_user'],
+ 'PASSWORD': module.params['stacki_password']}
+
+ # Get Initial CSRF
+ cred_a = self.do_request(self.module, self.endpoint, method="GET")
+ cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+ init_csrftoken = None
+ for c in cookie_a:
+ if "csrftoken" in c:
+ init_csrftoken = c.replace("csrftoken=", "")
+ init_csrftoken = init_csrftoken.rstrip("\r\n")
+ break
+
+ # Make Header Dictionary with initial CSRF
+ header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+ 'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+ # Endpoint to get final authentication header
+ login_endpoint = self.endpoint + "/login"
+
+ # Get Final CSRF and Session ID
+ login_req = self.do_request(self.module, login_endpoint, headers=header,
+ payload=urlencode(auth_creds), method='POST')
+
+ cookie_f = login_req.headers.get('Set-Cookie').split(';')
+ csrftoken = None
+ for f in cookie_f:
+ if "csrftoken" in f:
+ csrftoken = f.replace("csrftoken=", "")
+ if "sessionid" in f:
+                # use the cookie fragment from this loop, not the leftover
+                # variable from the initial CSRF loop above
+                sessionid = f.split("sessionid=", 1)[-1]
+ sessionid = sessionid.rstrip("\r\n")
+
+ self.header = {'csrftoken': csrftoken,
+ 'X-CSRFToken': csrftoken,
+ 'sessionid': sessionid,
+ 'Content-type': 'application/json',
+ 'Cookie': login_req.headers.get('Set-Cookie')}
+
+ def do_request(self, module, url, payload=None, headers=None, method=None):
+ res, info = fetch_url(module, url, data=payload, headers=headers, method=method)
+
+ if info['status'] != 200:
+ self.module.fail_json(changed=False, msg=info['msg'])
+
+ return res
+
+    def stack_check_host(self):
+        res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+
+        # res.read() returns bytes under Python 3; convert before the substring test
+        return self.hostname in to_native(res.read())
+
+ def stack_sync(self):
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+ def stack_force_install(self, result):
+ data = dict()
+ changed = False
+
+ data['cmd'] = "set host boot {0} action=install" \
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+ changed = True
+
+ self.stack_sync()
+
+ result['changed'] = changed
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_add(self, result):
+        data = dict()
+
+        data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+            .format(self.hostname, self.rack, self.rank, self.appliance)
+        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        # adding a host changes the front-end, so report changed=True
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+ def stack_remove(self, result):
+ data = dict()
+
+ data['cmd'] = "remove host {0}"\
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+ self.stack_sync()
+
+ result['changed'] = True
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ rack=dict(type='int', default=0),
+ rank=dict(type='int', default=0),
+ appliance=dict(type='str', default='backend'),
+ prim_intf=dict(type='str'),
+ prim_intf_ip=dict(type='str'),
+ network=dict(type='str', default='private'),
+ prim_intf_mac=dict(type='str'),
+ stacki_user=dict(type='str', required=True, default=os.environ.get('stacki_user')),
+ stacki_password=dict(type='str', required=True, default=os.environ.get('stacki_password'), no_log=True),
+ stacki_endpoint=dict(type='str', required=True, default=os.environ.get('stacki_endpoint')),
+ force_install=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ )
+
+ result = {'changed': False}
+ missing_params = list()
+
+ stacki = StackiHost(module)
+ host_exists = stacki.stack_check_host()
+
+ # If state is present, but host exists, need force_install flag to put host back into install state
+ if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+ stacki.stack_force_install(result)
+    # If state is present and host exists, but force_install is false, do nothing
+ elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+ result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+ .format(module.params['name'])
+    # Otherwise, state is present but the host doesn't exist, so require more params to add it
+ elif module.params['state'] == 'present' and not host_exists:
+ for param in ['appliance', 'prim_intf',
+ 'prim_intf_ip', 'network', 'prim_intf_mac']:
+ if not module.params[param]:
+ missing_params.append(param)
+ if len(missing_params) > 0: # @FIXME replace with required_if
+ module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+ stacki.stack_add(result)
+    # If state is absent and the host exists, remove it.
+ elif module.params['state'] == 'absent' and host_exists:
+ stacki.stack_remove(result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
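
StackiHost.__init__ above performs a two-step login: a GET to seed the initial csrftoken cookie, then a POST to /login that yields the final csrftoken and sessionid. A sketch of the same flow using the third-party requests library (an assumption for brevity; the module itself uses fetch_url, and the endpoint below is hypothetical):

import requests

def stacki_login(endpoint, username, password):
    """GET for an initial csrftoken, then POST credentials to /login."""
    session = requests.Session()
    session.get(endpoint)  # seeds the csrftoken cookie
    csrftoken = session.cookies.get('csrftoken')
    session.post(endpoint + "/login",
                 data={'USERNAME': username, 'PASSWORD': password},
                 headers={'X-CSRFToken': csrftoken})
    # The session now carries the refreshed csrftoken and sessionid cookies,
    # so subsequent JSON commands only need the CSRF header set.
    return session, session.cookies.get('csrftoken')

session, token = stacki_login("https://stacki.example.com", "usr", "pwd")
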
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py
new file mode 100644
index 00000000..2f097fcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wakeonlan
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+options:
+ mac:
+ description:
+ - MAC address to send Wake-on-LAN broadcast packet for.
+ required: true
+ type: str
+ broadcast:
+ description:
+ - Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
+ default: 255.255.255.255
+ type: str
+ port:
+ description:
+ - UDP port to use for magic Wake-on-LAN packet.
+ default: 7
+ type: int
+todo:
+ - Add arping support to check whether the system is up (before and after)
+ - Enable check-mode support (when we have arping support)
+ - Does not have SecureOn password support
+notes:
+  - This module sends a magic packet, without knowing whether it worked.
+  - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
+ - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
+seealso:
+- module: community.windows.win_wakeonlan
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+ community.general.wakeonlan:
+ mac: '00:00:5E:00:53:66'
+ broadcast: 192.0.2.23
+ delegate_to: localhost
+
+- community.general.wakeonlan:
+ mac: 00:00:5E:00:53:66
+ port: 9
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+import socket
+import struct
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def wakeonlan(module, mac, broadcast, port):
+ """ Send a magic Wake-on-LAN packet. """
+
+ mac_orig = mac
+
+ # Remove possible separator from MAC address
+ if len(mac) == 12 + 5:
+ mac = mac.replace(mac[2], '')
+
+ # If we don't end up with 12 hexadecimal characters, fail
+ if len(mac) != 12:
+ module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+
+ # Test if it converts to an integer, otherwise fail
+ try:
+ int(mac, 16)
+ except ValueError:
+ module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+
+ # Create payload for magic packet
+ data = b''
+ padding = ''.join(['FFFFFFFFFFFF', mac * 20])
+ for i in range(0, len(padding), 2):
+ data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
+
+ # Broadcast payload to network
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
+ if not module.check_mode:
+
+ try:
+ sock.sendto(data, (broadcast, port))
+ except socket.error as e:
+ sock.close()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ sock.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mac=dict(type='str', required=True),
+ broadcast=dict(type='str', default='255.255.255.255'),
+ port=dict(type='int', default=7),
+ ),
+ supports_check_mode=True,
+ )
+
+ mac = module.params['mac']
+ broadcast = module.params['broadcast']
+ port = module.params['port']
+
+ wakeonlan(module, mac, broadcast, port)
+
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
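
The payload assembled in wakeonlan() above is the classic magic packet: six 0xFF bytes followed by repeated copies of the MAC (the module sends 20 repetitions; receivers only require 16). A compact sketch of the same construction and broadcast:

import socket

def magic_packet(mac):
    """Return the magic packet for a MAC like '00:00:5E:00:53:66'."""
    mac_hex = mac.replace(':', '').replace('-', '')
    if len(mac_hex) != 12:
        raise ValueError("bad MAC: %s" % mac)
    # 6 x 0xFF header plus 20 copies of the MAC, mirroring the module above
    return bytes.fromhex('FF' * 6 + mac_hex * 20)

packet = magic_packet('00:00:5E:00:53:66')
assert len(packet) == 6 + 20 * 6  # 126 bytes

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(packet, ('255.255.255.255', 7))  # same defaults as the module
sock.close()
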
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py
new file mode 100644
index 00000000..2aebc346
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py
@@ -0,0 +1,1516 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhevm
+short_description: RHEV/oVirt automation
+description:
+ - This module only supports oVirt/RHEV version 3.
+ - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+  - Allows you to create, remove, update, or power-manage virtual machines on a RHEV/oVirt platform.
+requirements:
+ - ovirtsdk
+author:
+- Timothy Vandenbrande (@TimothyVandenbrande)
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ default: admin@internal
+ password:
+ description:
+ - The password for user authentication.
+ type: str
+ required: true
+ server:
+ description:
+ - The name/IP of your RHEV-m/oVirt instance.
+ type: str
+ default: 127.0.0.1
+ port:
+ description:
+ - The port on which the API is reachable.
+ type: int
+ default: 443
+ insecure_api:
+ description:
+ - A boolean switch to make a secure or insecure connection to the server.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the VM.
+ type: str
+ cluster:
+ description:
+      - The RHEV/oVirt cluster in which you want your VM to start.
+ type: str
+ datacenter:
+ description:
+      - The RHEV/oVirt datacenter in which you want your VM to start.
+ type: str
+ default: Default
+ state:
+ description:
+      - This serves to create, remove, update, or power-manage your VM.
+ type: str
+ choices: [ absent, cd, down, info, ping, present, restarted, up ]
+ default: present
+ image:
+ description:
+ - The template to use for the VM.
+ type: str
+ type:
+ description:
+ - To define if the VM is a server or desktop.
+ type: str
+ choices: [ desktop, host, server ]
+ default: server
+ vmhost:
+ description:
+ - The host you wish your VM to run on.
+ type: str
+ vmcpu:
+ description:
+ - The number of CPUs you want in your VM.
+ type: int
+ default: 2
+ cpu_share:
+ description:
+ - This parameter is used to configure the CPU share.
+ type: int
+ default: 0
+ vmmem:
+ description:
+ - The amount of memory you want your VM to use (in GB).
+ type: int
+ default: 1
+ osver:
+ description:
+ - The operating system option in RHEV/oVirt.
+ type: str
+ default: rhel_6x64
+ mempol:
+ description:
+ - The minimum amount of memory you wish to reserve for this system.
+ type: int
+ default: 1
+ vm_ha:
+ description:
+ - To make your VM High Available.
+ type: bool
+ default: yes
+ disks:
+ description:
+ - This option uses complex arguments and is a list of disks with the options name, size and domain.
+ type: list
+ elements: str
+ ifaces:
+ description:
+ - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+ type: list
+ elements: str
+ aliases: [ interfaces, nics ]
+ boot_order:
+ description:
+ - This option uses complex arguments and is a list of items that specify the bootorder.
+ type: list
+ elements: str
+ default: [ hd, network ]
+ del_prot:
+ description:
+ - This option sets the delete protection checkbox.
+ type: bool
+ default: yes
+ cd_drive:
+ description:
+      - The CD you wish to have mounted on the VM when I(state = 'cd').
+ type: str
+ timeout:
+ description:
+ - The timeout you wish to define for power actions.
+ - When I(state = 'up').
+ - When I(state = 'down').
+ - When I(state = 'restarted').
+ type: int
+'''
+
+RETURN = r'''
+vm:
+ description: Returns all of the VMs variables and execution.
+ returned: always
+ type: dict
+ sample: '{
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
+ }'
+'''
+
+EXAMPLES = r'''
+- name: Basic get info from VM
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ state: info
+
+- name: Basic create example from image
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ cluster: centos
+ image: centos7_x64
+ state: present
+
+- name: Power management
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: uptime_server
+ image: centos7_x64
+ state: down
+
+- name: Multi disk, multi nic create example
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: server007
+ type: server
+ vmcpu: 4
+ vmmem: 2
+ ifaces:
+ - name: eth0
+ vlan: vlan2202
+ - name: eth1
+ vlan: vlan36
+ - name: eth2
+ vlan: vlan38
+ - name: eth3
+ vlan: vlan2202
+ disks:
+ - name: root
+ size: 10
+ domain: ssd-san
+ - name: swap
+ size: 10
+ domain: 15kiscsi-san
+ - name: opt
+ size: 10
+ domain: 15kiscsi-san
+ - name: var
+ size: 10
+ domain: 10kiscsi-san
+ - name: home
+ size: 10
+ domain: sata-san
+ boot_order:
+ - network
+ - hd
+ state: present
+
+- name: Add a CD to the disk cd_drive
+ community.general.rhevm:
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: server007
+ cd_drive: rhev-tools-setup.iso
+ state: cd
+
+- name: New host deployment + host network configuration
+ community.general.rhevm:
+ password: '{{ rhevm.admin.pass }}'
+ name: ovirt_node007
+ type: host
+ cluster: rhevm01
+ ifaces:
+ - name: em1
+ - name: em2
+ - name: p3p1
+ ip: 172.31.224.200
+ netmask: 255.255.254.0
+ - name: p3p2
+ ip: 172.31.225.200
+ netmask: 255.255.254.0
+ - name: bond0
+ bond:
+ - em1
+ - em2
+ network: rhevm
+ ip: 172.31.222.200
+ netmask: 255.255.255.0
+ management: yes
+ - name: bond0.36
+ network: vlan36
+ ip: 10.2.36.200
+ netmask: 255.255.254.0
+ gateway: 10.2.36.254
+ - name: bond0.2202
+ network: vlan2202
+ - name: bond0.38
+ network: vlan38
+ state: present
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_SDK = True
+except ImportError:
+ HAS_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+RHEV_FAILED = 1
+RHEV_SUCCESS = 0
+RHEV_UNAVAILABLE = 2
+
+RHEV_TYPE_OPTS = ['desktop', 'host', 'server']
+STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']
+
+msg = []
+changed = False
+failed = False
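
setMsg(), setChanged(), setFailed() and checkFail() are used throughout RHEVConn but their definitions fall outside this hunk; they operate on the module-level msg/changed/failed globals above. A plausible minimal sketch of that pattern, inferred from the call sites (an assumption, not the file's actual bodies):

def setMsg(message):
    # Accumulate progress and error messages for the final module result.
    global msg
    msg.append(message)

def setChanged():
    global changed
    changed = True

def setFailed():
    global failed
    failed = True

def checkFail():
    # Abort as soon as any earlier step flagged a failure.
    if failed:
        raise Exception(msg)
    return True
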
+
+
+class RHEVConn(object):
+ 'Connection to RHEV-M'
+
+ def __init__(self, module):
+ self.module = module
+
+ user = module.params.get('user')
+ password = module.params.get('password')
+ server = module.params.get('server')
+ port = module.params.get('port')
+ insecure_api = module.params.get('insecure_api')
+
+ url = "https://%s:%s" % (server, port)
+
+ try:
+ api = API(url=url, username=user, password=password, insecure=str(insecure_api))
+ api.test()
+ self.conn = api
+ except Exception:
+ raise Exception("Failed to connect to RHEV-M.")
+
+ def __del__(self):
+ self.conn.disconnect()
+
+ def createVMimage(self, name, cluster, template):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ template=self.conn.templates.get(name=template),
+ disks=params.Disks(clone=True)
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createVM(self, name, cluster, os, actiontype):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ os=params.OperatingSystem(type_=os),
+ template=self.conn.templates.get(name="Blank"),
+ type_=actiontype
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+ VM = self.get_VM(vmname)
+
+ newdisk = params.Disk(
+ name=diskname,
+ size=1024 * 1024 * 1024 * int(disksize),
+ wipe_after_delete=True,
+ sparse=diskallocationtype,
+ interface=diskinterface,
+ format=diskformat,
+ bootable=diskboot,
+ storage_domains=params.StorageDomains(
+ storage_domain=[self.get_domain(diskdomain)]
+ )
+ )
+
+ try:
+ VM.disks.add(newdisk)
+ VM.update()
+ setMsg("Successfully added disk " + diskname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentdisk = VM.disks.get(name=diskname)
+ attempt = 1
+ while currentdisk.status.state != 'ok':
+ currentdisk = VM.disks.get(name=diskname)
+ if attempt == 100:
+ setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The disk " + diskname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + diskname + ".")
+ setMsg(str(e))
+ return False
+ return True
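
createDisk() above polls the new disk until its state reaches 'ok', retrying up to 100 times with a two-second sleep, and createNIC() below repeats the same loop for NIC activation. The shared pattern, as a generic sketch (a hypothetical helper, not part of the module):

import time

def wait_until(fetch, ready, attempts=100, delay=2):
    """Poll fetch() until ready(obj) is true; give up after `attempts` tries."""
    for _ in range(attempts):
        obj = fetch()
        if ready(obj):
            return obj
        time.sleep(delay)
    raise Exception("resource did not become ready after %d attempts" % attempts)

# e.g. wait_until(lambda: VM.disks.get(name=diskname),
#                 lambda d: d.status.state == 'ok')
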
+
+ def createNIC(self, vmname, nicname, vlan, interface):
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ newnic = params.NIC(
+ name=nicname,
+ network=DC.networks.get(name=vlan),
+ interface=interface
+ )
+
+ try:
+ VM.nics.add(newnic)
+ VM.update()
+ setMsg("Successfully added iface " + nicname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentnic = VM.nics.get(name=nicname)
+ attempt = 1
+ while currentnic.active is not True:
+ currentnic = VM.nics.get(name=nicname)
+ if attempt == 100:
+ setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The iface " + nicname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + nicname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def get_DC(self, dc_name):
+ return self.conn.datacenters.get(name=dc_name)
+
+ def get_DC_byid(self, dc_id):
+ return self.conn.datacenters.get(id=dc_id)
+
+ def get_VM(self, vm_name):
+ return self.conn.vms.get(name=vm_name)
+
+ def get_cluster_byid(self, cluster_id):
+ return self.conn.clusters.get(id=cluster_id)
+
+ def get_cluster(self, cluster_name):
+ return self.conn.clusters.get(name=cluster_name)
+
+ def get_domain_byid(self, dom_id):
+ return self.conn.storagedomains.get(id=dom_id)
+
+ def get_domain(self, domain_name):
+ return self.conn.storagedomains.get(name=domain_name)
+
+ def get_disk(self, disk):
+ return self.conn.disks.get(disk)
+
+ def get_network(self, dc_name, network_name):
+ return self.get_DC(dc_name).networks.get(network_name)
+
+ def get_network_byid(self, network_id):
+ return self.conn.networks.get(id=network_id)
+
+ def get_NIC(self, vm_name, nic_name):
+ return self.get_VM(vm_name).nics.get(nic_name)
+
+ def get_Host(self, host_name):
+ return self.conn.hosts.get(name=host_name)
+
+ def get_Host_byid(self, host_id):
+ return self.conn.hosts.get(id=host_id)
+
+ def set_Memory(self, name, memory):
+ VM = self.get_VM(name)
+ VM.memory = int(int(memory) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The Memory has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Memory_Policy(self, name, memory_policy):
+ VM = self.get_VM(name)
+ VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The memory policy has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory policy.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU(self, name, cpu):
+ VM = self.get_VM(name)
+ VM.cpu.topology.cores = int(cpu)
+ try:
+ VM.update()
+ setMsg("The number of CPUs has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the number of CPUs.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU_share(self, name, cpu_share):
+ VM = self.get_VM(name)
+ VM.cpu_shares = int(cpu_share)
+ try:
+ VM.update()
+ setMsg("The CPU share has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the CPU share.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Disk(self, diskname, disksize, diskinterface, diskboot):
+ DISK = self.get_disk(diskname)
+ setMsg("Checking disk " + diskname)
+ if DISK.get_bootable() != diskboot:
+ try:
+ DISK.set_bootable(diskboot)
+ setMsg("Updated the boot option on the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set the boot option on the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The boot option of the disk is correct")
+ if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ try:
+ DISK.size = (1024 * 1024 * 1024 * int(disksize))
+ setMsg("Updated the size of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the size of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
+ setMsg("Shrinking disks is not supported")
+ setFailed()
+ return False
+ else:
+ setMsg("The size of the disk is correct")
+ if str(DISK.interface) != str(diskinterface):
+ try:
+ DISK.interface = diskinterface
+ setMsg("Updated the interface of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the interface of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The interface of the disk is correct")
+ return True
+
+ def set_NIC(self, vmname, nicname, newname, vlan, interface):
+ NIC = self.get_NIC(vmname, nicname)
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ NETWORK = self.get_network(str(DC.name), vlan)
+ checkFail()
+ if NIC.name != newname:
+ NIC.name = newname
+ setMsg('Updating iface name to ' + newname)
+ setChanged()
+ if str(NIC.network.id) != str(NETWORK.id):
+ NIC.set_network(NETWORK)
+ setMsg('Updating iface network to ' + vlan)
+ setChanged()
+ if NIC.interface != interface:
+ NIC.interface = interface
+ setMsg('Updating iface interface to ' + interface)
+ setChanged()
+ try:
+ NIC.update()
+ setMsg('iface has successfully been updated.')
+ except Exception as e:
+ setMsg("Failed to update the iface.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_DeleteProtection(self, vmname, del_prot):
+ VM = self.get_VM(vmname)
+ VM.delete_protected = del_prot
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update delete protection.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_BootOrder(self, vmname, boot_order):
+ VM = self.get_VM(vmname)
+ bootorder = []
+ for device in boot_order:
+ bootorder.append(params.Boot(dev=device))
+ VM.os.boot = bootorder
+
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the boot order.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_Host(self, host_name, cluster, ifaces):
+ HOST = self.get_Host(host_name)
+ CLUSTER = self.get_cluster(cluster)
+
+ if HOST is None:
+ setMsg("Host does not exist.")
+ ifacelist = dict()
+ networklist = []
+ manageip = ''
+
+ try:
+ for iface in ifaces:
+ try:
+ setMsg('creating host interface ' + iface['name'])
+ if 'management' in iface:
+ manageip = iface['ip']
+ if 'boot_protocol' not in iface:
+ if 'ip' in iface:
+ iface['boot_protocol'] = 'static'
+ else:
+ iface['boot_protocol'] = 'none'
+ if 'ip' not in iface:
+ iface['ip'] = ''
+ if 'netmask' not in iface:
+ iface['netmask'] = ''
+ if 'gateway' not in iface:
+ iface['gateway'] = ''
+
+ if 'network' in iface:
+ if 'bond' in iface:
+ bond = []
+ for slave in iface['bond']:
+ bond.append(ifacelist[slave])
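+ # Bond the collected slave NICs using LACP (mode 4) with a
+ # 100 ms miimon link-monitoring interval.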
+ try:
+ tmpiface = params.Bonding(
+ slaves=params.Slaves(host_nic=bond),
+ options=params.Options(
+ option=[
+ params.Option(name='miimon', value='100'),
+ params.Option(name='mode', value='4')
+ ]
+ )
+ )
+ except Exception as e:
+ setMsg('Failed to create the bond for ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ try:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ),
+ override_configuration=True,
+ bonding=tmpiface)
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'] + ' as network interface')
+ setFailed()
+ setMsg(str(e))
+ return False
+ else:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ else:
+ tmpiface = params.HostNIC(
+ name=iface['name'],
+ network=params.Network(),
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ ifacelist[iface['name']] = tmpiface
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ setMsg('Failed to set networks')
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ if manageip == '':
+ setMsg('No management network is defined')
+ setFailed()
+ return False
+
+ try:
+ HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
+ if self.conn.hosts.add(HOST):
+ setChanged()
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ while (state != 'non_operational' and state != 'up'):
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to add host to RHEVM')
+ setFailed()
+ return False
+
+ setMsg('status host: up')
+ time.sleep(5)
+
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ setMsg('State before setting to maintenance: ' + str(state))
+ HOST.deactivate()
+ while state != 'maintenance':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ setMsg('status host: maintenance')
+
+ try:
+ HOST.nics.setupnetworks(params.Action(
+ force=True,
+ check_connectivity=False,
+ host_nics=params.HostNics(host_nic=networklist)
+ ))
+ setMsg('nics are set')
+ except Exception as e:
+ setMsg('Failed to apply networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ try:
+ HOST.commitnetconfig()
+ setMsg('Network config is saved')
+ except Exception as e:
+ setMsg('Failed to save networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ if 'The Host name is already in use' in str(e):
+ setMsg("Host already exists")
+ else:
+ setMsg("Failed to add host")
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ HOST.activate()
+ while state != 'up':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to apply networkconfig.')
+ setFailed()
+ return False
+ setMsg('status host: up')
+ else:
+ setMsg("Host exists.")
+
+ return True
+
+ def del_NIC(self, vmname, nicname):
+ return self.get_NIC(vmname, nicname).delete()
+
+ def remove_VM(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.delete()
+ except Exception as e:
+ setMsg("Failed to remove VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def start_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.start()
+ except Exception as e:
+ setMsg("Failed to start VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "up", timeout)
+
+ def wait_VM(self, vmname, state, timeout):
+ VM = self.get_VM(vmname)
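+ # Poll every 10 seconds until the VM reaches the requested state; a
+ # timeout of False waits indefinitely.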
+ while VM.status.state != state:
+ VM = self.get_VM(vmname)
+ time.sleep(10)
+ if timeout is not False:
+ timeout -= 10
+ if timeout <= 0:
+ setMsg("Timeout expired")
+ setFailed()
+ return False
+ return True
+
+ def stop_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.stop()
+ except Exception as e:
+ setMsg("Failed to stop VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "down", timeout)
+
+ def set_CD(self, vmname, cd_drive):
+ VM = self.get_VM(vmname)
+ try:
+ if str(VM.status.state) == 'down':
+ cdrom = params.CdRom(file=cd_drive)
+ VM.cdroms.add(cdrom)
+ setMsg("Attached the image.")
+ setChanged()
+ else:
+ cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+ cdrom.set_file(cd_drive)
+ cdrom.update(current=True)
+ setMsg("Attached the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to attach image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_VM_Host(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+ HOST = self.get_Host(vmhost)
+ try:
+ VM.placement_policy.host = HOST
+ VM.update()
+ setMsg("Set startup host to " + vmhost)
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def migrate_VM(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+
+ HOST = self.get_Host_byid(VM.host.id)
+ if str(HOST.name) != vmhost:
+ try:
+ VM.migrate(
+ action=params.Action(
+ host=params.Host(
+ name=vmhost,
+ )
+ ),
+ )
+ setChanged()
+ setMsg("VM migrated to " + vmhost)
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def remove_CD(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+ setMsg("Removed the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to remove the image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+
+class RHEV(object):
+ def __init__(self, module):
+ self.module = module
+
+ def __get_conn(self):
+ self.conn = RHEVConn(self.module)
+ return self.conn
+
+ def test(self):
+ self.__get_conn()
+ return "OK"
+
+ def getVM(self, name):
+ self.__get_conn()
+ VM = self.conn.get_VM(name)
+ if VM:
+ vminfo = dict()
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
+ try:
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ except Exception:
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
+ for boot_dev in VM.os.get_boot():
+ vminfo['boot_order'].append(str(boot_dev.dev))
+ vminfo['disks'] = []
+ for DISK in VM.disks.list():
+ disk = dict()
+ disk['name'] = DISK.name
+ disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
+ disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+ disk['interface'] = DISK.interface
+ vminfo['disks'].append(disk)
+ vminfo['ifaces'] = []
+ for NIC in VM.nics.list():
+ iface = dict()
+ iface['name'] = str(NIC.name)
+ iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+ iface['interface'] = NIC.interface
+ iface['mac'] = NIC.mac.address
+ vminfo['ifaces'].append(iface)
+ vminfo[str(NIC.name)] = NIC.mac.address
+ CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
+ if CLUSTER:
+ vminfo['cluster'] = CLUSTER.name
+ else:
+ vminfo = False
+ return vminfo
+
+ def createVMimage(self, name, cluster, template, disks):
+ self.__get_conn()
+ return self.conn.createVMimage(name, cluster, template, disks)
+
+ def createVM(self, name, cluster, os, actiontype):
+ self.__get_conn()
+ return self.conn.createVM(name, cluster, os, actiontype)
+
+ def setMemory(self, name, memory):
+ self.__get_conn()
+ return self.conn.set_Memory(name, memory)
+
+ def setMemoryPolicy(self, name, memory_policy):
+ self.__get_conn()
+ return self.conn.set_Memory_Policy(name, memory_policy)
+
+ def setCPU(self, name, cpu):
+ self.__get_conn()
+ return self.conn.set_CPU(name, cpu)
+
+ def setCPUShare(self, name, cpu_share):
+ self.__get_conn()
+ return self.conn.set_CPU_share(name, cpu_share)
+
+ def setDisks(self, name, disks):
+ self.__get_conn()
+ counter = 0
+ bootselect = False
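+ # Determine whether any disk is explicitly marked bootable; if not,
+ # the first disk is made bootable below.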
+ for disk in disks:
+ if 'bootable' in disk:
+ if disk['bootable'] is True:
+ bootselect = True
+
+ for disk in disks:
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
+ if diskdomain is None:
+ setMsg("`domain` is a required disk key.")
+ setFailed()
+ return False
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
+ diskallocationtype = disk.get('thin', False)
+ diskboot = disk.get('bootable', False)
+
+ if bootselect is False and counter == 0:
+ diskboot = True
+
+ DISK = self.conn.get_disk(diskname)
+
+ if DISK is None:
+ self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
+ else:
+ self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
+ checkFail()
+ counter += 1
+
+ return True
+
+ def setNetworks(self, vmname, ifaces):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+
+ counter = 0
+ length = len(ifaces)
+
+ for NIC in VM.nics.list():
+ if counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ elif str(name) != str(NIC.name):
+ setMsg("ifaces are in the wrong order, rebuilding everything.")
+ for NIC in VM.nics.list():
+ self.conn.del_NIC(vmname, NIC.name)
+ self.setNetworks(vmname, ifaces)
+ checkFail()
+ return True
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ checkFail()
+ interface = iface.get('interface', 'virtio')
+ self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
+ else:
+ self.conn.del_NIC(vmname, NIC.name)
+ counter += 1
+ checkFail()
+
+ while counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ if failed is True:
+ return False
+ interface = iface.get('interface', 'virtio')
+ self.conn.createNIC(vmname, name, vlan, interface)
+
+ counter += 1
+ checkFail()
+ return True
+
+ def setDeleteProtection(self, vmname, del_prot):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if bool(VM.delete_protected) != bool(del_prot):
+ self.conn.set_DeleteProtection(vmname, del_prot)
+ checkFail()
+ setMsg("`delete protection` has been updated.")
+ else:
+ setMsg("`delete protection` already has the right value.")
+ return True
+
+ def setBootOrder(self, vmname, boot_order):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ bootorder = []
+ for boot_dev in VM.os.get_boot():
+ bootorder.append(str(boot_dev.dev))
+
+ if boot_order != bootorder:
+ self.conn.set_BootOrder(vmname, boot_order)
+ setMsg('The boot order has been set')
+ else:
+ setMsg('The boot order has already been set')
+ return True
+
+ def removeVM(self, vmname):
+ self.__get_conn()
+ self.setPower(vmname, "down", 300)
+ return self.conn.remove_VM(vmname)
+
+ def setPower(self, vmname, state, timeout):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if VM is None:
+ setMsg("VM does not exist.")
+ setFailed()
+ return False
+
+ if state == VM.status.state:
+ setMsg("VM state was already " + state)
+ else:
+ if state == "up":
+ setMsg("VM is going to start")
+ self.conn.start_VM(vmname, timeout)
+ setChanged()
+ elif state == "down":
+ setMsg("VM is going to stop")
+ self.conn.stop_VM(vmname, timeout)
+ setChanged()
+ elif state == "restarted":
+ self.setPower(vmname, "down", timeout)
+ checkFail()
+ self.setPower(vmname, "up", timeout)
+ checkFail()
+ setMsg("the vm state is set to " + state)
+ return True
+
+ def setCD(self, vmname, cd_drive):
+ self.__get_conn()
+ if cd_drive:
+ return self.conn.set_CD(vmname, cd_drive)
+ else:
+ return self.conn.remove_CD(vmname)
+
+ def setVMHost(self, vmname, vmhost):
+ self.__get_conn()
+ return self.conn.set_VM_Host(vmname, vmhost)
+
+ # pylint: disable=unreachable
+ VM = self.conn.get_VM(vmname)
+ HOST = self.conn.get_Host(vmhost)
+
+ if VM.placement_policy.host is None:
+ self.conn.set_VM_Host(vmname, vmhost)
+ elif str(VM.placement_policy.host.id) != str(HOST.id):
+ self.conn.set_VM_Host(vmname, vmhost)
+ else:
+ setMsg("VM's startup host was already set to " + vmhost)
+ checkFail()
+
+ if str(VM.status.state) == "up":
+ self.conn.migrate_VM(vmname, vmhost)
+ checkFail()
+
+ return True
+
+ def setHost(self, hostname, cluster, ifaces):
+ self.__get_conn()
+ return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ return True
+
+
+def setFailed():
+ global failed
+ failed = True
+
+
+def setChanged():
+ global changed
+ changed = True
+
+
+def setMsg(message):
+ msg.append(message)
+
+
+def core(module):
+
+ r = RHEV(module)
+
+ state = module.params.get('state', 'present')
+
+ if state == 'ping':
+ r.test()
+ return RHEV_SUCCESS, {"ping": "pong"}
+ elif state == 'info':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+ elif state == 'present':
+ created = False
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+ else:
+ # Create VM
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ template = module.params.get('image')
+ if template:
+ disks = module.params.get('disks')
+ if disks is None:
+ setMsg("disks is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVMimage(name, cluster, template, disks) is False:
+ return RHEV_FAILED, vminfo
+ else:
+ os = module.params.get('osver')
+ if os is None:
+ setMsg("osver is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVM(name, cluster, os, actiontype) is False:
+ return RHEV_FAILED, vminfo
+ created = True
+
+ # Set MEMORY and MEMORY POLICY
+ vminfo = r.getVM(name)
+ memory = module.params.get('vmmem')
+ if memory is not None:
+ memory_policy = module.params.get('mempol')
+ if memory_policy == 0:
+ memory_policy = memory
+ mem_pol_nok = True
+ if int(vminfo['mem_pol']) == memory_policy:
+ setMsg("Memory is correct")
+ mem_pol_nok = False
+
+ mem_nok = True
+ if int(vminfo['memory']) == memory:
+ setMsg("Memory is correct")
+ mem_nok = False
+
+ if memory_policy > memory:
+ setMsg('memory_policy cannot have a higher value than memory.')
+ return RHEV_FAILED, msg
+
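+ # RHEV requires guaranteed memory to stay at or below total memory, so
+ # apply the two updates in an order that never violates that bound.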
+ if mem_nok and mem_pol_nok:
+ if memory_policy > int(vminfo['memory']):
+ r.setMemory(vminfo['name'], memory)
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ else:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ r.setMemory(vminfo['name'], memory)
+ elif mem_nok:
+ r.setMemory(vminfo['name'], memory)
+ elif mem_pol_nok:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ checkFail()
+
+ # Set CPU
+ cpu = module.params.get('vmcpu')
+ if int(vminfo['cpu_cores']) == cpu:
+ setMsg("Number of CPUs is correct")
+ else:
+ if r.setCPU(vminfo['name'], cpu) is False:
+ return RHEV_FAILED, msg
+
+ # Set CPU SHARE
+ cpu_share = module.params.get('cpu_share')
+ if cpu_share is not None:
+ if int(vminfo['cpu_shares']) == cpu_share:
+ setMsg("CPU share is correct.")
+ else:
+ if r.setCPUShare(vminfo['name'], cpu_share) is False:
+ return RHEV_FAILED, msg
+
+ # Set DISKS
+ disks = module.params.get('disks')
+ if disks is not None:
+ if r.setDisks(vminfo['name'], disks) is False:
+ return RHEV_FAILED, msg
+
+ # Set NETWORKS
+ ifaces = module.params.get('ifaces', None)
+ if ifaces is not None:
+ if r.setNetworks(vminfo['name'], ifaces) is False:
+ return RHEV_FAILED, msg
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Set Boot Order
+ boot_order = module.params.get('boot_order')
+ if r.setBootOrder(vminfo['name'], boot_order) is False:
+ return RHEV_FAILED, msg
+
+ # Set VM Host
+ vmhost = module.params.get('vmhost')
+ if vmhost:
+ if r.setVMHost(vminfo['name'], vmhost) is False:
+ return RHEV_FAILED, msg
+
+ vminfo = r.getVM(name)
+ vminfo['created'] = created
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ if actiontype == 'host':
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ ifaces = module.params.get('ifaces')
+ if ifaces is None:
+ setMsg("ifaces is a required argument.")
+ setFailed()
+ if r.setHost(name, cluster, ifaces) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+ elif state == 'absent':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Remove VM
+ if r.removeVM(vminfo['name']) is False:
+ return RHEV_FAILED, msg
+ setMsg('VM has been removed.')
+ vminfo['state'] = 'DELETED'
+ else:
+ setMsg('VM was already removed.')
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'up' or state == 'down' or state == 'restarted':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ timeout = module.params.get('timeout')
+ if r.setPower(name, state, timeout) is False:
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'cd':
+ name = module.params.get('name')
+ cd_drive = module.params.get('cd_drive')
+ if r.setCD(name, cd_drive) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']),
+ user=dict(type='str', default='admin@internal'),
+ password=dict(type='str', required=True, no_log=True),
+ server=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int', default=443),
+ insecure_api=dict(type='bool', default=False),
+ name=dict(type='str'),
+ image=dict(type='str'),
+ datacenter=dict(type='str', default="Default"),
+ type=dict(type='str', default='server', choices=['desktop', 'host', 'server']),
+ cluster=dict(type='str', default=''),
+ vmhost=dict(type='str'),
+ vmcpu=dict(type='int', default=2),
+ vmmem=dict(type='int', default=1),
+ disks=dict(type='list', elements='str'),
+ osver=dict(type='str', default="rhel_6x64"),
+ ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']),
+ timeout=dict(type='int'),
+ mempol=dict(type='int', default=1),
+ vm_ha=dict(type='bool', default=True),
+ cpu_share=dict(type='int', default=0),
+ boot_order=dict(type='list', elements='str', default=['hd', 'network']),
+ del_prot=dict(type='bool', default=True),
+ cd_drive=dict(type='str'),
+ ),
+ )
+
+ if not HAS_SDK:
+ module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.")
+
+ rc = RHEV_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py
new file mode 100644
index 00000000..63be0323
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+# Copyright: (c) Vincent Van de Kussen
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhn_channel
+short_description: Adds or removes Red Hat software channels
+description:
+ - Adds or removes Red Hat software channels.
+author:
+- Vincent Van der Kussen (@vincentvdk)
+notes:
+ - This module fetches the system id from RHN.
+ - This module doesn't support I(check_mode).
+options:
+ name:
+ description:
+ - Name of the software channel.
+ required: true
+ type: str
+ sysname:
+ description:
+ - Name of the system as it is known in RHN/Satellite.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the channel should be present or not, taking action if the state is different from what is stated.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ url:
+ description:
+ - The full URL to the RHN/Satellite API.
+ required: true
+ type: str
+ user:
+ description:
+ - RHN/Satellite login.
+ required: true
+ type: str
+ password:
+ description:
+ - RHN/Satellite password.
+ aliases: [pwd]
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - If C(False), SSL certificates will not be validated.
+ - This should only be set to C(False) when used on self-controlled sites
+ using self-signed certificates, and when you are absolutely sure that
+ nobody can modify traffic between the module and the site.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = '''
+- name: Add a Red Hat software channel
+ community.general.rhn_channel:
+ name: rhel-x86_64-server-v2vwin-6
+ sysname: server01
+ url: https://rhn.redhat.com/rpc/api
+ user: rhnuser
+ password: guessme
+ delegate_to: localhost
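+
+# A minimal sketch of removing a channel again with state=absent; the
+# channel, system and credential values are reused from above for
+# illustration only.
+- name: Remove a Red Hat software channel
+  community.general.rhn_channel:
+    name: rhel-x86_64-server-v2vwin-6
+    sysname: server01
+    url: https://rhn.redhat.com/rpc/api
+    user: rhnuser
+    password: guessme
+    state: absent
+  delegate_to: localhost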
+'''
+
+import ssl
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+def get_systemid(client, session, sysname):
+ systems = client.system.listUserSystems(session)
+ for system in systems:
+ if system.get('name') == sysname:
+ return int(system.get('id'))
+
+
+def subscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.append(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def unsubscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.remove(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def base_channels(client, session, sys_id):
+ basechan = client.channel.software.listSystemChannels(session, sys_id)
+ try:
+ chans = [item['label'] for item in basechan]
+ except KeyError:
+ chans = [item['channel_label'] for item in basechan]
+ return chans
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ sysname=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=True, aliases=['pwd'], no_log=True),
+ validate_certs=dict(type='bool', default=True),
+ )
+ )
+
+ state = module.params['state']
+ channelname = module.params['name']
+ systname = module.params['sysname']
+ saturl = module.params['url']
+ user = module.params['user']
+ password = module.params['password']
+ validate_certs = module.params['validate_certs']
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Python 2.7.8 and older do not verify HTTPS certificates by default
+ ssl_context = None
+
+ # initialize connection
+ if ssl_context:
+ client = xmlrpc_client.ServerProxy(saturl, context=ssl_context)
+ else:
+ client = xmlrpc_client.Server(saturl)
+
+ try:
+ session = client.auth.login(user, password)
+ except Exception as e:
+ module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e))
+
+ if not session:
+ module.fail_json(msg="Failed to establish session with Satellite server.")
+
+ # get systemid
+ try:
+ sys_id = get_systemid(client, session, systname)
+ except Exception as e:
+ module.fail_json(msg="Unable to get system id: %s " % to_text(e))
+
+ if not sys_id:
+ module.fail_json(msg="Failed to get system id.")
+
+ # get channels for system
+ try:
+ chans = base_channels(client, session, sys_id)
+ except Exception as e:
+ module.fail_json(msg="Unable to get channel information: %s " % to_text(e))
+
+ try:
+ if state == 'present':
+ if channelname in chans:
+ module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
+ else:
+ subscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s added" % channelname)
+
+ if state == 'absent':
+ if channelname not in chans:
+ module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
+ else:
+ unsubscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s removed" % channelname)
+ except Exception as e:
+ module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e)))
+ finally:
+ client.auth.logout(session)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py
new file mode 100644
index 00000000..dfc408a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) James Laska
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhn_register
+short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
+description:
+ - Manage registration to the Red Hat Network.
+author:
+- James Laska (@jlaska)
+notes:
+ - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead.
+ - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey.
+requirements:
+ - rhnreg_ks
+ - either libxml2 or lxml
+options:
+ state:
+ description:
+ - Whether to register (C(present)), or unregister (C(absent)) a system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ username:
+ description:
+ - Red Hat Network username.
+ type: str
+ password:
+ description:
+ - Red Hat Network password.
+ type: str
+ server_url:
+ description:
+ - Specify an alternative Red Hat Network server URL.
+ - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date).
+ type: str
+ activationkey:
+ description:
+ - Supply an activation key for use with registration.
+ type: str
+ profilename:
+ description:
+ - Supply a profile name for use with registration.
+ type: str
+ ca_cert:
+ description:
+ - Supply a custom SSL CA certificate file for use with registration.
+ type: path
+ aliases: [ sslcacert ]
+ systemorgid:
+ description:
+ - Supply an organizational id for use with registration.
+ type: str
+ channels:
+ description:
+ - Optionally specify a list of channels to subscribe to upon successful registration.
+ type: list
+ elements: str
+ default: []
+ enable_eus:
+ description:
+ - If C(yes), extended update support will be requested.
+ type: bool
+ default: no
+ nopackages:
+ description:
+ - If C(yes), the registered node will not upload its installed packages information to Satellite server.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Unregister system from RHN
+ community.general.rhn_register:
+ state: absent
+ username: joe_user
+ password: somepass
+
+- name: Register as user with password and auto-subscribe to available content
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+
+- name: Register with activationkey and enable extended update support
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ enable_eus: yes
+
+- name: Register with activationkey and set a profilename which may differ from the hostname
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ profilename: host.example.com.custom
+
+- name: Register as user with password against a satellite server
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ server_url: https://xmlrpc.my.satellite/XMLRPC
+
+- name: Register as user with password and enable channels
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import os
+import sys
+
+# Attempt to import rhn client tools
+sys.path.insert(0, '/usr/share/rhn')
+try:
+ import up2date_client
+ import up2date_client.config
+ HAS_UP2DATE_CLIENT = True
+except ImportError:
+ HAS_UP2DATE_CLIENT = False
+
+# INSERT REDHAT SNIPPETS
+from ansible_collections.community.general.plugins.module_utils import redhat
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import urllib, xmlrpc_client
+
+
+class Rhn(redhat.RegistrationBase):
+
+ def __init__(self, module=None, username=None, password=None):
+ redhat.RegistrationBase.__init__(self, module, username, password)
+ self.config = self.load_config()
+ self.server = None
+ self.session = None
+
+ def logout(self):
+ if self.session is not None:
+ self.server.auth.logout(self.session)
+
+ def load_config(self):
+ '''
+ Read configuration from /etc/sysconfig/rhn/up2date
+ '''
+ if not HAS_UP2DATE_CLIENT:
+ return None
+
+ config = up2date_client.config.initUp2dateConfig()
+
+ return config
+
+ @property
+ def server_url(self):
+ return self.config['serverURL']
+
+ @property
+ def hostname(self):
+ '''
+ Return the non-xmlrpc RHN hostname. This is a convenience method
+ used for displaying a more readable RHN hostname.
+
+ Returns: str
+ '''
+ url = urllib.parse.urlparse(self.server_url)
+ return url[1].replace('xmlrpc.', '')
+
+ @property
+ def systemid(self):
+ systemid = None
+ xpath_str = "//member[name='system_id']/value/string"
+
+ if os.path.isfile(self.config['systemIdPath']):
+ fd = open(self.config['systemIdPath'], 'r')
+ xml_data = fd.read()
+ fd.close()
+
+ # Ugh, xml parsing time ...
+ # First, try parsing with libxml2 ...
+ if systemid is None:
+ try:
+ import libxml2
+ doc = libxml2.parseDoc(xml_data)
+ ctxt = doc.xpathNewContext()
+ systemid = ctxt.xpathEval(xpath_str)[0].content
+ doc.freeDoc()
+ ctxt.xpathFreeContext()
+ except ImportError:
+ pass
+
+ # m-kay, let's try with lxml now ...
+ if systemid is None:
+ try:
+ from lxml import etree
+ root = etree.fromstring(xml_data)
+ systemid = root.xpath(xpath_str)[0].text
+ except ImportError:
+ raise Exception('"libxml2" or "lxml" is required for this module.')
+
+ # Strip the 'ID-' prefix
+ if systemid is not None and systemid.startswith('ID-'):
+ systemid = systemid[3:]
+
+ return int(systemid)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+
+ Returns: True|False
+ '''
+ return os.path.isfile(self.config['systemIdPath'])
+
+ def configure_server_url(self, server_url):
+ '''
+ Configure server_url for registration
+ '''
+
+ self.config.set('serverURL', server_url)
+ self.config.save()
+
+ def enable(self):
+ '''
+ Prepare the system for RHN registration. This includes ...
+ * enabling the rhnplugin yum plugin
+ * disabling the subscription-manager yum plugin
+ '''
+ redhat.RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', True)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False):
+ '''
+ Register system to RHN. If enable_eus=True, extended update
+ support will be requested.
+ '''
+ register_cmd = ['/usr/sbin/rhnreg_ks', '--force']
+ if self.username:
+ register_cmd.extend(['--username', self.username, '--password', self.password])
+ if self.server_url:
+ register_cmd.extend(['--serverUrl', self.server_url])
+ if enable_eus:
+ register_cmd.append('--use-eus-channel')
+ if nopackages:
+ register_cmd.append('--nopackages')
+ if activationkey is not None:
+ register_cmd.extend(['--activationkey', activationkey])
+ if profilename is not None:
+ register_cmd.extend(['--profilename', profilename])
+ if sslcacert is not None:
+ register_cmd.extend(['--sslCACert', sslcacert])
+ if systemorgid is not None:
+ register_cmd.extend(['--systemorgid', systemorgid])
+ rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)
+
+ def api(self, method, *args):
+ '''
+ Convenience RPC wrapper
+ '''
+ if self.server is None:
+ if self.hostname != 'rhn.redhat.com':
+ url = "https://%s/rpc/api" % self.hostname
+ else:
+ url = "https://xmlrpc.%s/rpc/api" % self.hostname
+ self.server = xmlrpc_client.ServerProxy(url)
+ self.session = self.server.auth.login(self.username, self.password)
+
+ func = getattr(self.server, method)
+ return func(self.session, *args)
+
+ def unregister(self):
+ '''
+ Unregister a previously registered system
+ '''
+
+ # Initiate RPC connection
+ self.api('system.deleteSystems', [self.systemid])
+
+ # Remove systemid file
+ os.unlink(self.config['systemIdPath'])
+
+ def subscribe(self, channels):
+ if not channels:
+ return
+
+ if self._is_hosted():
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ new_channels = [item['channel_label'] for item in current_channels]
+ new_channels.extend(channels)
+ return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
+
+ else:
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ current_channels = [item['label'] for item in current_channels]
+ new_base = None
+ new_childs = []
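+ # Satellite/Spacewalk distinguishes the base channel from child
+ # channels, so split the requested list and set each kind through
+ # its own API call below.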
+ for ch in channels:
+ if ch in current_channels:
+ continue
+ if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
+ new_base = ch
+ else:
+ if ch not in new_childs:
+ new_childs.append(ch)
+ out_base = 0
+ out_childs = 0
+
+ if new_base:
+ out_base = self.api('system.setBaseChannel', self.systemid, new_base)
+
+ if new_childs:
+ out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
+
+ return out_base and out_childs
+
+ def _is_hosted(self):
+ '''
+ Return True if we are running against Hosted (rhn.redhat.com) or
+ False otherwise (when running against Satellite or Spacewalk)
+ '''
+ return 'rhn.redhat.com' in self.hostname
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ server_url=dict(type='str'),
+ activationkey=dict(type='str', no_log=True),
+ profilename=dict(type='str'),
+ ca_cert=dict(type='path', aliases=['sslcacert']),
+ systemorgid=dict(type='str'),
+ enable_eus=dict(type='bool', default=False),
+ nopackages=dict(type='bool', default=False),
+ channels=dict(type='list', elements='str', default=[]),
+ ),
+ # username/password is required for state=absent, or if channels is not empty
+ # (basically anything that uses self.api requires username/password) but it doesn't
+ # look like we can express that with required_if/required_together/mutually_exclusive
+
+ # only username+password can be used for unregister
+ required_if=[['state', 'absent', ['username', 'password']]],
+ )
+
+ if not HAS_UP2DATE_CLIENT:
+ module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+
+ state = module.params['state']
+ activationkey = module.params['activationkey']
+ profilename = module.params['profilename']
+ sslcacert = module.params['ca_cert']
+ systemorgid = module.params['systemorgid']
+ channels = module.params['channels']
+ enable_eus = module.params['enable_eus']
+ nopackages = module.params['nopackages']
+
+ rhn = Rhn(module=module, username=username, password=password)
+
+ # use the provided server url and persist it to the rhn config.
+ if server_url:
+ rhn.configure_server_url(server_url)
+
+ if not rhn.server_url:
+ module.fail_json(
+ msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
+ )
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Check for missing parameters ...
+ if not (activationkey or rhn.username or rhn.password):
+ module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username,
+ rhn.password))
+ if not activationkey and not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
+
+ # Register system
+ if rhn.is_registered:
+ module.exit_json(changed=False, msg="System already registered.")
+
+ try:
+ rhn.enable()
+ rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages)
+ rhn.subscribe(channels)
+ except Exception as exc:
+ module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc))
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhn.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+
+ if not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password")
+
+ try:
+ rhn.unregister()
+ except Exception as exc:
+ module.fail_json(msg="Failed to unregister: %s" % exc)
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py
new file mode 100644
index 00000000..22b280f1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+
+# (c) 2018, Sean Myers <sean.myers@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_release
+short_description: Set or Unset RHSM Release version
+description:
+ - Sets or unsets the release version used by RHSM repositories.
+notes:
+ - This module will fail on an unregistered system.
+ Use the C(redhat_subscription) module to register a system
+ prior to setting the RHSM release.
+requirements:
+ - Red Hat Enterprise Linux 6+ with subscription-manager installed
+options:
+ release:
+ description:
+ - RHSM release version to use (use null to unset)
+ required: true
+ type: str
+author:
+ - Sean Myers (@seandst)
+'''
+
+EXAMPLES = '''
+# Set release version to 7.1
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "7.1"
+
+# Set release version to 6Server
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "6Server"
+
+# Unset release version
+- name: Unset RHSM release version
+ community.general.rhsm_release:
+ release: null
+'''
+
+RETURN = '''
+current_release:
+ description: The current RHSM release version value
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import re
+
+# Matches release-like values such as 7.2, 6.10, 10Server,
+# but rejects unlikely values, like 100Server, 100.0, 1.100, etc.
+release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b')
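+# Illustrative matches (not part of the original module):
+#   release_matcher.findall('Release: 7.2')  -> ['7.2']
+#   release_matcher.findall('6Server')       -> ['6Server']
+#   release_matcher.findall('100Server')     -> []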
+
+
+def _sm_release(module, *args):
+ # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes
+ # "subscription-manager release --set 0.1"
+ sm_bin = module.get_bin_path('subscription-manager', required=True)
+ cmd = '{0} release {1}'.format(sm_bin, " ".join(args))
+ # delegate nonzero rc handling to run_command
+ return module.run_command(cmd, check_rc=True)
+
+
+def get_release(module):
+ # Get the current release version, or None if release unset
+ rc, out, err = _sm_release(module, '--show')
+ try:
+ match = release_matcher.findall(out)[0]
+ except IndexError:
+ # 0'th index did not exist; no matches
+ match = None
+
+ return match
+
+
+def set_release(module, release):
+ # Set current release version, or unset if release is None
+ if release is None:
+ args = ('--unset',)
+ else:
+ args = ('--set', release)
+
+ return _sm_release(module, *args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ release=dict(type='str', required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ target_release = module.params['release']
+
+ # sanity check: the target release at least looks like a valid release
+ if target_release and not release_matcher.findall(target_release):
+ module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release))
+
+ # Will fail with useful error from s-m if system not subscribed
+ current_release = get_release(module)
+
+ changed = (target_release != current_release)
+ if not module.check_mode and changed:
+ set_release(module, target_release)
+ # If setting the release fails, then a fail_json would have exited with
+ # the s-m error, e.g. "No releases match '7.20'...". If not, then the
+ # current release is now set to the target release (job's done)
+ current_release = target_release
+
+ module.exit_json(current_release=current_release, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py
new file mode 100644
index 00000000..7317be66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_repository
+short_description: Manage RHSM repositories using the subscription-manager command
+description:
+ - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription
+ Management entitlement platform using the C(subscription-manager) command.
+author: Giovanni Sciortino (@giovannisciortino)
+notes:
+ - In order to manage RHSM repositories the system must be already registered
+ to RHSM manually or using the Ansible C(redhat_subscription) module.
+
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+ - The desired repository state. C(present) and C(enabled) are
+ equivalent, as are C(absent) and C(disabled).
+ choices: [present, enabled, absent, disabled]
+ default: "enabled"
+ type: str
+ name:
+ description:
+ - The ID of repositories to enable.
+ - To operate on several repositories this can accept a comma separated
+ list or a YAML list.
+ required: True
+ type: list
+ elements: str
+ purge:
+ description:
+ - Disable all currently enabled repositories that are not specified in C(name).
+ Only set this to C(True) if passing in a list of repositories to the C(name) field.
+ Using this with C(loop) will most likely not have the desired result.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Enable a RHSM repository
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+
+- name: Disable all RHSM repositories
+ community.general.rhsm_repository:
+ name: '*'
+ state: disabled
+
+- name: Enable all repositories starting with rhel-6-server
+ community.general.rhsm_repository:
+ name: rhel-6-server*
+ state: enabled
+
+- name: Disable all repositories except rhel-7-server-rpms
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+ purge: True
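+
+# A sketch of enabling several repositories in one task; the repository
+# IDs are illustrative.
+- name: Enable multiple RHSM repositories
+  community.general.rhsm_repository:
+    name:
+      - rhel-7-server-rpms
+      - rhel-7-server-optional-rpms
+    state: enabled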
+'''
+
+RETURN = '''
+repositories:
+ description:
+ - The list of RHSM repositories with their states.
+ - When this module is used to change the repository states, this list contains the updated states after the changes.
+ returned: success
+ type: list
+'''
+
+import re
+import os
+from fnmatch import fnmatch
+from copy import deepcopy
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_subscription_manager(module, arguments):
+ # Execute subscription-manager with arguments and manage common errors
+ rhsm_bin = module.get_bin_path('subscription-manager')
+ if not rhsm_bin:
+ module.fail_json(msg='The executable file subscription-manager was not found in PATH')
+
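+ # Force the C locale so the literal error-string comparisons below see
+ # untranslated subscription-manager output.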
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
+
+ if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
+ module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
+ elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
+ module.fail_json(msg='This system has no repositories available through subscriptions')
+ elif rc == 1:
+ module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
+ else:
+ return rc, out, err
+
+
+def get_repository_list(module, list_parameter):
+ # Generate RHSM repository list and return a list of dict
+ if list_parameter == 'list_enabled':
+ rhsm_arguments = ['repos', '--list-enabled']
+ elif list_parameter == 'list_disabled':
+ rhsm_arguments = ['repos', '--list-disabled']
+ elif list_parameter == 'list':
+ rhsm_arguments = ['repos', '--list']
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+
+ skip_lines = [
+ '+----------------------------------------------------------+',
+ ' Available Repositories in /etc/yum.repos.d/redhat.repo'
+ ]
+ repo_id_re = re.compile(r'Repo ID:\s+(.*)')
+ repo_name_re = re.compile(r'Repo Name:\s+(.*)')
+ repo_url_re = re.compile(r'Repo URL:\s+(.*)')
+ repo_enabled_re = re.compile(r'Enabled:\s+(.*)')
+
+ repo_id = ''
+ repo_name = ''
+ repo_url = ''
+ repo_enabled = ''
+
+ repo_result = []
+ for line in out.splitlines():
+ if line == '' or line in skip_lines:
+ continue
+
+ repo_id_match = repo_id_re.match(line)
+ if repo_id_match:
+ repo_id = repo_id_match.group(1)
+ continue
+
+ repo_name_match = repo_name_re.match(line)
+ if repo_name_match:
+ repo_name = repo_name_match.group(1)
+ continue
+
+ repo_url_match = repo_url_re.match(line)
+ if repo_url_match:
+ repo_url = repo_url_match.group(1)
+ continue
+
+ repo_enabled_match = repo_enabled_re.match(line)
+ if repo_enabled_match:
+ repo_enabled = repo_enabled_match.group(1)
+
+ repo = {
+ "id": repo_id,
+ "name": repo_name,
+ "url": repo_url,
+ "enabled": True if repo_enabled == '1' else False
+ }
+
+ repo_result.append(repo)
+
+ return repo_result
+
+
+def repository_modify(module, state, name, purge=False):
+ name = set(name)
+ current_repo_list = get_repository_list(module, 'list')
+ updated_repo_list = deepcopy(current_repo_list)
+ matched_existing_repo = {}
+ for repoid in name:
+ matched_existing_repo[repoid] = []
+ for idx, repo in enumerate(current_repo_list):
+ if fnmatch(repo['id'], repoid):
+ matched_existing_repo[repoid].append(repo)
+ # Update current_repo_list to return it as result variable
+ updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False
+
+ changed = False
+ results = []
+ diff_before = ""
+ diff_after = ""
+ rhsm_arguments = ['repos']
+
+ for repoid in matched_existing_repo:
+ if len(matched_existing_repo[repoid]) == 0:
+ results.append("%s is not a valid repository ID" % repoid)
+ module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
+ for repo in matched_existing_repo[repoid]:
+ if state in ['disabled', 'absent']:
+ if repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
+ results.append("Repository '%s' is disabled for this system" % repo['id'])
+ rhsm_arguments += ['--disable', repo['id']]
+ elif state in ['enabled', 'present']:
+ if not repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
+ results.append("Repository '%s' is enabled for this system" % repo['id'])
+ rhsm_arguments += ['--enable', repo['id']]
+
+ # Disable all enabled repos on the system that are not in the task and not
+ # marked as disabled by the task
+ if purge:
+ enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])
+ matched_repoids_set = set(matched_existing_repo.keys())
+ difference = enabled_repo_ids.difference(matched_repoids_set)
+ if len(difference) > 0:
+ for repoid in difference:
+ changed = True
+ diff_before += "Repository '{repoid}' is enabled for this system\n".format(repoid=repoid)
+ diff_after += "Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)
+ results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
+ rhsm_arguments.extend(['--disable', repoid])
+
+ diff = {'before': diff_before,
+ 'after': diff_after,
+ 'before_header': "RHSM repositories",
+ 'after_header': "RHSM repositories"}
+
+ if not module.check_mode and changed:
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+ results = out.splitlines()
+ module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
+ purge=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ purge = module.params['purge']
+
+ repository_modify(module, state, name, purge)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py
new file mode 100644
index 00000000..848a5e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: riak
+short_description: This module handles some common Riak operations
+description:
+ - This module can be used to join nodes to a cluster or check
+ the status of the cluster.
+author:
+ - "James Martin (@jsmartin)"
+ - "Drew Kerrigan (@drewkerrigan)"
+options:
+ command:
+ description:
+ - The command you would like to perform against the cluster.
+ choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
+ config_dir:
+ description:
+ - The path to the riak configuration directory
+ default: /etc/riak
+ http_conn:
+ description:
+      - The IP address and port that Riak listens on for HTTP queries
+ default: 127.0.0.1:8098
+ target_node:
+ description:
+ - The target node for certain operations (join, ping)
+ default: riak@127.0.0.1
+ wait_for_handoffs:
+ description:
+ - Number of seconds to wait for handoffs to complete.
+ wait_for_ring:
+ description:
+ - Number of seconds to wait for all nodes to agree on the ring.
+ wait_for_service:
+ description:
+ - Waits for a riak service to come online before continuing.
+ choices: ['kv']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Join a Riak node to another node
+ community.general.riak:
+ command: join
+ target_node: riak@10.1.1.1
+
+- name: Wait for handoffs to finish. Use with async and poll.
+ community.general.riak:
+ wait_for_handoffs: yes
+
+- name: Wait for riak_kv service to startup
+ community.general.riak:
+ wait_for_service: kv
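+
+# A sketch not found in the original examples; the 600-second timeout is illustrative.
+- name: Wait for all nodes to agree on the ring. Use with async and poll.
+  community.general.riak:
+    wait_for_ring: 600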
+'''
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def ring_check(module, riak_admin_bin):
+ cmd = '%s ringready' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+ return True
+ else:
+ return False
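+# NOTE: ring_check shells out to `riak-admin ringready`; rc 0 together with the
+# "TRUE All nodes agree on the ring" marker indicates a converged ring.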
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=False, default=None, choices=[
+ 'ping', 'kv_test', 'join', 'plan', 'commit']),
+ config_dir=dict(default='/etc/riak', type='path'),
+ http_conn=dict(required=False, default='127.0.0.1:8098'),
+ target_node=dict(default='riak@127.0.0.1', required=False),
+            wait_for_handoffs=dict(default=0, type='int'),
+            wait_for_ring=dict(default=0, type='int'),
+ wait_for_service=dict(
+ required=False, default=None, choices=['kv']),
+ validate_certs=dict(default=True, type='bool'))
+ )
+
+ command = module.params.get('command')
+ http_conn = module.params.get('http_conn')
+ target_node = module.params.get('target_node')
+ wait_for_handoffs = module.params.get('wait_for_handoffs')
+ wait_for_ring = module.params.get('wait_for_ring')
+ wait_for_service = module.params.get('wait_for_service')
+
+    # make sure riak commands are on the path
+    riak_bin = module.get_bin_path('riak', required=True)
+    riak_admin_bin = module.get_bin_path('riak-admin', required=True)
+
+ timeout = time.time() + 120
+ while True:
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout, could not fetch Riak stats.')
+ (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+ if info['status'] == 200:
+ stats_raw = response.read()
+ break
+ time.sleep(5)
+
+    # here we attempt to parse the stats we just fetched
+ try:
+ stats = json.loads(stats_raw)
+ except Exception:
+ module.fail_json(msg='Could not parse Riak stats.')
+
+ node_name = stats['nodename']
+ nodes = stats['ring_members']
+ ring_size = stats['ring_creation_size']
+ rc, out, err = module.run_command([riak_bin, 'version'])
+ version = out.strip()
+
+ result = dict(node_name=node_name,
+ nodes=nodes,
+ ring_size=ring_size,
+ version=version)
+
+ if command == 'ping':
+ cmd = '%s ping %s' % (riak_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['ping'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'kv_test':
+ cmd = '%s test' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['kv_test'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'join':
+ if nodes.count(node_name) == 1 and len(nodes) > 1:
+ result['join'] = 'Node is already in cluster or staged to be in cluster.'
+ else:
+ cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['join'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'plan':
+ cmd = '%s cluster plan' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['plan'] = out
+ if 'Staged Changes' in out:
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'commit':
+ cmd = '%s cluster commit' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['commit'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+    # this could take a while; recommended to run in async mode
+ if wait_for_handoffs:
+ timeout = time.time() + wait_for_handoffs
+ while True:
+ cmd = '%s transfers' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if 'No transfers active' in out:
+ result['handoffs'] = 'No transfers active.'
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for handoffs.')
+
+ if wait_for_service:
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
+ rc, out, err = module.run_command(cmd)
+ result['service'] = out
+
+ if wait_for_ring:
+ timeout = time.time() + wait_for_ring
+ while True:
+ if ring_check(module, riak_admin_bin):
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
+
+ result['ring_ready'] = ring_check(module, riak_admin_bin)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py
new file mode 100644
index 00000000..13a93dd8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Deepak Kothandan <deepak.kothandan@outlook.com>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rocketchat
+short_description: Send notifications to Rocket Chat
+description:
+ - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+      - The domain for your environment without protocol. (for example
+        C(example.com) or C(chat.example.com))
+ required: true
+ token:
+ type: str
+ description:
+ - Rocket Chat Incoming Webhook integration token. This provides
+ authentication to Rocket Chat's Incoming webhook for posting
+ messages.
+ required: true
+ protocol:
+ type: str
+ description:
+      - Specify the protocol to use for the webhook URL, that is, C(http) or C(https).
+ default: https
+ choices:
+ - 'http'
+ - 'https'
+ msg:
+ type: str
+ description:
+ - Message to be sent.
+ channel:
+ type: str
+ description:
+      - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+        specified during the creation of the webhook.
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: "https://www.ansible.com/favicon.ico"
+ icon_emoji:
+ type: str
+ description:
+      - Emoji for the message sender. The representation for the available emojis can be
+        obtained from Rocket Chat (for example C(:thumbsup:)). If I(icon_emoji) is set, I(icon_url) will not be used.
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+      - Allow text to use default colors. Use the default of C(normal) to avoid sending a custom color bar at the start of the message.
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments.
+'''
+
+EXAMPLES = """
+- name: Send notification message via Rocket Chat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Rocket Chat all options
+ community.general.rocketchat:
+ domain: chat.example.com
+ token: thetoken/generatedby/rocketchat
+ msg: '{{ inventory_hostname }} completed'
+    channel: '#ansible'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+ delegate_to: localhost
+
+- name: Use the attachments API
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ attachments:
+ - text: Display my system load on host A and B
+        color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: 'load average: 0,74, 0,66, 0,63'
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+ delegate_to: localhost
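+
+# A sketch not found in the original examples: icon_emoji takes precedence over
+# icon_url when both are set.
+- name: Send notification message with an emoji avatar
+  community.general.rocketchat:
+    token: thetoken/generatedby/rocketchat
+    domain: chat.example.com
+    msg: '{{ inventory_hostname }} completed'
+    icon_emoji: ':thumbsup:'
+  delegate_to: localhost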
+"""
+
+RETURN = """
+changed:
+ description: A flag indicating if any change was made or not.
+ returned: success
+ type: bool
+ sample: false
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
+
+
+def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ payload = dict(attachments=[dict(text=text, color=color)])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+
+    if attachments is not None:
+        if 'attachments' not in payload:
+            payload['attachments'] = []
+
+        for attachment in attachments:
+            if 'fallback' not in attachment:
+                attachment['fallback'] = attachment['text']
+            payload['attachments'].append(attachment)
+
+ payload = "payload=" + module.jsonify(payload)
+ return payload
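+# NOTE: the string returned above is a form-encoded body of the shape
+# payload=<JSON object>, which do_notify_rocketchat below sends as POST data.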
+
+
+def do_notify_rocketchat(module, domain, token, protocol, payload):
+
+ if token.count('/') < 1:
+ module.fail_json(msg="Invalid Token specified, provide a valid token")
+
+ rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
+
+ response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True),
+ token=dict(type='str', required=True, no_log=True),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ msg=dict(type='str', required=False),
+ channel=dict(type='str'),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str'),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments=dict(type='list', required=False)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ protocol = module.params['protocol']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments)
+ do_notify_rocketchat(module, domain, token, protocol, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
new file mode 100644
index 00000000..161361b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+author: "Max Riveiro (@kavu)"
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+options:
+ token:
+ type: str
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ type: str
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ type: str
+ description:
+ - Revision number/sha being deployed.
+ required: true
+ user:
+ type: str
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ type: str
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ type: str
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+'''
+
+EXAMPLES = '''
+ - name: Rollbar deployment notification
+ community.general.rollbar_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: '4.2'
+ rollbar_user: admin
+ comment: Test Deploy
+
+ - name: Notify rollbar about current git revision deployment by current user
+ community.general.rollbar_deployment:
+ token: "{{ rollbar_access_token }}"
+ environment: production
+ revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}"
+ user: "{{ lookup('env', 'USER') }}"
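+
+  # A sketch not found in the original examples; the endpoint URL is illustrative.
+  - name: Notify a self-hosted Rollbar-compatible endpoint
+    community.general.rollbar_deployment:
+      token: "{{ rollbar_access_token }}"
+      environment: staging
+      revision: '1.0'
+      url: 'https://rollbar.example.com/api/1/deploy/'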
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ revision=dict(required=True),
+ user=dict(required=False),
+ rollbar_user=dict(required=False),
+ comment=dict(required=False),
+ url=dict(
+ required=False,
+ default='https://api.rollbar.com/api/1/deploy/'
+ ),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ params = dict(
+ access_token=module.params['token'],
+ environment=module.params['environment'],
+ revision=module.params['revision']
+ )
+
+ if module.params['user']:
+ params['local_username'] = module.params['user']
+
+ if module.params['rollbar_user']:
+ params['rollbar_username'] = module.params['rollbar_user']
+
+ if module.params['comment']:
+ params['comment'] = module.params['comment']
+
+ url = module.params.get('url')
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data, method='POST')
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
new file mode 100644
index 00000000..1caa159b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies.
+description:
+ - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+      - Create or remove Rundeck ACL policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+      - Sets the ACL policy name.
+ required: True
+ url:
+ type: str
+ description:
+ - Sets the rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+      - Sets the API version used by the module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ project:
+ type: str
+ description:
+      - Sets the project which receives the ACL policy.
+ - If unset, it's a system ACL policy.
+ policy:
+ type: str
+ description:
+ - Sets the ACL policy content.
+ - ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html.
+ - It can be a YAML string or a pure Ansible inventory YAML object.
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create or update a rundeck ACL policy in project Ansible
+ community.general.rundeck_acl_policy:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+ project: "Ansible"
+ policy:
+ description: "my policy"
+ context:
+ application: rundeck
+ for:
+ project:
+ - allow: read
+ by:
+ group: "build"
+
+- name: Remove a rundeck system policy
+ community.general.rundeck_acl_policy:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
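+
+# A sketch not found in the original examples: the policy can also be passed as
+# a literal YAML string instead of an inventory YAML object.
+- name: Create a system ACL policy from a YAML string
+  community.general.rundeck_acl_policy:
+    name: "Readers_Policy"
+    url: "https://rundeck.example.org"
+    token: "mytoken"
+    state: present
+    policy: |
+      description: read-only access
+      context:
+        application: rundeck
+      for:
+        project:
+          - allow: read
+      by:
+        group: "readers"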
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+  description: Dictionary containing ACL policy information before modification.
+ returned: success
+ type: dict
+after:
+  description: Dictionary containing ACL policy information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils._text import to_text
+import json
+import re
+
+
+class RundeckACLManager:
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
+ if resp is not None:
+ resp = resp.read()
+ if resp != b"":
+ try:
+ json_resp = json.loads(to_text(resp, errors='surrogate_or_strict'))
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (str(e), resp))
+ return resp, info
+
+ def get_acl(self):
+ resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
+ return resp
+
+ def create_or_update_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+            # If in check mode, don't create the ACL policy; simulate a fake creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="POST",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 409:
+ self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_acl())
+ else:
+ if facts["contents"] == self.module.params["policy"]:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before=facts, after=facts)
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="PUT",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 200:
+ self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 404:
+            self.module.fail_json(msg="ACL %s doesn't exist. Cannot update." % self.module.params["name"])
+
+ def remove_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+            # If not in check mode, remove the ACL policy
+ if not self.module.check_mode:
+ self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ policy=dict(type='str'),
+ project=dict(type='str'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['state', 'present', ['policy']],
+ ],
+ supports_check_mode=True
+ )
+
+    if not bool(re.match("^[a-zA-Z0-9,.+_-]+$", module.params["name"])):
+        module.fail_json(msg="Name contains forbidden characters. The policy name can contain only the characters: a-zA-Z0-9,.+_-")
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckACLManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_acl()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_acl()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py
new file mode 100644
index 00000000..5c846482
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Ansible module to manage rundeck projects
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_project
+
+short_description: Manage Rundeck projects.
+description:
+ - Create and remove Rundeck projects through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+ - Create or remove Rundeck project.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Sets the project name.
+ required: True
+ url:
+ type: str
+ description:
+ - Sets the rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+      - Sets the API version used by the module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create a rundeck project
+ community.general.rundeck_project:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+
+- name: Remove a rundeck project
+ community.general.rundeck_project:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs
+ returned: failed
+ type: str
+before:
+ description: dictionary containing project information before modification
+ returned: success
+ type: dict
+after:
+ description: dictionary containing project information after modification
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+import json
+
+
+class RundeckProjectManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
+ if resp is not None:
+ resp = resp.read()
+            if resp != b"":
+                try:
+                    json_resp = json.loads(to_native(resp, errors='surrogate_or_strict'))
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (to_native(e), resp))
+ return resp, info
+
+ def get_project_facts(self):
+ resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
+ return resp
+
+ def create_or_update_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ # If in check mode don't create project, simulate a fake project creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
+
+ resp, info = self.request_rundeck_api("projects", method="POST", data={
+ "name": self.module.params["name"],
+ "config": {}
+ })
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_project_facts())
+ else:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ def remove_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckProjectManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_project()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_project()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py
new file mode 100644
index 00000000..b80ed8cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: runit
+author:
+- James Sumners (@jsumners)
+short_description: Manage runit services
+description:
+ - Controls runit services on remote hosts using the sv utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: yes
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ service (sv restart) and C(killed) will always bounce the service (sv force-stop).
+ C(reloaded) will send a HUP (sv reload).
+ C(once) will run a normally downed sv once (sv once), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+      - Whether the service is enabled or not; if disabled it also implies stopped.
+ type: bool
+ service_dir:
+ description:
+      - Directory that runsv watches for services.
+ type: str
+ default: /var/service
+ service_src:
+ description:
+      - Directory where services are defined; the source of symlinks to I(service_dir).
+ type: str
+ default: /etc/sv
+'''
+
+EXAMPLES = r'''
+- name: Start sv dnscache, if not running
+ community.general.runit:
+ name: dnscache
+ state: started
+
+- name: Stop sv dnscache, if running
+ community.general.runit:
+ name: dnscache
+ state: stopped
+
+- name: Kill sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: killed
+
+- name: Restart sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: restarted
+
+- name: Reload sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+
+- name: Use alternative sv directory location
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+ service_dir: /run/service
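+
+# A sketch not found in the original examples: enabling links the service from
+# service_src into service_dir; state ensures it is also running.
+- name: Enable and start sv dnscache
+  community.general.runit:
+    name: dnscache
+    enabled: yes
+    state: started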
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs): # @FIXME remove unused function?
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Sv(object):
+ """
+    Main class that handles runit; can be subclassed and overridden in case
+    we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = []
+ self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
+ self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.get_status()
+ else:
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
+
+        if err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+ # full_state *may* contain information about the logger:
+ # "down: /etc/service/service-without-logger: 1s, normally up\n"
+ # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
+ full_state_no_logger = self.full_state.split("; ")[0]
+
+ m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r' (\d+)s', full_state_no_logger)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(r'^run:', full_state_no_logger):
+ self.state = 'started'
+ elif re.search(r'^down:', full_state_no_logger):
+ self.state = 'stopped'
+ else:
+ self.state = 'unknown'
+ return
+
+ def started(self):
+ return self.start()
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, 'start', self.svc_full])
+
+ def stopped(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, 'once', self.svc_full])
+
+ def reloaded(self):
+ return self.reload()
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
+
+ def restarted(self):
+ return self.restart()
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
+
+ def killed(self):
+ return self.kill()
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+            (rc, out, err) = self.module.run_command(cmd)
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e))
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ dist=dict(type='str', default='runit'), # @FIXME unused param?
+ service_dir=dict(type='str', default='/var/service'),
+ service_src=dict(type='str', default='/etc/sv'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+
+ sv = Sv(module)
+ changed = False
+ orig_state = sv.report()
+
+ if enabled is not None and enabled != sv.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ sv.enable()
+ else:
+ sv.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != sv.state:
+ changed = True
+ if not module.check_mode:
+ getattr(sv, state)()
+
+ module.exit_json(changed=changed, sv=sv.report())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py
new file mode 100644
index 00000000..1c66adf6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Michael DeHaan <michael@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: say
+short_description: Makes a computer speak.
+description:
+ - makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+ - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
+ - If you like this module, you may also be interested in the osx_say callback plugin.
+  - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
+options:
+ msg:
+ type: str
+ description:
+      - What to say.
+ required: true
+ voice:
+ type: str
+ description:
+      - What voice to use.
+ required: false
+requirements: [ say or espeak or espeak-ng ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- name: Make a computer speak
+ community.general.say:
+ msg: '{{ inventory_hostname }} is all done'
+ voice: Zarvox
+ delegate_to: localhost
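+
+# A sketch not found in the original examples: with no voice given, the
+# platform default is used, which also works with espeak on Linux hosts.
+- name: Announce completion with the default voice
+  community.general.say:
+    msg: '{{ inventory_hostname }} backup finished'
+  delegate_to: localhost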
+'''
+
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
+ cmd = [executable, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ msg=dict(required=True),
+ voice=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ msg = module.params['msg']
+ voice = module.params['voice']
+ possibles = ('say', 'espeak', 'espeak-ng')
+
+ if platform.system() != 'Darwin':
+        # a 'say' binary found on a non-Darwin host may be the GNUstep tool, which doesn't support the 'voice' parameter
+ voice = None
+
+ for possible in possibles:
+ executable = module.get_bin_path(possible)
+ if executable:
+ break
+ else:
+        module.fail_json(msg='Unable to find any of %s' % ', '.join(possibles))
+
+ if module.check_mode:
+ module.exit_json(msg=msg, changed=False)
+
+ say(module, executable, msg, voice)
+
+ module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py
new file mode 100644
index 00000000..8df9a5e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py
@@ -0,0 +1,671 @@
+#!/usr/bin/python
+#
+# Scaleway Compute management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute
+short_description: Scaleway compute management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages compute instances on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ public_ip:
+ type: str
+ description:
+ - Manage public IP on a Scaleway server
+      - Can be a Scaleway IP address UUID
+      - C(dynamic) means that the IP is destroyed at the same time the host is destroyed
+      - C(absent) means no public IP at all
+ default: absent
+
+ enable_ipv6:
+ description:
+ - Enable public IPv6 connectivity on the instance
+ default: false
+ type: bool
+
+ image:
+ type: str
+ description:
+      - Image identifier used to start the instance
+ required: true
+
+ name:
+ type: str
+ description:
+ - Name of the instance
+
+ organization:
+ type: str
+ description:
+ - Organization identifier
+ required: true
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+ - running
+ - restarted
+ - stopped
+
+ tags:
+ type: list
+ description:
+ - List of tags to apply to the instance (5 max)
+ required: false
+ default: []
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ commercial_type:
+ type: str
+ description:
+ - Commercial name of the compute node
+ required: true
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: 'no'
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the server to reach the expected state
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the server
+ required: false
+ default: 3
+
+ security_group:
+ type: str
+ description:
+ - Security group unique identifier
+ - If no value provided, the default security group or current security group will be used
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a server
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ tags:
+ - test
+ - www
+
+- name: Create a server attached to a security group
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
+ tags:
+ - test
+ - www
+
+- name: Destroy it right after
+ community.general.scaleway_compute:
+ name: foobar
+ state: absent
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
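+
+# A sketch not found in the original examples: public_ip accepts C(dynamic) for
+# a throwaway IP that is released when the server is destroyed.
+- name: Create a server with a dynamic public IP
+  community.general.scaleway_compute:
+    name: foobar
+    state: present
+    image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+    organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+    region: ams1
+    commercial_type: VC1S
+    public_ip: dynamic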
+'''
+
+RETURN = '''
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+SCALEWAY_SERVER_STATES = (
+ 'stopped',
+ 'stopping',
+ 'starting',
+ 'running',
+ 'locked'
+)
+
+SCALEWAY_TRANSITIONS_STATES = (
+ "stopping",
+ "starting",
+ "pending"
+)
+
+
+def check_image_id(compute_api, image_id):
+ response = compute_api.get(path="images/%s" % image_id)
+
+ if not response.ok:
+        msg = 'Error in getting image %s on %s: %s' % (image_id, compute_api.module.params.get('api_url'), response.json)
+ compute_api.module.fail_json(msg=msg)
+
+
+def fetch_state(compute_api, server):
+ compute_api.module.debug("fetch_state of server: %s" % server["id"])
+ response = compute_api.get(path="servers/%s" % server["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
+ return response.json["server"]["state"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(compute_api, server, wait=None):
+ if wait is None:
+ wait = compute_api.module.params["wait"]
+ if not wait:
+ return
+
+ wait_timeout = compute_api.module.params["wait_timeout"]
+ wait_sleep_time = compute_api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ compute_api.module.debug("We are going to wait for the server to finish its transition")
+ if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
+ compute_api.module.debug("It seems that the server is not in transition anymore.")
+ compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
+ break
+ time.sleep(wait_sleep_time)
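+    # NOTE: the else branch of this while loop only runs when the deadline
+    # expires without the break above firing, i.e. the transition timed out.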
+ else:
+ compute_api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def public_ip_payload(compute_api, public_ip):
+ # We don't want a public ip
+ if public_ip in ("absent",):
+ return {"dynamic_ip_required": False}
+
+ # IP is only attached to the instance and is released as soon as the instance terminates
+ if public_ip in ("dynamic", "allocated"):
+ return {"dynamic_ip_required": True}
+
+ # We check that the IP we want to attach exists, if so its ID is returned
+ response = compute_api.get("ips")
+ if not response.ok:
+ msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ ip_list = []
+ try:
+ ip_list = response.json["ips"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
+
+ lookup = [ip["id"] for ip in ip_list]
+ if public_ip in lookup:
+ return {"public_ip": public_ip}
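+
+    # NOTE: if the requested UUID is not found above, the function falls
+    # through and implicitly returns None (no payload change is applied).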
+
+
+def create_server(compute_api, server):
+ compute_api.module.debug("Starting a create_server")
+ target_server = None
+ data = {"enable_ipv6": server["enable_ipv6"],
+ "tags": server["tags"],
+ "commercial_type": server["commercial_type"],
+ "image": server["image"],
+ "dynamic_ip_required": server["dynamic_ip_required"],
+ "name": server["name"],
+ "organization": server["organization"]
+ }
+
+ if server["security_group"]:
+ data["security_group"] = server["security_group"]
+
+ response = compute_api.post(path="servers", data=data)
+
+ if not response.ok:
+ msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def restart_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="reboot")
+
+
+def stop_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweroff")
+
+
+def start_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweron")
+
+
+def perform_action(compute_api, server, action):
+ response = compute_api.post(path="servers/%s/action" % server["id"],
+ data={"action": action})
+ if not response.ok:
+ msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def remove_server(compute_api, server):
+ compute_api.module.debug("Starting remove server strategy")
+ response = compute_api.delete(path="servers/%s" % server["id"])
+ if not response.ok:
+ msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def present_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting present strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ return changed, target_server
+
+
+def absent_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting absent strategy")
+ changed = False
+ target_server = None
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ return changed, {"status": "Server already absent."}
+ else:
+ target_server = query_results[0]
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be made absent." % target_server["id"]}
+
+ # A server MUST be stopped to be deleted.
+ while fetch_state(compute_api=compute_api, server=target_server) != "stopped":
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+ response = stop_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+
+ response = remove_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ return changed, {"status": "Server %s deleted" % target_server["id"]}
+
+
+def running_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting running strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being run."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("running", "starting"):
+ compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ response = start_server(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def stop_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting stop strategy")
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ changed = False
+
+ if not query_results:
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being stopped."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ changed = True
+ else:
+ target_server = query_results[0]
+
+ compute_api.module.debug("stop_strategy: Servers are found.")
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("stopped",):
+ compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be stopped." % target_server["id"]}
+
+ response = stop_server(compute_api=compute_api, server=target_server)
+ compute_api.module.debug(response.json)
+ compute_api.module.debug(response.ok)
+
+ if not response.ok:
+ msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def restart_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting restart strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being rebooted."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api,
+ target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+state_strategy = {
+ "present": present_strategy,
+ "restarted": restart_strategy,
+ "stopped": stop_strategy,
+ "running": running_strategy,
+ "absent": absent_strategy
+}
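+
+# Each strategy returns a (changed, summary) tuple, so core() below can
+# dispatch on the requested state without branching, e.g.:
+#   changed, summary = state_strategy[wished_server["state"]](
+#       compute_api=compute_api, wished_server=wished_server)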
+
+
+def find(compute_api, wished_server, per_page=1):
+ compute_api.module.debug("Getting inside find")
+ # Only the name attribute is accepted in the Compute query API
+ response = compute_api.get("servers", params={"name": wished_server["name"],
+ "per_page": per_page})
+
+ if not response.ok:
+ msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ search_results = response.json["servers"]
+
+ return search_results
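+
+# Note: server names are not unique on Scaleway, and the query above filters
+# only on "name" with per_page=1, so each strategy acts on the first match
+# returned by the API.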
+
+
+PATCH_MUTABLE_SERVER_ATTRIBUTES = (
+ "ipv6",
+ "tags",
+ "name",
+ "dynamic_ip_required",
+ "security_group",
+)
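+# Only the attributes above are patched in place (PATCH on servers/<id>);
+# anything else, such as commercial_type or image, is not reconciled here.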
+
+
+def server_attributes_should_be_changed(compute_api, target_server, wished_server):
+ compute_api.module.debug("Checking if server attributes should be changed")
+ compute_api.module.debug("Current Server: %s" % target_server)
+ compute_api.module.debug("Wished Server: %s" % wished_server)
+ debug_dict = dict((x, (target_server[x], wished_server[x]))
+ for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
+ if x in target_server and x in wished_server)
+ compute_api.module.debug("Debug dict %s" % debug_dict)
+ try:
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+                # When the attribute is a dict, only the ID matters, as the user is asked to put only the resource ID in the playbook
+                if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key] \
+                        and target_server[key]["id"] != wished_server[key]:
+ return True
+                # For any other structure, simply compare the two values directly
+ elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
+ return True
+ return False
+ except AttributeError:
+ compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
+
+
+def server_change_attributes(compute_api, target_server, wished_server):
+ compute_api.module.debug("Starting patching server attributes")
+ patch_payload = dict()
+
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+            # When the attribute is a dict, only the ID matters, as the user is asked to put only the resource ID in the playbook
+ if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
+                # Keep every key at its current value, except the ID
+ key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
+                # Set the ID to the user-specified ID
+ key_dict["id"] = wished_server[key]
+ patch_payload[key] = key_dict
+ elif not isinstance(target_server[key], dict):
+ patch_payload[key] = wished_server[key]
+
+ response = compute_api.patch(path="servers/%s" % target_server["id"],
+ data=patch_payload)
+ if not response.ok:
+ msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
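+
+# For a dict-valued attribute, the patch payload keeps every current key and
+# swaps in the user-supplied ID, e.g. (hypothetical values):
+#   {"security_group": {"name": "default", "id": "<user-id>"}}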
+
+
+def core(module):
+ region = module.params["region"]
+ wished_server = {
+ "state": module.params["state"],
+ "image": module.params["image"],
+ "name": module.params["name"],
+ "commercial_type": module.params["commercial_type"],
+ "enable_ipv6": module.params["enable_ipv6"],
+ "tags": module.params["tags"],
+ "organization": module.params["organization"],
+ "security_group": module.params["security_group"]
+ }
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ compute_api = Scaleway(module=module)
+
+ check_image_id(compute_api, wished_server["image"])
+
+    # The IP parameters of the wished server depend on the configuration
+ ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
+ wished_server.update(ip_payload)
+
+ changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
+ module.exit_json(changed=changed, msg=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ image=dict(required=True),
+ name=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ commercial_type=dict(required=True),
+ enable_ipv6=dict(default=False, type="bool"),
+ public_ip=dict(default="absent"),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ security_group=dict(),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
new file mode 100644
index 00000000..57803245
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+#
+# Scaleway database backups management module
+#
+# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_database_backup
+short_description: Scaleway database backups management module
+version_added: 1.2.0
+author: Guillaume Rodriguez (@guillaume_ro_fr)
+description:
+  - This module manages database backups on a Scaleway account U(https://developer.scaleway.com).
+extends_documentation_fragment:
+ - community.general.scaleway
+options:
+ state:
+ description:
+ - Indicate desired state of the database backup.
+ - C(present) creates a backup.
+ - C(absent) deletes the backup.
+ - C(exported) creates a download link for the backup.
+ - C(restored) restores the backup to a new database.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ - exported
+ - restored
+
+ region:
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ type: str
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ id:
+ description:
+ - UUID used to identify the database backup.
+ - Required for C(absent), C(exported) and C(restored) states.
+ type: str
+
+ name:
+ description:
+ - Name used to identify the database backup.
+ - Required for C(present) state.
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ database_name:
+ description:
+ - Name used to identify the database.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ instance_id:
+ description:
+ - UUID of the instance associated to the database backup.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ expires_at:
+ description:
+ - Expiration datetime of the database backup (ISO 8601 format).
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ wait:
+ description:
+      - Wait for the backup to reach its desired state before returning.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - Time to wait for the backup to reach the expected state.
+ type: int
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ description:
+ - Time to wait before every attempt to check the state of the backup.
+ type: int
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+ - name: Create a backup
+ community.general.scaleway_database_backup:
+ name: 'my_backup'
+ state: present
+ region: 'fr-par'
+ database_name: 'my-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Export a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: exported
+ region: 'fr-par'
+
+ - name: Restore a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: restored
+ region: 'fr-par'
+ database_name: 'my-new-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Remove a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: absent
+ region: 'fr-par'
+'''
+
+RETURN = '''
+metadata:
+ description: Backup metadata.
+ returned: when C(state=present), C(state=exported) or C(state=restored)
+ type: dict
+ sample: {
+ "metadata": {
+ "created_at": "2020-08-06T12:42:05.631049Z",
+ "database_name": "my-database",
+ "download_url": null,
+ "download_url_expires_at": null,
+ "expires_at": null,
+ "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
+ "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
+ "instance_name": "my-instance",
+ "name": "backup_name",
+ "region": "fr-par",
+ "size": 600000,
+ "status": "ready",
+ "updated_at": "2020-08-06T12:42:10.581649Z"
+ }
+ }
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ scaleway_argument_spec,
+ SCALEWAY_REGIONS,
+)
+
+stable_states = (
+ 'ready',
+ 'deleting',
+)
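+# States treated as terminal by the wait loop below; it polls until one of
+# these is reached or wait_timeout expires.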
+
+
+def wait_to_complete_state_transition(module, account_api, backup=None):
+ wait_timeout = module.params['wait_timeout']
+ wait_sleep_time = module.params['wait_sleep_time']
+
+ if backup is None or backup['status'] in stable_states:
+ return backup
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ module.debug('We are going to wait for the backup to finish its transition')
+
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if not response.ok:
+ module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json))
+ break
+ response_json = response.json
+
+ if response_json['status'] in stable_states:
+ module.debug('It seems that the backup is not in transition anymore.')
+ module.debug('Backup in state: %s' % response_json['status'])
+ return response_json
+ time.sleep(wait_sleep_time)
+ else:
+        module.fail_json(msg='Backup took too long to finish its transition')
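+
+# Note the while/else above: the else branch runs only when the loop exhausts
+# its time budget without reaching a stable state, i.e. exactly on timeout.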
+
+
+def present_strategy(module, account_api, backup):
+ name = module.params['name']
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+ expiration_date = module.params['expires_at']
+
+ if backup is not None:
+ if (backup['name'] == name or name is None) and (
+ backup['expires_at'] == expiration_date or expiration_date is None):
+ wait_to_complete_state_transition(module, account_api, backup)
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {}
+ if name is not None:
+ payload['name'] = name
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']),
+ payload)
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def absent_strategy(module, account_api, backup):
+ if backup is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def exported_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ if backup['download_url'] is not None:
+ module.exit_json(changed=False, metadata=backup)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+ response = account_api.post(
+ '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {})
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def restored_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+
+ payload = {'database_name': database_name, 'instance_id': instance_id}
+ response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']),
+ payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+state_strategy = {
+ 'present': present_strategy,
+ 'absent': absent_strategy,
+ 'exported': exported_strategy,
+ 'restored': restored_strategy,
+}
+
+
+def core(module):
+ state = module.params['state']
+ backup_id = module.params['id']
+
+ account_api = Scaleway(module)
+
+ if backup_id is None:
+ backup_by_id = None
+ else:
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id))
+        status_code = response.status_code
+        if status_code == 404:
+            backup_by_id = None
+        elif response.ok:
+            backup_by_id = response.json
+        else:
+            module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message']))
+
+ state_strategy[state](module, account_api, backup_by_id)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ id=dict(),
+ name=dict(type='str'),
+ database_name=dict(required=False),
+ instance_id=dict(required=False),
+ expires_at=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ wait_sleep_time=dict(type='int', default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=[
+ ['database_name', 'instance_id'],
+ ],
+ required_if=[
+ ['state', 'present', ['name', 'database_name', 'instance_id']],
+ ['state', 'absent', ['id']],
+ ['state', 'exported', ['id']],
+ ['state', 'restored', ['id', 'database_name', 'instance_id']],
+ ],
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py
new file mode 100644
index 00000000..31bbfa76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_image_info) instead.
+short_description: Gather facts about the Scaleway images available.
+description:
+ - Gather facts about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images facts
+ community.general.scaleway_image_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_image_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_facts": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageFacts, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_image_facts': ScalewayImageFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py
new file mode 100644
index 00000000..3fad216e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_info
+short_description: Gather information about the Scaleway images available.
+description:
+ - Gather information about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images information
+ community.general.scaleway_image_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_image_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_image_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_info": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageInfo, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_image_info=ScalewayImageInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py
new file mode 100644
index 00000000..26da122e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+#
+# Scaleway IP management module
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_ip
+short_description: Scaleway IP management module
+author: Remy Leone (@sieben)
+description:
+  - This module manages IPs on a Scaleway account
+    U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the IP.
+ default: present
+ choices:
+ - present
+ - absent
+
+ organization:
+ type: str
+ description:
+ - Scaleway organization identifier
+ required: true
+
+ region:
+ type: str
+ description:
+      - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ id:
+ type: str
+ description:
+      - ID of the Scaleway IP (UUID).
+
+ server:
+ type: str
+ description:
+      - ID of the server you want to attach the IP to.
+      - To detach the IP, do not specify this option.
+
+ reverse:
+ type: str
+ description:
+      - Reverse DNS to assign to the IP.
+'''
+
+EXAMPLES = '''
+- name: Create an IP
+ community.general.scaleway_ip:
+ organization: '{{ scw_org }}'
+ state: present
+ region: par1
+ register: ip_creation_task
+
+- name: Make sure IP deleted
+ community.general.scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ips": [
+ {
+ "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
+ "reverse": null,
+ "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
+ "server": {
+ "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
+ "name": "ansible_tuto-1"
+ },
+ "address": "212.47.232.136"
+ }
+ ]
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def ip_attributes_should_be_changed(api, target_ip, wished_ip):
+ patch_payload = {}
+
+ if target_ip["reverse"] != wished_ip["reverse"]:
+ patch_payload["reverse"] = wished_ip["reverse"]
+
+ # IP is assigned to a server
+ if target_ip["server"] is None and wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+
+    # IP is detached from a server
+ try:
+ if target_ip["server"]["id"] and wished_ip["server"] is None:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+    # IP is moved between two different servers
+ try:
+ if target_ip["server"]["id"] != wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+ return patch_payload
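+
+# The three cases above cover the attachment transitions (hypothetical values):
+#   attach: target server is None,          wished server is "abc"  -> patch
+#   detach: target server is {"id": "abc"}, wished server is None   -> patch
+#   move:   target server is {"id": "abc"}, wished server is "def"  -> patch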
+
+
+def payload_from_wished_ip(wished_ip):
+ return dict(
+ (k, v)
+ for k, v in wished_ip.items()
+ if k != 'id' and v is not None
+ )
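+
+# The creation payload drops "id" (assigned by the API) and any unset option;
+# a minimal POST body may therefore contain only the organization.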
+
+
+def present_strategy(api, wished_ip):
+ changed = False
+
+ response = api.get('ips')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ ips_list = response.json["ips"]
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+
+ if wished_ip["id"] not in ip_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "An IP would be created."}
+
+ # Create IP
+ creation_response = api.post('/ips',
+ data=payload_from_wished_ip(wished_ip))
+
+ if not creation_response.ok:
+ msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+ return changed, creation_response.json["ip"]
+
+ target_ip = ip_lookup[wished_ip["id"]]
+ patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
+
+ if not patch_payload:
+ return changed, target_ip
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP attributes would be changed."}
+
+ ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
+ data=patch_payload)
+
+ if not ip_patch_response.ok:
+ api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
+ ip_patch_response.status_code, ip_patch_response.json['message']))
+
+ return changed, ip_patch_response.json["ip"]
+
+
+def absent_strategy(api, wished_ip):
+ response = api.get('ips')
+ changed = False
+
+ status_code = response.status_code
+ ips_json = response.json
+ ips_list = ips_json["ips"]
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+ if wished_ip["id"] not in ip_lookup.keys():
+ return changed, {}
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP would be destroyed"}
+
+ response = api.delete('/ips/' + wished_ip["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ return changed, response.json
+
+
+def core(module):
+ wished_ip = {
+ "organization": module.params['organization'],
+ "reverse": module.params["reverse"],
+ "id": module.params["id"],
+ "server": module.params["server"]
+ }
+
+ region = module.params["region"]
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ api = Scaleway(module=module)
+ if module.params["state"] == "absent":
+ changed, summary = absent_strategy(api=api, wished_ip=wished_ip)
+ else:
+ changed, summary = present_strategy(api=api, wished_ip=wished_ip)
+ module.exit_json(changed=changed, scaleway_ip=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ organization=dict(required=True),
+ server=dict(),
+ reverse=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ id=dict()
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py
new file mode 100644
index 00000000..4227f360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_ip_info) instead.
+short_description: Gather facts about the Scaleway IPs available.
+description:
+  - Gather facts about the Scaleway IPs available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+      - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips facts
+ community.general.scaleway_ip_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_ip_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_facts": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpFacts, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_ip_facts': ScalewayIpFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py
new file mode 100644
index 00000000..145fb203
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_info
+short_description: Gather information about the Scaleway IPs available.
+description:
+  - Gather information about the Scaleway IPs available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips information
+ community.general.scaleway_ip_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_ip_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_ip_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_info": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpInfo, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_ip_info=ScalewayIpInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py
new file mode 100644
index 00000000..a9358188
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+#
+# Scaleway Load-balancer management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_lb
+short_description: Scaleway load-balancer management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages load-balancers on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ name:
+ type: str
+ description:
+ - Name of the load-balancer
+ required: true
+
+ description:
+ type: str
+ description:
+ - Description of the load-balancer
+ required: true
+
+ organization_id:
+ type: str
+ description:
+ - Organization identifier
+ required: true
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+      - Scaleway region.
+ required: true
+ choices:
+ - nl-ams
+ - fr-par
+ - pl-waw
+
+ tags:
+ type: list
+ description:
+ - List of tags to apply to the load-balancer
+
+ wait:
+ description:
+ - Wait for the load-balancer to reach its desired state before returning.
+ type: bool
+ default: 'no'
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the load-balancer to reach the expected state
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the load-balancer
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+- name: Create a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ state: present
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+ tags:
+ - hello
+
+- name: Delete a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ state: absent
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+'''
+
+RETURN = '''
+{
+ "scaleway_lb": {
+ "backend_count": 0,
+ "frontend_count": 0,
+ "description": "Description of my load-balancer",
+ "id": "00000000-0000-0000-0000-000000000000",
+ "instances": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.1",
+ "region": "fr-par",
+ "status": "ready"
+ },
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.2",
+ "region": "fr-par",
+ "status": "ready"
+ }
+ ],
+ "ip": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "192.168.0.1",
+ "lb_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "reverse": ""
+ }
+ ],
+ "name": "lb_ansible_test",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "status": "ready",
+ "tags": [
+ "first_tag",
+ "second_tag"
+ ]
+ }
+}
+'''
+
+import datetime
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway
+
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
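+# "absent" counts as stable so that waiting on a deletion also converges:
+# fetch_state() below maps a 404 on the load-balancer to "absent".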
+
+MUTABLE_ATTRIBUTES = (
+ "name",
+ "description"
+)
+
+
+def payload_from_wished_lb(wished_lb):
+ return {
+ "organization_id": wished_lb["organization_id"],
+ "name": wished_lb["name"],
+ "tags": wished_lb["tags"],
+ "description": wished_lb["description"]
+ }
+
+
+def fetch_state(api, lb):
+ api.module.debug("fetch_state of load-balancer: %s" % lb["id"])
+ response = api.get(path=api.api_path + "/%s" % lb["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ api.module.fail_json(msg=msg)
+
+ try:
+ api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"]))
+ return response.json["status"]
+ except KeyError:
+ api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(api, lb, force_wait=False):
+ wait = api.module.params["wait"]
+ if not (wait or force_wait):
+ return
+ wait_timeout = api.module.params["wait_timeout"]
+ wait_sleep_time = api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ api.module.debug("We are going to wait for the load-balancer to finish its transition")
+ state = fetch_state(api, lb)
+ if state in STABLE_STATES:
+ api.module.debug("It seems that the load-balancer is not in transition anymore.")
+ api.module.debug("load-balancer in state: %s" % fetch_state(api, lb))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def lb_attributes_should_be_changed(target_lb, wished_lb):
+ diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr])
+
+ if diff:
+ return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES)
+ else:
+ return diff
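+
+# When any mutable attribute differs, the full mutable set is returned rather
+# than just the diff, presumably because the update below uses PUT (replace
+# semantics) rather than PATCH.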
+
+
+def present_strategy(api, wished_lb):
+ changed = False
+
+ response = api.get(path=api.api_path)
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ lbs_list = response.json["lbs"]
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+
+ if wished_lb["name"] not in lb_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A load-balancer would be created."}
+
+ # Create Load-balancer
+ api.warn(payload_from_wished_lb(wished_lb))
+ creation_response = api.post(path=api.api_path,
+ data=payload_from_wished_lb(wished_lb))
+
+ if not creation_response.ok:
+ msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(api=api, lb=creation_response.json)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ patch_payload = lb_attributes_should_be_changed(target_lb=target_lb,
+ wished_lb=wished_lb)
+
+ if not patch_payload:
+ return changed, target_lb
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer attributes would be changed."}
+
+ lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"],
+ data=patch_payload)
+
+ if not lb_patch_response.ok:
+ api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format(
+ lb_patch_response.status_code, lb_patch_response.json['message']))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, lb_patch_response.json
+
+
+def absent_strategy(api, wished_lb):
+ response = api.get(path=api.api_path)
+ changed = False
+
+ status_code = response.status_code
+ lbs_json = response.json
+ lbs_list = lbs_json["lbs"]
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+ if wished_lb["name"] not in lb_lookup.keys():
+ return changed, {}
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer would be destroyed"}
+
+ wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_lb["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ region = module.params["region"]
+ wished_load_balancer = {
+ "state": module.params["state"],
+ "name": module.params["name"],
+ "description": module.params["description"],
+ "tags": module.params["tags"],
+ "organization_id": module.params["organization_id"]
+ }
+ module.params['api_url'] = SCALEWAY_ENDPOINT
+ api = Scaleway(module=module)
+ api.api_path = "lb/v1/regions/%s/lbs" % region
+
+ changed, summary = state_strategy[wished_load_balancer["state"]](api=api,
+ wished_lb=wished_load_balancer)
+ module.exit_json(changed=changed, scaleway_lb=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ description=dict(required=True),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization_id=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py
new file mode 100644
index 00000000..ee571cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_organization_info) instead.
+short_description: Gather facts about the Scaleway organizations available.
+description:
+ - Gather facts about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations facts
+ community.general.scaleway_organization_facts:
+'''
+
+RETURN = r'''
+---
+scaleway_organization_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_facts": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationFacts, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_organization_facts': ScalewayOrganizationFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py
new file mode 100644
index 00000000..f530dcb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_info
+short_description: Gather information about the Scaleway organizations available.
+description:
+ - Gather information about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations information
+ community.general.scaleway_organization_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_organization_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_organization_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_info": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationInfo, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
new file mode 100644
index 00000000..9303e06e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group
+short_description: Scaleway Security Group management module
+author: Antoine Barbare (@abarbare)
+description:
+  - This module manages Security Groups on a Scaleway account
+    U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ description:
+ - Indicate desired state of the Security Group.
+ type: str
+ choices: [ absent, present ]
+ default: present
+
+ organization:
+ description:
+ - Organization identifier.
+ type: str
+ required: true
+
+ region:
+ description:
+ - Scaleway region to use (for example C(par1)).
+ type: str
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ name:
+ description:
+ - Name of the Security Group.
+ type: str
+ required: true
+
+ description:
+ description:
+ - Description of the Security Group.
+ type: str
+
+ stateful:
+ description:
+ - Create a stateful security group which allows established connections in and out.
+ type: bool
+ required: true
+
+ inbound_default_policy:
+ description:
+ - Default policy for incoming traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ outbound_default_policy:
+ description:
+      - Default policy for outgoing traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ organization_default:
+ description:
+      - Make this security group the default one for the organization.
+ type: bool
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group
+ community.general.scaleway_security_group:
+ state: present
+ region: par1
+ name: security_group
+ description: "my security group description"
+ organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation_task
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group": {
+ "description": "my security group description",
+ "enable_default_security": true,
+ "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
+ "inbound_default_policy": "accept",
+ "name": "security_group",
+ "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
+ "organization_default": false,
+ "outbound_default_policy": "accept",
+ "servers": [],
+ "stateful": false
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+from uuid import uuid4
+
+
+def payload_from_security_group(security_group):
+ return dict(
+ (k, v)
+ for k, v in security_group.items()
+ if k != 'id' and v is not None
+ )
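+
+# Illustrative example (not part of the original module): the helper strips the
+# read-only 'id' field and any options the user left unset, e.g.
+# payload_from_security_group({'id': 'x', 'name': 'sg', 'description': None})
+# returns {'name': 'sg'}.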
+
+
+def present_strategy(api, security_group):
+ ret = {'changed': False}
+
+ response = api.get('security_groups')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+
+ if security_group['name'] not in security_group_lookup:
+ ret['changed'] = True
+ if api.module.check_mode:
+ # Help user when check mode is enabled by defining id key
+ ret['scaleway_security_group'] = {'id': str(uuid4())}
+ return ret
+
+ # Create Security Group
+ response = api.post('/security_groups',
+ data=payload_from_security_group(security_group))
+
+ if not response.ok:
+ msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)
+ api.module.fail_json(msg=msg)
+ ret['scaleway_security_group'] = response.json['security_group']
+
+ else:
+ ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
+
+ return ret
+
+
+def absent_strategy(api, security_group):
+ response = api.get('security_groups')
+ ret = {'changed': False}
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+ if security_group['name'] not in security_group_lookup:
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id'])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ security_group = {
+ 'organization': module.params['organization'],
+ 'name': module.params['name'],
+ 'description': module.params['description'],
+ 'stateful': module.params['stateful'],
+ 'inbound_default_policy': module.params['inbound_default_policy'],
+ 'outbound_default_policy': module.params['outbound_default_policy'],
+ 'organization_default': module.params['organization_default'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ api = Scaleway(module=module)
+ if module.params['state'] == 'present':
+ summary = present_strategy(api=api, security_group=security_group)
+ else:
+ summary = absent_strategy(api=api, security_group=security_group)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ organization=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ stateful=dict(type='bool', required=True),
+ inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ organization_default=dict(type='bool'),
+ ))
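+ # Note: required_if below makes both default policies mandatory whenever
+ # stateful=true.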
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py
new file mode 100644
index 00000000..a43bfedb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_security_group_info) instead.
+short_description: Gather facts about the Scaleway security groups available.
+description:
+ - Gather facts about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups facts
+ community.general.scaleway_security_group_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_facts": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupFacts, self).__init__(module)
+ self.name = 'security_groups'
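+ # self.name selects the API collection that the inherited get_resources()
+ # helper lists (inferred from how these info modules use the Scaleway base class).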
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_security_group_facts': ScalewaySecurityGroupFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py
new file mode 100644
index 00000000..d3488f0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_info
+short_description: Gather information about the Scaleway security groups available.
+description:
+ - Gather information about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups information
+ community.general.scaleway_security_group_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_security_group_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_info": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupInfo, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
new file mode 100644
index 00000000..054a4d47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group Rule management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group_rule
+short_description: Scaleway Security Group Rule management module
+author: Antoine Barbare (@abarbare)
+description:
+ - This module manages Security Group Rules on a Scaleway account,
+ U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the Security Group Rule.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ protocol:
+ type: str
+ description:
+ - Network protocol to use.
+ choices:
+ - TCP
+ - UDP
+ - ICMP
+ required: true
+
+ port:
+ description:
+ - Port related to the rule; a null value matches all ports.
+ required: true
+ type: int
+
+ ip_range:
+ type: str
+ description:
+ - IPv4 range, in CIDR notation, to which the rule applies.
+ default: 0.0.0.0/0
+
+ direction:
+ type: str
+ description:
+ - Rule direction.
+ choices:
+ - inbound
+ - outbound
+ required: true
+
+ action:
+ type: str
+ description:
+ - Rule action.
+ choices:
+ - accept
+ - drop
+ required: true
+
+ security_group:
+ type: str
+ description:
+ - Unique identifier of the Security Group.
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group Rule
+ community.general.scaleway_security_group_rule:
+ state: present
+ region: par1
+ protocol: TCP
+ port: 80
+ ip_range: 0.0.0.0/0
+ direction: inbound
+ action: accept
+ security_group: b57210ee-1281-4820-a6db-329f78596ecb
+ register: security_group_rule_creation_task
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group_rule": {
+ "direction": "inbound",
+ "protocol": "TCP",
+ "ip_range": "0.0.0.0/0",
+ "dest_port_from": 80,
+ "action": "accept",
+ "position": 2,
+ "dest_port_to": null,
+ "editable": null,
+ "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
+from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_sgr_from_api(security_group_rules, security_group_rule):
+ """ Check if a security_group_rule specs are present in security_group_rules
+ Return None if no rules match the specs
+ Return the rule if found
+ """
+ for sgr in security_group_rules:
+ if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and
+ sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and
+ sgr['protocol'] == security_group_rule['protocol']):
+ return sgr
+
+ return None
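+
+# Illustrative note (not part of the original module): two rules are considered
+# the same when ip_range, dest_port_from, direction, action and protocol all
+# match; fields such as 'id' and 'position' are ignored, so a rule re-created
+# by the API under a new id is still treated as already present.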
+
+
+def present_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ # Create Security Group Rule
+ response = api.post('/security_groups/%s/rules' % security_group_id,
+ data=payload_from_object(security_group_rule))
+
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error during security group rule creation: "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+ ret['scaleway_security_group_rule'] = response.json['rule']
+
+ else:
+ ret['scaleway_security_group_rule'] = existing_rule
+
+ return ret
+
+
+def absent_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete(
+ '/security_groups/%s/rules/%s' %
+ (security_group_id, existing_rule['id']))
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error deleting security group rule "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ api = Scaleway(module=module)
+
+ security_group_rule = {
+ 'protocol': module.params['protocol'],
+ 'dest_port_from': module.params['port'],
+ 'ip_range': module.params['ip_range'],
+ 'direction': module.params['direction'],
+ 'action': module.params['action'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ if module.params['state'] == 'present':
+ summary = present_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ else:
+ summary = absent_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']),
+ port=dict(type='int', required=True),
+ ip_range=dict(type='str', default='0.0.0.0/0'),
+ direction=dict(type='str', required=True, choices=['inbound', 'outbound']),
+ action=dict(type='str', required=True, choices=['accept', 'drop']),
+ security_group=dict(type='str', required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py
new file mode 100644
index 00000000..d3e73669
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_server_info) instead.
+short_description: Gather facts about the Scaleway servers available.
+description:
+ - Gather facts about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers facts
+ community.general.scaleway_server_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_server_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_facts": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerFacts, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py
new file mode 100644
index 00000000..43b0badc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_info
+short_description: Gather information about the Scaleway servers available.
+description:
+ - Gather information about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers information
+ community.general.scaleway_server_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_server_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_server_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_info": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerInfo, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_server_info=ScalewayServerInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py
new file mode 100644
index 00000000..25f99e72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_snapshot_info) instead.
+short_description: Gather facts about the Scaleway snapshots available.
+description:
+ - Gather facts about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots facts
+ community.general.scaleway_snapshot_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_facts": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotFacts, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_snapshot_facts': ScalewaySnapshotFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py
new file mode 100644
index 00000000..f31b74b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_info
+short_description: Gather information about the Scaleway snapshots available.
+description:
+ - Gather information about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots information
+ community.general.scaleway_snapshot_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_snapshot_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_info": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotInfo, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
new file mode 100644
index 00000000..08555b23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+#
+# Scaleway SSH keys management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_sshkey
+short_description: Scaleway SSH keys management module
+author: Remy Leone (@sieben)
+description:
+ - This module manages SSH keys on a Scaleway account,
+ U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the SSH key.
+ default: present
+ choices:
+ - present
+ - absent
+ ssh_pub_key:
+ type: str
+ description:
+ - The public SSH key as a string to add.
+ required: true
+ api_url:
+ type: str
+ description:
+ - Scaleway API URL.
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+'''
+
+EXAMPLES = '''
+- name: "Add SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+
+- name: "Delete SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "absent"
+
+- name: "Add SSH key with explicit token"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+ oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ssh_public_keys": [
+ {"key": "ssh-rsa AAAA...."}
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway
+
+
+def extract_present_sshkeys(raw_organization_dict):
+ ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
+ ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
+ return ssh_key_lookup
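+
+# Illustrative note (not part of the original module): this returns the bare key
+# strings of the first user of the first organization, e.g. ['ssh-rsa AAAA...'].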
+
+
+def extract_user_id(raw_organization_dict):
+ return raw_organization_dict["organizations"][0]["users"][0]["id"]
+
+
+def sshkey_user_patch(ssh_lookup):
+ ssh_list = {"ssh_public_keys": [{"key": key}
+ for key in ssh_lookup]}
+ return ssh_list
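+
+# Illustrative example (not part of the original module):
+# sshkey_user_patch(['ssh-rsa AAAA...']) builds the PATCH body
+# {'ssh_public_keys': [{'key': 'ssh-rsa AAAA...'}]}; the full key list is
+# always sent back, not a delta.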
+
+
+def core(module):
+ ssh_pub_key = module.params['ssh_pub_key']
+ state = module.params["state"]
+ account_api = Scaleway(module)
+ response = account_api.get('organizations')
+
+ status_code = response.status_code
+ organization_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ user_id = extract_user_id(organization_json)
+ present_sshkeys = []
+ try:
+ present_sshkeys = extract_present_sshkeys(organization_json)
+ except (KeyError, IndexError):
+ module.fail_json(msg="Error while extracting present SSH keys from API")
+
+ if state in ('present',):
+ if ssh_pub_key in present_sshkeys:
+ module.exit_json(changed=False)
+
+ # If key not found create it!
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.append(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if ssh_pub_key not in present_sshkeys:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.remove(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ ssh_pub_key=dict(required=True),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py
new file mode 100644
index 00000000..4a38e76d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+#
+# Scaleway user data management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_user_data
+short_description: Scaleway user_data management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages user_data on compute instances on Scaleway."
+ - "It can be used to configure cloud-init for instance"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ server_id:
+ type: str
+ description:
+ - Scaleway Compute instance ID of the server.
+ required: true
+
+ user_data:
+ type: dict
+ description:
+ - User-defined data. Typically used with C(cloud-init).
+ - Pass your cloud-init script here as a string.
+ required: false
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone.
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = '''
+- name: Update the cloud-init
+ community.general.scaleway_user_data:
+ server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+ region: ams1
+ user_data:
+ cloud-init: 'final_message: "Hello World!"'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+
+def patch_user_data(compute_api, server_id, key, value):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"})
+ if not response.ok:
+ msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
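+
+# Illustrative note (not part of the original module): values are sent as raw
+# text/plain bodies, one key per request, e.g.
+# patch_user_data(compute_api, server_id, 'cloud-init', '#cloud-config\n...')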
+
+
+def delete_user_data(compute_api, server_id, key):
+ compute_api.module.debug("Starting deleting user_data attributes: %s" % key)
+
+ response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
+
+ if not response.ok:
+ msg = 'Error during user_data deleting: (%s) %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def get_user_data(compute_api, server_id, key):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.get(path=path)
+ if not response.ok:
+ msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ server_id = module.params["server_id"]
+ user_data = module.params["user_data"]
+ changed = False
+
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+ compute_api = Scaleway(module=module)
+
+ user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+ if not user_data_list.ok:
+ msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+ compute_api.module.fail_json(msg=msg)
+
+ present_user_data_keys = user_data_list.json["user_data"]
+ present_user_data = dict(
+ (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key))
+ for key in present_user_data_keys
+ )
+
+ if present_user_data == user_data:
+ module.exit_json(changed=changed, msg=user_data_list.json)
+
+ # First we remove keys that are not defined in the wished user_data
+ for key in present_user_data:
+ if key not in user_data:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
+
+ # Then we patch keys that are different
+ for key, value in user_data.items():
+ if key not in present_user_data or user_data[key] != present_user_data[key]:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value)
+
+ module.exit_json(changed=changed, msg=user_data)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ user_data=dict(type="dict"),
+ server_id=dict(required=True),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py
new file mode 100644
index 00000000..e879d3c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+#
+# Scaleway volumes management module
+#
+# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_volume
+short_description: Scaleway volumes management module
+author: Henryk Konsek (@hekonsek)
+description:
+ - This module manages volumes on Scaleway account
+ U(https://developer.scaleway.com)
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the volume.
+ default: present
+ choices:
+ - present
+ - absent
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+ name:
+ type: str
+ description:
+ - Name used to identify the volume.
+ required: true
+ organization:
+ type: str
+ description:
+ - Scaleway organization ID to which the volume belongs.
+ size:
+ type: int
+ description:
+ - Size of the volume in bytes.
+ volume_type:
+ type: str
+ description:
+ - Type of the volume (for example C(l_ssd)).
+'''
+
+EXAMPLES = '''
+- name: Create 10GB volume
+ community.general.scaleway_volume:
+ name: my-volume
+ state: present
+ region: par1
+ organization: "{{ scw_org }}"
+ "size": 10000000000
+ volume_type: l_ssd
+ register: server_creation_check_task
+
+- name: Make sure volume deleted
+ community.general.scaleway_volume:
+ name: my-volume
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "volume": {
+ "export_uri": null,
+ "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
+ "name": "volume-0-3",
+ "organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
+ "server": null,
+ "size": 10000000000,
+ "volume_type": "l_ssd"
+ }
+}
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def core(module):
+ state = module.params['state']
+ name = module.params['name']
+ organization = module.params['organization']
+ size = module.params['size']
+ volume_type = module.params['volume_type']
+
+ account_api = Scaleway(module)
+ response = account_api.get('volumes')
+ status_code = response.status_code
+ volumes_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ volume_by_name = None
+ for volume in volumes_json['volumes']:
+ if volume['organization'] == organization and volume['name'] == name:
+ volume_by_name = volume
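+ # Note: if several volumes in the organization share the same name, the
+ # last one returned by the API wins here.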
+
+ if state in ('present',):
+ if volume_by_name is not None:
+ module.exit_json(changed=False)
+
+ # Honour check mode before creating the volume; the absent branch
+ # below already does so.
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type}
+
+ response = account_api.post('/volumes', payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if volume_by_name is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/volumes/' + volume_by_name['id'])
+ if response.status_code == 204:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ name=dict(required=True),
+ size=dict(type='int'),
+ organization=dict(),
+ volume_type=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py
new file mode 100644
index 00000000..e894f965
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_volume_info) instead.
+short_description: Gather facts about the Scaleway volumes available.
+description:
+ - Gather facts about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes facts
+ community.general.scaleway_volume_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_volume_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_facts": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeFacts, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_volume_facts': ScalewayVolumeFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py
new file mode 100644
index 00000000..ff6093e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_info
+short_description: Gather information about the Scaleway volumes available.
+description:
+ - Gather information about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes information
+ community.general.scaleway_volume_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_volume_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_volume_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_info": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeInfo, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_volume_info=ScalewayVolumeInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py
new file mode 100644
index 00000000..457e2e23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+- Manages SELinux file context mapping definitions.
+- Similar to the C(semanage fcontext) command.
+options:
+ target:
+ description:
+ - Target path (expression).
+ type: str
+ required: yes
+ aliases: [ path ]
+ ftype:
+ description:
+ - The file type that should have SELinux contexts applied.
+ - "The following file type options are available:"
+ - C(a) for all files,
+ - C(b) for block devices,
+ - C(c) for character devices,
+ - C(d) for directories,
+ - C(f) for regular files,
+ - C(l) for symbolic links,
+ - C(p) for named pipes,
+ - C(s) for socket files.
+ type: str
+ choices: [ a, b, c, d, f, l, p, s ]
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified target.
+ type: str
+ required: yes
+ seuser:
+ description:
+ - SELinux user for the specified target.
+ type: str
+ selevel:
+ description:
+ - SELinux range for the specified target.
+ type: str
+ aliases: [ serange ]
+ state:
+ description:
+ - Whether the SELinux file context must be C(absent) or C(present).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ - Note that this does not apply SELinux file contexts to existing files.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+    - Useful for scenarios (for example, a chrooted environment) in which you cannot get the real SELinux state.
+ type: bool
+ default: no
+notes:
+- The changes are persistent across reboots.
+- The M(community.general.sefcontext) module does not apply the new
+  SELinux context(s) to existing files, so it is advisable to define the
+  SELinux file contexts before creating files, or to run C(restorecon)
+  manually for the existing files that require the new SELinux file contexts.
+- Not applying SELinux fcontexts to existing files is a deliberate
+  decision, as it would be unclear what the reported changes would
+  entail, and there is no guarantee that applying an SELinux fcontext
+  would not pick up other unrelated prior changes.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Allow apache to modify files in /srv/git_repos
+ community.general.sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_git_rw_content_t
+ state: present
+
+- name: Apply new SELinux file context to filesystem
+ ansible.builtin.command: restorecon -irv /srv/git_repos
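+
+# A minimal sketch that also pins the SELinux user and range via
+# seuser/selevel (the path and types below are illustrative):
+- name: Map an upload directory with an explicit SELinux user and range
+  community.general.sefcontext:
+    target: '/srv/uploads(/.*)?'
+    setype: httpd_sys_rw_content_t
+    seuser: system_u
+    selevel: s0
+    state: present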
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+# Add missing entries (backward compatible)
+if HAVE_SEOBJECT:
+ seobject.file_types.update(
+ a=seobject.SEMANAGE_FCONTEXT_ALL,
+ b=seobject.SEMANAGE_FCONTEXT_BLOCK,
+ c=seobject.SEMANAGE_FCONTEXT_CHAR,
+ d=seobject.SEMANAGE_FCONTEXT_DIR,
+ f=seobject.SEMANAGE_FCONTEXT_REG,
+ l=seobject.SEMANAGE_FCONTEXT_LINK,
+ p=seobject.SEMANAGE_FCONTEXT_PIPE,
+ s=seobject.SEMANAGE_FCONTEXT_SOCK,
+ )
+
+# Make backward compatible
+option_to_file_type_str = dict(
+ a='all files',
+ b='block device',
+ c='character device',
+ d='directory',
+ f='regular file',
+ l='symbolic link',
+ p='named pipe',
+ s='socket',
+)
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+
+    # Beware that record keys contain a string representation of the file_type
+ record = (target, option_to_file_type_str[ftype])
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
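+
+
+# For reference, fcontextRecords.get_all() returns a mapping keyed by
+# (target, file type string) whose values unpack as
+# (seuser, serole, setype, serange); illustrative entry, not taken from a
+# live policy:
+#   {('/srv/git_repos(/.*)?', 'all files'):
+#       ('system_u', 'object_r', 'httpd_git_rw_content_t', 's0')}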
+
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify SELinux file context mapping definition to the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+
+def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ target=dict(type='str', required=True, aliases=['path']),
+ ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())),
+ setype=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, state=state)
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py
new file mode 100644
index 00000000..0d1f9f59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Michael Scherer <misc@zarb.org>
+# inspired by code of github.com/dandiker/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: selinux_permissive
+short_description: Change permissive domain in SELinux policy
+description:
+ - Add and remove a domain from the list of permissive domains.
+options:
+ domain:
+ description:
+ - The domain that will be added or removed from the list of permissive domains.
+ type: str
+ required: true
+ aliases: [ name ]
+ permissive:
+ description:
+ - Indicate if the domain should or should not be set as permissive.
+ type: bool
+ required: true
+ no_reload:
+ description:
+      - Disable reloading of the SELinux policy after making a change to a domain's permissive setting.
+      - The default is C(no), which causes the policy to be reloaded when a domain changes state.
+      - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.
+ type: bool
+ default: no
+ store:
+ description:
+ - Name of the SELinux policy store to use.
+ type: str
+notes:
+ - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer).
+requirements: [ policycoreutils-python ]
+author:
+- Michael Scherer (@mscherer) <misc@zarb.org>
+'''
+
+EXAMPLES = r'''
+- name: Change the httpd_t domain to permissive
+ community.general.selinux_permissive:
+ name: httpd_t
+ permissive: true
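+
+# A minimal counterpart sketch returning the domain to enforcing
+# (assumes the domain was previously made permissive):
+- name: Return the httpd_t domain to enforcing
+  community.general.selinux_permissive:
+    name: httpd_t
+    permissive: false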
+'''
+
+import traceback
+
+HAVE_SEOBJECT = False
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True, aliases=['name']),
+ store=dict(type='str', default=''),
+ permissive=dict(type='bool', required=True),
+ no_reload=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ # global vars
+ changed = False
+ store = module.params['store']
+ permissive = module.params['permissive']
+ domain = module.params['domain']
+ no_reload = module.params['no_reload']
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"),
+ exception=SEOBJECT_IMP_ERR)
+
+ try:
+ permissive_domains = seobject.permissiveRecords(store)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ # not supported on EL 6
+ if 'set_reload' in dir(permissive_domains):
+ permissive_domains.set_reload(not no_reload)
+
+ try:
+ all_domains = permissive_domains.get_all()
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ if permissive:
+ if domain not in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.add(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+ else:
+ if domain in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.delete(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+
+ module.exit_json(changed=changed, store=store,
+ permissive=permissive, domain=domain)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py
new file mode 100644
index 00000000..7036dad9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
+# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: selogin
+short_description: Manages Linux user to SELinux user mapping
+description:
+  - Manages Linux user to SELinux user mapping.
+options:
+ login:
+ type: str
+ description:
+      - A Linux user, or a Linux group if it begins with C(%).
+    required: true
+  seuser:
+    type: str
+    description:
+      - SELinux user name.
+ selevel:
+ type: str
+ aliases: [ serange ]
+ description:
+      - MLS/MCS security range (MLS/MCS systems only). The SELinux range for the SELinux login mapping; defaults to the SELinux user record range.
+ default: s0
+ state:
+ type: str
+ description:
+      - Whether the login mapping should be C(present) or C(absent).
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+      - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+notes:
+  - The changes are persistent across reboots.
+  - Not tested on any Debian-based system.
+requirements: [ 'libselinux', 'policycoreutils' ]
+author:
+- Dan Keder (@dankeder)
+- Petr Lautrbach (@bachradsusi)
+- James Cassell (@jamescassell)
+'''
+
+EXAMPLES = '''
+- name: Modify the default user on the system to the guest_u user
+ community.general.selogin:
+ login: __default__
+ seuser: guest_u
+ state: present
+
+- name: Assign gijoe user on an MLS machine a range and to the staff_u user
+ community.general.selogin:
+ login: gijoe
+ seuser: staff_u
+ serange: SystemLow-Secret
+ state: present
+
+- name: Assign all users in the engineering group to the staff_u user
+ community.general.selogin:
+ login: '%engineering'
+ seuser: staff_u
+ state: present
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
+ """ Add linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ if login not in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.add(login, seuser, serange)
+ else:
+ if all_logins[login][0] != seuser or all_logins[login][1] != serange:
+ change = True
+ if not module.check_mode:
+ selogin.modify(login, seuser, serange)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
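+
+
+# For reference, loginRecords.get_all() returns a mapping of login name to
+# a (seuser, serange) tuple, roughly (values illustrative):
+#   {'__default__': ('unconfined_u', 's0-s0:c0.c1023'),
+#    'gijoe': ('staff_u', 'SystemLow-Secret')}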
+
+
+def semanage_login_del(module, login, seuser, do_reload, sestore=''):
+ """ Delete linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ if login in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.delete(login)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ login=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange'], default='s0'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ["state", "present", ["seuser"]]
+ ],
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ login = module.params['login']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'login': login,
+ 'seuser': seuser,
+ 'serange': serange,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
+ elif state == 'absent':
+ result['changed'] = semanage_login_del(module, login, seuser, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py
new file mode 100644
index 00000000..67132771
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+ - "Sends an email with a SendGrid account through their API, not through
+ the SMTP service."
+notes:
+ - "This module is non-idempotent because it sends an email through the
+ external API. It is idempotent only in the case that the module fails."
+ - "Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need an active SendGrid
+ account."
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
+ you must pip install sendgrid"
+ - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)"
+requirements:
+ - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported)
+options:
+ username:
+ type: str
+ description:
+ - Username for logging into the SendGrid account.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ password:
+ type: str
+ description:
+ - Password that corresponds to the username.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ from_address:
+ type: str
+ description:
+ - The address in the "from" field for the email.
+ required: true
+ to_addresses:
+ type: list
+ description:
+ - A list with one or more recipient email addresses.
+ required: true
+ subject:
+ type: str
+ description:
+ - The desired subject for the email.
+ required: true
+ api_key:
+ type: str
+ description:
+ - Sendgrid API key to use instead of username/password.
+ cc:
+ type: list
+ description:
+ - A list of email addresses to cc.
+ bcc:
+ type: list
+ description:
+ - A list of email addresses to bcc.
+ attachments:
+ type: list
+ description:
+ - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs).
+ from_name:
+ type: str
+ description:
+      - The name you want to appear in the from field, for example C(John Doe).
+ html_body:
+ description:
+ - Whether the body is html content that should be rendered.
+ type: bool
+ default: 'no'
+ headers:
+ type: dict
+ description:
+ - A dict to pass on as headers.
+ body:
+ type: str
+ description:
+ - The e-mail body content.
+ required: yes
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = r'''
+- name: Send an email to a single recipient that the deployment was successful
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "ansible@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ subject: "Deployment success."
+ body: "The most recent Ansible deployment was successful."
+ delegate_to: localhost
+
+- name: Send an email to more than one recipient that the build failed
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "build@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ - "devteam@mycompany.com"
+ subject: "Build failure!."
+ body: "Unable to pull source repository from Git server."
+ delegate_to: localhost
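+
+# A minimal sketch using an api_key instead of username/password
+# (assumes the sendgrid library is installed; the variable name is illustrative):
+- name: Send an email using an API key
+  community.general.sendgrid:
+    api_key: "{{ sendgrid_api_key }}"
+    from_address: "ansible@mycompany.com"
+    to_addresses:
+      - "ops@mycompany.com"
+    subject: "Deployment success."
+    body: "The most recent Ansible deployment was successful."
+  delegate_to: localhost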
+'''
+
+# =======================================
+# sendgrid module support methods
+#
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+SENDGRID_IMP_ERR = None
+try:
+ import sendgrid
+ HAS_SENDGRID = True
+except ImportError:
+ SENDGRID_IMP_ERR = traceback.format_exc()
+ HAS_SENDGRID = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.urls import fetch_url
+
+
+def post_sendgrid_api(module, username, password, from_address, to_addresses,
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
+
+ if not HAS_SENDGRID:
+ SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+ AGENT = "Ansible"
+ data = {'api_user': username, 'api_key': password,
+ 'from': from_address, 'subject': subject, 'text': body}
+ encoded_data = urlencode(data)
+ to_addresses_api = ''
+ for recipient in to_addresses:
+ recipient = to_bytes(recipient, errors='surrogate_or_strict')
+ to_addresses_api += '&to[]=%s' % recipient
+ encoded_data += to_addresses_api
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
+ return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ else:
+ # Remove this check when adding Sendgrid API v3 support
+ if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"):
+ module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.")
+
+ if api_key:
+ sg = sendgrid.SendGridClient(api_key)
+ else:
+ sg = sendgrid.SendGridClient(username, password)
+
+ message = sendgrid.Mail()
+ message.set_subject(subject)
+
+ for recip in to_addresses:
+ message.add_to(recip)
+
+ if cc:
+ for recip in cc:
+ message.add_cc(recip)
+ if bcc:
+ for recip in bcc:
+ message.add_bcc(recip)
+
+ if headers:
+ message.set_headers(headers)
+
+ if attachments:
+ for f in attachments:
+ name = os.path.basename(f)
+ message.add_attachment(name, f)
+
+ if from_name:
+ message.set_from('%s <%s.' % (from_name, from_address))
+ else:
+ message.set_from(from_address)
+
+ if html_body:
+ message.set_html(body)
+ else:
+ message.set_text(body)
+
+ return sg.send(message)
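+
+
+# For reference, the urlencoded V2 payload built in the no-library branch
+# above looks roughly like this (values illustrative):
+#   api_user=me&api_key=secret&from=ansible%40example.com&subject=hi&text=body&to[]=ops%40example.com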
+# =======================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(required=False),
+ password=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
+ bcc=dict(required=False, type='list'),
+ cc=dict(required=False, type='list'),
+ headers=dict(required=False, type='dict'),
+ from_address=dict(required=True),
+ from_name=dict(required=False),
+ to_addresses=dict(required=True, type='list'),
+ subject=dict(required=True),
+ body=dict(required=True),
+ html_body=dict(required=False, default=False, type='bool'),
+ attachments=dict(required=False, type='list')
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['api_key', 'password'],
+ ['api_key', 'username']
+ ],
+ required_together=[['username', 'password']],
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ api_key = module.params['api_key']
+ bcc = module.params['bcc']
+ cc = module.params['cc']
+ headers = module.params['headers']
+ from_name = module.params['from_name']
+ from_address = module.params['from_address']
+ to_addresses = module.params['to_addresses']
+ subject = module.params['subject']
+ body = module.params['body']
+ html_body = module.params['html_body']
+ attachments = module.params['attachments']
+
+ sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
+
+ if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
+ reason = 'when using any of the following arguments: ' \
+ 'api_key, bcc, cc, headers, from_name, html_body, attachments'
+ module.fail_json(msg=missing_required_lib('sendgrid', reason=reason),
+ exception=SENDGRID_IMP_ERR)
+
+ response, info = post_sendgrid_api(module, username, password,
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+
+ if not HAS_SENDGRID:
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ else:
+ if response != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
+
+ module.exit_json(msg=subject, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py
new file mode 100644
index 00000000..9ebe2765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+ - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
+ - they are simply specified for your convenience.
+options:
+ name:
+ type: str
+ description:
+ - The name of the check
+ - This is the key that is used to determine whether a check exists
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+ - Create a backup file (if yes), including the timestamp information so
+ - you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ handlers:
+ type: list
+ description:
+ - List of handlers to notify when the check fails
+ default: []
+ subscribers:
+ type: list
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ default: []
+ interval:
+ type: int
+ description:
+ - Check interval in seconds
+ timeout:
+ type: int
+ description:
+ - Timeout for the check
+ - If not specified, it defaults to 10.
+ ttl:
+ type: int
+ description:
+ - Time to live in seconds until the check is considered stale
+ handle:
+ description:
+ - Whether the check should be handled or not
+ - Default is C(false).
+ type: bool
+ subdue_begin:
+ type: str
+ description:
+ - When to disable handling of check failures
+ subdue_end:
+ type: str
+ description:
+ - When to enable handling of check failures
+ dependencies:
+ type: list
+ description:
+ - Other checks this check depends on, if dependencies fail,
+ - handling of this check will be disabled
+ default: []
+ metric:
+ description:
+ - Whether the check is a metric
+ type: bool
+ default: 'no'
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ - Default is C(false).
+ type: bool
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the sensu api
+ - Default is C(false).
+ type: bool
+ occurrences:
+ type: int
+ description:
+ - Number of event occurrences before the handler should take action
+ - If not specified, defaults to 1.
+ refresh:
+ type: int
+ description:
+ - Number of seconds handlers should wait before taking second action
+ aggregate:
+ description:
+ - Classifies the check as an aggregate check,
+ - making it available via the aggregate API
+ - Default is C(false).
+ type: bool
+ low_flap_threshold:
+ type: int
+ description:
+ - The low threshold for flap detection
+ high_flap_threshold:
+ type: int
+ description:
+ - The high threshold for flap detection
+ custom:
+ type: dict
+ description:
+ - A hash/dictionary of custom parameters for mixing to the configuration.
+      - You cannot override other module parameters using this.
+ default: {}
+ source:
+ type: str
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: Get cpu metrics
+ community.general.sensu_check:
+ name: cpu_load
+ command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric: yes
+ handlers: relay
+ subscribers: common
+ interval: 60
+
+# Check whether nginx is running
+- name: Check nginx process
+ community.general.sensu_check:
+ name: nginx_running
+ command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
+ handlers: default
+ subscribers: nginx
+ interval: 60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: Check disk
+ community.general.sensu_check:
+ name: check_disk_capacity
+ state: absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ stream = None
+ try:
+ try:
+ stream = open(path, 'r')
+ config = json.load(stream)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+ finally:
+ if stream:
+ stream.close()
+
+ if 'checks' not in config:
+ if state == 'absent':
+ reasons.append('`checks\' section did not exist and state is `absent\'')
+ return changed, reasons
+ config['checks'] = {}
+ changed = True
+ reasons.append('`checks\' section did not exist')
+
+ if state == 'absent':
+ if name in config['checks']:
+ del config['checks'][name]
+ changed = True
+ reasons.append('check was present and state is `absent\'')
+
+ if state == 'present':
+ if name not in config['checks']:
+ check = {}
+ config['checks'][name] = check
+ changed = True
+ reasons.append('check was absent and state is `present\'')
+ else:
+ check = config['checks'][name]
+ simple_opts = ['command',
+ 'handlers',
+ 'subscribers',
+ 'interval',
+ 'timeout',
+ 'ttl',
+ 'handle',
+ 'dependencies',
+ 'standalone',
+ 'publish',
+ 'occurrences',
+ 'refresh',
+ 'aggregate',
+ 'low_flap_threshold',
+ 'high_flap_threshold',
+ 'source',
+ ]
+ for opt in simple_opts:
+ if module.params[opt] is not None:
+ if opt not in check or check[opt] != module.params[opt]:
+ check[opt] = module.params[opt]
+ changed = True
+ reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+ else:
+ if opt in check:
+ del check[opt]
+ changed = True
+ reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+ if module.params['custom']:
+ # Convert to json
+ custom_params = module.params['custom']
+            overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+            if overwritten_fields:
+                msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+                module.fail_json(msg=msg)
+
+ for k, v in custom_params.items():
+ if k in config['checks'][name]:
+ if not config['checks'][name][k] == v:
+ changed = True
+ reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+ else:
+ changed = True
+ reasons.append('`custom param {opt}\' was added'.format(opt=k))
+ check[k] = v
+ simple_opts += custom_params.keys()
+
+ # Remove obsolete custom params
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
+ changed = True
+ reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+ del check[opt]
+
+ if module.params['metric']:
+ if 'type' not in check or check['type'] != 'metric':
+ check['type'] = 'metric'
+ changed = True
+ reasons.append('`type\' was not defined or not `metric\'')
+ if not module.params['metric'] and 'type' in check:
+ del check['type']
+ changed = True
+ reasons.append('`type\' was defined')
+
+ if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+ subdue = {'begin': module.params['subdue_begin'],
+ 'end': module.params['subdue_end'],
+ }
+ if 'subdue' not in check or check['subdue'] != subdue:
+ check['subdue'] = subdue
+ changed = True
+ reasons.append('`subdue\' did not exist or was different')
+ else:
+ if 'subdue' in check:
+ del check['subdue']
+ changed = True
+ reasons.append('`subdue\' was removed')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ try:
+ stream = open(path, 'w')
+ stream.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ finally:
+ if stream:
+ stream.close()
+
+ return changed, reasons
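+
+
+# For reference, the checks file managed above ends up shaped roughly like
+# this (illustrative):
+#   {"checks": {"nginx_running": {"command": "...", "subscribers": ["nginx"],
+#                                 "interval": 60, "handlers": ["default"]}}}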
+
+
+def main():
+
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list'},
+ 'subscribers': {'type': 'list'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'ttl': {'type': 'int'},
+ 'handle': {'type': 'bool'},
+ 'subdue_begin': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
+ 'dependencies': {'type': 'list'},
+ 'metric': {'type': 'bool', 'default': 'no'},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
+ 'high_flap_threshold': {'type': 'int'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
+ }
+
+ required_together = [['subdue_begin', 'subdue_end']]
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+ if module.params['state'] != 'absent' and module.params['command'] is None:
+ module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_check(module, path, name, state, backup)
+
+ module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py
new file mode 100644
index 00000000..35444f60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_client
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu client configuration
+description:
+ - Manages Sensu client configuration.
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the client should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the client. The name cannot contain special characters or spaces.
+ - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu).
+ address:
+ type: str
+ description:
+ - An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
+      - If not specified, it defaults to a non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu).
+ subscriptions:
+ type: list
+ description:
+ - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver).
+ - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
+ - The subscriptions array items must be strings.
+ safe_mode:
+ description:
+ - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check.
+ type: bool
+ default: 'no'
+ redact:
+ type: list
+ description:
+ - Client definition attributes to redact (values) when logging and sending client keepalives.
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the Sensu client socket.
+ keepalives:
+ description:
+ - If Sensu should monitor keepalives for this client.
+ type: bool
+ default: 'yes'
+ keepalive:
+ type: dict
+ description:
+ - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc).
+ registration:
+ type: dict
+ description:
+ - The registration definition scope, used to configure Sensu registration event handlers.
+ deregister:
+ description:
+ - If a deregistration event should be created upon Sensu client process stop.
+ - Default is C(false).
+ type: bool
+ deregistration:
+ type: dict
+ description:
+ - The deregistration definition scope, used to configure automated Sensu client de-registration.
+ ec2:
+ type: dict
+ description:
+ - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only).
+ chef:
+ type: dict
+ description:
+ - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only).
+ puppet:
+ type: dict
+ description:
+ - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only).
+ servicenow:
+ type: dict
+ description:
+ - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only).
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Minimum possible configuration
+- name: Configure Sensu client
+ community.general.sensu_client:
+ subscriptions:
+ - default
+
+# With customization
+- name: Configure Sensu client
+ community.general.sensu_client:
+ name: "{{ ansible_fqdn }}"
+ address: "{{ ansible_default_ipv4['address'] }}"
+ subscriptions:
+ - default
+ - webserver
+ redact:
+ - password
+ socket:
+ bind: 127.0.0.1
+ port: 3030
+ keepalive:
+ thresholds:
+ warning: 180
+ critical: 300
+ handlers:
+ - email
+ custom:
+ - broadcast: irc
+ occurrences: 3
+ register: client
+ notify:
+ - Restart sensu-client
+
+- name: Secure Sensu client configuration file
+ ansible.builtin.file:
+ path: "{{ client['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+
+- name: Delete the Sensu client configuration
+ community.general.sensu_client:
+ state: "absent"
+'''
+
+RETURN = '''
+config:
+ description: Effective client configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'client', 'subscriptions': ['default']}
+file:
+ description: Path to the client configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/client.json"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=False),
+ address=dict(type='str', required=False),
+ subscriptions=dict(type='list', required=False),
+ safe_mode=dict(type='bool', required=False, default=False),
+ redact=dict(type='list', required=False),
+ socket=dict(type='dict', required=False),
+ keepalives=dict(type='bool', required=False, default=True),
+ keepalive=dict(type='dict', required=False),
+ registration=dict(type='dict', required=False),
+ deregister=dict(type='bool', required=False),
+ deregistration=dict(type='dict', required=False),
+ ec2=dict(type='dict', required=False),
+ chef=dict(type='dict', required=False),
+ puppet=dict(type='dict', required=False),
+ servicenow=dict(type='dict', required=False)
+ ),
+ required_if=[
+ ['state', 'present', ['subscriptions']]
+ ]
+ )
+
+ state = module.params['state']
+ path = "/etc/sensu/conf.d/client.json"
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build client configuration from module arguments
+ config = {'client': {}}
+ args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact',
+ 'socket', 'keepalives', 'keepalive', 'registration', 'deregister',
+ 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['client'][arg] = module.params[arg]
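+
+    # At this point config holds the client definition scope, shaped roughly
+    # like (illustrative): {'client': {'name': 'web01', 'subscriptions': ['default']}}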
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+ current_config = json.load(open(path, 'r'))
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Client configuration is already up to date',
+ config=config['client'],
+ file=path)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Client configuration would have been updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+
+ try:
+ with open(path, 'w') as client:
+ client.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Client configuration updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py
new file mode 100644
index 00000000..53152edc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_handler
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu handler configuration
+description:
+ - Manages Sensu handler configuration
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the handler should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the handler. The name cannot contain special characters or spaces.
+ required: True
+ type:
+ type: str
+ description:
+ - The handler type
+ choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ]
+ filter:
+ type: str
+ description:
+ - The Sensu event filter (name) to use when filtering events for the handler.
+ filters:
+ type: list
+ description:
+ - An array of Sensu event filters (names) to use when filtering events for the handler.
+ - Each array item must be a string.
+ severities:
+ type: list
+ description:
+ - An array of check result severities the handler will handle.
+ - 'NOTE: event resolution bypasses this filtering.'
+ - "Example: [ 'warning', 'critical', 'unknown' ]."
+ mutator:
+ type: str
+ description:
+ - The Sensu event mutator (name) to use to mutate event data for the handler.
+ timeout:
+ type: int
+ description:
+ - The handler execution duration timeout in seconds (hard stop).
+ - Only used by pipe and tcp handler types.
+ default: 10
+ handle_silenced:
+ description:
+ - If events matching one or more silence entries should be handled.
+ type: bool
+ default: 'no'
+ handle_flapping:
+ description:
+ - If events in the flapping state should be handled.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - The handler command to be executed.
+ - The event data is passed to the process via STDIN.
+ - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").'
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the TCP/UDP handler socket.
+ - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").'
+ pipe:
+ type: dict
+ description:
+ - The pipe definition scope, used to configure the Sensu transport pipe.
+ - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
+ handlers:
+ type: list
+ description:
+ - An array of Sensu event handlers (names) to use for events using the handler set.
+ - Each array item must be a string.
+ - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Configure a handler that sends event data as STDIN (pipe)
+- name: Configure IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ type: "pipe"
+ command: "/usr/local/bin/notify-irc.sh"
+ severities:
+ - "ok"
+ - "critical"
+ - "warning"
+ - "unknown"
+ timeout: 15
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+# Delete a handler
+- name: Delete IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ state: "absent"
+
+# Example of a TCP handler
+- name: Configure TCP Sensu handler
+ community.general.sensu_handler:
+ name: "tcp_handler"
+ type: "tcp"
+ timeout: 30
+ socket:
+ host: "10.0.1.99"
+ port: 4444
+ register: handler
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+- name: Secure Sensu handler configuration file
+ ansible.builtin.file:
+ path: "{{ handler['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+'''
+
+RETURN = '''
+config:
+ description: Effective handler configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
+file:
+ description: Path to the handler configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/handlers/irc.json"
+name:
+ description: Name of the handler
+ returned: success
+ type: str
+ sample: "irc"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
+ filter=dict(type='str', required=False),
+ filters=dict(type='list', required=False),
+ severities=dict(type='list', required=False),
+ mutator=dict(type='str', required=False),
+ timeout=dict(type='int', required=False, default=10),
+ handle_silenced=dict(type='bool', required=False, default=False),
+ handle_flapping=dict(type='bool', required=False, default=False),
+ command=dict(type='str', required=False),
+ socket=dict(type='dict', required=False),
+ pipe=dict(type='dict', required=False),
+ handlers=dict(type='list', required=False),
+ ),
+ required_if=[
+ ['state', 'present', ['type']],
+ ['type', 'pipe', ['command']],
+ ['type', 'tcp', ['socket']],
+ ['type', 'udp', ['socket']],
+ ['type', 'transport', ['pipe']],
+ ['type', 'set', ['handlers']]
+ ]
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name)
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build handler configuration from module arguments
+ config = {'handlers': {name: {}}}
+ args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
+ 'handle_silenced', 'handle_flapping', 'command', 'socket',
+ 'pipe', 'handlers']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['handlers'][name][arg] = module.params[arg]
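+
+    # At this point config nests the handler under its name, shaped roughly
+    # like (illustrative): {'handlers': {'irc_handler': {'type': 'pipe', 'command': '...'}}}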
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+ current_config = json.load(open(path, 'r'))
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Handler configuration is already up to date',
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Handler configuration would have been updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ try:
+ with open(path, 'w') as handler:
+ handler.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Handler configuration updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py
new file mode 100644
index 00000000..12dc5d20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Steven Bambling <smbambling@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_silence
+author: Steven Bambling (@smbambling)
+short_description: Manage Sensu silence entries
+description:
+  - Create and clear (delete) silence entries via the Sensu API
+    for subscriptions and checks.
+options:
+ check:
+ type: str
+ description:
+ - Specifies the check which the silence entry applies to.
+ creator:
+ type: str
+ description:
+ - Specifies the entity responsible for this entry.
+ expire:
+ type: int
+ description:
+ - If specified, the silence entry will be automatically cleared
+ after this number of seconds.
+ expire_on_resolve:
+ description:
+ - If specified as true, the silence entry will be automatically
+ cleared once the condition it is silencing is resolved.
+ type: bool
+ reason:
+ type: str
+ description:
+ - If specified, this free-form string is used to provide context or
+ rationale for the reason this silence entry was created.
+ state:
+ type: str
+ description:
+      - Specifies whether to create or clear (delete) a silence entry via the Sensu API.
+ default: present
+ choices: ['present', 'absent']
+ subscription:
+ type: str
+ description:
+ - Specifies the subscription which the silence entry applies to.
+      - To create a silence entry for a client, prepend C(client:) to the client name.
+        Example - C(client:server1.example.dev)
+ required: true
+ url:
+ type: str
+ description:
+ - Specifies the URL of the Sensu monitoring host server.
+ required: false
+    default: http://127.0.0.1:4567
+'''
+
+EXAMPLES = '''
+# Silence ALL checks for a given client
+- name: Silence server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ creator: "{{ ansible_user_id }}"
+ reason: Performing maintenance
+
+# Silence specific check for a client
+- name: Silence CPU_Usage check for server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ check: CPU_Usage
+ creator: "{{ ansible_user_id }}"
+    reason: Investigating alert issue
+
+# Silence multiple clients from a dict, defined for example in your play vars:
+#
+#   silence:
+#     server1.example.dev:
+#       reason: 'Deployment in progress'
+#     server2.example.dev:
+#       reason: 'Deployment in progress'
+
+- name: Silence several clients from a dict
+ community.general.sensu_silence:
+ subscription: "client:{{ item.key }}"
+ reason: "{{ item.value.reason }}"
+ creator: "{{ ansible_user_id }}"
+ with_dict: "{{ silence }}"
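+
+# Clear (delete) a silence entry; the host name below is illustrative
+- name: Unsilence server1.example.dev
+  community.general.sensu_silence:
+    subscription: client:server1.example.dev
+    state: absent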
+'''
+
+RETURN = '''
+'''
+
+import json
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def query(module, url, check, subscription):
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='GET',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] == 500:
+ module.fail_json(
+ msg="Failed to query silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
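+    # Match the (rc, out, changed) return convention used by clear() and create()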
+ return False, json_out, False
+
+
+def clear(module, url, check, subscription):
+ # Test if silence exists before clearing
+ (rc, out, changed) = query(module, url, check, subscription)
+
+ d = dict((i['subscription'], i['check']) for i in out)
+ subscription_exists = subscription in d
+ if check and subscription_exists:
+ exists = (check == d[subscription])
+ else:
+ exists = subscription_exists
+
+ # If check/subscription doesn't exist
+ # exit with changed state of False
+ if not exists:
+ return False, out, changed
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced/clear'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 204:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def create(
+ module, url, check, creator, expire,
+ expire_on_resolve, reason, subscription):
+ (rc, out, changed) = query(module, url, check, subscription)
+ for i in out:
+ if (i['subscription'] == subscription):
+ if (
+ (check is None or check == i['check']) and
+ (
+ creator == '' or
+ creator == i['creator']) and
+ (
+ reason == '' or
+ reason == i['reason']) and
+ (
+ expire is None or expire == i['expire']) and
+ (
+ expire_on_resolve is None or
+ expire_on_resolve == i['expire_on_resolve']
+ )
+ ):
+ return False, out, False
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'creator': creator,
+ 'expire': expire,
+ 'expire_on_resolve': expire_on_resolve,
+ 'reason': reason,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 201:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" %
+ (subscription, info['msg'])
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ check=dict(required=False),
+ creator=dict(required=False),
+ expire=dict(type='int', required=False),
+ expire_on_resolve=dict(type='bool', required=False),
+ reason=dict(required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ subscription=dict(required=True),
+            url=dict(required=False, default='http://127.0.0.1:4567'),
+ ),
+ supports_check_mode=True
+ )
+
+ url = module.params['url']
+ check = module.params['check']
+ creator = module.params['creator']
+ expire = module.params['expire']
+ expire_on_resolve = module.params['expire_on_resolve']
+ reason = module.params['reason']
+ subscription = module.params['subscription']
+ state = module.params['state']
+
+ if state == 'present':
+ (rc, out, changed) = create(
+ module, url, check, creator,
+ expire, expire_on_resolve, reason, subscription
+ )
+
+ if state == 'absent':
+ (rc, out, changed) = clear(module, url, check, subscription)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py
new file mode 100644
index 00000000..6316254d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+description:
+ - Manage which I(sensu channels) a machine should subscribe to
+options:
+ name:
+ type: str
+ description:
+ - The name of the channel
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the machine should subscribe or unsubscribe from the channel
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the subscriptions json file
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+      - Create a backup file (if yes), including the timestamp information so you
+        can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ required: false
+ default: no
+requirements: [ ]
+author: Anders Ingemann (@andsens)
+'''
+
+RETURN = '''
+reasons:
+ description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: Subscribe to nginx checks
+ community.general.sensu_subscription: name=nginx
+
+# Unsubscribe from the common checks channel
+- name: Unsubscribe from common checks
+ community.general.sensu_subscription: name=common state=absent
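+
+# Manage a subscription in a non-default config file, keeping a backup of
+# the previous contents (the path below is illustrative)
+- name: Subscribe to nginx checks in a custom config file
+  community.general.sensu_subscription: name=nginx path=/etc/sensu/conf.d/client.json backup=yes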
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+        with open(path) as f:
+            config = json.load(f)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+
+ if 'client' not in config:
+ if state == 'absent':
+ reasons.append('`client\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client'] = {}
+ changed = True
+ reasons.append('`client\' did not exist')
+
+ if 'subscriptions' not in config['client']:
+ if state == 'absent':
+ reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client']['subscriptions'] = []
+ changed = True
+ reasons.append('`client.subscriptions\' did not exist')
+
+ if name not in config['client']['subscriptions']:
+ if state == 'absent':
+ reasons.append('channel subscription was absent')
+ return changed, reasons
+ config['client']['subscriptions'].append(name)
+ changed = True
+ reasons.append('channel subscription was absent and state is `present\'')
+ else:
+ if state == 'absent':
+ config['client']['subscriptions'].remove(name)
+ changed = True
+ reasons.append('channel subscription was present and state is `absent\'')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+            with open(path, 'w') as f:
+                f.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, reasons
+
+
+def main():
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+                'backup': {'type': 'bool', 'default': False},
+ }
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_subscription(module, path, name, state, backup)
+
+ module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py
new file mode 100644
index 00000000..71df8d6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Dan Keder <dan.keder@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: seport
+short_description: Manages SELinux network port type definitions
+description:
+ - Manages SELinux network port type definitions.
+options:
+ ports:
+ description:
+ - Ports or port ranges.
+      - Can be a list (since 2.6) or comma-separated string.
+ type: list
+ elements: str
+ required: true
+ proto:
+ description:
+ - Protocol for the specified port.
+ type: str
+ required: true
+ choices: [ tcp, udp ]
+ setype:
+ description:
+ - SELinux type for the specified port.
+ type: str
+ required: true
+ state:
+ description:
+      - Desired state of the port type definition.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+      - Run independently of the SELinux runtime state.
+ type: bool
+ default: no
+notes:
+ - The changes are persistent across reboots.
+  - Not tested on any Debian-based system.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dan Keder (@dankeder)
+'''
+
+EXAMPLES = r'''
+- name: Allow Apache to listen on tcp port 8888
+ community.general.seport:
+ ports: 8888
+ proto: tcp
+ setype: http_port_t
+ state: present
+
+- name: Allow sshd to listen on tcp port 8991
+ community.general.seport:
+ ports: 8991
+ proto: tcp
+ setype: ssh_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports: 10000-10100,10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports:
+ - 10000-10100
+ - 10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
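+
+- name: Remove the http_port_t mapping for tcp port 8888 (reverses the first example)
+  community.general.seport:
+    ports: 8888
+    proto: tcp
+    setype: http_port_t
+    state: absent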
+'''
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_port_get_ports(seport, setype, proto):
+ """ Get the list of ports that have the specified type definition.
+
+    :param seport: Instance of seobject.portRecords
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: list
+ :return: List of ports that have the specified SELinux type.
+ """
+ records = seport.get_all_by_type()
+ if (setype, proto) in records:
+ return records[(setype, proto)]
+ else:
+ return []
+
+
+def semanage_port_get_type(seport, port, proto):
+ """ Get the SELinux type of the specified port.
+
+    :param seport: Instance of seobject.portRecords
+
+ :type port: str
+ :param port: Port or port range (example: "8080", "8080-9090")
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: tuple
+ :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
+ """
+ if isinstance(port, str):
+ ports = port.split('-', 1)
+ if len(ports) == 1:
+            ports.extend(ports)  # single port given: duplicate it so the range start equals its end
+ else:
+ ports = (port, port)
+
+ key = (int(ports[0]), int(ports[1]), proto)
+
+ records = seport.get_all()
+ if key in records:
+ return records[key]
+ else:
+ return None
+
+
+def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
+ """ Add SELinux port type definition to the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port not in ports_by_type:
+ change = True
+ port_type = semanage_port_get_type(seport, port, proto)
+ if port_type is None and not module.check_mode:
+ seport.add(port, proto, serange, setype)
+ elif port_type is not None and not module.check_mode:
+ seport.modify(port, proto, serange, setype)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
+ """ Delete SELinux port type definition from the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port in ports_by_type:
+ change = True
+ if not module.check_mode:
+ seport.delete(port, proto)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ ports=dict(type='list', elements='str', required=True),
+ proto=dict(type='str', required=True, choices=['tcp', 'udp']),
+ setype=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ ports = module.params['ports']
+ proto = module.params['proto']
+ setype = module.params['setype']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'ports': ports,
+ 'proto': proto,
+ 'setype': setype,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
+ elif state == 'absent':
+ result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py
new file mode 100644
index 00000000..912d4226
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+ - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
+options:
+ state:
+ description:
+ - Goal state of given stage/project.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ serverless_bin_path:
+ description:
+    - The path of a serverless framework binary relative to the I(service_path), for example C(node_modules/.bin/serverless).
+ type: path
+ service_path:
+ description:
+ - The path to the root of the Serverless Service to be operated on.
+ type: path
+ required: true
+ stage:
+ description:
+    - The name of the serverless framework project stage to deploy to.
+    - If this is not provided, the serverless framework default of C(dev) is used.
+ type: str
+ functions:
+ description:
+ - A list of specific functions to deploy.
+ - If this is not provided, all functions in the service will be deployed.
+ type: list
+ elements: str
+ default: []
+ region:
+ description:
+ - AWS region to deploy the service to.
+ - This parameter defaults to C(us-east-1).
+ type: str
+ deploy:
+ description:
+ - Whether or not to deploy artifacts after building them.
+ - When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
+ - This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+ type: bool
+ default: yes
+ force:
+ description:
+ - Whether or not to force full deployment, equivalent to serverless C(--force) option.
+ type: bool
+ default: no
+ verbose:
+ description:
+ - Shows all stack events during deployment, and display any Stack Output.
+ type: bool
+ default: no
+notes:
+ - Currently, the C(serverless) command must be in the path of the node executing the task.
+ In the future this may be a flag.
+requirements:
+- serverless
+- yaml
+author:
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = r'''
+- name: Basic deploy of a service
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ state: present
+
+- name: Deploy specific functions
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ functions:
+ - my_func_one
+ - my_func_two
+
+- name: Deploy a project, then pull its resource list back into Ansible
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ register: sls
+
+# The cloudformation stack is always named the same as the full service, so the
+# cloudformation_info module can get a full list of the stack resources, as
+# well as stack events and outputs
+- cloudformation_info:
+ region: us-east-1
+ stack_name: '{{ sls.service_name }}'
+ stack_resources: true
+
+- name: Deploy a project using a locally installed serverless binary
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ serverless_bin_path: node_modules/.bin/serverless
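+
+# state=absent runs `serverless remove` for the given stage
+- name: Remove a previously deployed project stage
+  community.general.serverless:
+    stage: dev
+    region: us-east-1
+    service_path: '{{ project_dir }}'
+    state: absent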
+'''
+
+RETURN = r'''
+service_name:
+ type: str
+ description: The service name specified in the serverless.yml that was just deployed.
+ returned: always
+ sample: my-fancy-service-dev
+state:
+ type: str
+ description: Whether the stack for the serverless project is present/absent.
+ returned: always
+command:
+ type: str
+ description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
+ returned: always
+ sample: serverless deploy --stage production
+'''
+
+import os
+
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def read_serverless_config(module):
+ path = module.params.get('service_path')
+
+ try:
+ with open(os.path.join(path, 'serverless.yml')) as sls_config:
+ config = yaml.safe_load(sls_config.read())
+ return config
+ except IOError as e:
+ module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(path, str(e)))
+
+ module.fail_json(msg="Failed to open serverless config at {0}".format(
+ os.path.join(path, 'serverless.yml')))
+
+
+def get_service_name(module, stage):
+ config = read_serverless_config(module)
+ if config.get('service') is None:
+ module.fail_json(msg="Could not read `service` key from serverless.yml file")
+
+ if stage:
+ return "{0}-{1}".format(config['service'], stage)
+
+ return "{0}-{1}".format(config['service'], config.get('stage', 'dev'))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ service_path=dict(type='path', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ functions=dict(type='list', elements='str'),
+ region=dict(type='str', default=''),
+ stage=dict(type='str', default=''),
+ deploy=dict(type='bool', default=True),
+ serverless_bin_path=dict(type='path'),
+ force=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ ),
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg='yaml is required for this module')
+
+ service_path = module.params.get('service_path')
+ state = module.params.get('state')
+ functions = module.params.get('functions')
+ region = module.params.get('region')
+ stage = module.params.get('stage')
+ deploy = module.params.get('deploy', True)
+ force = module.params.get('force', False)
+ verbose = module.params.get('verbose', False)
+ serverless_bin_path = module.params.get('serverless_bin_path')
+
+ if serverless_bin_path is not None:
+ command = serverless_bin_path + " "
+ else:
+ command = "serverless "
+
+ if state == 'present':
+ command += 'deploy '
+ elif state == 'absent':
+ command += 'remove '
+ else:
+ module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state))
+
+ if state == 'present':
+ if not deploy:
+ command += '--noDeploy '
+ elif force:
+ command += '--force '
+
+ if region:
+ command += '--region {0} '.format(region)
+ if stage:
+ command += '--stage {0} '.format(stage)
+ if verbose:
+ command += '--verbose '
+
+ rc, out, err = module.run_command(command, cwd=service_path)
+ if rc != 0:
+ if state == 'absent' and "-{0}' does not exist".format(stage) in out:
+ module.exit_json(changed=False, state='absent', command=command,
+ out=out, service_name=get_service_name(module, stage))
+
+ module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err))
+
+ # gather some facts about the deployment
+ module.exit_json(changed=True, state='present', out=out, command=command,
+ service_name=get_service_name(module, stage))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py
new file mode 100644
index 00000000..58c6962b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_account_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_account)
+short_description: Manage SolidFire accounts
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update accounts on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+
+ new_name:
+ description:
+ - New name for the user account.
+
+ initiator_secret:
+ description:
+ - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
+ - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
+ - If not specified, a random secret is created.
+
+ target_secret:
+ description:
+ - CHAP secret to use for the target (mutual CHAP authentication).
+ - Should be 12-16 characters long and impenetrable.
+ - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
+ - If not specified, a random secret is created.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ account_id:
+ description:
+ - The ID of the account to manage or update.
+
+ status:
+ description:
+ - Status of the account.
+
+'''
+
+EXAMPLES = """
+- name: Create Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+
+- name: Modify Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+ new_name: TenantA-Renamed
+
+- name: Delete Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: TenantA-Renamed
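+
+- name: Create Account with attributes (attribute values below are illustrative)
+  community.general.sf_account_manager:
+    hostname: "{{ solidfire_hostname }}"
+    username: "{{ solidfire_username }}"
+    password: "{{ solidfire_password }}"
+    state: present
+    name: TenantB
+    attributes: {"tenant": "B", "owner": "ops"}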
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireAccount(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=False, type='int', default=None),
+
+ new_name=dict(required=False, type='str', default=None),
+ initiator_secret=dict(required=False, type='str'),
+ target_secret=dict(required=False, type='str'),
+ attributes=dict(required=False, type='dict'),
+ status=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+
+ self.new_name = p['new_name']
+ self.initiator_secret = p['initiator_secret']
+ self.target_secret = p['target_secret']
+ self.attributes = p['attributes']
+ self.status = p['status']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_account(self):
+ """
+ Return account object if found
+
+ :return: Details about the account. None if not found.
+ :rtype: dict
+ """
+ account_list = self.sfe.list_accounts()
+
+ for account in account_list.accounts:
+ if account.username == self.name:
+ # Update self.account_id:
+ if self.account_id is not None:
+ if account.account_id == self.account_id:
+ return account
+ else:
+ self.account_id = account.account_id
+ return account
+ return None
+
+ def create_account(self):
+ try:
+ self.sfe.add_account(username=self.name,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+ except Exception as e:
+            self.module.fail_json(msg='Error creating account %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_account(self):
+ try:
+ self.sfe.remove_account(account_id=self.account_id)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_account(self):
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ username=self.new_name,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ account_exists = False
+ update_account = False
+ account_detail = self.get_account()
+
+ if account_detail:
+ account_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the account
+
+ if account_detail.username is not None and self.new_name is not None and \
+ account_detail.username != self.new_name:
+ update_account = True
+ changed = True
+
+ elif account_detail.status is not None and self.status is not None \
+ and account_detail.status != self.status:
+ update_account = True
+ changed = True
+
+ elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
+ and account_detail.initiator_secret != self.initiator_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.target_secret is not None and self.target_secret is not None \
+ and account_detail.target_secret != self.target_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.attributes is not None and self.attributes is not None \
+ and account_detail.attributes != self.attributes:
+ update_account = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not account_exists:
+ self.create_account()
+ elif update_account:
+ self.update_account()
+
+ elif self.state == 'absent':
+ self.delete_account()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireAccount()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py
new file mode 100644
index 00000000..cfe24832
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_check_connections
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_check_connections)
+short_description: Check connectivity to MVIP and SVIP.
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Used to test the management connection to the cluster.
+- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
+
+options:
+
+ skip:
+ description:
+ - Skip checking connection to SVIP or MVIP.
+ choices: ['svip', 'mvip']
+
+ mvip:
+ description:
+ - Optionally, use to test connection of a different MVIP.
+ - This is not needed to test the connection to the target cluster.
+
+ svip:
+ description:
+ - Optionally, use to test connection of a different SVIP.
+ - This is not needed to test the connection to the target cluster.
+
+'''
+
+
+EXAMPLES = """
+ - name: Check connections to MVIP and SVIP
+ community.general.sf_check_connections:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
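+
+   - name: Check connection to MVIP only (skip the SVIP test)
+     community.general.sf_check_connections:
+       hostname: "{{ solidfire_hostname }}"
+       username: "{{ solidfire_username }}"
+       password: "{{ solidfire_password }}"
+       skip: svip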
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireConnection(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
+ mvip=dict(required=False, type='str', default=None),
+ svip=dict(required=False, type='str', default=None)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.skip = p['skip']
+ self.mvip = p['mvip']
+ self.svip = p['svip']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.ElementFactory.create(p['hostname'], p['username'], p['password'], port=442)
+
+ def check_mvip_connection(self):
+ """
+ Check connection to MVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_mvip(mvip=self.mvip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to MVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check_svip_connection(self):
+ """
+ Check connection to SVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_svip(svip=self.svip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to SVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check(self):
+
+ failed = True
+ msg = ''
+
+ if self.skip is None:
+ mvip_connection_established = self.check_mvip_connection()
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ elif not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'mvip':
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'svip':
+ mvip_connection_established = self.check_mvip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ else:
+ failed = False
+
+ if failed:
+ self.module.fail_json(msg=msg)
+ else:
+ self.module.exit_json()
+
+
+def main():
+ v = SolidFireConnection()
+ v.check()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py
new file mode 100644
index 00000000..296e50bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_snapshot_schedule_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_snapshot_schedule)
+short_description: Manage SolidFire snapshot schedules
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update snapshot schedules on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified schedule should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ paused:
+ description:
+ - Pause / Resume a schedule.
+ required: false
+
+ recurring:
+ description:
+ - Should the schedule recur?
+ required: false
+
+ time_interval_days:
+ description: Time interval in days.
+ required: false
+ default: 1
+
+ time_interval_hours:
+ description: Time interval in hours.
+ required: false
+ default: 0
+
+ time_interval_minutes:
+ description: Time interval in minutes.
+ required: false
+ default: 0
+
+ name:
+ description:
+ - Name for the snapshot schedule.
+ required: true
+
+ snapshot_name:
+ description:
+ - Name for the created snapshots.
+ required: false
+
+ volumes:
+ description:
+ - Volume IDs that you want to set the snapshot schedule for.
+ - At least 1 volume ID is required for creating a new schedule.
+    - Required when C(state=present).
+ required: false
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ required: false
+
+ schedule_id:
+ description:
+ - The schedule ID for the schedule that you want to update or delete.
+ required: false
+
+ starting_date:
+ description:
+    - Starting date for the schedule.
+    - Required when C(state=present).
+    - "Format: C(2016--12--01T00:00:00Z)"
+    - Please use two '-' between the date fields as shown in the format above,
+      or you may see a TypeError (not JSON serializable) error.
+ required: false
+'''
+
+EXAMPLES = """
+ - name: Create Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: Schedule_A
+ time_interval_days: 1
+ starting_date: 2016--12--01T00:00:00Z
+ volumes: 7
+
+ - name: Update Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ schedule_id: 6
+ recurring: True
+ snapshot_name: AnsibleSnapshots
+
+ - name: Delete Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ schedule_id: 6
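+
+   - name: Pause Snapshot schedule (values below are illustrative)
+     community.general.sf_snapshot_schedule_manager:
+       hostname: "{{ solidfire_hostname }}"
+       username: "{{ solidfire_username }}"
+       password: "{{ solidfire_password }}"
+       state: present
+       name: Schedule_A
+       schedule_id: 6
+       starting_date: 2016--12--01T00:00:00Z
+       volumes: 7
+       paused: True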
+"""
+
+RETURN = """
+
+schedule_id:
+ description: Schedule ID of the newly created schedule
+ returned: success
+ type: str
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireSnapShotSchedule(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ time_interval_days=dict(required=False, type='int', default=1),
+ time_interval_hours=dict(required=False, type='int', default=0),
+ time_interval_minutes=dict(required=False, type='int', default=0),
+
+ paused=dict(required=False, type='bool'),
+ recurring=dict(required=False, type='bool'),
+
+ starting_date=dict(type='str'),
+
+ snapshot_name=dict(required=False, type='str'),
+ volumes=dict(required=False, type='list'),
+ retention=dict(required=False, type='str'),
+
+ schedule_id=dict(type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['starting_date', 'volumes'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ # self.interval = p['interval']
+
+ self.time_interval_days = p['time_interval_days']
+ self.time_interval_hours = p['time_interval_hours']
+ self.time_interval_minutes = p['time_interval_minutes']
+
+ self.paused = p['paused']
+ self.recurring = p['recurring']
+
+ self.starting_date = p['starting_date']
+ if self.starting_date is not None:
+ self.starting_date = self.starting_date.replace("--", "-")
+
+ self.snapshot_name = p['snapshot_name']
+ self.volumes = p['volumes']
+ self.retention = p['retention']
+
+ self.schedule_id = p['schedule_id']
+
+ self.create_schedule_result = None
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_schedule(self):
+ schedule_list = self.sfe.list_schedules()
+ for schedule in schedule_list.schedules:
+ if schedule.name == self.name:
+ # Update self.schedule_id:
+ if self.schedule_id is not None:
+ if schedule.schedule_id == self.schedule_id:
+ return schedule
+ else:
+ self.schedule_id = schedule.schedule_id
+ return schedule
+
+ return None
+
+ def create_schedule(self):
+
+ try:
+ sched = netapp_utils.Schedule()
+ # if self.interval == 'time_interval':
+ sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ # Create schedule
+ sched.name = self.name
+ sched.schedule_info = netapp_utils.ScheduleInfo(
+ volume_ids=self.volumes,
+ snapshot_name=self.snapshot_name,
+ retention=self.retention
+ )
+ sched.paused = self.paused
+ sched.recurring = self.recurring
+ sched.starting_date = self.starting_date
+
+ self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+ sched.to_be_deleted = True
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+
+ # Update schedule properties
+
+ # if self.interval == 'time_interval':
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if sched.frequency.days != temp_frequency.days or \
+ sched.frequency.hours != temp_frequency.hours \
+ or sched.frequency.minutes != temp_frequency.minutes:
+ sched.frequency = temp_frequency
+
+ sched.name = self.name
+ if self.volumes is not None:
+ sched.schedule_info.volume_ids = self.volumes
+ if self.retention is not None:
+ sched.schedule_info.retention = self.retention
+ if self.snapshot_name is not None:
+ sched.schedule_info.snapshot_name = self.snapshot_name
+ if self.paused is not None:
+ sched.paused = self.paused
+ if self.recurring is not None:
+ sched.recurring = self.recurring
+ if self.starting_date is not None:
+ sched.starting_date = self.starting_date
+
+ # Make API call
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ schedule_exists = False
+ update_schedule = False
+ schedule_detail = self.get_schedule()
+
+ if schedule_detail:
+ schedule_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+            # Check if we need to update the schedule
+
+ if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
+ update_schedule = True
+ changed = True
+
+ elif schedule_detail.name != self.name:
+ update_schedule = True
+ changed = True
+
+ elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
+ update_schedule = True
+ changed = True
+
+ elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
+ update_schedule = True
+ changed = True
+
+ elif self.paused is not None and schedule_detail.paused != self.paused:
+ update_schedule = True
+ changed = True
+
+ elif self.recurring is not None and schedule_detail.recurring != self.recurring:
+ update_schedule = True
+ changed = True
+
+ elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
+ update_schedule = True
+ changed = True
+
+ elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
+ or self.time_interval_days is not None:
+
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if schedule_detail.frequency.days != temp_frequency.days or \
+ schedule_detail.frequency.hours != temp_frequency.hours \
+ or schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ # Skip changes
+ pass
+ else:
+ if self.state == 'present':
+ if not schedule_exists:
+ self.create_schedule()
+ elif update_schedule:
+ self.update_schedule()
+
+ elif self.state == 'absent':
+ self.delete_schedule()
+
+ if self.create_schedule_result is not None:
+ self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
+ else:
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireSnapShotSchedule()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py
new file mode 100644
index 00000000..78e3097d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_access_group_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_access_group)
+short_description: Manage SolidFire Volume Access Groups
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volume access groups on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume access group should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - Name of the volume access group. It is not required to be unique, but recommended.
+ required: true
+
+ initiators:
+ description:
+ - List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
+
+ volumes:
+ description:
+ - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
+
+ virtual_network_id:
+ description:
+ - The ID of the SolidFire Virtual Network ID to associate the volume access group with.
+
+ virtual_network_tags:
+ description:
+ - The ID of the VLAN Virtual Network Tag to associate the volume access group with.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ volume_access_group_id:
+ description:
+ - The ID of the volume access group to modify or delete.
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVolumeAccessGroup
+ volumes: [7,8]
+
+ - name: Modify Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ volume_access_group_id: 1
+ name: AnsibleVolumeAccessGroup-Renamed
+ attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
+
+ - name: Delete Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ volume_access_group_id: 1
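+
+   - name: Create Volume Access Group with initiators (the IQN below is illustrative)
+     community.general.sf_volume_access_group_manager:
+       hostname: "{{ solidfire_hostname }}"
+       username: "{{ solidfire_username }}"
+       password: "{{ solidfire_password }}"
+       state: present
+       name: AnsibleVAGWithInitiators
+       initiators: ["iqn.1998-01.com.vmware:esxi-host-01"]
+       volumes: [7]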
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolumeAccessGroup(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ volume_access_group_id=dict(required=False, type='int', default=None),
+
+ initiators=dict(required=False, type='list', default=None),
+ volumes=dict(required=False, type='list', default=None),
+ virtual_network_id=dict(required=False, type='list', default=None),
+ virtual_network_tags=dict(required=False, type='list', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.volume_access_group_id = p['volume_access_group_id']
+
+ self.initiators = p['initiators']
+ self.volumes = p['volumes']
+ self.virtual_network_id = p['virtual_network_id']
+ self.virtual_network_tags = p['virtual_network_tags']
+ self.attributes = p['attributes']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume_access_group(self):
+ access_groups_list = self.sfe.list_volume_access_groups()
+
+ for group in access_groups_list.volume_access_groups:
+ if group.name == self.name:
+ # Update self.volume_access_group_id:
+ if self.volume_access_group_id is not None:
+ if group.volume_access_group_id == self.volume_access_group_id:
+ return group
+ else:
+ self.volume_access_group_id = group.volume_access_group_id
+ return group
+ return None
+
+ def create_volume_access_group(self):
+ try:
+ self.sfe.create_volume_access_group(name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error creating volume access group %s: %s" %
+ (self.name, to_native(e)), exception=traceback.format_exc())
+
+ def delete_volume_access_group(self):
+ try:
+ self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
+
+ except Exception as e:
+ self.module.fail_json(msg="Error deleting volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_volume_access_group(self):
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ group_exists = False
+ update_group = False
+ group_detail = self.get_volume_access_group()
+
+ if group_detail:
+ group_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the group
+ if self.volumes is not None and group_detail.volumes != self.volumes:
+ update_group = True
+ changed = True
+ elif self.initiators is not None and group_detail.initiators != self.initiators:
+ update_group = True
+ changed = True
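+ # virtual_network_id, virtual_network_tags and attributes are not
+ # compared against the existing group, so supplying any of them
+ # always triggers an update.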
+ elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
+ self.attributes is not None:
+ update_group = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
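+ # Check mode: report the pending change without applying it.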
+ pass
+ else:
+ if self.state == 'present':
+ if not group_exists:
+ self.create_volume_access_group()
+ elif update_group:
+ self.update_volume_access_group()
+
+ elif self.state == 'absent':
+ self.delete_volume_access_group()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireVolumeAccessGroup()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py
new file mode 100644
index 00000000..9d5378a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced.
+ alternative: Please use M(netapp.elementsw.na_elementsw_volume).
+short_description: Manage SolidFire volumes
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volumes on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ required: true
+
+ 512emulation:
+ description:
+ - Should the volume provide 512-byte sector emulation?
+ - Required when C(state=present).
+
+ qos:
+ description: Initial quality of service settings for this volume. Configure as dict in playbooks.
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this volume.
+
+ volume_id:
+ description:
+ - The ID of the volume to manage or update.
+ - In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id)
+ parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
+ volume (since it's an auto-generated property).
+
+ size:
+ description:
+ - The size of the volume in (size_unit).
+ - Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ access:
+ description:
+ - "Access allowed for the volume."
+ - "readOnly: Only read operations are allowed."
+ - "readWrite: Reads and writes are allowed."
+ - "locked: No reads or writes are allowed."
+ - "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked."
+ - "If unspecified, the access settings of the clone will be the same as the source."
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+ account_id: 3
+ enable512e: False
+ size: 1
+ size_unit: gb
+
+ - name: Update Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ account_id: 3
+ access: readWrite
+
+ - name: Delete Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: AnsibleVol
+ account_id: 2
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolume(object):
+
+ def __init__(self):
+
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=True, type='int'),
+
+ enable512e=dict(type='bool', aliases=['512emulation']),
+ qos=dict(required=False, type='dict', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+
+ volume_id=dict(type='int', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+
+ access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite',
+ 'locked', 'replicationTarget']),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['size', 'enable512e'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+ self.enable512e = p['enable512e']
+ self.qos = p['qos']
+ self.attributes = p['attributes']
+
+ self.volume_id = p['volume_id']
+ self.size_unit = p['size_unit']
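+ # Convert the requested size to bytes using the unit multiplier map.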
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.access = p['access']
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume(self):
+ """
+ Return volume object if found
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
+ for volume in volume_list.volumes:
+ if volume.name == self.name:
+ # Update self.volume_id
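+ # An empty delete_time means the volume is live; a non-empty one means
+ # it is already scheduled for purging and is treated as absent.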
+ if self.volume_id is not None:
+ if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
+ return volume
+ else:
+ if str(volume.delete_time) == "":
+ self.volume_id = volume.volume_id
+ return volume
+ return None
+
+ def create_volume(self):
+ try:
+ self.sfe.create_volume(name=self.name,
+ account_id=self.account_id,
+ total_size=self.size,
+ enable512e=self.enable512e,
+ qos=self.qos,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
+ exception=to_native(err))
+
+ def delete_volume(self):
+ try:
+ self.sfe.delete_volume(volume_id=self.volume_id)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
+ exception=to_native(err))
+
+ def update_volume(self):
+ try:
+ self.sfe.modify_volume(self.volume_id,
+ account_id=self.account_id,
+ access=self.access,
+ qos=self.qos,
+ total_size=self.size,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error updating volume %s" % self.name,
+ exception=to_native(err))
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ update_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ # Checking for state change(s) here, and applying it later in the code allows us to support
+ # check_mode
+ changed = True
+
+ elif self.state == 'present':
+ if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.account_id is not None and self.account_id is not None \
+ and volume_detail.account_id != self.account_id:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
+ size_difference = abs(float(volume_detail.total_size - self.size))
+ # Change size only if the relative difference is bigger than 0.1% (0.001)
+ if size_difference / self.size > 0.001:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.attributes is not None and self.attributes is not None and \
+ volume_detail.attributes != self.attributes:
+ update_volume = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ result_message = ""
+
+ if changed:
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+ result_message = "Volume created"
+ elif update_volume:
+ self.update_volume()
+ result_message = "Volume updated"
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ result_message = "Volume deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ v = SolidFireVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py
new file mode 100644
index 00000000..ccb02a2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: shutdown
+short_description: Shut down a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use I(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+ - Shuts down a machine.
+version_added: "1.1.0"
+options:
+ delay:
+ description:
+ - Seconds to wait before shutdown. Passed as a parameter to the shutdown command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ msg:
+ description:
+ - Message to display to users before shutdown.
+ type: str
+ default: Shut down initiated by Ansible
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored on the remote node when searching for the C(shutdown) command.
+ type: list
+ elements: path
+ default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
+
+seealso:
+- module: ansible.builtin.reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+ - Amin Vakil (@aminvakil)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally shut down the machine with all defaults
+ community.general.shutdown:
+
+- name: Delay shutting down the remote node
+ community.general.shutdown:
+ delay: 60
+
+- name: Shut down a machine with shutdown command in unusual place
+ community.general.shutdown:
+ search_paths:
+ - '/lib/molly-guard'
+'''
+
+RETURN = r'''
+shutdown:
+ description: C(true) if the machine has been shut down.
+ returned: always
+ type: bool
+ sample: true
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py
new file mode 100644
index 00000000..22556d91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: create or cancel a virtual instance in SoftLayer
+description:
+ - Creates or cancels SoftLayer instances.
+ - When created, optionally waits for it to be 'running'.
+options:
+ instance_id:
+ description:
+ - Instance ID of the virtual instance on which to perform the action.
+ type: str
+ hostname:
+ description:
+ - Hostname to be provided to a virtual instance.
+ type: str
+ domain:
+ description:
+ - Domain name to be provided to a virtual instance.
+ type: str
+ datacenter:
+ description:
+ - Datacenter for the virtual instance to be deployed.
+ type: str
+ choices:
+ - ams01
+ - ams03
+ - che01
+ - dal01
+ - dal05
+ - dal06
+ - dal09
+ - dal10
+ - dal12
+ - dal13
+ - fra02
+ - fra04
+ - fra05
+ - hkg02
+ - hou02
+ - lon02
+ - lon04
+ - lon06
+ - mel01
+ - mex01
+ - mil01
+ - mon01
+ - osl01
+ - par01
+ - sao01
+ - sea01
+ - seo01
+ - sjc01
+ - sjc03
+ - sjc04
+ - sng01
+ - syd01
+ - syd04
+ - tok02
+ - tor01
+ - wdc01
+ - wdc04
+ - wdc06
+ - wdc07
+ tags:
+ description:
+ - Tag or list of tags to be provided to a virtual instance.
+ type: str
+ hourly:
+ description:
+ - Flag to determine if the instance should be hourly billed.
+ type: bool
+ default: 'yes'
+ private:
+ description:
+ - Flag to determine if the instance should be private only.
+ type: bool
+ default: 'no'
+ dedicated:
+ description:
+ - Flag to determine if the instance should be deployed in dedicated space.
+ type: bool
+ default: 'no'
+ local_disk:
+ description:
+ - Flag to determine if local disk should be used for the new instance.
+ type: bool
+ default: 'yes'
+ cpus:
+ description:
+ - Count of cpus to be assigned to new virtual instance.
+ type: int
+ choices: [1, 2, 4, 8, 16, 32, 56]
+ memory:
+ description:
+ - Amount of memory to be assigned to new virtual instance.
+ type: int
+ choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+ flavor:
+ description:
+ - Specify which SoftLayer flavor template to use instead of cpus and memory.
+ version_added: '0.2.0'
+ type: str
+ disks:
+ description:
+ - List of disk sizes to be assigned to new virtual instance.
+ default: [ 25 ]
+ type: list
+ os_code:
+ description:
+ - OS Code to be used for new virtual instance.
+ type: str
+ image_id:
+ description:
+ - Image Template to be used for new virtual instance.
+ type: str
+ nic_speed:
+ description:
+ - NIC Speed to be assigned to new virtual instance.
+ choices: [10, 100, 1000]
+ type: int
+ public_vlan:
+ description:
+ - VLAN by its Id to be assigned to the public NIC.
+ type: str
+ private_vlan:
+ description:
+ - VLAN by its Id to be assigned to the private NIC.
+ type: str
+ ssh_keys:
+ description:
+ - List of ssh keys by their Id to be assigned to a virtual instance.
+ type: list
+ post_uri:
+ description:
+ - URL of a post provisioning script to be loaded and executed on virtual instance.
+ type: str
+ state:
+ description:
+ - Create, or cancel a virtual instance.
+ - Specify C(present) for create, C(absent) to cancel.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ wait:
+ description:
+ - Flag used to wait for active status before returning.
+ type: bool
+ default: 'yes'
+ wait_time:
+ description:
+ - Time in seconds before wait returns.
+ default: 600
+ type: int
+requirements:
+ - python >= 2.6
+ - softlayer >= 4.1.1
+author:
+- Matt Colton (@mcltn)
+'''
+
+EXAMPLES = '''
+- name: Build instance
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instance request
+ community.general.sl_vm:
+ hostname: instance-1
+ domain: anydomain.com
+ datacenter: dal09
+ tags: ansible-module-test
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks: [25]
+ os_code: UBUNTU_LATEST
+ wait: no
+
+- name: Build additional instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instances request
+ community.general.sl_vm:
+ hostname: "{{ item.hostname }}"
+ domain: "{{ item.domain }}"
+ datacenter: "{{ item.datacenter }}"
+ tags: "{{ item.tags }}"
+ hourly: "{{ item.hourly }}"
+ private: "{{ item.private }}"
+ dedicated: "{{ item.dedicated }}"
+ local_disk: "{{ item.local_disk }}"
+ cpus: "{{ item.cpus }}"
+ memory: "{{ item.memory }}"
+ disks: "{{ item.disks }}"
+ os_code: "{{ item.os_code }}"
+ ssh_keys: "{{ item.ssh_keys }}"
+ wait: "{{ item.wait }}"
+ with_items:
+ - hostname: instance-2
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: True
+ - hostname: instance-3
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: yes
+
+- name: Cancel instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Cancel by tag
+ community.general.sl_vm:
+ state: absent
+ tags: ansible-module-test
+'''
+
+# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
+RETURN = '''# '''
+
+import json
+import time
+
+try:
+ import SoftLayer
+ from SoftLayer import VSManager
+
+ HAS_SL = True
+ vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+ HAS_SL = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+
+
+# TODO: get this info from API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02',
+ 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01',
+ 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04',
+ 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07']
+CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
+MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+INITIALDISK_SIZES = [25, 100]
+LOCALDISK_SIZES = [25, 100, 150, 200, 300]
+SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
+NIC_SPEEDS = [10, 100, 1000]
+
+
+def create_virtual_instance(module):
+
+ instances = vsManager.list_instances(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ datacenter=module.params.get('datacenter')
+ )
+
+ if instances:
+ return False, None
+
+ # Check if OS or Image Template is provided (Can't be both, defaults to OS)
+ if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
+ module.params['image_id'] = ''
+ elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
+ module.params['os_code'] = ''
+ module.params['disks'] = [] # Blank out disks since it will use the template
+ else:
+ return False, None
+
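+ # Normalize a list of tags into the comma-separated string form that is
+ # passed on to create_instance.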
+ tags = module.params.get('tags')
+ if isinstance(tags, list):
+ tags = ','.join(map(str, module.params.get('tags')))
+
+ instance = vsManager.create_instance(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ cpus=module.params.get('cpus'),
+ memory=module.params.get('memory'),
+ flavor=module.params.get('flavor'),
+ hourly=module.params.get('hourly'),
+ datacenter=module.params.get('datacenter'),
+ os_code=module.params.get('os_code'),
+ image_id=module.params.get('image_id'),
+ local_disk=module.params.get('local_disk'),
+ disks=module.params.get('disks'),
+ ssh_keys=module.params.get('ssh_keys'),
+ nic_speed=module.params.get('nic_speed'),
+ private=module.params.get('private'),
+ public_vlan=module.params.get('public_vlan'),
+ private_vlan=module.params.get('private_vlan'),
+ dedicated=module.params.get('dedicated'),
+ post_uri=module.params.get('post_uri'),
+ tags=tags,
+ )
+
+ if instance is not None and instance['id'] > 0:
+ return True, instance
+ else:
+ return False, None
+
+
+def wait_for_instance(module, id):
+ instance = None
+ completed = False
+ wait_timeout = time.time() + module.params.get('wait_time')
+ while not completed and wait_timeout > time.time():
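+ # Each wait_for_ready call blocks briefly (limit 10, delay 2); the outer
+ # loop keeps retrying until wait_time expires.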
+ try:
+ completed = vsManager.wait_for_ready(id, 10, 2)
+ if completed:
+ instance = vsManager.get_instance(id)
+ except Exception:
+ completed = False
+
+ return completed, instance
+
+
+def cancel_instance(module):
+ canceled = True
+ if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
+ tags = module.params.get('tags')
+ if isinstance(tags, string_types):
+ tags = [module.params.get('tags')]
+ instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
+ for instance in instances:
+ try:
+ vsManager.cancel_instance(instance['id'])
+ except Exception:
+ canceled = False
+ elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+ try:
+ vsManager.cancel_instance(module.params.get('instance_id'))
+ except Exception:
+ canceled = False
+ else:
+ return False, None
+
+ return canceled, None
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str'),
+ hostname=dict(type='str'),
+ domain=dict(type='str'),
+ datacenter=dict(type='str', choices=DATACENTERS),
+ tags=dict(type='str'),
+ hourly=dict(type='bool', default=True),
+ private=dict(type='bool', default=False),
+ dedicated=dict(type='bool', default=False),
+ local_disk=dict(type='bool', default=True),
+ cpus=dict(type='int', choices=CPU_SIZES),
+ memory=dict(type='int', choices=MEMORY_SIZES),
+ flavor=dict(type='str'),
+ disks=dict(type='list', default=[25]),
+ os_code=dict(type='str'),
+ image_id=dict(type='str'),
+ nic_speed=dict(type='int', choices=NIC_SPEEDS),
+ public_vlan=dict(type='str'),
+ private_vlan=dict(type='str'),
+ ssh_keys=dict(type='list', default=[]),
+ post_uri=dict(type='str'),
+ state=dict(type='str', default='present', choices=STATES),
+ wait=dict(type='bool', default=True),
+ wait_time=dict(type='int', default=600),
+ )
+ )
+
+ if not HAS_SL:
+ module.fail_json(msg='softlayer python library required for this module')
+
+ if module.params.get('state') == 'absent':
+ (changed, instance) = cancel_instance(module)
+
+ elif module.params.get('state') == 'present':
+ (changed, instance) = create_virtual_instance(module)
+ if module.params.get('wait') is True and instance:
+ (changed, instance) = wait_for_instance(module, instance['id'])
+
+ module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py
new file mode 100644
index 00000000..946fc9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py
@@ -0,0 +1,487 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Lee Goolsbee <lgoolsbee@atlassian.com>
+# (c) 2020, Michal Middleton <mm.404@icloud.com>
+# (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# (c) 2016, René Moser <mail@renemoser.net>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+ - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+ - Slack (sub)domain for your environment without protocol (for example,
+ C(example.slack.com)). In Ansible 1.8 and beyond, this is deprecated
+ and may be ignored. See the I(token) documentation for details.
+ token:
+ type: str
+ description:
+ - Slack integration token. This authenticates you to the slack service.
+ Make sure to use the correct type of token, depending on what method you use.
+ - "Webhook token:
+ Prior to Ansible 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK).
+ In 1.8 and above, Ansible adapts to the new Slack API where tokens look
+ like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If the token is
+ in the new format, Slack will ignore any value of I(domain); if it is in
+ the old format, I(domain) is required. Ansible has no control over when
+ Slack will retire the old API, at which point the old format will stop
+ working. Keep in mind that these are not API tokens but webhook tokens.
+ In Slack they are found in the webhook URL, obtained under apps and
+ integrations where incoming webhooks can be added (in some cases this
+ area may be locked by your Slack admin and you must request access).
+ The key is at the end of the URL given to you in that section."
+ - "WebAPI token:
+ Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-)
+ or C(xoxa-), eg. C(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id.
+ See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
+ required: true
+ msg:
+ type: str
+ description:
+ - Message to send. Note that the module does not handle escaping characters.
+ Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending.
+ See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ thread_id:
+ description:
+ - Optional. Timestamp of the parent message to thread this message under.
+ See U(https://api.slack.com/docs/message-threading).
+ type: str
+ message_id:
+ description:
+ - Optional. Message ID to edit, instead of posting a new message.
+ Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
+ type: str
+ version_added: 1.2.0
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon (default C(https://www.ansible.com/favicon.ico))
+ default: https://www.ansible.com/favicon.ico
+ icon_emoji:
+ type: str
+ description:
+ - Emoji for the message sender. See Slack documentation for options.
+ (if I(icon_emoji) is set, I(icon_url) will not be used)
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ parse:
+ type: str
+ description:
+ - Setting for the message parser at Slack
+ choices:
+ - 'full'
+ - 'none'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+ - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
+ - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value.
+ - Specifying value in hex is supported since Ansible 2.8.
+ default: 'normal'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/docs/attachments).
+ blocks:
+ description:
+ - Define a list of blocks. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/block-kit).
+ type: list
+ elements: dict
+ version_added: 1.0.0
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Slack all options
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ channel: '#ansible'
+ thread_id: '1539917263.000100'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ parse: 'none'
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+
+- name: Insert a color bar in front of the message with valid hex color value
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: 'This message uses color in hex value'
+ color: '#00aacc'
+ username: ''
+ icon_url: ''
+
+- name: Use the attachments API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: "load average: 0,74, 0,66, 0,63"
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+
+- name: Use the blocks API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ blocks:
+ - type: section
+ text:
+ type: mrkdwn
+ text: |-
+ *System load*
+ Display my system load on host A and B
+ - type: context
+ elements:
+ - type: mrkdwn
+ text: |-
+ *System A*
+ load average: 0,74, 0,66, 0,63
+ - type: mrkdwn
+ text: |-
+ *System B*
+ load average: 5,16, 4,64, 2,43
+
+- name: Send a message with a link using Slack markup
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: We sent this message using <https://www.ansible.com|Ansible>!
+
+- name: Send a message with angle brackets and ampersands
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: This message has &lt;brackets&gt; &amp; ampersands in plain text.
+
+- name: Initial Threaded Slack message
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ msg: 'Starting a thread with my initial post.'
+ register: slack_response
+- name: Add more info to thread
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ thread_id: "{{ slack_response['ts'] }}"
+ color: good
+ msg: 'And this is my threaded response!'
+
+- name: Send a message to be edited later on
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: '#ansible'
+ msg: Deploying something...
+ register: slack_response
+- name: Edit message
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: "{{ slack_response.channel }}"
+ msg: Deployment complete!
+ message_id: "{{ slack_response.ts }}"
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
+SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
+SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage'
+SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update'
+SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history'
+
+# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call.
+# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
+ escape_table = {
+ '"': '\\"',
+ "'": "\\'",
+ }
+
+
+def is_valid_hex_color(color_choice):
+ if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice):
+ return True
+ return False
+
+
+def escape_quotes(text):
+ '''Backslash any quotes within text.'''
+ return "".join(escape_table.get(c, c) for c in text)
+
+
+def recursive_escape_quotes(obj, keys):
+ '''Recursively escape quotes inside supplied keys inside block kit objects'''
+ if isinstance(obj, dict):
+ escaped = {}
+ for k, v in obj.items():
+ if isinstance(v, str) and k in keys:
+ escaped[k] = escape_quotes(v)
+ else:
+ escaped[k] = recursive_escape_quotes(v, keys)
+ elif isinstance(obj, list):
+ escaped = [recursive_escape_quotes(v, keys) for v in obj]
+ else:
+ escaped = obj
+ return escaped
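+
+ # Illustrative example (not part of the module API): with escape_table
+ # above, recursive_escape_quotes({'text': 'say "hi"'}, ['text'])
+ # returns {'text': 'say \\"hi\\"'}.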
+
+
+def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=escape_quotes(text))
+ elif text is not None:
+ # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])])
+ if channel is not None:
+ if channel.startswith(('#', '@', 'C0')):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if thread_id is not None:
+ payload['thread_ts'] = thread_id
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+ if parse is not None:
+ payload['parse'] = parse
+ if message_id is not None:
+ payload['ts'] = message_id
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ attachment_keys_to_escape = [
+ 'title',
+ 'text',
+ 'author_name',
+ 'pretext',
+ 'fallback',
+ ]
+ for attachment in attachments:
+ for key in attachment_keys_to_escape:
+ if key in attachment:
+ attachment[key] = escape_quotes(attachment[key])
+
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+
+ payload['attachments'].append(attachment)
+
+ if blocks is not None:
+ block_keys_to_escape = [
+ 'text',
+ 'alt_text'
+ ]
+ payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape)
+
+ return payload
+
+
+def get_slack_message(module, domain, token, channel, ts):
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ 'Authorization': 'Bearer ' + token
+ }
+ qs = urlencode({
+ 'channel': channel,
+ 'ts': ts,
+ 'limit': 1,
+ 'inclusive': 'true',
+ })
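+ # conversations.history with inclusive=true and limit=1 returns exactly
+ # the message at timestamp 'ts', if it exists.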
+ url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs
+ response, info = fetch_url(module=module, url=url, headers=headers, method='GET')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to get slack message")
+ data = module.from_json(response.read())
+ if len(data['messages']) < 1:
+ module.fail_json(msg="no messages matching ts: %s" % ts)
+ if len(data['messages']) > 1:
+ module.fail_json(msg="more than 1 message matching ts: %s" % ts)
+ return data['messages'][0]
+
+
+def do_notify_slack(module, domain, token, payload):
+ use_webapi = False
+ if token.count('/') >= 2:
+ # New style webhook token
+ slack_uri = SLACK_INCOMING_WEBHOOK % (token)
+ elif re.match(r'^xox[abp]-\S+$', token):
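+ # WebAPI token (xoxb-/xoxp-/xoxa-): edit via chat.update when the
+ # payload carries 'ts', otherwise post via chat.postMessage.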
+ slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
+ use_webapi = True
+ else:
+ if not domain:
+ module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form "
+ "XXXX/YYYY/ZZZZ in your playbook")
+ slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+ if use_webapi:
+ headers['Authorization'] = 'Bearer ' + token
+
+ data = module.jsonify(payload)
+ response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data)
+
+ if info['status'] != 200:
+ if use_webapi:
+ obscured_incoming_webhook = slack_uri
+ else:
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
+ module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))
+
+ # each API requires different handling
+ if use_webapi:
+ return module.from_json(response.read())
+ else:
+ return {'webhook': 'ok'}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=False, default=None),
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=False, default=None),
+ channel=dict(type='str', default=None),
+ thread_id=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str', default=None),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ parse=dict(type='str', default=None, choices=['none', 'full']),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal'),
+ attachments=dict(type='list', required=False, default=None),
+ blocks=dict(type='list', elements='dict'),
+ message_id=dict(type='str', default=None),
+ ),
+ supports_check_mode=True,
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ text = module.params['msg']
+ channel = module.params['channel']
+ thread_id = module.params['thread_id']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ parse = module.params['parse']
+ color = module.params['color']
+ attachments = module.params['attachments']
+ blocks = module.params['blocks']
+ message_id = module.params['message_id']
+
+ color_choices = ['normal', 'good', 'warning', 'danger']
+ if color not in color_choices and not is_valid_hex_color(color):
+ module.fail_json(msg="Color value specified should be either one of %r "
+ "or any valid hex value with length 3 or 6." % color_choices)
+
+ changed = True
+
+ # if updating an existing message, we can check if there's anything to update
+ if message_id is not None:
+ changed = False
+ msg = get_slack_message(module, domain, token, channel, message_id)
+ for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
+ if msg.get(key) != module.params.get(key):
+ changed = True
+ break
+ # if check mode is active, we shouldn't do anything regardless.
+ # if changed=False, we don't need to do anything, so don't do it.
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel'])
+ elif module.check_mode:
+ module.exit_json(changed=changed)
+
+ payload = build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id)
+ slack_response = do_notify_slack(module, domain, token, payload)
+
+ if 'ok' in slack_response:
+ # Evaluate WebAPI response
+ if slack_response['ok']:
+ # return payload as a string for backwards compatibility
+ payload_json = module.jsonify(payload)
+ module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'],
+ api=slack_response, payload=payload_json)
+ else:
+ module.fail_json(msg="Slack API error", error=slack_response['error'])
+ else:
+ # Exit with plain OK from WebHook, since we don't have more information
+ # If we get 200 from webhook, the only answer is OK
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py
new file mode 100644
index 00000000..424f5b1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kim Nørgaard
+# Written by Kim Nørgaard <jasen@jasen.dk>
+# Based on pkgng module written by bleader <bleader@ratonland.org>
+# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: slackpkg
+short_description: Package manager for Slackware >= 12.2
+description:
+ - Manage binary packages for Slackware using 'slackpkg' which
+ is available in versions after 12.2.
+options:
+ name:
+ description:
+ - Name of the package to install or remove.
+ required: true
+ type: list
+ elements: str
+ aliases: [pkg]
+
+ state:
+ description:
+ - State of the package. You can use C(installed) as an alias for C(present) and C(removed) as one for C(absent).
+ choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ]
+ required: false
+ default: present
+ type: str
+
+ update_cache:
+ description:
+ - Update the package database first.
+ required: false
+ default: false
+ type: bool
+ aliases: [update-cache]
+
+author: Kim Nørgaard (@KimNorgaard)
+requirements: [ "Slackware >= 12.2" ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.slackpkg:
+ name: foo
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.slackpkg:
+ name: foo,bar
+ state: absent
+
+ - name: Make sure that package foo is at the latest version
+ community.general.slackpkg:
+ name: foo
+ state: latest
+'''
+
+ import os
+ import platform
+ import re
+
+ from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, slackpkg_path, name):
+
+ machine = platform.machine()
+ # Exception for kernel-headers package on x86_64
+ if name == 'kernel-headers' and machine == 'x86_64':
+ machine = 'x86'
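+ # Slackware package file names follow name-version-arch-build
+ # (e.g. foo-1.2.3-x86_64-1), so anchor both the name and the machine
+ # architecture in the pattern.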
+ pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine)))
+ packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)]
+
+ if len(packages) > 0:
+ return True
+
+ return False
+
+
+def remove_packages(module, slackpkg_path, packages):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ remove %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, slackpkg_path, packages):
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ install %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def upgrade_packages(module, slackpkg_path, packages):
+ install_c = 0
+
+ for package in packages:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ upgrade %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_cache(module, slackpkg_path):
+ rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
+ if rc != 0:
+ module.fail_json(msg="Could not update package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ update_cache=dict(default=False, aliases=["update-cache"],
+ type='bool'),
+ ),
+ supports_check_mode=True)
+
+ slackpkg_path = module.get_bin_path('slackpkg', True)
+
+ p = module.params
+
+ pkgs = p['name']
+
+ if p["update_cache"]:
+ update_cache(module, slackpkg_path)
+
+ if p['state'] == 'latest':
+ upgrade_packages(module, slackpkg_path, pkgs)
+
+ elif p['state'] in ['present', 'installed']:
+ install_packages(module, slackpkg_path, pkgs)
+
+ elif p["state"] in ['removed', 'absent']:
+ remove_packages(module, slackpkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Å tevko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Å tevko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting image. Can be any value from image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+ - name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm')]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(
+ msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Å tevko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Å tevko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting image. Can be any value from image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+ - name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm')]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(
+ msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py
new file mode 100644
index 00000000..9776b4e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Stanislas Lange (angristan) <angristan@pm.me>
+# Copyright: (c) 2018, Victor Carceler <vcarceler@iespuigcastellar.xeill.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap
+
+short_description: Manages snaps
+
+
+description:
+ - "Manages snaps packages."
+
+options:
+ name:
+ description:
+ - Name of the snap to install or remove. Can be a list of snaps.
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: present
+ choices: [ absent, present ]
+ type: str
+ classic:
+ description:
+ - Confinement policy. The classic confinement allows a snap to have
+ the same level of access to the system as "classic" packages,
+ like those managed by APT. This option corresponds to the --classic argument.
+ This option can only be specified if there is a single snap in the task.
+ type: bool
+ required: false
+ default: no
+ channel:
+ description:
+ - Define which release of a snap is installed and tracked for updates.
+ This option can only be specified if there is a single snap in the task.
+ type: str
+ required: false
+ default: stable
+
+author:
+ - Victor Carceler (@vcarceler) <vcarceler@iespuigcastellar.xeill.net>
+ - Stanislas Lange (@angristan) <angristan@pm.me>
+'''
+
+EXAMPLES = '''
+# Install "foo" and "bar" snap
+- name: Install foo
+ community.general.snap:
+ name:
+ - foo
+ - bar
+
+# Remove "foo" snap
+- name: Remove foo
+ community.general.snap:
+ name: foo
+ state: absent
+
+# Install a snap with classic confinement
+- name: Install "foo" with option --classic
+ community.general.snap:
+ name: foo
+ classic: yes
+
+# Install a snap from a specific channel
+- name: Install "foo" with option --channel=latest/edge
+ community.general.snap:
+ name: foo
+ channel: latest/edge
+'''
+
+RETURN = '''
+classic:
+ description: Whether or not the snaps were installed with the classic confinement
+ type: bool
+ returned: When snaps are installed
+channel:
+ description: The channel the snaps were installed from
+ type: str
+ returned: When snaps are installed
+cmd:
+ description: The command that was executed on the host
+ type: str
+ returned: When changed is true
+snaps_installed:
+ description: The list of actually installed snaps
+ type: list
+ returned: When any snaps have been installed
+snaps_removed:
+ description: The list of actually removed snaps
+ type: list
+ returned: When any snaps have been removed
+'''
+
+import operator
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def validate_input_snaps(module):
+ """Ensure that all exist."""
+ for snap_name in module.params['name']:
+ if not snap_exists(module, snap_name):
+ module.fail_json(msg="No snap matching '%s' available." % snap_name)
+
+
+def snap_exists(module, snap_name):
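+ # 'snap info' exits non-zero when no snap of that name is known, so the
+ # return code alone tells us whether the snap exists.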
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'info', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def is_snap_installed(module, snap_name):
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'list', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def get_snap_for_action(module):
+ """Construct a list of snaps to use for current action."""
+ snaps = module.params['name']
+
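+ # For state=present keep the snaps that are NOT yet installed; for
+ # state=absent keep the ones that ARE. operator.not_ inverts the result
+ # of is_snap_installed() in the first case.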
+ is_present_state = module.params['state'] == 'present'
+ negation_predicate = operator.not_ if is_present_state else bool
+
+ def predicate(s):
+ return negation_predicate(is_snap_installed(module, s))
+
+ return [s for s in snaps if predicate(s)]
+
+
+def get_base_cmd_parts(module):
+ action_map = {
+ 'present': 'install',
+ 'absent': 'remove',
+ }
+
+ state = module.params['state']
+
+ classic = ['--classic'] if module.params['classic'] else []
+ channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else []
+
+ snap_path = module.get_bin_path("snap", True)
+ snap_action = action_map[state]
+
+ cmd_parts = [snap_path, snap_action]
+ if snap_action == 'install':
+ cmd_parts += classic + channel
+
+ return cmd_parts
+
+
+def get_cmd_parts(module, snap_names):
+ """Return list of cmds to run in exec format."""
+ is_install_mode = module.params['state'] == 'present'
+ has_multiple_snaps = len(snap_names) > 1
+
+ cmd_parts = get_base_cmd_parts(module)
+ has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts
+
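+ # --classic and --channel apply to a single snap at a time, so when
+ # several snaps are requested together with such options, fall back to
+ # one install command per snap.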
+ if not (is_install_mode and has_one_pkg_params and has_multiple_snaps):
+ return [cmd_parts + snap_names]
+
+ return [cmd_parts + [s] for s in snap_names]
+
+
+def run_cmd_for(module, snap_names):
+ cmds_parts = get_cmd_parts(module, snap_names)
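+ # Several per-snap commands may have been produced above; join them with
+ # ';' and run them through a shell so they execute in sequence.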
+ cmd = '; '.join(' '.join(c) for c in cmds_parts)
+ cmd = 'sh -c "{0}"'.format(cmd)
+
+ # Actually execute the snap command
+ return (cmd, ) + module.run_command(cmd, check_rc=False)
+
+
+def execute_action(module):
+ is_install_mode = module.params['state'] == 'present'
+ exit_kwargs = {
+ 'classic': module.params['classic'],
+ 'channel': module.params['channel'],
+ } if is_install_mode else {}
+
+ actionable_snaps = get_snap_for_action(module)
+ if not actionable_snaps:
+ module.exit_json(changed=False, **exit_kwargs)
+
+ changed_def_args = {
+ 'changed': True,
+ 'snaps_{result}'.
+ format(result='installed' if is_install_mode
+ else 'removed'): actionable_snaps,
+ }
+
+ if module.check_mode:
+ module.exit_json(**dict(changed_def_args, **exit_kwargs))
+
+ cmd, rc, out, err = run_cmd_for(module, actionable_snaps)
+ cmd_out_args = {
+ 'cmd': cmd,
+ 'rc': rc,
+ 'stdout': out,
+ 'stderr': err,
+ }
+
+ if rc == 0:
+ module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs)))
+ else:
+ msg = "Ooops! Snap installation failed while executing '{cmd}', please examine logs and error output for more details.".format(cmd=cmd)
+ if is_install_mode:
+ m = re.match(r'^error: This revision of snap "(?P<package_name>\w+)" was published using classic confinement', err)
+ if m is not None:
+ err_pkg = m.group('package_name')
+ msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+ module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs))
+
+
+def main():
+ module_args = {
+ 'name': dict(type='list', elements='str', required=True),
+ 'state': dict(type='str', required=False, default='present', choices=['absent', 'present']),
+ 'classic': dict(type='bool', required=False, default=False),
+ 'channel': dict(type='str', required=False, default='stable'),
+ }
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ validate_input_snaps(module)
+
+ # Apply changes to the snaps
+ execute_action(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py
new file mode 100644
index 00000000..661db460
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Networklore's snmp library for Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snmp_facts
+author:
+- Patrick Ogenstad (@ogenstad)
+short_description: Retrieve facts for a device using SNMP
+description:
+ - Retrieve facts for a device using SNMP; the facts will be
+ inserted into the ansible_facts key.
+requirements:
+ - pysnmp
+options:
+ host:
+ description:
+ - Set to the target SNMP server (normally C({{ inventory_hostname }})).
+ type: str
+ required: true
+ version:
+ description:
+ - SNMP Version to use, C(v2), C(v2c) or C(v3).
+ type: str
+ required: true
+ choices: [ v2, v2c, v3 ]
+ community:
+ description:
+ - The SNMP community string, required if I(version) is C(v2) or C(v2c).
+ type: str
+ level:
+ description:
+ - Authentication level.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ authNoPriv, authPriv ]
+ username:
+ description:
+ - Username for SNMPv3.
+ - Required if I(version) is C(v3).
+ type: str
+ integrity:
+ description:
+ - Hashing algorithm.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ md5, sha ]
+ authkey:
+ description:
+ - Authentication key.
+ - Required if I(version) is C(v3).
+ type: str
+ privacy:
+ description:
+ - Encryption algorithm.
+ - Required if I(level) is C(authPriv).
+ type: str
+ choices: [ aes, des ]
+ privkey:
+ description:
+ - Encryption key.
+ - Required if I(level) is C(authPriv).
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Gather facts with SNMP version 2
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v2c
+ community: public
+ delegate_to: localhost
+
+- name: Gather facts using SNMP version 3
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v3
+ level: authPriv
+ integrity: sha
+ privacy: aes
+ username: snmp-user
+ authkey: abc12345
+ privkey: def6789
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+ansible_sysdescr:
+ description: A textual description of the entity.
+ returned: success
+ type: str
+ sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ansible_sysobjectid:
+ description: The vendor's authoritative identification of the network management subsystem contained in the entity.
+ returned: success
+ type: str
+ sample: 1.3.6.1.4.1.8072.3.2.10
+ansible_sysuptime:
+ description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
+ returned: success
+ type: int
+ sample: 42388
+ansible_syscontact:
+ description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
+ returned: success
+ type: str
+ sample: Me <me@example.org>
+ansible_sysname:
+ description: An administratively-assigned name for this managed node.
+ returned: success
+ type: str
+ sample: ubuntu-user
+ansible_syslocation:
+ description: The physical location of this node (e.g., `telephone closet, 3rd floor').
+ returned: success
+ type: str
+ sample: Sitting on the Dock of the Bay
+ansible_all_ipv4_addresses:
+ description: List of all IPv4 addresses.
+ returned: success
+ type: list
+ sample: ["127.0.0.1", "172.17.0.1"]
+ansible_interfaces:
+ description: Dictionary of each network interface and its metadata.
+ returned: success
+ type: dict
+ sample: {
+ "1": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "1",
+ "ipv4": [
+ {
+ "address": "127.0.0.1",
+ "netmask": "255.0.0.0"
+ }
+ ],
+ "mac": "",
+ "mtu": "65536",
+ "name": "lo",
+ "operstatus": "up",
+ "speed": "65536"
+ },
+ "2": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "2",
+ "ipv4": [
+ {
+ "address": "192.168.213.128",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "mac": "000a305a52a1",
+ "mtu": "1500",
+ "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)",
+ "operstatus": "up",
+ "speed": "1500"
+ }
+ }
+'''
+
+import binascii
+import traceback
+from collections import defaultdict
+
+PYSNMP_IMP_ERR = None
+try:
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ from pysnmp.proto.rfc1905 import EndOfMibView
+ HAS_PYSNMP = True
+except Exception:
+ PYSNMP_IMP_ERR = traceback.format_exc()
+ HAS_PYSNMP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_text
+
+
+class DefineOid(object):
+
+ def __init__(self, dotprefix=False):
+ if dotprefix:
+ dp = "."
+ else:
+ dp = ""
+
+ # From SNMPv2-MIB
+ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
+ self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
+ self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
+ self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
+ self.sysName = dp + "1.3.6.1.2.1.1.5.0"
+ self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
+
+ # From IF-MIB
+ self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
+ self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
+ self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
+ self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
+ self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
+ self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
+ self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
+ self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
+
+ # From IP-MIB
+ self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
+ self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
+ self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
+
+
+def decode_hex(hexstring):
+
+ if len(hexstring) < 3:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return to_text(binascii.unhexlify(hexstring[2:]))
+ return hexstring
+
+
+def decode_mac(hexstring):
+
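+ # An SNMP MAC address arrives as "0x" followed by 12 hex digits (14
+ # characters in total); anything else is returned unchanged.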
+ if len(hexstring) != 14:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return hexstring[2:]
+ return hexstring
+
+
+def lookup_adminstatus(int_adminstatus):
+ adminstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing'
+ }
+ if int_adminstatus in adminstatus_options:
+ return adminstatus_options[int_adminstatus]
+ return ""
+
+
+def lookup_operstatus(int_operstatus):
+ operstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing',
+ 4: 'unknown',
+ 5: 'dormant',
+ 6: 'notPresent',
+ 7: 'lowerLayerDown'
+ }
+ if int_operstatus in operstatus_options:
+ return operstatus_options[int_operstatus]
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']),
+ community=dict(type='str'),
+ username=dict(type='str'),
+ level=dict(type='str', choices=['authNoPriv', 'authPriv']),
+ integrity=dict(type='str', choices=['md5', 'sha']),
+ privacy=dict(type='str', choices=['aes', 'des']),
+ authkey=dict(type='str', no_log=True),
+ privkey=dict(type='str', no_log=True),
+ ),
+ required_together=(
+ ['username', 'level', 'integrity', 'authkey'],
+ ['privacy', 'privkey'],
+ ),
+ supports_check_mode=False,
+ )
+
+ m_args = module.params
+
+ if not HAS_PYSNMP:
+ module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR)
+
+ cmdGen = cmdgen.CommandGenerator()
+
+ # Verify that we receive a community when using snmp v2
+ if m_args['version'] in ("v2", "v2c"):
+ if m_args['community'] is None:
+ module.fail_json(msg='Community not set when using snmp version 2')
+
+ if m_args['version'] == "v3":
+ if m_args['username'] is None:
+ module.fail_json(msg='Username not set when using snmp version 3')
+
+ if m_args['level'] == "authPriv" and m_args['privacy'] is None:
+ module.fail_json(msg='Privacy algorithm not set when using authPriv')
+
+ if m_args['integrity'] == "sha":
+ integrity_proto = cmdgen.usmHMACSHAAuthProtocol
+ elif m_args['integrity'] == "md5":
+ integrity_proto = cmdgen.usmHMACMD5AuthProtocol
+
+ if m_args['privacy'] == "aes":
+ privacy_proto = cmdgen.usmAesCfb128Protocol
+ elif m_args['privacy'] == "des":
+ privacy_proto = cmdgen.usmDESPrivProtocol
+
+ # Use SNMP Version 2
+ if m_args['version'] in ("v2", "v2c"):
+ snmp_auth = cmdgen.CommunityData(m_args['community'])
+
+ # Use SNMP Version 3 with authNoPriv
+ elif m_args['level'] == "authNoPriv":
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
+
+ # Use SNMP Version 3 with authPriv
+ else:
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto,
+ privProtocol=privacy_proto)
+
+ # Use p to prefix OIDs with a dot for polling
+ p = DefineOid(dotprefix=True)
+ # Use v without a prefix to use with return values
+ v = DefineOid(dotprefix=False)
+
+ def Tree():
+ return defaultdict(Tree)
+
+ results = Tree()
+
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.sysDescr,),
+ cmdgen.MibVariable(p.sysObjectId,),
+ cmdgen.MibVariable(p.sysUpTime,),
+ cmdgen.MibVariable(p.sysContact,),
+ cmdgen.MibVariable(p.sysName,),
+ cmdgen.MibVariable(p.sysLocation,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if current_oid == v.sysDescr:
+ results['ansible_sysdescr'] = decode_hex(current_val)
+ elif current_oid == v.sysObjectId:
+ results['ansible_sysobjectid'] = current_val
+ elif current_oid == v.sysUpTime:
+ results['ansible_sysuptime'] = current_val
+ elif current_oid == v.sysContact:
+ results['ansible_syscontact'] = current_val
+ elif current_oid == v.sysName:
+ results['ansible_sysname'] = current_val
+ elif current_oid == v.sysLocation:
+ results['ansible_syslocation'] = current_val
+
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.ifIndex,),
+ cmdgen.MibVariable(p.ifDescr,),
+ cmdgen.MibVariable(p.ifMtu,),
+ cmdgen.MibVariable(p.ifSpeed,),
+ cmdgen.MibVariable(p.ifPhysAddress,),
+ cmdgen.MibVariable(p.ifAdminStatus,),
+ cmdgen.MibVariable(p.ifOperStatus,),
+ cmdgen.MibVariable(p.ipAdEntAddr,),
+ cmdgen.MibVariable(p.ipAdEntIfIndex,),
+ cmdgen.MibVariable(p.ipAdEntNetMask,),
+
+ cmdgen.MibVariable(p.ifAlias,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ interface_indexes = []
+
+ all_ipv4_addresses = []
+ ipv4_networks = Tree()
+
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ if isinstance(val, EndOfMibView):
+ continue
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
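+ # For the IF-MIB columns below, the interface index is the last
+ # component of each OID, so peel it off to key the results dictionary.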
+ if v.ifIndex in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
+ interface_indexes.append(ifIndex)
+ if v.ifDescr in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['name'] = current_val
+ if v.ifMtu in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mtu'] = current_val
+ if v.ifSpeed in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['speed'] = current_val
+ if v.ifPhysAddress in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
+ if v.ifAdminStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
+ if v.ifOperStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
+ if v.ipAdEntAddr in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['address'] = current_val
+ all_ipv4_addresses.append(current_val)
+ if v.ipAdEntIfIndex in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['interface'] = current_val
+ if v.ipAdEntNetMask in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['netmask'] = current_val
+
+ if v.ifAlias in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['description'] = current_val
+
+ interface_to_ipv4 = {}
+ for ipv4_network in ipv4_networks:
+ current_interface = ipv4_networks[ipv4_network]['interface']
+ current_network = {
+ 'address': ipv4_networks[ipv4_network]['address'],
+ 'netmask': ipv4_networks[ipv4_network]['netmask']
+ }
+ interface_to_ipv4.setdefault(current_interface, []).append(current_network)
+
+ for interface in interface_to_ipv4:
+ results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
+
+ results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
+
+ module.exit_json(ansible_facts=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py
new file mode 100644
index 00000000..8ecdeb8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Paul Markham <pmarkham@netrefinery.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+ - Create, start, stop and delete Solaris zones.
+ - This module does not currently allow changing options for a zone that has already been created.
+author:
+- Paul Markham (@pmarkham)
+requirements:
+ - Solaris 10 or 11
+options:
+ state:
+ description:
+ - C(present), configure and install the zone.
+ - C(installed), synonym for C(present).
+ - C(running), if the zone already exists, boot it, otherwise, configure and install
+ the zone first, then boot it.
+ - C(started), synonym for C(running).
+ - C(stopped), shut down a zone.
+ - C(absent), destroy the zone.
+ - C(configured), configure the zone so that it is ready to be attached.
+ - C(attached), attach a zone, but do not boot it.
+ - C(detached), shut down and detach a zone.
+ type: str
+ choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
+ default: present
+ name:
+ description:
+ - Zone name.
+ - A zone name must be unique.
+ - A zone name must begin with an alpha-numeric character.
+ - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
+ - The name cannot be longer than 64 characters.
+ type: str
+ required: true
+ path:
+ description:
+ - The path where the zone will be created. This is required when the zone is created, but not
+ used otherwise.
+ type: str
+ sparse:
+ description:
+ - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+ type: bool
+ default: no
+ root_password:
+ description:
+ - The password hash for the root account. If not specified, the zone's root account
+ will not have a password.
+ type: str
+ config:
+ description:
+ - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
+ and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
+ "set autoboot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
+ type: str
+ default: ''
+ create_options:
+ description:
+ - 'Extra options to the zonecfg(1M) create command.'
+ type: str
+ default: ''
+ install_options:
+ description:
+ - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
+ use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
+ type: str
+ default: ''
+ attach_options:
+ description:
+ - 'Extra options to the zoneadm attach command. For example, this can be used to specify
+ whether a minimum or full update of packages is required and if any packages need to
+ be deleted. For valid values, see zoneadm(1M)'
+ type: str
+ default: ''
+ timeout:
+ description:
+ - Timeout, in seconds, for zone to boot.
+ type: int
+ default: 600
+'''
+
+EXAMPLES = '''
+- name: Create and install a zone, but don't boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: present
+ path: /zones/zone1
+ sparse: True
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Create and install a zone and boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Boot an already installed zone
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+
+- name: Stop a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: stopped
+
+- name: Destroy a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: absent
+
+- name: Detach a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: detached
+
+- name: Configure a zone, ready to be attached
+ community.general.solaris_zone:
+ name: zone1
+ state: configured
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Attach zone1
+ community.general.solaris_zone:
+ name: zone1
+ state: attached
+ attach_options: -u
+'''
+
+import os
+import platform
+import re
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zone(object):
+ def __init__(self, module):
+ self.changed = False
+ self.msg = []
+
+ self.module = module
+ self.path = self.module.params['path']
+ self.name = self.module.params['name']
+ self.sparse = self.module.params['sparse']
+ self.root_password = self.module.params['root_password']
+ self.timeout = self.module.params['timeout']
+ self.config = self.module.params['config']
+ self.create_options = self.module.params['create_options']
+ self.install_options = self.module.params['install_options']
+ self.attach_options = self.module.params['attach_options']
+
+ self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
+ self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
+ self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
+
+ if self.module.check_mode:
+ self.msg.append('Running in check mode')
+
+ if platform.system() != 'SunOS':
+ self.module.fail_json(msg='This module requires Solaris')
+
+ (self.os_major, self.os_minor) = platform.release().split('.')
+ if int(self.os_minor) < 10:
+ self.module.fail_json(msg='This module requires Solaris 10 or later')
+
+ match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
+ if not match:
+ self.module.fail_json(msg="Provided zone name is not a valid zone name. "
+ "Please refer documentation for correct zone name specifications.")
+
+ def configure(self):
+ if not self.path:
+ self.module.fail_json(msg='Missing required argument: path')
+
+ if not self.module.check_mode:
+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
+
+ if self.sparse:
+ t.write('create %s\n' % self.create_options)
+ self.msg.append('creating sparse-root zone')
+ else:
+ t.write('create -b %s\n' % self.create_options)
+ self.msg.append('creating whole-root zone')
+
+ t.write('set zonepath=%s\n' % self.path)
+ t.write('%s\n' % self.config)
+ t.close()
+
+ cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
+ os.unlink(t.name)
+
+ self.changed = True
+ self.msg.append('zone configured')
+
+ def install(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
+ if int(self.os_minor) == 10:
+ self.configure_sysid()
+ self.configure_password()
+ self.configure_ssh_keys()
+ self.changed = True
+ self.msg.append('zone installed')
+
+ def uninstall(self):
+ if self.is_installed():
+ if not self.module.check_mode:
+ cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone uninstalled')
+
+ def configure_sysid(self):
+ if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
+ os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
+
+ open('%s/root/noautoshutdown' % self.path, 'w').close()
+
+ node = open('%s/root/etc/nodename' % self.path, 'w')
+ node.write(self.name)
+ node.close()
+
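+ # Pre-answer the sysidtool questions so the zone skips the interactive
+ # system identification dialog on its first boot.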
+ sysid = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
+ sysid.write('1 # System previously configured?\n')
+ sysid.write('1 # Bootparams succeeded?\n')
+ sysid.write('1 # System is on a network?\n')
+ sysid.write('1 # Extended network information gathered?\n')
+ sysid.write('0 # Autobinder succeeded?\n')
+ sysid.write('1 # Network has subnets?\n')
+ sysid.write('1 # root password prompted for?\n')
+ sysid.write('1 # locale and term prompted for?\n')
+ sysid.write('1 # security policy in place\n')
+ sysid.write('1 # NFSv4 domain configured\n')
+ sysid.write('0 # Auto Registration Configured\n')
+ sysid.write('vt100')
+ sysid.close()
+
+ def configure_ssh_keys(self):
+ rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
+ dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
+
+ if not os.path.isfile(rsa_key_file):
+ cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
+
+ if not os.path.isfile(dsa_key_file):
+ cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
+
+ def configure_password(self):
+ shadow = '%s/root/etc/shadow' % self.path
+ if self.root_password:
+ f = open(shadow, 'r')
+ lines = f.readlines()
+ f.close()
+
+ for i in range(0, len(lines)):
+ fields = lines[i].split(':')
+ if fields[0] == 'root':
+ fields[1] = self.root_password
+ lines[i] = ':'.join(fields)
+
+ f = open(shadow, 'w')
+ for line in lines:
+ f.write(line)
+ f.close()
+
+ def boot(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
+
+ """
+ The boot command can return before the zone has fully booted. This is especially
+ true on the first boot when the zone initializes the SMF services. Unless the zone
+ has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
+ Wait until the zone's console login is running; once that's running, consider the zone booted.
+ """
+
+ elapsed = 0
+ while True:
+ if elapsed > self.timeout:
+ self.module.fail_json(msg='timed out waiting for zone to boot')
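+ # The console ttymon process only runs once the zone has finished
+ # booting, so use its presence as the readiness signal.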
+ rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
+ if rc == 0:
+ break
+ time.sleep(10)
+ elapsed += 10
+ self.changed = True
+ self.msg.append('zone booted')
+
+ def destroy(self):
+ if self.is_running():
+ self.stop()
+ if self.is_installed():
+ self.uninstall()
+ if not self.module.check_mode:
+ cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone deleted')
+
+ def stop(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone stopped')
+
+ def detach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone detached')
+
+ def attach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone attached')
+
+ def exists(self):
+ cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def is_running(self):
+ return self.status() == 'running'
+
+ def is_installed(self):
+ return self.status() == 'installed'
+
+ def is_configured(self):
+ return self.status() == 'configured'
+
+ def status(self):
+ cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return out.split(':')[2]
+ else:
+ return 'undefined'
+
+ def state_present(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+ self.install()
+
+ def state_running(self):
+ self.state_present()
+ if self.is_running():
+ self.msg.append('zone already running')
+ else:
+ self.boot()
+
+ def state_stopped(self):
+ if self.exists():
+ self.stop()
+ else:
+ self.module.fail_json(msg='zone does not exist')
+
+ def state_absent(self):
+ if self.exists():
+ if self.is_running():
+ self.stop()
+ self.destroy()
+ else:
+ self.msg.append('zone does not exist')
+
+ def state_configured(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+
+ def state_detached(self):
+ if not self.exists():
+ self.module.fail_json(msg='zone does not exist')
+ if self.is_configured():
+ self.msg.append('zone already detached')
+ else:
+ self.stop()
+ self.detach()
+
+ def state_attached(self):
+ if not self.exists():
+ self.msg.append('zone does not exist')
+ if self.is_configured():
+ self.attach()
+ else:
+ self.msg.append('zone already attached')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present',
+ choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),
+ path=dict(type='str'),
+ sparse=dict(type='bool', default=False),
+ root_password=dict(type='str', no_log=True),
+ timeout=dict(type='int', default=600),
+ config=dict(type='str', default=''),
+ create_options=dict(type='str', default=''),
+ install_options=dict(type='str', default=''),
+ attach_options=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ zone = Zone(module)
+
+ state = module.params['state']
+
+ if state == 'running' or state == 'started':
+ zone.state_running()
+ elif state == 'present' or state == 'installed':
+ zone.state_present()
+ elif state == 'stopped':
+ zone.state_stopped()
+ elif state == 'absent':
+ zone.state_absent()
+ elif state == 'configured':
+ zone.state_configured()
+ elif state == 'detached':
+ zone.state_detached()
+ elif state == 'attached':
+ zone.state_attached()
+ else:
+ module.fail_json(msg='Invalid state: %s' % state)
+
+ module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py
new file mode 100644
index 00000000..347413fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015-2016, Vlad Glagolev <scm@vaygr.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+ - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain
+author: "Vlad Glagolev (@vaygr)"
+notes:
+ - When all three components are selected, the update goes by the sequence --
+ Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
+ - Grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
+ yet supported.
+requirements:
+ - bash
+options:
+ name:
+ description:
+ - Name of the spell
+ - multiple names can be given, separated by commas
+ - special value '*' in conjunction with states C(latest) or
+ C(rebuild) will update or rebuild the whole system respectively
+ aliases: ["spell"]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Whether to cast, dispel or rebuild a package
+ - state C(cast) is equivalent to C(present), not C(latest)
+ - state C(latest) always triggers C(update_cache=yes)
+ - state C(rebuild) implies cast of all specified spells, not only
+ those that existed before
+ choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
+ default: "present"
+ type: str
+
+ depends:
+ description:
+ - Comma-separated list of _optional_ dependencies to build a spell
+ (or make sure it is built) with; use +/- in front of dependency
+ to turn it on/off ('+' is optional though)
+ - this option is ignored if C(name) parameter is equal to '*' or
+ contains more than one spell
+ - providers must be supplied in the form recognized by Sorcery, e.g.
+ 'openssl(SSL)'
+ type: str
+
+ update:
+ description:
+ - Whether or not to update sorcery scripts at the very first stage
+ type: bool
+ default: no
+
+ update_cache:
+ description:
+ - Whether or not to update grimoire collection before casting spells
+ type: bool
+ default: no
+ aliases: ["update_codex"]
+
+ cache_valid_time:
+ description:
+ - Time in seconds to invalidate grimoire collection on update
+ - especially useful for SCM and rsync grimoires
+ - makes sense only when paired with C(update_cache)
+ type: int
+'''
+
+
+EXAMPLES = '''
+- name: Make sure spell foo is installed
+ community.general.sorcery:
+ spell: foo
+ state: present
+
+- name: Make sure spells foo, bar and baz are removed
+ community.general.sorcery:
+ spell: foo,bar,baz
+ state: absent
+
+- name: Make sure spell foo with dependencies bar and baz is installed
+ community.general.sorcery:
+ spell: foo
+ depends: bar,baz
+ state: present
+
+- name: Make sure spell foo with bar and without baz dependencies is installed
+ community.general.sorcery:
+ spell: foo
+ depends: +bar,-baz
+ state: present
+
+- name: Make sure spell foo with libressl (providing SSL) dependency is installed
+ community.general.sorcery:
+ spell: foo
+ depends: libressl(SSL)
+ state: present
+
+- name: Make sure spells with/without required dependencies (if any) are installed
+ community.general.sorcery:
+ name: "{{ item.spell }}"
+ depends: "{{ item.depends | default(None) }}"
+ state: present
+ loop:
+ - { spell: 'vifm', depends: '+file,-gtk+2' }
+ - { spell: 'fwknop', depends: 'gpgme' }
+ - { spell: 'pv,tnftp,tor' }
+
+- name: Install the latest version of spell foo using regular glossary
+ community.general.sorcery:
+ name: foo
+ state: latest
+
+- name: Rebuild spell foo
+ community.general.sorcery:
+ spell: foo
+ state: rebuild
+
+- name: Rebuild the whole system, but update Sorcery and Codex first
+ community.general.sorcery:
+ spell: '*'
+ state: rebuild
+ update: yes
+ update_cache: yes
+
+- name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias
+ community.general.sorcery:
+ update_codex: yes
+ cache_valid_time: 86400
+
+- name: Update only Sorcery itself
+ community.general.sorcery:
+ update: yes
+'''
+
+
+RETURN = '''
+'''
+
+
+import datetime
+import fileinput
+import os
+import re
+import shutil
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+# auto-filled at module init
+SORCERY = {
+ 'sorcery': None,
+ 'scribe': None,
+ 'cast': None,
+ 'dispel': None,
+ 'gaze': None
+}
+
+SORCERY_LOG_DIR = "/var/log/sorcery"
+SORCERY_STATE_DIR = "/var/state/sorcery"
+
+
+def get_sorcery_ver(module):
+ """ Get Sorcery version. """
+
+ cmd_sorcery = "%s --version" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0 or not stdout:
+ module.fail_json(msg="unable to get Sorcery version")
+
+ return stdout.strip()
+
+
+def codex_fresh(codex, module):
+ """ Check if grimoire collection is fresh enough. """
+
+ if not module.params['cache_valid_time']:
+ return False
+
+ timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
+
+ for grimoire in codex:
+ lastupdate_path = os.path.join(SORCERY_STATE_DIR,
+ grimoire + ".lastupdate")
+
+ try:
+ mtime = os.stat(lastupdate_path).st_mtime
+ except Exception:
+ return False
+
+ lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
+
+ # if any grimoire is not fresh, we invalidate the Codex
+ if lastupdate_ts + timedelta < datetime.datetime.now():
+ return False
+
+ return True
+
+
+def codex_list(module):
+ """ List valid grimoire collection. """
+
+ codex = {}
+
+ cmd_scribe = "%s index" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to list grimoire collection, fix your Codex")
+
+ rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
+
+ # drop 4-line header and empty trailing line
+ for line in stdout.splitlines()[4:-1]:
+ match = rex.match(line)
+
+ if match:
+ codex[match.group('grim')] = match.group('ver')
+
+ if not codex:
+ module.fail_json(msg="no grimoires to operate on; add at least one")
+
+ return codex
+
+
+def update_sorcery(module):
+ """ Update sorcery scripts.
+
+ This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
+ positive change value.
+
+ """
+
+ changed = False
+
+ if module.check_mode:
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=True, msg="would have updated Sorcery")
+ else:
+ sorcery_ver = get_sorcery_ver(module)
+
+ cmd_sorcery = "%s update" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Sorcery: " + stdout)
+
+ if sorcery_ver != get_sorcery_ver(module):
+ changed = True
+
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Sorcery")
+
+
+def update_codex(module):
+ """ Update grimoire collections.
+
+ This runs 'scribe update'. Check mode always returns a positive change
+ value when 'cache_valid_time' is used.
+
+ """
+
+ params = module.params
+
+ changed = False
+
+ codex = codex_list(module)
+ fresh = codex_fresh(codex, module)
+
+ if module.check_mode:
+ if not params['name']:
+ if not fresh:
+ changed = True
+
+ module.exit_json(changed=changed, msg="would have updated Codex")
+ elif not fresh or params['name'] and params['state'] == 'latest':
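+ # Note: 'and' binds tighter than 'or' in the condition above, i.e.
+ # update when the Codex is stale, or when specific spells are requested
+ # with state=latest.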
+ # SILENT is required as a workaround for query() in libgpg
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_scribe = "%s update" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Codex: " + stdout)
+
+ if codex != codex_list(module):
+ changed = True
+
+ if not params['name']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Codex")
+
+
+def match_depends(module):
+ """ Check for matching dependencies.
+
+ This inspects spell's dependencies with the desired states and returns
+ 'False' if a recast is needed to match them. It also adds required lines
+ to the system-wide depends file for proper recast procedure.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ depends = {}
+
+ depends_ok = True
+
+ if len(spells) > 1 or not params['depends']:
+ return depends_ok
+
+ spell = spells[0]
+
+ if module.check_mode:
+ sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+ try:
+ shutil.copy2(sorcery_depends_orig, sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to copy depends.check file")
+ else:
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
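+ # Each depends item is an optional '+'/'-' status flag followed by a
+ # spell name, optionally with a provider spec in parentheses, e.g.
+ # 'openssl(SSL)'.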
+ rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
+
+ for d in params['depends'].split(','):
+ match = rex.match(d)
+
+ if not match:
+ module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+ # normalize status
+ if not match.group('status') or match.group('status') == '+':
+ status = 'on'
+ else:
+ status = 'off'
+
+ depends[match.group('depend')] = status
+
+ # drop providers spec
+ depends_list = [s.split('(')[0] for s in depends]
+
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ if rc != 0:
+ module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+ fi = fileinput.input(sorcery_depends, inplace=True)
+
+ try:
+ try:
+ for line in fi:
+ if line.startswith(spell + ':'):
+ match = None
+
+ for d in depends:
+ # when local status is 'off' and dependency is provider,
+ # use only provider value
+ d_offset = d.find('(')
+
+ if d_offset == -1:
+ d_p = ''
+ else:
+ d_p = re.escape(d[d_offset:])
+
+ # .escape() is needed mostly for the spells like 'libsigc++'
+ rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+ (re.escape(spell), re.escape(d), d_p))
+
+ match = rex.match(line)
+
+ # we matched the line "spell:dependency:on|off:optional:"
+ if match:
+ # if we also matched the local status, mark dependency
+ # as empty and put it back into depends file
+ if match.group('lstatus') == depends[d]:
+ depends[d] = None
+
+ sys.stdout.write(line)
+
+ # status is not that we need, so keep this dependency
+ # in the list for further reverse switching;
+ # stop and process the next line in both cases
+ break
+
+ if not match:
+ sys.stdout.write(line)
+ else:
+ sys.stdout.write(line)
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+ finally:
+ fi.close()
+
+ depends_new = [v for v in depends if depends[v]]
+
+ if depends_new:
+ try:
+ fl = open(sorcery_depends, 'a')
+
+ try:
+ for k in depends_new:
+ fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+ finally:
+ fl.close()
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+
+ depends_ok = False
+
+ if module.check_mode:
+ try:
+ os.remove(sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to clean up depends.backup file")
+
+ return depends_ok
+
+
+def manage_spells(module):
+ """ Cast or dispel spells.
+
+ This manages the whole system ('*'), list or a single spell. Command 'cast'
+ is used to install or rebuild spells, while 'dispel' takes care of theirs
+ removal from the system.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+ if spells == ['*']:
+ if params['state'] == 'latest':
+ # back up original queue
+ try:
+ os.rename(sorcery_queue, sorcery_queue + ".backup")
+ except IOError:
+ module.fail_json(msg="failed to backup the update queue")
+
+ # see update_codex()
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_sorcery = "%s queue"
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to generate the update queue")
+
+ try:
+ queue_size = os.stat(sorcery_queue).st_size
+ except Exception:
+ module.fail_json(msg="failed to read the update queue")
+
+ if queue_size != 0:
+ if module.check_mode:
+ try:
+ os.rename(sorcery_queue + ".backup", sorcery_queue)
+ except IOError:
+ module.fail_json(msg="failed to restore the update queue")
+
+ module.exit_json(changed=True, msg="would have updated the system")
+
+ cmd_cast = "%s --queue" % SORCERY['cast']
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to update the system")
+
+ module.exit_json(changed=True, msg="successfully updated the system")
+ else:
+ module.exit_json(changed=False, msg="the system is already up to date")
+ elif params['state'] == 'rebuild':
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have rebuilt the system")
+
+ cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+ module.exit_json(changed=True, msg="successfully rebuilt the system")
+ else:
+ module.fail_json(msg="unsupported operation on '*' name value")
+ else:
+ if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+ # extract versions from the 'gaze' command
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ # fail if any of spells cannot be found
+ if rc != 0:
+ module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+ ', '.join(spells))
+
+ cast_queue = []
+ dispel_queue = []
+
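+ # 'gaze -q version' prints pipe-separated columns; the spell name and
+ # the grimoire/installed versions sit in the last three fields, with
+ # '-' as the installed version when the spell is not installed.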
+ rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+ # drop 2-line header and empty trailing line
+ for line in stdout.splitlines()[2:-1]:
+ match = rex.match(line)
+
+ cast = False
+
+ if params['state'] == 'present':
+ # spell is not installed..
+ if match.group('inst_ver') == '-':
+ # ..so set up depends reqs for it
+ match_depends(module)
+
+ cast = True
+ # spell is installed..
+ else:
+ # ..but does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'latest':
+ # grimoire and installed versions do not match..
+ if match.group('grim_ver') != match.group('inst_ver'):
+ # ..so check for depends reqs first and set them up
+ match_depends(module)
+
+ cast = True
+ # grimoire and installed versions match..
+ else:
+ # ..but the spell does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'rebuild':
+ cast = True
+ # 'absent'
+ else:
+ if match.group('inst_ver') != '-':
+ dispel_queue.append(match.group('spell'))
+
+ if cast:
+ cast_queue.append(match.group('spell'))
+
+ if cast_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have cast spell(s)")
+
+ cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to cast spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully cast spell(s)")
+ elif params['state'] != 'absent':
+ module.exit_json(changed=False, msg="spell(s) are already cast")
+
+ if dispel_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have dispelled spell(s)")
+
+ cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_dispel)
+
+ if rc != 0:
+ module.fail_json(msg="failed to dispel spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully dispelled spell(s)")
+ else:
+ module.exit_json(changed=False, msg="spell(s) are already dispelled")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['spell'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'latest',
+ 'absent', 'cast', 'dispelled', 'rebuild']),
+ depends=dict(default=None),
+ update=dict(default=False, type='bool'),
+ update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
+ cache_valid_time=dict(default=0, type='int')
+ ),
+ required_one_of=[['name', 'update', 'update_cache']],
+ supports_check_mode=True
+ )
+
+ if os.geteuid() != 0:
+ module.fail_json(msg="root privileges are required for this operation")
+
+ for c in SORCERY:
+ SORCERY[c] = module.get_bin_path(c, True)
+
+ # prepare environment: run sorcery commands without asking questions
+ module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
+
+ params = module.params
+
+ # normalize 'state' parameter
+ if params['state'] in ('present', 'cast'):
+ params['state'] = 'present'
+ elif params['state'] in ('absent', 'dispelled'):
+ params['state'] = 'absent'
+
+ if params['update']:
+ update_sorcery(module)
+
+ if params['update_cache'] or params['state'] == 'latest':
+ update_codex(module)
+
+ if params['name']:
+ manage_spells(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py
new file mode 100644
index 00000000..80c1c493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_access_key
+short_description: Manages Bitbucket repository access keys
+description:
+ - Manages Bitbucket repository access keys (also called deploy keys).
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ key:
+ description:
+ - The SSH public key.
+ type: str
+ label:
+ description:
+ - The key label.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates desired state of the access key.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Bitbucket OAuth consumer should have permissions to read and administrate account repositories.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create access key
+ community.general.bitbucket_access_key:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ key: '{{lookup("file", "bitbucket.pub") }}'
+ label: 'Bitbucket'
+ state: present
+
+- name: Delete access key
+ community.general.bitbucket_access_key:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ label: Bitbucket
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_key': '`key` is required when the `state` is `present`',
+ 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository',
+ 'invalid_username_or_repo': 'Invalid `repository` or `username`',
+ 'invalid_key': 'Invalid SSH key or key is already in use',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'deploy-key-list': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'deploy-key-detail': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_deploy_key(module, bitbucket):
+ """
+ Search for an existing deploy key on Bitbucket
+ with the label specified in module param `label`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing deploy key or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+ # Look through all the response pages in search of the deploy key we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info))
+
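+ # Keep the first key on this page whose label matches the requested
+ # one; if this page has no match, the loop fetches the next page.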
+ res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None)
+
+ if res is not None:
+ return res
+
+ return None
+
+
+def create_deploy_key(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['key'],
+ 'label': module.params['label'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] == 400:
+ module.fail_json(msg=error_messages['invalid_key'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def delete_deploy_key(module, bitbucket, key_id):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ key_id=key_id,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ key=dict(type='str'),
+ label=dict(type='str', required=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ key = module.params['key']
+ state = module.params['state']
+
+ # Check parameters
+ if (key is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_key'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing deploy key (if any)
+ existing_deploy_key = get_existing_deploy_key(module, bitbucket)
+ changed = False
+
+ # Create a new deploy key in case it doesn't exist
+ if not existing_deploy_key and (state == 'present'):
+ if not module.check_mode:
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Update deploy key if the old value does not match the new one
+ elif existing_deploy_key and (state == 'present'):
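+ # The API returns the stored key without the trailing comment field
+ # (see the return example above), so compare by prefix rather than equality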
+ if not key.startswith(existing_deploy_key.get('key')):
+ if not module.check_mode:
+ # Bitbucket doesn't support update key for the same label,
+ # so we need to delete the old one first
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Delete deploy key
+ elif existing_deploy_key and (state == 'absent'):
+ if not module.check_mode:
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py
new file mode 100644
index 00000000..ab3b7ec4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_key_pair
+short_description: Manages Bitbucket pipeline SSH key pair
+description:
+ - Manages Bitbucket pipeline SSH key pair.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ public_key:
+ description:
+ - The public key.
+ type: str
+ private_key:
+ description:
+ - The private key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the key pair.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create or update SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ public_key: '{{ lookup("file", "bitbucket.pub") }}'
+ private_key: '{{ lookup("file", "bitbucket") }}'
+ state: present
+
+- name: Remove SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account, repository or SSH key pair was not found',
+ 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'ssh-key-pair': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_ssh_key_pair(module, bitbucket):
+ """
+ Retrieves an existing SSH key pair from the repository
+ specified in module param `repository`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing key pair or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
+ "type": "pipeline_ssh_key_pair"
+ }
+ """
+ api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+
+ info, content = bitbucket.request(
+ api_url=api_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ # Account, repository or SSH key pair was not found.
+ return None
+
+ return content
+
+
+def update_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='PUT',
+ data={
+ 'private_key': module.params['private_key'],
+ 'public_key': module.params['public_key'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create or update pipeline ssh key pair: {0}'.format(info))
+
+
+def delete_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ public_key=dict(type='str'),
+ private_key=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ state = module.params['state']
+ public_key = module.params['public_key']
+ private_key = module.params['private_key']
+
+ # Check parameters
+ if ((public_key is None) or (private_key is None)) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_keys'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing ssh key
+ key_pair = get_existing_ssh_key_pair(module, bitbucket)
+ changed = False
+
+ # Create or update key pair
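+ # Note: the API only ever returns the public key (see the return example in
+ # get_existing_ssh_key_pair), so a change affecting only the private key
+ # cannot be detected here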
+ if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
+ if not module.check_mode:
+ update_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ # Delete key pair
+ elif key_pair and (state == 'absent'):
+ if not module.check_mode:
+ delete_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py
new file mode 100644
index 00000000..dba9f9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_known_host
+short_description: Manages Bitbucket pipeline known hosts
+description:
+ - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
+ - The host fingerprint will be retrieved automatically, but in case of an error, one can use the I(key) field to specify it manually.
+author:
+ - Evgeniy Krysanov (@catcombo)
+requirements:
+ - paramiko
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The FQDN of the known host.
+ type: str
+ required: true
+ key:
+ description:
+ - The public key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the record.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create known hosts from the list
+ community.general.bitbucket_pipeline_known_host:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - bitbucket.org
+ - example.com
+
+- name: Remove known host
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+ state: absent
+
+- name: Specify public key file
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+ key: '{{ lookup("file", "bitbucket.pub") }}'
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+import socket
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account or repository was not found',
+ 'unknown_key_type': 'Public key type is unknown',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'known-host-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'known-host-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_known_host(module, bitbucket):
+ """
+ Search for a host in Bitbucket pipelines known hosts
+ with the name specified in module param `name`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing host or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+ # Look through all response pages in search of the hostname we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
+
+ host = next(iter(filter(lambda v: v['hostname'] == module.params['name'], content['values'])), None)
+
+ if host is not None:
+ return host
+
+ return None
+
+
+def get_host_key(module, hostname):
+ """
+ Fetches the public key for the specified host
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param hostname: host name
+ :return: key type and key content
+ :rtype: tuple
+
+ Return example::
+
+ (
+ 'ssh-rsa',
+ 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
+ )
+ """
+ try:
+ sock = socket.socket()
+ sock.connect((hostname, 22))
+ except socket.error:
+ module.fail_json(msg='Error opening socket to {0}'.format(hostname))
+
+ try:
+ trans = paramiko.transport.Transport(sock)
+ trans.start_client()
+ host_key = trans.get_remote_server_key()
+ except paramiko.SSHException:
+ module.fail_json(msg='SSH error while retrieving the server key of {0}'.format(hostname))
+
+ trans.close()
+ sock.close()
+
+ key_type = host_key.get_name()
+ key = host_key.get_base64()
+
+ return key_type, key
+
+
+def create_known_host(module, bitbucket):
+ hostname = module.params['name']
+ key_param = module.params['key']
+
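+ # Use the supplied key when given (expected as "<type> <base64>");
+ # otherwise scan the host directly. A key without a space cannot be parsed.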
+ if key_param is None:
+ key_type, key = get_host_key(module, hostname)
+ elif ' ' in key_param:
+ key_type, key = key_param.split(' ', 1)
+ else:
+ module.fail_json(msg=error_messages['unknown_key_type'])
+
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'hostname': hostname,
+ 'public_key': {
+ 'key_type': key_type,
+ 'key': key,
+ }
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ known_host_uuid=known_host_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ key=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if (module.params['key'] is None) and (not HAS_PARAMIKO):
+ module.fail_json(msg='`paramiko` package not found, please install it.')
+
+ bitbucket = BitbucketHelper(module)
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing known host
+ existing_host = get_existing_known_host(module, bitbucket)
+ state = module.params['state']
+ changed = False
+
+ # Create a new host in case it doesn't exist
+ if not existing_host and (state == 'present'):
+ if not module.check_mode:
+ create_known_host(module, bitbucket)
+ changed = True
+
+ # Delete host
+ elif existing_host and (state == 'absent'):
+ if not module.check_mode:
+ delete_known_host(module, bitbucket, existing_host['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py
new file mode 100644
index 00000000..33457fca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_variable
+short_description: Manages Bitbucket pipeline variables
+description:
+ - Manages Bitbucket pipeline variables.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The pipeline variable name.
+ type: str
+ required: true
+ value:
+ description:
+ - The pipeline variable value.
+ type: str
+ secured:
+ description:
+ - Whether to encrypt the variable value.
+ type: bool
+ default: no
+ state:
+ description:
+ - Indicates desired state of the variable.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+ - For secured values return parameter C(changed) is always C(True).
+'''
+
+EXAMPLES = r'''
+- name: Create or update pipeline variables from the list
+ community.general.bitbucket_pipeline_variable:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item.name }}'
+ value: '{{ item.value }}'
+ secured: '{{ item.secured }}'
+ state: present
+ with_items:
+ - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: False }
+ - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True }
+
+- name: Remove pipeline variable
+ community.general.bitbucket_pipeline_variable:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: AWS_ACCESS_KEY
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, _load_params
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_value': '`value` is required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'pipeline-variable-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'pipeline-variable-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_pipeline_variable(module, bitbucket):
+ """
+ Search for a pipeline variable
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing variable or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'name': 'AWS_ACCESS_OBKEY_ID',
+ 'value': 'x7HU80-a2',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}'
+ }
+
+ The `value` key is absent when the variable is secured.
+ """
+ variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ # Look through all response pages in search of the variable we need
+ page = 1
+ while True:
+ next_url = "%s?page=%s" % (variables_base_url, page)
+ info, content = bitbucket.request(
+ api_url=next_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info))
+
+ # We have reached the end of the list
+ if 'pagelen' in content and content['pagelen'] == 0:
+ return None
+
+ page += 1
+ var = next(iter(filter(lambda v: v['key'] == module.params['name'], content['values'])), None)
+
+ if var is not None:
+ var['name'] = var.pop('key')
+ return var
+
+ return None
+
+
+def create_pipeline_variable(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['name'],
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def update_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='PUT',
+ data={
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+class BitBucketPipelineVariable(AnsibleModule):
+ def __init__(self, *args, **kwargs):
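+ # Peek at the raw task arguments before argument-spec processing so that
+ # `value` can be marked no_log when the variable is secured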
+ params = _load_params() or {}
+ if params.get('secured'):
+ kwargs['argument_spec']['value'].update({'no_log': True})
+ super(BitBucketPipelineVariable, self).__init__(*args, **kwargs)
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ secured=dict(type='bool', default=False),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = BitBucketPipelineVariable(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ value = module.params['value']
+ state = module.params['state']
+ secured = module.params['secured']
+
+ # Check parameters
+ if (value is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_value'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing pipeline variable (if any)
+ existing_variable = get_existing_pipeline_variable(module, bitbucket)
+ changed = False
+
+ # Create a new variable in case it doesn't exist
+ if not existing_variable and (state == 'present'):
+ if not module.check_mode:
+ create_pipeline_variable(module, bitbucket)
+ changed = True
+
+ # Update variable if it is secured or the old value does not match the new one
+ elif existing_variable and (state == 'present'):
+ if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value):
+ if not module.check_mode:
+ update_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ # Delete variable
+ elif existing_variable and (state == 'absent'):
+ if not module.check_mode:
+ delete_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py
new file mode 100644
index 00000000..7af3f279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, André Paramés <git@andreparames.com>
+# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: bzr
+author:
+- André Paramés (@andreparames)
+short_description: Deploy software (or files) from bzr branches
+description:
+ - Manage I(bzr) branches to deploy files or software.
+options:
+ name:
+ description:
+ - SSH or HTTP protocol address of the parent branch.
+ aliases: [ parent ]
+ required: yes
+ dest:
+ description:
+ - Absolute path of where the branch should be cloned to.
+ required: yes
+ version:
+ description:
+ - What version of the branch to clone. This can be the
+ bzr revno or revid.
+ default: head
+ force:
+ description:
+ - If C(yes), any modified files in the working
+ tree will be discarded. Before 1.9 the default
+ value was C(yes).
+ type: bool
+ default: 'no'
+ executable:
+ description:
+ - Path to bzr executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+'''
+
+EXAMPLES = '''
+- name: Checkout
+ community.general.bzr:
+ name: bzr+ssh://foosball.example.org/path/to/branch
+ dest: /srv/checkout
+ version: 22
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bzr(object):
+ def __init__(self, module, parent, dest, version, bzr_path):
+ self.module = module
+ self.parent = parent
+ self.dest = dest
+ self.version = version
+ self.bzr_path = bzr_path
+
+ def _command(self, args_list, cwd=None, **kwargs):
+ (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
+ return (rc, out, err)
+
+ def get_version(self):
+ '''returns the current revno of the bzr branch'''
+
+ cmd = "%s revno" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ revno = stdout.strip()
+ return revno
+
+ def clone(self):
+ '''makes a new bzr branch if it does not already exist'''
+ dest_dirname = os.path.dirname(self.dest)
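+ # Ensure the parent directory exists; ignore the error if it already does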
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ if self.version.lower() != 'head':
+ args_list = ["branch", "-r", self.version, self.parent, self.dest]
+ else:
+ args_list = ["branch", self.parent, self.dest]
+ return self._command(args_list, check_rc=True, cwd=dest_dirname)
+
+ def has_local_mods(self):
+
+ cmd = "%s status -S" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ lines = stdout.splitlines()
+
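+ # Ignore unversioned files ('??' lines); anything else reported by
+ # 'bzr status -S' counts as a local modification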
+ lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
+ return len(lines) > 0
+
+ def reset(self, force):
+ '''
+ Reverts the working tree to the last committed revision.
+ Discards any changes to tracked files in the working
+ tree since that commit.
+ '''
+ if not force and self.has_local_mods():
+ self.module.fail_json(msg="Local modifications exist in branch (force=no).")
+ return self._command(["revert"], check_rc=True, cwd=self.dest)
+
+ def fetch(self):
+ '''updates branch from remote sources'''
+ if self.version.lower() != 'head':
+ (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
+ else:
+ (rc, out, err) = self._command(["pull"], cwd=self.dest)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to pull")
+ return (rc, out, err)
+
+ def switch_version(self):
+ '''once pulled, switch to a particular revno or revid'''
+ if self.version.lower() != 'head':
+ args_list = ["revert", "-r", self.version]
+ else:
+ args_list = ["revert"]
+ return self._command(args_list, check_rc=True, cwd=self.dest)
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', required=True),
+ name=dict(type='str', required=True, aliases=['parent']),
+ version=dict(type='str', default='head'),
+ force=dict(type='bool', default=False),
+ executable=dict(type='str'),
+ )
+ )
+
+ dest = module.params['dest']
+ parent = module.params['name']
+ version = module.params['version']
+ force = module.params['force']
+ bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
+
+ bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
+
+ rc, out, err = (0, None, None)
+
+ bzr = Bzr(module, parent, dest, version, bzr_path)
+
+ # if there is no bzr configuration, do a branch operation
+ # else pull and switch the version
+ before = None
+ local_mods = False
+ if not os.path.exists(bzrconfig):
+ (rc, out, err) = bzr.clone()
+
+ else:
+ # else do a pull
+ local_mods = bzr.has_local_mods()
+ before = bzr.get_version()
+ (rc, out, err) = bzr.reset(force)
+ if rc != 0:
+ module.fail_json(msg=err)
+ (rc, out, err) = bzr.fetch()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ # switch to version specified regardless of whether
+ # we cloned or pulled
+ (rc, out, err) = bzr.switch_version()
+
+ # determine if we changed anything
+ after = bzr.get_version()
+ changed = False
+
+ if before != after or local_mods:
+ changed = True
+
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py
new file mode 100644
index 00000000..66ef45f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Marius Gedminas <marius@pov.lt>
+# (c) 2016, Matthew Gamble <git@matthewgamble.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git_config
+author:
+ - Matthew Gamble (@djmattyg007)
+ - Marius Gedminas (@mgedmin)
+requirements: ['git']
+short_description: Read and write git configuration
+description:
+ - The C(git_config) module changes git configuration by invoking 'git config'.
+ This is needed if you don't want to use M(ansible.builtin.template) for the entire git
+ config file (e.g. because you need to change just C(user.email) in
+ /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or
+ don't work correctly in check mode.
+options:
+ list_all:
+ description:
+ - List all settings (optionally limited to a given I(scope)).
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - The name of the setting. If no value is supplied, the value will
+ be read from the config if it has been set.
+ repo:
+ description:
+ - Path to a git repository for reading and writing values from a
+ specific repo.
+ scope:
+ description:
+ - Specify which scope to read/set values from. This is required
+ when setting config values. If this is set to C(local), you must
+ also specify the I(repo) parameter. It defaults to C(system) only
+ when I(list_all) is not set to C(yes).
+ choices: [ "local", "global", "system" ]
+ state:
+ description:
+ - "Indicates the setting should be set/unset.
+ This parameter has higher precedence than I(value) parameter:
+ when I(state)=absent and I(value) is defined, I(value) is discarded."
+ choices: [ 'present', 'absent' ]
+ default: 'present'
+ value:
+ description:
+ - When specifying the name of a single setting, supply a value to
+ set that setting to the given value.
+'''
+
+EXAMPLES = '''
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ value: commit
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.st
+ scope: global
+ value: status
+
+- name: Remove a setting from ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ state: absent
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: core.editor
+ scope: global
+ value: vim
+
+- name: Add a setting system-wide
+ community.general.git_config:
+ name: alias.remotev
+ scope: system
+ value: remote -v
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: alias.diffc
+ value: diff --cached
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: color.ui
+ value: auto
+
+ - name: Make etckeeper not complain when it is invoked by cron
+ community.general.git_config:
+ name: user.email
+ repo: /etc
+ scope: local
+ value: 'root@{{ ansible_fqdn }}'
+
+- name: Read individual values from git config
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+
+- name: Scope system is also assumed when reading values, unless list_all=yes
+ community.general.git_config:
+ name: alias.diffc
+
+- name: Read all values from git config
+ community.general.git_config:
+ list_all: yes
+ scope: global
+
+- name: When list_all is yes and no scope is specified, you get configuration from all scopes
+ community.general.git_config:
+ list_all: yes
+
+- name: Specify a repository to include local settings
+ community.general.git_config:
+ list_all: yes
+ repo: /path/to/repo.git
+'''
+
+RETURN = '''
+---
+config_value:
+ description: When list_all=no and value is not set, a string containing the value of the setting in name
+ returned: success
+ type: str
+ sample: "vim"
+
+config_values:
+ description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
+ returned: success
+ type: dict
+ sample:
+ core.editor: "vim"
+ color.ui: "auto"
+ alias.diffc: "diff --cached"
+ alias.remotev: "remote -v"
+'''
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ list_all=dict(required=False, type='bool', default=False),
+ name=dict(type='str'),
+ repo=dict(type='path'),
+ scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ value=dict(required=False)
+ ),
+ mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
+ required_if=[('scope', 'local', ['repo'])],
+ required_one_of=[['list_all', 'name']],
+ supports_check_mode=True,
+ )
+ git_path = module.get_bin_path('git', True)
+
+ params = module.params
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if params['name']:
+ name = params['name']
+ else:
+ name = None
+
+ if params['scope']:
+ scope = params['scope']
+ elif params['list_all']:
+ scope = None
+ else:
+ scope = 'system'
+
+ if params['state'] == 'absent':
+ unset = 'unset'
+ params['value'] = None
+ else:
+ unset = None
+
+ if params['value']:
+ new_value = params['value']
+ else:
+ new_value = None
+
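+ # Build the git invocation; --includes makes 'git config' honour
+ # include.path directives when looking up values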
+ args = [git_path, "config", "--includes"]
+ if params['list_all']:
+ args.append('-l')
+ if scope:
+ args.append("--" + scope)
+ if name:
+ args.append(name)
+
+ if scope == 'local':
+ dir = params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ dir = params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ dir = "/"
+
+ (rc, out, err) = module.run_command(' '.join(args), cwd=dir)
+ if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
+ # This just means nothing has been set at the given scope
+ module.exit_json(changed=False, msg='', config_values={})
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+
+ if params['list_all']:
+ values = out.rstrip().splitlines()
+ config_values = {}
+ for value in values:
+ k, v = value.split('=', 1)
+ config_values[k] = v
+ module.exit_json(changed=False, msg='', config_values=config_values)
+ elif not new_value and not unset:
+ module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ elif unset and not out:
+ module.exit_json(changed=False, msg='no setting to unset')
+ else:
+ old_value = out.rstrip()
+ if old_value == new_value:
+ module.exit_json(changed=False, msg="")
+
+ if not module.check_mode:
+ if unset:
+ args.insert(len(args) - 1, "--" + unset)
+ cmd = ' '.join(args)
+ else:
+ new_value_quoted = shlex_quote(new_value)
+ cmd = ' '.join(args + [new_value_quoted])
+ try: # try using extra parameter from ansible-base 2.10.4 onwards
+ (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False)
+ except TypeError:
+ # @TODO remove try/except when community.general drop support for 2.10.x
+ if not os.path.isdir(dir):
+ module.fail_json(msg="Cannot find directory '{0}'".format(dir))
+ (rc, out, err) = module.run_command(cmd, cwd=dir)
+ if err:
+ module.fail_json(rc=rc, msg=err, cmd=cmd)
+
+ module.exit_json(
+ msg='setting changed',
+ diff=dict(
+ before_header=' '.join(args),
+ before=old_value + "\n",
+ after_header=' '.join(args),
+ after=(new_value or '') + "\n"
+ ),
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py
new file mode 100644
index 00000000..8836454e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_deploy_key
+author: "Ali (@bincyber)"
+short_description: Manages deploy keys for GitHub repositories.
+description:
+ - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password,
+ username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin
+ rights on the repository are required."
+options:
+ github_url:
+ description:
+ - The base URL of the GitHub API
+ required: false
+ type: str
+ version_added: '0.2.0'
+ default: https://api.github.com
+ owner:
+ description:
+ - The name of the individual account or organization that owns the GitHub repository.
+ required: true
+ aliases: [ 'account', 'organization' ]
+ repo:
+ description:
+ - The name of the GitHub repository.
+ required: true
+ aliases: [ 'repository' ]
+ name:
+ description:
+ - The name for the deploy key.
+ required: true
+ aliases: [ 'title', 'label' ]
+ key:
+ description:
+ - The SSH public key to add to the repository as a deploy key.
+ required: true
+ read_only:
+ description:
+ - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - The state of the deploy key.
+ default: "present"
+ choices: [ "present", "absent" ]
+ force:
+ description:
+ - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
+ type: bool
+ default: 'no'
+ username:
+ description:
+ - The username to authenticate with. Should not be set when using a personal access token.
+ password:
+ description:
+ - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
+ token:
+ description:
+ - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
+ otp:
+ description:
+ - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password).
+ aliases: ['2fa_token']
+notes:
+ - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
+'''
+
+EXAMPLES = '''
+- name: Add a new read-only deploy key to a GitHub repository using basic authentication
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Remove an existing deploy key from a GitHub repository
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ force: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+ state: absent
+
+- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ force: yes
+ token: "ABAQDAwXxn7kIMNWzcDfo..."
+
+- name: Re-add a deploy key to a GitHub repository but with a different name
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "replace-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Add a new deploy key to a GitHub repository using 2FA
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key-2"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ username: "johndoe"
+ password: "supersecretpassword"
+ otp: 123456
+
+- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise
+ community.general.github_deploy_key:
+ github_url: "https://api.example.com"
+ owner: "janedoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "janedoe"
+ password: "supersecretpassword"
+'''
+
+RETURN = '''
+msg:
+ description: the status message describing what occurred
+ returned: always
+ type: str
+ sample: "Deploy key added successfully"
+
+http_status_code:
+ description: the HTTP status code returned by the GitHub API
+ returned: failed
+ type: int
+ sample: 400
+
+error:
+ description: the error message returned by the GitHub API
+ returned: failed
+ type: str
+ sample: "key is already in use"
+
+id:
+ description: the key identifier assigned by GitHub for the deploy key
+ returned: changed
+ type: int
+ sample: 24381901
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from re import findall
+
+
+class GithubDeployKey(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.github_url = self.module.params['github_url']
+ self.name = module.params['name']
+ self.key = module.params['key']
+ self.state = module.params['state']
+ self.read_only = module.params.get('read_only', True)
+ self.force = module.params.get('force', False)
+ self.username = module.params.get('username', None)
+ self.password = module.params.get('password', None)
+ self.token = module.params.get('token', None)
+ self.otp = module.params.get('otp', None)
+
+ @property
+ def url(self):
+ owner = self.module.params['owner']
+ repo = self.module.params['repo']
+ return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo)
+
+ @property
+ def headers(self):
+ if self.username is not None and self.password is not None:
+ self.module.params['url_username'] = self.username
+ self.module.params['url_password'] = self.password
+ self.module.params['force_basic_auth'] = True
+ if self.otp is not None:
+ return {"X-GitHub-OTP": self.otp}
+ elif self.token is not None:
+ return {"Authorization": "token {0}".format(self.token)}
+ else:
+ return None
+
+ def paginate(self, url):
+ while url:
+ resp, info = fetch_url(self.module, url, headers=self.headers, method="GET")
+
+ if info["status"] == 200:
+ yield self.module.from_json(resp.read())
+
+ links = {}
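+ # Parse the RFC 5988 'Link' response header to discover the URL of
+ # the next page, e.g. <https://api.github.com/...>; rel="next"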
+ for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", "")):
+ links[y] = x
+
+ url = links.get('next')
+ else:
+ self.handle_error(method="GET", info=info)
+
+ def get_existing_key(self):
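+ # GitHub stores a deploy key as "<type> <base64>" without the local
+ # comment, so match on the first two fields of the supplied key; when
+ # force=yes a matching title is also treated as the same key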
+ for keys in self.paginate(self.url):
+ if keys:
+ for i in keys:
+ existing_key_id = str(i["id"])
+ if i["key"].split() == self.key.split()[:2]:
+ return existing_key_id
+ elif i['title'] == self.name and self.force:
+ return existing_key_id
+ else:
+ return None
+
+ def add_new_key(self):
+ request_body = {"title": self.name, "key": self.key, "read_only": self.read_only}
+
+ resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30)
+
+ status_code = info["status"]
+
+ if status_code == 201:
+ response_body = self.module.from_json(resp.read())
+ key_id = response_body["id"]
+ self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id)
+ elif status_code == 422:
+ self.module.exit_json(changed=False, msg="Deploy key already exists")
+ else:
+ self.handle_error(method="POST", info=info)
+
+ def remove_existing_key(self, key_id):
+ resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE")
+
+ status_code = info["status"]
+
+ if status_code == 204:
+ if self.state == 'absent':
+ self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id)
+ else:
+ self.handle_error(method="DELETE", info=info, key_id=key_id)
+
+ def handle_error(self, method, info, key_id=None):
+ status_code = info['status']
+ body = info.get('body')
+ err = None
+ if body:
+ err = self.module.from_json(body)['message']
+
+ if status_code == 401:
+ self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err)
+ elif status_code == 404:
+ self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err)
+ else:
+ if method == "GET":
+ self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err)
+ elif method == "POST":
+ self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err)
+ elif method == "DELETE":
+ self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ github_url=dict(required=False, type='str', default="https://api.github.com"),
+ owner=dict(required=True, type='str', aliases=['account', 'organization']),
+ repo=dict(required=True, type='str', aliases=['repository']),
+ name=dict(required=True, type='str', aliases=['title', 'label']),
+ key=dict(required=True, type='str'),
+ read_only=dict(required=False, type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ force=dict(required=False, type='bool', default=False),
+ username=dict(required=False, type='str'),
+ password=dict(required=False, type='str', no_log=True),
+ otp=dict(required=False, type='int', aliases=['2fa_token'], no_log=True),
+ token=dict(required=False, type='str', no_log=True)
+ ),
+ mutually_exclusive=[
+ ['password', 'token']
+ ],
+ required_together=[
+ ['username', 'password'],
+ ['otp', 'username', 'password']
+ ],
+ required_one_of=[
+ ['username', 'token']
+ ],
+ supports_check_mode=True,
+ )
+
+ deploy_key = GithubDeployKey(module)
+
+ if module.check_mode:
+ key_id = deploy_key.get_existing_key()
+ if deploy_key.state == "present" and key_id is None:
+ module.exit_json(changed=True)
+ elif deploy_key.state == "present" and key_id is not None:
+ module.exit_json(changed=False)
+
+ # to forcefully modify an existing key, the existing key must be deleted first
+ if deploy_key.state == 'absent' or deploy_key.force:
+ key_id = deploy_key.get_existing_key()
+
+ if key_id is not None:
+ deploy_key.remove_existing_key(key_id)
+ elif deploy_key.state == 'absent':
+ module.exit_json(changed=False, msg="Deploy key does not exist")
+
+ if deploy_key.state == "present":
+ deploy_key.add_new_key()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py
new file mode 100644
index 00000000..e326711d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_hooks
+short_description: Manages GitHub service hooks.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Replaced by more granular modules
+ alternative: Use M(community.general.github_webhook) and M(community.general.github_webhook_info) instead.
+description:
+ - Adds service hooks and removes service hooks that have an error status.
+options:
+ user:
+ description:
+ - GitHub username.
+ required: true
+ oauthkey:
+ description:
+ - The oauth key provided by GitHub. It can be found/generated on GitHub under "Edit Your Profile" >> "Developer settings" >> "Personal Access Tokens".
+ required: true
+ repo:
+ description:
+ - >
+ This is the API URL for the repository you want to manage hooks for. It should be in the form https://api.github.com/repos/<user>/<repo>.
+ Note this is different from the normal repo URL.
+ required: true
+ hookurl:
+ description:
+ - When creating a new hook, this is the url that you want GitHub to post to. It is only required when creating a new hook.
+ required: false
+ action:
+ description:
+ - This tells the github_hooks module what you want it to do.
+ required: true
+ choices: [ "create", "cleanall", "list", "clean504" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ content_type:
+ description:
+ - Content type to use for requests made to the webhook
+ required: false
+ default: 'json'
+ choices: ['json', 'form']
+
+author: "Phillip Gentry, CX Inc (@pcgentry)"
+'''
+
+EXAMPLES = '''
+- name: Create a new service hook ignoring duplicates
+ community.general.github_hooks:
+ action: create
+ hookurl: http://11.111.111.111:2222
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
+
+# Cleaning all hooks for this repo that had an error on the last update.
+# Since this works for all hooks in a repo it is probably best that this would be called from a handler.
+- name: Clean all hooks
+ community.general.github_hooks:
+ action: cleanall
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: '{{ repo }}'
+ delegate_to: localhost
+'''
+
+import json
+import base64
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+ from ansible.module_utils._text import to_bytes, to_text
+
+
+def request(module, url, user, oauthkey, data='', method='GET'):
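+ # GitHub basic authentication: base64-encode "user:oauthkey" for the
+ # Authorization header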
+ auth = to_text(base64.b64encode(to_bytes('%s:%s' % (user, oauthkey)))).replace('\n', '')
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method=method)
+ return response, info
+
+
+def _list(module, oauthkey, repo, user):
+ url = "%s/hooks" % repo
+ response, info = request(module, url, user, oauthkey)
+ if info['status'] != 200:
+ return False, ''
+ else:
+ return False, response.read()
+
+
+def _clean504(module, oauthkey, repo, user):
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] == 504:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _cleanall(module, oauthkey, repo, user):
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] != 200:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _create(module, hookurl, oauthkey, repo, user, content_type):
+ url = "%s/hooks" % repo
+ values = {
+ "active": True,
+ "name": "web",
+ "config": {
+ "url": "%s" % hookurl,
+ "content_type": "%s" % content_type
+ }
+ }
+ data = json.dumps(values)
+ response, info = request(module, url, user, oauthkey, data=data, method='POST')
+ if info['status'] != 200:
+ return 0, '[]'
+ else:
+ return 0, response.read()
+
+
+def _delete(module, oauthkey, repo, user, hookid):
+ url = "%s/hooks/%s" % (repo, hookid)
+ response, info = request(module, url, user, oauthkey, method='DELETE')
+ return response.read()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=['list', 'clean504', 'cleanall', 'create']),
+ hookurl=dict(required=False),
+ oauthkey=dict(required=True, no_log=True),
+ repo=dict(required=True),
+ user=dict(required=True),
+ validate_certs=dict(default=True, type='bool'),
+ content_type=dict(default='json', choices=['json', 'form']),
+ )
+ )
+
+ action = module.params['action']
+ hookurl = module.params['hookurl']
+ oauthkey = module.params['oauthkey']
+ repo = module.params['repo']
+ user = module.params['user']
+ content_type = module.params['content_type']
+
+ if action == "list":
+ (rc, out) = _list(module, oauthkey, repo, user)
+
+ if action == "clean504":
+ (rc, out) = _clean504(module, oauthkey, repo, user)
+
+ if action == "cleanall":
+ (rc, out) = _cleanall(module, oauthkey, repo, user)
+
+ if action == "create":
+ (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py
new file mode 100644
index 00000000..9c4b558b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017-18, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_issue
+short_description: View GitHub issue.
+description:
+ - View GitHub issue for a given repository and organization.
+options:
+ repo:
+ description:
+ - Name of repository from which issue needs to be retrieved.
+ required: true
+ organization:
+ description:
+ - Name of the GitHub organization in which the repository is hosted.
+ required: true
+ issue:
+ description:
+ - Issue number for which information is required.
+ required: true
+ action:
+ description:
+ - Get various details about the issue depending upon the action specified.
+ default: 'get_status'
+ choices:
+ - 'get_status'
+author:
+ - Abhijeet Kasurde (@Akasurde)
+'''
+
+RETURN = '''
+get_status:
+ description: State of the GitHub issue
+ type: str
+ returned: success
+ sample: open, closed
+'''
+
+EXAMPLES = '''
+- name: Check if GitHub issue is closed or not
+ community.general.github_issue:
+ organization: ansible
+ repo: ansible
+ issue: 23642
+ action: get_status
+ register: r
+
+- name: Take action depending upon issue status
+ ansible.builtin.debug:
+ msg: Do something when issue 23642 is open
+ when: r.issue_status == 'open'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ organization=dict(required=True),
+ repo=dict(required=True),
+ issue=dict(type='int', required=True),
+ action=dict(choices=['get_status'], default='get_status'),
+ ),
+ supports_check_mode=True,
+ )
+
+ organization = module.params['organization']
+ repo = module.params['repo']
+ issue = module.params['issue']
+ action = module.params['action']
+
+ result = dict()
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+
+ url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)
+
+ response, info = fetch_url(module, url, headers=headers)
+ if not (200 <= info['status'] < 400):
+ if info['status'] == 404:
+ module.fail_json(msg="Failed to find issue %s" % issue)
+ module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg']))
+
+ gh_obj = json.loads(response.read())
+
+ if action == 'get_status' or action is None:
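+ # Read-only status lookup; note the module still reports changed=True.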
+ if module.check_mode:
+ result.update(changed=True)
+ else:
+ result.update(changed=True, issue_status=gh_obj['state'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py
new file mode 100644
index 00000000..415065f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys.
+description:
+ - Creates, removes, or updates GitHub access keys.
+options:
+ token:
+ description:
+ - GitHub Access Token with permission to list and create public keys.
+ required: true
+ name:
+ description:
+ - SSH key name
+ required: true
+ pubkey:
+ description:
+ - SSH public key value. Required when C(state=present).
+ state:
+ description:
+ - Whether to remove a key, ensure that it exists, or update its value.
+ choices: ['present', 'absent']
+ default: 'present'
+ force:
+ description:
+ - The default is C(yes), which will replace the existing remote key
+ if it is different from C(pubkey). If C(no), the key will only be
+ set if no key with the given C(name) exists.
+ type: bool
+ default: 'yes'
+
+author: Robert Estelle (@erydo)
+'''
+
+RETURN = '''
+deleted_keys:
+ description: An array of key objects that were deleted. Only present on state=absent
+ type: list
+ returned: When state=absent
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+matching_keys:
+ description: An array of keys matching the specified name. Only present on state=present
+ type: list
+ returned: When state=present
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+key:
+ description: Metadata about the key just created. Only present on state=present
+ type: dict
+ returned: success
+ sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}
+'''
+
+EXAMPLES = '''
+- name: Read SSH public key to authorize
+ ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub
+ register: ssh_pub_key
+
+- name: Authorize key with GitHub
+ local_action:
+ module: github_key
+ name: Access Key for Some Machine
+ token: '{{ github_access_token }}'
+ pubkey: '{{ ssh_pub_key.stdout }}'
+'''
+
+
+import json
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+API_BASE = 'https://api.github.com'
+
+
+class GitHubResponse(object):
+ def __init__(self, response, info):
+ self.content = response.read()
+ self.info = info
+
+ def json(self):
+ return json.loads(self.content)
+
+ def links(self):
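+ # Parse the RFC 5988 'Link' response header into a {rel: url} map; the
+ # 'next' entry drives pagination in get_all_keys() below.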
+ links = {}
+ if 'link' in self.info:
+ link_header = self.info['link']
+ matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
+ for url, rel in matches:
+ links[rel] = url
+ return links
+
+
+class GitHubSession(object):
+ def __init__(self, module, token):
+ self.module = module
+ self.token = token
+
+ def request(self, method, url, data=None):
+ headers = {
+ 'Authorization': 'token %s' % self.token,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+ response, info = fetch_url(
+ self.module, url, method=method, data=data, headers=headers)
+ if not (200 <= info['status'] < 400):
+ self.module.fail_json(
+ msg=(" failed to send request %s to %s: %s"
+ % (method, url, info['msg'])))
+ return GitHubResponse(response, info)
+
+
+def get_all_keys(session):
+ url = API_BASE + '/user/keys'
+ result = []
+ while url:
+ r = session.request('GET', url)
+ result.extend(r.json())
+ url = r.links().get('next')
+ return result
+
+
+def create_key(session, name, pubkey, check_mode):
+ if check_mode:
+ from datetime import datetime
+ now = datetime.utcnow()
+ return {
+ 'id': 0,
+ 'key': pubkey,
+ 'title': name,
+ 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
+ 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
+ 'read_only': False,
+ 'verified': False
+ }
+ else:
+ return session.request(
+ 'POST',
+ API_BASE + '/user/keys',
+ data=json.dumps({'title': name, 'key': pubkey})).json()
+
+
+def delete_keys(session, to_delete, check_mode):
+ if check_mode:
+ return
+
+ for key in to_delete:
+ session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
+
+
+def ensure_key_absent(session, name, check_mode):
+ to_delete = [key for key in get_all_keys(session) if key['title'] == name]
+ delete_keys(session, to_delete, check_mode=check_mode)
+
+ return {'changed': bool(to_delete),
+ 'deleted_keys': to_delete}
+
+
+def ensure_key_present(module, session, name, pubkey, force, check_mode):
+ all_keys = get_all_keys(session)
+ matching_keys = [k for k in all_keys if k['title'] == name]
+ deleted_keys = []
+
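+ # Keys are "<type> <base64-data> [comment]"; compare only the data field so
+ # a differing comment does not count as a different key.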
+ new_signature = pubkey.split(' ')[1]
+ for key in all_keys:
+ existing_signature = key['key'].split(' ')[1]
+ if new_signature == existing_signature and key['title'] != name:
+ module.fail_json(msg=(
+ "another key with the same content is already registered "
+ "under the name |{0}|").format(key['title']))
+
+ if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature:
+ delete_keys(session, matching_keys, check_mode=check_mode)
+ (deleted_keys, matching_keys) = (matching_keys, [])
+
+ if not matching_keys:
+ key = create_key(session, name, pubkey, check_mode=check_mode)
+ else:
+ key = matching_keys[0]
+
+ return {
+ 'changed': bool(deleted_keys or not matching_keys),
+ 'deleted_keys': deleted_keys,
+ 'matching_keys': matching_keys,
+ 'key': key
+ }
+
+
+def main():
+ argument_spec = {
+ 'token': {'required': True, 'no_log': True},
+ 'name': {'required': True},
+ 'pubkey': {},
+ 'state': {'choices': ['present', 'absent'], 'default': 'present'},
+ 'force': {'default': True, 'type': 'bool'},
+ }
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ name = module.params['name']
+ state = module.params['state']
+ force = module.params['force']
+ pubkey = module.params.get('pubkey')
+
+ if pubkey:
+ pubkey_parts = pubkey.split(' ')
+ # Keys consist of a protocol, the key data, and an optional comment.
+ if len(pubkey_parts) < 2:
+ module.fail_json(msg='"pubkey" parameter has an invalid format')
+ elif state == 'present':
+ module.fail_json(msg='"pubkey" is required when state=present')
+
+ session = GitHubSession(module, token)
+ if state == 'present':
+ result = ensure_key_present(module, session, name, pubkey, force=force,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py
new file mode 100644
index 00000000..5372d6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+ - Fetch metadata about GitHub Releases
+options:
+ token:
+ description:
+ - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
+ user:
+ description:
+ - The GitHub account that owns the repository
+ required: true
+ password:
+ description:
+ - The GitHub account password for the user. Mutually exclusive with C(token).
+ repo:
+ description:
+ - Repository name
+ required: true
+ action:
+ description:
+ - Action to perform
+ required: true
+ choices: [ 'latest_release', 'create_release' ]
+ tag:
+ description:
+ - Tag name when creating a release. Required when I(action) is set to C(create_release).
+ target:
+ description:
+ - Target of release when creating a release
+ name:
+ description:
+ - Name of release when creating a release
+ body:
+ description:
+ - Description of the release when creating a release
+ draft:
+ description:
+ - Sets if the release is a draft or not. (boolean)
+ type: bool
+ default: 'no'
+ prerelease:
+ description:
+ - Sets if the release is a prerelease or not. (boolean)
+ type: bool
+ default: 'no'
+
+author:
+ - "Adrian Moisey (@adrianmoisey)"
+requirements:
+ - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of a public repository
+ community.general.github_release:
+ user: ansible
+ repo: ansible
+ action: latest_release
+
+- name: Get latest release of testuser/testrepo
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
+
+- name: Get latest release of testrepo using username and password (Ansible 2.4 and later)
+ community.general.github_release:
+ user: testuser
+ password: secret123
+ repo: testrepo
+ action: latest_release
+
+- name: Create a new release
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: create_release
+ tag: test
+ target: master
+ name: My Release
+ body: Some description
+
+'''
+
+RETURN = '''
+create_release:
+ description:
+ - Version of the created release
+ - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged"
+ - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped"
+ type: str
+ returned: success
+ sample: 1.1.0
+
+latest_release:
+ description: Version of the latest release
+ type: str
+ returned: success
+ sample: 1.1.0
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github3
+
+ HAS_GITHUB_API = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(required=True),
+ user=dict(required=True),
+ password=dict(no_log=True),
+ token=dict(no_log=True),
+ action=dict(
+ required=True, choices=['latest_release', 'create_release']),
+ tag=dict(type='str'),
+ target=dict(type='str'),
+ name=dict(type='str'),
+ body=dict(type='str'),
+ draft=dict(type='bool', default=False),
+ prerelease=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('password', 'token'),),
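+ # The second rule's trailing True means 'any of': create_release needs
+ # 'tag' plus at least one of password/token.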
+ required_if=[('action', 'create_release', ['tag']),
+ ('action', 'create_release', ['password', 'token'], True)],
+ )
+
+ if not HAS_GITHUB_API:
+ module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
+ exception=GITHUB_IMP_ERR)
+
+ repo = module.params['repo']
+ user = module.params['user']
+ password = module.params['password']
+ login_token = module.params['token']
+ action = module.params['action']
+ tag = module.params.get('tag')
+ target = module.params.get('target')
+ name = module.params.get('name')
+ body = module.params.get('body')
+ draft = module.params.get('draft')
+ prerelease = module.params.get('prerelease')
+
+ # login to github
+ try:
+ if password:
+ gh_obj = github3.login(user, password=password)
+ elif login_token:
+ gh_obj = github3.login(token=login_token)
+ else:
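+ # No credentials: anonymous access, which only works for public
+ # repositories and is subject to much lower API rate limits.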
+ gh_obj = github3.GitHub()
+
+ # test if we're actually logged in
+ if password or login_token:
+ gh_obj.me()
+ except github3.exceptions.AuthenticationFailed as e:
+ module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
+ details="Please check username and password or token "
+ "for repository %s" % repo)
+
+ repository = gh_obj.repository(user, repo)
+
+ if not repository:
+ module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+ if action == 'latest_release':
+ release = repository.latest_release()
+ if release:
+ module.exit_json(tag=release.tag_name)
+ else:
+ module.exit_json(tag=None)
+
+ if action == 'create_release':
+ release_exists = repository.release_from_tag(tag)
+ if release_exists:
+ module.exit_json(changed=False, msg="Release for tag %s already exists." % tag)
+
+ release = repository.create_release(
+ tag, target, name, body, draft, prerelease)
+ if release:
+ module.exit_json(changed=True, tag=release.tag_name)
+ else:
+ module.exit_json(changed=False, tag=None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py
new file mode 100644
index 00000000..ac153689
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook
+short_description: Manage GitHub webhooks
+description:
+ - "Create and delete GitHub webhooks"
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ required: true
+ aliases:
+ - repo
+ url:
+ description:
+ - URL to which payloads will be delivered
+ required: true
+ content_type:
+ description:
+ - The media type used to serialize the payloads
+ required: false
+ choices: [ form, json ]
+ default: form
+ secret:
+ description:
+ - The shared secret between GitHub and the payload URL.
+ required: false
+ insecure_ssl:
+ description:
+ - >
+ Flag to indicate that GitHub should skip SSL verification when calling
+ the hook.
+ required: false
+ type: bool
+ default: false
+ events:
+ description:
+ - >
+ A list of GitHub events the hook is triggered for. Events are listed at
+ U(https://developer.github.com/v3/activity/events/types/). Required
+ unless C(state) is C(absent)
+ required: false
+ type: list
+ elements: str
+ active:
+ description:
+ - Whether or not the hook is active
+ required: false
+ type: bool
+ default: true
+ state:
+ description:
+ - Whether the hook should be present or absent
+ required: false
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: Create a new webhook that triggers on push (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ events:
+ - push
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+
+- name: Create a new webhook in a GitHub Enterprise installation with multiple event triggers (token auth)
+ community.general.github_webhook:
+ repository: myorg/myrepo
+ url: https://jenkins.example.com/ghprbhook/
+ content_type: json
+ secret: "{{ github_shared_secret }}"
+ insecure_ssl: True
+ events:
+ - issue_comment
+ - pull_request
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com
+
+- name: Delete a webhook (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ state: absent
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+'''
+
+RETURN = '''
+---
+hook_id:
+ description: The GitHub ID of the hook created/updated
+ returned: when state is 'present'
+ type: int
+ sample: 6206
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def _create_hook_config(module):
+ return {
+ "url": module.params["url"],
+ "content_type": module.params["content_type"],
+ "secret": module.params.get("secret"),
+ "insecure_ssl": "1" if module.params["insecure_ssl"] else "0"
+ }
+
+
+def create_hook(repo, module):
+ config = _create_hook_config(module)
+ try:
+ hook = repo.create_hook(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to create hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return True, data
+
+
+def update_hook(repo, hook, module):
+ config = _create_hook_config(module)
+ try:
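+ # PyGithub's Hook.update() refreshes the object from the API and returns
+ # True when its attributes changed; the second call after edit() is how
+ # this module detects whether anything changed server-side.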
+ hook.update()
+ hook.edit(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+
+ changed = hook.update()
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to modify hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return changed, data
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=['repo']),
+ url=dict(type='str', required=True),
+ content_type=dict(
+ type='str',
+ choices=('json', 'form'),
+ required=False,
+ default='form'),
+ secret=dict(type='str', required=False, no_log=True),
+ insecure_ssl=dict(type='bool', required=False, default=False),
+ events=dict(type='list', elements='str', required=False),
+ active=dict(type='bool', required=False, default=True),
+ state=dict(
+ type='str',
+ required=False,
+ choices=('absent', 'present'),
+ default='present'),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'),),
+ required_one_of=(("password", "token"),),
+ required_if=(("state", "present", ("events",)),),
+ )
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ hook = None
+ try:
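+ # for/else: the else branch runs only if the loop never breaks, i.e.
+ # no existing hook matches the configured URL.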
+ for hook in repo.get_hooks():
+ if hook.config.get("url") == module.params["url"]:
+ break
+ else:
+ hook = None
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to get hooks from repository %s: %s" % (
+ module.params["repository"], to_native(err)))
+
+ changed = False
+ data = {}
+ if hook is None and module.params["state"] == "present":
+ changed, data = create_hook(repo, module)
+ elif hook is not None and module.params["state"] == "absent":
+ try:
+ hook.delete()
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to delete hook from repository %s: %s" % (
+ repo.full_name, to_native(err)))
+ else:
+ changed = True
+ elif hook is not None and module.params["state"] == "present":
+ changed, data = update_hook(repo, hook, module)
+ # else, there is no hook and we want there to be no hook
+
+ module.exit_json(changed=changed, **data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py
new file mode 100644
index 00000000..f99a0a03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook_info
+short_description: Query information about GitHub webhooks
+description:
+ - "Query information about GitHub webhooks"
+ - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ required: true
+ aliases:
+ - repo
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: List hooks for a repository (password auth)
+ community.general.github_webhook_info:
+ repository: ansible/ansible
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+ register: ansible_webhooks
+
+- name: List hooks for a repository on GitHub Enterprise (token auth)
+ community.general.github_webhook_info:
+ repository: myorg/myrepo
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com/api/v3/
+ register: myrepo_webhooks
+'''
+
+RETURN = '''
+---
+hooks:
+ description: A list of hooks that exist for the repo
+ returned: always
+ type: list
+ sample: >
+ [{"has_shared_secret": true,
+ "url": "https://jenkins.example.com/ghprbhook/",
+ "events": ["issue_comment", "pull_request"],
+ "insecure_ssl": "1",
+ "content_type": "json",
+ "active": true,
+ "id": 6206,
+ "last_response": {"status": "active", "message": "OK", "code": 200}}]
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def _munge_hook(hook_obj):
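+ # Flatten a PyGithub Hook into a plain dict, replacing the shared secret
+ # with a has_shared_secret boolean so the secret is never returned.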
+ retval = {
+ "active": hook_obj.active,
+ "events": hook_obj.events,
+ "id": hook_obj.id,
+ "url": hook_obj.url,
+ }
+ retval.update(hook_obj.config)
+ retval["has_shared_secret"] = "secret" in retval
+ if "secret" in retval:
+ del retval["secret"]
+
+ retval["last_response"] = hook_obj.last_response.raw_data
+ return retval
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=["repo"]),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'), ),
+ required_one_of=(("password", "token"), ),
+ supports_check_mode=True)
+ if module._name in ('github_webhook_facts', 'community.general.github_webhook_facts'):
+ module.deprecate("The 'github_webhook_facts' module has been renamed to 'github_webhook_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ try:
+ hooks = [_munge_hook(h) for h in repo.get_hooks()]
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to get hooks from repository %s: %s" %
+ (module.params["repository"], to_native(err)),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, hooks=hooks)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py
new file mode 100644
index 00000000..c66a6f9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_deploy_key
+short_description: Manages GitLab project deploy keys.
+description:
+ - Adds, updates and removes project deploy keys
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+ - Id or Full path of project in the form of group/name.
+ required: true
+ type: str
+ title:
+ description:
+ - Deploy key's title.
+ required: true
+ type: str
+ key:
+ description:
+ - Deploy key
+ required: true
+ type: str
+ can_push:
+ description:
+ - Whether this key can push to the project.
+ type: bool
+ default: no
+ state:
+ description:
+ - When C(present), the deploy key is added to the project if it does not exist.
+ - When C(absent) it will be removed from the project if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+- name: "Adding a project deploy key"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+- name: "Update the above deploy key to add push access"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ can_push: yes
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+- name: "Remove the previous deploy key from the project"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: absent
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: key is already in use"
+
+deploy_key:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabDeployKey(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.deployKeyObject = None
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ @param key_key String of the key
+ @param options Deploy key options (currently only can_push)
+ '''
+ def createOrUpdateDeployKey(self, project, key_title, key_key, options):
+ changed = False
+
+ # existsDeployKey() has already been called in main(), so deployKeyObject is set when the key exists
+ if self.deployKeyObject is None:
+ deployKey = self.createDeployKey(project, {
+ 'title': key_title,
+ 'key': key_key,
+ 'can_push': options['can_push']})
+ changed = True
+ else:
+ changed, deployKey = self.updateDeployKey(self.deployKeyObject, {
+ 'can_push': options['can_push']})
+
+ self.deployKeyObject = deployKey
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)
+
+ try:
+ deployKey.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update deploy key: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the deployKey
+ '''
+ def createDeployKey(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ deployKey = project.keys.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))
+
+ return deployKey
+
+ '''
+ @param deployKey Deploy Key Object
+ @param arguments Attributes of the deployKey
+ '''
+ def updateDeployKey(self, deployKey, arguments):
+ changed = False
+
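+ # Generic attribute sync: copy any differing, non-None values onto the
+ # object and flag that a save is needed.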
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(deployKey, arg_key) != arguments[arg_key]:
+ setattr(deployKey, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, deployKey)
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def findDeployKey(self, project, key_title):
+ deployKeys = project.keys.list()
+ for deployKey in deployKeys:
+ if (deployKey.title == key_title):
+ return deployKey
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def existsDeployKey(self, project, key_title):
+ # When the deploy key exists, the object is stored in self.deployKeyObject.
+ deployKey = self.findDeployKey(project, key_title)
+ if deployKey:
+ self.deployKeyObject = deployKey
+ return True
+ return False
+
+ def deleteDeployKey(self):
+ if self._module.check_mode:
+ return True
+
+ return self.deployKeyObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ key=dict(type='str', required=True),
+ can_push=dict(type='bool', default=False),
+ title=dict(type='str', required=True)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ key_title = module.params['title']
+ key_keyfile = module.params['key']
+ key_can_push = module.params['can_push']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier)
+
+ deployKey_exists = gitlab_deploy_key.existsDeployKey(project, key_title)
+
+ if state == 'absent':
+ if deployKey_exists:
+ gitlab_deploy_key.deleteDeployKey()
+ module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
+ else:
+ module.exit_json(changed=False, msg="Deploy key deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_deploy_key.createOrUpdateDeployKey(project, key_title, key_keyfile, {'can_push': key_can_push}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py
new file mode 100644
index 00000000..0c612733
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes GitLab Groups
+description:
+ - When the group does not exist in GitLab, it will be created.
+ - When the group does exist and state=absent, the group will be deleted.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the group you want to create.
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the group you want to create; this will be api_url/group_path.
+ - If not supplied, the group_name will be used.
+ type: str
+ description:
+ description:
+ - A description for the group.
+ type: str
+ state:
+ description:
+ - Create or delete the group.
+ - Possible values are C(present) and C(absent).
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ parent:
+ description:
+ - Allows creating subgroups.
+ - ID or full path of the parent group in the form group/name.
+ type: str
+ visibility:
+ description:
+ - Default visibility of the group
+ choices: ["private", "internal", "public"]
+ default: private
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_group
+ state: absent
+
+- name: "Create GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+
+# The group will be created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group
+- name: "Create GitLab SubGroup"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+ parent: "super_parent/parent"
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+group:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.groupObject = None
+
+ '''
+ @param group Group object
+ '''
+ def getGroupId(self, group):
+ if group is not None:
+ return group.id
+ return None
+
+ '''
+ @param name Name of the group
+ @param parent Parent group object
+ @param options Group options
+ '''
+ def createOrUpdateGroup(self, name, parent, options):
+ changed = False
+
+ # existsGroup() has already been called in main(), so groupObject is set when the group exists
+ if self.groupObject is None:
+ parent_id = self.getGroupId(parent)
+
+ payload = {
+ 'name': name,
+ 'path': options['path'],
+ 'parent_id': parent_id,
+ 'visibility': options['visibility']
+ }
+ if options.get('description'):
+ payload['description'] = options['description']
+ group = self.createGroup(payload)
+ changed = True
+ else:
+ changed, group = self.updateGroup(self.groupObject, {
+ 'name': name,
+ 'description': options['description'],
+ 'visibility': options['visibility']})
+
+ self.groupObject = group
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name)
+
+ try:
+ group.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update group: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the group
+ '''
+ def createGroup(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ group = self._gitlab.groups.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create group: %s " % to_native(e))
+
+ return group
+
+ '''
+ @param group Group Object
+ @param arguments Attributes of the group
+ '''
+ def updateGroup(self, group, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(group, arg_key) != arguments[arg_key]:
+ setattr(group, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, group)
+
+ def deleteGroup(self):
+ group = self.groupObject
+
+ if len(group.projects.list()) >= 1:
+ self._module.fail_json(
+ msg="There are still projects in this group. These need to be moved or deleted before this group can be removed.")
+ else:
+ if self._module.check_mode:
+ return True
+
+ try:
+ group.delete()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
+
+ '''
+ @param project_identifier Full path of the group, including any parent group path (<parent_path>/<group_path>)
+ '''
+ def existsGroup(self, project_identifier):
+ # When the group exists, the object is stored in self.groupObject.
+ group = findGroup(self._gitlab, project_identifier)
+ if group:
+ self.groupObject = group
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ parent=dict(type='str'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_name = module.params['name']
+ group_path = module.params['path']
+ description = module.params['description']
+ state = module.params['state']
+ parent_identifier = module.params['parent']
+ group_visibility = module.params['visibility']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Define default group_path based on group_name
+ if group_path is None:
+ group_path = group_name.replace(" ", "_")
+
+ gitlab_group = GitLabGroup(module, gitlab_instance)
+
+ parent_group = None
+ if parent_identifier:
+ parent_group = findGroup(gitlab_instance, parent_identifier)
+ if not parent_group:
+ module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists")
+
+ group_exists = gitlab_group.existsGroup(parent_group.full_path + '/' + group_path)
+ else:
+ group_exists = gitlab_group.existsGroup(group_path)
+
+ if state == 'absent':
+ if group_exists:
+ gitlab_group.deleteGroup()
+ module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
+ else:
+ module.exit_json(changed=False, msg="Group deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_group.createOrUpdateGroup(group_name, parent_group, {
+ "path": group_path,
+ "description": description,
+ "visibility": group_visibility}):
+ module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py
new file mode 100644
index 00000000..8a3da2a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_group_members
+short_description: Manage group members on GitLab Server
+description:
+ - This module allows adding and removing members to/from a group, as well as changing a member's access level in a group on GitLab.
+version_added: '1.2.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - python-gitlab python module <= 1.15.0
+ - administrator rights on the GitLab server
+extends_documentation_fragment: community.general.auth_basic
+options:
+ api_token:
+ description:
+ - A personal access token to authenticate with the GitLab API.
+ required: true
+ type: str
+ gitlab_group:
+ description:
+ - The name of the GitLab group the member is added to/removed from.
+ required: true
+ type: str
+ gitlab_user:
+ description:
+ - The username of the member to add to/remove from the GitLab group.
+ required: true
+ type: str
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state=present).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+ state:
+ description:
+ - State of the member in the group.
+ - On C(present), it adds a user to a GitLab group.
+ - On C(absent), it removes a user from a GitLab group.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Add a user to a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ access_level: developer
+ state: present
+
+- name: Remove a user from a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_PY_GITLAB = True
+except ImportError:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_PY_GITLAB = False
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gl):
+ self._module = module
+ self._gitlab = gl
+
+ # get user id if the user exists
+ def get_user_id(self, gitlab_user):
+ user_exists = self._gitlab.users.list(username=gitlab_user)
+ if user_exists:
+ return user_exists[0].id
+
+ # get group id if group exists
+ def get_group_id(self, gitlab_group):
+ group_exists = self._gitlab.groups.list(search=gitlab_group)
+ if group_exists:
+ return group_exists[0].id
+
+ # get all members in a group
+ def get_members_in_a_group(self, gitlab_group_id):
+ group = self._gitlab.groups.get(gitlab_group_id)
+ return group.members.list()
+
+ # check if the user is a member of the group
+ def is_user_a_member(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return True
+ return False
+
+ # add user to a group
+ def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ add_member = group.members.create(
+ {'user_id': gitlab_user_id, 'access_level': access_level})
+
+ if add_member:
+ return add_member.username
+
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to add member to the Group, Group ID %s: %s" % (gitlab_group_id, e))
+
+ # remove user from a group
+ def remove_user_from_group(self, gitlab_user_id, gitlab_group_id):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ group.members.delete(gitlab_user_id)
+
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ self._module.fail_json(
+ msg="Failed to remove member from GitLab group, ID %s: %s" % (gitlab_group_id, e))
+
+ # get user's access level
+ def get_user_access_level(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return member.access_level
+
+ # update user's access level in a group
+ def update_user_access_level(self, members, gitlab_user_id, access_level):
+ for member in members:
+ if member.id == gitlab_user_id:
+ try:
+ member.access_level = access_level
+ member.save()
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e))
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', required=True, no_log=True),
+ gitlab_group=dict(type='str', required=True),
+ gitlab_user=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'])
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ required_if=[
+ ['state', 'present', ['access_level']],
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PY_GITLAB:
+ module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)
+
+ gitlab_group = module.params['gitlab_group']
+ gitlab_user = module.params['gitlab_user']
+ state = module.params['state']
+ access_level = module.params['access_level']
+
+ # convert access level string input to int
+ if access_level:
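+ # gitlab.GUEST_ACCESS etc. are integer constants from python-gitlab.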
+ access_level_int = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS
+ }
+
+ access_level = access_level_int[access_level]
+
+ # connect to gitlab server
+ gl = gitlabAuthentication(module)
+
+ group = GitLabGroup(module, gl)
+
+ gitlab_user_id = group.get_user_id(gitlab_user)
+ gitlab_group_id = group.get_group_id(gitlab_group)
+
+ # group doesn't exist
+ if not gitlab_group_id:
+ module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+ # user doesn't exist
+ if not gitlab_user_id:
+ if state == 'absent':
+ module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the group" % gitlab_user)
+ else:
+ module.fail_json(msg="user '%s' not found." % gitlab_user)
+
+ members = group.get_members_in_a_group(gitlab_group_id)
+ is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+ # check if the user is a member in the group
+ if not is_user_a_member:
+ if state == 'present':
+ # add user to the group
+ if not module.check_mode:
+ group.add_member_to_group(gitlab_user_id, gitlab_group_id, access_level)
+ module.exit_json(changed=True, result="Successfully added user '%s' to the group." % gitlab_user)
+ # state as absent
+ else:
+ module.exit_json(changed=False, result="User, '%s', is not a member in the group. No change to report" % gitlab_user)
+ # in case that a user is a member
+ else:
+ if state == 'present':
+ # compare the access level
+ user_access_level = group.get_user_access_level(members, gitlab_user_id)
+ if user_access_level == access_level:
+ module.exit_json(changed=False, result="User, '%s', is already a member in the group. No change to report" % gitlab_user)
+ else:
+ # update the access level for the user
+ if not module.check_mode:
+ group.update_user_access_level(members, gitlab_user_id, access_level)
+ module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user)
+ else:
+ # remove the user from the group
+ if not module.check_mode:
+ group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
+ module.exit_json(changed=True, result="Successfully removed user, '%s', from the group" % gitlab_user)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py
new file mode 100644
index 00000000..dd20a0b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Florent Madiot (scodeman@scode.io)
+# Based on code:
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_group_variable
+short_description: Creates, updates, or deletes GitLab group variables
+version_added: 1.2.0
+description:
+ - Creates a group variable if it does not exist.
+ - When a group variable does exist, its value will be updated when the values are different.
+  - Variables which are present in the GitLab group but not in the playbook either stay
+    untouched (I(purge) is C(false)) or are deleted (I(purge) is C(true)).
+author:
+ - Florent Madiot (@scodeman)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete group variable.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ group:
+ description:
+ - The path and name of the group.
+ required: true
+ type: str
+ purge:
+ description:
+      - When set to C(true), delete all variables which are not mentioned in the task.
+ default: false
+ type: bool
+ vars:
+ description:
+      - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control about whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+ default: {}
+ type: dict
+notes:
+- Supports I(check_mode).
+'''
+
+
+EXAMPLES = r'''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
+'''
+
+RETURN = r'''
+group_variable:
+  description: Four lists of the variable names which were added, updated, removed or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabGroupVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.group = self.get_group(module.params['group'])
+ self._module = module
+
+ def get_group(self, group_name):
+ return self.repo.groups.get(group_name)
+
+ def list_all_group_variables(self):
+ page_nb = 1
+ variables = []
+ vars_page = self.group.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.group.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.group.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.group.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_group_variables()
+ existing_variables = [x.get_id() for x in gitlab_keys]
+
+ for key in var_list:
+ if not isinstance(var_list[key], (string_types, integer_types, float, dict)):
+ module.fail_json(msg="Value of %s variable must be of type string, integer, float or dict, passed %s" % (key, var_list[key].__class__.__name__))
+
+ for key in var_list:
+
+ if isinstance(var_list[key], (string_types, integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+
+ if key in existing_variables:
+ index = existing_variables.index(key)
+ existing_variables[index] = None
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ group=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance)
+
+ changed, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=changed, group_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
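
Note the page-until-empty loop in list_all_group_variables() above: the purge logic is
only correct if every existing key is fetched. A minimal standalone sketch of that loop
(instance, token and group path are placeholders):

import gitlab

gl = gitlab.Gitlab('https://gitlab.com', private_token='secret_access_token')
group = gl.groups.get('scodeman/testgroup')

variables, page = [], 1
while True:
    chunk = group.variables.list(page=page)  # an empty page marks the end
    if not chunk:
        break
    variables.extend(chunk)
    page += 1

print([v.get_id() for v in variables])  # the keys native_python_main() compares against

python-gitlab can also fetch all pages in one call with list(all=True); the explicit loop
keeps the behaviour obvious.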
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py
new file mode 100644
index 00000000..bc4b6ecb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_hook
+short_description: Manages GitLab project hooks.
+description:
+  - Adds, updates and removes project hooks.
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+      - ID or full path of the project in the form of group/name.
+ required: true
+ type: str
+ hook_url:
+ description:
+      - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion.
+ required: true
+ type: str
+ state:
+ description:
+      - When C(present), the hook will be updated to match the input or created if it doesn't exist.
+      - When C(absent), the hook will be deleted if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ push_events:
+ description:
+ - Trigger hook on push events.
+ type: bool
+ default: yes
+ push_events_branch_filter:
+ description:
+      - Branch name or wildcard on which to trigger the hook for push events.
+ type: str
+ version_added: '0.2.0'
+ issues_events:
+ description:
+ - Trigger hook on issues events.
+ type: bool
+ default: no
+ merge_requests_events:
+ description:
+ - Trigger hook on merge requests events.
+ type: bool
+ default: no
+ tag_push_events:
+ description:
+ - Trigger hook on tag push events.
+ type: bool
+ default: no
+ note_events:
+ description:
+ - Trigger hook on note events or when someone adds a comment.
+ type: bool
+ default: no
+ job_events:
+ description:
+ - Trigger hook on job events.
+ type: bool
+ default: no
+ pipeline_events:
+ description:
+ - Trigger hook on pipeline events.
+ type: bool
+ default: no
+ wiki_page_events:
+ description:
+ - Trigger hook on wiki events.
+ type: bool
+ default: no
+ hook_validate_certs:
+ description:
+ - Whether GitLab will do SSL verification when triggering the hook.
+ type: bool
+ default: no
+ aliases: [ enable_ssl_verification ]
+ token:
+ description:
+ - Secret token to validate hook messages at the receiver.
+ - If this is present it will always result in a change as it cannot be retrieved from GitLab.
+ - Will show up in the X-GitLab-Token HTTP request header.
+ required: false
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Adding a project hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: present
+ push_events: yes
+ tag_push_events: yes
+ hook_validate_certs: no
+ token: "my-super-secret-token-that-my-ci-server-will-check"
+
+- name: "Delete the previous hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+
+- name: "Delete a hook by numeric project id"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: 10
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+hook:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabHook(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.hookObject = None
+
+ '''
+ @param project Project Object
+ @param hook_url Url to call on event
+    @param options Attributes of the hook
+ '''
+ def createOrUpdateHook(self, project, hook_url, options):
+ changed = False
+
+        # Because existsHook() has already been called in main()
+ if self.hookObject is None:
+ hook = self.createHook(project, {
+ 'url': hook_url,
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+ changed = True
+ else:
+ changed, hook = self.updateHook(self.hookObject, {
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+
+ self.hookObject = hook
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)
+
+ try:
+ hook.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update hook: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the hook
+ '''
+ def createHook(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ hook = project.hooks.create(arguments)
+
+ return hook
+
+ '''
+ @param hook Hook Object
+ @param arguments Attributes of the hook
+ '''
+ def updateHook(self, hook, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(hook, arg_key) != arguments[arg_key]:
+ setattr(hook, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, hook)
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def findHook(self, project, hook_url):
+ hooks = project.hooks.list()
+ for hook in hooks:
+ if (hook.url == hook_url):
+ return hook
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def existsHook(self, project, hook_url):
+        # When the hook exists, the object will be stored in self.hookObject.
+ hook = self.findHook(project, hook_url)
+ if hook:
+ self.hookObject = hook
+ return True
+ return False
+
+ def deleteHook(self):
+ if self._module.check_mode:
+ return True
+
+ return self.hookObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ hook_url=dict(type='str', required=True),
+ push_events=dict(type='bool', default=True),
+ push_events_branch_filter=dict(type='str', default=''),
+ issues_events=dict(type='bool', default=False),
+ merge_requests_events=dict(type='bool', default=False),
+ tag_push_events=dict(type='bool', default=False),
+ note_events=dict(type='bool', default=False),
+ job_events=dict(type='bool', default=False),
+ pipeline_events=dict(type='bool', default=False),
+ wiki_page_events=dict(type='bool', default=False),
+ hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
+ token=dict(type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ hook_url = module.params['hook_url']
+ push_events = module.params['push_events']
+ push_events_branch_filter = module.params['push_events_branch_filter']
+ issues_events = module.params['issues_events']
+ merge_requests_events = module.params['merge_requests_events']
+ tag_push_events = module.params['tag_push_events']
+ note_events = module.params['note_events']
+ job_events = module.params['job_events']
+ pipeline_events = module.params['pipeline_events']
+ wiki_page_events = module.params['wiki_page_events']
+ enable_ssl_verification = module.params['hook_validate_certs']
+ hook_token = module.params['token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_hook = GitLabHook(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+        module.fail_json(msg="Failed to create hook: project %s doesn't exist" % project_identifier)
+
+ hook_exists = gitlab_hook.existsHook(project, hook_url)
+
+ if state == 'absent':
+ if hook_exists:
+ gitlab_hook.deleteHook()
+ module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
+ else:
+            module.exit_json(changed=False, msg="Hook already deleted or does not exist")
+
+ if state == 'present':
+ if gitlab_hook.createOrUpdateHook(project, hook_url, {
+ "push_events": push_events,
+ "push_events_branch_filter": push_events_branch_filter,
+ "issues_events": issues_events,
+ "merge_requests_events": merge_requests_events,
+ "tag_push_events": tag_push_events,
+ "note_events": note_events,
+ "job_events": job_events,
+ "pipeline_events": pipeline_events,
+ "wiki_page_events": wiki_page_events,
+ "enable_ssl_verification": enable_ssl_verification,
+ "token": hook_token}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
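
Since hook_url is the primary key for updates and deletion, the module reduces to a
find-then-create-or-update cycle. A standalone sketch of that cycle (project path and
URLs are placeholders):

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='secret_access_token')
project = gl.projects.get('my_group/my_project')

hook_url = 'https://my-ci-server.example.com/gitlab-hook'
hook = next((h for h in project.hooks.list() if h.url == hook_url), None)

if hook is None:
    hook = project.hooks.create({'url': hook_url, 'push_events': True})
else:
    hook.push_events = True  # assign attributes, then save(), as updateHook() does
    hook.save()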
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py
new file mode 100644
index 00000000..98631c74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes GitLab Projects
+description:
+ - When the project does not exist in GitLab, it will be created.
+  - When the project does exist and state=absent, the project will be deleted.
+ - When changes are made to the project, the project will be updated.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ group:
+ description:
+      - ID or full path of the group to which this project belongs.
+ type: str
+ name:
+ description:
+ - The name of the project
+ required: true
+ type: str
+ path:
+ description:
+      - The path of the project you want to create; this will be server_url/<group>/path.
+ - If not supplied, name will be used.
+ type: str
+ description:
+ description:
+      - A description for the project.
+ type: str
+ issues_enabled:
+ description:
+ - Whether you want to create issues or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ merge_requests_enabled:
+ description:
+ - If merge requests can be made or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ wiki_enabled:
+ description:
+      - If a wiki for this project should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ snippets_enabled:
+ description:
+ - If creating snippets should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ visibility:
+ description:
+ - Private. Project access must be granted explicitly for each user.
+      - Internal. The project can be cloned by any logged-in user.
+ - Public. The project can be cloned without any authentication.
+ default: private
+ type: str
+ choices: ["private", "internal", "public"]
+ aliases:
+ - visibility_level
+ import_url:
+ description:
+      - Git repository which will be imported into GitLab.
+ - GitLab server needs read access to this git repository.
+ required: false
+ type: str
+ state:
+ description:
+      - Create or delete project.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ merge_method:
+ description:
+ - What requirements are placed upon merges.
+      - Possible values are C(merge) (merge commit), C(rebase_merge) (merge commit with semi-linear history) and C(ff) (fast-forward merges only).
+ type: str
+ choices: ["ff", "merge", "rebase_merge"]
+ default: merge
+ version_added: "1.0.0"
+'''
+
+EXAMPLES = r'''
+- name: Delete GitLab Project
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_project
+ state: absent
+ delegate_to: localhost
+
+- name: Create GitLab Project in group Ansible
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_project
+ group: ansible
+ issues_enabled: False
+ merge_method: rebase_merge
+ wiki_enabled: True
+ snippets_enabled: True
+ import_url: http://git.example.com/example/lab.git
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+msg:
+ description: Success or failure message.
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server.
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API.
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+project:
+ description: API object.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication
+
+
+class GitLabProject(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.projectObject = None
+
+ '''
+ @param project_name Name of the project
+ @param namespace Namespace Object (User or Group)
+ @param options Options of the project
+ '''
+ def createOrUpdateProject(self, project_name, namespace, options):
+ changed = False
+
+        # Because existsProject() has already been called in main()
+ if self.projectObject is None:
+ project = self.createProject(namespace, {
+ 'name': project_name,
+ 'path': options['path'],
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility'],
+ 'import_url': options['import_url']})
+ changed = True
+ else:
+ changed, project = self.updateProject(self.projectObject, {
+ 'name': project_name,
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility']})
+
+ self.projectObject = project
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
+
+ try:
+ project.save()
+ except Exception as e:
+                self._module.fail_json(msg="Failed to update project: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param namespace Namespace Object (User or Group)
+ @param arguments Attributes of the project
+ '''
+ def createProject(self, namespace, arguments):
+ if self._module.check_mode:
+ return True
+
+ arguments['namespace_id'] = namespace.id
+ try:
+ project = self._gitlab.projects.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create project: %s " % to_native(e))
+
+ return project
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the project
+ '''
+ def updateProject(self, project, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(project, arg_key) != arguments[arg_key]:
+ setattr(project, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, project)
+
+ def deleteProject(self):
+ if self._module.check_mode:
+ return True
+
+ project = self.projectObject
+
+ return project.delete()
+
+ '''
+ @param namespace User/Group object
+    @param path Path of the project
+ '''
+ def existsProject(self, namespace, path):
+ # When project exists, object will be stored in self.projectObject.
+ project = findProject(self._gitlab, namespace.full_path + '/' + path)
+ if project:
+ self.projectObject = project
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ group=dict(type='str'),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ issues_enabled=dict(type='bool', default=True),
+ merge_requests_enabled=dict(type='bool', default=True),
+ merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]),
+ wiki_enabled=dict(type='bool', default=True),
+ snippets_enabled=dict(default=True, type='bool'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]),
+ import_url=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_identifier = module.params['group']
+ project_name = module.params['name']
+ project_path = module.params['path']
+ project_description = module.params['description']
+ issues_enabled = module.params['issues_enabled']
+ merge_requests_enabled = module.params['merge_requests_enabled']
+ merge_method = module.params['merge_method']
+ wiki_enabled = module.params['wiki_enabled']
+ snippets_enabled = module.params['snippets_enabled']
+ visibility = module.params['visibility']
+ import_url = module.params['import_url']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Set project_path to project_name if it is empty.
+ if project_path is None:
+ project_path = project_name.replace(" ", "_")
+
+ gitlab_project = GitLabProject(module, gitlab_instance)
+
+ if group_identifier:
+ group = findGroup(gitlab_instance, group_identifier)
+ if group is None:
+            module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier)
+
+ namespace = gitlab_instance.namespaces.get(group.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+ else:
+ user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0]
+ namespace = gitlab_instance.namespaces.get(user.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+
+ if state == 'absent':
+ if project_exists:
+ gitlab_project.deleteProject()
+ module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
+ else:
+            module.exit_json(changed=False, msg="Project already deleted or does not exist")
+
+ if state == 'present':
+ if gitlab_project.createOrUpdateProject(project_name, namespace, {
+ "path": project_path,
+ "description": project_description,
+ "issues_enabled": issues_enabled,
+ "merge_requests_enabled": merge_requests_enabled,
+ "merge_method": merge_method,
+ "wiki_enabled": wiki_enabled,
+ "snippets_enabled": snippets_enabled,
+ "visibility": visibility,
+ "import_url": import_url}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
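
The only subtle part of main() above is namespace resolution (a group's id versus the
authenticated user's own namespace); project creation itself is a single call. A minimal
sketch of creating a project under a group (names are placeholders):

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='secret_access_token')

group = gl.groups.get('ansible')
project = gl.projects.create({
    'name': 'my_first_project',
    'namespace_id': group.id,  # the same field createProject() injects
    'visibility': 'private',
    'merge_method': 'merge',
})
print(project.path_with_namespace)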
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py
new file mode 100644
index 00000000..9803f76b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_project_variable
+short_description: Creates/updates/deletes GitLab project variables
+description:
+ - When a project variable does not exist, it will be created.
+ - When a project variable does exist, its value will be updated when the values are different.
+  - Variables which are present in the GitLab project but not in the playbook either stay
+    untouched (I(purge) is C(false)) or are deleted (I(purge) is C(true)).
+author:
+ - "Markus Bergholz (@markuman)"
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete project variable.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ project:
+ description:
+ - The path and name of the project.
+ required: true
+ type: str
+ purge:
+ description:
+      - When set to true, all variables which are not mentioned in the task will be deleted.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control about whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+        See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+ default: {}
+ type: dict
+'''
+
+
+EXAMPLES = '''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
+'''
+
+RETURN = '''
+project_variable:
+  description: Four lists of the variable names which were added, updated, removed or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabProjectVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.project = self.get_project(module.params['project'])
+ self._module = module
+
+ def get_project(self, project_name):
+ return self.repo.projects.get(project_name)
+
+ def list_all_project_variables(self):
+ page_nb = 1
+ variables = []
+ vars_page = self.project.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.project.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.project.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.project.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ existing_variables = [x.get_id() for x in gitlab_keys]
+
+ for key in var_list:
+
+        if isinstance(var_list[key], (string_types, integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+ else:
+            module.fail_json(msg="value must be of type string, integer, float or dict")
+
+ if key in existing_variables:
+ index = existing_variables.index(key)
+ existing_variables[index] = None
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ project=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance)
+
+ change, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=change, project_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
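
One behaviour of update_variable() above is worth spelling out: a value-only change is
saved in place, while a change to masked, protected or variable_type is applied by
deleting and recreating the variable. A standalone sketch of that fallback (instance,
project path and key are placeholders):

import gitlab

gl = gitlab.Gitlab('https://gitlab.com', private_token='secret_access_token')
project = gl.projects.get('markuman/dotfiles')

var = project.variables.get('ACCESS_KEY_ID')
if not var.masked:
    # Mirrors update_variable(): flag changes recreate the variable.
    project.variables.delete(var.key)
    project.variables.create({'key': var.key, 'value': var.value,
                              'masked': True, 'protected': var.protected,
                              'variable_type': var.variable_type})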
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py
new file mode 100644
index 00000000..70384914
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Samy Coenen <samy.coenen@nubera.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_runner
+short_description: Create, modify and delete GitLab Runners.
+description:
+ - Register, update and delete runners with the GitLab API.
+ - All operations are performed using the GitLab API v4.
+ - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
+ - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
+ U(https://$GITLAB_URL/profile/personal_access_tokens).
+ - A valid registration token is required for registering a new runner.
+ To create shared runners, you need to ask your administrator to give you this token.
+ It can be found at U(https://$GITLAB_URL/admin/runners/).
+notes:
+  - To create a new runner, at least the C(api_token), C(description) and C(api_url) options are required.
+ - Runners need to have unique descriptions.
+author:
+ - Samy Coenen (@SamyCoenen)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab >= 1.5.0
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - Your private token to interact with the GitLab API.
+ type: str
+ description:
+ description:
+ - The unique name of the runner.
+ required: True
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name.
+ required: False
+ default: present
+ choices: ["present", "absent"]
+ type: str
+ registration_token:
+ description:
+ - The registration token is used to register new runners.
+ required: True
+ type: str
+ active:
+ description:
+      - Define if the runner is immediately active after creation.
+ required: False
+ default: yes
+ type: bool
+ locked:
+ description:
+ - Determines if the runner is locked or not.
+ required: False
+ default: False
+ type: bool
+ access_level:
+ description:
+ - Determines if a runner can pick up jobs from protected branches.
+ required: False
+ default: ref_protected
+ choices: ["ref_protected", "not_protected"]
+ type: str
+ maximum_timeout:
+ description:
+ - The maximum timeout that a runner has to pick up a specific job.
+ required: False
+ default: 3600
+ type: int
+ run_untagged:
+ description:
+ - Run untagged jobs or not.
+ required: False
+ default: yes
+ type: bool
+ tag_list:
+ description: The tags that apply to the runner.
+ required: False
+ default: []
+ type: list
+'''
+
+EXAMPLES = '''
+- name: "Register runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ registration_token: 4gfdsg345
+ description: Docker Machine t1
+ state: present
+ active: True
+ tag_list: ['docker']
+ run_untagged: False
+ locked: False
+
+- name: "Delete runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+runner:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+try:
+ cmp
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+
+class GitLabRunner(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.runnerObject = None
+
+ def createOrUpdateRunner(self, description, options):
+ changed = False
+
+        # Because existsRunner() has already been called in main()
+ if self.runnerObject is None:
+ runner = self.createRunner({
+ 'description': description,
+ 'active': options['active'],
+ 'token': options['registration_token'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'tag_list': options['tag_list']})
+ changed = True
+ else:
+ changed, runner = self.updateRunner(self.runnerObject, {
+ 'active': options['active'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'access_level': options['access_level'],
+ 'tag_list': options['tag_list']})
+
+ self.runnerObject = runner
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description)
+
+ try:
+ runner.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update runner: %s " % to_native(e))
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the runner
+ '''
+ def createRunner(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ runner = self._gitlab.runners.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
+
+ return runner
+
+ '''
+ @param runner Runner object
+ @param arguments Attributes of the runner
+ '''
+ def updateRunner(self, runner, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if isinstance(arguments[arg_key], list):
+ list1 = getattr(runner, arg_key)
+ list1.sort()
+ list2 = arguments[arg_key]
+ list2.sort()
+ if cmp(list1, list2):
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+ else:
+ if getattr(runner, arg_key) != arguments[arg_key]:
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, runner)
+
+ '''
+ @param description Description of the runner
+ '''
+ def findRunner(self, description):
+ runners = self._gitlab.runners.all(as_list=False)
+ for runner in runners:
+ if (runner['description'] == description):
+ return self._gitlab.runners.get(runner['id'])
+
+ '''
+ @param description Description of the runner
+ '''
+ def existsRunner(self, description):
+ # When runner exists, object will be stored in self.runnerObject.
+ runner = self.findRunner(description)
+
+ if runner:
+ self.runnerObject = runner
+ return True
+ return False
+
+ def deleteRunner(self):
+ if self._module.check_mode:
+ return True
+
+ runner = self.runnerObject
+
+ return runner.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ description=dict(type='str', required=True, aliases=["name"]),
+ active=dict(type='bool', default=True),
+ tag_list=dict(type='list', default=[]),
+ run_untagged=dict(type='bool', default=True),
+ locked=dict(type='bool', default=False),
+ access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
+ maximum_timeout=dict(type='int', default=3600),
+ registration_token=dict(type='str', required=True, no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ runner_description = module.params['description']
+ runner_active = module.params['active']
+ tag_list = module.params['tag_list']
+ run_untagged = module.params['run_untagged']
+ runner_locked = module.params['locked']
+ access_level = module.params['access_level']
+ maximum_timeout = module.params['maximum_timeout']
+ registration_token = module.params['registration_token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_runner = GitLabRunner(module, gitlab_instance)
+ runner_exists = gitlab_runner.existsRunner(runner_description)
+
+ if state == 'absent':
+ if runner_exists:
+ gitlab_runner.deleteRunner()
+ module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description)
+ else:
+            module.exit_json(changed=False, msg="Runner already deleted or does not exist")
+
+ if state == 'present':
+ if gitlab_runner.createOrUpdateRunner(runner_description, {
+ "active": runner_active,
+ "tag_list": tag_list,
+ "run_untagged": run_untagged,
+ "locked": runner_locked,
+ "access_level": access_level,
+ "maximum_timeout": maximum_timeout,
+ "registration_token": registration_token}):
+ module.exit_json(changed=True, runner=gitlab_runner.runnerObject._attrs,
+ msg="Successfully created or updated the runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, runner=gitlab_runner.runnerObject._attrs,
+ msg="No need to update the runner %s" % runner_description)
+
+
+if __name__ == '__main__':
+ main()
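
The cmp shim near the imports exists because Python 3 removed the built-in cmp();
updateRunner() uses it only to ask whether two sorted tag lists differ. A quick
standalone check of that comparison:

def cmp(a, b):
    # Python 2 semantics: negative, zero or positive.
    return (a > b) - (a < b)

current = ['docker', 'linux']
wanted = ['linux', 'docker']

print(cmp(sorted(current), sorted(wanted)))  # 0: equal once sorted, so no update is needed

A plain sorted(current) != sorted(wanted) would do the same job; the shim keeps the
original Python 2 code path intact.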
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py
new file mode 100644
index 00000000..1e8ee65a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
+description:
+ - When the user does not exist in GitLab, it will be created.
+ - When the user exists and state=absent, the user will be deleted.
+ - When the user exists and state=blocked, the user will be blocked.
+ - When changes are made to user, the user will be updated.
+notes:
+ - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+ - administrator rights on the GitLab server
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the user you want to create.
+ - Required only if C(state) is set to C(present).
+ type: str
+ username:
+ description:
+ - The username of the user.
+ required: true
+ type: str
+ password:
+ description:
+ - The password of the user.
+      - The GitLab server enforces a minimum password length of 8 characters, so set this value to 8 or more characters.
+ - Required only if C(state) is set to C(present).
+ type: str
+ email:
+ description:
+ - The email that belongs to the user.
+ - Required only if C(state) is set to C(present).
+ type: str
+ sshkey_name:
+ description:
+      - The name of the SSH key.
+ type: str
+ sshkey_file:
+ description:
+      - The SSH key itself.
+ type: str
+ group:
+ description:
+      - ID or full path of the parent group in the form of group/name.
+      - Add user as a member to this group.
+ type: str
+ access_level:
+ description:
+ - The access level to the group. One of the following can be used.
+ - guest
+ - reporter
+ - developer
+ - master (alias for maintainer)
+ - maintainer
+ - owner
+ default: guest
+ type: str
+ choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"]
+ state:
+ description:
+ - Create, delete or block a user.
+ default: present
+ type: str
+ choices: ["present", "absent", "blocked", "unblocked"]
+ confirm:
+ description:
+ - Require confirmation.
+ type: bool
+ default: yes
+ isadmin:
+ description:
+ - Grant admin privileges to the user.
+ type: bool
+ default: no
+ external:
+ description:
+ - Define external parameter for this user.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: absent
+
+- name: "Create GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: My Name
+ username: myusername
+ password: mysecretpassword
+ email: me@example.com
+ sshkey_name: MySSH
+ sshkey_file: ssh-rsa AAAAB3NzaC1yc...
+ state: present
+ group: super_group/mon_group
+ access_level: owner
+
+- name: "Block GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: blocked
+
+- name: "Unblock GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: unblocked
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: JSON-parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: The error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+user:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabUser(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.userObject = None
+ self.ACCESS_LEVEL = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'master': gitlab.MAINTAINER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS}
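+ # note: 'master' above is kept as an alias and maps to the same
+ # constant as 'maintainer' (gitlab.MAINTAINER_ACCESS), so both
+ # choices behave identically.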
+
+ '''
+ @param username Username of the user
+ @param options User options
+ '''
+ def createOrUpdateUser(self, username, options):
+ changed = False
+ potentially_changed = False
+
+ # Because we have already called existsUser() in main()
+ if self.userObject is None:
+ user = self.createUser({
+ 'name': options['name'],
+ 'username': username,
+ 'password': options['password'],
+ 'email': options['email'],
+ 'skip_confirmation': not options['confirm'],
+ 'admin': options['isadmin'],
+ 'external': options['external']})
+ changed = True
+ else:
+ changed, user = self.updateUser(
+ self.userObject, {
+ # add "normal" parameters here, put uncheckable
+ # params in the dict below
+ 'name': {'value': options['name']},
+ 'email': {'value': options['email']},
+
+ # note: for some attributes like this one the key
+ # from reading back from server is unfortunately
+ # different to the one needed for pushing/writing,
+ # in that case use the optional setter key
+ 'is_admin': {
+ 'value': options['isadmin'], 'setter': 'admin'
+ },
+ 'external': {'value': options['external']},
+ },
+ {
+ # put "uncheckable" params here, this means params
+ # which the gitlab does accept for setting but does
+ # not return any information about it
+ 'skip_reconfirmation': {'value': not options['confirm']},
+ 'password': {'value': options['password']},
+ }
+ )
+
+ # note: as we unfortunately have some uncheckable parameters
+ # where it is not possible to determine if the update
+ # changed something or not, we must assume here that a
+ # change happened and that a user object update is needed
+ potentially_changed = True
+
+ # Assign ssh keys
+ if options['sshkey_name'] and options['sshkey_file']:
+ key_changed = self.addSshKeyToUser(user, {
+ 'name': options['sshkey_name'],
+ 'file': options['sshkey_file']})
+ changed = changed or key_changed
+
+ # Assign group
+ if options['group_path']:
+ group_changed = self.assignUserToGroup(user, options['group_path'], options['access_level'])
+ changed = changed or group_changed
+
+ self.userObject = user
+ if (changed or potentially_changed) and not self._module.check_mode:
+ try:
+ user.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
+
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)
+ return True
+ else:
+ return False
+
+ '''
+ @param user User object
+ '''
+ def getUserId(self, user):
+ if user is not None:
+ return user.id
+ return None
+
+ '''
+ @param user User object
+ @param sshkey_name Name of the ssh key
+ '''
+ def sshKeyExists(self, user, sshkey_name):
+ keyList = map(lambda k: k.title, user.keys.list())
+
+ return sshkey_name in keyList
+
+ '''
+ @param user User object
+ @param sshkey Dict containing sshkey infos {"name": "", "file": ""}
+ '''
+ def addSshKeyToUser(self, user, sshkey):
+ if not self.sshKeyExists(user, sshkey['name']):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user.keys.create({
+ 'title': sshkey['name'],
+ 'key': sshkey['file']})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to find
+ '''
+ def findMember(self, group, user_id):
+ try:
+ member = group.members.get(user_id)
+ except gitlab.exceptions.GitlabGetError:
+ return None
+ return member
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ '''
+ def memberExists(self, group, user_id):
+ member = self.findMember(group, user_id)
+
+ return member is not None
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ @param access_level GitLab access_level to check
+ '''
+ def memberAsGoodAccessLevel(self, group, user_id, access_level):
+ member = self.findMember(group, user_id)
+
+ return member.access_level == access_level
+
+ '''
+ @param user User object
+ @param group_identifier Id or complete path of the group, including the parent group path. <parent_path>/<group_path>
+ @param access_level GitLab access_level to assign
+ '''
+ def assignUserToGroup(self, user, group_identifier, access_level):
+ group = findGroup(self._gitlab, group_identifier)
+
+ if self._module.check_mode:
+ return True
+
+ if group is None:
+ return False
+
+ if self.memberExists(group, self.getUserId(user)):
+ member = self.findMember(group, self.getUserId(user))
+ if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]):
+ member.access_level = self.ACCESS_LEVEL[access_level]
+ member.save()
+ return True
+ else:
+ try:
+ group.members.create({
+ 'user_id': self.getUserId(user),
+ 'access_level': self.ACCESS_LEVEL[access_level]})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param user User object
+ @param arguments User attributes
+ '''
+ def updateUser(self, user, arguments, uncheckable_args):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ av = arg_value['value']
+
+ if av is not None:
+ if getattr(user, arg_key) != av:
+ setattr(user, arg_value.get('setter', arg_key), av)
+ changed = True
+
+ for arg_key, arg_value in uncheckable_args.items():
+ av = arg_value['value']
+
+ if av is not None:
+ setattr(user, arg_value.get('setter', arg_key), av)
+
+ return (changed, user)
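+
+ # A minimal usage sketch (not part of the module API, values are
+ # illustrative) of the checkable/uncheckable split above; 'setter' is
+ # used when the write key differs from the read key:
+ #
+ # changed, user = self.updateUser(
+ # user,
+ # {'is_admin': {'value': True, 'setter': 'admin'}}, # checkable
+ # {'password': {'value': 'NewSecret123'}}) # uncheckable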
+
+ '''
+ @param arguments User attributes
+ '''
+ def createUser(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user = self._gitlab.users.create(arguments)
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to create user: %s " % to_native(e))
+
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def findUser(self, username):
+ users = self._gitlab.users.list(search=username)
+ for user in users:
+ if user.username == username:
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def existsUser(self, username):
+ # When user exists, object will be stored in self.userObject.
+ user = self.findUser(username)
+ if user:
+ self.userObject = user
+ return True
+ return False
+
+ '''
+ @param username Username of the user
+ '''
+ def isActive(self, username):
+ user = self.findUser(username)
+ return user.attributes['state'] == 'active'
+
+ def deleteUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.delete()
+
+ def blockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.block()
+
+ def unblockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.unblock()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]),
+ username=dict(type='str', required=True),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str'),
+ sshkey_name=dict(type='str'),
+ sshkey_file=dict(type='str'),
+ group=dict(type='str'),
+ access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]),
+ confirm=dict(type='bool', default=True),
+ isadmin=dict(type='bool', default=False),
+ external=dict(type='bool', default=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['name', 'email', 'password']),
+ )
+ )
+
+ user_name = module.params['name']
+ state = module.params['state']
+ user_username = module.params['username'].lower()
+ user_password = module.params['password']
+ user_email = module.params['email']
+ user_sshkey_name = module.params['sshkey_name']
+ user_sshkey_file = module.params['sshkey_file']
+ group_path = module.params['group']
+ access_level = module.params['access_level']
+ confirm = module.params['confirm']
+ user_isadmin = module.params['isadmin']
+ user_external = module.params['external']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_user = GitLabUser(module, gitlab_instance)
+ user_exists = gitlab_user.existsUser(user_username)
+ if user_exists:
+ user_is_active = gitlab_user.isActive(user_username)
+ else:
+ user_is_active = False
+
+ if state == 'absent':
+ if user_exists:
+ gitlab_user.deleteUser()
+ module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User deleted or does not exists")
+
+ if state == 'blocked':
+ if user_exists and user_is_active:
+ gitlab_user.blockUser()
+ module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User already blocked or does not exists")
+
+ if state == 'unblocked':
+ if user_exists and not user_is_active:
+ gitlab_user.unblockUser()
+ module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User is not blocked or does not exists")
+
+ if state == 'present':
+ if gitlab_user.createOrUpdateUser(user_username, {
+ "name": user_name,
+ "password": user_password,
+ "email": user_email,
+ "sshkey_name": user_sshkey_name,
+ "sshkey_file": user_sshkey_file,
+ "group_path": group_path,
+ "access_level": access_level,
+ "confirm": confirm,
+ "isadmin": user_isadmin,
+ "external": user_external}):
+ module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py
new file mode 100644
index 00000000..5c084d3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Yeukhon Wong <yeukhon@acm.org>
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: hg
+short_description: Manages Mercurial (hg) repositories
+description:
+ - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address.
+author: "Yeukhon Wong (@yeukhon)"
+options:
+ repo:
+ description:
+ - The repository address.
+ required: yes
+ aliases: [ name ]
+ dest:
+ description:
+ - Absolute path of where the repository should be cloned to.
+ This parameter is required, unless C(clone) and C(update) are set to C(no).
+ revision:
+ description:
+ - Equivalent C(-r) option in hg command which could be the changeset, revision number,
+ branch name or even tag.
+ aliases: [ version ]
+ force:
+ description:
+ - Discards uncommitted changes. Runs C(hg update -C). Prior to
+ 1.9, the default was C(yes).
+ type: bool
+ default: 'no'
+ purge:
+ description:
+ - Deletes untracked files. Runs C(hg purge).
+ type: bool
+ default: 'no'
+ update:
+ description:
+ - If C(no), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: 'yes'
+ clone:
+ description:
+ - If C(no), do not clone the repository if it does not exist locally.
+ type: bool
+ default: 'yes'
+ executable:
+ description:
+ - Path to hg executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+notes:
+ - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
+ - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such,
+ if the underlying system still uses a Python version below 2.7.9, you will have issues checking out
+ bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+'''
+
+EXAMPLES = '''
+- name: Ensure the current working copy is inside the stable branch and delete untracked files if any.
+ community.general.hg:
+ repo: https://bitbucket.org/user/repo1
+ dest: /home/user/repo1
+ revision: stable
+ purge: yes
+
+- name: Get information about the repository whether or not it has already been cloned locally.
+ community.general.hg:
+ repo: git://bitbucket.org/user/repo
+ dest: /srv/checkout
+ clone: no
+ update: no
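+
+# A hypothetical example (values are illustrative) composed from the
+# documented revision and force options: pin the working copy to a tag
+# and discard any uncommitted local changes.
+- name: Check out tag 1.0, discarding local modifications
+ community.general.hg:
+ repo: https://bitbucket.org/user/repo1
+ dest: /home/user/repo1
+ revision: '1.0'
+ force: yes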
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Hg(object):
+ def __init__(self, module, dest, repo, revision, hg_path):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.hg_path = hg_path
+
+ def _command(self, args_list):
+ (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
+ return (rc, out, err)
+
+ def _list_untracked(self):
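+ # 'purge --print' only lists the files purge would delete, without
+ # removing them; '--config extensions.purge=' enables the bundled
+ # purge extension for this single invocation.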
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
+ return self._command(args)
+
+ def get_revision(self):
+ """
+ hg id -b -i -t returns a string in the format:
+ "<changeset>[+] <branch_name> <tag>"
+ This format lists the state of the current working copy,
+ and indicates whether there are uncommitted changes by the
+ plus sign. Otherwise, the sign is omitted.
+
+ Read the full description via hg id --help
+ """
+ (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def get_remote_revision(self):
+ (rc, out, err) = self._command(['id', self.repo])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def has_local_mods(self):
+ now = self.get_revision()
+ return '+' in now
+
+ def discard(self):
+ before = self.has_local_mods()
+ if not before:
+ return False
+
+ args = ['update', '-C', '-R', self.dest, '-r', '.']
+ (rc, out, err) = self._command(args)
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ after = self.has_local_mods()
+ if before != after and not after: # no more local modification
+ return True
+
+ def purge(self):
+ # before purge, find out if there are any untracked files
+ (rc1, out1, err1) = self._list_untracked()
+ if rc1 != 0:
+ self.module.fail_json(msg=err1)
+
+ # there are some untracked files
+ if out1 != '':
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
+ (rc2, out2, err2) = self._command(args)
+ if rc2 != 0:
+ self.module.fail_json(msg=err2)
+ return True
+ else:
+ return False
+
+ def cleanup(self, force, purge):
+ discarded = False
+ purged = False
+
+ if force:
+ discarded = self.discard()
+ if purge:
+ purged = self.purge()
+ return discarded or purged
+
+ def pull(self):
+ return self._command(
+ ['pull', '-R', self.dest, self.repo])
+
+ def update(self):
+ if self.revision is not None:
+ return self._command(['update', '-r', self.revision, '-R', self.dest])
+ return self._command(['update', '-R', self.dest])
+
+ def clone(self):
+ if self.revision is not None:
+ return self._command(['clone', self.repo, self.dest, '-r', self.revision])
+ return self._command(['clone', self.repo, self.dest])
+
+ @property
+ def at_revision(self):
+ """
+ There is no point in pulling from a potentially down/slow remote site
+ if the desired changeset is already the current changeset.
+ """
+ if self.revision is None or len(self.revision) < 7:
+ # Assume it's a rev number, tag, or branch
+ return False
+ (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ if out.startswith(self.revision):
+ return True
+ return False
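+
+ # For example (illustrative values): revision '1a2b3c4d5e6f' can match
+ # the local changeset id above and skip the pull, while revision
+ # 'stable' (a branch name shorter than 7 characters) never matches and
+ # always falls through to a pull.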
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True, aliases=['name']),
+ dest=dict(type='path'),
+ revision=dict(type='str', default=None, aliases=['version']),
+ force=dict(type='bool', default=False),
+ purge=dict(type='bool', default=False),
+ update=dict(type='bool', default=True),
+ clone=dict(type='bool', default=True),
+ executable=dict(type='str', default=None),
+ ),
+ )
+ repo = module.params['repo']
+ dest = module.params['dest']
+ revision = module.params['revision']
+ force = module.params['force']
+ purge = module.params['purge']
+ update = module.params['update']
+ clone = module.params['clone']
+ hg_path = module.params['executable'] or module.get_bin_path('hg', True)
+ if dest is not None:
+ hgrc = os.path.join(dest, '.hg/hgrc')
+
+ # initial states
+ before = ''
+ changed = False
+ cleaned = False
+
+ if not dest and (clone or update):
+ module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
+
+ hg = Hg(module, dest, repo, revision, hg_path)
+
+ # If there is no hgrc file, then assume repo is absent
+ # and perform clone. Otherwise, perform pull and update.
+ if not clone and not update:
+ out = hg.get_remote_revision()
+ module.exit_json(after=out, changed=False)
+ if not os.path.exists(hgrc):
+ if clone:
+ (rc, out, err) = hg.clone()
+ if rc != 0:
+ module.fail_json(msg=err)
+ else:
+ module.exit_json(changed=False)
+ elif not update:
+ # Just return having found a repo already in the dest path
+ before = hg.get_revision()
+ elif hg.at_revision:
+ # no update needed, don't pull
+ before = hg.get_revision()
+
+ # but force and purge if desired
+ cleaned = hg.cleanup(force, purge)
+ else:
+ # get the current state before doing pulling
+ before = hg.get_revision()
+
+ # can perform force and purge
+ cleaned = hg.cleanup(force, purge)
+
+ (rc, out, err) = hg.pull()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ (rc, out, err) = hg.update()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ after = hg.get_revision()
+ if before != after or cleaned:
+ changed = True
+
+ module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py
new file mode 100644
index 00000000..77e3b153
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Renato Orgito <orgito@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: spectrum_device
+short_description: Creates/deletes devices in CA Spectrum
+description:
+ - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html).
+ - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1.
+author: "Renato Orgito (@orgito)"
+options:
+ device:
+ type: str
+ aliases: [ host, name ]
+ required: true
+ description:
+ - IP address of the device.
+ - If a hostname is given, it will be resolved to the IP address.
+ community:
+ type: str
+ description:
+ - SNMP community used for device discovery.
+ - Required when C(state=present).
+ required: true
+ landscape:
+ type: str
+ required: true
+ description:
+ - Landscape handle of the SpectroServer to which add or remove the device.
+ state:
+ type: str
+ required: false
+ description:
+ - On C(present) creates the device when it does not exist.
+ - On C(absent) removes the device when it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ url:
+ type: str
+ aliases: [ oneclick_url ]
+ required: true
+ description:
+ - HTTP or HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port].
+ url_username:
+ type: str
+ aliases: [ oneclick_user ]
+ required: true
+ description:
+ - Oneclick user name.
+ url_password:
+ type: str
+ aliases: [ oneclick_password ]
+ required: true
+ description:
+ - Oneclick user password.
+ use_proxy:
+ required: false
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in an environment
+ variable on the target hosts.
+ default: 'yes'
+ type: bool
+ validate_certs:
+ required: false
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+ agentport:
+ type: int
+ required: false
+ description:
+ - UDP port used for SNMP discovery.
+ default: 161
+notes:
+ - The devices will be created inside the I(Universe) container of the specified landscape.
+ - All the operations will be performed only on the specified landscape.
+'''
+
+EXAMPLES = '''
+- name: Add device to CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ community: secret
+ landscape: '0x100000'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ state: present
+
+
+- name: Remove device from CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ landscape: '{{ landscape_handle }}'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ use_proxy: no
+ state: absent
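+
+# A hypothetical variant (values are illustrative) using the documented
+# agentport option, for a device whose SNMP agent listens on a
+# non-standard UDP port.
+- name: Add device with a custom SNMP agent port
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ community: secret
+ agentport: 1161
+ landscape: '0x100000'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ state: present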
+'''
+
+RETURN = '''
+device:
+ description: device data when state = present
+ returned: success
+ type: dict
+ sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+'''
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
+ headers = {
+ "Content-Type": "application/xml",
+ "Accept": "application/xml"
+ }
+
+ url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+ response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+ if info['status'] == 401:
+ module.fail_json(msg="failed to authenticate to Oneclick server")
+
+ if info['status'] not in (200, 201, 204):
+ module.fail_json(msg=info['msg'])
+
+ return response.read()
+
+
+def post(resource, xml=None):
+ return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+ return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+ try:
+ device_ip = gethostbyname(module.params.get('device'))
+ except gaierror:
+ module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+ return device_ip
+
+
+def get_device(device_ip):
+ """Query OneClick for the device using the IP Address"""
+ resource = '/models'
+ landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+ landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
+
+ xml = """<?xml version="1.0" encoding="UTF-8"?>
+ <rs:model-request throttlesize="5"
+ xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+ <rs:target-models>
+ <rs:models-search>
+ <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+ <action-models>
+ <filtered-models>
+ <and>
+ <equals>
+ <model-type>SearchManager</model-type>
+ </equals>
+ <greater-than>
+ <attribute id="0x129fa">
+ <value>{mh_min}</value>
+ </attribute>
+ </greater-than>
+ <less-than>
+ <attribute id="0x129fa">
+ <value>{mh_max}</value>
+ </attribute>
+ </less-than>
+ </and>
+ </filtered-models>
+ <action>FIND_DEV_MODELS_BY_IP</action>
+ <attribute id="AttributeID.NETWORK_ADDRESS">
+ <value>{search_ip}</value>
+ </attribute>
+ </action-models>
+ </rs:search-criteria>
+ </rs:models-search>
+ </rs:target-models>
+ <rs:requested-attribute id="0x12d7f" /> <!--Network Address-->
+ </rs:model-request>
+ """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+ result = post(resource, xml=xml)
+
+ root = ET.fromstring(result)
+
+ if root.get('total-models') == '0':
+ return None
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+ # get the first device
+ model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+ if model.get('error'):
+ module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+ # get the attributes
+ model_handle = model.get('mh')
+
+ model_address = model.find('./*[@id="0x12d7f"]').text
+
+ # derive the landscape handle from the model handle of the device
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=model_address,
+ landscape=model_landscape)
+
+ return device
+
+
+def add_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device:
+ module.exit_json(changed=False, device=device)
+
+ if module.check_mode:
+ device = dict(
+ model_handle=None,
+ address=device_ip,
+ landscape="0x%x" % int(module.params.get('landscape'), 16))
+ module.exit_json(changed=True, device=device)
+
+ resource = 'model?ipaddress=' + device_ip + '&commstring=' + module.params.get('community')
+ resource += '&landscapeid=' + module.params.get('landscape')
+
+ if module.params.get('agentport', None):
+ resource += '&agentport=' + str(module.params.get('agentport', 161))
+
+ result = post(resource)
+ root = ET.fromstring(result)
+
+ if root.get('error') != 'Success':
+ module.fail_json(msg=root.get('error-message'))
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ model = root.find('ca:model', namespace)
+
+ model_handle = model.get('mh')
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=device_ip,
+ landscape=model_landscape,
+ )
+
+ module.exit_json(changed=True, device=device)
+
+
+def remove_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ resource = '/model/' + device['model_handle']
+ result = delete(resource)
+
+ root = ET.fromstring(result)
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ error = root.find('ca:error', namespace).text
+
+ if error != 'Success':
+ error_message = root.find('ca:error-message', namespace).text
+ module.fail_json(msg="%s %s" % (error, error_message))
+
+ module.exit_json(changed=True)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(required=True, aliases=['host', 'name']),
+ landscape=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ?
+ agentport=dict(type='int', default=161),
+ url=dict(required=True, aliases=['oneclick_url']),
+ url_username=dict(required=True, aliases=['oneclick_user']),
+ url_password=dict(required=True, no_log=True, aliases=['oneclick_password']),
+ use_proxy=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ required_if=[('state', 'present', ['community'])],
+ supports_check_mode=True
+ )
+
+ if module.params.get('state') == 'present':
+ add_device()
+ else:
+ remove_device()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
new file mode 100644
index 00000000..8f05da7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
@@ -0,0 +1,1543 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: spotinst_aws_elastigroup
+short_description: Create, update or delete Spotinst AWS Elastigroups
+author: Spotinst (@talzur)
+description:
+ - Can create, update, or delete Spotinst AWS Elastigroups.
+ Launch configuration is part of the elastigroup configuration,
+ so no additional modules are necessary for handling the launch configuration.
+ You will have to have a credentials file in this location - <home>/.spotinst/credentials
+ The credentials file must contain a row that looks like this -
+ token = <YOUR TOKEN>
+ Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-)
+requirements:
+ - python >= 2.7
+ - spotinst_sdk >= 1.0.38
+options:
+
+ credentials_path:
+ description:
+ - (Path) Optional parameter that allows you to set a non-default credentials path.
+ default: ~/.spotinst/credentials
+ type: path
+
+ account_id:
+ description:
+ - (String) Optional parameter that allows you to set an account-id inside the module configuration.
+ By default this is retrieved from the credentials path
+ type: str
+
+ availability_vs_cost:
+ description:
+ - (String) The strategy orientation.
+ - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
+ required: true
+ type: str
+
+ availability_zones:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ name (String),
+ subnet_id (String),
+ placement_group_name (String),
+ required: true
+ type: list
+
+ block_device_mappings:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
+ You can specify virtual devices and EBS volumes.;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ device_name (List of Strings),
+ virtual_name (String),
+ no_device (String),
+ ebs (Object, expects the following keys-
+ delete_on_termination(Boolean),
+ encrypted(Boolean),
+ iops (Integer),
+ snapshot_id(Integer),
+ volume_type(String),
+ volume_size(Integer))
+ type: list
+
+ chef:
+ description:
+ - (Object) The Chef integration configuration.;
+ Expects the following keys - chef_server (String),
+ organization (String),
+ user (String),
+ pem_key (String),
+ chef_version (String)
+ type: dict
+
+ draining_timeout:
+ description:
+ - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+ type: int
+
+ ebs_optimized:
+ description:
+ - (Boolean) Enable EBS optimization for supported instance types which do not have it enabled by default.;
+ Note - additional charges will be applied.
+ type: bool
+
+ ebs_volume_pool:
+ description:
+ - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ volume_ids (List of Strings),
+ device_name (String)
+ type: list
+
+ ecs:
+ description:
+ - (Object) The ECS integration configuration.;
+ Expects the following key -
+ cluster_name (String)
+ type: dict
+
+ elastic_ips:
+ description:
+ - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
+ type: list
+
+ fallback_to_od:
+ description:
+ - (Boolean) In case no Spot instances are available, Elastigroup will launch an On-Demand instance instead
+ type: bool
+
+ health_check_grace_period:
+ description:
+ - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health.
+ - If not specified, it defaults to C(300).
+ type: int
+
+ health_check_unhealthy_duration_before_replacement:
+ description:
+ - (Integer) Minimal amount of time an instance should be unhealthy before we consider it unhealthy.
+ type: int
+
+ health_check_type:
+ description:
+ - (String) The service to use for the health check.
+ - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
+ type: str
+
+ iam_role_name:
+ description:
+ - (String) The instance profile iamRole name
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ iam_role_arn:
+ description:
+ - (String) The instance profile iamRole arn
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ id:
+ description:
+ - (String) The group id if it already exists and you want to update, or delete it.
+ This will not work unless the uniqueness_by field is set to id.
+ When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
+ type: str
+
+ image_id:
+ description:
+ - (String) The image Id used to launch the instance.;
+ In case of conflict between Instance type and image type, an error will be returned
+ required: true
+ type: str
+
+ key_pair:
+ description:
+ - (String) Specify a Key Pair to attach to the instances
+ type: str
+
+ kubernetes:
+ description:
+ - (Object) The Kubernetes integration configuration.
+ Expects the following keys -
+ api_server (String),
+ token (String)
+ type: dict
+
+ lifetime_period:
+ description:
+ - (Integer) Lifetime period.
+ type: int
+
+ load_balancers:
+ description:
+ - (List of Strings) List of classic ELB names
+ type: list
+
+ max_size:
+ description:
+ - (Integer) The upper limit number of instances that you can scale up to
+ required: true
+ type: int
+
+ mesosphere:
+ description:
+ - (Object) The Mesosphere integration configuration.
+ Expects the following key -
+ api_server (String)
+ type: dict
+
+ min_size:
+ description:
+ - (Integer) The lower limit number of instances that you can scale down to
+ required: true
+ type: int
+
+ monitoring:
+ description:
+ - (String) Describes whether instance Enhanced Monitoring is enabled
+ type: str
+
+ name:
+ description:
+ - (String) Unique name for elastigroup to be created, updated or deleted
+ required: true
+ type: str
+
+ network_interfaces:
+ description:
+ - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ description (String),
+ device_index (Integer),
+ secondary_private_ip_address_count (Integer),
+ associate_public_ip_address (Boolean),
+ delete_on_termination (Boolean),
+ groups (List of Strings),
+ network_interface_id (String),
+ private_ip_address (String),
+ subnet_id (String),
+ associate_ipv6_address (Boolean),
+ private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
+ type: list
+
+ on_demand_count:
+ description:
+ - (Integer) Required if risk is not set.
+ - Number of on-demand instances to launch. All other instances will be Spot instances.;
+ Either set this parameter or the risk parameter
+ type: int
+
+ on_demand_instance_type:
+ description:
+ - (String) On-demand instance type that will be provisioned
+ type: str
+
+ opsworks:
+ description:
+ - (Object) The elastigroup OpsWorks integration configuration.;
+ Expects the following key -
+ layer_id (String)
+ type: dict
+
+ persistence:
+ description:
+ - (Object) The Stateful elastigroup configuration.;
+ Accepts the following keys -
+ should_persist_root_device (Boolean),
+ should_persist_block_devices (Boolean),
+ should_persist_private_ip (Boolean)
+ type: dict
+
+ product:
+ description:
+ - (String) Operation system type.
+ - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))."
+ required: true
+ type: str
+
+ rancher:
+ description:
+ - (Object) The Rancher integration configuration.;
+ Expects the following keys -
+ version (String),
+ access_key (String),
+ secret_key (String),
+ master_host (String)
+ type: dict
+
+ right_scale:
+ description:
+ - (Object) The Rightscale integration configuration.;
+ Expects the following keys -
+ account_id (String),
+ refresh_token (String)
+ type: dict
+
+ risk:
+ description:
+ - (Integer) Required if on_demand_count is not set. The percentage of Spot instances to launch (0 - 100).
+ type: int
+
+ roll_config:
+ description:
+ - (Object) Roll configuration.;
+ If you would like the group to roll after updating, please use this feature.
+ Accepts the following keys -
+ batch_size_percentage (Integer, Required),
+ grace_period (Integer, Required),
+ health_check_type (String, Optional)
+ type: dict
+
+ scheduled_tasks:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ adjustment (Integer),
+ scale_target_capacity (Integer),
+ scale_min_capacity (Integer),
+ scale_max_capacity (Integer),
+ adjustment_percentage (Integer),
+ batch_size_percentage (Integer),
+ cron_expression (String),
+ frequency (String),
+ grace_period (Integer),
+ task_type (String, required),
+ is_enabled (Boolean)
+ type: list
+
+ security_group_ids:
+ description:
+ - (List of Strings) One or more security group IDs. ;
+ In case of update it will override the existing Security Group with the new given array
+ required: true
+ type: list
+
+ shutdown_script:
+ description:
+ - (String) The Base64-encoded shutdown script that executes prior to instance termination.
+ Encode before setting.
+ type: str
+
+ signals:
+ description:
+ - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup;
+ keys allowed are -
+ name (String, required),
+ timeout (Integer)
+ type: list
+
+ spin_up_time:
+ description:
+ - (Integer) spin up time, in seconds, for the instance
+ type: int
+
+ spot_instance_types:
+ description:
+ - (List of Strings) Spot instance types that will be provisioned.
+ required: true
+ type: list
+
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - (String) create or delete the elastigroup
+ default: present
+ type: str
+
+ tags:
+ description:
+ - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
+ type: list
+
+ target:
+ description:
+ - (Integer) The number of instances to launch
+ required: true
+ type: int
+
+ target_group_arns:
+ description:
+ - (List of Strings) List of target group arns instances should be registered to
+ type: list
+
+ tenancy:
+ description:
+ - (String) dedicated vs shared tenancy.
+ - "The available choices are: C(default), C(dedicated)."
+ type: str
+
+ terminate_at_end_of_billing_hour:
+ description:
+ - (Boolean) terminate at the end of billing hour
+ type: bool
+
+ unit:
+ description:
+ - (String) The capacity unit to launch instances by.
+ - "The available choices are: C(instance), C(weight)."
+ type: str
+
+ up_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ min_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ down_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ max_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ target_tracking_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ source (String, required),
+ metric_name (String, required),
+ statistic (String, required),
+ unit (String, required),
+ cooldown (String, required),
+ target (String, required)
+ type: list
+
+ uniqueness_by:
+ choices:
+ - id
+ - name
+ description:
+ - (String) If your group names are not unique, you may use this feature to update or delete a specific group.
+ Whenever this property is set to C(id), you must set the id option in order to update or delete a group, otherwise a group will be created.
+ default: name
+ type: str
+
+ user_data:
+ description:
+ - (String) Base64-encoded MIME user data. Encode before setting the value.
+ type: str
+
+ utilize_reserved_instances:
+ description:
+ - (Boolean) In case of any available Reserved Instances,
+ Elastigroup will utilize your reservations before purchasing Spot instances.
+ type: bool
+
+ wait_for_instances:
+ description:
+ - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin up
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - (Integer) How long the module should wait for instances before failing the action.;
+ Only works if wait_for_instances is True.
+ type: int
+
+'''
+EXAMPLES = '''
+# Basic configuration YAML example
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/sda1'
+ ebs:
+ volume_size: 100
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
+# In organizations with more than one account, it is required to specify an account_id
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ ebs:
+ volume_size: 60
+ volume_type: gp2
+ - device_name: '/dev/xvdb'
+ ebs:
+ volume_size: 120
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example we have set up block device mapping with ephemeral devices
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ virtual_name: ephemeral0
+ - device_name: '/dev/xvdb/'
+ virtual_name: ephemeral1
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example we create a basic group configuration with a network interface defined.
+# Each network interface must have a device index
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ network_interfaces:
+ - associate_public_ip_address: true
+ device_index: 0
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+
+# In this example we create a basic group configuration with a target tracking scaling policy defined
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ account_id: act-92d45673
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-79da021e
+ image_id: ami-f173cc91
+ fallback_to_od: true
+ tags:
+ - Creator: ValueOfCreatorTag
+ - Environment: ValueOfEnvironmentTag
+ key_pair: spotinst-labs-oregon
+ max_size: 10
+ min_size: 0
+ target: 2
+ unit: instance
+ monitoring: True
+ name: ansible-group-1
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-46cdc13d
+ spot_instance_types:
+ - c3.large
+ target_tracking_policies:
+ - policy_name: target-tracking-1
+ namespace: AWS/EC2
+ metric_name: CPUUtilization
+ statistic: average
+ unit: percent
+ target: 50
+ cooldown: 120
+ do_not_update:
+ - image_id
+ register: result
+ - ansible.builtin.debug: var=result
+'''
+
+RETURN = '''
+---
+instances:
+ description: List of active elastigroup instances and their details.
+ returned: success
+ type: list
+ sample: [
+ {
+ "spotInstanceRequestId": "sir-regs25zp",
+ "instanceId": "i-09640ad8678234c",
+ "instanceType": "m4.large",
+ "product": "Linux/UNIX",
+ "availabilityZone": "us-west-2b",
+ "privateIp": "180.0.2.244",
+ "createdAt": "2017-07-17T12:46:18.000Z",
+ "status": "fulfilled"
+ }
+ ]
+group_id:
+ description: Created / Updated group's ID.
+ returned: success
+ type: str
+ sample: "sig-12345"
+
+'''
+
+HAS_SPOTINST_SDK = False
+
+import os
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import spotinst_sdk as spotinst
+ from spotinst_sdk import SpotinstClientException
+
+ HAS_SPOTINST_SDK = True
+
+except ImportError:
+ pass
+
+eni_fields = ('description',
+ 'device_index',
+ 'secondary_private_ip_address_count',
+ 'associate_public_ip_address',
+ 'delete_on_termination',
+ 'groups',
+ 'network_interface_id',
+ 'private_ip_address',
+ 'subnet_id',
+ 'associate_ipv6_address')
+
+private_ip_fields = ('private_ip_address',
+ 'primary')
+
+capacity_fields = (dict(ansible_field_name='min_size',
+ spotinst_field_name='minimum'),
+ dict(ansible_field_name='max_size',
+ spotinst_field_name='maximum'),
+ 'target',
+ 'unit')
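+
+# Entries in the field tuples above and below are either plain names (the
+# Ansible option and the SDK attribute share the key) or dicts mapping an
+# Ansible option to a differently-named SDK attribute; e.g. a playbook's
+# min_size/max_size become the capacity's minimum/maximum.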
+
+lspec_fields = ('user_data',
+ 'key_pair',
+ 'tenancy',
+ 'shutdown_script',
+ 'monitoring',
+ 'ebs_optimized',
+ 'image_id',
+ 'health_check_type',
+ 'health_check_grace_period',
+ 'health_check_unhealthy_duration_before_replacement',
+ 'security_group_ids')
+
+iam_fields = (dict(ansible_field_name='iam_role_name',
+ spotinst_field_name='name'),
+ dict(ansible_field_name='iam_role_arn',
+ spotinst_field_name='arn'))
+
+scheduled_task_fields = ('adjustment',
+ 'adjustment_percentage',
+ 'batch_size_percentage',
+ 'cron_expression',
+ 'frequency',
+ 'grace_period',
+ 'task_type',
+ 'is_enabled',
+ 'scale_target_capacity',
+ 'scale_min_capacity',
+ 'scale_max_capacity')
+
+scaling_policy_fields = ('policy_name',
+ 'namespace',
+ 'metric_name',
+ 'dimensions',
+ 'statistic',
+ 'evaluation_periods',
+ 'period',
+ 'threshold',
+ 'cooldown',
+ 'unit',
+ 'operator')
+
+tracking_policy_fields = ('policy_name',
+ 'namespace',
+ 'source',
+ 'metric_name',
+ 'statistic',
+ 'unit',
+ 'cooldown',
+ 'target',
+ 'threshold')
+
+action_fields = (dict(ansible_field_name='action_type',
+ spotinst_field_name='type'),
+ 'adjustment',
+ 'min_target_capacity',
+ 'max_target_capacity',
+ 'target',
+ 'minimum',
+ 'maximum')
+
+signal_fields = ('name',
+ 'timeout')
+
+multai_lb_fields = ('balancer_id',
+ 'project_id',
+ 'target_set_id',
+ 'az_awareness',
+ 'auto_weight')
+
+persistence_fields = ('should_persist_root_device',
+ 'should_persist_block_devices',
+ 'should_persist_private_ip')
+
+strategy_fields = ('risk',
+ 'utilize_reserved_instances',
+ 'fallback_to_od',
+ 'on_demand_count',
+ 'availability_vs_cost',
+ 'draining_timeout',
+ 'spin_up_time',
+ 'lifetime_period')
+
+ebs_fields = ('delete_on_termination',
+ 'encrypted',
+ 'iops',
+ 'snapshot_id',
+ 'volume_type',
+ 'volume_size')
+
+bdm_fields = ('device_name',
+ 'virtual_name',
+ 'no_device')
+
+kubernetes_fields = ('api_server',
+ 'token')
+
+right_scale_fields = ('account_id',
+ 'refresh_token')
+
+rancher_fields = ('access_key',
+ 'secret_key',
+ 'master_host',
+ 'version')
+
+chef_fields = ('chef_server',
+ 'organization',
+ 'user',
+ 'pem_key',
+ 'chef_version')
+
+az_fields = ('name',
+ 'subnet_id',
+ 'placement_group_name')
+
+opsworks_fields = ('layer_id',)
+
+scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
+
+mesosphere_fields = ('api_server',)
+
+ecs_fields = ('cluster_name',)
+
+multai_fields = ('multai_token',)
+
+
+def handle_elastigroup(client, module):
+ has_changed = False
+ group_id = None
+ message = 'None'
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ uniqueness_by = module.params.get('uniqueness_by')
+ external_group_id = module.params.get('id')
+
+ if uniqueness_by == 'id':
+ if external_group_id is None:
+ should_create = True
+ else:
+ should_create = False
+ group_id = external_group_id
+ else:
+ groups = client.get_elastigroups()
+ should_create, group_id = find_group_with_same_name(groups, name)
+
+ if should_create is True:
+ if state == 'present':
+ eg = expand_elastigroup(module, is_update=False)
+ group = client.create_elastigroup(group=eg)
+ group_id = group['id']
+ message = 'Created group successfully.'
+ module.debug(str(" [INFO] " + message + "\n"))
+ has_changed = True
+
+ elif state == 'absent':
+ message = 'Cannot delete non-existent group.'
+ has_changed = False
+ else:
+ eg = expand_elastigroup(module, is_update=True)
+
+ if state == 'present':
+ group = client.update_elastigroup(group_update=eg, group_id=group_id)
+ message = 'Updated group successfully.'
+
+ try:
+ roll_config = module.params.get('roll_config')
+ if roll_config:
+ eg_roll = spotinst.aws_elastigroup.Roll(
+ batch_size_percentage=roll_config.get('batch_size_percentage'),
+ grace_period=roll_config.get('grace_period'),
+ health_check_type=roll_config.get('health_check_type')
+ )
+ roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
+ message = 'Updated and started rolling the group successfully.'
+
+ except SpotinstClientException as exc:
+ message = 'Updated group successfully, but failed to perform roll. Error: ' + str(exc)
+ has_changed = True
+
+ elif state == 'absent':
+ try:
+ client.delete_elastigroup(group_id=group_id)
+ except SpotinstClientException as exc:
+ if "GROUP_DOESNT_EXIST" in exc.message:
+ pass
+ else:
+ module.fail_json(msg="Error while attempting to delete group : " + exc.message)
+
+ message = 'Deleted group successfully.'
+ has_changed = True
+
+ return group_id, message, has_changed
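+
+# Summary of the branching above:
+# uniqueness_by == 'id' and id unset -> treated as a new group (created on state=present)
+# uniqueness_by == 'id' and id set -> update or delete that group id
+# uniqueness_by == 'name' -> look up by name; treated as new when absent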
+
+
+def retrieve_group_instances(client, module, group_id):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_for_instances = module.params.get('wait_for_instances')
+
+ health_check_type = module.params.get('health_check_type')
+
+ if wait_timeout is None:
+ wait_timeout = 300
+
+ wait_timeout = time.time() + wait_timeout
+ target = module.params.get('target')
+ state = module.params.get('state')
+ instances = list()
+
+ if state == 'present' and group_id is not None and wait_for_instances is True:
+
+ is_amount_fulfilled = False
+ while is_amount_fulfilled is False and wait_timeout > time.time():
+ instances = list()
+ amount_of_fulfilled_instances = 0
+
+ if health_check_type is not None:
+ healthy_instances = client.get_instance_healthiness(group_id=group_id)
+
+ for healthy_instance in healthy_instances:
+ if healthy_instance.get('healthStatus') == 'HEALTHY':
+ amount_of_fulfilled_instances += 1
+ instances.append(healthy_instance)
+
+ else:
+ active_instances = client.get_elastigroup_active_instances(group_id=group_id)
+
+ for active_instance in active_instances:
+ if active_instance.get('private_ip') is not None:
+ amount_of_fulfilled_instances += 1
+ instances.append(active_instance)
+
+ if amount_of_fulfilled_instances >= target:
+ is_amount_fulfilled = True
+
+ time.sleep(10)
+
+ return instances
+
+
+def find_group_with_same_name(groups, name):
+ for group in groups:
+ if group['name'] == name:
+ return False, group.get('id')
+
+ return True, None
+
+
+def expand_elastigroup(module, is_update):
+ do_not_update = module.params['do_not_update']
+ name = module.params.get('name')
+
+ eg = spotinst.aws_elastigroup.Elastigroup()
+ description = module.params.get('description')
+
+ if name is not None:
+ eg.name = name
+ if description is not None:
+ eg.description = description
+
+ # Capacity
+ expand_capacity(eg, module, is_update, do_not_update)
+ # Strategy
+ expand_strategy(eg, module)
+ # Scaling
+ expand_scaling(eg, module)
+ # Third party integrations
+ expand_integrations(eg, module)
+ # Compute
+ expand_compute(eg, module, is_update, do_not_update)
+ # Multai
+ expand_multai(eg, module)
+ # Scheduling
+ expand_scheduled_tasks(eg, module)
+
+ return eg
+
+
+def expand_compute(eg, module, is_update, do_not_update):
+ elastic_ips = module.params['elastic_ips']
+ on_demand_instance_type = module.params.get('on_demand_instance_type')
+ spot_instance_types = module.params['spot_instance_types']
+ ebs_volume_pool = module.params['ebs_volume_pool']
+ availability_zones_list = module.params['availability_zones']
+ product = module.params.get('product')
+
+ eg_compute = spotinst.aws_elastigroup.Compute()
+
+ if product is not None:
+ # Only put product on group creation
+ if is_update is not True:
+ eg_compute.product = product
+
+ if elastic_ips is not None:
+ eg_compute.elastic_ips = elastic_ips
+
+ if on_demand_instance_type is not None or spot_instance_types is not None:
+ eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
+
+ if on_demand_instance_type is not None:
+ eg_instance_types.ondemand = on_demand_instance_type
+ if spot_instance_types is not None:
+ eg_instance_types.spot = spot_instance_types
+
+ if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
+ eg_compute.instance_types = eg_instance_types
+
+ expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
+
+ eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+
+ expand_launch_spec(eg_compute, module, is_update, do_not_update)
+
+ eg.compute = eg_compute
+
+
+def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
+ if ebs_volumes_list is not None:
+ eg_volumes = []
+
+ for volume in ebs_volumes_list:
+ eg_volume = spotinst.aws_elastigroup.EbsVolume()
+
+ if volume.get('device_name') is not None:
+ eg_volume.device_name = volume.get('device_name')
+ if volume.get('volume_ids') is not None:
+ eg_volume.volume_ids = volume.get('volume_ids')
+
+ if eg_volume.device_name is not None:
+ eg_volumes.append(eg_volume)
+
+ if len(eg_volumes) > 0:
+ eg_compute.ebs_volume_pool = eg_volumes
+
+
+def expand_launch_spec(eg_compute, module, is_update, do_not_update):
+ eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+
+ if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
+ eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+
+ tags = module.params['tags']
+ load_balancers = module.params['load_balancers']
+ target_group_arns = module.params['target_group_arns']
+ block_device_mappings = module.params['block_device_mappings']
+ network_interfaces = module.params['network_interfaces']
+
+ if is_update is True:
+ if 'image_id' in do_not_update:
+ delattr(eg_launch_spec, 'image_id')
+
+ expand_tags(eg_launch_spec, tags)
+
+ expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
+
+ expand_block_device_mappings(eg_launch_spec, block_device_mappings)
+
+ expand_network_interfaces(eg_launch_spec, network_interfaces)
+
+ eg_compute.launch_specification = eg_launch_spec
+
+
+def expand_integrations(eg, module):
+ rancher = module.params.get('rancher')
+ mesosphere = module.params.get('mesosphere')
+ ecs = module.params.get('ecs')
+ kubernetes = module.params.get('kubernetes')
+ right_scale = module.params.get('right_scale')
+ opsworks = module.params.get('opsworks')
+ chef = module.params.get('chef')
+
+ integration_exists = False
+
+ eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
+
+ if mesosphere is not None:
+ eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
+ integration_exists = True
+
+ if ecs is not None:
+ eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
+ integration_exists = True
+
+ if kubernetes is not None:
+ eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
+ integration_exists = True
+
+ if right_scale is not None:
+ eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
+ integration_exists = True
+
+ if opsworks is not None:
+ eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
+ integration_exists = True
+
+ if rancher is not None:
+ eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
+ integration_exists = True
+
+ if chef is not None:
+ eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
+ integration_exists = True
+
+ if integration_exists:
+ eg.third_parties_integration = eg_integrations
+
+
+def expand_capacity(eg, module, is_update, do_not_update):
+ eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
+
+ if is_update is True:
+ delattr(eg_capacity, 'unit')
+
+ if 'target' in do_not_update:
+ delattr(eg_capacity, 'target')
+
+ eg.capacity = eg_capacity
+
+
+def expand_strategy(eg, module):
+ persistence = module.params.get('persistence')
+ signals = module.params.get('signals')
+
+ eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
+
+ terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
+
+ if terminate_at_end_of_billing_hour is not None:
+ eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
+ module.params, 'ScalingStrategy')
+
+ if persistence is not None:
+ eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
+
+ if signals is not None:
+ eg_signals = expand_list(signals, signal_fields, 'Signal')
+
+ if len(eg_signals) > 0:
+ eg_strategy.signals = eg_signals
+
+ eg.strategy = eg_strategy
+
+
+def expand_multai(eg, module):
+ multai_load_balancers = module.params.get('multai_load_balancers')
+
+ eg_multai = expand_fields(multai_fields, module.params, 'Multai')
+
+ if multai_load_balancers is not None:
+ eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
+
+ if len(eg_multai_load_balancers) > 0:
+ eg_multai.balancers = eg_multai_load_balancers
+ eg.multai = eg_multai
+
+
+def expand_scheduled_tasks(eg, module):
+ scheduled_tasks = module.params.get('scheduled_tasks')
+
+ if scheduled_tasks is not None:
+ eg_scheduling = spotinst.aws_elastigroup.Scheduling()
+
+ eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
+
+ if len(eg_tasks) > 0:
+ eg_scheduling.tasks = eg_tasks
+ eg.scheduling = eg_scheduling
+
+
+def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
+ if load_balancers is not None or target_group_arns is not None:
+ eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
+ eg_total_lbs = []
+
+ if load_balancers is not None:
+ for elb_name in load_balancers:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if elb_name is not None:
+ eg_elb.name = elb_name
+ eg_elb.type = 'CLASSIC'
+ eg_total_lbs.append(eg_elb)
+
+ if target_group_arns is not None:
+ for target_arn in target_group_arns:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if target_arn is not None:
+ eg_elb.arn = target_arn
+ eg_elb.type = 'TARGET_GROUP'
+ eg_total_lbs.append(eg_elb)
+
+ if len(eg_total_lbs) > 0:
+ eg_load_balancers_config.load_balancers = eg_total_lbs
+ eg_launchspec.load_balancers_config = eg_load_balancers_config
+
+
+def expand_tags(eg_launchspec, tags):
+ if tags is not None:
+ eg_tags = []
+
+ for tag in tags:
+ eg_tag = spotinst.aws_elastigroup.Tag()
+ if tag.keys():
+ eg_tag.tag_key = list(tag.keys())[0]
+ if tag.values():
+ eg_tag.tag_value = list(tag.values())[0]
+
+ eg_tags.append(eg_tag)
+
+ if len(eg_tags) > 0:
+ eg_launchspec.tags = eg_tags
+
+
+def expand_block_device_mappings(eg_launchspec, bdms):
+ if bdms is not None:
+ eg_bdms = []
+
+ for bdm in bdms:
+ eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
+
+ if bdm.get('ebs') is not None:
+ eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
+
+ eg_bdms.append(eg_bdm)
+
+ if len(eg_bdms) > 0:
+ eg_launchspec.block_device_mappings = eg_bdms
+
+
+def expand_network_interfaces(eg_launchspec, enis):
+ if enis is not None:
+ eg_enis = []
+
+ for eni in enis:
+ eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
+
+ eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
+
+ if eg_pias is not None:
+ eg_eni.private_ip_addresses = eg_pias
+
+ eg_enis.append(eg_eni)
+
+ if len(eg_enis) > 0:
+ eg_launchspec.network_interfaces = eg_enis
+
+
+def expand_scaling(eg, module):
+ up_scaling_policies = module.params['up_scaling_policies']
+ down_scaling_policies = module.params['down_scaling_policies']
+ target_tracking_policies = module.params['target_tracking_policies']
+
+ eg_scaling = spotinst.aws_elastigroup.Scaling()
+
+ if up_scaling_policies is not None:
+ eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
+ if len(eg_up_scaling_policies) > 0:
+ eg_scaling.up = eg_up_scaling_policies
+
+ if down_scaling_policies is not None:
+ eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
+ if len(eg_down_scaling_policies) > 0:
+ eg_scaling.down = eg_down_scaling_policies
+
+ if target_tracking_policies is not None:
+ eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
+ if len(eg_target_tracking_policies) > 0:
+ eg_scaling.target = eg_target_tracking_policies
+
+ if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
+ eg.scaling = eg_scaling
+
+
+def expand_list(items, fields, class_name):
+ if items is not None:
+ new_objects_list = []
+ for item in items:
+ new_obj = expand_fields(fields, item, class_name)
+ new_objects_list.append(new_obj)
+
+ return new_objects_list
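+
+# Note: when `items` is None, expand_list() falls through and implicitly
+# returns None rather than []; callers such as expand_network_interfaces()
+# therefore guard with `is not None` before assigning the result.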
+
+
+def expand_fields(fields, item, class_name):
+ class_ = getattr(spotinst.aws_elastigroup, class_name)
+ new_obj = class_()
+
+ # Handle primitive fields
+ if item is not None:
+ for field in fields:
+ if isinstance(field, dict):
+ ansible_field_name = field['ansible_field_name']
+ spotinst_field_name = field['spotinst_field_name']
+ else:
+ ansible_field_name = field
+ spotinst_field_name = field
+ if item.get(ansible_field_name) is not None:
+ setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
+
+ return new_obj
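+
+# Illustrative use (values hypothetical): expand_fields(kubernetes_fields,
+# {'api_server': 'https://k8s.example', 'token': 's3cret'},
+# 'KubernetesConfiguration') returns a KubernetesConfiguration object with
+# .api_server and .token set from the dict.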
+
+
+def expand_scaling_policies(scaling_policies):
+ eg_scaling_policies = []
+
+ for policy in scaling_policies:
+ eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
+ eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
+ eg_scaling_policies.append(eg_policy)
+
+ return eg_scaling_policies
+
+
+def expand_target_tracking_policies(tracking_policies):
+ eg_tracking_policies = []
+
+ for policy in tracking_policies:
+ eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
+ eg_tracking_policies.append(eg_policy)
+
+ return eg_tracking_policies
+
+
+def main():
+ fields = dict(
+ account_id=dict(type='str'),
+ availability_vs_cost=dict(type='str', required=True),
+ availability_zones=dict(type='list', required=True),
+ block_device_mappings=dict(type='list'),
+ chef=dict(type='dict'),
+ credentials_path=dict(type='path', default="~/.spotinst/credentials"),
+ do_not_update=dict(default=[], type='list'),
+ down_scaling_policies=dict(type='list'),
+ draining_timeout=dict(type='int'),
+ ebs_optimized=dict(type='bool'),
+ ebs_volume_pool=dict(type='list'),
+ ecs=dict(type='dict'),
+ elastic_beanstalk=dict(type='dict'),
+ elastic_ips=dict(type='list'),
+ fallback_to_od=dict(type='bool'),
+ id=dict(type='str'),
+ health_check_grace_period=dict(type='int'),
+ health_check_type=dict(type='str'),
+ health_check_unhealthy_duration_before_replacement=dict(type='int'),
+ iam_role_arn=dict(type='str'),
+ iam_role_name=dict(type='str'),
+ image_id=dict(type='str', required=True),
+ key_pair=dict(type='str'),
+ kubernetes=dict(type='dict'),
+ lifetime_period=dict(type='int'),
+ load_balancers=dict(type='list'),
+ max_size=dict(type='int', required=True),
+ mesosphere=dict(type='dict'),
+ min_size=dict(type='int', required=True),
+ monitoring=dict(type='str'),
+ multai_load_balancers=dict(type='list'),
+ multai_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ network_interfaces=dict(type='list'),
+ on_demand_count=dict(type='int'),
+ on_demand_instance_type=dict(type='str'),
+ opsworks=dict(type='dict'),
+ persistence=dict(type='dict'),
+ product=dict(type='str', required=True),
+ rancher=dict(type='dict'),
+ right_scale=dict(type='dict'),
+ risk=dict(type='int'),
+ roll_config=dict(type='dict'),
+ scheduled_tasks=dict(type='list'),
+ security_group_ids=dict(type='list', required=True),
+ shutdown_script=dict(type='str'),
+ signals=dict(type='list'),
+ spin_up_time=dict(type='int'),
+ spot_instance_types=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='list'),
+ target=dict(type='int', required=True),
+ target_group_arns=dict(type='list'),
+ tenancy=dict(type='str'),
+ terminate_at_end_of_billing_hour=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ unit=dict(type='str'),
+ user_data=dict(type='str'),
+ utilize_reserved_instances=dict(type='bool'),
+ uniqueness_by=dict(default='name', choices=['name', 'id']),
+ up_scaling_policies=dict(type='list'),
+ target_tracking_policies=dict(type='list'),
+ wait_for_instances=dict(type='bool', default=False),
+ wait_timeout=dict(type='int')
+ )
+
+ module = AnsibleModule(argument_spec=fields)
+
+ if not HAS_SPOTINST_SDK:
+ module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)")
+
+ # Retrieve creds file variables
+ creds_file_loaded_vars = dict()
+
+ credentials_path = module.params.get('credentials_path')
+
+ try:
+ with open(credentials_path, "r") as creds:
+ for line in creds:
+ eq_index = line.find('=')
+ if eq_index == -1:
+ continue
+ var_name = line[:eq_index].strip()
+ string_value = line[eq_index + 1:].strip()
+ creds_file_loaded_vars[var_name] = string_value
+ except IOError:
+ pass
+ # End of creds file retrieval
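+ #
+ # The credentials file is parsed as simple `key = value` lines; a
+ # hypothetical example matching the lookups below:
+ #   token = <your-spotinst-api-token>
+ #   account = act-12345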
+
+ token = module.params.get('token')
+ if not token:
+ token = os.environ.get('SPOTINST_TOKEN')
+ if not token:
+ token = creds_file_loaded_vars.get("token")
+
+ account = module.params.get('account_id')
+ if not account:
+ account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+ if not account:
+ account = creds_file_loaded_vars.get("account")
+
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False)
+
+ if account is not None:
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
+
+ group_id, message, has_changed = handle_elastigroup(client=client, module=module)
+
+ instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
+
+ module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
new file mode 100644
index 00000000..04604c09
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+short_description: Manage HPE StoreServ 3PAR CPG
+author:
+ - Farhan Nomani (@farhan7500)
+ - Gautham P Hegde (@gautamphegde)
+description:
+ - Create and delete CPG on HPE 3PAR.
+module: ss_3par_cpg
+options:
+ cpg_name:
+ description:
+ - Name of the CPG.
+ type: str
+ required: true
+ disk_type:
+ choices:
+ - FC
+ - NL
+ - SSD
+ description:
+ - Specifies that physical disks must have the specified device type.
+ type: str
+ domain:
+ description:
+ - Specifies the name of the domain in which the object will reside.
+ type: str
+ growth_increment:
+ description:
+ - Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage
+ created on each auto-grow operation.
+ type: str
+ growth_limit:
+ description:
+ - Specifies that the autogrow operation is limited to the specified
+ storage amount that sets the growth limit (in MiB, GiB or TiB).
+ type: str
+ growth_warning:
+ description:
+ - Specifies the threshold (in MiB, GiB or TiB) of used logical disk space that, when
+ exceeded, results in a warning alert.
+ type: str
+ high_availability:
+ choices:
+ - PORT
+ - CAGE
+ - MAG
+ description:
+ - Specifies that the layout must support the failure of one port pair,
+ one cage, or one magazine.
+ type: str
+ raid_type:
+ choices:
+ - R0
+ - R1
+ - R5
+ - R6
+ description:
+ - Specifies the RAID type for the logical disk.
+ type: str
+ set_size:
+ description:
+ - Specifies the set size in the number of chunklets.
+ type: int
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Whether the specified CPG should exist or not.
+ required: true
+ type: str
+ secure:
+ description:
+ - Specifies whether the certificate needs to be validated while communicating.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.hpe3par
+
+'''
+
+
+EXAMPLES = r'''
+- name: Create CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: present
+ cpg_name: sample_cpg
+ domain: sample_domain
+ growth_increment: 32000 MiB
+ growth_limit: 64000 MiB
+ growth_warning: 48000 MiB
+ raid_type: R6
+ set_size: 8
+ high_availability: MAG
+ disk_type: FC
+ secure: no
+
+- name: Delete CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: absent
+ cpg_name: sample_cpg
+ secure: no
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+try:
+ from hpe3par_sdk import client
+ from hpe3parclient import exceptions
+ HAS_3PARCLIENT = True
+except ImportError:
+ HAS_3PARCLIENT = False
+
+
+def validate_set_size(raid_type, set_size):
+ if raid_type:
+ set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
+ if set_size in set_size_array:
+ return True
+ return False
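+
+# A hedged sketch of the RAID_MAP shape assumed by validate_set_size()
+# above and cpg_ldlayout_map() below (keys inferred from the lookups;
+# exact contents belong to hpe3par_sdk):
+#
+#   RAID_MAP = {'R6': {'raid_value': <int>, 'set_sizes': [<int>, ...]}, ...}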
+
+
+def cpg_ldlayout_map(ldlayout_dict):
+ if ldlayout_dict['RAIDType']:
+ ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[
+ ldlayout_dict['RAIDType']]['raid_value']
+ if ldlayout_dict['HA']:
+ ldlayout_dict['HA'] = getattr(
+ client.HPE3ParClient, ldlayout_dict['HA'])
+ return ldlayout_dict
+
+
+def create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type):
+ try:
+ if not validate_set_size(raid_type, set_size):
+ return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
+ if not client_obj.cpgExists(cpg_name):
+
+ disk_patterns = []
+ if disk_type:
+ disk_type = getattr(client.HPE3ParClient, disk_type)
+ disk_patterns = [{'diskType': disk_type}]
+ ld_layout = {
+ 'RAIDType': raid_type,
+ 'setSize': set_size,
+ 'HA': high_availability,
+ 'diskPatterns': disk_patterns}
+ ld_layout = cpg_ldlayout_map(ld_layout)
+ if growth_increment is not None:
+ growth_increment = hpe3par.convert_to_binary_multiple(
+ growth_increment)
+ if growth_limit is not None:
+ growth_limit = hpe3par.convert_to_binary_multiple(
+ growth_limit)
+ if growth_warning is not None:
+ growth_warning = hpe3par.convert_to_binary_multiple(
+ growth_warning)
+ optional = {
+ 'domain': domain,
+ 'growthIncrementMiB': growth_increment,
+ 'growthLimitMiB': growth_limit,
+ 'usedLDWarningAlertMiB': growth_warning,
+ 'LDLayout': ld_layout}
+ client_obj.createCPG(cpg_name, optional)
+ else:
+ return (True, False, "CPG already present")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG creation failed | %s" % (e))
+ return (True, True, "Created CPG %s successfully." % cpg_name)
+
+
+def delete_cpg(
+ client_obj,
+ cpg_name):
+ try:
+ if client_obj.cpgExists(cpg_name):
+ client_obj.deleteCPG(cpg_name)
+ else:
+ return (True, False, "CPG does not exist")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG delete failed | %s" % e)
+ return (True, True, "Deleted CPG %s successfully." % cpg_name)
+
+
+def main():
+ module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
+ required_together=[['raid_type', 'set_size']])
+ if not HAS_3PARCLIENT:
+ module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
+
+ if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
+ module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
+
+ storage_system_ip = module.params["storage_system_ip"]
+ storage_system_username = module.params["storage_system_username"]
+ storage_system_password = module.params["storage_system_password"]
+ cpg_name = module.params["cpg_name"]
+ domain = module.params["domain"]
+ growth_increment = module.params["growth_increment"]
+ growth_limit = module.params["growth_limit"]
+ growth_warning = module.params["growth_warning"]
+ raid_type = module.params["raid_type"]
+ set_size = module.params["set_size"]
+ high_availability = module.params["high_availability"]
+ disk_type = module.params["disk_type"]
+ secure = module.params["secure"]
+
+ wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
+ try:
+ client_obj = client.HPE3ParClient(wsapi_url, secure)
+ except exceptions.SSLCertFailed:
+ module.fail_json(msg="SSL Certificate Failed")
+ except exceptions.ConnectionError:
+ module.fail_json(msg="Connection Error")
+ except exceptions.UnsupportedVersion:
+ module.fail_json(msg="Unsupported WSAPI version")
+ except Exception as e:
+ module.fail_json(msg="Initializing client failed. %s" % e)
+
+ if storage_system_username is None or storage_system_password is None:
+ module.fail_json(msg="Storage system username or password is None")
+ if cpg_name is None:
+ module.fail_json(msg="CPG Name is None")
+
+ # States
+ if module.params["state"] == "present":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG create failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ elif module.params["state"] == "absent":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = delete_cpg(
+ client_obj,
+ cpg_name
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG create failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ if return_status:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py
new file mode 100644
index 00000000..8e2d19a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: stackdriver
+short_description: Send code deploy and annotation events to Stackdriver
+description:
+ - Send code deploy and annotation events to Stackdriver
+author: "Ben Whaley (@bwhaley)"
+options:
+ key:
+ type: str
+ description:
+ - API key.
+ required: true
+ event:
+ type: str
+ description:
+ - The type of event to send, either annotation or deploy
+ choices: ['annotation', 'deploy']
+ required: true
+ revision_id:
+ type: str
+ description:
+ - The revision of the code that was deployed. Required for deploy events
+ deployed_by:
+ type: str
+ description:
+ - The person or robot responsible for deploying the code
+ default: "Ansible"
+ deployed_to:
+ type: str
+ description:
+ - "The environment code was deployed to. (ie: development, staging, production)"
+ repository:
+ type: str
+ description:
+ - The repository (or project) deployed
+ msg:
+ type: str
+ description:
+ - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
+ annotated_by:
+ type: str
+ description:
+ - The person or robot who the annotation should be attributed to.
+ default: "Ansible"
+ level:
+ type: str
+ description:
+ - One of INFO/WARN/ERROR. Defaults to INFO if not supplied. May affect display.
+ choices: ['INFO', 'WARN', 'ERROR']
+ default: 'INFO'
+ instance_id:
+ type: str
+ description:
+ - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown.
+ event_epoch:
+ type: str
+ description:
+ - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
+'''
+
+EXAMPLES = '''
+- name: Send a code deploy event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: deploy
+ deployed_to: production
+ deployed_by: leeroyjenkins
+ repository: MyWebApp
+ revision_id: abcd123
+
+- name: Send an annotation event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: annotation
+ msg: Greetings from Ansible
+ annotated_by: leeroyjenkins
+ level: WARN
+ instance_id: i-abcd1234
+'''
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
+ """Send a deploy event to Stackdriver"""
+ deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
+
+ params = {}
+ params['revision_id'] = revision_id
+ params['deployed_by'] = deployed_by
+ if deployed_to:
+ params['deployed_to'] = deployed_to
+ if repository:
+ params['repository'] = repository
+
+ return do_send_request(module, deploy_api, params, key)
+
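+# The params dict built above serializes to a JSON body such as:
+#   {"revision_id": "abcd123", "deployed_by": "leeroyjenkins",
+#    "deployed_to": "production", "repository": "MyWebApp"}
+# (values borrowed from the EXAMPLES block; do_send_request() below adds
+# the Content-Type and x-stackdriver-apikey headers.)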
+
+def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
+ """Send an annotation event to Stackdriver"""
+ annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
+
+ params = {}
+ params['message'] = msg
+ if annotated_by:
+ params['annotated_by'] = annotated_by
+ if level:
+ params['level'] = level
+ if instance_id:
+ params['instance_id'] = instance_id
+ if event_epoch:
+ params['event_epoch'] = event_epoch
+
+ return do_send_request(module, annotation_api, params, key)
+
+
+def do_send_request(module, url, params, key):
+ data = json.dumps(params)
+ headers = {
+ 'Content-Type': 'application/json',
+ 'x-stackdriver-apikey': key
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict( # @TODO add types
+ key=dict(required=True, no_log=True),
+ event=dict(required=True, choices=['deploy', 'annotation']),
+ msg=dict(),
+ revision_id=dict(),
+ annotated_by=dict(default='Ansible'),
+ level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
+ instance_id=dict(),
+ event_epoch=dict(), # @TODO int?
+ deployed_by=dict(default='Ansible'),
+ deployed_to=dict(),
+ repository=dict(),
+ ),
+ supports_check_mode=True
+ )
+
+ key = module.params["key"]
+ event = module.params["event"]
+
+ # Annotation params
+ msg = module.params["msg"]
+ annotated_by = module.params["annotated_by"]
+ level = module.params["level"]
+ instance_id = module.params["instance_id"]
+ event_epoch = module.params["event_epoch"]
+
+ # Deploy params
+ revision_id = module.params["revision_id"]
+ deployed_by = module.params["deployed_by"]
+ deployed_to = module.params["deployed_to"]
+ repository = module.params["repository"]
+
+ ##################################################################
+ # deploy requires revision_id
+ # annotation requires msg
+ # We verify these manually
+ ##################################################################
+
+ if event == 'deploy':
+ if not revision_id:
+ module.fail_json(msg="revision_id required for deploy events")
+ try:
+ send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+ except Exception as e:
+ module.fail_json(msg="unable to sent deploy event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ if event == 'annotation':
+ if not msg:
+ module.fail_json(msg="msg required for annotation events")
+ try:
+ send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+ except Exception as e:
+ module.fail_json(msg="unable to sent annotation event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, deployed_by=deployed_by)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py
new file mode 100644
index 00000000..372ba2df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hugh Ma <Hugh.Ma@flextronics.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: stacki_host
+short_description: Add or remove host to stacki front-end
+description:
+ - Use this module to add hosts to or remove hosts from a Stacki front-end via its API.
+ - U(https://github.com/StackIQ/stacki)
+options:
+ name:
+ description:
+ - Name of the host to be added to Stacki.
+ required: True
+ type: str
+ stacki_user:
+ description:
+ - Username for authenticating with the Stacki API. If not
+ specified, the environment variable C(stacki_user) is used instead.
+ required: True
+ type: str
+ stacki_password:
+ description:
+ - Password for authenticating with the Stacki API. If not
+ specified, the environment variable C(stacki_password) is used instead.
+ required: True
+ type: str
+ stacki_endpoint:
+ description:
+ - URL for the Stacki API Endpoint.
+ required: True
+ type: str
+ prim_intf_mac:
+ description:
+ - MAC Address for the primary PXE boot network interface.
+ type: str
+ prim_intf_ip:
+ description:
+ - IP Address for the primary network interface.
+ type: str
+ prim_intf:
+ description:
+ - Name of the primary network interface.
+ type: str
+ force_install:
+ description:
+ - Set value to True to force the node into the install state if it already exists in Stacki.
+ type: bool
+ state:
+ description:
+ - Set value to the desired state for the specified host.
+ type: str
+ choices: [ absent, present ]
+author:
+- Hugh Ma (@bbyhuy) <Hugh.Ma@flextronics.com>
+'''
+
+EXAMPLES = '''
+- name: Add a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ prim_intf_mac: mac_addr
+ prim_intf_ip: x.x.x.x
+ prim_intf: eth0
+
+- name: Remove a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ state: absent
+'''
+
+RETURN = '''
+changed:
+ description: whether the API call completed successfully
+ returned: always
+ type: bool
+ sample: true
+
+stdout:
+ description: the set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: the value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class StackiHost(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.hostname = module.params['name']
+ self.rack = module.params['rack']
+ self.rank = module.params['rank']
+ self.appliance = module.params['appliance']
+ self.prim_intf = module.params['prim_intf']
+ self.prim_intf_ip = module.params['prim_intf_ip']
+ self.network = module.params['network']
+ self.prim_intf_mac = module.params['prim_intf_mac']
+ self.endpoint = module.params['stacki_endpoint']
+
+ auth_creds = {'USERNAME': module.params['stacki_user'],
+ 'PASSWORD': module.params['stacki_password']}
+
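+ # Authentication is a two-step CSRF exchange: an initial GET yields a
+ # csrftoken cookie, which is posted with the credentials to /login to
+ # obtain the final csrftoken and sessionid used on all later requests.
+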
+ # Get Initial CSRF
+ cred_a = self.do_request(self.module, self.endpoint, method="GET")
+ cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+ init_csrftoken = None
+ for c in cookie_a:
+ if "csrftoken" in c:
+ init_csrftoken = c.replace("csrftoken=", "")
+ init_csrftoken = init_csrftoken.rstrip("\r\n")
+ break
+
+ # Make Header Dictionary with initial CSRF
+ header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+ 'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+ # Endpoint to get final authentication header
+ login_endpoint = self.endpoint + "/login"
+
+ # Get Final CSRF and Session ID
+ login_req = self.do_request(self.module, login_endpoint, headers=header,
+ payload=urlencode(auth_creds), method='POST')
+
+ cookie_f = login_req.headers.get('Set-Cookie').split(';')
+ csrftoken = None
+ for f in cookie_f:
+ if "csrftoken" in f:
+ csrftoken = f.replace("csrftoken=", "")
+ if "sessionid" in f:
+ sessionid = f.split("sessionid=", 1)[-1]
+ sessionid = sessionid.rstrip("\r\n")
+
+ self.header = {'csrftoken': csrftoken,
+ 'X-CSRFToken': csrftoken,
+ 'sessionid': sessionid,
+ 'Content-type': 'application/json',
+ 'Cookie': login_req.headers.get('Set-Cookie')}
+
+ def do_request(self, module, url, payload=None, headers=None, method=None):
+ res, info = fetch_url(module, url, data=payload, headers=headers, method=method)
+
+ if info['status'] != 200:
+ self.module.fail_json(changed=False, msg=info['msg'])
+
+ return res
+
+ def stack_check_host(self):
+ res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+
+ if self.hostname in to_native(res.read()):
+ return True
+ else:
+ return False
+
+ def stack_sync(self):
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+ def stack_force_install(self, result):
+ data = dict()
+ changed = False
+
+ data['cmd'] = "set host boot {0} action=install" \
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+ changed = True
+
+ self.stack_sync()
+
+ result['changed'] = changed
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+ def stack_add(self, result):
+ data = dict()
+
+ data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+ .format(self.hostname, self.rack, self.rank, self.appliance)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+ self.stack_sync()
+
+ result['changed'] = True
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+ def stack_remove(self, result):
+ data = dict()
+
+ data['cmd'] = "remove host {0}"\
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+ self.stack_sync()
+
+ result['changed'] = True
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ rack=dict(type='int', default=0),
+ rank=dict(type='int', default=0),
+ appliance=dict(type='str', default='backend'),
+ prim_intf=dict(type='str'),
+ prim_intf_ip=dict(type='str'),
+ network=dict(type='str', default='private'),
+ prim_intf_mac=dict(type='str'),
+ stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])),
+ stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True),
+ stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])),
+ force_install=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ )
+
+ result = {'changed': False}
+ missing_params = list()
+
+ stacki = StackiHost(module)
+ host_exists = stacki.stack_check_host()
+
+ # If state is present, but host exists, need force_install flag to put host back into install state
+ if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+ stacki.stack_force_install(result)
+ # If state is present and host exists, but force_install is false, do nothing
+ elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+ result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+ .format(module.params['name'])
+ # Otherwise, state is present but the host doesn't exist; require more params to add the host
+ elif module.params['state'] == 'present' and not host_exists:
+ for param in ['appliance', 'prim_intf',
+ 'prim_intf_ip', 'network', 'prim_intf_mac']:
+ if not module.params[param]:
+ missing_params.append(param)
+ if len(missing_params) > 0: # @FIXME replace with required_if
+ module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+ stacki.stack_add(result)
+ # If state is absent, and host exists, lets remove it.
+ elif module.params['state'] == 'absent' and host_exists:
+ stacki.stack_remove(result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
new file mode 100644
index 00000000..0414f6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
@@ -0,0 +1,465 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+ - Creates a maintenance window for status.io
+ - Deletes a maintenance window for status.io
+notes:
+ - You can use the apiary API URL (http://docs.statusio.apiary.io/) to
+ capture API traffic
+ - Use start_date and start_time with minutes to set a future maintenance window
+author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+options:
+ title:
+ type: str
+ description:
+ - A descriptive title for the maintenance window
+ default: "A new maintenance window"
+ desc:
+ type: str
+ description:
+ - Message describing the maintenance window
+ default: "Created by Ansible"
+ state:
+ type: str
+ description:
+ - Desired state of the maintenance window.
+ default: "present"
+ choices: ["present", "absent"]
+ api_id:
+ type: str
+ description:
+ - Your unique API ID from status.io
+ required: true
+ api_key:
+ type: str
+ description:
+ - Your unique API Key from status.io
+ required: true
+ statuspage:
+ type: str
+ description:
+ - Your unique StatusPage ID from status.io
+ required: true
+ url:
+ type: str
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ default: "https://api.status.io"
+ components:
+ type: list
+ description:
+ - The given name of your component (server name)
+ aliases: ['component']
+ containers:
+ type: list
+ description:
+ - The given name of your container (data center)
+ aliases: ['container']
+ all_infrastructure_affected:
+ description:
+ - If it affects all components and containers
+ type: bool
+ default: 'no'
+ automation:
+ description:
+ - Automatically start and end the maintenance window
+ type: bool
+ default: 'no'
+ maintenance_notify_now:
+ description:
+ - Notify subscribers now
+ type: bool
+ default: 'no'
+ maintenance_notify_72_hr:
+ description:
+ - Notify subscribers 72 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_24_hr:
+ description:
+ - Notify subscribers 24 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_1_hr:
+ description:
+ - Notify subscribers 1 hour before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_id:
+ type: str
+ description:
+ - The maintenance id number when deleting a maintenance window
+ minutes:
+ type: int
+ description:
+ - The length of time, in minutes, that the maintenance will run
+ (starting from playbook runtime)
+ default: 10
+ start_date:
+ type: str
+ description:
+ - Date maintenance is expected to start (Month/Day/Year) (UTC)
+ - End Date is worked out from start_date + minutes
+ start_time:
+ type: str
+ description:
+ - Time maintenance is expected to start (Hour:Minutes) (UTC)
+ - End Time is worked out from start_time + minutes
+'''
+
+EXAMPLES = '''
+- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
+ community.general.statusio_maintenance:
+ title: Router Upgrade from ansible
+ desc: Performing a Router Upgrade
+ components: server1.example.com
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+
+- name: Create a maintenance window for 60 minutes on server1 and server2
+ community.general.statusio_maintenance:
+ title: Routine maintenance
+ desc: Some security updates
+ components:
+ - server1.example.com
+ - server2.example.com
+ minutes: 60
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+ delegate_to: localhost
+
+- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
+ community.general.statusio_maintenance:
+ title: Data center downtime
+ desc: Performing a Upgrade to our data center
+ components: Primary Data Center
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ start_date: 01/01/2016
+ start_time: 12:00
+ minutes: 1440
+
+- name: Delete a maintenance window
+ community.general.statusio_maintenance:
+ title: Remove a maintenance window
+ maintenance_id: 561f90faf74bc94a4700087b
+ statuspage: statuspage_id
+ api_id: api_id
+ api_key: api_key
+ state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+ headers = {
+ "x-api-id": api_id,
+ "x-api-key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = open_url(
+ url + "/v2/component/list/" + statuspage, headers=headers)
+ data = json.loads(response.read())
+ if data['status']['message'] == 'Authentication failed':
+ return 1, None, None, "Authentication failed: " \
+ "Check api_id/api_key and statuspage id."
+ else:
+ auth_headers = headers
+ auth_content = data
+ except Exception as e:
+ return 1, None, None, to_native(e)
+ return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+ host_ids = []
+ lower_components = [x.lower() for x in components]
+ for result in auth_content["result"]:
+ if result['name'].lower() in lower_components:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_components.remove(result['name'].lower())
+ if len(lower_components):
+ # items not found in the api
+ return 1, None, lower_components
+ return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+ host_ids = []
+ lower_containers = [x.lower() for x in containers]
+ for result in auth_content["result"]:
+ if result["containers"][0]["name"].lower() in lower_containers:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_containers.remove(result["containers"][0]["name"].lower())
+
+ if len(lower_containers):
+ # items not found in the api
+ return 1, None, lower_containers
+ return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
+ returned_date = []
+ if start_date and start_time:
+ try:
+ datetime.datetime.strptime(start_date, '%m/%d/%Y')
+ returned_date.append(start_date)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_date format."
+ try:
+ datetime.datetime.strptime(start_time, '%H:%M')
+ returned_date.append(start_time)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_time format."
+ try:
+ # Work out end date/time based on minutes
+ date_time_start = datetime.datetime.strptime(
+ start_time + start_date, '%H:%M%m/%d/%Y')
+ delta = date_time_start + datetime.timedelta(minutes=minutes)
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ except (NameError, ValueError):
+ return 1, None, "Couldn't work out a valid date"
+ else:
+ now = datetime.datetime.utcnow()
+ delta = now + datetime.timedelta(minutes=minutes)
+ # start_date
+ returned_date.append(now.strftime("%m/%d/%Y"))
+ returned_date.append(now.strftime("%H:%M"))
+ # end_date
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ return 0, returned_date, None
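+
+# get_date_time() returns [start_date, start_time, end_date, end_time],
+# e.g. ['01/01/2016', '12:00', '01/02/2016', '12:00'] for minutes=1440
+# (dates illustrative; formats are %m/%d/%Y and %H:%M, as parsed above).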
+
+
+def create_maintenance(auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation, title, desc,
+ returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr):
+ returned_dates = [[x] for x in returned_date]
+ component_id = []
+ container_id = []
+ for val in host_ids:
+ component_id.append(val['component_id'])
+ container_id.append(val['container_id'])
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "components": component_id,
+ "containers": container_id,
+ "all_infrastructure_affected": str(int(all_infrastructure_affected)),
+ "automation": str(int(automation)),
+ "maintenance_name": title,
+ "maintenance_details": desc,
+ "date_planned_start": returned_dates[0],
+ "time_planned_start": returned_dates[1],
+ "date_planned_end": returned_dates[2],
+ "time_planned_end": returned_dates[3],
+ "maintenance_notify_now": str(int(maintenance_notify_now)),
+ "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
+ "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
+ "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
+ })
+ response = open_url(
+ url + "/v2/maintenance/schedule", data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+
+ if data["status"]["error"] == "yes":
+ return 1, None, data["status"]["message"]
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "maintenance_id": maintenance_id,
+ })
+ response = open_url(
+ url=url + "/v2/maintenance/delete",
+ data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+ if data["status"]["error"] == "yes":
+ return 1, None, "Invalid maintenance_id"
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_id=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ statuspage=dict(required=True),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ url=dict(default='https://api.status.io', required=False),
+ components=dict(type='list', required=False, default=None,
+ aliases=['component']),
+ containers=dict(type='list', required=False, default=None,
+ aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False,
+ required=False),
+ automation=dict(type='bool', default=False, required=False),
+ title=dict(required=False, default='A new maintenance window'),
+ desc=dict(required=False, default='Created by Ansible'),
+ minutes=dict(type='int', required=False, default=10),
+ maintenance_notify_now=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_id=dict(required=False, default=None),
+ start_date=dict(default=None, required=False),
+ start_time=dict(default=None, required=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ api_id = module.params['api_id']
+ api_key = module.params['api_key']
+ statuspage = module.params['statuspage']
+ state = module.params['state']
+ url = module.params['url']
+ components = module.params['components']
+ containers = module.params['containers']
+ all_infrastructure_affected = module.params['all_infrastructure_affected']
+ automation = module.params['automation']
+ title = module.params['title']
+ desc = module.params['desc']
+ minutes = module.params['minutes']
+ maintenance_notify_now = module.params['maintenance_notify_now']
+ maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
+ maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
+ maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
+ maintenance_id = module.params['maintenance_id']
+ start_date = module.params['start_date']
+ start_time = module.params['start_time']
+
+ if state == "present":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+ auth_content = {}
+
+ if minutes or (start_time and start_date):
+ (rc, returned_date, error) = get_date_time(
+ start_date, start_time, minutes)
+ if rc != 0:
+ module.fail_json(msg="Failed to set date/time: %s" % error)
+
+ if not components and not containers:
+ return module.fail_json(msg="A Component or Container must be "
+ "defined")
+ elif components and containers:
+ return module.fail_json(msg="Components and containers cannot "
+ "be used together")
+ else:
+ if components:
+ (rc, host_ids, error) = get_component_ids(auth_content,
+ components)
+ if rc != 0:
+ module.fail_json(msg="Failed to find component %s" % error)
+
+ if containers:
+ (rc, host_ids, error) = get_container_ids(auth_content,
+ containers)
+ if rc != 0:
+ module.fail_json(msg="Failed to find container %s" % error)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = create_maintenance(
+ auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation,
+ title, desc, returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr)
+ if rc == 0:
+ module.exit_json(changed=True, result="Successfully created "
+ "maintenance")
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s"
+ % error)
+
+ if state == "absent":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = delete_maintenance(
+ auth_headers, url, statuspage, maintenance_id)
+ if rc == 0:
+ module.exit_json(
+ changed=True,
+ result="Successfully deleted maintenance"
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete maintenance: %s" % error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py
new file mode 100644
index 00000000..dfac03ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: emc_vnx_sg_member
+
+short_description: Manage storage group member on EMC VNX
+
+
+description:
+ - "This module manages the members of an existing storage group."
+
+extends_documentation_fragment:
+- community.general.emc.emc_vnx
+
+
+options:
+ name:
+ description:
+ - Name of the Storage group to manage.
+ required: true
+ lunid:
+ description:
+ - Lun id to be added.
+ required: true
+ state:
+ description:
+ - Indicates the desired lunid state.
+ - C(present) ensures specified lunid is present in the Storage Group.
+ - C(absent) ensures specified lunid is absent from Storage Group.
+ default: present
+ choices: [ "present", "absent"]
+
+
+author:
+ - Luca 'remix_tj' Lorenzetto (@remixtj)
+'''
+
+EXAMPLES = '''
+- name: Add lun to storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: present
+
+- name: Remove lun from storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: absent
+'''
+
+RETURN = '''
+hluid:
+ description: LUNID that hosts attached to the storage group will see.
+ type: int
+ returned: success
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
+
+LIB_IMP_ERR = None
+try:
+ from storops import VNXSystem
+ from storops.exception import VNXCredentialError, VNXStorageGroupError, \
+ VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
+ HAS_LIB = True
+except Exception:
+ LIB_IMP_ERR = traceback.format_exc()
+ HAS_LIB = False
+
+
+def run_module():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ lunid=dict(type='int', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module_args.update(emc_vnx_argument_spec)
+
+ result = dict(
+ changed=False,
+ hluid=None
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ if not HAS_LIB:
+ module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
+ exception=LIB_IMP_ERR)
+
+ sp_user = module.params['sp_user']
+ sp_address = module.params['sp_address']
+ sp_password = module.params['sp_password']
+ alu = module.params['lunid']
+
+    # If the module is running in check mode, do not make any changes to
+    # the environment; just return the current state with no
+    # modifications.
+ if module.check_mode:
+ return result
+
+ try:
+ vnx = VNXSystem(sp_address, sp_user, sp_password)
+ sg = vnx.get_sg(module.params['name'])
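+        # storops returns a storage-group object even when the group is
+        # not defined on the array; its 'existed' flag distinguishes the
+        # two cases.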
+ if sg.existed:
+ if module.params['state'] == 'present':
+ if not sg.has_alu(alu):
+ try:
+ result['hluid'] = sg.attach_alu(alu)
+ result['changed'] = True
+ except VNXAluAlreadyAttachedError:
+ result['hluid'] = sg.get_hlu(alu)
+ except (VNXAttachAluError, VNXStorageGroupError) as e:
+ module.fail_json(msg='Error attaching {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ result['hluid'] = sg.get_hlu(alu)
+ if module.params['state'] == 'absent' and sg.has_alu(alu):
+ try:
+ sg.detach_alu(alu)
+ result['changed'] = True
+ except VNXDetachAluNotFoundError:
+ # being not attached when using absent is OK
+ pass
+ except VNXStorageGroupError as e:
+ module.fail_json(msg='Error detaching alu {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ module.fail_json(msg='No such storage group named '
+ '{0}'.format(module.params['name']),
+ **result)
+ except VNXCredentialError as e:
+ module.fail_json(msg='{0}'.format(to_native(e)), **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py
new file mode 100644
index 00000000..46306585
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_heal_info
+short_description: Gather information on self-heal or rebalance status
+author: "Devyani Kota (@devyanikota)"
+description:
+ - Gather facts about either self-heal or rebalance status.
+ - This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.gluster_heal_info) module no longer returns C(ansible_facts)!
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ status_filter:
+ default: "self-heal"
+ choices: ["self-heal", "rebalance"]
+ description:
+ - Determines which facts are to be returned.
+        - If the C(status_filter) is C(self-heal), the status of self-heal, along with the number of files still in the healing process, is returned.
+ - If the C(status_filter) is C(rebalance), rebalance status is returned.
+requirements:
+ - GlusterFS > 3.2
+'''
+
+EXAMPLES = '''
+- name: Gather self-heal facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: self-heal
+ register: self_heal_status
+- ansible.builtin.debug:
+ var: self_heal_status
+
+- name: Gather rebalance facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: rebalance
+ register: rebalance_status
+- ansible.builtin.debug:
+ var: rebalance_status
+'''
+
+RETURN = '''
+name:
+    description: GlusterFS volume name.
+    returned: always
+    type: str
+status_filter:
+    description: Whether self-heal or rebalance status is to be returned.
+    returned: always
+    type: str
+heal_info:
+    description: List of files that still need healing.
+    returned: on success
+    type: list
+rebalance_status:
+    description: Status of the rebalance operation.
+    returned: on success
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import LooseVersion
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
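+    # '--mode=script' runs the gluster CLI non-interactively so the
+    # command never blocks waiting for a confirmation prompt.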
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def get_self_heal_status(name):
+ out = run_gluster(['volume', 'heal', name, 'info'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+    raw_out = out.split("\n")
+    heal_info = []
+    br_dict = {}
+    # return files that still need healing.
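+    # The parser expects one block per brick: a 'Brick <host>:<path>'
+    # line followed by 'Status:' and 'Number of entries:' lines.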
+ for line in raw_out:
+ if 'Brick' in line:
+ br_dict = {}
+            br_dict['brick'] = line.strip()[len('Brick'):].strip()
+ elif 'Status' in line:
+ br_dict['status'] = line.split(":")[1].strip()
+ elif 'Number' in line:
+ br_dict['no_of_entries'] = line.split(":")[1].strip()
+        elif line.startswith('/') or line.startswith('<'):
+ continue
+        else:
+            if br_dict:
+                heal_info.append(br_dict)
+            br_dict = {}
+ return heal_info
+
+
+def get_rebalance_status(name):
+ out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ raw_out = out.split("\n")
+ rebalance_status = []
+ # return the files that are either still 'in progress' state or 'completed'.
+ for line in raw_out:
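+        # Collapse runs of whitespace so the fixed-width status table
+        # can be split into columns reliably.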
+ line = " ".join(line.split())
+ line_vals = line.split(" ")
+ if line_vals[0].startswith('-') or line_vals[0].startswith('Node'):
+ continue
+ node_dict = {}
+ if len(line_vals) == 1 or len(line_vals) == 4:
+ continue
+ node_dict['node'] = line_vals[0]
+ node_dict['rebalanced_files'] = line_vals[1]
+ node_dict['failures'] = line_vals[4]
+ if 'in progress' in line:
+ node_dict['status'] = line_vals[5] + line_vals[6]
+ rebalance_status.append(node_dict)
+ elif 'completed' in line:
+ node_dict['status'] = line_vals[5]
+ rebalance_status.append(node_dict)
+ return rebalance_status
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than 3.2, it is an invalid version
+ # return True
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+def main():
+ global module
+ global glusterbin
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']),
+ ),
+ )
+ is_old_facts = module._name in ('gluster_heal_facts', 'community.general.gluster_heal_facts')
+ if is_old_facts:
+ module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ glusterbin = module.get_bin_path('gluster', True)
+ required_version = "3.2"
+ status_filter = module.params['status_filter']
+ volume_name = module.params['name']
+ heal_info = ''
+ rebalance_status = ''
+
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+        module.fail_json(msg="GlusterFS version >= %s is required" %
+                         required_version)
+
+ try:
+ if status_filter == "self-heal":
+ heal_info = get_self_heal_status(volume_name)
+ elif status_filter == "rebalance":
+ rebalance_status = get_rebalance_status(volume_name)
+ except Exception as e:
+ module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc())
+
+ facts = {}
+ facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status}
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=facts)
+ else:
+ module.exit_json(**facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py
new file mode 100644
index 00000000..e9e6fd71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
+# Copyright 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_peer
+short_description: Attach/Detach peers to/from the cluster
+description:
+    - Create or shrink a GlusterFS trusted storage pool. A set of nodes can be
+      added to an existing trusted storage pool, a new storage pool can be
+      formed, or nodes can be removed from an existing trusted storage pool.
+author: Sachidananda Urs (@sac)
+options:
+ state:
+ choices: ["present", "absent"]
+ default: "present"
+ description:
+ - Determines whether the nodes should be attached to the pool or
+ removed from the pool. If the state is present, nodes will be
+ attached to the pool. If state is absent, nodes will be detached
+ from the pool.
+ type: str
+ nodes:
+ description:
+ - List of nodes that have to be probed into the pool.
+ required: true
+ type: list
+ force:
+ type: bool
+ default: false
+ description:
+            - Applicable only while removing nodes from the pool. gluster
+              will refuse to detach a node from the pool if any one of the nodes
+              is down; in such cases, force can be used.
+requirements:
+ - GlusterFS > 3.2
+notes:
+ - This module does not support check mode.
+'''
+
+EXAMPLES = '''
+- name: Create a trusted storage pool
+ community.general.gluster_peer:
+ state: present
+ nodes:
+ - 10.0.1.5
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool by force
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.0.1
+ force: true
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from distutils.version import LooseVersion
+
+
+class Peer(object):
+ def __init__(self, module):
+ self.module = module
+ self.state = self.module.params['state']
+ self.nodes = self.module.params['nodes']
+ self.glustercmd = self.module.get_bin_path('gluster', True)
+ self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ self.action = ''
+ self.force = ''
+
+ def gluster_peer_ops(self):
+ if not self.nodes:
+ self.module.fail_json(msg="nodes list cannot be empty")
+ self.force = 'force' if self.module.params.get('force') else ''
+ if self.state == 'present':
+ self.nodes = self.get_to_be_probed_hosts(self.nodes)
+ self.action = 'probe'
+            # In case of peer probe, we do not need 'force'.
+ self.force = ''
+ else:
+ self.action = 'detach'
+ self.call_peer_commands()
+
+ def get_to_be_probed_hosts(self, hosts):
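+        # 'gluster pool list' is expected to print a header row followed
+        # by UUID<TAB>Hostname<TAB>State rows; hosts already listed there
+        # do not need to be probed again.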
+ peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
+ rc, output, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
+ peers_in_cluster = [line.split('\t')[1].strip() for
+ line in filter(None, output.split('\n')[1:])]
+ try:
+ peers_in_cluster.remove('localhost')
+ except ValueError:
+ # It is ok not to have localhost in list
+ pass
+ hosts_to_be_probed = [host for host in hosts if host not in
+ peers_in_cluster]
+ return hosts_to_be_probed
+
+ def call_peer_commands(self):
+ result = {}
+ result['msg'] = ''
+ result['changed'] = False
+
+ for node in self.nodes:
+ peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
+ if self.force:
+ peercmd.append(self.force)
+ rc, out, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
+ if rc:
+ result['rc'] = rc
+ result['msg'] = err
+ # Fail early, do not wait for the loop to finish
+ self.module.fail_json(**result)
+ else:
+                if 'already in peer' not in out and \
+                   'localhost not needed' not in out:
+                    result['changed'] = True
+ self.module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', required=False, default=False),
+ nodes=dict(type='list', required=True),
+ state=dict(type='str', choices=['absent', 'present'],
+ default='present'),
+ ),
+ supports_check_mode=False
+ )
+ pops = Peer(module)
+ required_version = "3.2"
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+        module.fail_json(msg="GlusterFS version >= %s is required" %
+                         required_version)
+ pops.gluster_peer_ops()
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than 3.2, it is an invalid version
+ # return True
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py
new file mode 100644
index 00000000..d6444ef5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Taneli Leppä <taneli@crasman.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gluster_volume
+short_description: Manage GlusterFS volumes
+description:
+ - Create, remove, start, stop and tune GlusterFS volumes
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ state:
+ description:
+            - Use present/absent to ensure whether a volume exists.
+ Use started/stopped to control its availability.
+ required: true
+ choices: ['absent', 'present', 'started', 'stopped']
+ cluster:
+ description:
+ - List of hosts to use for probing and brick setup.
+ host:
+ description:
+ - Override local hostname (for peer probing purposes).
+ replicas:
+ description:
+ - Replica count for volume.
+ arbiters:
+ description:
+ - Arbiter count for volume.
+ stripes:
+ description:
+ - Stripe count for volume.
+ disperses:
+ description:
+ - Disperse count for volume.
+ redundancies:
+ description:
+ - Redundancy count for volume.
+ transport:
+ description:
+ - Transport type for volume.
+ default: tcp
+ choices: [ tcp, rdma, 'tcp,rdma' ]
+ bricks:
+ description:
+ - Brick paths on servers. Multiple brick paths can be separated by commas.
+ aliases: [ brick ]
+ start_on_create:
+ description:
+ - Controls whether the volume is started after creation or not.
+ type: bool
+ default: 'yes'
+ rebalance:
+ description:
+ - Controls whether the cluster is rebalanced after changes.
+ type: bool
+ default: 'no'
+ directory:
+ description:
+ - Directory for limit-usage.
+ options:
+ description:
+ - A dictionary/hash with options/settings for the volume.
+ quota:
+ description:
+ - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
+ force:
+ description:
+            - If the brick is being created in the root partition, the module will fail.
+ Set force to true to override this behaviour.
+ type: bool
+ default: false
+notes:
+ - Requires cli tools for GlusterFS on servers.
+  - Will add new bricks, but removing bricks requires the I(force) option.
+author:
+- Taneli Leppä (@rosmo)
+'''
+
+EXAMPLES = """
+- name: Create gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ bricks: /bricks/brick1/g1
+ rebalance: yes
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Tune
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ performance.cache-size: 256MB
+
+- name: Set multiple options on GlusterFS volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ { performance.cache-size: 128MB,
+ write-behind: 'off',
+ quick-read: 'on'
+ }
+
+- name: Start gluster volume
+ community.general.gluster_volume:
+ state: started
+ name: test1
+
+- name: Limit usage
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ directory: /foo
+ quota: 20.0MB
+
+- name: Stop gluster volume
+ community.general.gluster_volume:
+ state: stopped
+ name: test1
+
+- name: Remove gluster volume
+ community.general.gluster_volume:
+ state: absent
+ name: test1
+
+- name: Create gluster volume with multiple bricks
+ community.general.gluster_volume:
+ state: present
+ name: test2
+ bricks: /bricks/brick1/g2,/bricks/brick2/g2
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Remove the bricks from gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick1/b1,/bricks/brick2/b2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
+
+- name: Reduce cluster configuration
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick3/b1,/bricks/brick4/b2
+ replicas: 2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
+"""
+
+import re
+import socket
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def run_gluster_nofail(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin]
+ args.extend(gargs)
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ return None
+ return out
+
+
+def get_peers():
+ out = run_gluster(['peer', 'status'])
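+    # The parsing below expects 'Hostname:', 'Uuid:' and 'State:' lines
+    # for each peer, optionally followed by an 'Other names:' block that
+    # lists additional addresses for the same peer.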
+ peers = {}
+ hostname = None
+ uuid = None
+ state = None
+ shortNames = False
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'hostname':
+ hostname = value
+ shortNames = False
+ if key.lower() == 'uuid':
+ uuid = value
+ if key.lower() == 'state':
+ state = value
+ peers[hostname] = [uuid, state]
+ elif row.lower() == 'other names:':
+ shortNames = True
+ elif row != '' and shortNames is True:
+ peers[row] = [uuid, state]
+ elif row == '':
+ shortNames = False
+ return peers
+
+
+def get_volumes():
+ out = run_gluster(['volume', 'info'])
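+    # The parsing below expects 'Key: Value' lines per volume; blank or
+    # unrecognized lines end the current volume section, which is then
+    # flushed into the result dict.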
+
+ volumes = {}
+ volume = {}
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'volume name':
+ volume['name'] = value
+ volume['options'] = {}
+ volume['quota'] = False
+ if key.lower() == 'volume id':
+ volume['id'] = value
+ if key.lower() == 'status':
+ volume['status'] = value
+ if key.lower() == 'transport-type':
+ volume['transport'] = value
+ if value.lower().endswith(' (arbiter)'):
+ if 'arbiters' not in volume:
+ volume['arbiters'] = []
+ value = value[:-10]
+ volume['arbiters'].append(value)
+ elif key.lower() == 'number of bricks':
+ volume['replicas'] = value[-1:]
+ if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
+ if 'bricks' not in volume:
+ volume['bricks'] = []
+ volume['bricks'].append(value)
+ # Volume options
+ if '.' in key:
+ if 'options' not in volume:
+ volume['options'] = {}
+ volume['options'][key] = value
+ if key == 'features.quota' and value == 'on':
+ volume['quota'] = True
+ else:
+ if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
+ if len(volume) > 0:
+ volumes[volume['name']] = volume
+ volume = {}
+ return volumes
+
+
+def get_quotas(name, nofail):
+ quotas = {}
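+    # Quota list rows start with the directory path followed by the
+    # configured limit; all other rows are header or footer noise.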
+ if nofail:
+ out = run_gluster_nofail(['volume', 'quota', name, 'list'])
+ if not out:
+ return quotas
+ else:
+ out = run_gluster(['volume', 'quota', name, 'list'])
+ for row in out.split('\n'):
+ if row[:1] == '/':
+ q = re.split(r'\s+', row)
+ quotas[q[0]] = q[1]
+ return quotas
+
+
+def wait_for_peer(host):
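+    # Poll peer status up to four times, one second apart, until the
+    # probed host shows up as 'Peer in Cluster'.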
+ for x in range(0, 4):
+ peers = get_peers()
+ if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
+ return True
+ time.sleep(1)
+ return False
+
+
+def probe(host, myhostname):
+ global module
+ out = run_gluster(['peer', 'probe', host])
+ if out.find('localhost') == -1 and not wait_for_peer(host):
+ module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
+
+
+def probe_all_peers(hosts, peers, myhostname):
+ for host in hosts:
+ host = host.strip() # Clean up any extra space for exact comparison
+ if host not in peers:
+ probe(host, myhostname)
+
+
+def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
+ args = ['volume', 'create']
+ args.append(name)
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ if arbiter:
+ args.append('arbiter')
+ args.append(str(arbiter))
+ if disperse:
+ args.append('disperse')
+ args.append(str(disperse))
+ if redundancy:
+ args.append('redundancy')
+ args.append(str(redundancy))
+ args.append('transport')
+ args.append(transport)
+ for brick in bricks:
+ for host in hosts:
+ args.append(('%s:%s' % (host, brick)))
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def start_volume(name):
+ run_gluster(['volume', 'start', name])
+
+
+def stop_volume(name):
+ run_gluster(['volume', 'stop', name])
+
+
+def set_volume_option(name, option, parameter):
+ run_gluster(['volume', 'set', name, option, parameter])
+
+
+def add_bricks(name, new_bricks, stripe, replica, force):
+ args = ['volume', 'add-brick', name]
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ args.extend(new_bricks)
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def remove_bricks(name, removed_bricks, force):
+ # max-tries=12 with default_interval=10 secs
+ max_tries = 12
+ retries = 0
+ success = False
+ args = ['volume', 'remove-brick', name]
+ args.extend(removed_bricks)
+ # create a copy of args to use for commit operation
+ args_c = args[:]
+ args.append('start')
+ run_gluster(args)
+ # remove-brick operation needs to be followed by commit operation.
+ if not force:
+ module.fail_json(msg="Force option is mandatory.")
+ else:
+ while retries < max_tries:
+ last_brick = removed_bricks[-1]
+ out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status'])
+ for row in out.split('\n')[1:]:
+ if 'completed' in row:
+ # remove-brick successful, call commit operation.
+ args_c.append('commit')
+ out = run_gluster(args_c)
+ success = True
+ break
+ else:
+ time.sleep(10)
+ if success:
+ break
+ retries += 1
+ if not success:
+ # remove-brick still in process, needs to be committed after completion.
+        module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n"
+                             "Commit operation still needs to be performed.")
+
+
+def reduce_config(name, removed_bricks, replicas, force):
+ out = run_gluster(['volume', 'heal', name, 'info'])
+ summary = out.split("\n")
+ for line in summary:
+ if 'Number' in line and int(line.split(":")[1].strip()) != 0:
+ module.fail_json(msg="Operation aborted, self-heal in progress.")
+ args = ['volume', 'remove-brick', name, 'replica', replicas]
+ args.extend(removed_bricks)
+ if force:
+ args.append('force')
+ else:
+ module.fail_json(msg="Force option is mandatory")
+ run_gluster(args)
+
+
+def do_rebalance(name):
+ run_gluster(['volume', 'rebalance', name, 'start'])
+
+
+def enable_quota(name):
+ run_gluster(['volume', 'quota', name, 'enable'])
+
+
+def set_quota(name, directory, value):
+ run_gluster(['volume', 'quota', name, 'limit-usage', directory, value])
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
+ cluster=dict(type='list'),
+ host=dict(type='str'),
+ stripes=dict(type='int'),
+ replicas=dict(type='int'),
+ arbiters=dict(type='int'),
+ disperses=dict(type='int'),
+ redundancies=dict(type='int'),
+ transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
+ bricks=dict(type='str', aliases=['brick']),
+ start_on_create=dict(type='bool', default=True),
+ rebalance=dict(type='bool', default=False),
+ options=dict(type='dict', default={}),
+ quota=dict(type='str'),
+ directory=dict(type='str'),
+ force=dict(type='bool', default=False),
+ ),
+ )
+
+ global glusterbin
+ glusterbin = module.get_bin_path('gluster', True)
+
+ changed = False
+
+ action = module.params['state']
+ volume_name = module.params['name']
+ cluster = module.params['cluster']
+ brick_paths = module.params['bricks']
+ stripes = module.params['stripes']
+ replicas = module.params['replicas']
+ arbiters = module.params['arbiters']
+ disperses = module.params['disperses']
+ redundancies = module.params['redundancies']
+ transport = module.params['transport']
+ myhostname = module.params['host']
+ start_on_create = module.boolean(module.params['start_on_create'])
+ rebalance = module.boolean(module.params['rebalance'])
+ force = module.boolean(module.params['force'])
+
+ if not myhostname:
+ myhostname = socket.gethostname()
+
+ # Clean up if last element is empty. Consider that yml can look like this:
+ # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
+ if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
+ cluster = cluster[0:-1]
+
+ if cluster is None:
+ cluster = []
+
+ if brick_paths is not None and "," in brick_paths:
+ brick_paths = brick_paths.split(",")
+ else:
+ brick_paths = [brick_paths]
+
+ options = module.params['options']
+ quota = module.params['quota']
+ directory = module.params['directory']
+
+ # get current state info
+ peers = get_peers()
+ volumes = get_volumes()
+ quotas = {}
+ if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
+ quotas = get_quotas(volume_name, True)
+
+ # do the work!
+ if action == 'absent':
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ run_gluster(['volume', 'delete', volume_name])
+ changed = True
+
+ if action == 'present':
+ probe_all_peers(cluster, peers, myhostname)
+
+ # create if it doesn't exist
+ if volume_name not in volumes:
+ create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
+ volumes = get_volumes()
+ changed = True
+
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
+ start_volume(volume_name)
+ changed = True
+
+ # switch bricks
+ new_bricks = []
+ removed_bricks = []
+ all_bricks = []
+ bricks_in_volume = volumes[volume_name]['bricks']
+
+ for node in cluster:
+ for brick_path in brick_paths:
+ brick = '%s:%s' % (node, brick_path)
+ all_bricks.append(brick)
+ if brick not in bricks_in_volume:
+ new_bricks.append(brick)
+
+ if not new_bricks and len(all_bricks) > 0 and \
+ len(all_bricks) < len(bricks_in_volume):
+ for brick in bricks_in_volume:
+ if brick not in all_bricks:
+ removed_bricks.append(brick)
+
+ if new_bricks:
+ add_bricks(volume_name, new_bricks, stripes, replicas, force)
+ changed = True
+
+ if removed_bricks:
+ if replicas and int(replicas) < int(volumes[volume_name]['replicas']):
+ reduce_config(volume_name, removed_bricks, str(replicas), force)
+ else:
+ remove_bricks(volume_name, removed_bricks, force)
+ changed = True
+
+ # handle quotas
+ if quota:
+ if not volumes[volume_name]['quota']:
+ enable_quota(volume_name)
+ quotas = get_quotas(volume_name, False)
+ if directory not in quotas or quotas[directory] != quota:
+ set_quota(volume_name, directory, quota)
+ changed = True
+
+ # set options
+ for option in options.keys():
+ if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
+ set_volume_option(volume_name, option, options[option])
+ changed = True
+
+ else:
+ module.fail_json(msg='failed to create volume %s' % volume_name)
+
+ if action != 'absent' and volume_name not in volumes:
+ module.fail_json(msg='volume not found %s' % volume_name)
+
+ if action == 'started':
+ if volumes[volume_name]['status'].lower() != 'started':
+ start_volume(volume_name)
+ changed = True
+
+ if action == 'stopped':
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ changed = True
+
+ if changed:
+ volumes = get_volumes()
+ if rebalance:
+ do_rebalance(volume_name)
+
+ facts = {}
+ facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}
+
+ module.exit_json(changed=changed, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py
new file mode 100644
index 00000000..04604c09
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+short_description: Manage HPE StoreServ 3PAR CPG
+author:
+ - Farhan Nomani (@farhan7500)
+ - Gautham P Hegde (@gautamphegde)
+description:
+ - Create and delete CPG on HPE 3PAR.
+module: ss_3par_cpg
+options:
+ cpg_name:
+ description:
+ - Name of the CPG.
+ type: str
+ required: true
+ disk_type:
+ choices:
+ - FC
+ - NL
+ - SSD
+ description:
+ - Specifies that physical disks must have the specified device type.
+ type: str
+ domain:
+ description:
+ - Specifies the name of the domain in which the object will reside.
+ type: str
+ growth_increment:
+ description:
+            - Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage
+              created on each auto-grow operation.
+ type: str
+ growth_limit:
+ description:
+ - Specifies that the autogrow operation is limited to the specified
+              storage amount that sets the growth limit (in MiB, GiB or TiB).
+ type: str
+ growth_warning:
+ description:
+            - Specifies the threshold (in MiB, GiB or TiB) of used logical disk space that, when
+              exceeded, results in a warning alert.
+ type: str
+ high_availability:
+ choices:
+ - PORT
+ - CAGE
+ - MAG
+ description:
+ - Specifies that the layout must support the failure of one port pair,
+ one cage, or one magazine.
+ type: str
+ raid_type:
+ choices:
+ - R0
+ - R1
+ - R5
+ - R6
+ description:
+ - Specifies the RAID type for the logical disk.
+ type: str
+ set_size:
+ description:
+ - Specifies the set size in the number of chunklets.
+ type: int
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Whether the specified CPG should exist or not.
+ required: true
+ type: str
+ secure:
+ description:
+ - Specifies whether the certificate needs to be validated while communicating.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.hpe3par
+
+'''
+
+
+EXAMPLES = r'''
+- name: Create CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: present
+ cpg_name: sample_cpg
+ domain: sample_domain
+ growth_increment: 32000 MiB
+ growth_limit: 64000 MiB
+ growth_warning: 48000 MiB
+ raid_type: R6
+ set_size: 8
+ high_availability: MAG
+ disk_type: FC
+ secure: no
+
+- name: Delete CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: absent
+ cpg_name: sample_cpg
+ secure: no
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+try:
+ from hpe3par_sdk import client
+ from hpe3parclient import exceptions
+ HAS_3PARCLIENT = True
+except ImportError:
+ HAS_3PARCLIENT = False
+
+
+def validate_set_size(raid_type, set_size):
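+    # Each RAID level supports only certain set sizes; the SDK exposes
+    # the valid values in its RAID_MAP table.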
+ if raid_type:
+ set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
+ if set_size in set_size_array:
+ return True
+ return False
+
+
+def cpg_ldlayout_map(ldlayout_dict):
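+    # Translate the human-readable RAID type and HA strings into the
+    # numeric constants expected by the WSAPI client.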
+    if ldlayout_dict['RAIDType']:
+ ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[
+ ldlayout_dict['RAIDType']]['raid_value']
+    if ldlayout_dict['HA']:
+ ldlayout_dict['HA'] = getattr(
+ client.HPE3ParClient, ldlayout_dict['HA'])
+ return ldlayout_dict
+
+
+def create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type):
+ try:
+ if not validate_set_size(raid_type, set_size):
+ return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
+ if not client_obj.cpgExists(cpg_name):
+
+ disk_patterns = []
+ if disk_type:
+ disk_type = getattr(client.HPE3ParClient, disk_type)
+ disk_patterns = [{'diskType': disk_type}]
+ ld_layout = {
+ 'RAIDType': raid_type,
+ 'setSize': set_size,
+ 'HA': high_availability,
+ 'diskPatterns': disk_patterns}
+ ld_layout = cpg_ldlayout_map(ld_layout)
+ if growth_increment is not None:
+ growth_increment = hpe3par.convert_to_binary_multiple(
+ growth_increment)
+ if growth_limit is not None:
+ growth_limit = hpe3par.convert_to_binary_multiple(
+ growth_limit)
+ if growth_warning is not None:
+ growth_warning = hpe3par.convert_to_binary_multiple(
+ growth_warning)
+ optional = {
+ 'domain': domain,
+ 'growthIncrementMiB': growth_increment,
+ 'growthLimitMiB': growth_limit,
+ 'usedLDWarningAlertMiB': growth_warning,
+ 'LDLayout': ld_layout}
+ client_obj.createCPG(cpg_name, optional)
+ else:
+ return (True, False, "CPG already present")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG creation failed | %s" % (e))
+ return (True, True, "Created CPG %s successfully." % cpg_name)
+
+
+def delete_cpg(
+ client_obj,
+ cpg_name):
+ try:
+ if client_obj.cpgExists(cpg_name):
+ client_obj.deleteCPG(cpg_name)
+ else:
+ return (True, False, "CPG does not exist")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG delete failed | %s" % e)
+ return (True, True, "Deleted CPG %s successfully." % cpg_name)
+
+
+def main():
+ module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
+ required_together=[['raid_type', 'set_size']])
+ if not HAS_3PARCLIENT:
+ module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
+
+ if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
+ module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
+
+ storage_system_ip = module.params["storage_system_ip"]
+ storage_system_username = module.params["storage_system_username"]
+ storage_system_password = module.params["storage_system_password"]
+ cpg_name = module.params["cpg_name"]
+ domain = module.params["domain"]
+ growth_increment = module.params["growth_increment"]
+ growth_limit = module.params["growth_limit"]
+ growth_warning = module.params["growth_warning"]
+ raid_type = module.params["raid_type"]
+ set_size = module.params["set_size"]
+ high_availability = module.params["high_availability"]
+ disk_type = module.params["disk_type"]
+ secure = module.params["secure"]
+
+ wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
+ try:
+ client_obj = client.HPE3ParClient(wsapi_url, secure)
+ except exceptions.SSLCertFailed:
+ module.fail_json(msg="SSL Certificate Failed")
+ except exceptions.ConnectionError:
+ module.fail_json(msg="Connection Error")
+ except exceptions.UnsupportedVersion:
+ module.fail_json(msg="Unsupported WSAPI version")
+ except Exception as e:
+ module.fail_json(msg="Initializing client failed. %s" % e)
+
+ if storage_system_username is None or storage_system_password is None:
+ module.fail_json(msg="Storage system username or password is None")
+ if cpg_name is None:
+ module.fail_json(msg="CPG Name is None")
+
+ # States
+ if module.params["state"] == "present":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG create failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ elif module.params["state"] == "absent":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = delete_cpg(
+ client_obj,
+ cpg_name
+ )
+ except Exception as e:
+            module.fail_json(msg="CPG delete failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ if return_status:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py
new file mode 100644
index 00000000..29690497
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_domain
+short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
+
+description:
+    - "This module can be used to add domains to or remove them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ domain:
+ description:
+ - Name of the domain to be managed.
+ required: true
+ state:
+ description:
+ - The desired state of the domain.
+ default: "present"
+ choices: [ "present", "absent" ]
+ ldap_id:
+ description:
+            - LDAP ID to add to the domain.
+ required: false
+ size:
+ description:
+ - Size of the domain.
+ required: false
+ hard_capacity:
+ description:
+ - Hard capacity of the domain.
+ required: false
+ soft_capacity:
+ description:
+ - Soft capacity of the domain.
+ required: false
+ max_cgs:
+ description:
+ - Number of max cgs.
+ required: false
+ max_dms:
+ description:
+ - Number of max dms.
+ required: false
+ max_mirrors:
+ description:
+ - Number of max_mirrors.
+ required: false
+ max_pools:
+ description:
+ - Number of max_pools.
+ required: false
+ max_volumes:
+ description:
+ - Number of max_volumes.
+ required: false
+ perf_class:
+ description:
+ - Add the domain to a performance class.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ size: domain_size
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+msg:
+ description: module return status.
+ returned: as needed
+ type: str
+ sample: "domain 'domain_name' created successfully."
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ domain=dict(required=True),
+ size=dict(),
+ max_dms=dict(),
+ max_cgs=dict(),
+ ldap_id=dict(),
+ max_mirrors=dict(),
+ max_pools=dict(),
+ max_volumes=dict(),
+ perf_class=dict(),
+ hard_capacity=dict(),
+ soft_capacity=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ domain = xcli_client.cmd.domain_list(
+ domain=module.params['domain']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ msg = 'Domain \'{0}\''.format(module.params['domain'])
+ if state == 'present' and not domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_create', xcli_client)
+ msg += " created successfully."
+ elif state == 'absent' and domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_delete', xcli_client)
+ msg += " deleted successfully."
+ else:
+ msg += " state unchanged."
+
+ module.exit_json(changed=state_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py
new file mode 100644
index 00000000..5ce12992
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host
+short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ cluster:
+ description:
+ - The name of the cluster to include the host.
+ required: false
+ domain:
+ description:
+            - The domains the host will be attached to.
+ To include more than one domain,
+ separate domain names with commas.
+ To include all existing domains, use an asterisk ("*").
+ required: false
+ iscsi_chap_name:
+ description:
+            - The host's CHAP name identifier.
+ required: false
+ iscsi_chap_secret:
+ description:
+            - The password of the initiator used to
+              authenticate to the system when CHAP is enabled.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ cluster=dict(),
+ domain=dict(),
+ iscsi_chap_name=dict(),
+ iscsi_chap_secret=dict(no_log=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ host = xcli_client.cmd.host_list(
+ host=module.params['host']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_define', xcli_client)
+ elif state == 'absent' and host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py
new file mode 100644
index 00000000..981bc553
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host_ports
+short_description: Add host ports to or remove them from IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds ports to or removes them from the hosts
+ on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host ports state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ iscsi_name:
+ description:
+ - iSCSI initiator name.
+ required: false
+ fcaddress:
+ description:
+            - Fibre Channel address.
+ required: false
+ num_of_visible_targets:
+ description:
+ - Number of visible targets.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Add ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Remove ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl,
+ spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ iscsi_name=dict(),
+ fcaddress=dict(),
+ num_of_visible_targets=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ ports = []
+ try:
+ ports = xcli_client.cmd.host_list_ports(
+ host=module.params.get('host')).as_list
+ except Exception:
+ pass
+ state = module.params['state']
+ port_exists = False
+ ports = [port.get('port_name') for port in ports]
+
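+    # A port already exists when any currently attached port name matches
+    # the requested iSCSI initiator name or FC address.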
+    fc_ports = module.params.get('fcaddress') or []
+    iscsi_ports = module.params.get('iscsi_name') or []
+ for port in ports:
+ if port in iscsi_ports or port in fc_ports:
+ port_exists = True
+ break
+ state_changed = False
+ if state == 'present' and not port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_add_port', xcli_client)
+ if state == 'absent' and port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_remove_port', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py
new file mode 100644
index 00000000..812904eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_pool
+short_description: Handles pools on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems"
+
+options:
+ pool:
+ description:
+ - Pool name.
+ required: true
+ state:
+ description:
+ - Pool state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+            - Pool size in GB.
+        required: false
+    snapshot_size:
+        description:
+            - Pool snapshot size in GB.
+ required: false
+ domain:
+ description:
+ - Adds the pool to the specified domain.
+ required: false
+ perf_class:
+ description:
+ - Assigns a perf_class to the pool.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create new pool.
+ community.general.ibm_sa_pool:
+    pool: pool_name
+ size: 300
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete pool.
+ community.general.ibm_sa_pool:
+    pool: pool_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ pool=dict(required=True),
+ size=dict(),
+ snapshot_size=dict(),
+ domain=dict(),
+ perf_class=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ pool = xcli_client.cmd.pool_list(
+ pool=module.params['pool']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_create', xcli_client)
+ if state == 'absent' and pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py
new file mode 100644
index 00000000..bf578cee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol
+short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ pool:
+ description:
+ - Volume pool.
+ required: false
+ state:
+ description:
+ - Volume state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+ - Volume size.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create a new volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ pool: pool_name
+ size: 17
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete an existing volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ pool=dict(),
+ size=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ volume = xcli_client.cmd.vol_list(
+ vol=module.params.get('vol')).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_create', xcli_client)
+ elif state == 'absent' and volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py
new file mode 100644
index 00000000..f1f5a807
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol_map
+short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module maps volumes to or unmaps them from the hosts on
+ IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ state:
+ default: "present"
+ choices: [ "present", "absent" ]
+ description:
+            - When the state is present, the volume is mapped;
+              when the state is absent, the volume is unmapped.
+
+ cluster:
+ description:
+ - Maps the volume to a cluster.
+ required: false
+ host:
+ description:
+ - Maps the volume to a host.
+ required: false
+ lun:
+ description:
+ - The LUN identifier.
+ required: false
+ override:
+ description:
+ - Overrides the existing volume mapping.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Map volume to host.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Map volume to cluster.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ cluster: cluster_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Unmap volume.
+  community.general.ibm_sa_vol_map:
+    vol: volume_name
+    host: host_name
+    username: admin
+    password: secret
+    endpoints: hostdev-system
+    state: absent
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command,
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ lun=dict(),
+ cluster=dict(),
+ host=dict(),
+ override=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+    # Determine whether the volume is already mapped to the requested host.
+    # Note: only host mappings are compared here; an existing cluster
+    # mapping is not detected by this check.
+    mapping = False
+    try:
+        mapped_hosts = xcli_client.cmd.vol_mapping_list(
+            vol=module.params.get('vol')).as_list
+        for host in mapped_hosts:
+            if host['host'] == module.params.get("host", ""):
+                mapping = True
+    except Exception:
+        # vol_mapping_list fails when the volume cannot be queried (for
+        # example, when it does not exist); treat any failure as "not
+        # mapped".
+        pass
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not mapping:
+ state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client)
+ if state == 'absent' and mapping:
+ state_changed = execute_pyxcli_command(
+ module, 'unmap_vol', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py
new file mode 100644
index 00000000..f82bd7ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_aggregate
+
+short_description: Manage NetApp cDOT aggregates.
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_aggregate) instead.
+
+description:
+- Create or destroy aggregates on NetApp cDOT.
+
+options:
+
+ state:
+ required: true
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+
+ name:
+ required: true
+ description:
+ - The name of the aggregate to manage.
+
+ disk_count:
+ description:
+ - Number of disks to place into the aggregate, including parity disks.
+ - The disks in this newly-created aggregate come from the spare disk pool.
+    - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided (a ZAPI option not exposed by this module).
+    - Either C(disk-count) or C(disks) must be supplied to the underlying ZAPI; this module exposes only C(disk_count). Range [0..2^31-1].
+ - Required when C(state=present).
+
+'''
+
+EXAMPLES = """
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTAggregate(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ disk_count=dict(required=False, type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['disk_count'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.disk_count = p['disk_count']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_aggr(self):
+ """
+ Checks if aggregate exists.
+
+ :return:
+ True if aggregate found
+ False if aggregate is not found
+ :rtype: bool
+ """
+
+ aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-attributes', **{'aggregate-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ aggr_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(aggr_get_iter,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 13040 denotes an aggregate not being found.
+ if to_native(e.code) == "13040":
+ return False
+ else:
+ self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_aggr(self):
+ aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-create', **{'aggregate': self.name,
+ 'disk-count': str(self.disk_count)})
+
+ try:
+ self.server.invoke_successfully(aggr_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_aggr(self):
+ aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-destroy', **{'aggregate': self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_aggregate(self):
+ aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-rename', **{'aggregate': self.name,
+ 'new-aggregate-name':
+ self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
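+        # Decision table: state=present and aggregate missing -> create;
+        # state=absent and aggregate present -> delete; every other
+        # combination leaves the system untouched (changed stays False).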
+ changed = False
+ aggregate_exists = self.get_aggr()
+ rename_aggregate = False
+
+ # check if anything needs to be changed (add/delete/update)
+
+ if aggregate_exists:
+ if self.state == 'absent':
+ changed = True
+
+            elif self.state == 'present':
+                # No new-name parameter is exposed, so there is nothing to
+                # rename; an existing aggregate in the desired state is a
+                # no-op (rename_aggregate can never become True).
+                pass
+
+ else:
+ if self.state == 'present':
+ # Aggregate does not exist, but requested state is present.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not aggregate_exists:
+ self.create_aggr()
+
+ else:
+ if rename_aggregate:
+ self.rename_aggregate()
+
+ elif self.state == 'absent':
+ self.delete_aggr()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTAggregate()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py
new file mode 100644
index 00000000..36c5416a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_license
+
+short_description: Manage NetApp cDOT protocol and feature licenses
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_license) instead.
+
+description:
+- Add or remove licenses on NetApp ONTAP.
+
+options:
+
+ remove_unused:
+ description:
+ - Remove licenses that have no controller affiliation in the cluster.
+ type: bool
+ default: false
+
+ remove_expired:
+ description:
+ - Remove licenses that have expired in the cluster.
+ type: bool
+ default: false
+
+ serial_number:
+ description:
+ - Serial number of the node associated with the license.
+ - This parameter is used primarily when removing license for a specific service.
+ - If this parameter is not provided, the cluster serial number is used by default.
+
+ licenses:
+ description:
+ - List of licenses to add or remove.
+ - Please note that trying to remove a non-existent license will throw an error.
+ suboptions:
+ base:
+ description:
+ - Cluster Base License
+ nfs:
+ description:
+ - NFS License
+ cifs:
+ description:
+ - CIFS License
+ iscsi:
+ description:
+ - iSCSI License
+ fcp:
+ description:
+ - FCP License
+ cdmi:
+ description:
+ - CDMI License
+ snaprestore:
+ description:
+ - SnapRestore License
+ snapmirror:
+ description:
+ - SnapMirror License
+ flexclone:
+ description:
+ - FlexClone License
+ snapvault:
+ description:
+ - SnapVault License
+ snaplock:
+ description:
+ - SnapLock License
+ snapmanagersuite:
+ description:
+ - SnapManagerSuite License
+ snapprotectapps:
+ description:
+ - SnapProtectApp License
+ v_storageattach:
+ description:
+ - Virtual Attached Storage License
+
+'''
+
+
+EXAMPLES = """
+- name: Add licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ serial_number: #################
+ licenses:
+ nfs: #################
+ cifs: #################
+ iscsi: #################
+ fcp: #################
+ snaprestore: #################
+ flexclone: #################
+
+- name: Remove licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ licenses:
+ nfs: remove
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLicense(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ serial_number=dict(required=False, type='str', default=None),
+ remove_unused=dict(default=False, type='bool'),
+ remove_expired=dict(default=False, type='bool'),
+            # The default must be a dict: update_licenses() iterates
+            # self.licenses.items(), so a bare False would crash when the
+            # option is omitted.
+            licenses=dict(default={}, type='dict'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.serial_number = p['serial_number']
+ self.remove_unused = p['remove_unused']
+ self.remove_expired = p['remove_expired']
+ self.licenses = p['licenses']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_licensing_status(self):
+ """
+ Check licensing status
+
+ :return: package (key) and licensing status (value)
+ :rtype: dict
+ """
+ license_status = netapp_utils.zapi.NaElement('license-v2-status-list-info')
+ result = None
+ try:
+ result = self.server.invoke_successfully(license_status,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error checking license status: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ return_dictionary = {}
+ license_v2_status = result.get_child_by_name('license-v2-status')
+ if license_v2_status:
+ for license_v2_status_info in license_v2_status.get_children():
+ package = license_v2_status_info.get_child_content('package')
+ status = license_v2_status_info.get_child_content('method')
+ return_dictionary[package] = status
+
+ return return_dictionary
+
+ def remove_licenses(self, remove_list):
+ """
+ Remove requested licenses
+ :param:
+ remove_list : List of packages to remove
+
+ """
+ license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
+ for package in remove_list:
+ license_delete.add_new_child('package', package)
+
+ if self.serial_number is not None:
+ license_delete.add_new_child('serial-number', self.serial_number)
+
+ try:
+ self.server.invoke_successfully(license_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing license %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_unused_licenses(self):
+ """
+ Remove unused licenses
+ """
+ remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
+ try:
+ self.server.invoke_successfully(remove_unused,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing unused licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_expired_licenses(self):
+ """
+ Remove expired licenses
+ """
+ remove_expired = netapp_utils.zapi.NaElement('license-v2-delete-expired')
+ try:
+ self.server.invoke_successfully(remove_expired,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing expired licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def update_licenses(self):
+ """
+ Update licenses
+ """
+ # Remove unused and expired licenses, if requested.
+ if self.remove_unused:
+ self.remove_unused_licenses()
+
+ if self.remove_expired:
+ self.remove_expired_licenses()
+
+ # Next, add/remove specific requested licenses.
+ license_add = netapp_utils.zapi.NaElement('license-v2-add')
+ codes = netapp_utils.zapi.NaElement('codes')
+ remove_list = []
+ for key, value in self.licenses.items():
+ str_value = str(value)
+ # Make sure license is not an empty string.
+ if str_value and str_value.strip():
+ if str_value.lower() == 'remove':
+ remove_list.append(str(key).lower())
+ else:
+ codes.add_new_child('license-code-v2', str_value)
+
+ # Remove requested licenses.
+ if len(remove_list) != 0:
+ self.remove_licenses(remove_list)
+
+ # Add requested licenses
+ if len(codes.get_children()) != 0:
+ license_add.add_child_elem(codes)
+ try:
+ self.server.invoke_successfully(license_add,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error adding licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ # Add / Update licenses.
+ license_status = self.get_licensing_status()
+ self.update_licenses()
+ new_license_status = self.get_licensing_status()
+
+ if license_status != new_license_status:
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLicense()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py
new file mode 100644
index 00000000..3236dbee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_lun
+
+short_description: Manage NetApp cDOT LUNs
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_lun) instead.
+
+description:
+- Create, destroy, or resize LUNs on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified lun should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the lun to manage.
+ required: true
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the lun should exist on.
+ - Required when C(state=present).
+
+ size:
+ description:
+ - The size of the lun in C(size_unit).
+ - Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ force_resize:
+ description:
+    - Forcibly reduce the size. This flag must be set when shrinking a LUN; it guards against reducing the size by accident.
+ default: false
+
+ force_remove:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
+ - If "false", destroying an online and mapped LUN will fail.
+ default: false
+
+ force_remove_fenced:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed while it is fenced.
+ - If "false", attempting to destroy a fenced LUN will fail.
+ - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
+ default: false
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+
+'''
+
+EXAMPLES = """
+- name: Create LUN
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: mb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Resize Lun
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ force_resize: True
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: gb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLUN(object):
+
+ def __init__(self):
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
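+        # Worked example: size=5 with size_unit='mb' is stored as
+        # 5 * 1024 ** 2 = 5242880 bytes, the value later sent to
+        # lun-create-by-size.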
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ force_resize=dict(default=False, type='bool'),
+ force_remove=dict(default=False, type='bool'),
+ force_remove_fenced=dict(default=False, type='bool'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.size_unit = p['size_unit']
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.force_resize = p['force_resize']
+ self.force_remove = p['force_remove']
+ self.force_remove_fenced = p['force_remove_fenced']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_lun(self):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+
+ luns = []
+ tag = None
+ while True:
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ if tag:
+ lun_info.add_new_child('tag', tag, True)
+
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+ query_details.add_new_child('vserver', self.vserver)
+ query_details.add_new_child('volume', self.flexvol_name)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ lun_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(lun_info, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attr_list = result.get_child_by_name('attributes-list')
+ luns.extend(attr_list.get_children())
+
+ tag = result.get_child_content('next-tag')
+
+ if tag is None:
+ break
+
+ # The LUNs have been extracted.
+ # Find the specified lun and extract details.
+ return_value = None
+ for lun in luns:
+ path = lun.get_child_content('path')
+ _rest, _splitter, found_name = path.rpartition('/')
+
+ if found_name == self.name:
+ size = lun.get_child_content('size')
+
+ # Find out if the lun is attached
+ attached_to = None
+ lun_id = None
+ if lun.get_child_content('mapped') == 'true':
+ lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-map-list-info', **{'path': path})
+
+ result = self.server.invoke_successfully(
+ lun_map_list, enable_tunneling=True)
+
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ for igroup_info in igroups.get_children():
+ igroup = igroup_info.get_child_content(
+ 'initiator-group-name')
+ attached_to = igroup
+ lun_id = igroup_info.get_child_content('lun-id')
+
+ return_value = {
+ 'name': found_name,
+ 'size': size,
+ 'attached_to': attached_to,
+ 'lun_id': lun_id
+ }
+ else:
+ continue
+
+ return return_value
+
+ def create_lun(self):
+ """
+ Create LUN with requested name and size
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-create-by-size', **{'path': path,
+ 'size': str(self.size),
+ 'ostype': 'linux'})
+
+ try:
+ self.server.invoke_successfully(lun_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_lun(self):
+ """
+ Delete requested LUN
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-destroy', **{'path': path,
+ 'force': str(self.force_remove),
+ 'destroy-fenced-lun':
+ str(self.force_remove_fenced)})
+
+ try:
+ self.server.invoke_successfully(lun_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_lun(self):
+ """
+ Resize requested LUN.
+
+ :return: True if LUN was actually re-sized, false otherwise.
+ :rtype: bool
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-resize', **{'path': path,
+ 'size': str(self.size),
+ 'force': str(self.force_resize)})
+ try:
+ self.server.invoke_successfully(lun_resize, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == "9042":
+ # Error 9042 denotes the new LUN size being the same as the
+ # old LUN size. This happens when there's barely any difference
+ # in the two sizes. For example, from 8388608 bytes to
+ # 8194304 bytes. This should go away if/when the default size
+ # requested/reported to/from the controller is changed to a
+ # larger unit (MB/GB/TB).
+ return False
+ else:
+ self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return True
+
+ def apply(self):
+ property_changed = False
+ multiple_properties_changed = False
+ size_changed = False
+ lun_exists = False
+ lun_detail = self.get_lun()
+
+ if lun_detail:
+ lun_exists = True
+ current_size = lun_detail['size']
+
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if not int(current_size) == self.size:
+ size_changed = True
+ property_changed = True
+
+ else:
+ if self.state == 'present':
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not lun_exists:
+ self.create_lun()
+
+ else:
+ if size_changed:
+ # Ensure that size was actually changed. Please
+ # read notes in 'resize_lun' function for details.
+ size_changed = self.resize_lun()
+ if not size_changed and not \
+ multiple_properties_changed:
+ property_changed = False
+
+ elif self.state == 'absent':
+ self.delete_lun()
+
+ changed = property_changed or size_changed
+ # TODO: include other details about the lun (size, etc.)
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLUN()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py
new file mode 100644
index 00000000..9f7ce60d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_qtree
+
+short_description: Manage qtrees
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_qtree) instead.
+
+description:
+- Create or destroy Qtrees.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Qtree should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the Qtree to manage.
+ required: true
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the Qtree should exist on. Required when C(state=present).
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+- name: Create QTree
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Re-run QTree creation (idempotent, reports no change)
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTQTree(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_qtree(self):
+ """
+ Checks if the qtree exists.
+
+ :return:
+ True if qtree found
+ False if qtree is not found
+ :rtype: bool
+ """
+
+ qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-info', **{'vserver': self.vserver,
+ 'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ qtree_list_iter.add_child_elem(query)
+
+ result = self.server.invoke_successfully(qtree_list_iter,
+ enable_tunneling=True)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_qtree(self):
+ qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-create', **{'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ try:
+ self.server.invoke_successfully(qtree_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_qtree(self):
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-delete', **{'qtree': path})
+
+ try:
+ self.server.invoke_successfully(qtree_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_qtree(self):
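+        # Both paths below are identical because the module exposes no
+        # new-name parameter, so qtree-rename would be a no-op even if
+        # apply() ever invoked it.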
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ new_path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-rename', **{'qtree': path,
+ 'new-qtree-name': new_path})
+
+ try:
+ self.server.invoke_successfully(qtree_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ qtree_exists = False
+ rename_qtree = False
+ qtree_detail = self.get_qtree()
+
+ if qtree_detail:
+ qtree_exists = True
+
+ if self.state == 'absent':
+ # Qtree exists, but requested state is 'absent'.
+ changed = True
+
+            elif self.state == 'present':
+                # No new-name parameter is exposed, so there is nothing to
+                # rename; an existing qtree in the desired state is a no-op
+                # (rename_qtree can never become True).
+                pass
+
+ else:
+ if self.state == 'present':
+ # Qtree does not exist, but requested state is 'present'.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not qtree_exists:
+ self.create_qtree()
+
+ else:
+ if rename_qtree:
+ self.rename_qtree()
+
+ elif self.state == 'absent':
+ self.delete_qtree()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTQTree()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py
new file mode 100644
index 00000000..0227a014
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_svm
+
+short_description: Manage NetApp cDOT SVMs
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_svm) instead.
+
+description:
+- Create or destroy SVMs on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified SVM should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the SVM to manage.
+ required: true
+
+ root_volume:
+ description:
+ - Root volume of the SVM. Required when C(state=present).
+
+ root_volume_aggregate:
+ description:
+ - The aggregate on which the root volume will be created.
+ - Required when C(state=present).
+
+ root_volume_security_style:
+    description:
+    - Security style of the root volume.
+    - When specified as part of a vserver-create call, this field sets the security style of the Vserver root volume.
+    - When specified as part of a vserver-get-iter call, it filters the list of matching Vservers.
+    - Valid options are C(unix) (NFS), C(ntfs) (CIFS), C(mixed), and C(unified).
+    - The C(unified) style applies only to Infinite Volumes and cannot be applied to a Vserver's root volume.
+    - Required when C(state=present).
+ choices: ['unix', 'ntfs', 'mixed', 'unified']
+
+'''
+
+EXAMPLES = """
+
+ - name: Create SVM
+ community.general.na_cdot_svm:
+ state: present
+ name: ansibleVServer
+ root_volume: vol1
+ root_volume_aggregate: aggr1
+ root_volume_security_style: mixed
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTSVM(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ root_volume=dict(type='str'),
+ root_volume_aggregate=dict(type='str'),
+ root_volume_security_style=dict(type='str', choices=['unix',
+ 'ntfs',
+ 'mixed',
+ 'unified'
+ ]),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['root_volume',
+ 'root_volume_aggregate',
+ 'root_volume_security_style'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.root_volume = p['root_volume']
+ self.root_volume_aggregate = p['root_volume_aggregate']
+ self.root_volume_security_style = p['root_volume_security_style']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_vserver(self):
+ """
+ Checks if vserver exists.
+
+ :return:
+ True if vserver found
+ False if vserver is not found
+ :rtype: bool
+ """
+
+ vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-info', **{'vserver-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+
+ """
+ TODO:
+ Return more relevant parameters about vserver that can
+ be updated by the playbook.
+ """
+ return True
+ else:
+ return False
+
+ def create_vserver(self):
+ vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-create', **{'vserver-name': self.name,
+ 'root-volume': self.root_volume,
+ 'root-volume-aggregate':
+ self.root_volume_aggregate,
+ 'root-volume-security-style':
+ self.root_volume_security_style
+ })
+
+ try:
+ self.server.invoke_successfully(vserver_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_vserver(self):
+ vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-destroy', **{'vserver-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_vserver(self):
+ vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-rename', **{'vserver-name': self.name,
+ 'new-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming SVM %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ vserver_exists = self.get_vserver()
+ rename_vserver = False
+ if vserver_exists:
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Update properties
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not vserver_exists:
+ self.create_vserver()
+
+ else:
+ if rename_vserver:
+ self.rename_vserver()
+
+ elif self.state == 'absent':
+ self.delete_vserver()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTSVM()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py
new file mode 100644
index 00000000..626e0aa0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user
+
+short_description: Manage NetApp cDOT user accounts
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user) instead.
+
+description:
+- Create or destroy users.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the user to manage.
+ required: true
+
+ application:
+ description:
+ - Applications to grant access to.
+ required: true
+ choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']
+
+ authentication_method:
+ description:
+ - Authentication method for the application.
+ - Not all authentication methods are valid for an application.
+ - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
+    - password for console application.
+    - password, domain, nsswitch, cert for http application.
+    - password, domain, nsswitch, cert for ontapi application.
+    - community for snmp application (when creating SNMPv1 and SNMPv2 users).
+    - usm and community for snmp application (when creating SNMPv3 users).
+    - password for sp application.
+    - password for rsh application.
+    - password for telnet application.
+    - password, publickey, domain, nsswitch for ssh application.
+ required: true
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
+
+ set_password:
+ description:
+ - Password for the user account.
+ - It is ignored for creating snmp users, but is required for creating non-snmp users.
+ - For an existing user, this value will be used as the new password.
+
+ role_name:
+ description:
+    - The name of the role. Required when C(state=present).
+
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User
+ community.general.na_cdot_user:
+ state: present
+ name: SampleUser
+ application: ssh
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUser(object):
+ """
+ Common operations to manage users and roles.
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ application=dict(required=True, type='str', choices=[
+ 'console', 'http', 'ontapi', 'rsh',
+ 'snmp', 'sp', 'ssh', 'telnet']),
+ authentication_method=dict(required=True, type='str',
+ choices=['community', 'password',
+ 'publickey', 'domain',
+ 'nsswitch', 'usm']),
+ set_password=dict(required=False, type='str', default=None),
+ role_name=dict(required=False, type='str'),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['role_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.application = p['application']
+ self.authentication_method = p['authentication_method']
+ self.set_password = p['set_password']
+ self.role_name = p['role_name']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_user(self):
+ """
+ Checks if the user exists.
+
+ :return:
+ True if user found
+ False if user is not found
+ :rtype: bool
+ """
+
+ security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-account-info', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(security_login_get_iter,
+ enable_tunneling=False)
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return True
+ else:
+ return False
+
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16034 denotes a user not being found.
+ if to_native(e.code) == "16034":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def create_user(self):
+ user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-create', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method,
+ 'role-name': self.role_name})
+ if self.set_password is not None:
+ user_create.add_new_child('password', self.set_password)
+
+ try:
+ self.server.invoke_successfully(user_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_user(self):
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-delete', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ try:
+ self.server.invoke_successfully(user_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_password(self):
+ """
+ Changes the password
+
+ :return:
+ True if password updated
+ False if password is not updated
+ :rtype: bool
+ """
+ self.server.set_vserver(self.vserver)
+ modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-modify-password', **{
+ 'new-password': str(self.set_password),
+ 'user-name': self.name})
+ try:
+ self.server.invoke_successfully(modify_password,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == '13114':
+ return False
+ else:
+ self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ self.server.set_vserver(None)
+ return True
+
+ def apply(self):
+ property_changed = False
+ password_changed = False
+ user_exists = self.get_user()
+
+ if user_exists:
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if self.set_password is not None:
+ password_changed = self.change_password()
+ else:
+ if self.state == 'present':
+ # Check if anything needs to be updated
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not user_exists:
+ self.create_user()
+
+ # Add ability to update parameters.
+
+ elif self.state == 'absent':
+ self.delete_user()
+
+ changed = property_changed or password_changed
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUser()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py
new file mode 100644
index 00000000..88133200
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user_role
+
+short_description: Manage NetApp cDOT user roles
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user_role) instead.
+
+description:
+- Create or destroy user roles.
+
+options:
+
+ state:
+ description:
+    - Whether the specified role should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the role to manage.
+ required: true
+
+ command_directory_name:
+ description:
+    - The command or command directory to which the role has access.
+ required: true
+
+ access_level:
+ description:
+    - The access level to grant on the command or command directory.
+ choices: ['none', 'readonly', 'all']
+ default: 'all'
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User Role
+ community.general.na_cdot_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: DEFAULT
+ access_level: none
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUserRole(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ command_directory_name=dict(required=True, type='str'),
+ access_level=dict(required=False, type='str', default='all',
+ choices=['none', 'readonly', 'all']),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.command_directory_name = p['command_directory_name']
+ self.access_level = p['access_level']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_role(self):
+ """
+ Checks if the role exists for specific command-directory-name.
+
+ :return:
+ True if role found
+ False if role is not found
+ :rtype: bool
+ """
+
+ security_login_role_get_iter = netapp_utils.zapi.NaElement(
+ 'security-login-role-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-info', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_role_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(
+ security_login_role_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16031 denotes a role not being found.
+ if to_native(e.code) == "16031":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_role(self):
+ role_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-create', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name,
+ 'access-level':
+ self.access_level})
+
+ try:
+ self.server.invoke_successfully(role_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_role(self):
+ role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-delete', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ try:
+ self.server.invoke_successfully(role_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ role_exists = self.get_role()
+
+ if role_exists:
+ if self.state == 'absent':
+ changed = True
+
+ # Check if properties need to be updated
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not role_exists:
+ self.create_role()
+
+ # Update properties
+
+ elif self.state == 'absent':
+ self.delete_role()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUserRole()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py
new file mode 100644
index 00000000..c10911d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_volume
+
+short_description: Manage NetApp cDOT volumes
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_volume) instead.
+
+description:
+- Create or destroy volumes on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ infinite:
+ description:
+ - Set True if the volume is an Infinite Volume.
+ type: bool
+ default: 'no'
+
+ online:
+ description:
+ - Whether the specified volume is online or not.
+ type: bool
+ default: 'yes'
+
+ aggregate_name:
+ description:
+ - The name of the aggregate the flexvol should exist on. Required when C(state=present).
+
+ size:
+ description:
+ - The size of the volume in (size_unit). Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+
+ junction_path:
+ description:
+ - Junction path at which to mount the volume.
+ required: false
+
+ export_policy:
+ description:
+ - Export policy to set for the specified junction path.
+ required: false
+ default: default
+
+ snapshot_policy:
+ description:
+ - Snapshot policy to set for the specified volume.
+ required: false
+ default: default
+
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexVol
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ aggregate_name: aggr1
+ size: 20
+ size_unit: mb
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ junction_path: /ansibleVolume
+ export_policy: all_nfs_networks
+ snapshot_policy: daily
+
+ - name: Make FlexVol offline
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ online: False
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTVolume(object):
+
+ def __init__(self):
+
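+ # Map size_unit strings to byte multipliers (binary units: kb = 1024, mb = 1024**2, ...).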
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
+ is_online=dict(required=False, type='bool', default=True, aliases=['online']),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ aggregate_name=dict(type='str'),
+ vserver=dict(required=True, type='str', default=None),
+ junction_path=dict(required=False, type='str', default=None),
+ export_policy=dict(required=False, type='str', default='default'),
+ snapshot_policy=dict(required=False, type='str', default='default'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['aggregate_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.is_infinite = p['is_infinite']
+ self.is_online = p['is_online']
+ self.size_unit = p['size_unit']
+ self.vserver = p['vserver']
+ self.junction_path = p['junction_path']
+ self.export_policy = p['export_policy']
+ self.snapshot_policy = p['snapshot_policy']
+
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.aggregate_name = p['aggregate_name']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_volume(self):
+ """
+ Return details about the volume
+ :param name: Name of the volume
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', self.name)
+ volume_attributes.add_child_elem(volume_id_attributes)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+
+ volume_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(volume_info, True)
+
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ volume_attributes = result.get_child_by_name(
+ 'attributes-list').get_child_by_name(
+ 'volume-attributes')
+ # Get volume's current size
+ volume_space_attributes = volume_attributes.get_child_by_name(
+ 'volume-space-attributes')
+ current_size = volume_space_attributes.get_child_content('size')
+
+ # Get volume's state (online/offline)
+ volume_state_attributes = volume_attributes.get_child_by_name(
+ 'volume-state-attributes')
+ current_state = volume_state_attributes.get_child_content('state')
+ is_online = None
+ if current_state == "online":
+ is_online = True
+ elif current_state == "offline":
+ is_online = False
+ return_value = {
+ 'name': self.name,
+ 'size': current_size,
+ 'is_online': is_online,
+ }
+
+ return return_value
+
+ def create_volume(self):
+ create_parameters = {'volume': self.name,
+ 'containing-aggr-name': self.aggregate_name,
+ 'size': str(self.size),
+ }
+ if self.junction_path:
+ create_parameters['junction-path'] = str(self.junction_path)
+ if self.export_policy != 'default':
+ create_parameters['export-policy'] = str(self.export_policy)
+ if self.snapshot_policy != 'default':
+ create_parameters['snapshot-policy'] = str(self.snapshot_policy)
+
+ volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-create', **create_parameters)
+
+ try:
+ self.server.invoke_successfully(volume_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_volume(self):
+ if self.is_infinite:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy-async', **{'volume-name': self.name})
+ else:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy', **{'name': self.name, 'unmount-and-offline':
+ 'true'})
+
+ try:
+ self.server.invoke_successfully(volume_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_volume(self):
+ """
+ Rename the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to rename an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename-async',
+ **{'volume-name': self.name, 'new-volume-name': str(
+ self.name)})
+ else:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename', **{'volume': self.name, 'new-volume-name': str(
+ self.name)})
+ try:
+ self.server.invoke_successfully(volume_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_volume(self):
+ """
+ Re-size the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to resize an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size-async',
+ **{'volume-name': self.name, 'new-size': str(
+ self.size)})
+ else:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size', **{'volume': self.name, 'new-size': str(
+ self.size)})
+ try:
+ self.server.invoke_successfully(volume_resize,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_volume_state(self):
+ """
+ Change volume's state (offline/online).
+
+ Note: 'is_infinite' needs to be set to True in order to change the
+ state of an Infinite Volume.
+ """
+ state_requested = None
+ if self.is_online:
+ # Requested state is 'online'.
+ state_requested = "online"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online',
+ **{'name': self.name})
+ else:
+ # Requested state is 'offline'.
+ state_requested = "offline"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline',
+ **{'name': self.name})
+ try:
+ self.server.invoke_successfully(volume_change_state,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
+ (self.name, state_requested, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ rename_volume = False
+ resize_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
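+ # ZAPI reports the current size as a string, so compare string forms.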
+ if str(volume_detail['size']) != str(self.size):
+ resize_volume = True
+ changed = True
+ if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
+ changed = True
+ if self.is_online is False:
+ # Volume is online, but requested state is offline
+ pass
+ else:
+ # Volume is offline but requested state is online
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+
+ else:
+ if resize_volume:
+ self.resize_volume()
+ if volume_detail['is_online'] is not \
+ None and volume_detail['is_online'] != \
+ self.is_online:
+ self.change_volume_state()
+ # Ensure re-naming is the last change made.
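+ # (rename_volume is never set to True above, so this branch is currently unreachable.)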
+ if rename_volume:
+ self.rename_volume()
+
+ elif self.state == 'absent':
+ self.delete_volume()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py
new file mode 100644
index 00000000..0fc61afb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py
@@ -0,0 +1,610 @@
+#!/usr/bin/python
+
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_gather_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(netapp.ontap.na_ontap_info) instead.
+author: Piotr Olczak (@dprts) <polczak@redhat.com>
+extends_documentation_fragment:
+- community.general._netapp.na_ontap
+
+short_description: NetApp information gatherer
+description:
+ - This module allows you to gather various information about the ONTAP configuration.
+requirements:
+ - netapp_lib
+options:
+ state:
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ "aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info",
+ "net_ifgrp_info",
+ "net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info",
+ "nvme_namespace_info", "nvme_subsystem_info", "ontap_version",
+ "qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info",
+ "security_login_account_info", "storage_failover_info", "volume_info",
+ "vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info"
+ Can specify a list of values to include a larger subset. Values can also be used
+ with an initial C(M(!)) to specify that a specific subset should
+ not be collected.
+ - NVMe subsets are supported from ONTAP 9.4 onwards.
+ - Use "help" to get a list of supported facts for your system.
+ default: "all"
+'''
+
+EXAMPLES = '''
+- name: Get NetApp info (Password Authentication)
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+- ansible.builtin.debug:
+ var: ontap_facts
+- name: Limit Fact Gathering to Aggregate Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: "aggregate_info"
+- name: Limit Fact Gathering to Volume and Lun Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - volume_info
+ - lun_info
+- name: Gather all facts except for volume and lun information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - "!volume_info"
+ - "!lun_info"
+'''
+
+RETURN = '''
+ontap_facts:
+ description: Returns various information about NetApp cluster configuration
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_facts": {
+ "aggregate_info": {...},
+ "cluster_node_info": {...},
+ "net_dns_info": {...},
+ "net_ifgrp_info": {...},
+ "net_interface_info": {...},
+ "net_port_info": {...},
+ "security_key_manager_key_info": {...},
+ "security_login_account_info": {...},
+ "volume_info": {...},
+ "lun_info": {...},
+ "storage_failover_info": {...},
+ "vserver_login_banner_info": {...},
+ "vserver_motd_info": {...},
+ "vserver_info": {...},
+ "vserver_nfs_info": {...},
+ "ontap_version": {...},
+ "igroup_info": {...},
+ "qos_policy_info": {...},
+ "qos_adaptive_policy_info": {...}
+ }
+ }'
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPGatherFacts(object):
+ '''Class with gather facts methods'''
+
+ def __init__(self, module):
+ self.module = module
+ self.netapp_info = dict()
+
+ # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
+ # for starting this
+ # min_version identifies the ontapi version which supports this ZAPI
+ # use 0 if it is supported since 9.1
+ self.fact_subsets = {
+ 'net_dns_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-dns-get-iter',
+ 'attribute': 'net-dns-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-get-iter',
+ 'attribute': 'net-interface-info',
+ 'field': 'interface-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_port_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-get-iter',
+ 'attribute': 'net-port-info',
+ 'field': ('node', 'port'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-node-get-iter',
+ 'attribute': 'cluster-node-info',
+ 'field': 'node-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-get-iter',
+ 'attribute': 'security-login-account-info',
+ 'field': ('vserver', 'user-name', 'application', 'authentication-method'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'aggregate_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-get-iter',
+ 'attribute': 'aggr-attributes',
+ 'field': 'aggregate-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'volume_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-get-iter',
+ 'attribute': 'volume-attributes',
+ 'field': ('name', 'owning-vserver-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'lun_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-get-iter',
+ 'attribute': 'lun-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'storage_failover_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cf-get-iter',
+ 'attribute': 'storage-failover-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_motd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-motd-get-iter',
+ 'attribute': 'vserver-motd-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_login_banner_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-login-banner-get-iter',
+ 'attribute': 'vserver-login-banner-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_key_manager_key_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-key-manager-key-get-iter',
+ 'attribute': 'security-key-manager-key-info',
+ 'field': ('node', 'key-id'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-get-iter',
+ 'attribute': 'vserver-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_ifgrp_info': {
+ 'method': self.get_ifgrp_info,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontap_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'system_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-node-get-iter',
+ 'attribute': 'node-details-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'igroup_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'igroup-get-iter',
+ 'attribute': 'initiator-group-info',
+ 'field': ('vserver', 'initiator-group-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'qos_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-policy-group-get-iter',
+ 'attribute': 'qos-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ # supported in ONTAP 9.3 and onwards
+ 'qos_adaptive_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-adaptive-policy-group-get-iter',
+ 'attribute': 'qos-adaptive-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '130',
+ },
+ # supported in ONTAP 9.4 and onwards
+ 'nvme_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-get-iter',
+ 'attribute': 'nvme-target-service-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-interface-get-iter',
+ 'attribute': 'nvme-interface-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_subsystem_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-subsystem-get-iter',
+ 'attribute': 'nvme-subsystem-info',
+ 'field': 'subsystem',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_namespace_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-namespace-get-iter',
+ 'attribute': 'nvme-namespace-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ }
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def ontapi(self):
+ '''Method to get ontapi version'''
+
+ api = 'system-get-ontapi-version'
+ api_call = netapp_utils.zapi.NaElement(api)
+ try:
+ results = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ ontapi_version = results.get_child_content('minor-version')
+ return ontapi_version if ontapi_version is not None else '0'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api, to_native(error)), exception=traceback.format_exc())
+
+ def call_api(self, call, query=None):
+ '''Main method to run an API call'''
+
+ api_call = netapp_utils.zapi.NaElement(call)
+ result = None
+
+ if query:
+ for key, val in query.items():
+ # Can val be nested?
+ api_call.add_new_child(key, val)
+ try:
+ result = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ return result
+ except netapp_utils.zapi.NaApiError as error:
+ if call in ['security-key-manager-key-get-iter']:
+ return result
+ else:
+ self.module.fail_json(msg="Error calling API %s: %s"
+ % (call, to_native(error)), exception=traceback.format_exc())
+
+ def get_ifgrp_info(self):
+ '''Method to get network port ifgroups info'''
+
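+ # Reuse net_port_info if it was already collected; otherwise gather it first.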
+ try:
+ net_port_info = self.netapp_info['net_port_info']
+ except KeyError:
+ net_port_info_calls = self.fact_subsets['net_port_info']
+ net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
+ interfaces = net_port_info.keys()
+
+ ifgrps = []
+ for ifn in interfaces:
+ if net_port_info[ifn]['port_type'] == 'if_group':
+ ifgrps.append(ifn)
+
+ net_ifgrp_info = dict()
+ for ifgrp in ifgrps:
+ query = dict()
+ query['node'], query['ifgrp-name'] = ifgrp.split(':')
+
+ tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
+ attribute='net-ifgrp-info', query=query)
+ net_ifgrp_info = net_ifgrp_info.copy()
+ net_ifgrp_info.update(tmp)
+ return net_ifgrp_info
+
+ def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
+ '''Method to run a generic get-iter call'''
+
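+ # When 'field' is given, results are returned as a dict keyed by that field's
+ # value(s); otherwise they are returned as a plain list.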
+ generic_call = self.call_api(call, query)
+
+ if call == 'net-port-ifgrp-get':
+ children = 'attributes'
+ else:
+ children = 'attributes-list'
+
+ if generic_call is None:
+ return None
+
+ if field is None:
+ out = []
+ else:
+ out = {}
+
+ attributes_list = generic_call.get_child_by_name(children)
+
+ if attributes_list is None:
+ return None
+
+ for child in attributes_list.get_children():
+ dic = xmltodict.parse(child.to_string(), xml_attribs=False)
+
+ if attribute is not None:
+ dic = dic[attribute]
+
+ if isinstance(field, str):
+ unique_key = _finditem(dic, field)
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ elif isinstance(field, tuple):
+ unique_key = ':'.join([_finditem(dic, el) for el in field])
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ else:
+ out.append(convert_keys(json.loads(json.dumps(dic))))
+
+ return out
+
+ def get_all(self, gather_subset):
+ '''Method to get all subsets'''
+
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_gather_facts", cserver)
+
+ self.netapp_info['ontap_version'] = self.ontapi()
+
+ run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
+ if 'help' in gather_subset:
+ self.netapp_info['help'] = sorted(run_subset)
+ else:
+ for subset in run_subset:
+ call = self.fact_subsets[subset]
+ self.netapp_info[subset] = call['method'](**call['kwargs'])
+
+ return self.netapp_info
+
+ def get_subset(self, gather_subset, version):
+ '''Method to determine which subsets to collect'''
+
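+ # 'all' selects every subset supported by the target's ONTAPI version; a
+ # leading '!' excludes a subset, and '!all' excludes everything.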
+ runable_subsets = set()
+ exclude_subsets = set()
+ usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']]
+ if 'help' in gather_subset:
+ return usable_subsets
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(usable_subsets)
+ return runable_subsets
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ return set()
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in usable_subsets:
+ if subset not in self.fact_subsets.keys():
+ self.module.fail_json(msg='Bad subset: %s' % subset)
+ self.module.fail_json(msg='Remote system at version %s does not support %s' %
+ (version, subset))
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(usable_subsets)
+
+ runable_subsets.difference_update(exclude_subsets)
+
+ return runable_subsets
+
+
+# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
+def __finditem(obj, key):
+
+ if key in obj:
+ return obj[key]
+ for dummy, val in obj.items():
+ if isinstance(val, dict):
+ item = __finditem(val, key)
+ if item is not None:
+ return item
+ return None
+
+
+def _finditem(obj, key):
+
+ value = __finditem(obj, key)
+ if value is not None:
+ return value
+ raise KeyError(key)
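+ # Example: _finditem({'a': {'b': 1}}, 'b') -> 1; a missing key raises KeyError.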
+
+
+def convert_keys(d_param):
+ '''Recursively convert hyphens in dictionary keys to underscores'''
+
+ out = {}
+ if isinstance(d_param, dict):
+ for key, val in d_param.items():
+ val = convert_keys(val)
+ out[key.replace('-', '_')] = val
+ else:
+ return d_param
+ return out
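+ # Example: convert_keys({'net-port-info': {'port-type': 'if_group'}})
+ # returns {'net_port_info': {'port_type': 'if_group'}}.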
+
+
+def main():
+ '''Execute action'''
+
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='info', choices=['info']),
+ gather_subset=dict(default=['all'], type='list'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_XMLTODICT:
+ module.fail_json(msg="xmltodict missing")
+
+ if not HAS_JSON:
+ module.fail_json(msg="json missing")
+
+ state = module.params['state']
+ gather_subset = module.params['gather_subset']
+ if gather_subset is None:
+ gather_subset = ['all']
+ gf_obj = NetAppONTAPGatherFacts(module)
+ gf_all = gf_obj.get_all(gather_subset)
+ result = {'state': state, 'changed': False}
+ module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py
new file mode 100644
index 00000000..58c6962b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_account_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced.
+ alternative: Use M(netapp.elementsw.na_elementsw_account) instead.
+short_description: Manage SolidFire accounts
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update accounts on SolidFire.
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+
+ new_name:
+ description:
+ - New name for the user account.
+
+ initiator_secret:
+ description:
+ - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
+ - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
+ - If not specified, a random secret is created.
+
+ target_secret:
+ description:
+ - CHAP secret to use for the target (mutual CHAP authentication).
+ - Should be 12-16 characters long and impenetrable.
+ - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
+ - If not specified, a random secret is created.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ account_id:
+ description:
+ - The ID of the account to manage or update.
+
+ status:
+ description:
+ - Status of the account.
+
+'''
+
+EXAMPLES = """
+- name: Create Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+
+- name: Modify Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+ new_name: TenantA-Renamed
+
+- name: Delete Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: TenantA-Renamed
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireAccount(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=False, type='int', default=None),
+
+ new_name=dict(required=False, type='str', default=None),
+ initiator_secret=dict(required=False, type='str'),
+ target_secret=dict(required=False, type='str'),
+ attributes=dict(required=False, type='dict'),
+ status=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+
+ self.new_name = p['new_name']
+ self.initiator_secret = p['initiator_secret']
+ self.target_secret = p['target_secret']
+ self.attributes = p['attributes']
+ self.status = p['status']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_account(self):
+ """
+ Return account object if found
+
+ :return: Details about the account. None if not found.
+ :rtype: dict
+ """
+ account_list = self.sfe.list_accounts()
+
+ for account in account_list.accounts:
+ if account.username == self.name:
+ # Update self.account_id:
+ if self.account_id is not None:
+ if account.account_id == self.account_id:
+ return account
+ else:
+ self.account_id = account.account_id
+ return account
+ return None
+
+ def create_account(self):
+ try:
+ self.sfe.add_account(username=self.name,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg='Error creating account %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_account(self):
+ try:
+ self.sfe.remove_account(account_id=self.account_id)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_account(self):
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ username=self.new_name,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ account_exists = False
+ update_account = False
+ account_detail = self.get_account()
+
+ if account_detail:
+ account_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the account
+
+ if account_detail.username is not None and self.new_name is not None and \
+ account_detail.username != self.new_name:
+ update_account = True
+ changed = True
+
+ elif account_detail.status is not None and self.status is not None \
+ and account_detail.status != self.status:
+ update_account = True
+ changed = True
+
+ elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
+ and account_detail.initiator_secret != self.initiator_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.target_secret is not None and self.target_secret is not None \
+ and account_detail.target_secret != self.target_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.attributes is not None and self.attributes is not None \
+ and account_detail.attributes != self.attributes:
+ update_account = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not account_exists:
+ self.create_account()
+ elif update_account:
+ self.update_account()
+
+ elif self.state == 'absent':
+ self.delete_account()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireAccount()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py
new file mode 100644
index 00000000..cfe24832
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_check_connections
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced.
+ alternative: Use M(netapp.elementsw.na_elementsw_check_connections) instead.
+short_description: Check connectivity to MVIP and SVIP
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Used to test the management connection to the cluster.
+- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
+
+options:
+
+ skip:
+ description:
+ - Skip checking connection to SVIP or MVIP.
+ choices: ['svip', 'mvip']
+
+ mvip:
+ description:
+ - Optionally, use to test the connection to a different MVIP.
+ - This is not needed to test the connection to the target cluster.
+
+ svip:
+ description:
+ - Optionally, use to test the connection to a different SVIP.
+ - This is not needed to test the connection to the target cluster.
+
+'''
+
+
+EXAMPLES = """
+ - name: Check connections to MVIP and SVIP
+ community.general.sf_check_connections:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireConnection(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
+ mvip=dict(required=False, type='str', default=None),
+ svip=dict(required=False, type='str', default=None)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.skip = p['skip']
+ self.mvip = p['mvip']
+ self.svip = p['svip']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.ElementFactory.create(p['hostname'], p['username'], p['password'], port=442)
+
+ def check_mvip_connection(self):
+ """
+ Check connection to MVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_mvip(mvip=self.mvip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to MVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check_svip_connection(self):
+ """
+ Check connection to SVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_svip(svip=self.svip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to SVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check(self):
+
+ failed = True
+ msg = ''
+
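+ # 'skip' names the endpoint NOT to test: skip=mvip tests only the SVIP,
+ # skip=svip tests only the MVIP, and None tests both.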
+ if self.skip is None:
+ mvip_connection_established = self.check_mvip_connection()
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ elif not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'mvip':
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'svip':
+ mvip_connection_established = self.check_mvip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ else:
+ failed = False
+
+ if failed:
+ self.module.fail_json(msg=msg)
+ else:
+ self.module.exit_json()
+
+
+def main():
+ v = SolidFireConnection()
+ v.check()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py
new file mode 100644
index 00000000..296e50bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_snapshot_schedule_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced.
+ alternative: Use M(netapp.elementsw.na_elementsw_snapshot_schedule) instead.
+short_description: Manage SolidFire snapshot schedules
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update snapshot schedules on SolidFire.
+
+options:
+
+ state:
+ description:
+ - Whether the specified schedule should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ paused:
+ description:
+ - Pause / Resume a schedule.
+ required: false
+
+ recurring:
+ description:
+ - Should the schedule recur?
+ required: false
+
+ time_interval_days:
+ description: Time interval in days.
+ required: false
+ default: 1
+
+ time_interval_hours:
+ description: Time interval in hours.
+ required: false
+ default: 0
+
+ time_interval_minutes:
+ description: Time interval in minutes.
+ required: false
+ default: 0
+
+ name:
+ description:
+ - Name for the snapshot schedule.
+ required: true
+
+ snapshot_name:
+ description:
+ - Name for the created snapshots.
+ required: false
+
+ volumes:
+ description:
+ - Volume IDs that you want to set the snapshot schedule for.
+ - At least 1 volume ID is required for creating a new schedule.
+ - Required when C(state=present).
+ required: false
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ required: false
+
+ schedule_id:
+ description:
+ - The schedule ID for the schedule that you want to update or delete.
+ required: false
+
+ starting_date:
+ description:
+ - Starting date for the schedule.
+ - Required when C(state=present).
+ - "Format: C(2016--12--01T00:00:00Z)"
+ - Please use two '-' in the format above; otherwise you may see a C(TypeError, is not JSON serializable) error.
+ required: false
+'''
+
+EXAMPLES = """
+ - name: Create Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: Schedule_A
+ time_interval_days: 1
+ starting_date: 2016--12--01T00:00:00Z
+ volumes: 7
+
+ - name: Update Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ schedule_id: 6
+ recurring: True
+ snapshot_name: AnsibleSnapshots
+
+ - name: Delete Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ schedule_id: 6
+"""
+
+RETURN = """
+
+schedule_id:
+ description: Schedule ID of the newly created schedule
+ returned: success
+ type: str
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireSnapShotSchedule(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ time_interval_days=dict(required=False, type='int', default=1),
+ time_interval_hours=dict(required=False, type='int', default=0),
+ time_interval_minutes=dict(required=False, type='int', default=0),
+
+ paused=dict(required=False, type='bool'),
+ recurring=dict(required=False, type='bool'),
+
+ starting_date=dict(type='str'),
+
+ snapshot_name=dict(required=False, type='str'),
+ volumes=dict(required=False, type='list'),
+ retention=dict(required=False, type='str'),
+
+ schedule_id=dict(type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['starting_date', 'volumes'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ # self.interval = p['interval']
+
+ self.time_interval_days = p['time_interval_days']
+ self.time_interval_hours = p['time_interval_hours']
+ self.time_interval_minutes = p['time_interval_minutes']
+
+ self.paused = p['paused']
+ self.recurring = p['recurring']
+
+ self.starting_date = p['starting_date']
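+ # The documented C(2016--12--01T00:00:00Z) input is normalized to a standard
+ # ISO 8601 timestamp before it is sent to the API.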
+ if self.starting_date is not None:
+ self.starting_date = self.starting_date.replace("--", "-")
+
+ self.snapshot_name = p['snapshot_name']
+ self.volumes = p['volumes']
+ self.retention = p['retention']
+
+ self.schedule_id = p['schedule_id']
+
+ self.create_schedule_result = None
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_schedule(self):
+ schedule_list = self.sfe.list_schedules()
+ for schedule in schedule_list.schedules:
+ if schedule.name == self.name:
+ # Update self.schedule_id:
+ if self.schedule_id is not None:
+ if schedule.schedule_id == self.schedule_id:
+ return schedule
+ else:
+ self.schedule_id = schedule.schedule_id
+ return schedule
+
+ return None
+
+ def create_schedule(self):
+
+ try:
+ sched = netapp_utils.Schedule()
+ # if self.interval == 'time_interval':
+ sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ # Create schedule
+ sched.name = self.name
+ sched.schedule_info = netapp_utils.ScheduleInfo(
+ volume_ids=self.volumes,
+ snapshot_name=self.snapshot_name,
+ retention=self.retention
+ )
+ sched.paused = self.paused
+ sched.recurring = self.recurring
+ sched.starting_date = self.starting_date
+
+ self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+ sched.to_be_deleted = True
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+
+ # Update schedule properties
+
+ # if self.interval == 'time_interval':
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if sched.frequency.days != temp_frequency.days or \
+ sched.frequency.hours != temp_frequency.hours \
+ or sched.frequency.minutes != temp_frequency.minutes:
+ sched.frequency = temp_frequency
+
+ sched.name = self.name
+ if self.volumes is not None:
+ sched.schedule_info.volume_ids = self.volumes
+ if self.retention is not None:
+ sched.schedule_info.retention = self.retention
+ if self.snapshot_name is not None:
+ sched.schedule_info.snapshot_name = self.snapshot_name
+ if self.paused is not None:
+ sched.paused = self.paused
+ if self.recurring is not None:
+ sched.recurring = self.recurring
+ if self.starting_date is not None:
+ sched.starting_date = self.starting_date
+
+ # Make API call
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ schedule_exists = False
+ update_schedule = False
+ schedule_detail = self.get_schedule()
+
+ if schedule_detail:
+ schedule_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the account
+
+ if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
+ update_schedule = True
+ changed = True
+
+ elif schedule_detail.name != self.name:
+ update_schedule = True
+ changed = True
+
+ elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
+ update_schedule = True
+ changed = True
+
+ elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
+ update_schedule = True
+ changed = True
+
+ elif self.paused is not None and schedule_detail.paused != self.paused:
+ update_schedule = True
+ changed = True
+
+ elif self.recurring is not None and schedule_detail.recurring != self.recurring:
+ update_schedule = True
+ changed = True
+
+ elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
+ update_schedule = True
+ changed = True
+
+ elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
+ or self.time_interval_days is not None:
+
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if schedule_detail.frequency.days != temp_frequency.days or \
+ schedule_detail.frequency.hours != temp_frequency.hours \
+ or schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ # Skip changes
+ pass
+ else:
+ if self.state == 'present':
+ if not schedule_exists:
+ self.create_schedule()
+ elif update_schedule:
+ self.update_schedule()
+
+ elif self.state == 'absent':
+ self.delete_schedule()
+
+ if self.create_schedule_result is not None:
+ self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
+ else:
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireSnapShotSchedule()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py
new file mode 100644
index 00000000..78e3097d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_access_group_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced.
+ alternative: Use M(netapp.elementsw.na_elementsw_access_group) instead.
+short_description: Manage SolidFire Volume Access Groups
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volume access groups on SolidFire.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume access group should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - Name of the volume access group. The name does not need to be unique, but unique names are recommended.
+ required: true
+
+ initiators:
+ description:
+ - List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
+
+ volumes:
+ description:
+ - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
+
+ virtual_network_id:
+ description:
+ - The ID of the SolidFire virtual network to associate the volume access group with.
+
+ virtual_network_tags:
+ description:
+ - The ID of the VLAN Virtual Network Tag to associate the volume access group with.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ volume_access_group_id:
+ description:
+ - The ID of the volume access group to modify or delete.
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVolumeAccessGroup
+ volumes: [7,8]
+
+ - name: Modify Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ volume_access_group_id: 1
+ name: AnsibleVolumeAccessGroup-Renamed
+ attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
+
+ - name: Delete Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ volume_access_group_id: 1
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolumeAccessGroup(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ volume_access_group_id=dict(required=False, type='int', default=None),
+
+ initiators=dict(required=False, type='list', default=None),
+ volumes=dict(required=False, type='list', default=None),
+ virtual_network_id=dict(required=False, type='list', default=None),
+ virtual_network_tags=dict(required=False, type='list', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.volume_access_group_id = p['volume_access_group_id']
+
+ self.initiators = p['initiators']
+ self.volumes = p['volumes']
+ self.virtual_network_id = p['virtual_network_id']
+ self.virtual_network_tags = p['virtual_network_tags']
+ self.attributes = p['attributes']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume_access_group(self):
+ access_groups_list = self.sfe.list_volume_access_groups()
+
+ for group in access_groups_list.volume_access_groups:
+ if group.name == self.name:
+ # Update self.volume_access_group_id:
+ if self.volume_access_group_id is not None:
+ if group.volume_access_group_id == self.volume_access_group_id:
+ return group
+ else:
+ self.volume_access_group_id = group.volume_access_group_id
+ return group
+ return None
+
+ def create_volume_access_group(self):
+ try:
+ self.sfe.create_volume_access_group(name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error creating volume access group %s: %s" %
+ (self.name, to_native(e)), exception=traceback.format_exc())
+
+ def delete_volume_access_group(self):
+ try:
+ self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
+
+ except Exception as e:
+ self.module.fail_json(msg="Error deleting volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_volume_access_group(self):
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ group_exists = False
+ update_group = False
+ group_detail = self.get_volume_access_group()
+
+ if group_detail:
+ group_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the group
+ if self.volumes is not None and group_detail.volumes != self.volumes:
+ update_group = True
+ changed = True
+ elif self.initiators is not None and group_detail.initiators != self.initiators:
+ update_group = True
+ changed = True
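+                # These parameters are not returned by the API in a comparable form,
+                # so treat them as a change whenever they are supplied.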
+ elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
+ self.attributes is not None:
+ update_group = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not group_exists:
+ self.create_volume_access_group()
+ elif update_group:
+ self.update_volume_access_group()
+
+ elif self.state == 'absent':
+ self.delete_volume_access_group()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireVolumeAccessGroup()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py
new file mode 100644
index 00000000..9d5378a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+  alternative: Use M(netapp.elementsw.na_elementsw_volume) instead.
+short_description: Manage SolidFire volumes
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volumes on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ required: true
+
+    enable512e:
+        description:
+        - Should the volume provide 512-byte sector emulation?
+        - Required when C(state=present).
+        aliases: ['512emulation']
+
+ qos:
+ description: Initial quality of service settings for this volume. Configure as dict in playbooks.
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this volume.
+
+ volume_id:
+ description:
+ - The ID of the volume to manage or update.
+    - In order to create multiple volumes with the same name but different volume_ids, declare the I(volume_id)
+      parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
+      volume (since it is an auto-generated property).
+
+ size:
+ description:
+    - The size of the volume in the unit given by I(size_unit).
+    - Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ access:
+ description:
+ - "Access allowed for the volume."
+ - "readOnly: Only read operations are allowed."
+ - "readWrite: Reads and writes are allowed."
+ - "locked: No reads or writes are allowed."
+ - "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked."
+ - "If unspecified, the access settings of the clone will be the same as the source."
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+ account_id: 3
+ enable512e: False
+ size: 1
+ size_unit: gb
+
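+  # Hypothetical illustration (not part of the original docs): supplying
+  # volume_id with an arbitrary value lets a second volume reuse an existing
+  # name; the given ID is not assigned to the newly created volume.
+  - name: Create a second volume with a duplicate name
+    community.general.sf_volume_manager:
+      hostname: "{{ solidfire_hostname }}"
+      username: "{{ solidfire_username }}"
+      password: "{{ solidfire_password }}"
+      state: present
+      name: AnsibleVol
+      volume_id: 99
+      account_id: 3
+      enable512e: false
+      size: 1
+      size_unit: gb
+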
+ - name: Update Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ account_id: 3
+ access: readWrite
+
+ - name: Delete Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: AnsibleVol
+ account_id: 2
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolume(object):
+
+ def __init__(self):
+
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=True, type='int'),
+
+ enable512e=dict(type='bool', aliases=['512emulation']),
+ qos=dict(required=False, type='dict', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+
+ volume_id=dict(type='int', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+
+ access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite',
+ 'locked', 'replicationTarget']),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['size', 'enable512e'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+ self.enable512e = p['enable512e']
+ self.qos = p['qos']
+ self.attributes = p['attributes']
+
+ self.volume_id = p['volume_id']
+ self.size_unit = p['size_unit']
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.access = p['access']
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume(self):
+ """
+ Return volume object if found
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
+ for volume in volume_list.volumes:
+ if volume.name == self.name:
+ # Update self.volume_id
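+                # A non-empty delete_time marks a volume pending deletion; skip those.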
+ if self.volume_id is not None:
+ if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
+ return volume
+ else:
+ if str(volume.delete_time) == "":
+ self.volume_id = volume.volume_id
+ return volume
+ return None
+
+ def create_volume(self):
+ try:
+ self.sfe.create_volume(name=self.name,
+ account_id=self.account_id,
+ total_size=self.size,
+ enable512e=self.enable512e,
+ qos=self.qos,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
+ exception=to_native(err))
+
+ def delete_volume(self):
+ try:
+ self.sfe.delete_volume(volume_id=self.volume_id)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
+ exception=to_native(err))
+
+ def update_volume(self):
+ try:
+ self.sfe.modify_volume(self.volume_id,
+ account_id=self.account_id,
+ access=self.access,
+ qos=self.qos,
+ total_size=self.size,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error updating volume %s" % self.name,
+ exception=to_native(err))
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ update_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ # Checking for state change(s) here, and applying it later in the code allows us to support
+ # check_mode
+ changed = True
+
+ elif self.state == 'present':
+ if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.account_id is not None and self.account_id is not None \
+ and volume_detail.account_id != self.account_id:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
+ size_difference = abs(float(volume_detail.total_size - self.size))
+                # Resize only if the relative difference exceeds 0.1% (0.001)
+ if size_difference / self.size > 0.001:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.attributes is not None and self.attributes is not None and \
+ volume_detail.attributes != self.attributes:
+ update_volume = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ result_message = ""
+
+ if changed:
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+ result_message = "Volume created"
+ elif update_volume:
+ self.update_volume()
+ result_message = "Volume updated"
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ result_message = "Volume deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ v = SolidFireVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py
new file mode 100644
index 00000000..5e8b5932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py
@@ -0,0 +1,858 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefa_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flasharray.purefa_info) instead.
+short_description: Collect facts from Pure Storage FlashArray
+description:
+  - Collect facts information from a Pure Storage FlashArray running the
+ Purity//FA operating system. By default, the module will collect basic
+ fact information including hosts, host groups, protection
+ groups and volume counts. Additional fact information can be collected
+ based on the configured set of arguments.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
+ admins, volumes, snapshots, pods, vgroups, offload, apps and arrays.
+ type: list
+ required: false
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fa
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefa_facts:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect configuration and capacity facts
+ community.general.purefa_facts:
+ gather_subset:
+ - config
+ - capacity
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect all facts
+ community.general.purefa_facts:
+ gather_subset:
+ - all
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
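+
+# The collected facts land in the ansible_purefa_facts variable; a hypothetical
+# follow-up task to display them:
+- name: Show the default facts
+  ansible.builtin.debug:
+    var: ansible_purefa_facts.default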
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashArray
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {},
+ "config": {
+ "directory_service": {
+ "array_admin_group": null,
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "check_peer": false,
+ "enabled": false,
+ "group_base": null,
+ "readonly_group": null,
+ "storage_admin_group": null,
+ "uri": []
+ },
+ "dns": {
+ "domain": "domain.com",
+ "nameservers": [
+ "8.8.8.8",
+ "8.8.4.4"
+ ]
+ },
+ "ntp": [
+ "0.ntp.pool.org",
+ "1.ntp.pool.org",
+ "2.ntp.pool.org",
+ "3.ntp.pool.org"
+ ],
+ "smtp": [
+ {
+ "enabled": true,
+ "name": "alerts@acme.com"
+ },
+ {
+ "enabled": true,
+ "name": "user@acme.com"
+ }
+ ],
+ "snmp": [
+ {
+ "auth_passphrase": null,
+ "auth_protocol": null,
+ "community": null,
+ "host": "localhost",
+ "name": "localhost",
+ "privacy_passphrase": null,
+ "privacy_protocol": null,
+ "user": null,
+ "version": "v2c"
+ }
+ ],
+ "ssl_certs": {
+ "country": null,
+ "email": null,
+ "issued_by": "",
+ "issued_to": "",
+ "key_size": 2048,
+ "locality": null,
+ "organization": "Acme Storage, Inc.",
+ "organizational_unit": "Acme Storage, Inc.",
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "2017-08-11T23:09:06Z",
+ "valid_to": "2027-08-09T23:09:06Z"
+ },
+ "syslog": []
+ },
+ "default": {
+ "array_name": "flasharray1",
+ "connected_arrays": 1,
+ "hostgroups": 0,
+ "hosts": 10,
+ "pods": 3,
+ "protection_groups": 1,
+ "purity_version": "5.0.4",
+ "snapshots": 1,
+ "volume_groups": 2
+ },
+ "hgroups": {},
+ "hosts": {
+ "host1": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:2f6f5715a533"
+ ],
+ "wwn": []
+ },
+ "host2": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:d17fb13fe0b"
+ ],
+ "wwn": []
+ },
+ "host3": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:97b1351bfb2"
+ ],
+ "wwn": []
+ },
+ "host4": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:dd84e9a7b2cb"
+ ],
+ "wwn": [
+ "10000000C96C48D1",
+ "10000000C96C48D2"
+ ]
+ }
+ },
+ "interfaces": {
+ "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
+ },
+ "network": {
+ "ct0.eth0": {
+ "address": "10.10.10.10",
+ "gateway": "10.10.10.1",
+ "hwaddr": "ec:f4:bb:c8:8a:04",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ },
+ "ct0.eth2": {
+ "address": "10.10.10.11",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:00",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth3": {
+ "address": "10.10.10.12",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:02",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth4": {
+ "address": "10.10.10.13",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0c",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth5": {
+ "address": "10.10.10.14",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0d",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "vir0": {
+ "address": "10.10.10.20",
+ "gateway": "10.10.10.1",
+ "hwaddr": "fe:ba:e9:e7:6b:0f",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ }
+ },
+ "offload": {
+ "nfstarget": {
+ "address": "10.0.2.53",
+ "mount_options": null,
+ "mount_point": "/offload",
+ "protocol": "nfs",
+ "status": "scanning"
+ }
+ },
+ "performance": {
+ "input_per_sec": 8191,
+ "output_per_sec": 0,
+ "queue_depth": 1,
+ "reads_per_sec": 0,
+ "san_usec_per_write_op": 15,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 642,
+ "writes_per_sec": 2
+ },
+ "pgroups": {
+ "consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": {
+ "hgroups": null,
+ "hosts": null,
+ "source": "host1",
+ "targets": null,
+ "volumes": [
+ "volume-1"
+ ]
+ }
+ },
+ "pods": {
+ "srm-pod": {
+ "arrays": [
+ {
+ "array_id": "52595f7e-b460-4b46-8851-a5defd2ac192",
+ "mediator_status": "online",
+ "name": "sn1-405-c09-37",
+ "status": "online"
+ },
+ {
+ "array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca",
+ "mediator_status": "online",
+ "name": "sn1-420-c11-31",
+ "status": "online"
+ }
+ ],
+ "source": null
+ }
+ },
+ "snapshots": {
+ "consisgroup.cgsnapshot": {
+ "created": "2018-03-28T09:34:02Z",
+ "size": 13958643712,
+ "source": "volume-1"
+ }
+ },
+ "subnet": {},
+ "vgroups": {
+ "vvol--vSphere-HA-0ffc7dd1-vg": {
+ "volumes": [
+ "vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6"
+ ]
+ }
+ },
+ "volumes": {
+ "ansible_data": {
+ "bandwidth": null,
+ "hosts": [
+ [
+ "host1",
+ 1
+ ]
+ ],
+ "serial": "43BE47C12334399B000114A6",
+ "size": 1099511627776,
+ "source": null
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec
+
+
+ADMIN_API_VERSION = '1.14'
+S3_REQUIRED_API_VERSION = '1.16'
+LATENCY_REQUIRED_API_VERSION = '1.16'
+AC_REQUIRED_API_VERSION = '1.14'
+CAP_REQUIRED_API_VERSION = '1.6'
+SAN_REQUIRED_API_VERSION = '1.10'
+NVME_API_VERSION = '1.16'
+PREFERRED_API_VERSION = '1.15'
+CONN_STATUS_API_VERSION = '1.17'
+
+
+def generate_default_dict(array):
+ default_facts = {}
+ defaults = array.get()
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ default_facts['volume_groups'] = len(array.list_vgroups())
+ default_facts['connected_arrays'] = len(array.list_array_connections())
+ default_facts['pods'] = len(array.list_pods())
+ default_facts['connection_key'] = array.get(connection_key=True)['connection_key']
+ hosts = array.list_hosts()
+ admins = array.list_admins()
+ snaps = array.list_volumes(snap=True, pending=True)
+ pgroups = array.list_pgroups(pending=True)
+ hgroups = array.list_hgroups()
+ # Old FA arrays only report model from the primary controller
+ ct0_model = array.get_hardware('CT0')['model']
+ if ct0_model:
+ model = ct0_model
+ else:
+ ct1_model = array.get_hardware('CT1')['model']
+ model = ct1_model
+ default_facts['array_model'] = model
+ default_facts['array_name'] = defaults['array_name']
+ default_facts['purity_version'] = defaults['version']
+ default_facts['hosts'] = len(hosts)
+ default_facts['snapshots'] = len(snaps)
+ default_facts['protection_groups'] = len(pgroups)
+ default_facts['hostgroups'] = len(hgroups)
+ default_facts['admins'] = len(admins)
+ return default_facts
+
+
+def generate_perf_dict(array):
+ perf_facts = {}
+ api_version = array._list_available_rest_versions()
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ latency_info = array.get(action='monitor', latency=True)[0]
+ perf_info = array.get(action='monitor')[0]
+ # IOPS
+ perf_facts['writes_per_sec'] = perf_info['writes_per_sec']
+ perf_facts['reads_per_sec'] = perf_info['reads_per_sec']
+
+ # Bandwidth
+ perf_facts['input_per_sec'] = perf_info['input_per_sec']
+ perf_facts['output_per_sec'] = perf_info['output_per_sec']
+
+        # Latency (the enclosing check already guarantees this API version)
+        perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op']
+        perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op']
+        perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op']
+        perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op']
+        perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op']
+        perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op']
+ perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op']
+ perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op']
+ perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op']
+ perf_facts['queue_depth'] = perf_info['queue_depth']
+ return perf_facts
+
+
+def generate_config_dict(array):
+ config_facts = {}
+ api_version = array._list_available_rest_versions()
+ # DNS
+ config_facts['dns'] = array.get_dns()
+ # SMTP
+ config_facts['smtp'] = array.list_alert_recipients()
+ # SNMP
+ config_facts['snmp'] = array.list_snmp_managers()
+ config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
+ # DS
+ config_facts['directory_service'] = array.get_directory_service()
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['directory_service_roles'] = {}
+ roles = array.list_directory_service_roles()
+ for role in range(0, len(roles)):
+ role_name = roles[role]['name']
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles[role]['group'],
+ 'group_base': roles[role]['group_base'],
+ }
+ else:
+ config_facts['directory_service'].update(array.get_directory_service(groups=True))
+ # NTP
+ config_facts['ntp'] = array.get(ntpserver=True)['ntpserver']
+ # SYSLOG
+ config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
+ # Phonehome
+ config_facts['phonehome'] = array.get(phonehome=True)['phonehome']
+ # Proxy
+ config_facts['proxy'] = array.get(proxy=True)['proxy']
+ # Relay Host
+ config_facts['relayhost'] = array.get(relayhost=True)['relayhost']
+ # Sender Domain
+ config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain']
+ # Idle Timeout
+ config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
+ # SCSI Timeout
+ config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
+ # SSL
+ config_facts['ssl_certs'] = array.get_certificate()
+ # Global Admin settings
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['global_admin'] = array.get_global_admin_attributes()
+ return config_facts
+
+
+def generate_admin_dict(array):
+ api_version = array._list_available_rest_versions()
+ admin_facts = {}
+ if ADMIN_API_VERSION in api_version:
+ admins = array.list_admins()
+ for admin in range(0, len(admins)):
+ admin_name = admins[admin]['name']
+ admin_facts[admin_name] = {
+ 'type': admins[admin]['type'],
+ 'role': admins[admin]['role'],
+ }
+ return admin_facts
+
+
+def generate_subnet_dict(array):
+ sub_facts = {}
+ subnets = array.list_subnets()
+ for sub in range(0, len(subnets)):
+ sub_name = subnets[sub]['name']
+ if subnets[sub]['enabled']:
+ sub_facts[sub_name] = {
+ 'gateway': subnets[sub]['gateway'],
+ 'mtu': subnets[sub]['mtu'],
+ 'vlan': subnets[sub]['vlan'],
+ 'prefix': subnets[sub]['prefix'],
+ 'interfaces': subnets[sub]['interfaces'],
+ 'services': subnets[sub]['services'],
+ }
+ return sub_facts
+
+
+def generate_network_dict(array):
+ net_facts = {}
+ ports = array.list_network_interfaces()
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
+ net_facts[int_name] = {
+ 'hwaddr': ports[port]['hwaddr'],
+ 'mtu': ports[port]['mtu'],
+ 'enabled': ports[port]['enabled'],
+ 'speed': ports[port]['speed'],
+ 'address': ports[port]['address'],
+ 'slaves': ports[port]['slaves'],
+ 'services': ports[port]['services'],
+ 'gateway': ports[port]['gateway'],
+ 'netmask': ports[port]['netmask'],
+ }
+ if ports[port]['subnet']:
+ subnets = array.get_subnet(ports[port]['subnet'])
+ if subnets['enabled']:
+ net_facts[int_name]['subnet'] = {
+ 'name': subnets['name'],
+ 'prefix': subnets['prefix'],
+ 'vlan': subnets['vlan'],
+ }
+ return net_facts
+
+
+def generate_capacity_dict(array):
+ capacity_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CAP_REQUIRED_API_VERSION in api_version:
+ volumes = array.list_volumes(pending=True)
+ capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes)
+ capacity = array.get(space=True)
+ total_capacity = capacity[0]['capacity']
+ used_space = capacity[0]["total"]
+ capacity_facts['free_space'] = total_capacity - used_space
+ capacity_facts['total_capacity'] = total_capacity
+ capacity_facts['data_reduction'] = capacity[0]['data_reduction']
+ capacity_facts['system_space'] = capacity[0]['system']
+ capacity_facts['volume_space'] = capacity[0]['volumes']
+ capacity_facts['shared_space'] = capacity[0]['shared_space']
+ capacity_facts['snapshot_space'] = capacity[0]['snapshots']
+ capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning']
+ capacity_facts['total_reduction'] = capacity[0]['total_reduction']
+
+ return capacity_facts
+
+
+def generate_snap_dict(array):
+ snap_facts = {}
+ snaps = array.list_volumes(snap=True)
+ for snap in range(0, len(snaps)):
+ snapshot = snaps[snap]['name']
+ snap_facts[snapshot] = {
+ 'size': snaps[snap]['size'],
+ 'source': snaps[snap]['source'],
+ 'created': snaps[snap]['created'],
+ }
+ return snap_facts
+
+
+def generate_vol_dict(array):
+ volume_facts = {}
+ vols = array.list_volumes()
+ for vol in range(0, len(vols)):
+ volume = vols[vol]['name']
+ volume_facts[volume] = {
+ 'source': vols[vol]['source'],
+ 'size': vols[vol]['size'],
+ 'serial': vols[vol]['serial'],
+ 'hosts': [],
+ 'bandwidth': ""
+ }
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ qvols = array.list_volumes(qos=True)
+ for qvol in range(0, len(qvols)):
+ volume = qvols[qvol]['name']
+ qos = qvols[qvol]['bandwidth_limit']
+ volume_facts[volume]['bandwidth'] = qos
+ vvols = array.list_volumes(protocol_endpoint=True)
+ for vvol in range(0, len(vvols)):
+ volume = vvols[vvol]['name']
+ volume_facts[volume] = {
+ 'source': vvols[vvol]['source'],
+ 'serial': vvols[vvol]['serial'],
+ 'hosts': []
+ }
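+    # Connected volumes map each volume name to [host, lun] pairs.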
+ cvols = array.list_volumes(connect=True)
+ for cvol in range(0, len(cvols)):
+ volume = cvols[cvol]['name']
+ voldict = [cvols[cvol]['host'], cvols[cvol]['lun']]
+ volume_facts[volume]['hosts'].append(voldict)
+ return volume_facts
+
+
+def generate_host_dict(array):
+ api_version = array._list_available_rest_versions()
+ host_facts = {}
+ hosts = array.list_hosts()
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ tports = []
+ host_all_info = array.get_host(hostname, all=True)
+ if host_all_info:
+ tports = host_all_info[0]['target_port']
+ host_facts[hostname] = {
+ 'hgroup': hosts[host]['hgroup'],
+ 'iqn': hosts[host]['iqn'],
+ 'wwn': hosts[host]['wwn'],
+ 'personality': array.get_host(hostname,
+ personality=True)['personality'],
+ 'target_port': tports
+ }
+ if NVME_API_VERSION in api_version:
+ host_facts[hostname]['nqn'] = hosts[host]['nqn']
+ if PREFERRED_API_VERSION in api_version:
+ hosts = array.list_hosts(preferred_array=True)
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array']
+ return host_facts
+
+
+def generate_pgroups_dict(array):
+ pgroups_facts = {}
+ pgroups = array.list_pgroups()
+ for pgroup in range(0, len(pgroups)):
+ protgroup = pgroups[pgroup]['name']
+ pgroups_facts[protgroup] = {
+ 'hgroups': pgroups[pgroup]['hgroups'],
+ 'hosts': pgroups[pgroup]['hosts'],
+ 'source': pgroups[pgroup]['source'],
+ 'targets': pgroups[pgroup]['targets'],
+ 'volumes': pgroups[pgroup]['volumes'],
+ }
+ prot_sched = array.get_pgroup(protgroup, schedule=True)
+ prot_reten = array.get_pgroup(protgroup, retention=True)
+ if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
+            pgroups_facts[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
+            pgroups_facts[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
+ pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
+ pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
+ pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at']
+ pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at']
+ pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
+ pgroups_facts[protgroup]['per_day'] = prot_reten['per_day']
+ pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day']
+ pgroups_facts[protgroup]['target_days'] = prot_reten['target_days']
+ pgroups_facts[protgroup]['days'] = prot_reten['days']
+ pgroups_facts[protgroup]['all_for'] = prot_reten['all_for']
+ pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for']
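+        # A ':' in the name indicates a protection group replicated from a remote array.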
+ if ":" in protgroup:
+ snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
+ pgroups_facts[protgroup]['snaps'] = {}
+ for snap_transfer in range(0, len(snap_transfers)):
+ snap = snap_transfers[snap_transfer]['name']
+ pgroups_facts[protgroup]['snaps'][snap] = {
+ 'created': snap_transfers[snap_transfer]['created'],
+ 'started': snap_transfers[snap_transfer]['started'],
+ 'completed': snap_transfers[snap_transfer]['completed'],
+ 'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
+ 'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
+ 'progress': snap_transfers[snap_transfer]['progress'],
+ }
+ return pgroups_facts
+
+
+def generate_pods_dict(array):
+ pods_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ pods = array.list_pods()
+ for pod in range(0, len(pods)):
+ acpod = pods[pod]['name']
+ pods_facts[acpod] = {
+ 'source': pods[pod]['source'],
+ 'arrays': pods[pod]['arrays'],
+ }
+ return pods_facts
+
+
+def generate_conn_array_dict(array):
+ conn_array_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CONN_STATUS_API_VERSION in api_version:
+ carrays = array.list_connected_arrays()
+ for carray in range(0, len(carrays)):
+ arrayname = carrays[carray]['array_name']
+ conn_array_facts[arrayname] = {
+ 'array_id': carrays[carray]['id'],
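+                # 'throtled' (sic) follows the key name as returned by the array API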
+ 'throtled': carrays[carray]['throtled'],
+ 'version': carrays[carray]['version'],
+ 'type': carrays[carray]['type'],
+ 'mgmt_ip': carrays[carray]['management_address'],
+ 'repl_ip': carrays[carray]['replication_address'],
+ }
+ if CONN_STATUS_API_VERSION in api_version:
+ conn_array_facts[arrayname]['status'] = carrays[carray]['status']
+ return conn_array_facts
+
+
+def generate_apps_dict(array):
+ apps_facts = {}
+ api_version = array._list_available_rest_versions()
+ if SAN_REQUIRED_API_VERSION in api_version:
+ apps = array.list_apps()
+ for app in range(0, len(apps)):
+ appname = apps[app]['name']
+ apps_facts[appname] = {
+ 'version': apps[app]['version'],
+ 'status': apps[app]['status'],
+ 'description': apps[app]['description'],
+ }
+ return apps_facts
+
+
+def generate_vgroups_dict(array):
+ vgroups_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ vgroups = array.list_vgroups()
+ for vgroup in range(0, len(vgroups)):
+ virtgroup = vgroups[vgroup]['name']
+ vgroups_facts[virtgroup] = {
+ 'volumes': vgroups[vgroup]['volumes'],
+ }
+ return vgroups_facts
+
+
+def generate_nfs_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ offload = array.list_nfs_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'mount_point': offload[target]['mount_point'],
+ 'protocol': offload[target]['protocol'],
+ 'mount_options': offload[target]['mount_options'],
+ 'address': offload[target]['address'],
+ }
+ return offload_facts
+
+
+def generate_s3_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if S3_REQUIRED_API_VERSION in api_version:
+ offload = array.list_s3_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'bucket': offload[target]['bucket'],
+ 'protocol': offload[target]['protocol'],
+ 'access_key_id': offload[target]['access_key_id'],
+ }
+ return offload_facts
+
+
+def generate_hgroups_dict(array):
+ hgroups_facts = {}
+ hgroups = array.list_hgroups()
+ for hgroup in range(0, len(hgroups)):
+ hostgroup = hgroups[hgroup]['name']
+ hgroups_facts[hostgroup] = {
+ 'hosts': hgroups[hgroup]['hosts'],
+ 'pgs': [],
+ 'vols': [],
+ }
+ pghgroups = array.list_hgroups(protect=True)
+ for pghg in range(0, len(pghgroups)):
+ pgname = pghgroups[pghg]['name']
+ hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group'])
+ volhgroups = array.list_hgroups(connect=True)
+ for pgvol in range(0, len(volhgroups)):
+ pgname = volhgroups[pgvol]['name']
+ volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']]
+ hgroups_facts[pgname]['vols'].append(volpgdict)
+ return hgroups_facts
+
+
+def generate_interfaces_dict(array):
+ api_version = array._list_available_rest_versions()
+ int_facts = {}
+ ports = array.list_ports()
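+    # Record whichever identifier the port reports (wwn, iqn or nqn).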
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
+ if ports[port]['wwn']:
+ int_facts[int_name] = ports[port]['wwn']
+ if ports[port]['iqn']:
+ int_facts[int_name] = ports[port]['iqn']
+ if NVME_API_VERSION in api_version:
+ if ports[port]['nqn']:
+ int_facts[int_name] = ports[port]['nqn']
+ return int_facts
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ array = get_system(module)
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
+ 'hosts', 'admins', 'volumes', 'snapshots', 'pods',
+ 'vgroups', 'offload', 'apps', 'arrays')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
+                             % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(array)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(array)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(array)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(array)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(array)
+ if 'subnet' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(array)
+ if 'interfaces' in subset or 'all' in subset:
+ facts['interfaces'] = generate_interfaces_dict(array)
+ if 'hosts' in subset or 'all' in subset:
+ facts['hosts'] = generate_host_dict(array)
+ if 'volumes' in subset or 'all' in subset:
+ facts['volumes'] = generate_vol_dict(array)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(array)
+ if 'hgroups' in subset or 'all' in subset:
+ facts['hgroups'] = generate_hgroups_dict(array)
+ if 'pgroups' in subset or 'all' in subset:
+ facts['pgroups'] = generate_pgroups_dict(array)
+ if 'pods' in subset or 'all' in subset:
+ facts['pods'] = generate_pods_dict(array)
+ if 'admins' in subset or 'all' in subset:
+ facts['admins'] = generate_admin_dict(array)
+ if 'vgroups' in subset or 'all' in subset:
+ facts['vgroups'] = generate_vgroups_dict(array)
+ if 'offload' in subset or 'all' in subset:
+ facts['nfs_offload'] = generate_nfs_offload_dict(array)
+ facts['s3_offload'] = generate_s3_offload_dict(array)
+ if 'apps' in subset or 'all' in subset:
+ facts['apps'] = generate_apps_dict(array)
+ if 'arrays' in subset or 'all' in subset:
+ facts['arrays'] = generate_conn_array_dict(array)
+
+ module.exit_json(ansible_facts={'ansible_purefa_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py
new file mode 100644
index 00000000..8c5a40c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py
@@ -0,0 +1,652 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefb_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flashblade.purefb_info) instead.
+short_description: Collect facts from Pure Storage FlashBlade
+description:
+  - Collect facts information from a Pure Storage FlashBlade running the
+    Purity//FB operating system. By default, the module will collect basic
+    fact information including filesystem, snapshot and bucket counts.
+    Additional fact information can be collected based on the configured
+    set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnets, lags, filesystems and snapshots.
+ required: false
+ type: list
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fb
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefb_facts:
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect configuration and capacity facts
+ community.general.purefb_facts:
+ gather_subset:
+ - config
+ - capacity
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect all facts
+ community.general.purefb_facts:
+ gather_subset:
+ - all
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
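+
+# The collected facts land in the ansible_purefb_facts variable; a hypothetical
+# follow-up task to display them:
+- name: Show the default facts
+  ansible.builtin.debug:
+    var: ansible_purefb_facts.default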
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {
+ "aggregate": {
+ "data_reduction": 1.1179228,
+ "snapshots": 0,
+ "total_physical": 17519748439,
+ "unique": 17519748439,
+ "virtual": 19585726464
+ },
+ "file-system": {
+ "data_reduction": 1.3642412,
+ "snapshots": 0,
+ "total_physical": 4748219708,
+ "unique": 4748219708,
+ "virtual": 6477716992
+ },
+ "object-store": {
+ "data_reduction": 1.0263462,
+ "snapshots": 0,
+ "total_physical": 12771528731,
+ "unique": 12771528731,
+ "virtual": 6477716992
+ },
+ "total": 83359896948925
+ },
+ "config": {
+ "alert_watchers": {
+ "enabled": true,
+ "name": "notify@acmestorage.com"
+ },
+ "array_management": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "management",
+ "services": [
+ "management"
+ ],
+ "uris": []
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "demo.acmestorage.com",
+ "name": "demo-fb-1",
+ "nameservers": [
+ "8.8.8.8"
+ ],
+ "search": [
+ "demo.acmestorage.com"
+ ]
+ },
+ "nfs_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "nfs",
+ "services": [
+ "nfs"
+ ],
+ "uris": []
+ },
+ "ntp": [
+ "0.ntp.pool.org"
+ ],
+ "smb_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "smb",
+ "services": [
+ "smb"
+ ],
+ "uris": []
+ },
+ "smtp": {
+ "name": "demo-fb-1",
+ "relay_host": null,
+ "sender_domain": "acmestorage.com"
+ },
+ "ssl_certs": {
+ "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
+ "common_name": "Acme Storage",
+ "country": "US",
+ "email": null,
+ "intermediate_certificate": null,
+ "issued_by": "Acme Storage",
+ "issued_to": "Acme Storage",
+ "key_size": 4096,
+ "locality": null,
+ "name": "global",
+ "organization": "Acme Storage",
+ "organizational_unit": "Acme Storage",
+ "passphrase": null,
+ "private_key": null,
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "1508433967000",
+ "valid_to": "2458833967000"
+ }
+ },
+ "default": {
+ "blades": 15,
+ "buckets": 7,
+ "filesystems": 2,
+ "flashblade_name": "demo-fb-1",
+ "object_store_accounts": 1,
+ "object_store_users": 1,
+ "purity_version": "2.2.0",
+ "snapshots": 1,
+ "total_capacity": 83359896948925
+ },
+ "filesystems": {
+ "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": true,
+ "nfs_rules": "*(rw,no_root_squash)",
+ "provisioned": 21474836480,
+ "snapshot_enabled": false
+ },
+ "z": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": false,
+ "provisioned": 1073741824,
+ "snapshot_enabled": false
+ }
+ },
+ "lag": {
+ "uplink": {
+ "lag_speed": 0,
+ "port_speed": 40000000000,
+ "ports": [
+ {
+ "name": "CH1.FM1.ETH1.1"
+ },
+ {
+ "name": "CH1.FM1.ETH1.2"
+                    }
+ ],
+ "status": "healthy"
+ }
+ },
+ "network": {
+ "fm1.admin0": {
+ "address": "10.10.100.6",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "fm2.admin0": {
+ "address": "10.10.100.7",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "nfs1": {
+ "address": "10.10.100.4",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "data"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "vir0": {
+ "address": "10.10.100.5",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ }
+ },
+ "performance": {
+ "aggregate": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "http": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "nfs": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "s3": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ }
+ },
+ "snapshots": {
+ "z.188": {
+ "destroyed": false,
+ "source": "z",
+ "source_destroyed": false,
+ "suffix": "188"
+ }
+ },
+ "subnet": {
+ "new-mgmt": {
+ "gateway": "10.10.100.1",
+ "interfaces": [
+ {
+ "name": "fm1.admin0"
+ },
+ {
+ "name": "fm2.admin0"
+ },
+ {
+ "name": "nfs1"
+ },
+ {
+ "name": "vir0"
+ }
+ ],
+ "lag": "uplink",
+ "mtu": 1500,
+ "prefix": "10.10.100.0/24",
+ "services": [
+ "data",
+ "management",
+ "support"
+ ],
+ "vlan": 2200
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+HARD_LIMIT_API_VERSION = '1.4'
+
+
+def generate_default_dict(blade):
+ default_facts = {}
+ defaults = blade.arrays.list_arrays().items[0]
+ default_facts['flashblade_name'] = defaults.name
+ default_facts['purity_version'] = defaults.version
+ default_facts['filesystems'] = \
+ len(blade.file_systems.list_file_systems().items)
+ default_facts['snapshots'] = \
+ len(blade.file_system_snapshots.list_file_system_snapshots().items)
+ default_facts['buckets'] = len(blade.buckets.list_buckets().items)
+ default_facts['object_store_users'] = \
+ len(blade.object_store_users.list_object_store_users().items)
+ default_facts['object_store_accounts'] = \
+ len(blade.object_store_accounts.list_object_store_accounts().items)
+ default_facts['blades'] = len(blade.blade.list_blades().items)
+ default_facts['total_capacity'] = \
+ blade.arrays.list_arrays_space().items[0].capacity
+ return default_facts
+
+
+def generate_perf_dict(blade):
+ perf_facts = {}
+ total_perf = blade.arrays.list_arrays_performance()
+ http_perf = blade.arrays.list_arrays_performance(protocol='http')
+ s3_perf = blade.arrays.list_arrays_performance(protocol='s3')
+ nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs')
+ perf_facts['aggregate'] = {
+ 'bytes_per_op': total_perf.items[0].bytes_per_op,
+ 'bytes_per_read': total_perf.items[0].bytes_per_read,
+ 'bytes_per_write': total_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': total_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': total_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': total_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': total_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': total_perf.items[0].writes_per_sec,
+ }
+ perf_facts['http'] = {
+ 'bytes_per_op': http_perf.items[0].bytes_per_op,
+ 'bytes_per_read': http_perf.items[0].bytes_per_read,
+ 'bytes_per_write': http_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': http_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': http_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': http_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': http_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': http_perf.items[0].writes_per_sec,
+ }
+ perf_facts['s3'] = {
+ 'bytes_per_op': s3_perf.items[0].bytes_per_op,
+ 'bytes_per_read': s3_perf.items[0].bytes_per_read,
+ 'bytes_per_write': s3_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': s3_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': s3_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': s3_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': s3_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': s3_perf.items[0].writes_per_sec,
+ }
+ perf_facts['nfs'] = {
+ 'bytes_per_op': nfs_perf.items[0].bytes_per_op,
+ 'bytes_per_read': nfs_perf.items[0].bytes_per_read,
+ 'bytes_per_write': nfs_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': nfs_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': nfs_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': nfs_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': nfs_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': nfs_perf.items[0].writes_per_sec,
+ }
+
+ return perf_facts
+
+
+def generate_config_dict(blade):
+ config_facts = {}
+ config_facts['dns'] = blade.dns.list_dns().items[0].to_dict()
+ config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
+ config_facts['alert_watchers'] = \
+ blade.alert_watchers.list_alert_watchers().items[0].to_dict()
+ api_version = blade.api_version.list_versions().versions
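+    # Directory services, NTP and certificate details are only exposed by REST API 1.4 and later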
+ if HARD_LIMIT_API_VERSION in api_version:
+ config_facts['array_management'] = \
+ blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
+ config_facts['directory_service_roles'] = {}
+ roles = blade.directory_services.list_directory_services_roles()
+ for role in range(0, len(roles.items)):
+ role_name = roles.items[role].name
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles.items[role].group,
+ 'group_base': roles.items[role].group_base
+ }
+ config_facts['nfs_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
+ config_facts['smb_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
+ config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
+ config_facts['ssl_certs'] = \
+ blade.certificates.list_certificates().items[0].to_dict()
+ return config_facts
+
+
+def generate_subnet_dict(blade):
+ sub_facts = {}
+ subnets = blade.subnets.list_subnets()
+ for sub in range(0, len(subnets.items)):
+ sub_name = subnets.items[sub].name
+ if subnets.items[sub].enabled:
+ sub_facts[sub_name] = {
+ 'gateway': subnets.items[sub].gateway,
+ 'mtu': subnets.items[sub].mtu,
+ 'vlan': subnets.items[sub].vlan,
+ 'prefix': subnets.items[sub].prefix,
+ 'services': subnets.items[sub].services,
+ }
+ sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
+ sub_facts[sub_name]['interfaces'] = []
+ for iface in range(0, len(subnets.items[sub].interfaces)):
+ sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
+ return sub_facts
+
+
+def generate_lag_dict(blade):
+ lag_facts = {}
+ groups = blade.link_aggregation_groups.list_link_aggregation_groups()
+ for groupcnt in range(0, len(groups.items)):
+ lag_name = groups.items[groupcnt].name
+ lag_facts[lag_name] = {
+ 'lag_speed': groups.items[groupcnt].lag_speed,
+ 'port_speed': groups.items[groupcnt].port_speed,
+ 'status': groups.items[groupcnt].status,
+ }
+ lag_facts[lag_name]['ports'] = []
+ for port in range(0, len(groups.items[groupcnt].ports)):
+ lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
+ return lag_facts
+
+
+def generate_network_dict(blade):
+ net_facts = {}
+ ports = blade.network_interfaces.list_network_interfaces()
+ for portcnt in range(0, len(ports.items)):
+ int_name = ports.items[portcnt].name
+ if ports.items[portcnt].enabled:
+ net_facts[int_name] = {
+ 'type': ports.items[portcnt].type,
+ 'mtu': ports.items[portcnt].mtu,
+ 'vlan': ports.items[portcnt].vlan,
+ 'address': ports.items[portcnt].address,
+ 'services': ports.items[portcnt].services,
+ 'gateway': ports.items[portcnt].gateway,
+ 'netmask': ports.items[portcnt].netmask,
+ }
+ return net_facts
+
+
+def generate_capacity_dict(blade):
+ capacity_facts = {}
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type='file-system')
+ object_cap = blade.arrays.list_arrays_space(type='object-store')
+ capacity_facts['total'] = total_cap.items[0].capacity
+ capacity_facts['aggregate'] = {
+ 'data_reduction': total_cap.items[0].space.data_reduction,
+ 'snapshots': total_cap.items[0].space.snapshots,
+ 'total_physical': total_cap.items[0].space.total_physical,
+ 'unique': total_cap.items[0].space.unique,
+ 'virtual': total_cap.items[0].space.virtual,
+ }
+ capacity_facts['file-system'] = {
+ 'data_reduction': file_cap.items[0].space.data_reduction,
+ 'snapshots': file_cap.items[0].space.snapshots,
+ 'total_physical': file_cap.items[0].space.total_physical,
+ 'unique': file_cap.items[0].space.unique,
+ 'virtual': file_cap.items[0].space.virtual,
+ }
+ capacity_facts['object-store'] = {
+ 'data_reduction': object_cap.items[0].space.data_reduction,
+ 'snapshots': object_cap.items[0].space.snapshots,
+ 'total_physical': object_cap.items[0].space.total_physical,
+ 'unique': object_cap.items[0].space.unique,
+        'virtual': object_cap.items[0].space.virtual,
+ }
+
+ return capacity_facts
+
+
+def generate_snap_dict(blade):
+ snap_facts = {}
+ snaps = blade.file_system_snapshots.list_file_system_snapshots()
+ for snap in range(0, len(snaps.items)):
+ snapshot = snaps.items[snap].name
+ snap_facts[snapshot] = {
+ 'destroyed': snaps.items[snap].destroyed,
+ 'source': snaps.items[snap].source,
+ 'suffix': snaps.items[snap].suffix,
+ 'source_destroyed': snaps.items[snap].source_destroyed,
+ }
+ return snap_facts
+
+
+def generate_fs_dict(blade):
+ fs_facts = {}
+ fsys = blade.file_systems.list_file_systems()
+ for fsystem in range(0, len(fsys.items)):
+ share = fsys.items[fsystem].name
+ fs_facts[share] = {
+ 'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
+ 'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
+ 'provisioned': fsys.items[fsystem].provisioned,
+ 'destroyed': fsys.items[fsystem].destroyed,
+ }
+ if fsys.items[fsystem].http.enabled:
+ fs_facts[share]['http'] = fsys.items[fsystem].http.enabled
+ if fsys.items[fsystem].smb.enabled:
+ fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
+ if fsys.items[fsystem].nfs.enabled:
+ fs_facts[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
+
+ return fs_facts
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnets', 'lags',
+ 'filesystems', 'snapshots')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+        module.fail_json(msg="value of gather_subset must be one or more of: %s, got: %s"
+                             % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(blade)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(blade)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(blade)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(blade)
+ if 'lags' in subset or 'all' in subset:
+ facts['lag'] = generate_lag_dict(blade)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(blade)
+ if 'subnets' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(blade)
+ if 'filesystems' in subset or 'all' in subset:
+ facts['filesystems'] = generate_fs_dict(blade)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(blade)
+
+ module.exit_json(ansible_facts={'ansible_purefb_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py
new file mode 100644
index 00000000..54bb8c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_eg
+short_description: Manage export groups on Vexata VX100 storage arrays
+description:
+ - Create or delete export groups on a Vexata VX100 array.
+ - An export group is a tuple of a volume group, initiator group and port
+ group that allows a set of volumes to be exposed to one or more hosts
+ through specific array ports.
+author:
+ - Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Export group name.
+ required: true
+ type: str
+ state:
+ description:
+    - Creates the export group when C(present) or deletes it when C(absent).
+ default: present
+ choices: [ present, absent ]
+ type: str
+ vg:
+ description:
+ - Volume group name.
+ type: str
+ ig:
+ description:
+ - Initiator group name.
+ type: str
+ pg:
+ description:
+ - Port group name.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create export group named db_export.
+ community.general.vexata_eg:
+ name: db_export
+ vg: dbvols
+ ig: dbhosts
+ pg: pg1
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete export group named db_export
+ community.general.vexata_eg:
+ name: db_export
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together)
+
+
+def get_eg(module, array):
+ """Retrieve a named vg if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ egs = array.list_egs()
+        # wrap in list(): filter() is lazy on Python 3, so len() would fail on it
+        eg = list(filter(lambda eg: eg['name'] == name, egs))
+ if len(eg) == 1:
+ return eg[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve export groups.')
+
+
+def get_vg_id(module, array):
+ """Retrieve a named vg's id if it exists, error if absent."""
+ name = module.params['vg']
+ try:
+ vgs = array.list_vgs()
+        vg = list(filter(lambda vg: vg['name'] == name, vgs))
+ if len(vg) == 1:
+ return vg[0]['id']
+ else:
+ module.fail_json(msg='Volume group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volume groups.')
+
+
+def get_ig_id(module, array):
+ """Retrieve a named ig's id if it exists, error if absent."""
+ name = module.params['ig']
+ try:
+ igs = array.list_igs()
+        ig = list(filter(lambda ig: ig['name'] == name, igs))
+ if len(ig) == 1:
+ return ig[0]['id']
+ else:
+ module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve initiator groups.')
+
+
+def get_pg_id(module, array):
+ """Retrieve a named pg's id if it exists, error if absent."""
+ name = module.params['pg']
+ try:
+ pgs = array.list_pgs()
+        pg = list(filter(lambda pg: pg['name'] == name, pgs))
+ if len(pg) == 1:
+ return pg[0]['id']
+ else:
+ module.fail_json(msg='Port group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve port groups.')
+
+
+def create_eg(module, array):
+ """"Create a new export group."""
+ changed = False
+ eg_name = module.params['name']
+ vg_id = get_vg_id(module, array)
+ ig_id = get_ig_id(module, array)
+ pg_id = get_pg_id(module, array)
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ eg = array.create_eg(
+ eg_name,
+ 'Ansible export group',
+ (vg_id, ig_id, pg_id))
+ if eg:
+ module.log(msg='Created export group {0}'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def delete_eg(module, array, eg):
+ changed = False
+ eg_name = eg['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_eg(
+ eg['id'])
+ if ok:
+ module.log(msg='Export group {0} deleted.'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ vg=dict(type='str'),
+ ig=dict(type='str'),
+ pg=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ eg = get_eg(module, array)
+
+ if state == 'present' and not eg:
+ create_eg(module, array)
+ elif state == 'absent' and eg:
+ delete_eg(module, array, eg)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
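
The get_eg/get_vg_id/get_ig_id/get_pg_id lookups above share one shape: list
the objects, keep the single entry whose name matches, and treat anything else
as absent. (The list() wrapped around filter() in the fixes above is needed
because filter() is lazy on Python 3, so len() would raise on it.) A hedged
sketch of that shape as one generic helper; find_by_name is an illustrative
name, not the module's API:

    def find_by_name(objects, name):
        """Return the single dict whose 'name' matches, else None."""
        matches = [obj for obj in objects if obj['name'] == name]
        return matches[0] if len(matches) == 1 else None

    assert find_by_name([{'name': 'a', 'id': 1}], 'a') == {'name': 'a', 'id': 1}
    assert find_by_name([], 'a') is None
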
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py
new file mode 100644
index 00000000..1cf4cd7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_volume
+short_description: Manage volumes on Vexata VX100 storage arrays
+description:
+  - Create, delete, or extend volumes on a Vexata VX100 array.
+author:
+- Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Volume name.
+ required: true
+ type: str
+ state:
+ description:
+    - Creates or expands the volume when C(present), or removes it when C(absent).
+ default: present
+ choices: [ present, absent ]
+ type: str
+ size:
+ description:
+ - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create new 2 TiB volume named foo
+ community.general.vexata_volume:
+ name: foo
+ size: 2T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Expand volume named foo to 4 TiB
+ community.general.vexata_volume:
+ name: foo
+ size: 4T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete volume named foo
+ community.general.vexata_volume:
+ name: foo
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together, size_to_MiB)
+
+
+def get_volume(module, array):
+ """Retrieve a named volume if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ vols = array.list_volumes()
+        vol = list(filter(lambda v: v['name'] == name, vols))
+ if len(vol) == 1:
+ return vol[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volumes.')
+
+
+def validate_size(module, err_msg):
+ size = module.params.get('size', False)
+ if not size:
+ module.fail_json(msg=err_msg)
+ size = size_to_MiB(size)
+ if size <= 0:
+ module.fail_json(msg='Invalid volume size, must be <integer>[MGT].')
+ return size
+
+
+def create_volume(module, array):
+ """"Create a new volume."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to create volume.')
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.create_volume(
+ module.params['name'],
+ 'Ansible volume',
+ size)
+ if vol:
+ module.log(msg='Created volume {0}'.format(vol['id']))
+ changed = True
+ else:
+ module.fail_json(msg='Volume create failed.')
+    except Exception:
+        # surface API errors instead of silently reporting no change
+        module.fail_json(msg='Volume create failed.')
+ module.exit_json(changed=changed)
+
+
+def update_volume(module, array, volume):
+ """Expand the volume size."""
+ changed = False
+    size = validate_size(module, err_msg='Size is required to update volume.')
+    prev_size = volume['volSize']
+    if size <= prev_size:
+        # volumes can only be grown; report no change and stop here
+        module.log(msg='Expanded volume size must be larger than the current size.')
+        module.exit_json(changed=changed)
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.grow_volume(
+ volume['name'],
+ volume['description'],
+ volume['id'],
+ size)
+ if vol:
+ changed = True
+    except Exception:
+        module.fail_json(msg='Volume {0} expand failed.'.format(volume['name']))
+
+ module.exit_json(changed=changed)
+
+
+def delete_volume(module, array, volume):
+ changed = False
+ vol_name = volume['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_volume(
+ volume['id'])
+ if ok:
+ module.log(msg='Volume {0} deleted.'.format(vol_name))
+ changed = True
+ else:
+ raise Exception
+    except Exception:
+        module.fail_json(msg='Volume {0} delete failed.'.format(vol_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ size=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ volume = get_volume(module, array)
+
+ if state == 'present':
+ if not volume:
+ create_volume(module, array)
+ else:
+ update_volume(module, array, volume)
+ elif state == 'absent' and volume:
+ delete_volume(module, array, volume)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
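
size_to_MiB is imported from module_utils.vexata and is not part of this diff;
the sketch below is only an assumption of what such a parser could look like,
matching the documented semantics (M=2^20, G=2^30, T=2^40 bytes, result in
MiB, non-positive on bad input). size_to_mib_sketch is a made-up name.

    import re

    def size_to_mib_sketch(size):
        """Parse '<integer>[MGT]' into MiB; return -1 on malformed input."""
        m = re.match(r'^(\d+)([MGT])$', size)
        if not m:
            return -1
        value, unit = int(m.group(1)), m.group(2)
        return value * {'M': 1, 'G': 1024, 'T': 1024 * 1024}[unit]

    assert size_to_mib_sketch('10M') == 10
    assert size_to_mib_sketch('2T') == 2 * 1024 * 1024
    assert size_to_mib_sketch('bogus') == -1
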
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py
new file mode 100644
index 00000000..6b2260fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: zfs
+short_description: Manage zfs
+description:
+ - Manages ZFS file systems, volumes, clones and snapshots
+options:
+ name:
+ description:
+      - File system, snapshot or volume name, e.g. C(rpool/myfs).
+ required: true
+ state:
+ description:
+ - Whether to create (C(present)), or remove (C(absent)) a
+ file system, snapshot or volume. All parents/children
+ will be created/destroyed as needed to reach the desired state.
+ choices: [ absent, present ]
+ required: true
+ origin:
+ description:
+ - Snapshot from which to create a clone.
+ extra_zfs_properties:
+ description:
+ - A dictionary of zfs properties to be set.
+ - See the zfs(8) man page for more information.
+author:
+- Johan Wiren (@johanwiren)
+'''
+
+EXAMPLES = '''
+- name: Create a new file system called myfs in pool rpool with the setuid property turned off
+ community.general.zfs:
+ name: rpool/myfs
+ state: present
+ extra_zfs_properties:
+ setuid: off
+
+- name: Create a new volume called myvol in pool rpool.
+ community.general.zfs:
+ name: rpool/myvol
+ state: present
+ extra_zfs_properties:
+ volsize: 10M
+
+- name: Create a snapshot of rpool/myfs file system.
+ community.general.zfs:
+ name: rpool/myfs@mysnapshot
+ state: present
+
+- name: Create a new file system called myfs2 with snapdir enabled
+ community.general.zfs:
+ name: rpool/myfs2
+ state: present
+ extra_zfs_properties:
+ snapdir: enabled
+
+- name: Create a new file system by cloning a snapshot
+ community.general.zfs:
+ name: rpool/cloned_fs
+ state: present
+ origin: rpool/myfs@mysnapshot
+
+- name: Destroy a filesystem
+ community.general.zfs:
+ name: rpool/myfs
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zfs(object):
+
+ def __init__(self, module, name, properties):
+ self.module = module
+ self.name = name
+ self.properties = properties
+ self.changed = False
+ self.zfs_cmd = module.get_bin_path('zfs', True)
+ self.zpool_cmd = module.get_bin_path('zpool', True)
+ self.pool = name.split('/')[0].split('@')[0]
+ self.is_solaris = os.uname()[0] == 'SunOS'
+ self.is_openzfs = self.check_openzfs()
+ self.enhanced_sharing = self.check_enhanced_sharing()
+
+ def check_openzfs(self):
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if version == '-':
+ return True
+ if int(version) == 5000:
+ return True
+ return False
+
+ def check_enhanced_sharing(self):
+ if self.is_solaris and not self.is_openzfs:
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if int(version) >= 34:
+ return True
+ return False
+
+ def exists(self):
+ cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ properties = self.properties
+ origin = self.module.params.get('origin', None)
+ cmd = [self.zfs_cmd]
+
+ if "@" in self.name:
+ action = 'snapshot'
+ elif origin:
+ action = 'clone'
+ else:
+ action = 'create'
+
+ cmd.append(action)
+
+ if action in ['create', 'clone']:
+ cmd += ['-p']
+
+ if properties:
+ for prop, value in properties.items():
+ if prop == 'volsize':
+ cmd += ['-V', value]
+ elif prop == 'volblocksize':
+ cmd += ['-b', value]
+ else:
+ cmd += ['-o', '%s="%s"' % (prop, value)]
+ if origin and action == 'clone':
+ cmd.append(origin)
+ cmd.append(self.name)
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def destroy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_property(self, prop, value):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_properties_if_changed(self):
+ current_properties = self.get_current_properties()
+ for prop, value in self.properties.items():
+ if current_properties.get(prop, None) != value:
+ self.set_property(prop, value)
+
+ def get_current_properties(self):
+ cmd = [self.zfs_cmd, 'get', '-H']
+ if self.enhanced_sharing:
+ cmd += ['-e']
+ cmd += ['all', self.name]
+ rc, out, err = self.module.run_command(" ".join(cmd))
+ properties = dict()
+        for prop, value, source in [line.split('\t')[1:4] for line in out.splitlines()]:
+ if source == 'local':
+ properties[prop] = value
+ # Add alias for enhanced sharing properties
+ if self.enhanced_sharing:
+ properties['sharenfs'] = properties.get('share.nfs', None)
+ properties['sharesmb'] = properties.get('share.smb', None)
+ return properties
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ origin=dict(type='str', default=None),
+ extra_zfs_properties=dict(type='dict', default={}),
+ ),
+ supports_check_mode=True,
+ )
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+
+ if module.params.get('origin') and '@' in name:
+ module.fail_json(msg='cannot specify origin when operating on a snapshot')
+
+ # Reverse the boolification of zfs properties
+ for prop, value in module.params['extra_zfs_properties'].items():
+ if isinstance(value, bool):
+ if value is True:
+ module.params['extra_zfs_properties'][prop] = 'on'
+ else:
+ module.params['extra_zfs_properties'][prop] = 'off'
+ else:
+ module.params['extra_zfs_properties'][prop] = value
+
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ zfs = Zfs(module, name, module.params['extra_zfs_properties'])
+
+ if state == 'present':
+ if zfs.exists():
+ zfs.set_properties_if_changed()
+ else:
+ zfs.create()
+
+ elif state == 'absent':
+ if zfs.exists():
+ zfs.destroy()
+
+ result.update(zfs.properties)
+ result['changed'] = zfs.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
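
get_current_properties() above parses `zfs get -H all <name>`, which emits
tab-separated NAME PROPERTY VALUE SOURCE rows; only properties whose source is
'local' are kept, since those are the ones the module manages. A
self-contained sketch of that parse over a canned sample:

    sample = (
        "rpool/myfs\tcompression\tlz4\tlocal\n"
        "rpool/myfs\tatime\ton\tdefault\n"
    )

    properties = {}
    for row in sample.splitlines():
        name, prop, value, source = row.split('\t')
        if source == 'local':
            properties[prop] = value

    assert properties == {'compression': 'lz4'}
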
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py
new file mode 100644
index 00000000..223d7f72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: zfs_delegate_admin
+short_description: Manage ZFS delegated administration (user admin privileges)
+description:
+ - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
+ operations normally restricted to the superuser.
+ - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
+ - This module attempts to adhere to the behavior of the command line tool as much as possible.
+requirements:
+ - "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
+ versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
+options:
+ name:
+ description:
+      - File system or volume name, e.g. C(rpool/myfs).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to allow (C(present)), or unallow (C(absent)) a permission.
+      - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) is required.
+ - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
+ choices: [ absent, present ]
+ default: present
+ users:
+ description:
+ - List of users to whom permission(s) should be granted.
+ type: list
+ groups:
+ description:
+ - List of groups to whom permission(s) should be granted.
+ type: list
+ everyone:
+ description:
+ - Apply permissions to everyone.
+ type: bool
+ default: no
+ permissions:
+ description:
+ - The list of permission(s) to delegate (required if C(state) is C(present)).
+ type: list
+ choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ]
+ local:
+ description:
+ - Apply permissions to C(name) locally (C(zfs allow -l)).
+ type: bool
+ descendents:
+ description:
+ - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+ type: bool
+ recursive:
+ description:
+ - Unallow permissions recursively (ignored when C(state) is C(present)).
+ type: bool
+ default: no
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: adm
+ permissions: allow,unallow
+
+- name: Grant `zfs send` to everyone, plus the group `backup`
+ community.general.zfs_delegate_admin:
+ name: rpool/myvol
+ groups: backup
+ everyone: yes
+ permissions: send
+
+- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: foo,bar
+ permissions: send,receive
+ local: yes
+
+- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ everyone: yes
+ state: absent
+'''
+
+# This module does not return anything other than the standard
+# changed/state/msg/stdout
+RETURN = '''
+'''
+
+from itertools import product
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZfsDelegateAdmin(object):
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params.get('name')
+ self.state = module.params.get('state')
+ self.users = module.params.get('users')
+ self.groups = module.params.get('groups')
+ self.everyone = module.params.get('everyone')
+ self.perms = module.params.get('permissions')
+ self.scope = None
+ self.changed = False
+ self.initial_perms = None
+ self.subcommand = 'allow'
+ self.recursive_opt = []
+ self.run_method = self.update
+
+ self.setup(module)
+
+ def setup(self, module):
+ """ Validate params and set up for run.
+ """
+ if self.state == 'absent':
+ self.subcommand = 'unallow'
+ if module.params.get('recursive'):
+ self.recursive_opt = ['-r']
+
+ local = module.params.get('local')
+ descendents = module.params.get('descendents')
+ if (local and descendents) or (not local and not descendents):
+ self.scope = 'ld'
+ elif local:
+ self.scope = 'l'
+ elif descendents:
+ self.scope = 'd'
+ else:
+ self.module.fail_json(msg='Impossible value for local and descendents')
+
+ if not (self.users or self.groups or self.everyone):
+ if self.state == 'present':
+ self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
+ elif self.state == 'absent':
+ self.run_method = self.clear
+ # ansible ensures the else cannot happen here
+
+ self.zfs_path = module.get_bin_path('zfs', True)
+
+ @property
+ def current_perms(self):
+ """ Parse the output of `zfs allow <name>` to retrieve current permissions.
+ """
+ out = self.run_zfs_raw(subcommand='allow')
+ perms = {
+ 'l': {'u': {}, 'g': {}, 'e': []},
+ 'd': {'u': {}, 'g': {}, 'e': []},
+ 'ld': {'u': {}, 'g': {}, 'e': []},
+ }
+ linemap = {
+ 'Local permissions:': 'l',
+ 'Descendent permissions:': 'd',
+ 'Local+Descendent permissions:': 'ld',
+ }
+ scope = None
+ for line in out.splitlines():
+ scope = linemap.get(line, scope)
+ if not scope:
+ continue
+ try:
+ if line.startswith('\tuser ') or line.startswith('\tgroup '):
+ ent_type, ent, cur_perms = line.split()
+ perms[scope][ent_type[0]][ent] = cur_perms.split(',')
+ elif line.startswith('\teveryone '):
+ perms[scope]['e'] = line.split()[1].split(',')
+ except ValueError:
+ self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
+ return perms
+
+ def run_zfs_raw(self, subcommand=None, args=None):
+ """ Run a raw zfs command, fail on error.
+ """
+ cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc:
+ self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
+ return out
+
+ def run_zfs(self, args):
+ """ Run zfs allow/unallow with appropriate options as per module arguments.
+ """
+ args = self.recursive_opt + ['-' + self.scope] + args
+ if self.perms:
+ args.append(','.join(self.perms))
+ return self.run_zfs_raw(args=args)
+
+ def clear(self):
+ """ Called by run() to clear all permissions.
+ """
+ changed = False
+ stdout = ''
+ for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
+ for ent in self.initial_perms[scope][ent_type].keys():
+ stdout += self.run_zfs(['-%s' % ent_type, ent])
+ changed = True
+ for scope in ('ld', 'l', 'd'):
+ if self.initial_perms[scope]['e']:
+ stdout += self.run_zfs(['-e'])
+ changed = True
+ return (changed, stdout)
+
+ def update(self):
+ """ Update permissions as per module arguments.
+ """
+ stdout = ''
+ for ent_type, entities in (('u', self.users), ('g', self.groups)):
+ if entities:
+ stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
+ if self.everyone:
+ stdout += self.run_zfs(['-e'])
+ return (self.initial_perms != self.current_perms, stdout)
+
+ def run(self):
+ """ Run an operation, return results for Ansible.
+ """
+ exit_args = {'state': self.state}
+ self.initial_perms = self.current_perms
+ exit_args['changed'], stdout = self.run_method()
+ if exit_args['changed']:
+ exit_args['msg'] = 'ZFS delegated admin permissions updated'
+ exit_args['stdout'] = stdout
+ self.module.exit_json(**exit_args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ users=dict(type='list'),
+ groups=dict(type='list'),
+ everyone=dict(type='bool', default=False),
+ permissions=dict(type='list',
+ choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote',
+ 'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share',
+ 'snapshot', 'unallow']),
+ local=dict(type='bool'),
+ descendents=dict(type='bool'),
+ recursive=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ required_if=[('state', 'present', ['permissions'])],
+ )
+ zfs_delegate_admin = ZfsDelegateAdmin(module)
+ zfs_delegate_admin.run()
+
+
+if __name__ == '__main__':
+ main()
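
The local/descendents handling in setup() reduces to a three-way scope flag
for `zfs allow`: both set or both unset means local plus descendent ('ld', the
tool's default), otherwise one of 'l' or 'd'. A small sketch of just that
mapping; scope_flag is an illustrative name:

    def scope_flag(local, descendents):
        """Map the module's two booleans onto the -l/-d/-ld zfs allow scope."""
        if (local and descendents) or (not local and not descendents):
            return 'ld'
        return 'l' if local else 'd'

    assert scope_flag(None, None) == 'ld'
    assert scope_flag(True, True) == 'ld'
    assert scope_flag(True, None) == 'l'
    assert scope_flag(None, True) == 'd'
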
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py
new file mode 100644
index 00000000..e7719f68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zfs_facts
+short_description: Gather facts about ZFS datasets.
+description:
+ - Gather facts from ZFS dataset properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS dataset name.
+ required: yes
+ aliases: [ "ds", "dataset" ]
+ recurse:
+ description:
+ - Specifies if properties for any children should be recursively
+ displayed.
+ type: bool
+ default: 'no'
+ parsable:
+ description:
+ - Specifies if property values should be displayed in machine
+ friendly format.
+ type: bool
+ default: 'no'
+ properties:
+ description:
+ - Specifies which dataset properties should be queried in comma-separated format.
+ For more information about dataset properties, check zfs(1M) man page.
+ default: all
+ type:
+ description:
+ - Specifies which datasets types to display. Multiple values have to be
+ provided in comma-separated form.
+ choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
+ default: all
+ depth:
+ description:
+ - Specifies recursion depth.
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS dataset rpool/export/home
+ community.general.zfs_facts:
+ dataset: rpool/export/home
+
+- name: Report space usage on ZFS filesystems under data/home
+ community.general.zfs_facts:
+ name: data/home
+ recurse: yes
+ type: filesystem
+
+- ansible.builtin.debug:
+ msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
+ with_items: '{{ ansible_zfs_datasets }}'
+'''
+
+RETURN = '''
+name:
+ description: ZFS dataset name
+ returned: always
+ type: str
+ sample: rpool/var/spool
+parsable:
+    description: Whether property values are reported in machine-friendly (parsable) format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+recurse:
+    description: Whether facts were gathered recursively for child datasets.
+ returned: if 'recurse' is set to True
+ type: bool
+ sample: True
+zfs_datasets:
+ description: ZFS dataset facts
+ returned: always
+ type: str
+ sample:
+ {
+ "aclinherit": "restricted",
+ "aclmode": "discard",
+ "atime": "on",
+ "available": "43.8G",
+ "canmount": "on",
+ "casesensitivity": "sensitive",
+ "checksum": "on",
+ "compression": "off",
+ "compressratio": "1.00x",
+ "copies": "1",
+ "creation": "Thu Jun 16 11:37 2016",
+ "dedup": "off",
+ "devices": "on",
+ "exec": "on",
+ "filesystem_count": "none",
+ "filesystem_limit": "none",
+ "logbias": "latency",
+ "logicalreferenced": "18.5K",
+ "logicalused": "3.45G",
+ "mlslabel": "none",
+ "mounted": "yes",
+ "mountpoint": "/rpool",
+ "name": "rpool",
+ "nbmand": "off",
+ "normalization": "none",
+ "org.openindiana.caiman:install": "ready",
+ "primarycache": "all",
+ "quota": "none",
+ "readonly": "off",
+ "recordsize": "128K",
+ "redundant_metadata": "all",
+ "refcompressratio": "1.00x",
+ "referenced": "29.5K",
+ "refquota": "none",
+ "refreservation": "none",
+ "reservation": "none",
+ "secondarycache": "all",
+ "setuid": "on",
+ "sharenfs": "off",
+ "sharesmb": "off",
+ "snapdir": "hidden",
+ "snapshot_count": "none",
+ "snapshot_limit": "none",
+ "sync": "standard",
+ "type": "filesystem",
+ "used": "4.41G",
+ "usedbychildren": "4.41G",
+ "usedbydataset": "29.5K",
+ "usedbyrefreservation": "0",
+ "usedbysnapshots": "0",
+ "utf8only": "off",
+ "version": "5",
+ "vscan": "off",
+ "written": "29.5K",
+ "xattr": "on",
+ "zoned": "off"
+ }
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+
+SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
+
+
+class ZFSFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.recurse = module.params['recurse']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+ self.type = module.params['type']
+ self.depth = module.params['depth']
+
+ self._datasets = defaultdict(dict)
+ self.facts = []
+
+ def dataset_exists(self):
+ cmd = [self.module.get_bin_path('zfs')]
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zfs')]
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ if self.recurse:
+ cmd.append('-r')
+ if int(self.depth) != 0:
+ cmd.append('-d')
+ cmd.append('%s' % self.depth)
+ if self.type:
+ cmd.append('-t')
+ cmd.append(self.type)
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ for line in out.splitlines():
+ dataset, property, value = line.split('\t')
+
+ self._datasets[dataset].update({property: value})
+
+ for k, v in iteritems(self._datasets):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_datasets': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
+ recurse=dict(required=False, default=False, type='bool'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
+ depth=dict(required=False, default=0, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ zfs_facts = ZFSFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zfs_facts.name
+
+ if zfs_facts.parsable:
+ result['parsable'] = zfs_facts.parsable
+
+ if zfs_facts.recurse:
+ result['recurse'] = zfs_facts.recurse
+
+ if zfs_facts.dataset_exists():
+ result['ansible_facts'] = zfs_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
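
get_facts() above folds the `zfs get -H -o name,property,value` rows into one
dict per dataset via a defaultdict, then flattens them into the fact list. A
self-contained sketch of that accumulation over a canned sample:

    from collections import defaultdict

    out = (
        "data/home\tused\t4.41G\n"
        "data/home\tquota\tnone\n"
        "data/home/alice\tused\t1.2G\n"
    )

    datasets = defaultdict(dict)
    for row in out.splitlines():
        dataset, prop, value = row.split('\t')
        datasets[dataset][prop] = value

    facts = [dict(props, name=ds) for ds, props in datasets.items()]
    assert {'name': 'data/home', 'used': '4.41G', 'quota': 'none'} in facts
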
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py
new file mode 100644
index 00000000..728c0779
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zpool_facts
+short_description: Gather facts about ZFS pools.
+description:
+ - Gather facts from ZFS pool properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS pool name.
+ aliases: [ "pool", "zpool" ]
+ required: false
+ parsable:
+ description:
+ - Specifies if property values should be displayed in machine
+ friendly format.
+ type: bool
+ default: False
+ required: false
+ properties:
+ description:
+ - Specifies which dataset properties should be queried in comma-separated format.
+ For more information about dataset properties, check zpool(1M) man page.
+ default: all
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS pool rpool
+ community.general.zpool_facts: pool=rpool
+
+- name: Gather space usage about all imported ZFS pools
+ community.general.zpool_facts: properties='free,size'
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
+ with_items: '{{ ansible_zfs_pools }}'
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary containing all the detailed information about the ZFS pool facts
+ returned: always
+ type: complex
+ contains:
+ ansible_zfs_pools:
+ description: ZFS pool facts
+ returned: always
+ type: str
+ sample:
+ {
+ "allocated": "3.46G",
+ "altroot": "-",
+ "autoexpand": "off",
+ "autoreplace": "off",
+ "bootfs": "rpool/ROOT/openindiana",
+ "cachefile": "-",
+ "capacity": "6%",
+ "comment": "-",
+ "dedupditto": "0",
+ "dedupratio": "1.00x",
+ "delegation": "on",
+ "expandsize": "-",
+ "failmode": "wait",
+ "feature@async_destroy": "enabled",
+ "feature@bookmarks": "enabled",
+ "feature@edonr": "enabled",
+ "feature@embedded_data": "active",
+ "feature@empty_bpobj": "active",
+ "feature@enabled_txg": "active",
+ "feature@extensible_dataset": "enabled",
+ "feature@filesystem_limits": "enabled",
+ "feature@hole_birth": "active",
+ "feature@large_blocks": "enabled",
+ "feature@lz4_compress": "active",
+ "feature@multi_vdev_crash_dump": "enabled",
+ "feature@sha512": "enabled",
+ "feature@skein": "enabled",
+ "feature@spacemap_histogram": "active",
+ "fragmentation": "3%",
+ "free": "46.3G",
+ "freeing": "0",
+ "guid": "15729052870819522408",
+ "health": "ONLINE",
+ "leaked": "0",
+ "listsnapshots": "off",
+ "name": "rpool",
+ "readonly": "off",
+ "size": "49.8G",
+ "version": "-"
+ }
+name:
+ description: ZFS pool name
+ returned: always
+ type: str
+ sample: rpool
+parsable:
+    description: Whether property values are reported in machine-friendly (parsable) format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZPoolFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+
+ self._pools = defaultdict(dict)
+ self.facts = []
+
+ def pool_exists(self):
+ cmd = [self.module.get_bin_path('zpool')]
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zpool')]
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ if self.name:
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ for line in out.splitlines():
+ pool, property, value = line.split('\t')
+
+ self._pools[pool].update({property: value})
+
+ for k, v in iteritems(self._pools):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_pools': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ zpool_facts = ZPoolFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zpool_facts.name
+
+ if zpool_facts.parsable:
+ result['parsable'] = zpool_facts.parsable
+
+ if zpool_facts.name is not None:
+ if zpool_facts.pool_exists():
+ result['ansible_facts'] = zpool_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+ else:
+ result['ansible_facts'] = zpool_facts.get_facts()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
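
get_facts() here builds the zpool command incrementally: flags are appended
only when requested, and the pool name only when given (omitting it reports
every imported pool). A sketch of that construction; build_zpool_get is an
illustrative name:

    def build_zpool_get(properties, name=None, parsable=False, zpool='zpool'):
        """Assemble the argv that the module hands to run_command()."""
        cmd = [zpool, 'get', '-H']
        if parsable:
            cmd.append('-p')
        cmd += ['-o', 'name,property,value', properties]
        if name:
            cmd.append(name)
        return cmd

    assert build_zpool_get('free,size') == \
        ['zpool', 'get', '-H', '-o', 'name,property,value', 'free,size']
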
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py
new file mode 100644
index 00000000..5524beea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: supervisorctl
+short_description: Manage the state of a program or group of programs running via supervisord
+description:
+ - Manage the state of a program or group of programs running via supervisord
+options:
+ name:
+ type: str
+ description:
+ - The name of the supervisord program or group to manage.
+      - The name will be taken as a group name when it ends with a colon I(:).
+ - Group support is only available in Ansible version 1.6 or later.
+ required: true
+ config:
+ type: path
+ description:
+ - The supervisor configuration file path
+ server_url:
+ type: str
+ description:
+ - URL on which supervisord server is listening
+ username:
+ type: str
+ description:
+ - username to use for authentication
+ password:
+ type: str
+ description:
+ - password to use for authentication
+ state:
+ type: str
+ description:
+ - The desired state of program/group.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+ signal:
+ type: str
+ description:
+      - The signal to send to the program/group, when combined with the 'signalled' state. Required when I(state=signalled).
+ supervisorctl_path:
+ type: path
+ description:
+ - path to supervisorctl executable
+notes:
+ - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+ - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
+ - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+requirements: [ "supervisorctl" ]
+author:
+ - "Matt Wright (@mattupstate)"
+ - "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program to be in started state
+ community.general.supervisorctl:
+ name: my_app
+ state: started
+
+- name: Manage the state of program group to be in started state
+ community.general.supervisorctl:
+ name: 'my_apps:'
+ state: started
+
+- name: Restart my_app, reading supervisorctl configuration from a specified file
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ config: /var/opt/my_project/supervisord.conf
+
+- name: Restart my_app, connecting to supervisord with credentials and server URL
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ username: test
+ password: testpass
+ server_url: http://localhost:9001
+
+- name: Send a signal to my_app via supervisorctl
+ community.general.supervisorctl:
+ name: my_app
+ state: signalled
+ signal: USR1
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule, is_executable
+
+
+def main():
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ config=dict(required=False, type='path'),
+ server_url=dict(type='str', required=False),
+ username=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ supervisorctl_path=dict(required=False, type='path'),
+ state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
+ signal=dict(type='str', required=False)
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ is_group = False
+ if name.endswith(':'):
+ is_group = True
+ name = name.rstrip(':')
+ state = module.params['state']
+ config = module.params.get('config')
+ server_url = module.params.get('server_url')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ supervisorctl_path = module.params.get('supervisorctl_path')
+ signal = module.params.get('signal')
+
+ # we check error message for a pattern, so we need to make sure that's in C locale
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if supervisorctl_path:
+ if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
+ supervisorctl_args = [supervisorctl_path]
+ else:
+ module.fail_json(
+ msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
+ else:
+ supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
+
+ if config:
+ supervisorctl_args.extend(['-c', config])
+ if server_url:
+ supervisorctl_args.extend(['-s', server_url])
+ if username:
+ supervisorctl_args.extend(['-u', username])
+ if password:
+ supervisorctl_args.extend(['-p', password])
+
+ if state == 'signalled' and not signal:
+ module.fail_json(msg="State 'signalled' requires a 'signal' value")
+
+ def run_supervisorctl(cmd, name=None, **kwargs):
+ args = list(supervisorctl_args) # copy the master args
+ args.append(cmd)
+ if name:
+ args.append(name)
+ return module.run_command(args, **kwargs)
+
+ def get_matched_processes():
+ matched = []
+ rc, out, err = run_supervisorctl('status')
+ for line in out.splitlines():
+ # One status line may look like one of these two:
+ # process not in group:
+ # echo_date_lonely RUNNING pid 7680, uptime 13:22:18
+ # process in group:
+ # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
+ fields = [field for field in line.split(' ') if field != '']
+ process_name = fields[0]
+ status = fields[1]
+
+ if is_group:
+ # If there is ':', this process must be in a group.
+ if ':' in process_name:
+ group = process_name.split(':')[0]
+ if group != name:
+ continue
+ else:
+ continue
+ else:
+ if process_name != name:
+ continue
+
+ matched.append((process_name, status))
+ return matched
+
+ def take_action_on_processes(processes, status_filter, action, expected_result):
+ to_take_action_on = []
+ for process_name, status in processes:
+ if status_filter(status):
+ to_take_action_on.append(process_name)
+
+ if len(to_take_action_on) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ for process_name in to_take_action_on:
+ rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
+ if '%s: %s' % (process_name, expected_result) not in out:
+ module.fail_json(msg=out)
+
+ module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
+
+ if state == 'restarted':
+ rc, out, err = run_supervisorctl('update', check_rc=True)
+ processes = get_matched_processes()
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+
+ take_action_on_processes(processes, lambda s: True, 'restart', 'started')
+
+ processes = get_matched_processes()
+
+ if state == 'absent':
+ if len(processes) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('remove', name)
+ if '%s: removed process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'present':
+ if len(processes) > 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('add', name)
+ if '%s: added process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'started':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')
+
+ if state == 'stopped':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
+
+ if state == 'signalled':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+        # note the one-element tuple: s in ('RUNNING') would be a substring test
+        take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled')
+
+
+if __name__ == '__main__':
+ main()
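
get_matched_processes() above splits each `supervisorctl status` line on
whitespace and treats a name of the form group:process as a group member. A
minimal sketch of that parse on one canned status line:

    line = 'echo_date_group:echo_date_00   RUNNING   pid 7681, uptime 13:22:18'
    fields = [f for f in line.split(' ') if f != '']
    process_name, status = fields[0], fields[1]
    group = process_name.split(':')[0] if ':' in process_name else None

    assert process_name == 'echo_date_group:echo_date_00'
    assert status == 'RUNNING'
    assert group == 'echo_date_group'
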
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py
new file mode 100644
index 00000000..e9215670
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: svc
+author:
+- Brian Coca (@bcoca)
+short_description: Manage daemontools services
+description:
+ - Controls daemontools services on remote hosts using the svc utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: true
+ state:
+ description:
+      - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ svc (svc -t) and C(killed) will always bounce the svc (svc -k).
+ C(reloaded) will send a sigusr1 (svc -1).
+ C(once) will run a normally downed svc once (svc -o), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ downed:
+ description:
+      - Whether a 'down' file should exist; if it exists, it disables auto startup.
+ Defaults to no. Downed does not imply stopped.
+ type: bool
+ enabled:
+ description:
+      - Whether the service is enabled or not; if disabled, it also implies stopped.
+        Take note that a service can be enabled and downed (no auto restart).
+ type: bool
+ service_dir:
+ description:
+      - Directory svscan watches for services.
+ type: str
+ default: /service
+ service_src:
+ description:
+ - Directory where services are defined, the source of symlinks to service_dir.
+ type: str
+ default: /etc/service
+'''
+
+EXAMPLES = '''
+- name: Start svc dnscache, if not running
+ community.general.svc:
+ name: dnscache
+ state: started
+
+- name: Stop svc dnscache, if running
+ community.general.svc:
+ name: dnscache
+ state: stopped
+
+- name: Kill svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: killed
+
+- name: Restart svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: restarted
+
+- name: Reload svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+
+- name: Using alternative svc directory location
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+ service_dir: /var/service
+'''
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs):
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Svc(object):
+ """
+ Main class that handles daemontools, can be subclassed and overridden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = ['/command', '/usr/local/bin']
+ self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.downed = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
+ self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.downed = os.path.lexists('%s/down' % self.svc_full)
+ self.get_status()
+ else:
+ self.downed = os.path.lexists('%s/down' % self.src_full)
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+ self.execute_command([self.svc_cmd, '-dx', self.src_full])
+
+ src_log = '%s/log' % self.src_full
+ if os.path.exists(src_log):
+ self.execute_command([self.svc_cmd, '-dx', src_log])
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+
+ m = re.search(r'\(pid (\d+)\)', out)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r'(\d+) seconds', out)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(' up ', out):
+ self.state = 'start'
+ elif re.search(' down ', out):
+ self.state = 'stopp'
+ else:
+ self.state = 'unknown'
+ return
+
+ if re.search(' want ', out):
+ self.state += 'ing'
+ else:
+ self.state += 'ed'
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+ def stopp(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ downed=dict(type='bool'),
+ service_dir=dict(type='str', default='/service'),
+ service_src=dict(type='str', default='/etc/service'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+ downed = module.params['downed']
+
+ svc = Svc(module)
+ changed = False
+ orig_state = svc.report()
+
+ if enabled is not None and enabled != svc.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ svc.enable()
+ else:
+ svc.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != svc.state:
+ changed = True
+ if not module.check_mode:
+            # 'once' is itself a method name; the other states map to a method
+            # by dropping their last two characters ('stopp' aliases stop())
+            action = state if state == 'once' else state[:-2]
+            getattr(svc, action)()
+
+ if downed is not None and downed != svc.downed:
+ changed = True
+ if not module.check_mode:
+ d_file = "%s/down" % svc.svc_full
+ try:
+ if downed:
+ open(d_file, "a").close()
+ else:
+ os.unlink(d_file)
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))
+
+ module.exit_json(changed=changed, svc=svc.report())
+
+
+if __name__ == '__main__':
+ main()
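
get_status() above scrapes svstat output with regexes: pid and uptime come
from the parenthesised and 'seconds' fields, then ' up '/' down ' plus
' want ' build states like 'started' or 'stopping'. A sketch of the happy path
over one canned line (the real code also falls back to 'unknown'):

    import re

    out = '/service/dnscache: up (pid 4512) 732 seconds'
    pid = re.search(r'\(pid (\d+)\)', out).group(1)
    duration = re.search(r'(\d+) seconds', out).group(1)
    state = 'start' if ' up ' in out else 'stopp'
    state += 'ing' if ' want ' in out else 'ed'

    assert (pid, duration, state) == ('4512', '732', 'started')
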
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py
new file mode 100644
index 00000000..21d17f4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: svr4pkg
+short_description: Manage Solaris SVR4 packages
+description:
+ - Manages SVR4 packages on Solaris 10 and 11.
+ - These were the native packages on Solaris <= 10 and are available
+ as a legacy feature in Solaris 11.
+ - Note that this is a very basic packaging system. It will not enforce
+ dependencies on install or remove.
+author: "Boyd Adamson (@brontitall)"
+options:
+ name:
+ description:
+ - Package name, e.g. C(SUNWcsr)
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - If the package is to be installed, then I(src) is required.
+ - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
+ required: true
+ choices: ["present", "absent"]
+ type: str
+
+ src:
+ description:
+ - Specifies the location to install the package from. Required when C(state=present).
+      - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http://server/mypkgs.pkg)."
+ - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.
+ type: str
+ proxy:
+ description:
+ - HTTP[s] proxy to be used if C(src) is a URL.
+ type: str
+ response_file:
+ description:
+ - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
+ required: false
+ type: str
+ zone:
+ description:
+ - Whether to install the package only in the current zone, or install it into all zones.
+ - The installation into all zones works only if you are working with the global zone.
+ required: false
+ default: "all"
+ choices: ["current", "all"]
+ type: str
+ category:
+ description:
+ - Install/Remove category instead of a single package.
+ required: false
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Install a package from an already copied file
+ community.general.svr4pkg:
+ name: CSWcommon
+ src: /tmp/cswpkgs.pkg
+ state: present
+
+- name: Install a package directly from an http site
+ community.general.svr4pkg:
+ name: CSWpkgutil
+ src: 'http://get.opencsw.org/now'
+ state: present
+ zone: current
+
+- name: Install a package with a response file
+ community.general.svr4pkg:
+ name: CSWggrep
+ src: /tmp/third-party.pkg
+ response_file: /tmp/ggrep.response
+ state: present
+
+- name: Ensure that a package is not installed
+ community.general.svr4pkg:
+ name: SUNWgnome-sound-recorder
+ state: absent
+
+- name: Ensure that a category is not installed
+ community.general.svr4pkg:
+ name: FIREFOX
+ state: absent
+ category: true
+'''
+
+
+import os
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def package_installed(module, name, category):
+ cmd = [module.get_bin_path('pkginfo', True)]
+ cmd.append('-q')
+ if category:
+ cmd.append('-c')
+ cmd.append(name)
+ rc, out, err = module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def create_admin_file():
+ (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
+ fullauto = '''
+mail=
+instance=unique
+partial=nocheck
+runlevel=quit
+idepend=nocheck
+rdepend=nocheck
+space=quit
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+networktimeout=60
+networkretries=3
+authentication=quit
+keystore=/var/sadm/security
+proxy=
+basedir=default
+'''
+    os.write(desc, fullauto.encode())  # os.write() requires bytes on Python 3
+ os.close(desc)
+ return filename
+
+
+def run_command(module, cmd):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True)
+ return module.run_command(cmd)
+
+
+def package_install(module, name, src, proxy, response_file, zone, category):
+ adminfile = create_admin_file()
+ cmd = ['pkgadd', '-n']
+ if zone == 'current':
+ cmd += ['-G']
+ cmd += ['-a', adminfile, '-d', src]
+ if proxy is not None:
+ cmd += ['-x', proxy]
+ if response_file is not None:
+ cmd += ['-r', response_file]
+ if category:
+ cmd += ['-Y']
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def package_uninstall(module, name, src, category):
+ adminfile = create_admin_file()
+ if category:
+ cmd = ['pkgrm', '-na', adminfile, '-Y', name]
+ else:
+ cmd = ['pkgrm', '-na', adminfile, name]
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ src=dict(default=None),
+ proxy=dict(default=None),
+ response_file=dict(default=None),
+ zone=dict(required=False, default='all', choices=['current', 'all']),
+ category=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+ state = module.params['state']
+ name = module.params['name']
+ src = module.params['src']
+ proxy = module.params['proxy']
+ response_file = module.params['response_file']
+ zone = module.params['zone']
+ category = module.params['category']
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ if state == 'present':
+ if src is None:
+ module.fail_json(name=name,
+ msg="src is required when state=present")
+ if not package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
+ # Stdout is normally empty but for some packages can be
+ # very long and is not often useful
+ if len(out) > 75:
+ out = out[:75] + '...'
+
+ elif state == 'absent':
+ if package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_uninstall(module, name, src, category)
+ out = out[:75]
+
+ # Returncodes as per pkgadd(1m)
+ # 0 Successful completion
+ # 1 Fatal error.
+ # 2 Warning.
+ # 3 Interruption.
+ # 4 Administration.
+ # 5 Administration. Interaction is required. Do not use pkgadd -n.
+ # 10 Reboot after installation of all packages.
+ # 20 Reboot after installation of this package.
+ # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
+ if rc in (0, 2, 3, 10, 20):
+ result['changed'] = True
+ # no install nor uninstall, or failed
+ else:
+ result['changed'] = False
+
+ # rc will be none when the package already was installed and no action took place
+ # Only return failed=False when the returncode is known to be good as there may be more
+ # undocumented failure return codes
+ if rc not in (None, 0, 2, 10, 20):
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
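
The changed/failed decision at the end of main() encodes the pkgadd(1m) return
code table quoted above. A minimal standalone mirror of that logic (the helper
name is illustrative, not part of the module); note that rc 3 counts as both
changed and failed, while rc None (no install or uninstall took place) is
neither:

    def interpret_rc(rc):
        changed = rc in (0, 2, 3, 10, 20)
        failed = rc not in (None, 0, 2, 10, 20)
        return changed, failed

    assert interpret_rc(None) == (False, False)  # already in desired state
    assert interpret_rc(0) == (True, False)      # successful completion
    assert interpret_rc(3) == (True, True)       # interruption: partial change
    assert interpret_rc(99) == (False, True)     # observed datastream error
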
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py
new file mode 100644
index 00000000..7e9db835
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Raul Melo
+# Written by Raul Melo <raulmelo@gmail.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swdepot
+short_description: Manage packages with swdepot package manager (HP-UX)
+description:
+    - Will install, upgrade and remove packages with the swdepot package manager (HP-UX).
+notes: []
+author: "Raul Melo (@melodous)"
+options:
+ name:
+ description:
+            - Package name.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+            - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ required: true
+ choices: [ 'present', 'latest', 'absent']
+ type: str
+ depot:
+ description:
+            - The source repository from which to install or upgrade a package.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Install a package
+ community.general.swdepot:
+ name: unzip-6.0
+ state: present
+ depot: 'repository:/path'
+
+- name: Install the latest version of a package
+ community.general.swdepot:
+ name: unzip
+ state: latest
+ depot: 'repository:/path'
+
+- name: Remove a package
+ community.general.swdepot:
+ name: unzip
+ state: absent
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def compare_package(version1, version2):
+    """ Compare package versions.
+        Return values:
+        -1 version1 is lower
+         0 versions are equal
+         1 version1 is greater """
+
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+ normalized_version1 = normalize(version1)
+ normalized_version2 = normalize(version2)
+ if normalized_version1 == normalized_version2:
+ rc = 0
+ elif normalized_version1 < normalized_version2:
+ rc = -1
+ else:
+ rc = 1
+ return rc
+
+
+def query_package(module, name, depot=None):
+ """ Returns whether a package is installed or not and version. """
+
+ cmd_list = '/usr/sbin/swlist -a revision -l product'
+ if depot:
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)),
+ use_unsafe_shell=True)
+ else:
+ rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+ else:
+ version = None
+
+ return rc, version
+
+
+def remove_package(module, name):
+ """ Uninstall package if installed. """
+
+ cmd_remove = '/usr/sbin/swremove'
+ rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def install_package(module, depot, name):
+ """ Install package if not already installed """
+
+ cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
+ rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(choices=['present', 'absent', 'latest'], required=True),
+ depot=dict(default=None, required=False)
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ state = module.params['state']
+ depot = module.params['depot']
+
+ changed = False
+    msg = "No changes"
+ rc = 0
+ if (state == 'present' or state == 'latest') and depot is None:
+        output = "depot parameter is mandatory when state is present or latest"
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ # Check local version
+ rc, version_installed = query_package(module, name)
+ if not rc:
+ installed = True
+ msg = "Already installed"
+
+ else:
+ installed = False
+
+ if (state == 'present' or state == 'latest') and installed is False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ changed = True
+ msg = "Package installed"
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'latest' and installed is True:
+ # Check depot version
+ rc, version_depot = query_package(module, name, depot)
+
+ if not rc:
+ if compare_package(version_installed, version_depot) == -1:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ # Install new version
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+                    msg = "Package upgraded from " + version_installed + " to " + version_depot
+ changed = True
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ else:
+ output = "Software package not in repository " + depot
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'absent' and installed is True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = remove_package(module, name)
+ if not rc:
+ changed = True
+ msg = "Package removed"
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
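
compare_package() normalizes dotted versions by stripping trailing ".0" groups
and then compares the resulting integer lists lexicographically, so "1.2" and
"1.2.0" compare as equal and "10.0" sorts after "9.9". A quick standalone check
of that behaviour, using only the re module:

    import re

    def normalize(v):
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]

    assert normalize("1.2.0") == normalize("1.2") == [1, 2]
    assert normalize("6.0") < normalize("6.1")    # the -1 case: depot is newer
    assert normalize("10.0") > normalize("9.9")   # numeric, not string, ordering
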
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py
new file mode 100644
index 00000000..4dac01be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+
+# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swupd
+short_description: Manages updates and bundles in Clear Linux systems.
+description:
+ - Manages updates and bundles with the swupd bundle manager, which is used by the
+ Clear Linux Project for Intel Architecture.
+author: Alberto Murillo (@albertomurillo)
+options:
+ contenturl:
+ description:
+ - URL pointing to the contents of available bundles.
+ If not specified, the contents are retrieved from clearlinux.org.
+ type: str
+ format:
+ description:
+      - The format suffix for version file downloads, for example C(1), C(2), C(3) or C(staging).
+ If not specified, the default format is used.
+ type: str
+ manifest:
+ description:
+ - The manifest contains information about the bundles at certain version of the OS.
+ Specify a Manifest version to verify against that version or leave unspecified to
+ verify against the current version.
+ aliases: [release, version]
+ type: int
+ name:
+ description:
+      - Name of the I(bundle) to install or remove.
+ aliases: [bundle]
+ type: str
+ state:
+ description:
+      - Indicates the desired I(bundle) state. C(present) ensures the bundle
+        is installed while C(absent) ensures the I(bundle) is not installed.
+ default: present
+ choices: [present, absent]
+ type: str
+ update:
+ description:
+ - Updates the OS to the latest version.
+ type: bool
+ default: false
+ url:
+ description:
+ - Overrides both I(contenturl) and I(versionurl).
+ type: str
+ verify:
+ description:
+ - Verify content for OS version.
+ type: bool
+ default: false
+ versionurl:
+ description:
+ - URL for version string download.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Update the OS to the latest version
+ community.general.swupd:
+ update: yes
+
+- name: Install the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: present
+
+- name: Remove the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: absent
+
+- name: Check integrity of filesystem
+ community.general.swupd:
+ verify: yes
+
+- name: Downgrade OS to release 12920
+ community.general.swupd:
+ verify: yes
+ manifest: 12920
+'''
+
+RETURN = '''
+stdout:
+ description: stdout of swupd
+ returned: always
+ type: str
+stderr:
+ description: stderr of swupd
+ returned: always
+ type: str
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Swupd(object):
+ FILES_NOT_MATCH = "files did not match"
+ FILES_REPLACED = "missing files were replaced"
+ FILES_FIXED = "files were fixed"
+ FILES_DELETED = "files were deleted"
+
+ def __init__(self, module):
+ # Fail if swupd is not found
+ self.module = module
+ self.swupd_cmd = module.get_bin_path("swupd", False)
+ if not self.swupd_cmd:
+ module.fail_json(msg="Could not find swupd.")
+
+ # Initialize parameters
+ for key in module.params.keys():
+ setattr(self, key, module.params[key])
+
+ # Initialize return values
+ self.changed = False
+ self.failed = False
+ self.msg = None
+ self.rc = None
+ self.stderr = ""
+ self.stdout = ""
+
+ def _run_cmd(self, cmd):
+ self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
+
+ def _get_cmd(self, command):
+ cmd = "%s %s" % (self.swupd_cmd, command)
+
+ if self.format:
+ cmd += " --format=%s" % self.format
+ if self.manifest:
+ cmd += " --manifest=%s" % self.manifest
+ if self.url:
+ cmd += " --url=%s" % self.url
+ else:
+ if self.contenturl and command != "check-update":
+ cmd += " --contenturl=%s" % self.contenturl
+ if self.versionurl:
+ cmd += " --versionurl=%s" % self.versionurl
+
+ return cmd
+
+ def _is_bundle_installed(self, bundle):
+ try:
+ os.stat("/usr/share/clear/bundles/%s" % bundle)
+ except OSError:
+ return False
+
+ return True
+
+ def _needs_update(self):
+ cmd = self._get_cmd("check-update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ return True
+
+ if self.rc == 1:
+ return False
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def _needs_verify(self):
+ cmd = self._get_cmd("verify")
+ self._run_cmd(cmd)
+
+ if self.rc != 0:
+ self.failed = True
+ self.msg = "Failed to check for filesystem inconsistencies."
+
+ if self.FILES_NOT_MATCH in self.stdout:
+ return True
+
+ return False
+
+ def install_bundle(self, bundle):
+ """Installs a bundle with `swupd bundle-add bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=not self._is_bundle_installed(bundle))
+
+ if self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s is already installed" % bundle
+ return
+
+ cmd = self._get_cmd("bundle-add %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s installed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to install bundle %s" % bundle
+
+ def remove_bundle(self, bundle):
+ """Removes a bundle with `swupd bundle-remove bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._is_bundle_installed(bundle))
+
+ if not self._is_bundle_installed(bundle):
+            self.msg = "Bundle %s is not installed" % bundle
+ return
+
+ cmd = self._get_cmd("bundle-remove %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s removed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to remove bundle %s" % bundle
+
+ def update_os(self):
+ """Updates the os with `swupd update`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_update())
+
+ if not self._needs_update():
+ self.msg = "There are no updates available"
+ return
+
+ cmd = self._get_cmd("update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Update successful"
+ return
+
+ self.failed = True
+        self.msg = "Failed to update the OS"
+
+ def verify_os(self):
+ """Verifies filesystem against specified or current version"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_verify())
+
+ if not self._needs_verify():
+            self.msg = "No files were changed"
+ return
+
+ cmd = self._get_cmd("verify --fix")
+ self._run_cmd(cmd)
+
+ if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
+ self.changed = True
+ self.msg = "Fix successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to verify the OS"
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ contenturl=dict(type="str"),
+ format=dict(type="str"),
+ manifest=dict(aliases=["release", "version"], type="int"),
+ name=dict(aliases=["bundle"], type="str"),
+ state=dict(default="present", choices=["present", "absent"], type="str"),
+ update=dict(default=False, type="bool"),
+ url=dict(type="str"),
+ verify=dict(default=False, type="bool"),
+ versionurl=dict(type="str"),
+ ),
+ required_one_of=[["name", "update", "verify"]],
+ mutually_exclusive=[["name", "update", "verify"]],
+ supports_check_mode=True
+ )
+
+ swupd = Swupd(module)
+
+ name = module.params["name"]
+ state = module.params["state"]
+ update = module.params["update"]
+ verify = module.params["verify"]
+
+ if update:
+ swupd.update_os()
+ elif verify:
+ swupd.verify_os()
+ elif state == "present":
+ swupd.install_bundle(name)
+ elif state == "absent":
+ swupd.remove_bundle(name)
+ else:
+ swupd.failed = True
+
+ if swupd.failed:
+ module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+ else:
+ module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+
+
+if __name__ == '__main__':
+ main()
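
Swupd._get_cmd() gives url precedence over both contenturl and versionurl, and
additionally skips contenturl for the check-update subcommand. A minimal
standalone mirror of those precedence rules (the function name and the example
URLs are illustrative, not part of the module's API):

    def build_cmd(command, url=None, contenturl=None, versionurl=None):
        cmd = "swupd %s" % command
        if url:
            cmd += " --url=%s" % url    # url overrides both URL options below
        else:
            if contenturl and command != "check-update":
                cmd += " --contenturl=%s" % contenturl
            if versionurl:
                cmd += " --versionurl=%s" % versionurl
        return cmd

    assert build_cmd("check-update", contenturl="http://c") == "swupd check-update"
    assert build_cmd("update", contenturl="http://c") == "swupd update --contenturl=http://c"
    assert build_cmd("update", url="http://u", versionurl="http://v") == "swupd update --url=http://u"
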
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py
new file mode 100644
index 00000000..7f4f899f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syslogger
+short_description: Log messages in the syslog
+description:
+ - Uses syslog to add log entries to the host.
+options:
+ msg:
+ type: str
+ description:
+ - This is the message to place in syslog.
+ required: True
+ priority:
+ type: str
+ description:
+ - Set the log priority.
+ choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ]
+ default: "info"
+ facility:
+ type: str
+ description:
+ - Set the log facility.
+ choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news",
+ "uucp", "cron", "syslog", "local0", "local1", "local2",
+ "local3", "local4", "local5", "local6", "local7" ]
+ default: "daemon"
+ log_pid:
+ description:
+ - Log the PID in brackets.
+ type: bool
+ default: False
+ ident:
+ description:
+      - Specify the name of the application which is sending the log to syslog.
+ type: str
+ default: 'ansible_syslogger'
+ version_added: '0.2.0'
+author:
+ - Tim Rightnour (@garbled1)
+'''
+
+EXAMPLES = r'''
+- name: Simple Usage
+ community.general.syslogger:
+ msg: "I will end up as daemon.info"
+
+- name: Send a log message with err priority and user facility with log_pid
+ community.general.syslogger:
+ msg: "Hello from Ansible"
+ priority: "err"
+ facility: "user"
+ log_pid: true
+
+- name: Specify the name of application which is sending log message
+ community.general.syslogger:
+ ident: "MyApp"
+ msg: "I want to believe"
+ priority: "alert"
+'''
+
+RETURN = r'''
+ident:
+ description: Name of application sending the message to log
+ returned: always
+ type: str
+ sample: "ansible_syslogger"
+ version_added: '0.2.0'
+priority:
+ description: Priority level
+ returned: always
+ type: str
+  sample: "info"
+facility:
+ description: Syslog facility
+ returned: always
+ type: str
+  sample: "daemon"
+log_pid:
+ description: Log PID status
+ returned: always
+ type: bool
+ sample: True
+msg:
+ description: Message sent to syslog
+ returned: always
+ type: str
+ sample: "Hello from Ansible"
+'''
+
+import syslog
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_facility(facility):
+ return {
+ 'kern': syslog.LOG_KERN,
+ 'user': syslog.LOG_USER,
+ 'mail': syslog.LOG_MAIL,
+ 'daemon': syslog.LOG_DAEMON,
+ 'auth': syslog.LOG_AUTH,
+ 'lpr': syslog.LOG_LPR,
+ 'news': syslog.LOG_NEWS,
+ 'uucp': syslog.LOG_UUCP,
+ 'cron': syslog.LOG_CRON,
+ 'syslog': syslog.LOG_SYSLOG,
+ 'local0': syslog.LOG_LOCAL0,
+ 'local1': syslog.LOG_LOCAL1,
+ 'local2': syslog.LOG_LOCAL2,
+ 'local3': syslog.LOG_LOCAL3,
+ 'local4': syslog.LOG_LOCAL4,
+ 'local5': syslog.LOG_LOCAL5,
+ 'local6': syslog.LOG_LOCAL6,
+ 'local7': syslog.LOG_LOCAL7
+ }.get(facility, syslog.LOG_DAEMON)
+
+
+def get_priority(priority):
+ return {
+ 'emerg': syslog.LOG_EMERG,
+ 'alert': syslog.LOG_ALERT,
+ 'crit': syslog.LOG_CRIT,
+ 'err': syslog.LOG_ERR,
+ 'warning': syslog.LOG_WARNING,
+ 'notice': syslog.LOG_NOTICE,
+ 'info': syslog.LOG_INFO,
+ 'debug': syslog.LOG_DEBUG
+ }.get(priority, syslog.LOG_INFO)
+
+
+def main():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ ident=dict(type='str', default='ansible_syslogger'),
+ msg=dict(type='str', required=True),
+ priority=dict(type='str', required=False,
+ choices=["emerg", "alert", "crit", "err", "warning",
+ "notice", "info", "debug"],
+ default='info'),
+ facility=dict(type='str', required=False,
+ choices=["kern", "user", "mail", "daemon", "auth",
+ "lpr", "news", "uucp", "cron", "syslog",
+ "local0", "local1", "local2", "local3",
+ "local4", "local5", "local6", "local7"],
+ default='daemon'),
+ log_pid=dict(type='bool', required=False, default=False)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ )
+
+ result = dict(
+ changed=False,
+ ident=module.params['ident'],
+ priority=module.params['priority'],
+ facility=module.params['facility'],
+ log_pid=module.params['log_pid'],
+ msg=module.params['msg']
+ )
+
+ # do the logging
+ try:
+ syslog.openlog(module.params['ident'],
+ syslog.LOG_PID if module.params['log_pid'] else 0,
+ get_facility(module.params['facility']))
+ syslog.syslog(get_priority(module.params['priority']),
+ module.params['msg'])
+ syslog.closelog()
+ result['changed'] = True
+
+ except Exception as exc:
+ module.fail_json(error='Failed to write to syslog %s' % to_native(exc), exception=traceback.format_exc(), **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
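
The two lookup helpers degrade gracefully: an unrecognized facility or priority
name falls back to the module defaults (daemon and info) instead of raising. A
quick check, assuming the module's get_facility() and get_priority() are
importable alongside the standard syslog module:

    import syslog

    assert get_facility('user') == syslog.LOG_USER
    assert get_facility('bogus') == syslog.LOG_DAEMON   # default facility
    assert get_priority('err') == syslog.LOG_ERR
    assert get_priority('bogus') == syslog.LOG_INFO     # default priority
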
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py
new file mode 100644
index 00000000..2483fb36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019-2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syspatch
+
+short_description: Manage OpenBSD system patches
+
+
+description:
+ - "Manage OpenBSD system patches using syspatch."
+
+options:
+ apply:
+ type: bool
+ description:
+ - Apply all available system patches.
+ - By default, apply all patches.
+ - Deprecated. Will be removed in community.general 3.0.0.
+ default: yes
+ revert:
+ description:
+ - Revert system patches.
+ type: str
+ choices: [ all, one ]
+
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = '''
+- name: Apply all available system patches
+ community.general.syspatch:
+ apply: true
+
+- name: Revert last patch
+ community.general.syspatch:
+ revert: one
+
+- name: Revert all patches
+ community.general.syspatch:
+ revert: all
+
+# NOTE: You can reboot automatically if a patch requires it:
+- name: Apply all patches and store result
+ community.general.syspatch:
+ apply: true
+ register: syspatch
+
+- name: Reboot if patch requires it
+ ansible.builtin.reboot:
+ when: syspatch.reboot_needed
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+stdout:
+ description: syspatch standard output.
+ returned: always
+ type: str
+ sample: "001_rip6cksum"
+stderr:
+ description: syspatch standard error.
+ returned: always
+ type: str
+ sample: "syspatch: need root privileges"
+reboot_needed:
+ description: Whether or not a reboot is required after an update.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ apply=dict(type='bool', default=True, removed_in_version='3.0.0', removed_from_collection='community.general'),
+ revert=dict(type='str', choices=['all', 'one'])
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_one_of=[['apply', 'revert']]
+ )
+
+ result = syspatch_run(module)
+
+ module.exit_json(**result)
+
+
+def syspatch_run(module):
+ cmd = module.get_bin_path('syspatch', True)
+ changed = False
+ reboot_needed = False
+ warnings = []
+
+ # Set safe defaults for run_flag and check_flag
+ run_flag = ['-c']
+ check_flag = ['-c']
+ if module.params['revert']:
+ check_flag = ['-l']
+
+ if module.params['revert'] == 'all':
+ run_flag = ['-R']
+ else:
+ run_flag = ['-r']
+ elif module.params['apply']:
+ check_flag = ['-c']
+ run_flag = []
+
+ # Run check command
+ rc, out, err = module.run_command([cmd] + check_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+
+ if len(out) > 0:
+ # Changes pending
+ change_pending = True
+ else:
+ # No changes pending
+ change_pending = False
+
+ if module.check_mode:
+ changed = change_pending
+ elif change_pending:
+ rc, out, err = module.run_command([cmd] + run_flag)
+
+ # Workaround syspatch ln bug:
+ # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
+ if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('create unique kernel') >= 0:
+ # Kernel update applied
+ reboot_needed = True
+ elif out.lower().find('syspatch updated itself') >= 0:
+ warnings.append('Syspatch was updated. Please run syspatch again.')
+
+ # If no stdout, then warn user
+ if len(out) == 0:
+ warnings.append('syspatch had suggested changes, but stdout was empty.')
+
+ changed = True
+ else:
+ changed = False
+
+ return dict(
+ changed=changed,
+ reboot_needed=reboot_needed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
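
syspatch_run() derives one flag set for the read-only check command and another
for the mutating run command. A minimal standalone mirror of that selection
(the function name is illustrative only): reverting lists installed patches
with -l and then runs -R (all) or -r (most recent), while applying lists
pending patches with -c and then runs bare syspatch:

    def select_flags(revert, apply):
        check_flag, run_flag = ['-c'], ['-c']   # safe defaults
        if revert:
            check_flag = ['-l']                 # list installed patches
            run_flag = ['-R'] if revert == 'all' else ['-r']
        elif apply:
            check_flag = ['-c']                 # list uninstalled patches
            run_flag = []                       # bare syspatch applies everything
        return check_flag, run_flag

    assert select_flags('all', True) == (['-l'], ['-R'])
    assert select_flags('one', False) == (['-l'], ['-r'])
    assert select_flags(None, True) == (['-c'], [])
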
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py
new file mode 100644
index 00000000..89468059
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_devices
+short_description: Manages AIX devices
+description:
+- This module discovers, defines, removes and modifies attributes of AIX devices.
+options:
+ attributes:
+ description:
+ - A list of device attributes.
+ type: dict
+ device:
+ description:
+ - The name of the device.
+ - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
+ type: str
+ force:
+ description:
+ - Forces action.
+ type: bool
+ default: no
+ recursive:
+ description:
+ - Removes or defines a device and children devices.
+ type: bool
+ default: no
+ state:
+ description:
+ - Controls the device state.
+    - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
+    - C(removed) (alias C(absent)) removes a device.
+    - C(defined) changes the device to the Defined state.
+ type: str
+ choices: [ available, defined, removed ]
+ default: available
+'''
+
+EXAMPLES = r'''
+- name: Scan new devices
+ community.general.aix_devices:
+ device: all
+ state: available
+
+- name: Scan new virtual devices (vio0)
+ community.general.aix_devices:
+ device: vio0
+ state: available
+
+- name: Removing IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2
+ community.general.aix_devices:
+ device: ent2
+ state: removed
+
+- name: Put device en2 in Defined
+ community.general.aix_devices:
+ device: en2
+ state: defined
+
+- name: Removes ent4 (nonexistent).
+ community.general.aix_devices:
+ device: ent4
+ state: removed
+
+- name: Put device en4 in Defined (nonexistent)
+ community.general.aix_devices:
+ device: en4
+ state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: defined
+
+- name: Removes vscsi1 and children devices.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: removed
+
+- name: Changes en1 mtu to 9000 and disables arp.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+      mtu: 9000
+      arp: 'off'
+ state: available
+
+- name: Configure IP, netmask and set en1 up.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+ netaddr: 192.168.0.100
+ netmask: 255.255.255.0
+ state: up
+ state: available
+
+- name: Adding IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ alias4: 10.0.0.100,255.255.255.0
+ state: available
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_device(module, device):
+ """
+    Check whether the device exists and return its state.
+ Args:
+ module: Ansible module.
+ device: device to be checked.
+
+ Returns: bool, device state
+
+ """
+ lsdev_cmd = module.get_bin_path('lsdev', True)
+ rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
+
+ if lsdev_out:
+ device_state = lsdev_out.split()[1]
+ return True, device_state
+
+ device_state = None
+ return False, device_state
+
+
+def _check_device_attr(module, device, attr):
+ """
+
+ Args:
+ module: Ansible module.
+ device: device to check attributes.
+ attr: attribute to be checked.
+
+    Returns: the current attribute value ('' for hidden attributes, None if the attribute is unknown).
+
+ """
+ lsattr_cmd = module.get_bin_path('lsattr', True)
+ rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr])
+
+ hidden_attrs = ['delalias4', 'delalias6']
+
+ if rc == 255:
+
+ if attr in hidden_attrs:
+ current_param = ''
+ else:
+ current_param = None
+
+ return current_param
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
+
+ current_param = lsattr_out.split()[1]
+ return current_param
+
+
+def discover_device(module, device):
+ """ Discover AIX devices."""
+ cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
+
+ if device is not None:
+ device = "-l %s" % device
+
+ else:
+ device = ''
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device])
+ changed = True
+ msg = cfgmgr_out
+
+ return changed, msg
+
+
+def change_device_attr(module, attributes, device, force):
+ """ Change AIX device attribute. """
+
+ attr_changed = []
+ attr_not_changed = []
+ attr_invalid = []
+ chdev_cmd = module.get_bin_path('chdev', True)
+
+ for attr in list(attributes.keys()):
+ new_param = attributes[attr]
+ current_param = _check_device_attr(module, device, attr)
+
+ if current_param is None:
+ attr_invalid.append(attr)
+
+ elif current_param != new_param:
+ if force:
+ cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force]
+ else:
+ cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])]
+
+ if not module.check_mode:
+ rc, chdev_out, err = module.run_command(cmd)
+ if rc != 0:
+                    module.fail_json(msg="Failed to run chdev.", rc=rc, err=err)
+
+ attr_changed.append(attributes[attr])
+ else:
+ attr_not_changed.append(attributes[attr])
+
+ if len(attr_changed) > 0:
+ changed = True
+ attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
+ else:
+ changed = False
+ attr_changed_msg = ''
+
+ if len(attr_not_changed) > 0:
+ attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
+ else:
+ attr_not_changed_msg = ''
+
+ if len(attr_invalid) > 0:
+ attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
+ else:
+ attr_invalid_msg = ''
+
+ msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
+
+ return changed, msg
+
+
+def remove_device(module, device, force, recursive, state):
+ """ Puts device in defined state or removes device. """
+
+ state_opt = {
+ 'removed': '-d',
+ 'absent': '-d',
+ 'defined': ''
+ }
+
+ recursive_opt = {
+ True: '-R',
+ False: ''
+ }
+
+ recursive = recursive_opt[recursive]
+ state = state_opt[state]
+
+ changed = True
+ msg = ''
+ rmdev_cmd = module.get_bin_path('rmdev', True)
+
+ if not module.check_mode:
+ if state:
+ rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force])
+ else:
+ rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)
+
+ msg = rmdev_out
+
+ return changed, msg
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ attributes=dict(type='dict'),
+ device=dict(type='str'),
+ force=dict(type='bool', default=False),
+ recursive=dict(type='bool', default=False),
+ state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
+ ),
+ supports_check_mode=True,
+ )
+
+ force_opt = {
+ True: '-f',
+ False: '',
+ }
+
+ attributes = module.params['attributes']
+ device = module.params['device']
+ force = force_opt[module.params['force']]
+ recursive = module.params['recursive']
+ state = module.params['state']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'available' or state == 'present':
+ if attributes:
+ # change attributes on device
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ # discovery devices (cfgmgr)
+ if device and device != 'all':
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ # run cfgmgr on specific device
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ elif state == 'removed' or state == 'absent' or state == 'defined':
+ if not device:
+            result['msg'] = "device is required for removed or defined state."
+
+ else:
+ # Remove device
+ check_device, device_state = _check_device(module, device)
+ if check_device:
+ if state == 'defined' and device_state == 'Defined':
+ result['changed'] = False
+ result['msg'] = 'Device %s already in Defined' % device
+
+ else:
+ result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
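
change_device_attr() triages every requested attribute into one of three
buckets by comparing it with what lsattr currently reports. An illustrative
standalone version of that triage, with a plain dict standing in for
_check_device_attr() (None meaning the attribute is unknown to the device);
the assert assumes insertion-ordered dicts (Python 3.7+):

    def triage(requested, current):
        changed, unchanged, invalid = [], [], []
        for attr, new in requested.items():
            cur = current.get(attr)      # stand-in for _check_device_attr()
            if cur is None:
                invalid.append(attr)     # lsattr does not know the attribute
            elif cur != new:
                changed.append(attr)     # would be handed to chdev -a
            else:
                unchanged.append(attr)   # already set, nothing to do
        return changed, unchanged, invalid

    assert triage({'mtu': '9000', 'arp': 'on', 'bogus': 'x'},
                  {'mtu': '1500', 'arp': 'on'}) == (['mtu'], ['arp'], ['bogus'])
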
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py
new file mode 100644
index 00000000..58a5c25d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py
@@ -0,0 +1,567 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Kairo Araujo (@kairoaraujo)
+module: aix_filesystem
+short_description: Configure LVM and NFS file systems for AIX
+description:
+  - This module creates, removes, mounts and unmounts LVM and NFS file systems on
+    AIX using C(/etc/filesystems).
+  - For LVM file systems, it is possible to resize a file system.
+options:
+ account_subsystem:
+ description:
+ - Specifies whether the file system is to be processed by the accounting subsystem.
+ type: bool
+ default: no
+ attributes:
+ description:
+      - Specifies attributes for the file system, separated by commas.
+ type: list
+ elements: str
+ default: agblksize='4096',isnapshot='no'
+ auto_mount:
+ description:
+ - File system is automatically mounted at system restart.
+ type: bool
+ default: yes
+ device:
+ description:
+      - Logical volume (LV) device name or remote export device to create an NFS file system.
+      - It is used to create a file system on an already existing logical volume or the exported NFS file system.
+      - If not mentioned, a new logical volume name will be created following AIX standards (LVM).
+ type: str
+ fs_type:
+ description:
+ - Specifies the virtual file system type.
+ type: str
+ default: jfs2
+ permissions:
+ description:
+ - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+ type: str
+ choices: [ ro, rw ]
+ default: rw
+ mount_group:
+ description:
+ - Specifies the mount group.
+ type: str
+ filesystem:
+ description:
+ - Specifies the mount point, which is the directory where the file system will be mounted.
+ type: str
+ required: true
+ nfs_server:
+ description:
+ - Specifies a Network File System (NFS) server.
+ type: str
+ rm_mount_point:
+ description:
+ - Removes the mount point directory when used with state C(absent).
+ type: bool
+ default: no
+ size:
+ description:
+ - Specifies the file system size.
+      - If the file system is already C(present), it will be resized.
+      - The size is given in 512-byte blocks, Megabytes or Gigabytes. If the value
+        has an M suffix it is in Megabytes; if it has a G suffix it is in Gigabytes.
+      - Without an M or G suffix, the value is in 512-byte blocks.
+      - If the value begins with "+", that amount is added to the current size.
+      - If the value begins with "-", that amount is removed from the current size.
+      - Without a "+" or "-" prefix, the value is the new total size.
+      - Sizes respect the AIX LVM standards.
+ type: str
+ state:
+ description:
+ - Controls the file system state.
+      - C(present) checks whether the file system exists and creates or resizes it.
+      - C(absent) removes the existing file system if it is already C(unmounted).
+      - C(mounted) checks whether the file system is mounted, and mounts it if not.
+      - C(unmounted) checks whether the file system is unmounted, and unmounts it if not.
+ type: str
+ choices: [ absent, mounted, present, unmounted ]
+ default: present
+ vg:
+ description:
+ - Specifies an existing volume group (VG).
+ type: str
+notes:
+  - For more C(attributes), please check the AIX "crfs" manual.
+'''
+
+EXAMPLES = r'''
+- name: Create filesystem in a previously defined logical volume.
+ community.general.aix_filesystem:
+ device: testlv
+    filesystem: /testfs
+ state: present
+
+- name: Creating NFS filesystem from nfshost.
+ community.general.aix_filesystem:
+ device: /home/ftp
+ nfs_server: nfshost
+    filesystem: /home/ftp
+ state: present
+
+- name: Creating a new file system without a previously logical volume.
+ community.general.aix_filesystem:
+    filesystem: /newfs
+ size: 1G
+ state: present
+ vg: datavg
+
+- name: Unmounting /testfs.
+ community.general.aix_filesystem:
+    filesystem: /testfs
+ state: unmounted
+
+- name: Resizing /mksysb to +512M.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: +512M
+ state: present
+
+- name: Resizing /mksysb to 11G.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to -2G.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: -2G
+ state: present
+
+- name: Remove NFS filesystem /home/ftp.
+ community.general.aix_filesystem:
+    filesystem: /home/ftp
+ rm_mount_point: yes
+ state: absent
+
+- name: Remove /newfs.
+ community.general.aix_filesystem:
+    filesystem: /newfs
+ rm_mount_point: yes
+ state: absent
+'''
+
+RETURN = r'''
+changed:
+ description: Return changed for aix_filesystems actions as true or false.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._mount import ismount
+import re
+
+
+def _fs_exists(module, filesystem):
+ """
+ Check if file system already exists on /etc/filesystems.
+
+ :param module: Ansible module.
+    :param filesystem: filesystem name.
+ :return: True or False.
+ """
+ lsfs_cmd = module.get_bin_path('lsfs', True)
+ rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem))
+ if rc == 1:
+ if re.findall("No record matching", err):
+ return False
+
+ else:
+ module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
+
+ else:
+
+ return True
+
+
+def _check_nfs_device(module, nfs_host, device):
+ """
+ Validate if NFS server is exporting the device (remote export).
+
+ :param module: Ansible module.
+ :param nfs_host: nfs_host parameter, NFS server.
+ :param device: device parameter, remote export.
+ :return: True or False.
+ """
+ showmount_cmd = module.get_bin_path('showmount', True)
+ rc, showmount_out, err = module.run_command(
+ "%s -a %s" % (showmount_cmd, nfs_host))
+ if rc != 0:
+ module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
+ else:
+ showmount_data = showmount_out.splitlines()
+ for line in showmount_data:
+ if line.split(':')[1] == device:
+ return True
+
+ return False
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group %s is in varyoff state." % vg
+ return False, msg
+ elif vg in current_active_vgs:
+ msg = "Volume group %s is in varyon state." % vg
+ return True, msg
+ else:
+ msg = "Volume group %s does not exist." % vg
+ return None, msg
+
+
+def resize_fs(module, filesystem, size):
+ """ Resize LVM file system. """
+
+ chfs_cmd = module.get_bin_path('chfs', True)
+ if not module.check_mode:
+ rc, chfs_out, err = module.run_command('%s -a size="%s" %s' % (chfs_cmd, size, filesystem))
+
+ if rc == 28:
+ changed = False
+ return changed, chfs_out
+ elif rc != 0:
+ if re.findall('Maximum allocation for logical', err):
+ changed = False
+ return changed, err
+ else:
+ module.fail_json(msg="Failed to run chfs. Error message: %s" % err)
+
+ else:
+ if re.findall('The filesystem size is already', chfs_out):
+ changed = False
+ else:
+ changed = True
+
+ return changed, chfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
+ account_subsystem, permissions, nfs_server, attributes):
+ """ Create LVM file system or NFS remote mount point. """
+
+ attributes = ' -a '.join(attributes)
+
+ # Parameters definition.
+ account_subsys_opt = {
+ True: '-t yes',
+ False: '-t no'
+ }
+
+ if nfs_server is not None:
+ auto_mount_opt = {
+ True: '-A',
+ False: '-a'
+ }
+
+ else:
+ auto_mount_opt = {
+ True: '-A yes',
+ False: '-A no'
+ }
+
+ if size is None:
+ size = ''
+ else:
+ size = "-a size=%s" % size
+
+ if device is None:
+ device = ''
+ else:
+ device = "-d %s" % device
+
+ if vg is None:
+ vg = ''
+ else:
+ vg_state, msg = _validate_vg(module, vg)
+ if vg_state:
+ vg = "-g %s" % vg
+ else:
+ changed = False
+
+ return changed, msg
+
+ if mount_group is None:
+ mount_group = ''
+
+ else:
+ mount_group = "-u %s" % mount_group
+
+ auto_mount = auto_mount_opt[auto_mount]
+ account_subsystem = account_subsys_opt[account_subsystem]
+
+ if nfs_server is not None:
+ # Creates a NFS file system.
+ mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
+ if not module.check_mode:
+ rc, mknfsmnt_out, err = module.run_command('%s -f "%s" %s -h "%s" -t "%s" "%s" -w "bg"' % (
+ mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "NFS file system %s created." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+ else:
+ # Creates a LVM file system.
+ crfs_cmd = module.get_bin_path('crfs', True)
+ if not module.check_mode:
+ cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % (
+ crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes)
+ rc, crfs_out, err = module.run_command(cmd)
+
+ if rc == 10:
+ module.exit_json(
+                    msg="Using an existing previously defined logical volume, "
+                        "the volume group needs to be empty. %s" % err)
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+
+ else:
+ changed = True
+ return changed, crfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def remove_fs(module, filesystem, rm_mount_point):
+ """ Remove an LVM file system or NFS entry. """
+
+ # Command parameters.
+ rm_mount_point_opt = {
+ True: '-r',
+ False: ''
+ }
+
+ rm_mount_point = rm_mount_point_opt[rm_mount_point]
+
+ rmfs_cmd = module.get_bin_path('rmfs', True)
+ if not module.check_mode:
+        cmd = "%s %s %s" % (rmfs_cmd, rm_mount_point, filesystem)
+ rc, rmfs_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+ else:
+ changed = True
+ msg = rmfs_out
+ if not rmfs_out:
+ msg = "File system %s removed." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def mount_fs(module, filesystem):
+ """ Mount a file system. """
+ mount_cmd = module.get_bin_path('mount', True)
+
+ if not module.check_mode:
+ rc, mount_out, err = module.run_command(
+ "%s %s" % (mount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s mounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def unmount_fs(module, filesystem):
+ """ Unmount a file system."""
+ unmount_cmd = module.get_bin_path('unmount', True)
+
+ if not module.check_mode:
+ rc, unmount_out, err = module.run_command("%s %s" % (unmount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s unmounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_subsystem=dict(type='bool', default=False),
+ attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]),
+ auto_mount=dict(type='bool', default=True),
+ device=dict(type='str'),
+ filesystem=dict(type='str', required=True),
+ fs_type=dict(type='str', default='jfs2'),
+ permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
+ mount_group=dict(type='str'),
+ nfs_server=dict(type='str'),
+ rm_mount_point=dict(type='bool', default=False),
+ size=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
+ vg=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ account_subsystem = module.params['account_subsystem']
+ attributes = module.params['attributes']
+ auto_mount = module.params['auto_mount']
+ device = module.params['device']
+ fs_type = module.params['fs_type']
+ permissions = module.params['permissions']
+ mount_group = module.params['mount_group']
+ filesystem = module.params['filesystem']
+ nfs_server = module.params['nfs_server']
+ rm_mount_point = module.params['rm_mount_point']
+ size = module.params['size']
+ state = module.params['state']
+ vg = module.params['vg']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'present':
+ fs_mounted = ismount(filesystem)
+ fs_exists = _fs_exists(module, filesystem)
+
+ # Check if fs is mounted or exists.
+ if fs_mounted or fs_exists:
+ result['msg'] = "File system %s already exists." % filesystem
+ result['changed'] = False
+
+ # If parameter size was passed, resize fs.
+ if size is not None:
+ result['changed'], result['msg'] = resize_fs(module, filesystem, size)
+
+ # If fs doesn't exist, create it.
+ else:
+ # Check if fs will be a NFS device.
+ if nfs_server is not None:
+ if device is None:
+ result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
+ module.fail_json(**result)
+ else:
+ # Create a fs from NFS export.
+ if _check_nfs_device(module, nfs_server, device):
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is None:
+ if vg is None:
+ result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
+ module.fail_json(**result)
+ else:
+                    # Create a fs using only the volume group.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is not None and nfs_server is None:
+ # Create a fs from a previously lv device.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ elif state == 'absent':
+ if ismount(filesystem):
+ result['msg'] = "File system %s mounted." % filesystem
+
+ else:
+ fs_status = _fs_exists(module, filesystem)
+ if not fs_status:
+ result['msg'] = "File system %s does not exist." % filesystem
+ else:
+ result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
+
+ elif state == 'mounted':
+ if ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already mounted." % filesystem
+ else:
+ result['changed'], result['msg'] = mount_fs(module, filesystem)
+
+ elif state == 'unmounted':
+ if not ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already unmounted." % filesystem
+ else:
+ result['changed'], result['msg'] = unmount_fs(module, filesystem)
+
+ else:
+ # Unreachable codeblock
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
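
The size notation documented above is passed straight through to chfs: an
optional "+" or "-" prefix selects grow/shrink versus an absolute target, and
an optional M or G suffix selects megabytes or gigabytes, with bare numbers
read as 512-byte blocks. A hypothetical helper, not part of the module, that
makes those rules concrete:

    def parse_size(size):
        sign = ''
        if size[0] in '+-':
            sign, size = size[0], size[1:]
        blocks_per = {'M': 2048, 'G': 2097152}   # 512-byte blocks per unit
        factor = blocks_per.get(size[-1], 1)
        digits = size[:-1] if size[-1] in blocks_per else size
        return sign or 'absolute', int(digits) * factor

    assert parse_size('+512M') == ('+', 512 * 2048)         # grow by 512 MB
    assert parse_size('-2G') == ('-', 2 * 2097152)          # shrink by 2 GB
    assert parse_size('11G') == ('absolute', 11 * 2097152)  # new total size
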
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py
new file mode 100644
index 00000000..c2daface
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Joris Weijters <joris.weijters@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Joris Weijters (@molekuul)
+module: aix_inittab
+short_description: Manages the inittab on AIX
+description:
+ - Manages the inittab on AIX.
+options:
+ name:
+ description:
+ - Name of the inittab entry.
+ type: str
+ required: yes
+ aliases: [ service ]
+ runlevel:
+ description:
+ - Runlevel of the entry.
+ type: str
+ required: yes
+ action:
+ description:
+    - The action that init has to take with this entry.
+ type: str
+ choices:
+ - boot
+ - bootwait
+ - hold
+ - initdefault
+ - 'off'
+ - once
+ - ondemand
+ - powerfail
+ - powerwait
+ - respawn
+ - sysinit
+ - wait
+ command:
+ description:
+ - What command has to run.
+ type: str
+ required: yes
+ insertafter:
+ description:
+    - The inittab entry after which the new entry should be inserted.
+ type: str
+ state:
+ description:
+ - Whether the entry should be present or absent in the inittab file.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The changes are persistent across reboots.
+ - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
+ - Tested on AIX 7.1.
+requirements:
+- itertools
+'''
+
+EXAMPLES = '''
+# Add service startmyservice to the inittab, directly after service existingservice.
+- name: Add startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 4
+ action: once
+ command: echo hello
+ insertafter: existingservice
+ state: present
+ become: yes
+
+# Change inittab entry startmyservice to runlevel "2" and process action "wait".
+- name: Change startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: present
+ become: yes
+
+- name: Remove startmyservice from inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: absent
+ become: yes
+'''
+
+RETURN = '''
+name:
+ description: Name of the adjusted inittab entry
+ returned: always
+ type: str
+ sample: startmyservice
+msg:
+ description: Action done with the inittab entry
+ returned: changed
+ type: str
+ sample: changed inittab entry startmyservice
+changed:
+ description: Whether the inittab changed or not
+ returned: always
+ type: bool
+ sample: true
+'''
+
+# Import necessary libraries
+try:
+ # python 2
+ from itertools import izip
+except ImportError:
+ izip = zip
+
+from ansible.module_utils.basic import AnsibleModule
+
+# end import modules
+# start defining the functions
+
+
+def check_current_entry(module):
+    # Check if the entry exists. If it does not, return {'exist': False};
+    # otherwise return the parsed entry fields together with {'exist': True}.
+ existsdict = {'exist': False}
+ lsitab = module.get_bin_path('lsitab')
+ (rc, out, err) = module.run_command([lsitab, module.params['name']])
+ if rc == 0:
+ keys = ('name', 'runlevel', 'action', 'command')
+ values = out.split(":")
+        # strip non-printable characters such as \n
+ values = map(lambda s: s.strip(), values)
+ existsdict = dict(izip(keys, values))
+ existsdict.update({'exist': True})
+ return existsdict
+
+
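+# Editor's illustration (values assumed, not executed): for an existing entry,
+# `lsitab startmyservice` prints a line like
+#     startmyservice:4:once:echo hello
+# which check_current_entry() parses into
+#     {'exist': True, 'name': 'startmyservice', 'runlevel': '4',
+#      'action': 'once', 'command': 'echo hello'}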
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['service']),
+ runlevel=dict(type='str', required=True),
+ action=dict(type='str', choices=[
+ 'boot',
+ 'bootwait',
+ 'hold',
+ 'initdefault',
+ 'off',
+ 'once',
+ 'ondemand',
+ 'powerfail',
+ 'powerwait',
+ 'respawn',
+ 'sysinit',
+ 'wait',
+ ]),
+ command=dict(type='str', required=True),
+ insertafter=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ result = {
+ 'name': module.params['name'],
+ 'changed': False,
+ 'msg': ""
+ }
+
+ # Find commandline strings
+ mkitab = module.get_bin_path('mkitab')
+ rmitab = module.get_bin_path('rmitab')
+ chitab = module.get_bin_path('chitab')
+ rc = 0
+
+ # check if the new entry exists
+ current_entry = check_current_entry(module)
+
+    # if state is present, install or change the entry
+ if module.params['state'] == 'present':
+
+ # create new entry string
+ new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
+ ":" + module.params['action'] + ":" + module.params['command']
+
+        # If the entry does not exist it will be created; if it exists but
+        # any of its fields differ, the existing entry will be changed.
+ if (not current_entry['exist']) or (
+ module.params['runlevel'] != current_entry['runlevel'] or
+ module.params['action'] != current_entry['action'] or
+ module.params['command'] != current_entry['command']):
+
+ # If the entry does exist then change the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command([chitab, new_entry])
+ if rc != 0:
+ module.fail_json(
+ msg="could not change inittab", rc=rc, err=err)
+ result['msg'] = "changed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ # If the entry does not exist create the entry
+ elif not current_entry['exist']:
+ if module.params['insertafter']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, '-i', module.params['insertafter'], new_entry])
+ else:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, new_entry])
+
+ if rc != 0:
+ module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
+ result['msg'] = "add inittab entry" + " " + module.params['name']
+ result['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ # If the action is remove and the entry exists then remove the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [rmitab, module.params['name']])
+ if rc != 0:
+ module.fail_json(
+ msg="could not remove entry from inittab)", rc=rc, err=err)
+ result['msg'] = "removed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py
new file mode 100644
index 00000000..569711f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_lvg
+short_description: Manage LVM volume groups on AIX
+description:
+- This module creates, removes or resizes volume groups on AIX LVM.
+options:
+ force:
+ description:
+ - Force volume group creation.
+ type: bool
+ default: no
+ pp_size:
+ description:
+ - The size of the physical partition in megabytes.
+ type: int
+ pvs:
+ description:
+    - List of devices to use as physical volumes in this volume group.
+    - Required when creating or extending (C(present) state) the volume group.
+    - If not provided when reducing (C(absent) state), the whole volume group will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+    - Control whether the volume group exists and its AIX activation state, varyonvg C(varyon) or varyoffvg C(varyoff).
+ type: str
+ choices: [ absent, present, varyoff, varyon ]
+ default: present
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ vg_type:
+ description:
+ - The type of the volume group.
+ type: str
+ choices: [ big, normal, scalable ]
+ default: normal
+notes:
+- AIX permits removing a volume group only when none of its logical volumes/filesystems are busy.
+- The module does not modify the PP size of an already existing volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ pp_size: 128
+ vg_type: scalable
+ state: present
+
+- name: Removing a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ state: absent
+
+- name: Extending rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: present
+
+- name: Reducing rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _validate_pv(module, vg, pvs):
+ """
+ Function to validate if the physical volume (PV) is not already in use by
+ another volume group or Oracle ASM.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume group name.
+ :param pvs: Physical volume list.
+ :return: [bool, message] or module.fail_json for errors.
+ """
+
+ lspv_cmd = module.get_bin_path('lspv', True)
+ rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
+
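+    # Editor's illustration (sample output assumed): `lspv` prints one line
+    # per disk, e.g.
+    #   hdisk0   000d4a2bfa17a148   rootvg   active
+    #   hdisk1   000d4a2b1c2d3e4f   None
+    # so pv_data[0] is the PV name and pv_data[2] the owning VG ('None' if free).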
+ for pv in pvs:
+ # Get pv list.
+ lspv_list = {}
+ for line in current_lspv.splitlines():
+ pv_data = line.split()
+ lspv_list[pv_data[0]] = pv_data[2]
+
+ # Check if pv exists and is free.
+ if pv not in lspv_list.keys():
+ module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
+
+ if lspv_list[pv] == 'None':
+ # Disk None, looks free.
+ # Check if PV is not already in use by Oracle ASM.
+ lquerypv_cmd = module.get_bin_path('lquerypv', True)
+ rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv))
+ if rc != 0:
+ module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
+
+ if 'ORCLDISK' in current_lquerypv:
+ module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv)
+
+ msg = "Physical volume '%s' is ok to be used." % pv
+ return True, msg
+
+ # Check if PV is already in use for the same vg.
+ elif vg != lspv_list[pv]:
+ module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
+
+ msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
+ return False, msg
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group '%s' is in varyoff state." % vg
+ return False, msg
+
+ if vg in current_active_vgs:
+ msg = "Volume group '%s' is in varyon state." % vg
+ return True, msg
+
+ msg = "Volume group '%s' does not exist." % vg
+ return None, msg
+
+
+def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
+ """ Creates or extend a volume group. """
+
+ # Command option parameters.
+ force_opt = {
+ True: '-f',
+ False: ''
+ }
+
+ vg_opt = {
+ 'normal': '',
+ 'big': '-B',
+ 'scalable': '-S',
+ }
+
+ # Validate if PV are not already in use.
+ pv_state, msg = _validate_pv(module, vg, pvs)
+ if not pv_state:
+ changed = False
+ return changed, msg
+
+ vg_state, msg = vg_validation
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is True:
+ # Volume group extension.
+ changed = True
+ msg = ""
+
+ if not module.check_mode:
+ extendvg_cmd = module.get_bin_path('extendvg', True)
+ rc, output, err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Extending volume group '%s' has failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' extended." % vg
+ return changed, msg
+
+ elif vg_state is None:
+ # Volume group creation.
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ mkvg_cmd = module.get_bin_path('mkvg', True)
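+            # Editor's illustration (assumed values): for vg_type='scalable',
+            # pp_size 128 and force=False this runs roughly
+            #   mkvg -S -s 128 -y datavg hdisk1 hdisk2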
+ rc, output, err = module.run_command("%s %s %s %s -y %s %s" % (mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Creating volume group '%s' failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' created." % vg
+ return changed, msg
+
+
+def reduce_vg(module, vg, pvs, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is None:
+ changed = False
+ return changed, msg
+
+ # Define pvs_to_remove (list of physical volumes to be removed).
+ if pvs is None:
+        # Remove the VG if pvs was not provided.
+        # Remark: AIX permits the removal only if the VG contains no LVs.
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd)
+
+ pvs_to_remove = []
+ for line in current_pvs.splitlines()[2:]:
+ pvs_to_remove.append(line.split()[0])
+
+ reduce_msg = "Volume group '%s' removed." % vg
+ else:
+ pvs_to_remove = pvs
+ reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))
+
+ # Reduce volume group.
+    if not pvs_to_remove:
+ changed = False
+ msg = "No physical volumes to remove."
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ reducevg_cmd = module.get_bin_path('reducevg', True)
+ rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove)))
+ if rc != 0:
+ module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
+
+ msg = reduce_msg
+ return changed, msg
+
+
+def state_vg(module, vg, state, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is None:
+ module.fail_json(msg=msg)
+
+ if state == 'varyon':
+ if vg_state is True:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ varyonvg_cmd = module.get_bin_path('varyonvg', True)
+ rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
+
+ msg = "Varyon volume group %s completed." % vg
+ return changed, msg
+
+ elif state == 'varyoff':
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+            varyoffvg_cmd = module.get_bin_path('varyoffvg', True)
+            rc, varyoffvg_out, stderr = module.run_command("%s %s" % (varyoffvg_cmd, vg))
+            if rc != 0:
+                module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyoffvg_out, stderr=stderr)
+
+ msg = "Varyoff volume group %s completed." % vg
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', default=False),
+ pp_size=dict(type='int'),
+ pvs=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
+ vg=dict(type='str', required=True),
+ vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
+ ),
+ supports_check_mode=True,
+ )
+
+ force = module.params['force']
+ pp_size = module.params['pp_size']
+ pvs = module.params['pvs']
+ state = module.params['state']
+ vg = module.params['vg']
+ vg_type = module.params['vg_type']
+
+ if pp_size is None:
+ pp_size = ''
+ else:
+ pp_size = "-s %s" % pp_size
+
+ vg_validation = _validate_vg(module, vg)
+
+ if state == 'present':
+ if not pvs:
+ changed = False
+ msg = "pvs is required to state 'present'."
+ module.fail_json(msg=msg)
+ else:
+ changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
+
+ elif state == 'absent':
+ changed, msg = reduce_vg(module, vg, pvs, vg_validation)
+
+ elif state == 'varyon' or state == 'varyoff':
+ changed, msg = state_vg(module, vg, state, vg_validation)
+
+ else:
+ changed = False
+ msg = "Unexpected state"
+
+ module.exit_json(changed=changed, msg=msg, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py
new file mode 100644
index 00000000..02b4f06c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Alain Dejoux (@adejoux)
+module: aix_lvol
+short_description: Configure AIX LVM logical volumes
+description:
+ - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
+options:
+ vg:
+ description:
+ - The volume group this logical volume is part of.
+ type: str
+ required: true
+ lv:
+ description:
+ - The name of the logical volume.
+ type: str
+ required: true
+ lv_type:
+ description:
+ - The type of the logical volume.
+ type: str
+ default: jfs2
+ size:
+ description:
+ - The size of the logical volume with one of the [MGT] units.
+ type: str
+ copies:
+ description:
+ - The number of copies of the logical volume.
+    - The maximum number of copies is 3.
+ type: int
+ default: 1
+ policy:
+ description:
+    - Sets the inter-physical volume allocation policy.
+ - C(maximum) allocates logical partitions across the maximum number of physical volumes.
+ - C(minimum) allocates logical partitions across the minimum number of physical volumes.
+ type: str
+ choices: [ maximum, minimum ]
+ default: maximum
+ state:
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ opts:
+ description:
+ - Free-form options to be passed to the mklv command.
+ type: str
+ pvs:
+ description:
+    - A list of physical volumes, for example C(hdisk1,hdisk2).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+- name: Create a logical volume of 512M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+
+- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test2lv
+ size: 512M
+ pvs: [ hdisk1, hdisk2 ]
+
+- name: Create a logical volume of 512M mirrored
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test3lv
+ size: 512M
+ copies: 2
+
+- name: Create a logical volume of 1G with a minimum placement policy
+ community.general.aix_lvol:
+ vg: rootvg
+ lv: test4lv
+ size: 1G
+ policy: minimum
+
+- name: Create a logical volume with special options like mirror pool
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+ opts: -p copy1=poolA -p copy2=poolB
+
+- name: Extend the logical volume to 1200M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test4lv
+ size: 1200M
+
+- name: Remove the logical volume
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ state: absent
+'''
+
+RETURN = r'''
+msg:
+ type: str
+ description: A friendly message describing the task result.
+ returned: always
+ sample: Logical volume testlv created.
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
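+# Editor's illustration (assumed inputs): convert_size() returns the size in
+# megabytes, e.g. '512M' -> 512, '1G' -> 1024, '2T' -> 2 * 1024 ** 2.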
+def convert_size(module, size):
+ unit = size[-1].upper()
+ units = ['M', 'G', 'T']
+ try:
+ multiplier = 1024 ** units.index(unit)
+ except ValueError:
+ module.fail_json(msg="No valid size unit specified.")
+
+ return int(size[:-1]) * multiplier
+
+
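+# Editor's illustration: round_ppsize() rounds a size in MB up to the next
+# multiple of the volume group's PP size, e.g. round_ppsize(500, 16) == 512
+# and round_ppsize(512, 16) == 512 (values assumed).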
+def round_ppsize(x, base=16):
+ new_size = int(base * round(float(x) / base))
+ if new_size < x:
+ new_size += base
+ return new_size
+
+
+def parse_lv(data):
+ name = None
+
+ for line in data.splitlines():
+ match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ vg = match.group(2)
+ continue
+ match = re.search(r"LPs:\s+(\d+).*PPs", line)
+ if match is not None:
+ lps = int(match.group(1))
+ continue
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+ match = re.search(r"INTER-POLICY:\s+(\w+)", line)
+ if match is not None:
+ policy = match.group(1)
+ continue
+
+ if not name:
+ return None
+
+ size = lps * pp_size
+
+ return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
+
+
+def parse_vg(data):
+
+ for line in data.splitlines():
+
+ match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ continue
+
+ match = re.search(r"TOTAL PP.*\((\d+)", line)
+ if match is not None:
+ size = int(match.group(1))
+ continue
+
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+
+ match = re.search(r"FREE PP.*\((\d+)", line)
+ if match is not None:
+ free = int(match.group(1))
+ continue
+
+ return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str', required=True),
+ lv_type=dict(type='str', default='jfs2'),
+ size=dict(type='str'),
+ opts=dict(type='str', default=''),
+ copies=dict(type='int', default=1),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
+ pvs=dict(type='list', elements='str', default=list())
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ lv_type = module.params['lv_type']
+ size = module.params['size']
+ opts = module.params['opts']
+ copies = module.params['copies']
+ policy = module.params['policy']
+ state = module.params['state']
+ pvs = module.params['pvs']
+
+ pv_list = ' '.join(pvs)
+
+ if policy == 'maximum':
+ lv_policy = 'x'
+ else:
+ lv_policy = 'm'
+
+ # Add echo command when running in check-mode
+ if module.check_mode:
+ test_opt = 'echo '
+ else:
+ test_opt = ''
+
+ # check if system commands are available
+ lsvg_cmd = module.get_bin_path("lsvg", required=True)
+ lslv_cmd = module.get_bin_path("lslv", required=True)
+
+ # Get information on volume group requested
+ rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
+
+ this_vg = parse_vg(vg_info)
+
+ if size is not None:
+ # Calculate pp size and round it up based on pp size.
+ lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+
+ # Get information on logical volume requested
+ rc, lv_info, err = module.run_command(
+ "%s %s" % (lslv_cmd, lv))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
+
+ changed = False
+
+ this_lv = parse_lv(lv_info)
+
+ if state == 'present' and not size:
+ if this_lv is None:
+ module.fail_json(msg="No size given.")
+
+ if this_lv is None:
+ if state == 'present':
+ if lv_size > this_vg['free']:
+ module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
+
+ # create LV
+ mklv_cmd = module.get_bin_path("mklv", required=True)
+
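+            # Editor's illustration (assumed values): this builds roughly
+            #   mklv -t jfs2 -y testlv -c 1 -e x testvg 512M hdisk1
+            # prefixed with 'echo ' when running in check mode.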
+ cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s created." % lv)
+ else:
+ module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ rmlv_cmd = module.get_bin_path("rmlv", required=True)
+ rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
+ else:
+ if this_lv['policy'] != policy:
+ # change lv allocation policy
+ chlv_cmd = module.get_bin_path("chlv", required=True)
+ rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
+ else:
+ module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
+
+ if vg != this_lv['vg']:
+ module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))
+
+            # From here on, the only remaining action is resizing the LV; if no size parameter was passed, do nothing.
+            if not size:
+                module.exit_json(changed=False, msg="Logical volume %s already exists." % (lv))
+
+ # resize LV based on absolute values
+ if int(lv_size) > this_lv['size']:
+ extendlv_cmd = module.get_bin_path("extendlv", required=True)
+ cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
+ else:
+ module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
+ elif lv_size < this_lv['size']:
+ module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
+ else:
+ module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py
new file mode 100644
index 00000000..56db6dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
+# Copyright: (c) 2015, David Wittman <dwittman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: alternatives
+short_description: Manages alternative programs for common commands
+description:
+ - Manages symbolic links using the 'update-alternatives' tool.
+ - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+author:
+ - David Wittman (@DavidWittman)
+ - Gabe Mulley (@mulby)
+options:
+ name:
+ description:
+ - The generic name of the link.
+ type: str
+ required: true
+ path:
+ description:
+ - The path to the real executable that the link should point to.
+ type: path
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real executable.
+ - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
+ required when the alternative I(name) is unknown to the system.
+ type: path
+ priority:
+ description:
+ - The priority of the alternative.
+ type: int
+ default: 50
+requirements: [ update-alternatives ]
+'''
+
+EXAMPLES = r'''
+- name: Correct java version selected
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+
+- name: Alternatives link created
+ community.general.alternatives:
+ name: hadoop-conf
+ link: /etc/hadoop/conf
+ path: /etc/hadoop/conf.ansible
+
+- name: Make java 32 bit an alternative with low priority
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
+ priority: -10
+'''
+
+import os
+import re
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ path=dict(type='path', required=True),
+ link=dict(type='path'),
+ priority=dict(type='int', default=50),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ name = params['name']
+ path = params['path']
+ link = params['link']
+ priority = params['priority']
+
+ UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)
+
+ current_path = None
+ all_alternatives = []
+
+ # Run `update-alternatives --display <name>` to find existing alternatives
+ (rc, display_output, _) = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
+ )
+
+ if rc == 0:
+ # Alternatives already exist for this link group
+ # Parse the output to determine the current path of the symlink and
+ # available alternatives
+ current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
+ re.MULTILINE)
+ alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
+
+ match = current_path_regex.search(display_output)
+ if match:
+ current_path = match.group(1)
+ all_alternatives = alternative_regex.findall(display_output)
+
+ if not link:
+ # Read the current symlink target from `update-alternatives --query`
+ # in case we need to install the new alternative before setting it.
+ #
+        # This is only available on Debian-based systems, as other
+        # implementations of update-alternatives do not provide --query.
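+        # Editor's illustration (Debian output assumed): `--query` prints
+        # blocks like
+        #   Name: editor
+        #   Link: /usr/bin/editor
+        #   Status: auto
+        # and only the 'Link:' line is consumed below.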
+ rc, query_output, _ = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
+ )
+ if rc == 0:
+ for line in query_output.splitlines():
+ if line.startswith('Link:'):
+ link = line.split()[1]
+ break
+
+ if current_path != path:
+ if module.check_mode:
+ module.exit_json(changed=True, current_path=current_path)
+ try:
+ # install the requested path if necessary
+ if path not in all_alternatives:
+ if not os.path.exists(path):
+ module.fail_json(msg="Specified path %s does not exist" % path)
+ if not link:
+ module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
+
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
+ check_rc=True
+ )
+
+ # select the requested path
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--set', name, path],
+ check_rc=True
+ )
+
+ module.exit_json(changed=True)
+ except subprocess.CalledProcessError as cpe:
+        module.fail_json(msg=str(cpe))
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py
new file mode 100644
index 00000000..260c7ae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ted Trask <ttrask01@yahoo.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: awall
+short_description: Manage awall policies
+author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
+description:
+  - This module allows enabling, disabling and activating I(awall) policies.
+ - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
+ and activates the configuration on the system.
+options:
+ name:
+ description:
+ - One or more policy names.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether the policies should be enabled or disabled.
+ type: str
+ choices: [ disabled, enabled ]
+ default: enabled
+ activate:
+ description:
+ - Activate the new firewall rules.
+ - Can be run with other steps or on its own.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Enable "foo" and "bar" policy
+ community.general.awall:
+    name: [ foo, bar ]
+ state: enabled
+
+- name: Disable "foo" and "bar" policy and activate new rules
+ community.general.awall:
+ name:
+ - foo
+ - bar
+ state: disabled
+ activate: no
+
+- name: Activate currently enabled firewall rules
+ community.general.awall:
+ activate: yes
+'''
+
+RETURN = ''' # '''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def activate(module):
+ cmd = "%s activate --force" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
+
+
+def is_policy_enabled(module, name):
+ cmd = "%s list" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
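+    # Editor's illustration (format assumed): `awall list` prints one policy
+    # per line, roughly
+    #   foo    enabled
+    #   bar    disabled
+    # so the regex below matches enabled policies only.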
+ if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
+ return True
+ return False
+
+
+def enable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if not is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already enabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s enable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
+
+
+def disable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already disabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s disable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
+ name=dict(type='list', elements='str'),
+ activate=dict(type='bool', default=False),
+ ),
+ required_one_of=[['name', 'activate']],
+ supports_check_mode=True,
+ )
+
+ global AWALL_PATH
+ AWALL_PATH = module.get_bin_path('awall', required=True)
+
+ p = module.params
+
+ if p['name']:
+ if p['state'] == 'enabled':
+ enable_policy(module, p['name'], p['activate'])
+ elif p['state'] == 'disabled':
+ disable_policy(module, p['name'], p['activate'])
+
+ if p['activate']:
+ if not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="activated awall rules")
+
+ module.fail_json(msg="no action defined")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py
new file mode 100644
index 00000000..ab53d066
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: beadm
+short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
+description:
+ - Create, delete or activate ZFS boot environments.
+ - Mount and unmount ZFS boot environments.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS boot environment name.
+ type: str
+ required: True
+ aliases: [ "be" ]
+ snapshot:
+ description:
+ - If specified, the new boot environment will be cloned from the given
+ snapshot or inactive boot environment.
+ type: str
+ description:
+ description:
+ - Associate a description with a new boot environment. This option is
+ available only on Solarish platforms.
+ type: str
+ options:
+ description:
+ - Create the datasets for new BE with specific ZFS properties.
+ - Multiple options can be specified.
+ - This option is available only on Solarish platforms.
+ type: str
+ mountpoint:
+ description:
+ - Path where to mount the ZFS boot environment.
+ type: path
+ state:
+ description:
+ - Create or delete ZFS boot environment.
+ type: str
+ choices: [ absent, activated, mounted, present, unmounted ]
+ default: present
+ force:
+ description:
+ - Specifies if the unmount should be forced.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Create ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: present
+
+- name: Create ZFS boot environment from existing inactive boot environment
+ community.general.beadm:
+ name: upgrade-be
+ snapshot: be@old
+ state: present
+
+- name: Create ZFS boot environment with compression enabled and description "upgrade"
+ community.general.beadm:
+ name: upgrade-be
+ options: "compression=on"
+ description: upgrade
+ state: present
+
+- name: Delete ZFS boot environment
+ community.general.beadm:
+ name: old-be
+ state: absent
+
+- name: Mount ZFS boot environment on /tmp/be
+ community.general.beadm:
+ name: BE
+ mountpoint: /tmp/be
+ state: mounted
+
+- name: Unmount ZFS boot environment
+ community.general.beadm:
+ name: BE
+ state: unmounted
+
+- name: Activate ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: activated
+'''
+
+RETURN = r'''
+name:
+ description: BE name
+ returned: always
+ type: str
+ sample: pre-upgrade
+snapshot:
+ description: ZFS snapshot to create BE from
+ returned: always
+ type: str
+ sample: rpool/ROOT/oi-hipster@fresh
+description:
+ description: BE description
+ returned: always
+ type: str
+ sample: Upgrade from 9.0 to 10.0
+options:
+ description: BE additional options
+ returned: always
+ type: str
+ sample: compression=on
+mountpoint:
+ description: BE mountpoint
+ returned: always
+ type: str
+ sample: /mnt/be
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+force:
+ description: If forced action is wanted
+ returned: always
+ type: bool
+ sample: False
+'''
+
+import os
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BE(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.snapshot = module.params['snapshot']
+ self.description = module.params['description']
+ self.options = module.params['options']
+ self.mountpoint = module.params['mountpoint']
+ self.state = module.params['state']
+ self.force = module.params['force']
+ self.is_freebsd = os.uname()[0] == 'FreeBSD'
+
+ def _beadm_list(self):
+ cmd = [self.module.get_bin_path('beadm')]
+ cmd.append('list')
+ cmd.append('-H')
+ if '@' in self.name:
+ cmd.append('-s')
+ return self.module.run_command(cmd)
+
+ def _find_be_by_name(self, out):
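+        # Editor's illustration (sample output assumed): `beadm list -H` emits
+        # whitespace-separated columns on FreeBSD, e.g.
+        #   default  NR  /  1.2G  2016-01-01 12:00
+        # and ';'-separated columns on illumos/Solaris, hence the two parsers.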
+ if '@' in self.name:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+                    if not check:
+                        continue
+                    full_name = check[0].split('/')
+                    if not full_name:
+                        continue
+                    check[0] = full_name[-1]
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ else:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ return None
+
+ def exists(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ if self._find_be_by_name(out):
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def is_activated(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ if 'R' in line[1]:
+ return True
+ else:
+ if 'R' in line[2]:
+ return True
+
+ return False
+
+ def activate_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('activate')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def create_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('create')
+
+ if self.snapshot:
+ cmd.append('-e')
+ cmd.append(self.snapshot)
+
+ if not self.is_freebsd:
+ if self.description:
+ cmd.append('-d')
+ cmd.append(self.description)
+
+ if self.options:
+ cmd.append('-o')
+ cmd.append(self.options)
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def destroy_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('destroy')
+ cmd.append('-F')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def is_mounted(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ # On FreeBSD, we exclude currently mounted BE on /, as it is
+ # special and can be activated even if it is mounted. That is not
+ # possible with non-root BEs.
+ if line[2] != '-' and line[2] != '/':
+ return True
+ else:
+ if line[3]:
+ return True
+
+ return False
+
+ def mount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('mount')
+ cmd.append(self.name)
+
+ if self.mountpoint:
+ cmd.append(self.mountpoint)
+
+ return self.module.run_command(cmd)
+
+ def unmount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('unmount')
+ if self.force:
+ cmd.append('-f')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['be']),
+ snapshot=dict(type='str'),
+ description=dict(type='str'),
+ options=dict(type='str'),
+ mountpoint=dict(type='path'),
+ state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ be = BE(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = be.name
+ result['state'] = be.state
+
+ if be.snapshot:
+ result['snapshot'] = be.snapshot
+
+ if be.description:
+ result['description'] = be.description
+
+ if be.options:
+ result['options'] = be.options
+
+ if be.mountpoint:
+ result['mountpoint'] = be.mountpoint
+
+ if be.state == 'absent':
+ # beadm on FreeBSD and Solarish systems differs in delete behaviour in
+ # that we are not allowed to delete activated BE on FreeBSD while on
+ # Solarish systems we cannot delete BE if it is mounted. We add mount
+ # check for both platforms as BE should be explicitly unmounted before
+ # being deleted. On FreeBSD, we also check if the BE is activated.
+ if be.exists():
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if be.is_freebsd:
+ if be.is_activated():
+ module.fail_json(msg='Unable to remove active BE!')
+
+ (rc, out, err) = be.destroy_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while destroying BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ else:
+ module.fail_json(msg='Unable to remove BE as it is mounted!')
+
+ elif be.state == 'present':
+ if not be.exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.create_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while creating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'activated':
+ if not be.is_activated():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # On FreeBSD, beadm is unable to activate mounted BEs, so we add
+ # an explicit check for that case.
+ if be.is_freebsd:
+ if be.is_mounted():
+ module.fail_json(msg='Unable to activate mounted BE!')
+
+ (rc, out, err) = be.activate_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while activating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ elif be.state == 'mounted':
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.mount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while mounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'unmounted':
+ if be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.unmount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while unmounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py
new file mode 100644
index 00000000..ac6dde67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+  - This module manipulates file privileges using the Linux capabilities(7) system.
+options:
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ type: str
+ required: yes
+ aliases: [ key ]
+ capability:
+ description:
+    - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)).
+ type: str
+ required: yes
+ aliases: [ cap ]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The capabilities system will automatically transform operators and flags into the effective set,
+ so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
+ - This module does not attempt to determine the final operator and flags to compare,
+ so you will want to ensure that your capabilities argument matches the final capabilities.
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Set cap_sys_chroot+ep on /foo
+ community.general.capabilities:
+ path: /foo
+ capability: cap_sys_chroot+ep
+ state: present
+
+- name: Remove cap_net_bind_service from /bar
+ community.general.capabilities:
+ path: /bar
+ capability: cap_net_bind_service
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+OPS = ('=', '-', '+')
+
+
+class CapabilitiesModule(object):
+ platform = 'Linux'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.path = module.params['path'].strip()
+ self.capability = module.params['capability'].strip().lower()
+ self.state = module.params['state']
+ self.getcap_cmd = module.get_bin_path('getcap', required=True)
+ self.setcap_cmd = module.get_bin_path('setcap', required=True)
+ self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
+
+ self.run()
+
+ def run(self):
+
+ current = self.getcap(self.path)
+ caps = [cap[0] for cap in current]
+
+ if self.state == 'present' and self.capability_tup not in current:
+ # need to add capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list if it's already set (but op/flags differ)
+ current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
+ # add new cap with correct op/flags
+ current.append(self.capability_tup)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ elif self.state == 'absent' and self.capability_tup[0] in caps:
+ # need to remove capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list and then set current list
+ current = filter(lambda x: x[0] != self.capability_tup[0], current)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ self.module.exit_json(changed=False, state=self.state)
+
+ def getcap(self, path):
+ rval = []
+ cmd = "%s -v %s" % (self.getcap_cmd, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ # If file xattrs are set but no caps are set the output will be:
+ # '/foo ='
+ # If file xattrs are unset the output will be:
+ # '/foo'
+ # If the file does not exist, the stderr will be (with rc == 0...):
+ # '/foo (No such file or directory)'
+ if rc != 0 or stderr != "":
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+ if stdout.strip() != path:
+ if ' =' in stdout:
+ # process output of an older version of libcap
+ caps = stdout.split(' =')[1].strip().split()
+ else:
+ # otherwise, we have a newer version here
+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+ caps = stdout.split()[1].strip().split()
+ for cap in caps:
+ cap = cap.lower()
+ # getcap condenses capabilities with the same op/flags into a
+ # comma-separated list, so we have to parse that
+ if ',' in cap:
+ cap_group = cap.split(',')
+ cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+ for subcap in cap_group:
+ rval.append((subcap, op, flags))
+ else:
+ rval.append(self._parse_cap(cap))
+ return rval
+
+ def setcap(self, path, caps):
+ caps = ' '.join([''.join(cap) for cap in caps])
+ cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+ else:
+ return stdout
+
+ def _parse_cap(self, cap, op_required=True):
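+        # Editor's illustration: 'cap_sys_chroot+ep' parses to
+        # ('cap_sys_chroot', '+', 'ep'); with op_required=False a bare
+        # 'cap_net_raw' yields ('cap_net_raw', None, None).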
+ opind = -1
+ try:
+ i = 0
+ while opind == -1:
+ opind = cap.find(OPS[i])
+ i += 1
+ except Exception:
+ if op_required:
+ self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+ else:
+ return (cap, None, None)
+ op = cap[opind]
+ cap, flags = cap.split(op)
+ return (cap, op, flags)
+
+
+# ==============================================================
+# main
+
+def main():
+ # defining module
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='str', required=True, aliases=['key']),
+ capability=dict(type='str', required=True, aliases=['cap']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ CapabilitiesModule(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py
new file mode 100644
index 00000000..a76f6a78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
+# Each variable entered will be preceded with a comment describing the
+# variable so that it can be found later. That comment is required to be
+# present in order for this plugin to find/modify the variable.
+
+# This module is based on the crontab module.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cronvar
+short_description: Manage variables in crontabs
+description:
+ - Use this module to manage crontab variables.
+ - This module allows you to create, update, or delete cron variable definitions.
+options:
+ name:
+ description:
+ - Name of the crontab variable.
+ type: str
+ required: yes
+ value:
+ description:
+ - The value to set this variable to.
+ - Required if C(state=present).
+ type: str
+ insertafter:
+ description:
+ - If specified, the variable will be inserted after the variable specified.
+ - Used with C(state=present).
+ type: str
+ insertbefore:
+ description:
+ - Used with C(state=present). If specified, the variable will be inserted
+ just before the variable specified.
+ type: str
+ state:
+ description:
+ - Whether to ensure that the variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - This parameter defaults to C(root) when unset.
+ type: str
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ - Without a leading C(/), this is assumed to be in I(/etc/cron.d).
+ - With a leading C(/), this is taken as absolute.
+ type: str
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup) variable by this module.
+ type: bool
+ default: no
+requirements:
+ - cron
+author:
+- Doug Luce (@dougluce)
+'''
+
+EXAMPLES = r'''
+- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
+ community.general.cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+
+- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
+ community.general.cronvar:
+ name: LEGACY
+ state: absent
+
+- name: Add a variable to a file under /etc/cron.d
+ community.general.cronvar:
+ name: LOGFILE
+ value: /var/log/yum-autoupdate.log
+ user: root
+ cron_file: ansible_yum-autoupdate
+'''
+
+import os
+import platform
+import pwd
+import re
+import shlex
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronVarError(Exception):
+ pass
+
+
+class CronVar(object):
+ """
+ CronVar object to write variables to crontabs.
+
+ user - the user of the crontab (defaults to root)
+ cron_file - a cron file under /etc/cron.d
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.lines = None
+ self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+ self.cron_file = ""
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.cron_file, 'r')
+ self.lines = f.read().splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronVarError("Unable to read crontab")
+
+ lines = out.splitlines()
+ count = 0
+ for l in lines:
+ if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l
+ ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ count += 1
+
+ def log_message(self, message):
+ self.module.debug('ansible: "%s"' % message)
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'w')
+ elif self.cron_file:
+ fileh = open(self.cron_file, 'w')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(self.render())
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ def remove_variable_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+
+ def parse_for_var(self, line):
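+        # Editor's illustration: a crontab line 'SHELL=/bin/bash' parses to
+        # ('SHELL', '/bin/bash'); lines that are not variable assignments
+        # raise CronVarError.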
+ lexer = shlex.shlex(line)
+ lexer.wordchars = self.wordchars
+ varname = lexer.get_token()
+ is_env_var = lexer.get_token() == '='
+ value = ''.join(lexer)
+ if is_env_var:
+ return (varname, value)
+ raise CronVarError("Not a variable.")
+
+ def find_variable(self, name):
+ for l in self.lines:
+ try:
+ (varname, value) = self.parse_for_var(l)
+ if varname == name:
+ return value
+ except CronVarError:
+ pass
+ return None
+
+ def get_var_names(self):
+ var_names = []
+ for l in self.lines:
+ try:
+ (var_name, _) = self.parse_for_var(l)
+ var_names.append(var_name)
+ except CronVarError:
+ pass
+ return var_names
+
+ def add_variable(self, name, value, insertbefore, insertafter):
+ if insertbefore is None and insertafter is None:
+ # Add the variable to the top of the file.
+ self.lines.insert(0, "%s=%s" % (name, value))
+ else:
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname == insertbefore:
+ newlines.append("%s=%s" % (name, value))
+ newlines.append(l)
+ elif varname == insertafter:
+ newlines.append(l)
+ newlines.append("%s=%s" % (name, value))
+ else:
+ raise CronVarError # Append.
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def remove_variable(self, name):
+ self.update_variable(name, None, remove=True)
+
+ def update_variable(self, name, value, remove=False):
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname != name:
+ raise CronVarError # Append.
+ if not remove:
+ newlines.append("%s=%s" % (name, value))
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render a proper crontab
+ """
+ result = '\n'.join(self.lines)
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
+
+
+# ==================================================
+
+def main():
+    # The following example tasks:
+    #
+    # - community.general.cronvar: name="SHELL" value="/bin/bash"
+    #
+    # - name: Set the email
+    #   community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
+    #
+    # - name: Remove the old NEW_HOST variable
+    #   community.general.cronvar: name="NEW_HOST" state=absent
+    #
+    # would produce a crontab containing:
+    # SHELL=/bin/bash
+    # EMAILTO=doug@ansibmod.con.com
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ user=dict(type='str'),
+ cron_file=dict(type='str'),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ backup=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ supports_check_mode=False,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ user = module.params['user']
+ cron_file = module.params['cron_file']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ state = module.params['state']
+ backup = module.params['backup']
+ ensure_present = state == 'present'
+
+ changed = False
+ res_args = dict()
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ cronvar = CronVar(module, user, cron_file)
+
+ module.debug('cronvar instantiated - name: "%s"' % name)
+
+ # --- user input validation ---
+
+ if name is None and ensure_present:
+ module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+ if value is None and ensure_present:
+ module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+ if name is None and not ensure_present:
+ module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+ # if requested make a backup before making a change
+ if backup:
+ (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+ cronvar.write(backup_file)
+
+ if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+ old_value = cronvar.find_variable(name)
+
+ if ensure_present:
+ if old_value is None:
+ cronvar.add_variable(name, value, insertbefore, insertafter)
+ changed = True
+ elif old_value != value:
+ cronvar.update_variable(name, value)
+ changed = True
+ else:
+ if old_value is not None:
+ cronvar.remove_variable(name)
+ changed = True
+
+ res_args = {
+ "vars": cronvar.get_var_names(),
+ "changed": changed
+ }
+
+ if changed:
+ cronvar.write()
+
+    # retain the backup only if the crontab or cron file has changed
+ if backup:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py
new file mode 100644
index 00000000..9841a786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Steve <yo@groks.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+ - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+options:
+ name:
+ description:
+ - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+ optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+ will be stripped from I(name).
+ type: str
+ required: yes
+ state:
+ description:
+ - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+ if already present.
+ - Use I(absent) to remove a line with matching I(name).
+ - Use I(opts_present) to add options to those already present; options with
+ different values will be updated.
+ - Use I(opts_absent) to remove options from the existing set.
+ type: str
+ required: yes
+ choices: [ absent, opts_absent, opts_present, present ]
+ backing_device:
+ description:
+ - Path to the underlying block device or file, or the UUID of a block-device
+ prefixed with I(UUID=).
+ type: str
+ password:
+ description:
+ - Encryption password, the path to a file containing the password, or
+ C(-) or unset if the password should be entered at boot.
+ type: path
+ opts:
+ description:
+      - A comma-delimited list of options. See C(crypttab(5)) for details.
+ type: str
+ path:
+ description:
+ - Path to file to use instead of C(/etc/crypttab).
+ - This might be useful in a chroot environment.
+ type: path
+ default: /etc/crypttab
+author:
+- Steve (@groks)
+'''
+
+EXAMPLES = r'''
+- name: Set the options explicitly for a device which must already exist
+ community.general.crypttab:
+ name: luks-home
+ state: present
+ opts: discard,cipher=aes-cbc-essiv:sha256
+
+- name: Add the 'discard' option to any existing options for all devices
+ community.general.crypttab:
+ name: '{{ item.device }}'
+ state: opts_present
+ opts: discard
+ loop: '{{ ansible_mounts }}'
+  when: "'/dev/mapper/luks-' in item.device"
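+
+# Illustrative sketch only: state=opts_absent (documented above) removes a
+# single option while leaving the rest of the entry untouched.
+- name: Remove the 'discard' option from the luks-home entry
+  community.general.crypttab:
+    name: luks-home
+    state: opts_absent
+    opts: discard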
+'''
+
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
+ backing_device=dict(type='str'),
+ password=dict(type='path'),
+ opts=dict(type='str'),
+ path=dict(type='path', default='/etc/crypttab')
+ ),
+ supports_check_mode=True,
+ )
+
+ backing_device = module.params['backing_device']
+ password = module.params['password']
+ opts = module.params['opts']
+ state = module.params['state']
+ path = module.params['path']
+ name = module.params['name']
+ if name.startswith('/dev/mapper/'):
+ name = name[len('/dev/mapper/'):]
+
+ if state != 'absent' and backing_device is None and password is None and opts is None:
+ module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
+ **module.params)
+
+ if 'opts' in state and (backing_device is not None or password is not None):
+ module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
+ **module.params)
+
+ for arg_name, arg in (('name', name),
+ ('backing_device', backing_device),
+ ('password', password),
+ ('opts', opts)):
+ if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+ module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
+ **module.params)
+
+ try:
+ crypttab = Crypttab(path)
+ existing_line = crypttab.match(name)
+ except Exception as e:
+ module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
+ exception=traceback.format_exc(), **module.params)
+
+ if 'present' in state and existing_line is None and backing_device is None:
+ module.fail_json(msg="'backing_device' required to add a new entry",
+ **module.params)
+
+ changed, reason = False, '?'
+
+ if state == 'absent':
+ if existing_line is not None:
+ changed, reason = existing_line.remove()
+
+ elif state == 'present':
+ if existing_line is not None:
+ changed, reason = existing_line.set(backing_device, password, opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_present':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.add(opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_absent':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.remove(opts)
+
+ if changed and not module.check_mode:
+        f = open(path, 'wb')
+        try:
+            f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
+        finally:
+            f.close()
+
+ module.exit_json(changed=changed, msg=reason, **module.params)
+
+
+class Crypttab(object):
+
+    def __init__(self, path):
+        self.path = path
+        # Keep the parsed lines on the instance; a class-level list would be
+        # shared between all Crypttab instances.
+        self._lines = []
+ if not os.path.exists(path):
+ if not os.path.exists(os.path.dirname(path)):
+ os.makedirs(os.path.dirname(path))
+ open(path, 'a').close()
+
+        f = open(path, 'r')
+        try:
+            for line in f.readlines():
+                self._lines.append(Line(line))
+        finally:
+            f.close()
+
+ def add(self, line):
+ self._lines.append(line)
+ return True, 'added line'
+
+ def lines(self):
+ for line in self._lines:
+ if line.valid():
+ yield line
+
+ def match(self, name):
+ for line in self.lines():
+ if line.name == name:
+ return line
+ return None
+
+ def __str__(self):
+ lines = []
+ for line in self._lines:
+ lines.append(str(line))
+ crypttab = '\n'.join(lines)
+ if len(crypttab) == 0:
+ crypttab += '\n'
+ if crypttab[-1] != '\n':
+ crypttab += '\n'
+ return crypttab
+
+
+class Line(object):
+ def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
+ self.line = line
+ self.name = name
+ self.backing_device = backing_device
+ self.password = password
+ self.opts = Options(opts)
+
+ if line is not None:
+ self.line = self.line.rstrip('\n')
+ if self._line_valid(line):
+ self.name, backing_device, password, opts = self._split_line(line)
+
+ self.set(backing_device, password, opts)
+
+ def set(self, backing_device, password, opts):
+ changed = False
+
+ if backing_device is not None and self.backing_device != backing_device:
+ self.backing_device = backing_device
+ changed = True
+
+ if password is not None and self.password != password:
+ self.password = password
+ changed = True
+
+ if opts is not None:
+ opts = Options(opts)
+ if opts != self.opts:
+ self.opts = opts
+ changed = True
+
+ return changed, 'updated line'
+
+ def _line_valid(self, line):
+ if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
+ return False
+ return True
+
+ def _split_line(self, line):
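+        # e.g. 'luks-home /dev/sda2 none discard' splits into
+        # ('luks-home', '/dev/sda2', 'none', 'discard'); absent password or
+        # options fields come back as None (illustrative values only).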
+ fields = line.split()
+ try:
+ field2 = fields[2]
+ except IndexError:
+ field2 = None
+ try:
+ field3 = fields[3]
+ except IndexError:
+ field3 = None
+
+ return (fields[0],
+ fields[1],
+ field2,
+ field3)
+
+ def remove(self):
+ self.line, self.name, self.backing_device = '', None, None
+ return True, 'removed line'
+
+ def valid(self):
+ if self.name is not None and self.backing_device is not None:
+ return True
+ return False
+
+ def __str__(self):
+ if self.valid():
+ fields = [self.name, self.backing_device]
+ if self.password is not None or self.opts:
+ if self.password is not None:
+ fields.append(self.password)
+ else:
+ fields.append('none')
+ if self.opts:
+ fields.append(str(self.opts))
+ return ' '.join(fields)
+ return self.line
+
+
+class Options(dict):
+ """opts_string looks like: 'discard,foo=bar,baz=greeble' """
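+    # A minimal usage sketch (illustrative):
+    #   opts = Options('discard,foo=bar')
+    #   str(opts)           -> 'discard,foo=bar'   (insertion order is preserved)
+    #   opts.add('baz')     -> (True, 'updated options')
+    #   opts.remove('foo')  -> (True, 'removed options')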
+
+ def __init__(self, opts_string):
+ super(Options, self).__init__()
+ self.itemlist = []
+ if opts_string is not None:
+ for opt in opts_string.split(','):
+ kv = opt.split('=')
+ if len(kv) > 1:
+ k, v = (kv[0], kv[1])
+ else:
+ k, v = (kv[0], None)
+ self[k] = v
+
+ def add(self, opts_string):
+ changed = False
+ for k, v in Options(opts_string).items():
+ if k in self:
+ if self[k] != v:
+ changed = True
+ else:
+ changed = True
+ self[k] = v
+ return changed, 'updated options'
+
+ def remove(self, opts_string):
+ changed = False
+ for k in Options(opts_string):
+ if k in self:
+ del self[k]
+ changed = True
+ return changed, 'removed options'
+
+ def keys(self):
+ return self.itemlist
+
+ def values(self):
+ return [self[key] for key in self]
+
+ def items(self):
+ return [(key, self[key]) for key in self]
+
+ def __iter__(self):
+ return iter(self.itemlist)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ self.itemlist.append(key)
+ super(Options, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ self.itemlist.remove(key)
+ super(Options, self).__delitem__(key)
+
+ def __ne__(self, obj):
+ return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items()))
+
+ def __str__(self):
+ ret = []
+ for k, v in self.items():
+ if v is None:
+ ret.append(k)
+ else:
+ ret.append('%s=%s' % (k, v))
+ return ','.join(ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py
new file mode 100644
index 00000000..49c42432
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Branko Majic <branko@majic.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: dconf
+author:
+ - "Branko Majic (@azaghal)"
+short_description: Modify and read dconf database
+description:
+  - This module allows modifications and reading of the dconf database. The
+    module is implemented as a wrapper around the dconf tool. Please see the
+    dconf(1) man page for more details.
+ - Since C(dconf) requires a running D-Bus session to change values, the module
+ will try to detect an existing session and reuse it, or run the tool via
+ C(dbus-run-session).
+notes:
+  - This module depends on the C(psutil) Python library (version 4.0.0 and
+    upwards) and the C(dconf), C(dbus-send), and C(dbus-run-session) binaries.
+    Depending on the distribution you are using, you may need to install
+    additional packages to have these available.
+  - Detection of an existing, running D-Bus session, required to change
+    settings via C(dconf), is not 100% reliable due to implementation details
+    of the D-Bus daemon itself. This might lead to running applications not
+    picking up changes on the fly if options are changed via Ansible and
+    C(dbus-run-session).
+ - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
+ utilises an unusual syntax for the values (GVariant). For example, if you
+ wanted to provide a string value, the correct syntax would be
+ C(value="'myvalue'") - with single quotes as part of the Ansible parameter
+ value.
+ - When using loops in combination with a value like
+ :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
+ type conversions. Applying a filter :code:`"{{ item.value | string }}"`
+ to the parameter variable can avoid potential conversion problems.
+ - The easiest way to figure out exact syntax/value you need to provide for a
+ key is by making the configuration change in application affected by the
+ key, and then having a look at value set via commands C(dconf dump
+ /path/to/dir/) or C(dconf read /path/to/key).
+options:
+ key:
+ type: str
+ required: true
+ description:
+ - A dconf key to modify or read from the dconf database.
+ value:
+ type: str
+ required: false
+ description:
+ - Value to set for the specified dconf key. Value should be specified in
+ GVariant format. Due to complexity of this format, it is best to have a
+ look at existing values in the dconf database. Required for
+ C(state=present).
+ state:
+ type: str
+ required: false
+ default: present
+ choices:
+ - read
+ - present
+ - absent
+ description:
+ - The action to take upon the key/value.
+'''
+
+RETURN = """
+value:
+ description: value associated with the requested key
+ returned: success, state was "read"
+ type: str
+ sample: "'Default'"
+"""
+
+EXAMPLES = """
+- name: Configure available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ value: "[('xkb', 'us'), ('xkb', 'se')]"
+ state: present
+
+- name: Read currently available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: absent
+
+- name: Configure available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ value: "['us', 'se']"
+ state: present
+
+- name: Read currently available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: absent
+
+- name: Disable desktop effects in Cinnamon
+ community.general.dconf:
+ key: "/org/cinnamon/desktop-effects"
+ value: "false"
+ state: present
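+
+# Illustrative sketch only: a GVariant string value needs embedded single
+# quotes, as described in the notes above (the key shown is just an example).
+- name: Set the GTK theme, a plain string value (note the nested quoting)
+  community.general.dconf:
+    key: "/org/gnome/desktop/interface/gtk-theme"
+    value: "'Adwaita'"
+    state: present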
+"""
+
+
+import os
+import traceback
+
+PSUTIL_IMP_ERR = None
+try:
+ import psutil
+ psutil_found = True
+except ImportError:
+ PSUTIL_IMP_ERR = traceback.format_exc()
+ psutil_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class DBusWrapper(object):
+ """
+ Helper class that can be used for running a command with a working D-Bus
+ session.
+
+    If possible, the command is run against an existing D-Bus session;
+    otherwise a session is spawned via dbus-run-session.
+
+ Example usage:
+
+ dbus_wrapper = DBusWrapper(ansible_module)
+ dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
+ """
+
+ def __init__(self, module):
+ """
+ Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+ """
+
+ # Store passed-in arguments and set-up some defaults.
+ self.module = module
+
+ # Try to extract existing D-Bus session address.
+ self.dbus_session_bus_address = self._get_existing_dbus_session()
+
+ # If no existing D-Bus session was detected, check if dbus-run-session
+ # is available.
+ if self.dbus_session_bus_address is None:
+ self.module.get_bin_path('dbus-run-session', required=True)
+
+ def _get_existing_dbus_session(self):
+ """
+ Detects and returns an existing D-Bus session bus address.
+
+ :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
+ """
+
+ # We'll be checking the processes of current user only.
+ uid = os.getuid()
+
+ # Go through all the pids for this user, try to extract the D-Bus
+ # session bus address from environment, and ensure it is possible to
+ # connect to it.
+ self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
+
+        for pid in psutil.pids():
+            try:
+                process = psutil.Process(pid)
+                process_real_uid, _, _ = process.uids()
+                if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
+ dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
+ self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
+ command = ['dbus-send', '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
+ rc, _, _ = self.module.run_command(command)
+
+ if rc == 0:
+ self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
+
+ return dbus_session_bus_address_candidate
+
+            # This can happen with things like SSH sessions etc., or when a
+            # process exits between listing and inspection.
+            except (psutil.AccessDenied, psutil.NoSuchProcess):
+                pass
+
+ self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
+
+ return None
+
+ def run_command(self, command):
+ """
+ Runs the specified command within a functional D-Bus session. Command is
+ effectively passed-on to AnsibleModule.run_command() method, with
+ modification for using dbus-run-session if necessary.
+
+ :param command: Command to run, including parameters. Each element of the list should be a string.
+        :type command: list
+
+ :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
+ """
+
+ if self.dbus_session_bus_address is None:
+ self.module.debug("Using dbus-run-session wrapper for running commands.")
+ command = ['dbus-run-session'] + command
+ rc, out, err = self.module.run_command(command)
+
+ if self.dbus_session_bus_address is None and rc == 127:
+ self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
+ else:
+ extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
+ rc, out, err = self.module.run_command(command, environ_update=extra_environment)
+
+ return rc, out, err
+
+
+class DconfPreference(object):
+
+ def __init__(self, module, check_mode=False):
+ """
+        Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+
+        :param check_mode: Specify whether to only check if a change should be made or to actually make a change.
+ :type check_mode: bool
+ """
+
+ self.module = module
+ self.check_mode = check_mode
+
+ def read(self, key):
+ """
+ Retrieves current value associated with the dconf key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
+ """
+
+ command = ["dconf", "read", key]
+
+ rc, out, err = self.module.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err)
+
+ if out == '':
+ value = None
+ else:
+ value = out.rstrip('\n')
+
+ return value
+
+ def write(self, key, value):
+ """
+ Writes the value for specified key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key for which the value should be set. Should be a full path.
+ :type key: str
+
+ :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
+ :type value: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # If no change is needed (or won't be done due to check_mode), notify
+ # caller straight away.
+ if value == self.read(key):
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set up the command to run. Since D-Bus is needed for the write
+        # operation, wrap the dconf command with DBusWrapper (which falls back
+        # to dbus-run-session when no session is available).
+ command = ["dconf", "write", key, value]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+            self.module.fail_json(msg='dconf failed while writing the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+ def reset(self, key):
+ """
+        Resets the value for the specified key (removes it from user configuration).
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key to reset. Should be a full path.
+ :type key: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # Read the current value first.
+ current_value = self.read(key)
+
+        # If the key is not set at all, no change is needed; otherwise, in
+        # check mode, just report that a change would be made.
+ if current_value is None:
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set up the command to run. Since D-Bus is needed for the reset
+        # operation, wrap the dconf command with DBusWrapper as well.
+ command = ["dconf", "reset", key]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent', 'read']),
+ key=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not psutil_found:
+ module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
+
+ # If present state was specified, value must be provided.
+ if module.params['state'] == 'present' and module.params['value'] is None:
+ module.fail_json(msg='State "present" requires "value" to be set.')
+
+ # Create wrapper instance.
+ dconf = DconfPreference(module, module.check_mode)
+
+ # Process based on different states.
+ if module.params['state'] == 'read':
+ value = dconf.read(module.params['key'])
+ module.exit_json(changed=False, value=value)
+ elif module.params['state'] == 'present':
+ changed = dconf.write(module.params['key'], module.params['value'])
+ module.exit_json(changed=changed)
+ elif module.params['state'] == 'absent':
+ changed = dconf.reset(module.params['key'])
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py
new file mode 100644
index 00000000..b7b57fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2020, Yann Amar <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dpkg_divert
+short_description: Override a debian package's version of a file
+version_added: '0.2.0'
+author:
+ - quidame (@quidame)
+description:
+ - A diversion is for C(dpkg) the knowledge that only a given package
+ (or the local administrator) is allowed to install a file at a given
+ location. Other packages shipping their own version of this file will
+ be forced to I(divert) it, i.e. to install it at another location. It
+ allows one to keep changes in a file provided by a debian package by
+    preventing it from being overwritten on package upgrade.
+ - This module manages diversions of debian packages files using the
+ C(dpkg-divert) commandline tool. It can either create or remove a
+ diversion for a given file, but also update an existing diversion
+ to modify its I(holder) and/or its I(divert) location.
+options:
+ path:
+ description:
+ - The original and absolute path of the file to be diverted or
+ undiverted. This path is unique, i.e. it is not possible to get
+ two diversions for the same I(path).
+ required: true
+ type: path
+ state:
+ description:
+ - When I(state=absent), remove the diversion of the specified
+ I(path); when I(state=present), create the diversion if it does
+ not exist, or update its package I(holder) or I(divert) location,
+ if it already exists.
+ type: str
+ default: present
+ choices: [absent, present]
+ holder:
+ description:
+ - The name of the package whose copy of file is not diverted, also
+ known as the diversion holder or the package the diversion belongs
+ to.
+ - The actual package does not have to be installed or even to exist
+        for its name to be valid. If not specified, the diversion is held
+        by 'LOCAL', a name reserved by/for dpkg for local diversions.
+ - This parameter is ignored when I(state=absent).
+ type: str
+ divert:
+ description:
+ - The location where the versions of file will be diverted.
+ - Default is to add suffix C(.distrib) to the file path.
+ - This parameter is ignored when I(state=absent).
+ type: path
+ rename:
+ description:
+ - Actually move the file aside (when I(state=present)) or back (when
+ I(state=absent)), but only when changing the state of the diversion.
+ This parameter has no effect when attempting to add a diversion that
+        already exists or when removing a nonexistent one.
+ - Unless I(force=true), renaming fails if the destination file already
+ exists (this lock being a dpkg-divert feature, and bypassing it being
+ a module feature).
+ type: bool
+ default: no
+ force:
+ description:
+ - When I(rename=true) and I(force=true), renaming is performed even if
+ the target of the renaming exists, i.e. the existing contents of the
+ file at this location will be lost.
+ - This parameter is ignored when I(rename=false).
+ type: bool
+ default: no
+notes:
+ - This module supports I(check_mode) and I(diff).
+requirements:
+ - dpkg-divert >= 1.15.0 (Debian family)
+'''
+
+EXAMPLES = r'''
+- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+
+- name: Divert /usr/bin/busybox by package 'branding'
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ holder: branding
+
+- name: Divert and rename busybox to busybox.dpkg-divert
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ divert: /usr/bin/busybox.dpkg-divert
+ rename: yes
+
+- name: Remove the busybox diversion and move the diverted file back
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ state: absent
+ rename: yes
+ force: yes
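+
+# Illustrative sketch only: re-running the module with a new 'holder' value
+# updates an existing diversion (the package name here is hypothetical).
+- name: Transfer the busybox diversion to the package 'busybox-static'
+  community.general.dpkg_divert:
+    path: /usr/bin/busybox
+    holder: busybox-static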
+'''
+
+RETURN = r'''
+commands:
+  description: The dpkg-divert commands run internally by the module.
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc",
+ "/usr/bin/dpkg-divert --package ansible --no-rename --add /etc/foobarrc"
+ ]
+messages:
+ description: The dpkg-divert relevant messages (stdout or stderr).
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'",
+ "Adding 'diversion of /etc/foobarrc to /etc/foobarrc.distrib by ansible'"
+ ]
+diversion:
+ description: The status of the diversion after task execution.
+ type: dict
+ returned: always
+ contains:
+ divert:
+ description: The location of the diverted file.
+ type: str
+ holder:
+ description: The package holding the diversion.
+ type: str
+ path:
+ description: The path of the file to divert/undivert.
+ type: str
+ state:
+ description: The state of the diversion.
+ type: str
+ sample: |-
+ {
+ "divert": "/etc/foobarrc.distrib",
+ "holder": "LOCAL",
+      "path": "/etc/foobarrc",
+ "state": "present"
+ }
+'''
+
+
+import re
+import os
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def diversion_state(module, command, path):
+ diversion = dict(path=path, state='absent', divert=None, holder=None)
+ rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
+ if out:
+ diversion['state'] = 'present'
+ diversion['holder'] = out.rstrip()
+ rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
+ diversion['divert'] = out.rstrip()
+ return diversion
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True, type='path'),
+ state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
+ holder=dict(required=False, type='str'),
+ divert=dict(required=False, type='path'),
+ rename=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ state = module.params['state']
+ holder = module.params['holder']
+ divert = module.params['divert']
+ rename = module.params['rename']
+ force = module.params['force']
+
+ diversion_wanted = dict(path=path, state=state)
+ changed = False
+
+ DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
+ MAINCOMMAND = [DPKG_DIVERT]
+
+ # Option --listpackage is needed and comes with 1.15.0
+ rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
+ [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
+ if LooseVersion(current_version) < LooseVersion("1.15.0"):
+ module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
+ no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ path_exists = os.path.exists(b_path)
+    # Used for things not doable with a single dpkg-divert command (such as
+    # forced renaming of files, or updating a diversion's 'holder' or 'divert').
+ target_exists = False
+ truename_exists = False
+
+ diversion_before = diversion_state(module, DPKG_DIVERT, path)
+ if diversion_before['state'] == 'present':
+ b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
+ truename_exists = os.path.exists(b_divert)
+
+ # Append options as requested in the task parameters, but ignore some of
+ # them when removing the diversion.
+ if rename:
+ MAINCOMMAND.append('--rename')
+ elif no_rename_is_supported:
+ MAINCOMMAND.append('--no-rename')
+
+ if state == 'present':
+ if holder and holder != 'LOCAL':
+ MAINCOMMAND.extend(['--package', holder])
+ diversion_wanted['holder'] = holder
+ else:
+ MAINCOMMAND.append('--local')
+ diversion_wanted['holder'] = 'LOCAL'
+
+ if divert:
+ MAINCOMMAND.extend(['--divert', divert])
+ target = divert
+ else:
+ target = '%s.distrib' % path
+
+ MAINCOMMAND.extend(['--add', path])
+ diversion_wanted['divert'] = target
+ b_target = to_bytes(target, errors='surrogate_or_strict')
+ target_exists = os.path.exists(b_target)
+
+ else:
+ MAINCOMMAND.extend(['--remove', path])
+ diversion_wanted['divert'] = None
+ diversion_wanted['holder'] = None
+
+ # Start to populate the returned objects.
+ diversion = diversion_before.copy()
+ maincommand = ' '.join(MAINCOMMAND)
+ commands = [maincommand]
+
+ if module.check_mode or diversion_wanted == diversion_before:
+ MAINCOMMAND.insert(1, '--test')
+ diversion_after = diversion_wanted
+
+ # Just try and see
+ rc, stdout, stderr = module.run_command(MAINCOMMAND)
+
+ if rc == 0:
+ messages = [stdout.rstrip()]
+
+ # else... cases of failure with dpkg-divert are:
+ # - The diversion does not belong to the same package (or LOCAL)
+ # - The divert filename is not the same (e.g. path.distrib != path.divert)
+ # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
+ # diverted file exist)
+
+ elif state != diversion_before['state']:
+ # There should be no case with 'divert' and 'holder' when creating the
+ # diversion from none, and they're ignored when removing the diversion.
+ # So this is all about renaming...
+ if rename and path_exists and (
+ (state == 'absent' and truename_exists) or
+ (state == 'present' and target_exists)):
+ if not force:
+ msg = "Set 'force' param to True to force renaming of files."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ else:
+ msg = "Unexpected error while changing state of the diversion."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+ to_remove = path
+ if state == 'present':
+ to_remove = target
+
+ if not module.check_mode:
+ try:
+ b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
+ os.unlink(b_remove)
+ except OSError as e:
+ msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+
+ messages = [stdout.rstrip()]
+
+ # The situation is that we want to modify the settings (holder or divert)
+ # of an existing diversion. dpkg-divert does not handle this, and we have
+ # to remove the existing diversion first, and then set a new one.
+ else:
+ RMDIVERSION = [DPKG_DIVERT, '--remove', path]
+ if no_rename_is_supported:
+ RMDIVERSION.insert(1, '--no-rename')
+ rmdiversion = ' '.join(RMDIVERSION)
+
+ if module.check_mode:
+ RMDIVERSION.insert(1, '--test')
+
+ if rename:
+ MAINCOMMAND.remove('--rename')
+ if no_rename_is_supported:
+ MAINCOMMAND.insert(1, '--no-rename')
+ maincommand = ' '.join(MAINCOMMAND)
+
+ commands = [rmdiversion, maincommand]
+ rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)
+
+ if module.check_mode:
+ messages = [rmdout.rstrip(), 'Running in check mode']
+ else:
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+ messages = [rmdout.rstrip(), stdout.rstrip()]
+
+    # If possible, avoid orphaning files (i.e. dereferencing them in the
+    # diversion database while leaving them in place), but do not make
+    # renaming issues fatal. Note that this module is not about the state of
+    # the files involved in the diversion.
+ old = diversion_before['divert']
+ new = diversion_wanted['divert']
+ if new != old:
+ b_old = to_bytes(old, errors='surrogate_or_strict')
+ b_new = to_bytes(new, errors='surrogate_or_strict')
+ if os.path.exists(b_old) and not os.path.exists(b_new):
+ try:
+ os.rename(b_old, b_new)
+            except OSError:
+                # Renaming issues are not fatal here (see comment above).
+                pass
+
+ if not module.check_mode:
+ diversion_after = diversion_state(module, DPKG_DIVERT, path)
+
+ diversion = diversion_after.copy()
+ diff = dict()
+ if module._diff:
+ diff['before'] = diversion_before
+ diff['after'] = diversion_after
+
+ if diversion_after != diversion_before:
+ changed = True
+
+ if diversion_after == diversion_wanted:
+ module.exit_json(changed=changed, diversion=diversion,
+ commands=commands, messages=messages, diff=diff)
+ else:
+ msg = "Unexpected error: see stdout and stderr for details."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py
new file mode 100644
index 00000000..abd2ebc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: facter
+short_description: Runs the discovery program I(facter) on the remote system
+description:
+ - Runs the C(facter) discovery program
+ (U(https://github.com/puppetlabs/facter)) on the remote system, returning
+ JSON data that can be useful for inventory purposes.
+options:
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
+requirements:
+ - facter
+ - ruby-json
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Example command-line invocation
+# ansible www.example.net -m facter
+
+- name: Execute facter no arguments
+ community.general.facter:
+
+- name: Execute facter with arguments
+ community.general.facter:
+ arguments:
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arguments=dict(required=False, type='list', elements='str')
+ )
+ )
+
+ facter_path = module.get_bin_path(
+ 'facter',
+ opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--json"]
+ if module.params['arguments']:
+ cmd += module.params['arguments']
+
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py
new file mode 100644
index 00000000..e78eec4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: filesystem
+short_description: Makes a filesystem
+description:
+ - This module creates a filesystem.
+options:
+ state:
+ description:
+ - If C(state=present), the filesystem is created if it doesn't already
+ exist, that is the default behaviour if I(state) is omitted.
+ - If C(state=absent), filesystem signatures on I(dev) are wiped if it
+ contains a filesystem (as known by C(blkid)).
+ - When C(state=absent), all other options but I(dev) are ignored, and the
+ module doesn't fail if the device I(dev) doesn't actually exist.
+ - C(state=absent) is not supported and will fail on FreeBSD systems.
+ type: str
+ choices: [ present, absent ]
+ default: present
+ version_added: 1.3.0
+ fstype:
+ choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ]
+ description:
+ - Filesystem type to be created. This option is required with
+ C(state=present) (or if I(state) is omitted).
+ - reiserfs support was added in 2.2.
+ - lvm support was added in 2.5.
+ - since 2.5, I(dev) can be an image file.
+ - vfat support was added in 2.5
+ - ocfs2 support was added in 2.6
+ - f2fs support was added in 2.7
+ - swap support was added in 2.8
+ type: str
+ aliases: [type]
+ dev:
+ description:
+ - Target path to device or image file.
+ type: path
+ required: yes
+ aliases: [device]
+ force:
+ description:
+      - If C(yes), allows creating a new filesystem on a device that already has a filesystem.
+ type: bool
+ default: 'no'
+ resizefs:
+ description:
+ - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space.
+ - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems.
+      - XFS will only grow if mounted. Currently, the module is based on commands
+        from the C(util-linux) package to perform operations, so resizing of XFS is
+        not supported on FreeBSD systems.
+      - VFAT will likely fail if C(fatresize) < 1.04.
+ type: bool
+ default: 'no'
+ opts:
+ description:
+ - List of options to be passed to mkfs command.
+ type: str
+requirements:
+ - Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too.
+notes:
+  - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) is unable to detect an existing filesystem,
+    that filesystem is overwritten even if I(force) is C(no).
+ - This module supports I(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a ext2 filesystem on /dev/sdb1
+ community.general.filesystem:
+ fstype: ext2
+ dev: /dev/sdb1
+
+- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
+ community.general.filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ opts: -cc
+
+- name: Blank filesystem signature on /dev/sdb1
+ community.general.filesystem:
+ dev: /dev/sdb1
+ state: absent
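+
+# Illustrative sketch only: resizefs (documented above) grows an existing
+# filesystem to fill its device.
+- name: Grow the ext4 filesystem on /dev/sdb1 to fill the device
+  community.general.filesystem:
+    fstype: ext4
+    dev: /dev/sdb1
+    resizefs: yes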
+'''
+
+from distutils.version import LooseVersion
+import os
+import platform
+import re
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Device(object):
+ def __init__(self, module, path):
+ self.module = module
+ self.path = path
+
+ def size(self):
+ """ Return size in bytes of device. Returns int """
+ statinfo = os.stat(self.path)
+ if stat.S_ISBLK(statinfo.st_mode):
+ blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
+ _, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
+ return int(devsize_in_bytes)
+ elif os.path.isfile(self.path):
+ return os.path.getsize(self.path)
+ else:
+ self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
+
+ def get_mountpoint(self):
+ """Return (first) mountpoint of device. Returns None when not mounted."""
+ cmd_findmnt = self.module.get_bin_path("findmnt", required=True)
+
+ # find mountpoint
+ rc, mountpoint, _ = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
+ "TARGET", "--source", self.path], check_rc=False)
+ if rc != 0:
+ mountpoint = None
+ else:
+ mountpoint = mountpoint.split('\n')[0]
+
+ return mountpoint
+
+ def __str__(self):
+ return self.path
+
+
+class Filesystem(object):
+
+ GROW = None
+ MKFS = None
+ MKFS_FORCE_FLAGS = ''
+
+ LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
+
+ def __init__(self, module):
+ self.module = module
+
+ @property
+ def fstype(self):
+ return type(self).__name__
+
+ def get_fs_size(self, dev):
+ """ Return size in bytes of filesystem on device. Returns int """
+ raise NotImplementedError()
+
+ def create(self, opts, dev):
+ if self.module.check_mode:
+ return
+
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ if opts is None:
+ cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev)
+ else:
+ cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev)
+ self.module.run_command(cmd, check_rc=True)
+
+ def wipefs(self, dev):
+ if platform.system() == 'FreeBSD':
+ msg = "module param state=absent is currently not supported on this OS (FreeBSD)."
+ self.module.fail_json(msg=msg)
+
+ if self.module.check_mode:
+ return
+
+ # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above)
+ # so it is not supported on FreeBSD. Even the use of dd as a fallback is
+ # not doable here if it needs get_mountpoint() (to prevent corruption of
+ # a mounted filesystem), since 'findmnt' is not available on FreeBSD.
+ wipefs = self.module.get_bin_path('wipefs', required=True)
+ cmd = [wipefs, "--all", dev.__str__()]
+ self.module.run_command(cmd, check_rc=True)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ return [cmd, str(dev)]
+
+ def grow(self, dev):
+ """Get dev and fs size and compare. Returns stdout of used command."""
+ devsize_in_bytes = dev.size()
+
+ try:
+ fssize_in_bytes = self.get_fs_size(dev)
+ except NotImplementedError:
+ self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype)
+
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+ elif self.module.check_mode:
+ self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev))
+ else:
+ _, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True)
+ return out
+
+
+class Ext(Filesystem):
+ MKFS_FORCE_FLAGS = '-F'
+ GROW = 'resize2fs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('tune2fs', required=True)
+ # Get Block count and Block size
+ _, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
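+        # Typical lines parsed below (illustrative values):
+        #   Block count:              1310720
+        #   Block size:               4096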
+ for line in size.splitlines():
+ if 'Block count:' in line:
+ block_count = int(line.split(':')[1].strip())
+ elif 'Block size:' in line:
+ block_size = int(line.split(':')[1].strip())
+ return block_size * block_count
+
+
+class Ext2(Ext):
+ MKFS = 'mkfs.ext2'
+
+
+class Ext3(Ext):
+ MKFS = 'mkfs.ext3'
+
+
+class Ext4(Ext):
+ MKFS = 'mkfs.ext4'
+
+
+class XFS(Filesystem):
+ MKFS = 'mkfs.xfs'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'xfs_growfs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('xfs_info', required=True)
+
+ mountpoint = dev.get_mountpoint()
+ if mountpoint:
+ rc, out, err = self.module.run_command([cmd, str(mountpoint)], environ_update=self.LANG_ENV)
+ else:
+ # Recent GNU/Linux distros support access to unmounted XFS filesystems
+ rc, out, err = self.module.run_command([cmd, str(dev)], environ_update=self.LANG_ENV)
+ if rc != 0:
+ self.module.fail_json(msg="Error while attempting to query size of XFS filesystem: %s" % err)
+
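+        # Expected 'data' line in xfs_info output (illustrative values):
+        #   data     =          bsize=4096   blocks=262144, imaxpct=25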
+ for line in out.splitlines():
+ col = line.split('=')
+ if col[0].strip() == 'data':
+ if col[1].strip() != 'bsize':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "bsize")')
+ if col[2].split()[1] != 'blocks':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "blocks")')
+ block_size = int(col[2].split()[0])
+ block_count = int(col[3].split(',')[0])
+ return block_size * block_count
+
+ def grow_cmd(self, dev):
+ # Check first if growing is needed, and then if it is doable or not.
+ devsize_in_bytes = dev.size()
+ fssize_in_bytes = self.get_fs_size(dev)
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+
+ mountpoint = dev.get_mountpoint()
+ if not mountpoint:
+ # xfs filesystem needs to be mounted
+ self.module.fail_json(msg="%s needs to be mounted for xfs operations" % dev)
+
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+
+ return [cmd, str(mountpoint)]
+
+
+class Reiserfs(Filesystem):
+ MKFS = 'mkfs.reiserfs'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+class Btrfs(Filesystem):
+ MKFS = 'mkfs.btrfs'
+
+ def __init__(self, module):
+ super(Btrfs, self).__init__(module)
+ _, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True)
+ match = re.search(r" v([0-9.]+)", stdout)
+ if not match:
+            # v0.20-rc1 prints the version to stderr
+ match = re.search(r" v([0-9.]+)", stderr)
+ if match:
+            # v0.20-rc1 doesn't have the --force parameter, which was added in v3.12
+ if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
+ self.MKFS_FORCE_FLAGS = '-f'
+ else:
+ self.MKFS_FORCE_FLAGS = ''
+ else:
+ # assume version is greater or equal to 3.12
+ self.MKFS_FORCE_FLAGS = '-f'
+ self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
+
+
+class Ocfs2(Filesystem):
+ MKFS = 'mkfs.ocfs2'
+ MKFS_FORCE_FLAGS = '-Fx'
+
+
+class F2fs(Filesystem):
+ MKFS = 'mkfs.f2fs'
+ GROW = 'resize.f2fs'
+
+ @property
+ def MKFS_FORCE_FLAGS(self):
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ cmd = "%s %s" % (mkfs, os.devnull)
+ _, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV)
+ # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
+ # mkfs.f2fs displays version since v1.2.0
+ match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
+ if match is not None:
+            # Since 1.9.0, mkfs.f2fs checks for overwrite before making the
+            # filesystem; before that version the -f switch wasn't used.
+ if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
+ return '-f'
+
+ return ''
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('dump.f2fs', required=True)
+ # Get sector count and sector size
+ _, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ sector_size = None
+ sector_count = None
+ for line in dump.splitlines():
+ if 'Info: sector size = ' in line:
+ # expected: 'Info: sector size = 512'
+ sector_size = int(line.split()[4])
+ elif 'Info: total FS sectors = ' in line:
+ # expected: 'Info: total FS sectors = 102400 (50 MB)'
+ sector_count = int(line.split()[5])
+
+ if None not in (sector_size, sector_count):
+ break
+ else:
+            self.module.warn("Unable to process dump.f2fs output '%s'" % dump)
+ self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev)
+
+ return sector_size * sector_count
+
+
+class VFAT(Filesystem):
+ if platform.system() == 'FreeBSD':
+ MKFS = "newfs_msdos"
+ else:
+ MKFS = 'mkfs.vfat'
+ GROW = 'fatresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ _, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ for line in output.splitlines()[1:]:
+ param, value = line.split(':', 1)
+ if param.strip() == 'Size':
+ return int(value.strip())
+ self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW)
+ return [cmd, "-s", str(dev.size()), str(dev.path)]
+
+
+class LVM(Filesystem):
+ MKFS = 'pvcreate'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'pvresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('pvs', required=True)
+ _, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
+ block_count = int(size)
+ return block_count
+
+
+class Swap(Filesystem):
+ MKFS = 'mkswap'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+FILESYSTEMS = {
+ 'ext2': Ext2,
+ 'ext3': Ext3,
+ 'ext4': Ext4,
+ 'ext4dev': Ext4,
+ 'f2fs': F2fs,
+ 'reiserfs': Reiserfs,
+ 'xfs': XFS,
+ 'btrfs': Btrfs,
+ 'vfat': VFAT,
+ 'ocfs2': Ocfs2,
+ 'LVM2_member': LVM,
+ 'swap': Swap,
+}
+
+
+def main():
+ friendly_names = {
+ 'lvm': 'LVM2_member',
+ }
+
+ fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
+
+    # There is no "single command" to manipulate filesystems, so we map them all out along with their options
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
+ dev=dict(type='path', required=True, aliases=['device']),
+ opts=dict(type='str'),
+ force=dict(type='bool', default=False),
+ resizefs=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ('state', 'present', ['fstype'])
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ dev = module.params['dev']
+ fstype = module.params['fstype']
+ opts = module.params['opts']
+ force = module.params['force']
+ resizefs = module.params['resizefs']
+
+ changed = False
+
+ if not os.path.exists(dev):
+ msg = "Device %s not found." % dev
+ if state == "present":
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(msg=msg)
+
+ dev = Device(module, dev)
+
+ cmd = module.get_bin_path('blkid', required=True)
+ rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
+ # If blkid is unable to identify an existing filesystem, the device is
+ # considered empty, and any existing filesystem would then be overwritten
+ # even if force isn't enabled.
+ fs = raw_fs.strip()
+
+ if state == "present":
+ if fstype in friendly_names:
+ fstype = friendly_names[fstype]
+
+ try:
+ klass = FILESYSTEMS[fstype]
+ except KeyError:
+ module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
+
+ filesystem = klass(module)
+
+ # Compare classes rather than names, so aliases such as 'ext4dev' and
+ # 'ext4' (both mapped to Ext4) are treated as the same filesystem.
+ same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
+ if same_fs and not resizefs and not force:
+ module.exit_json(changed=False)
+ elif same_fs and resizefs:
+ if not filesystem.GROW:
+ module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+ out = filesystem.grow(dev)
+
+ module.exit_json(changed=True, msg=out)
+ elif fs and not force:
+ module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
+
+ # create fs
+ filesystem.create(opts, dev)
+ changed = True
+
+ elif fs:
+ # wipe fs signatures
+ filesystem = Filesystem(module)
+ filesystem.wipefs(dev)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py
new file mode 100644
index 00000000..b1df1da8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gconftool2
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Edit GNOME Configurations
+description:
+ - This module allows for the manipulation of GNOME 2 Configuration via
+ gconftool-2. Please see the gconftool-2(1) man pages for more details.
+options:
+ key:
+ type: str
+ description:
+ - A GConf preference key is an element in the GConf repository
+ that corresponds to an application preference. See man gconftool-2(1)
+ required: yes
+ value:
+ type: str
+ description:
+ - Preference keys typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". See man gconftool-2(1)
+ value_type:
+ type: str
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ choices: [ bool, float, int, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the key/value.
+ required: yes
+ choices: [ absent, get, present ]
+ config_source:
+ type: str
+ description:
+ - Specify a configuration source to use rather than the default path.
+ See man gconftool-2(1)
+ direct:
+ description:
+ - Access the config database directly, bypassing the server. If direct is
+ specified then the config_source must be specified as well.
+ See man gconftool-2(1)
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = """
+- name: Change the widget font to "Serif 12"
+ community.general.gconftool2:
+ key: "/desktop/gnome/interface/font_name"
+ value_type: "string"
+ value: "Serif 12"
+ state: present
+"""
+
+RETURN = '''
+ key:
+ description: The key specified in the module parameters
+ returned: success
+ type: str
+ sample: /desktop/gnome/interface/font_name
+ value_type:
+ description: The type of the value that was changed
+ returned: success
+ type: str
+ sample: string
+ value:
+ description: The value of the preference key after executing the module
+ returned: success
+ type: str
+ sample: "Serif 12"
+...
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class GConf2Preference(object):
+ def __init__(self, ansible, key, value_type, value,
+ direct=False, config_source=""):
+ self.ansible = ansible
+ self.key = key
+ self.value_type = value_type
+ self.value = value
+ self.config_source = config_source
+ self.direct = direct
+
+ def value_already_set(self):
+ return False
+
+ def call(self, call_type, fail_onerr=True):
+ """ Helper function to perform gconftool-2 operations """
+ config_source = ''
+ direct = ''
+ changed = False
+ out = ''
+
+ # If the configuration source is different from the default, create
+ # the argument
+ if self.config_source is not None and len(self.config_source) > 0:
+ config_source = "--config-source " + self.config_source
+
+ # If direct is true, create the argument
+ if self.direct:
+ direct = "--direct"
+
+ # Execute the call
+ cmd = "gconftool-2 "
+ try:
+ # If the call is "get", then we don't need as many parameters and
+ # we can ignore some
+ if call_type == 'get':
+ cmd += "--get {0}".format(self.key)
+ # Otherwise, we will use all relevant parameters
+ elif call_type == 'set':
+ cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
+ config_source,
+ self.value_type,
+ call_type,
+ self.key,
+ self.value)
+ elif call_type == 'unset':
+ cmd += "--unset {0}".format(self.key)
+
+ # Start external command
+ rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True)
+
+ if len(err) > 0:
+ if fail_onerr:
+ self.ansible.fail_json(msg='gconftool-2 failed with '
+ 'error: %s' % (str(err)))
+ else:
+ changed = True
+
+ except OSError as exception:
+ self.ansible.fail_json(msg='gconftool-2 failed with exception: '
+ '%s' % exception)
+ return changed, out.rstrip()
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ key=dict(type='str', required=True),
+ value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
+ value=dict(type='str'),
+ state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
+ direct=dict(type='bool', default=False),
+ config_source=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ state_values = {"present": "set", "absent": "unset", "get": "get"}
+
+ # Assign module values to dictionary values
+ key = module.params['key']
+ value_type = module.params['value_type']
+ # Normalize boolean-like values to the lowercase strings gconftool-2
+ # expects. Note that 'value' may be None (e.g. when state is 'get').
+ value = module.params['value']
+ if value is not None and value.lower() in ("true", "false"):
+ value = value.lower()
+
+ state = state_values[module.params['state']]
+ direct = module.params['direct']
+ config_source = module.params['config_source']
+
+ # Initialize some variables for later
+ change = False
+ new_value = ''
+
+ if state != "get":
+ if value is None or value == "":
+ module.fail_json(msg='State %s requires "value" to be set'
+ % str(state))
+ elif value_type is None or value_type == "":
+ module.fail_json(msg='State %s requires "value_type" to be set'
+ % str(state))
+
+ if direct and config_source is None:
+ module.fail_json(msg='If "direct" is "yes" then the ' +
+ '"config_source" must be specified')
+ elif not direct and config_source is not None:
+ module.fail_json(msg='If the "config_source" is specified ' +
+ 'then "direct" must be "yes"')
+
+ # Create a gconf2 preference
+ gconf_pref = GConf2Preference(module, key, value_type,
+ value, direct, config_source)
+ # Now we get the current value, if not found don't fail
+ _, current_value = gconf_pref.call("get", fail_onerr=False)
+
+ # Check if the current value equals the value we want to set. If not, make
+ # a change
+ if current_value != value:
+ # If check mode, we know a change would have occurred.
+ if module.check_mode:
+ # So we will set the change to True
+ change = True
+ # And set the new_value to the value that would have been set
+ new_value = value
+ # If not check mode make the change.
+ else:
+ change, new_value = gconf_pref.call(state)
+ # If the value we want to set is the same as the current_value, we will
+ # set the new_value to the current_value for reporting
+ else:
+ new_value = current_value
+
+ facts = dict(gconftool2={'changed': change,
+ 'key': key,
+ 'value_type': value_type,
+ 'new_value': new_value,
+ 'previous_value': current_value,
+ 'playbook_value': module.params['value']})
+
+ module.exit_json(changed=change, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py
new file mode 100644
index 00000000..d1e37573
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: interfaces_file
+short_description: Tweak settings in /etc/network/interfaces files
+extends_documentation_fragment: files
+description:
+ - Manage (add, remove, change) individual interface options in an interfaces-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). The interface has to be present in the file.
+ - Read information about interfaces from interfaces-style files.
+options:
+ dest:
+ type: path
+ description:
+ - Path to the interfaces file
+ default: /etc/network/interfaces
+ iface:
+ type: str
+ description:
+ - Name of the interface, required for value changes or option removal
+ address_family:
+ type: str
+ description:
+ - Address family of the interface, useful if the same interface name is used for both inet and inet6
+ option:
+ type: str
+ description:
+ - Name of the option, required for value changes or option removal
+ value:
+ type: str
+ description:
+ - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
+ If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
+ C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; only adding new options, removing existing
+ ones or cleaning the whole option set is supported.
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ state:
+ type: str
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ default: "present"
+ choices: [ "present", "absent" ]
+
+notes:
+ - If an option is defined multiple times, the last one will be updated; with I(state=absent), all of them will be deleted.
+requirements: []
+author: "Roman Belyakovsky (@hryamzik)"
+'''
+
+RETURN = '''
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: "/etc/network/interfaces"
+ifaces:
+ description: interfaces dictionary
+ returned: success
+ type: complex
+ contains:
+ ifaces:
+ description: interface dictionary
+ returned: success
+ type: dict
+ contains:
+ eth0:
+ description: Name of the interface
+ returned: success
+ type: dict
+ contains:
+ address_family:
+ description: interface address family
+ returned: success
+ type: str
+ sample: "inet"
+ method:
+ description: interface method
+ returned: success
+ type: str
+ sample: "manual"
+ mtu:
+ description: other options, all values returned as strings
+ returned: success
+ type: str
+ sample: "1500"
+ pre-up:
+ description: list of C(pre-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ up:
+ description: list of C(up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ post-up:
+ description: list of C(post-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ down:
+ description: list of C(down) scripts
+ returned: success
+ type: list
+ sample:
+ - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+...
+'''
+
+EXAMPLES = '''
+- name: Set eth1 mtu configuration value to 8000
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: mtu
+ value: 8000
+ backup: yes
+ state: present
+ register: eth1_cfg
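+
+# A companion sketch: removing the same option again with state=absent.
+- name: Remove mtu configuration from eth1
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: mtu
+ state: absent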
+'''
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def lineDict(line):
+ return {'line': line, 'line_type': 'unknown'}
+
+
+def optionDict(line, iface, option, value, address_family):
+ return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}
+
+
+def getValueFromLine(s):
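+ # The value spans from the end of the first whitespace run after the option
+ # name up to the start of the last whitespace run (usually the trailing
+ # newline). For instance, " mtu 1500\n" yields "1500", and multi-word values
+ # such as pre-up commands are preserved as-is.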
+ spaceRe = re.compile(r'\s+')
+ for m in spaceRe.finditer(s):
+ pass
+ valueEnd = m.start()
+ option = s.split()[0]
+ optionStart = s.find(option)
+ optionLen = len(option)
+ valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
+ return s[valueStart:valueEnd]
+
+
+def read_interfaces_file(module, filename):
+ with open(filename, 'r') as f:
+ return read_interfaces_lines(module, f)
+
+
+def read_interfaces_lines(module, line_strings):
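+ # A small state machine: 'currently_processing' tracks whether we are inside
+ # an 'iface' stanza, a 'mapping' stanza, or neither, so that bare option
+ # lines can be attached to the right interface.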
+ lines = []
+ ifaces = {}
+ currently_processing = None
+ i = 0
+ for line in line_strings:
+ i += 1
+ words = line.split()
+ if len(words) < 1:
+ lines.append(lineDict(line))
+ continue
+ if words[0][0] == "#":
+ lines.append(lineDict(line))
+ continue
+ if words[0] == "mapping":
+ # currmap = calloc(1, sizeof *currmap);
+ lines.append(lineDict(line))
+ currently_processing = "MAPPING"
+ elif words[0] == "source":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-dir":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-directory":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "iface":
+ currif = {
+ "pre-up": [],
+ "up": [],
+ "down": [],
+ "post-up": []
+ }
+ iface_name = words[1]
+ try:
+ currif['address_family'] = words[2]
+ except IndexError:
+ currif['address_family'] = None
+ address_family = currif['address_family']
+ try:
+ currif['method'] = words[3]
+ except IndexError:
+ currif['method'] = None
+
+ ifaces[iface_name] = currif
+ lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
+ currently_processing = "IFACE"
+ elif words[0] == "auto":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0].startswith("allow-"):
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-auto-down":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-scripts":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ else:
+ if currently_processing == "IFACE":
+ option_name = words[0]
+ # TODO: if option_name in currif.options
+ value = getValueFromLine(line)
+ lines.append(optionDict(line, iface_name, option_name, value, address_family))
+ if option_name in ["pre-up", "up", "down", "post-up"]:
+ currif[option_name].append(value)
+ else:
+ currif[option_name] = value
+ elif currently_processing == "MAPPING":
+ lines.append(lineDict(line))
+ elif currently_processing == "NONE":
+ lines.append(lineDict(line))
+ else:
+ module.fail_json(msg="misplaced option %s in line %d" % (line, i))
+ return None, None
+ return lines, ifaces
+
+
+def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None):
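+ # The pre-up/up/down/post-up options may legitimately repeat, so they are
+ # added or removed by exact value; any other option is a scalar whose last
+ # occurrence gets edited in place.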
+ value = str(raw_value)
+ changed = False
+
+ iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
+ if address_family is not None:
+ iface_lines = [item for item in iface_lines
+ if "address_family" in item and item["address_family"] == address_family]
+
+ if len(iface_lines) < 1:
+ # interface not found
+ module.fail_json(msg="Error: interface %s not found" % iface)
+ return changed, None
+
+ iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
+ target_options = list(filter(lambda i: i['option'] == option, iface_options))
+
+ if state == "present":
+ if len(target_options) < 1:
+ changed = True
+ # add new option
+ last_line_dict = iface_lines[-1]
+ changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
+ else:
+ if option in ["pre-up", "up", "down", "post-up"]:
+ if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
+ changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
+ else:
+ # if more than one option found edit the last one
+ if target_options[-1]['value'] != value:
+ changed = True
+ target_option = target_options[-1]
+ old_line = target_option['line']
+ old_value = target_option['value']
+ address_family = target_option['address_family']
+ prefix_start = old_line.find(option)
+ optionLen = len(option)
+ old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
+ start = old_value_position.start() + prefix_start + optionLen
+ end = old_value_position.end() + prefix_start + optionLen
+ line = old_line[:start] + value + old_line[end:]
+ index = len(lines) - lines[::-1].index(target_option) - 1
+ lines[index] = optionDict(line, iface, option, value, address_family)
+ elif state == "absent":
+ if len(target_options) >= 1:
+ if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
+ for target_option in filter(lambda i: i['value'] == value, target_options):
+ changed = True
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ changed = True
+ for target_option in target_options:
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
+
+ return changed, lines
+
+
+def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
+ # Changing method of interface is not an addition
+ if option == 'method':
+ changed = False
+ for ln in lines:
+ if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
+ changed = True
+ ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
+ ln['params']['method'] = value
+ return changed, lines
+
+ last_line = last_line_dict['line']
+ prefix_start = last_line.find(last_line.split()[0])
+ suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
+ prefix = last_line[:prefix_start]
+
+ if len(iface_options) < 1:
+ # interface has no options yet, indent the new one
+ prefix += " "
+
+ line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
+ option_dict = optionDict(line, iface, option, value, address_family)
+ # Insert just after the last occurrence of last_line_dict
+ index = len(lines) - lines[::-1].index(last_line_dict)
+ lines.insert(index, option_dict)
+ return True, lines
+
+
+def write_changes(module, lines, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
+ f.close()
+ module.atomic_move(tmpfile, os.path.realpath(dest))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', default='/etc/network/interfaces'),
+ iface=dict(type='str'),
+ address_family=dict(type='str'),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ required_by=dict(
+ option=('iface',),
+ ),
+ )
+
+ dest = module.params['dest']
+ iface = module.params['iface']
+ address_family = module.params['address_family']
+ option = module.params['option']
+ value = module.params['value']
+ backup = module.params['backup']
+ state = module.params['state']
+
+ if option is not None and state == "present" and value is None:
+ module.fail_json(msg="Value must be set if option is defined and state is 'present'")
+
+ lines, ifaces = read_interfaces_file(module, dest)
+
+ changed = False
+
+ if option is not None:
+ changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family)
+
+ if changed:
+ _, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(dest)
+ write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
+
+ module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py
new file mode 100644
index 00000000..56475268
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py
@@ -0,0 +1,649 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables_state
+short_description: Save iptables state into a file or restore it from a file
+version_added: '1.1.0'
+author: quidame (@quidame)
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP
+ packet filter rules in the Linux kernel.
+ - This module handles the saving and/or loading of rules. This is the same
+ as the behaviour of the C(iptables-save) and C(iptables-restore) (or
+ C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this
+ module uses internally.
+ - Modifying the state of the firewall remotely may cause you to lose access
+ to the host if there is a mistake in the new ruleset. This module embeds a
+ rollback feature to avoid this, by telling the host to restore previous
+ rules if a cookie is still there after a given delay, and all this time
+ telling the controller to try to remove this cookie on the host through a
+ new connection.
+notes:
+ - The rollback feature is not a module option and depends on task's
+ attributes. To enable it, the module must be played asynchronously, i.e.
+ by setting task attributes I(poll) to C(0), and I(async) to a value less
+ than or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the rollback
+ will still happen when it has to, but you will experience a connection
+ timeout instead of the more relevant info returned by the module after its
+ failure.
+ - This module supports I(check_mode).
+options:
+ counters:
+ description:
+ - Save or restore the values of all packet and byte counters.
+ - When C(true), the module is not idempotent.
+ type: bool
+ default: false
+ ip_version:
+ description:
+ - Which version of the IP protocol this module should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ modprobe:
+ description:
+ - Specify the path to the C(modprobe) program internally used by iptables
+ related commands to load kernel modules.
+ - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the
+ executable's path.
+ type: path
+ noflush:
+ description:
+ - Only used when I(state=restored); ignored otherwise.
+ - If C(false), restoring iptables rules from a file flushes (deletes)
+ all previous contents of the respective table(s). If C(true), the
+ previous rules are left untouched (but policies are updated anyway,
+ for all built-in chains).
+ type: bool
+ default: false
+ path:
+ description:
+ - The file the iptables state should be saved to.
+ - The file the iptables state should be restored from.
+ type: path
+ required: yes
+ state:
+ description:
+ - Whether the firewall state should be saved (into a file) or restored
+ (from a file).
+ type: str
+ choices: [ saved, restored ]
+ required: yes
+ table:
+ description:
+ - When I(state=restored), restore only the named table even if the input
+ file contains other tables. Fail if the named table is not declared in
+ the file.
+ - When I(state=saved), restrict output to the specified table. If not
+ specified, output includes all active tables.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent instant failure in case
+ multiple instances of the program are running concurrently.
+ type: int
+requirements: [iptables, ip6tables]
+'''
+
+EXAMPLES = r'''
+# This will apply to all loaded/active IPv4 tables.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ state: saved
+ path: /etc/sysconfig/iptables
+
+# This will apply only to IPv6 filter table.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ ip_version: ipv6
+ table: filter
+ state: saved
+ path: /etc/iptables/rules.v6
+
+# This will load a state from a file, with a rollback in case of access loss
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will load new rules by appending them to the current ones
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ noflush: true
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will only retrieve information
+- name: Get current state of the firewall
+ community.general.iptables_state:
+ state: saved
+ path: /tmp/iptables
+ check_mode: yes
+ changed_when: false
+ register: iptables_state
+
+- name: Show current state of the firewall
+ ansible.builtin.debug:
+ var: iptables_state.initial_state
+'''
+
+RETURN = r'''
+applied:
+ description: Whether or not the wanted state has been successfully restored.
+ type: bool
+ returned: always
+ sample: true
+initial_state:
+ description: The current state of the firewall when the module starts.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD ACCEPT [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+restored:
+ description: The state the module restored, whether or not it was finally applied.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT DROP [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
+ "-A INPUT -m conntrack --ctstate INVALID -j DROP",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "COMMIT",
+ "# Completed"
+ ]
+saved:
+ description: The iptables state the module saved.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+tables:
+ description: The tables of interest when the module starts.
+ type: dict
+ contains:
+ table:
+ description: Policies and rules for all chains of the named table.
+ type: list
+ elements: str
+ sample: |-
+ {
+ "filter": [
+ ":INPUT ACCEPT",
+ ":FORWARD ACCEPT",
+ ":OUTPUT ACCEPT",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "-A INPUT -j REJECT --reject-with icmp-host-prohibited"
+ ],
+ "nat": [
+ ":PREROUTING ACCEPT",
+ ":INPUT ACCEPT",
+ ":OUTPUT ACCEPT",
+ ":POSTROUTING ACCEPT"
+ ]
+ }
+ returned: always
+'''
+
+
+import re
+import os
+import time
+import tempfile
+import filecmp
+import shutil
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+IPTABLES = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+SAVE = dict(
+ ipv4='iptables-save',
+ ipv6='ip6tables-save',
+)
+
+RESTORE = dict(
+ ipv4='iptables-restore',
+ ipv6='ip6tables-restore',
+)
+
+TABLES = ['filter', 'mangle', 'nat', 'raw', 'security']
+
+
+def read_state(b_path):
+ '''
+ Read a file and store its content in a variable as a list.
+ '''
+ with open(b_path, 'r') as f:
+ text = f.read()
+ lines = text.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def write_state(b_path, lines, changed):
+ '''
+ Write given contents to the given path, and return changed status.
+ '''
+ # Populate a temporary file
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in lines:
+ f.write('%s\n' % line)
+
+ # Prepare to copy temporary file to the final destination
+ if not os.path.exists(b_path):
+ b_destdir = os.path.dirname(b_path)
+ destdir = to_native(b_destdir, errors='surrogate_or_strict')
+ if b_destdir and not os.path.exists(b_destdir) and not module.check_mode:
+ try:
+ os.makedirs(b_destdir)
+ except Exception as e:
+ module.fail_json(
+ msg='Error creating %s: %s' % (destdir, to_native(e)),
+ initial_state=lines)
+ changed = True
+
+ elif not filecmp.cmp(tmpfile, b_path):
+ changed = True
+
+ # Do it
+ if changed and not module.check_mode:
+ try:
+ shutil.copyfile(tmpfile, b_path)
+ except Exception as e:
+ path = to_native(b_path, errors='surrogate_or_strict')
+ module.fail_json(
+ msg='Error saving state into %s: %s' % (path, to_native(e)),
+ initial_state=lines)
+
+ return changed
+
+
+def initialize_from_null_state(initializer, initcommand, table):
+ '''
+ This ensures iptables-save output is suitable for iptables-restore to roll
+ back to, i.e. that it is not empty. This also works with the
+ iptables-nft-save alternative.
+ '''
+ if table is None:
+ table = 'filter'
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ f.write('*%s\nCOMMIT\n' % table)
+
+ initializer.append(tmpfile)
+ (rc, out, err) = module.run_command(initializer, check_rc=True)
+ (rc, out, err) = module.run_command(initcommand, check_rc=True)
+ return (rc, out, err)
+
+
+def filter_and_format_state(string):
+ '''
+ Remove timestamps to ensure idempotence between runs, and remove counters
+ unless they were explicitly requested. Return the result as a list.
+ '''
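+ # For instance, "# Generated by iptables-save v1.8.4 on Thu Jan 1 00:00:00 2021"
+ # becomes "# Generated by iptables-save v1.8.4", so successive runs compare
+ # equal no matter when the dump was taken.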
+ string = re.sub('((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', '\\1', string)
+ if not module.params['counters']:
+ string = re.sub('[[][0-9]+:[0-9]+[]]', '[0:0]', string)
+ lines = string.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def per_table_state(command, state):
+ '''
+ Convert raw iptables-save output into usable datastructure, for reliable
+ comparisons between initial and final states.
+ '''
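+ # The result maps each active table name to its list of policies and rules,
+ # stripped of comments, counters and COMMIT markers (see the 'tables'
+ # return value documentation for a sample).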
+ tables = dict()
+ for t in TABLES:
+ COMMAND = list(command)
+ if '*%s' % t in state.splitlines():
+ COMMAND.extend(['--table', t])
+ (rc, out, err) = module.run_command(COMMAND, check_rc=True)
+ out = re.sub('(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, '', out)
+ out = re.sub(' *[[][0-9]+:[0-9]+[]] *', '', out)
+ table = out.splitlines()
+ while '' in table:
+ table.remove('')
+ tables[t] = table
+ return (tables)
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ state=dict(type='str', choices=['saved', 'restored'], required=True),
+ table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ noflush=dict(type='bool', default=False),
+ counters=dict(type='bool', default=False),
+ modprobe=dict(type='path'),
+ ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'),
+ wait=dict(type='int'),
+ _timeout=dict(type='int'),
+ _back=dict(type='path'),
+ ),
+ required_together=[
+ ['_timeout', '_back'],
+ ],
+ supports_check_mode=True,
+ )
+
+ # We'll parse iptables-restore stderr
+ module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
+
+ path = module.params['path']
+ state = module.params['state']
+ table = module.params['table']
+ noflush = module.params['noflush']
+ counters = module.params['counters']
+ modprobe = module.params['modprobe']
+ ip_version = module.params['ip_version']
+ wait = module.params['wait']
+ _timeout = module.params['_timeout']
+ _back = module.params['_back']
+
+ bin_iptables = module.get_bin_path(IPTABLES[ip_version], True)
+ bin_iptables_save = module.get_bin_path(SAVE[ip_version], True)
+ bin_iptables_restore = module.get_bin_path(RESTORE[ip_version], True)
+
+ os.umask(0o077)
+ changed = False
+ COMMANDARGS = []
+ INITCOMMAND = [bin_iptables_save]
+ INITIALIZER = [bin_iptables_restore]
+ TESTCOMMAND = [bin_iptables_restore, '--test']
+
+ if counters:
+ COMMANDARGS.append('--counters')
+
+ if table is not None:
+ COMMANDARGS.extend(['--table', table])
+
+ if wait is not None:
+ TESTCOMMAND.extend(['--wait', '%s' % wait])
+
+ if modprobe is not None:
+ b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict')
+ if not os.path.exists(b_modprobe):
+ module.fail_json(msg="modprobe %s not found" % modprobe)
+ if not os.path.isfile(b_modprobe):
+ module.fail_json(msg="modprobe %s not a file" % modprobe)
+ if not os.access(b_modprobe, os.R_OK):
+ module.fail_json(msg="modprobe %s not readable" % modprobe)
+ if not os.access(b_modprobe, os.X_OK):
+ module.fail_json(msg="modprobe %s not executable" % modprobe)
+ COMMANDARGS.extend(['--modprobe', modprobe])
+ INITIALIZER.extend(['--modprobe', modprobe])
+ INITCOMMAND.extend(['--modprobe', modprobe])
+ TESTCOMMAND.extend(['--modprobe', modprobe])
+
+ SAVECOMMAND = list(COMMANDARGS)
+ SAVECOMMAND.insert(0, bin_iptables_save)
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if state == 'restored':
+ if not os.path.exists(b_path):
+ module.fail_json(msg="Source %s not found" % path)
+ if not os.path.isfile(b_path):
+ module.fail_json(msg="Source %s not a file" % path)
+ if not os.access(b_path, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % path)
+ state_to_restore = read_state(b_path)
+ else:
+ cmd = ' '.join(SAVECOMMAND)
+
+ (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True)
+
+ # The issue comes up when wanting to restore state from empty iptables-save's
+ # output... what happens when, say:
+ # - no table is specified, and iptables-save's output is only the nat table;
+ # - we give filter's ruleset to iptables-restore, which locks ourselves out
+ # of the host;
+ # then trying to roll the iptables state back to the previous (working) setup
+ # doesn't override the current filter table because no filter table is stored
+ # in the backup! So we have to ensure tables to be restored have a backup in
+ # case of rollback.
+ if table is None:
+ if state == 'restored':
+ for t in TABLES:
+ if '*%s' % t in state_to_restore:
+ if len(stdout) == 0 or '*%s' % t not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, t)
+ elif len(stdout) == 0:
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, 'filter')
+
+ elif state == 'restored' and '*%s' % table not in state_to_restore:
+ module.fail_json(msg="Table %s to restore not defined in %s" % (table, path))
+
+ elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, table)
+
+ initial_state = filter_and_format_state(stdout)
+ if initial_state is None:
+ module.fail_json(msg="Unable to initialize firewall from NULL state.")
+
+ # Depending on the value of 'table', initref_state may differ from
+ # initial_state.
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_before = per_table_state(SAVECOMMAND, stdout)
+ initref_state = filter_and_format_state(stdout)
+
+ if state == 'saved':
+ changed = write_state(b_path, initref_state, changed)
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ saved=initref_state)
+
+ #
+ # All remaining code is for state=restored
+ #
+
+ MAINCOMMAND = list(COMMANDARGS)
+ MAINCOMMAND.insert(0, bin_iptables_restore)
+
+ if wait is not None:
+ MAINCOMMAND.extend(['--wait', '%s' % wait])
+
+ if _back is not None:
+ b_back = to_bytes(_back, errors='surrogate_or_strict')
+ garbage = write_state(b_back, initref_state, changed)
+ BACKCOMMAND = list(MAINCOMMAND)
+ BACKCOMMAND.append(_back)
+
+ if noflush:
+ MAINCOMMAND.append('--noflush')
+
+ MAINCOMMAND.append(path)
+ cmd = ' '.join(MAINCOMMAND)
+
+ TESTCOMMAND = list(MAINCOMMAND)
+ TESTCOMMAND.insert(1, '--test')
+ error_msg = "Source %s is not suitable for input to %s" % (path, os.path.basename(bin_iptables_restore))
+
+ # Due to a bug in iptables-nft-restore --test, we have to validate tables
+ # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003).
+ for t in tables_before:
+ testcommand = list(TESTCOMMAND)
+ testcommand.extend(['--table', t])
+ (rc, stdout, stderr) = module.run_command(testcommand)
+
+ if 'Another app is currently holding the xtables lock' in stderr:
+ error_msg = stderr
+
+ if rc != 0:
+ cmd = ' '.join(testcommand)
+ module.fail_json(
+ msg=error_msg,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ if module.check_mode:
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in initial_state:
+ f.write('%s\n' % line)
+
+ if filecmp.cmp(tmpfile, b_path):
+ restored_state = initial_state
+ else:
+ restored_state = state_to_restore
+
+ else:
+ # Give the action plugin enough time to retrieve the async status of the
+ # module, in case of bad option type/value and the like.
+ if _back is not None:
+ b_starter = to_bytes('%s.starter' % _back, errors='surrogate_or_strict')
+ while True:
+ if os.path.exists(b_starter):
+ os.remove(b_starter)
+ break
+ else:
+ time.sleep(0.01)
+ continue
+
+ (rc, stdout, stderr) = module.run_command(MAINCOMMAND)
+ if 'Another app is currently holding the xtables lock' in stderr:
+ module.fail_json(
+ msg=stderr,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ restored_state = filter_and_format_state(stdout)
+
+ if restored_state != initref_state and restored_state != initial_state:
+ if module.check_mode:
+ changed = True
+ else:
+ tables_after = per_table_state(SAVECOMMAND, stdout)
+ if tables_after != tables_before:
+ changed = True
+
+ if _back is None or module.check_mode:
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # The rollback implementation currently needs:
+ # Here:
+ # * test existence of the backup file, exit with success if it doesn't exist
+ # * otherwise, restore iptables from this file and return failure
+ # Action plugin:
+ # * try to remove the backup file
+ # * wait until the async task is finished and retrieve its final status
+ # * modify it and return the result
+ # Task:
+ # * task attribute 'async' set to the same value as (or lower than) the
+ # Ansible timeout
+ # * task attribute 'poll' equals 0
+ #
+ for x in range(_timeout):
+ if os.path.exists(b_back):
+ time.sleep(1)
+ continue
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # If we are still here: for whatever reason, probably due to the current
+ # ruleset, the action plugin (i.e. on the controller) was unable to remove
+ # the backup cookie, so we restore the initial state from it.
+ (rc, stdout, stderr) = module.run_command(BACKCOMMAND, check_rc=True)
+ os.remove(b_back)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_rollback = per_table_state(SAVECOMMAND, stdout)
+
+ msg = (
+ "Failed to confirm state restored from %s after %ss. "
+ "Firewall has been rolled back to its initial state." % (path, _timeout)
+ )
+
+ module.fail_json(
+ changed=(tables_before != tables_rollback),
+ msg=msg,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py
new file mode 100644
index 00000000..7333397b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, RSD Services S.A
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: java_cert
+short_description: Uses keytool to import/remove a certificate from a Java keystore (cacerts)
+description:
+ - This is a wrapper module around keytool, which can be used to import/remove
+ certificates from a given java keystore.
+options:
+ cert_url:
+ description:
+ - Basic URL to fetch SSL certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: str
+ cert_port:
+ description:
+ - Port to connect to the URL.
+ - This will be used to build the server address as URL:PORT.
+ type: int
+ default: 443
+ cert_path:
+ description:
+ - Local path to load certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: path
+ cert_alias:
+ description:
+ - Imported certificate alias.
+ - The alias is used when checking for the presence of a certificate in the keystore.
+ type: str
+ trust_cacert:
+ description:
+ - Trust imported cert as CAcert.
+ type: bool
+ default: False
+ version_added: '0.2.0'
+ pkcs12_path:
+ description:
+ - Local path to load PKCS12 keystore from.
+ type: path
+ pkcs12_password:
+ description:
+ - Password for importing from PKCS12 keystore.
+ type: str
+ default: ''
+ pkcs12_alias:
+ description:
+ - Alias in the PKCS12 keystore.
+ type: str
+ keystore_path:
+ description:
+ - Path to keystore.
+ type: path
+ keystore_pass:
+ description:
+ - Keystore password.
+ type: str
+ required: true
+ keystore_create:
+ description:
+ - Create keystore if it does not exist.
+ type: bool
+ default: false
+ keystore_type:
+ description:
+ - Keystore type (JCEKS, JKS).
+ type: str
+ executable:
+ description:
+ - Path to the keytool binary. If not set, keytool is searched for in PATH.
+ type: str
+ default: keytool
+ state:
+ description:
+ - Defines the action, which can be either certificate import or removal.
+ type: str
+ choices: [ absent, present ]
+ default: present
+author:
+- Adam Hamsik (@haad)
+'''
+
+EXAMPLES = r'''
+- name: Import SSL certificate from google.com to a given cacerts keystore
+ community.general.java_cert:
+ cert_url: google.com
+ cert_port: 443
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ state: present
+
+- name: Remove certificate with given alias from a keystore
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ executable: /usr/lib/jvm/jre7/bin/keytool
+ state: absent
+
+- name: Import trusted CA from SSL certificate
+ community.general.java_cert:
+ cert_path: /opt/certs/rootca.crt
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+ cert_alias: LE_RootCA
+ trust_cacert: True
+
+- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ cert_alias: default
+ keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import SSL certificate to JCEKS keystore
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ pkcs12_alias: default
+ pkcs12_password: somepass
+ cert_alias: default
+ keystore_path: /opt/someapp/security/keystore.jceks
+ keystore_type: "JCEKS"
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+'''
+
+RETURN = r'''
+msg:
+ description: Output from stdout of keytool command after execution of given command.
+ returned: success
+ type: str
+ sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
+
+rc:
+ description: Keytool command execution return value.
+ returned: success
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done.
+ returned: success
+ type: str
+ sample: "keytool -importcert -noprompt -keystore"
+'''
+
+import os
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_keystore_type(keystore_type):
+ ''' Return the -storetype option if a custom keystore type is given '''
+ if keystore_type:
+ return " -storetype '%s'" % keystore_type
+ return ''
+
+
+def check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Check if certificate with alias is present in keystore
+ located at keystore_path '''
+ test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ (check_rc, _, _) = module.run_command(test_cmd)
+ if check_rc == 0:
+ return True
+ return False
+
+
+def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from URL into keystore located at keystore_path '''
+
+ https_proxy = os.getenv("https_proxy")
+ no_proxy = os.getenv("no_proxy")
+
+ proxy_opts = ''
+ if https_proxy is not None:
+ (proxy_host, proxy_port) = https_proxy.split(':')
+ proxy_opts = "-J-Dhttps.proxyHost=%s -J-Dhttps.proxyPort=%s" % (proxy_host, proxy_port)
+
+ if no_proxy is not None:
+ # For Java's nonProxyHosts property, items are separated by '|',
+ # and patterns have to start with "*".
+ non_proxy_hosts = no_proxy.replace(',', '|')
+ non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts)
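+ # e.g. no_proxy=".example.com,localhost" becomes "*.example.com|localhost"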
+
+ # The property name is http.nonProxyHosts, there is no
+ # separate setting for HTTPS.
+ proxy_opts += " -J-Dhttp.nonProxyHosts='%s'" % non_proxy_hosts
+
+ fetch_cmd = "%s -printcert -rfc -sslserver %s %s:%d" % (executable, proxy_opts, url, port)
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, alias,
+ get_keystore_type(keystore_type))
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Fetch SSL certificate from remote host.
+ (_, fetch_out, _) = module.run_command(fetch_cmd, check_rc=True)
+
+ # Use remote certificate from remote host and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ data=fetch_out,
+ check_rc=False)
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd,
+ error=import_err)
+
+
+def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -file '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, path, alias,
+ get_keystore_type(keystore_type))
+
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def import_pkcs12_path(module, executable, path, keystore_path, keystore_pass, pkcs12_pass, pkcs12_alias, alias, keystore_type):
+ ''' Import pkcs12 from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = ("%s -importkeystore -noprompt -destkeystore '%s' -srcstoretype PKCS12 "
+ "-deststorepass '%s' -destkeypass '%s' -srckeystore '%s' -srcstorepass '%s' "
+ "-srcalias '%s' -destalias '%s' %s") % (executable, keystore_path, keystore_pass,
+ keystore_pass, path, pkcs12_pass, pkcs12_alias,
+ alias, get_keystore_type(keystore_type))
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Delete certificate identified with alias from keystore on keystore_path '''
+ del_cmd = ("%s -delete -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ # Delete SSL certificate from keystore
+ (del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True)
+
+ diff = {'before': '%s\n' % alias, 'after': None}
+
+ module.exit_json(changed=True, msg=del_out,
+ rc=del_rc, cmd=del_cmd, stdout=del_out,
+ error=del_err, diff=diff)
+
+
+def test_keytool(module, executable):
+ ''' Test if keytool is actually executable or not '''
+ module.run_command("%s" % executable, check_rc=True)
+
+
+def test_keystore(module, keystore_path):
+ ''' Check whether the keystore exists as a file '''
+ if keystore_path is None:
+ keystore_path = ''
+
+ if not os.path.exists(keystore_path):
+ # The keystore doesn't exist and keystore_create was not requested: fail.
+ module.fail_json(changed=False, msg="Module requires an existing keystore at keystore_path '%s'" % keystore_path)
+
+
+def main():
+ argument_spec = dict(
+ cert_url=dict(type='str'),
+ cert_path=dict(type='path'),
+ pkcs12_path=dict(type='path'),
+ pkcs12_password=dict(type='str', no_log=True),
+ pkcs12_alias=dict(type='str'),
+ cert_alias=dict(type='str'),
+ cert_port=dict(type='int', default=443),
+ keystore_path=dict(type='path'),
+ keystore_pass=dict(type='str', required=True, no_log=True),
+ trust_cacert=dict(type='bool', default=False),
+ keystore_create=dict(type='bool', default=False),
+ keystore_type=dict(type='str'),
+ executable=dict(type='str', default='keytool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[['cert_path', 'cert_url', 'pkcs12_path']],
+ required_together=[['keystore_path', 'keystore_pass']],
+ mutually_exclusive=[
+ ['cert_url', 'cert_path', 'pkcs12_path']
+ ],
+ supports_check_mode=True,
+ )
+
+ url = module.params.get('cert_url')
+ path = module.params.get('cert_path')
+ port = module.params.get('cert_port')
+
+ pkcs12_path = module.params.get('pkcs12_path')
+ # The .get() defaults never trigger here since the keys always exist
+ # (possibly as None), so fall back explicitly instead.
+ pkcs12_pass = module.params.get('pkcs12_password') or ''
+ pkcs12_alias = module.params.get('pkcs12_alias') or '1'
+
+ cert_alias = module.params.get('cert_alias') or url
+ trust_cacert = module.params.get('trust_cacert')
+
+ keystore_path = module.params.get('keystore_path')
+ keystore_pass = module.params.get('keystore_pass')
+ keystore_create = module.params.get('keystore_create')
+ keystore_type = module.params.get('keystore_type')
+ executable = module.params.get('executable')
+ state = module.params.get('state')
+
+ if path and not cert_alias:
+ module.fail_json(changed=False,
+ msg="Using local path import from %s requires alias argument."
+ % path)
+
+ test_keytool(module, executable)
+
+ if not keystore_create:
+ test_keystore(module, keystore_path)
+
+ cert_present = check_cert_present(module, executable, keystore_path,
+ keystore_pass, cert_alias, keystore_type)
+
+ if state == 'absent' and cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
+
+ elif state == 'present' and not cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if pkcs12_path:
+ import_pkcs12_path(module, executable, pkcs12_path, keystore_path,
+ keystore_pass, pkcs12_pass, pkcs12_alias, cert_alias, keystore_type)
+
+ if path:
+ import_cert_path(module, executable, path, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ if url:
+ import_cert_url(module, executable, url, port, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py
new file mode 100644
index 00000000..db37bdee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Guillaume Grossetie <ggrossetie@yuzutech.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: java_keystore
+short_description: Create or delete a Java keystore in JKS format
+description:
+ - Create or delete a Java keystore in JKS format for a given certificate.
+options:
+ name:
+ type: str
+ description:
+ - Name of the certificate.
+ required: true
+ certificate:
+ type: str
+ description:
+ - Certificate that should be used to create the key store.
+ required: true
+ private_key:
+ type: str
+ description:
+ - Private key that should be used to create the key store.
+ required: true
+ private_key_passphrase:
+ description:
+ - Pass phrase for reading the private key, if required.
+ type: str
+ required: false
+ version_added: '0.2.0'
+ password:
+ type: str
+ description:
+ - Password that should be used to secure the key store.
+ required: true
+ dest:
+ type: path
+ description:
+ - Absolute path where the jks should be generated.
+ required: true
+ owner:
+ description:
+ - Name of the user that should own jks file.
+ required: false
+ group:
+ description:
+ - Name of the group that should own jks file.
+ required: false
+ mode:
+ description:
+ - Mode the resulting jks file should have.
+ required: false
+ force:
+ description:
+ - Keystore will be re-created even if it already exists.
+ required: false
+ type: bool
+ default: 'no'
+requirements: [openssl, keytool]
+author: Guillaume Grossetie (@Mogztter)
+extends_documentation_fragment:
+- files
+
+'''
+
+EXAMPLES = '''
+- name: Create a key store for the given certificate (inline)
+ community.general.java_keystore:
+ name: example
+ certificate: |
+ -----BEGIN CERTIFICATE-----
+ h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69
+ MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB
+ -----END CERTIFICATE-----
+ private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3
+ GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99
+ -----END RSA PRIVATE KEY-----
+ password: changeit
+ dest: /etc/security/keystore.jks
+
+- name: Create a key store for the given certificate (lookup)
+ community.general.java_keystore:
+ name: example
+    certificate: "{{ lookup('file', '/path/to/certificate.crt') }}"
+    private_key: "{{ lookup('file', '/path/to/private.key') }}"
+ password: changeit
+ dest: /etc/security/keystore.jks
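+
+# A hypothetical variant for a passphrase-protected private key; the
+# passphrase and paths below are placeholders:
+- name: Create a key store for a passphrase-protected private key
+  community.general.java_keystore:
+    name: example
+    certificate: "{{ lookup('file', '/path/to/certificate.crt') }}"
+    private_key: "{{ lookup('file', '/path/to/private.key') }}"
+    private_key_passphrase: changeme
+    password: changeit
+    dest: /etc/security/keystore.jks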
+'''
+
+RETURN = '''
+msg:
+  description: Output from stdout of the keytool or openssl command after execution, or an error message.
+ returned: changed and failure
+ type: str
+ sample: "Unable to find the current certificate fingerprint in ..."
+
+rc:
+  description: Return code of the executed keytool or openssl command.
+ returned: changed and failure
+ type: int
+ sample: "0"
+
+cmd:
+  description: The command executed to perform the action.
+ returned: changed and failure
+ type: str
+ sample: "openssl x509 -noout -in /tmp/cert.crt -fingerprint -sha256"
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+
+
+def read_certificate_fingerprint(module, openssl_bin, certificate_path):
+ current_certificate_fingerprint_cmd = [openssl_bin, "x509", "-noout", "-in", certificate_path, "-fingerprint", "-sha256"]
+ (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = run_commands(module, current_certificate_fingerprint_cmd)
+ if rc != 0:
+ return module.fail_json(msg=current_certificate_fingerprint_out,
+ err=current_certificate_fingerprint_err,
+ rc=rc,
+ cmd=current_certificate_fingerprint_cmd)
+
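+    # openssl prints a line such as "SHA256 Fingerprint=AA:BB:...", so capture
+    # the colon-separated hex digest after the '=' sign.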
+ current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out)
+ if not current_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out,
+ rc=rc,
+            cmd=current_certificate_fingerprint_cmd
+ )
+
+ return current_certificate_match.group(1)
+
+
+def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password):
+ stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass", keystore_password, "-v"]
+ (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands(module, stored_certificate_fingerprint_cmd)
+ if rc != 0:
+ if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias not in stored_certificate_fingerprint_out:
+ return module.fail_json(msg=stored_certificate_fingerprint_out,
+ err=stored_certificate_fingerprint_err,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd)
+ else:
+ return None
+ else:
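+        # 'keytool -list -v' prints a "SHA256: AA:BB:..." line for the stored
+        # certificate; capture the digest for comparison.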
+ stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out)
+ if not stored_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd
+ )
+
+ return stored_certificate_match.group(1)
+
+
+def run_commands(module, cmd, data=None, check_rc=True):
+ return module.run_command(cmd, check_rc=check_rc, data=data)
+
+
+def create_file(path, content):
+ with open(path, 'w') as f:
+ f.write(content)
+ return path
+
+
+def create_tmp_certificate(module):
+ return create_file("/tmp/%s.crt" % module.params['name'], module.params['certificate'])
+
+
+def create_tmp_private_key(module):
+ return create_file("/tmp/%s.key" % module.params['name'], module.params['private_key'])
+
+
+def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias):
+ certificate_path = create_tmp_certificate(module)
+ try:
+ current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path)
+ stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass)
+ return current_certificate_fingerprint != stored_certificate_fingerprint
+ finally:
+ os.remove(certificate_path)
+
+
+def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ certificate_path = create_tmp_certificate(module)
+ private_key_path = create_tmp_private_key(module)
+ try:
+ if os.path.exists(keystore_path):
+ os.remove(keystore_path)
+
+ keystore_p12_path = "/tmp/keystore.p12"
+ if os.path.exists(keystore_p12_path):
+ os.remove(keystore_p12_path)
+
+ export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path,
+ "-inkey", private_key_path, "-out",
+ keystore_p12_path, "-passout", "stdin"]
+
+ # when keypass is provided, add -passin
+ cmd_stdin = ""
+ if keypass:
+ export_p12_cmd.append("-passin")
+ export_p12_cmd.append("stdin")
+ cmd_stdin = "%s\n" % keypass
+
+ cmd_stdin += "%s\n%s" % (password, password)
+ (rc, export_p12_out, export_p12_err) = run_commands(module, export_p12_cmd, data=cmd_stdin)
+ if rc != 0:
+ return module.fail_json(msg=export_p12_out,
+ rc=rc,
+ cmd=export_p12_cmd)
+
+ import_keystore_cmd = [keytool_bin, "-importkeystore",
+ "-destkeystore", keystore_path,
+ "-srckeystore", keystore_p12_path,
+ "-srcstoretype", "pkcs12",
+ "-alias", name,
+ "-deststorepass", password,
+ "-srcstorepass", password,
+ "-noprompt"]
+ (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd, data=None)
+ if rc == 0:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=True,
+ msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd,
+ stdout_lines=import_keystore_out)
+ else:
+ return module.fail_json(msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd)
+ finally:
+ os.remove(certificate_path)
+ os.remove(private_key_path)
+
+
+def update_jks_perm(module, keystore_path):
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=keystore_path)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = keystore_path
+ file_args = module.load_file_common_arguments(module.params)
+ module.set_fs_attributes_if_different(file_args, False)
+
+
+def process_jks(module):
+ name = module.params['name']
+ password = module.params['password']
+ keypass = module.params['private_key_passphrase']
+ keystore_path = module.params['dest']
+ force = module.params['force']
+ openssl_bin = module.get_bin_path('openssl', True)
+ keytool_bin = module.get_bin_path('keytool', True)
+
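+    # With force=yes the keystore is always recreated; otherwise it is only
+    # recreated when the certificate fingerprint no longer matches.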
+ if os.path.exists(keystore_path):
+ if force:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name):
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if not module.check_mode:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=False)
+ else:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+
+
+class ArgumentSpec(object):
+ def __init__(self):
+ self.supports_check_mode = True
+ self.add_file_common_args = True
+ argument_spec = dict(
+ name=dict(required=True),
+ certificate=dict(required=True, no_log=True),
+ private_key=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ dest=dict(required=True, type='path'),
+ force=dict(required=False, default=False, type='bool'),
+ private_key_passphrase=dict(required=False, no_log=True, type='str')
+ )
+ self.argument_spec = argument_spec
+
+
+def main():
+ spec = ArgumentSpec()
+ module = AnsibleModule(
+ argument_spec=spec.argument_spec,
+ add_file_common_args=spec.add_file_common_args,
+ supports_check_mode=spec.supports_check_mode
+ )
+ process_jks(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py
new file mode 100644
index 00000000..ff6f9c22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kernel_blacklist
+author:
+- Matthias Vogelgesang (@matze)
+short_description: Blacklist kernel modules
+description:
+ - Add or remove kernel modules from blacklist.
+options:
+ name:
+ type: str
+ description:
+ - Name of kernel module to black- or whitelist.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the module should be present in the blacklist or absent.
+ choices: [ absent, present ]
+ default: present
+ blacklist_file:
+ type: str
+ description:
+ - If specified, use this blacklist file instead of
+ C(/etc/modprobe.d/blacklist-ansible.conf).
+'''
+
+EXAMPLES = '''
+- name: Blacklist the nouveau driver module
+ community.general.kernel_blacklist:
+ name: nouveau
+ state: present
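+
+# A minimal sketch using a custom blacklist file; the path is illustrative:
+- name: Blacklist the nouveau driver in a dedicated file
+  community.general.kernel_blacklist:
+    name: nouveau
+    state: present
+    blacklist_file: /etc/modprobe.d/blacklist-nouveau.conf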
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Blacklist(object):
+ def __init__(self, module, filename, checkmode):
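+        # Here 'module' is the name of the kernel module to (un)blacklist,
+        # not the AnsibleModule object.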
+ self.filename = filename
+ self.module = module
+ self.checkmode = checkmode
+
+ def create_file(self):
+ if not self.checkmode and not os.path.exists(self.filename):
+ open(self.filename, 'a').close()
+ return True
+ elif self.checkmode and not os.path.exists(self.filename):
+ self.filename = os.devnull
+ return True
+ else:
+ return False
+
+ def get_pattern(self):
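+        # The name is interpolated into the pattern verbatim; this assumes
+        # kernel module names contain no regex metacharacters.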
+        return r'^blacklist\s+' + self.module + '$'
+
+ def readlines(self):
+ f = open(self.filename, 'r')
+ lines = f.readlines()
+ f.close()
+ return lines
+
+ def module_listed(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ for line in lines:
+ stripped = line.strip()
+ if stripped.startswith('#'):
+ continue
+
+ if re.match(pattern, stripped):
+ return True
+
+ return False
+
+ def remove_module(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ if self.checkmode:
+ f = open(os.devnull, 'w')
+ else:
+ f = open(self.filename, 'w')
+
+ for line in lines:
+ if not re.match(pattern, line.strip()):
+ f.write(line)
+
+ f.close()
+
+ def add_module(self):
+ if self.checkmode:
+ f = open(os.devnull, 'a')
+ else:
+ f = open(self.filename, 'a')
+
+ f.write('blacklist %s\n' % self.module)
+
+ f.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ blacklist_file=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ args = dict(changed=False, failed=False,
+ name=module.params['name'], state=module.params['state'])
+
+ filename = '/etc/modprobe.d/blacklist-ansible.conf'
+
+ if module.params['blacklist_file']:
+ filename = module.params['blacklist_file']
+
+ blacklist = Blacklist(args['name'], filename, module.check_mode)
+
+    args['changed'] = blacklist.create_file()
+
+ if blacklist.module_listed():
+ if args['state'] == 'absent':
+ blacklist.remove_module()
+ args['changed'] = True
+ else:
+ if args['state'] == 'present':
+ blacklist.add_module()
+ args['changed'] = True
+
+ module.exit_json(**args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py
new file mode 100644
index 00000000..919d8d7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py
@@ -0,0 +1,514 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Martin Migasiewicz <migasiew.nk@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: launchd
+author:
+- Martin Migasiewicz (@martinm82)
+short_description: Manage macOS services
+version_added: 1.0.0
+description:
+- Manage launchd services on target macOS hosts.
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - Launchd does not support C(restarted) nor C(reloaded) natively.
+ These will trigger a stop/start (restarted) or an unload/load
+ (reloaded).
+ - C(restarted) unloads and loads the service before start to ensure
+ that the latest job definition (plist) is used.
+ - C(reloaded) unloads and loads the service to ensure that the latest
+ job definition (plist) is used. Whether a service is started or
+ stopped depends on the content of the definition file.
+ type: str
+ choices: [ reloaded, restarted, started, stopped, unloaded ]
+ enabled:
+ description:
+ - Whether the service should start on boot.
+    - B(At least one of state and enabled is required.)
+ type: bool
+ force_stop:
+ description:
+ - Whether the service should not be restarted automatically by launchd.
+ - Services might have the 'KeepAlive' attribute set to true in a launchd configuration.
+      If this is set to true, stopping a service will cause launchd to start the service again.
+ - Set this option to C(yes) to let this module change the 'KeepAlive' attribute to false.
+ type: bool
+ default: no
+notes:
+- A user must be privileged to manage services using this module.
+requirements:
+- A system managed by launchd
+- The plistlib python library
+'''
+
+EXAMPLES = r'''
+- name: Make sure spotify webhelper is started
+ community.general.launchd:
+ name: com.spotify.webhelper
+ state: started
+
+- name: Deploy custom memcached job definition
+ template:
+ src: org.memcached.plist.j2
+ dest: /Library/LaunchDaemons/org.memcached.plist
+
+- name: Run memcached
+ community.general.launchd:
+ name: org.memcached
+ state: started
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+ force_stop: yes
+
+- name: Restart memcached
+ community.general.launchd:
+ name: org.memcached
+ state: restarted
+
+- name: Unload memcached
+ community.general.launchd:
+ name: org.memcached
+ state: unloaded
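+
+# A sketch combining state with boot-time behaviour; enabled toggles the
+# RunAtLoad key in the job's plist:
+- name: Ensure memcached is started and starts at boot
+  community.general.launchd:
+    name: org.memcached
+    state: started
+    enabled: yes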
+'''
+
+RETURN = r'''
+status:
+ description: Metadata about service status
+ returned: always
+ type: dict
+ sample:
+ {
+ "current_pid": "-",
+ "current_state": "stopped",
+ "previous_pid": "82636",
+ "previous_state": "running"
+ }
+'''
+
+import os
+import plistlib
+from abc import ABCMeta, abstractmethod
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class ServiceState:
+ UNKNOWN = 0
+ LOADED = 1
+ STOPPED = 2
+ STARTED = 3
+ UNLOADED = 4
+
+ @staticmethod
+ def to_string(state):
+ strings = {
+ ServiceState.UNKNOWN: 'unknown',
+ ServiceState.LOADED: 'loaded',
+ ServiceState.STOPPED: 'stopped',
+ ServiceState.STARTED: 'started',
+ ServiceState.UNLOADED: 'unloaded'
+ }
+ return strings[state]
+
+
+class Plist:
+ def __init__(self, module, service):
+ self.__changed = False
+ self.__service = service
+
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+
+ # Check if readPlist is available or not
+ self.old_plistlib = hasattr(plistlib, 'readPlist')
+
+ self.__file = self.__find_service_plist(service)
+ if self.__file is None:
+ msg = 'Unable to infer the path of %s service plist file' % service
+ if pid is None and state == ServiceState.UNLOADED:
+ msg += ' and it was not found among active services'
+ module.fail_json(msg=msg)
+ self.__update(module)
+
+ @staticmethod
+ def __find_service_plist(service_name):
+ """Finds the plist file associated with a service"""
+
+ launchd_paths = [
+ os.path.expanduser('~/Library/LaunchAgents'),
+ '/Library/LaunchAgents',
+ '/Library/LaunchDaemons',
+ '/System/Library/LaunchAgents',
+ '/System/Library/LaunchDaemons'
+ ]
+
+ for path in launchd_paths:
+ try:
+ files = os.listdir(path)
+ except OSError:
+ continue
+
+ filename = '%s.plist' % service_name
+ if filename in files:
+ return os.path.join(path, filename)
+ return None
+
+ def __update(self, module):
+ self.__handle_param_enabled(module)
+ self.__handle_param_force_stop(module)
+
+ def __read_plist_file(self, module):
+ service_plist = {}
+ if self.old_plistlib:
+ return plistlib.readPlist(self.__file)
+
+ # readPlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'rb') as plist_fp:
+ service_plist = plistlib.load(plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to read plist file "
+ "%s due to %s" % (self.__file, to_native(e)))
+ return service_plist
+
+ def __write_plist_file(self, module, service_plist=None):
+ if not service_plist:
+ service_plist = {}
+
+ if self.old_plistlib:
+ plistlib.writePlist(service_plist, self.__file)
+ return
+ # writePlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'wb') as plist_fp:
+ plistlib.dump(service_plist, plist_fp)
+ except Exception as e:
+            module.fail_json(msg="Failed to write to plist file "
+                                 "%s due to %s" % (self.__file, to_native(e)))
+
+ def __handle_param_enabled(self, module):
+ if module.params['enabled'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Enable/disable service startup at boot if requested
+ # Launchctl does not expose functionality to set the RunAtLoad
+ # attribute of a job definition. So we parse and modify the job
+ # definition plist file directly for this purpose.
+ if module.params['enabled'] is not None:
+ enabled = service_plist.get('RunAtLoad', False)
+ if module.params['enabled'] != enabled:
+ service_plist['RunAtLoad'] = module.params['enabled']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def __handle_param_force_stop(self, module):
+ if module.params['force_stop'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Set KeepAlive to false in case force_stop is defined to avoid
+ # that the service gets restarted when stopping was requested.
+ if module.params['force_stop'] is not None:
+ keep_alive = service_plist.get('KeepAlive', False)
+ if module.params['force_stop'] and keep_alive:
+ service_plist['KeepAlive'] = not module.params['force_stop']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def is_changed(self):
+ return self.__changed
+
+ def get_file(self):
+ return self.__file
+
+
+class LaunchCtlTask(object):
+ __metaclass__ = ABCMeta
+ WAITING_TIME = 5 # seconds
+
+ def __init__(self, module, service, plist):
+ self._module = module
+ self._service = service
+ self._plist = plist
+ self._launch = self._module.get_bin_path('launchctl', True)
+
+ def run(self):
+ """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc.
+ and returns the new state and pid.
+ """
+ self.runCommand()
+ return self.get_state()
+
+ @abstractmethod
+ def runCommand(self):
+ pass
+
+ def get_state(self):
+ rc, out, err = self._launchctl("list")
+ if rc != 0:
+ self._module.fail_json(
+                msg='Failed to get status of %s' % self._service)
+
+ state = ServiceState.UNLOADED
+ service_pid = "-"
+ status_code = None
+ for line in out.splitlines():
+ if line.strip():
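+                # Each 'launchctl list' line is "PID\tStatus\tLabel", e.g.
+                # "1234\t0\tcom.example.service"; PID is '-' when not running.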
+ pid, last_exit_code, label = line.split('\t')
+ if label.strip() == self._service:
+ service_pid = pid
+ status_code = last_exit_code
+
+ # From launchctl man page:
+ # If the number [...] is negative, it represents the
+ # negative of the signal which killed the job. Thus,
+ # "-15" would indicate that the job was terminated with
+ # SIGTERM.
+ if last_exit_code not in ['0', '-2', '-3', '-9', '-15']:
+ # Something strange happened and we have no clue in
+ # which state the service is now. Therefore we mark
+ # the service state as UNKNOWN.
+ state = ServiceState.UNKNOWN
+ elif pid != '-':
+ # PID seems to be an integer so we assume the service
+ # is started.
+ state = ServiceState.STARTED
+ else:
+ # Exit code is 0 and PID is not available so we assume
+ # the service is stopped.
+ state = ServiceState.STOPPED
+ break
+ return (state, service_pid, status_code, err)
+
+ def start(self):
+ rc, out, err = self._launchctl("start")
+ # Unfortunately launchd does not wait until the process really started.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def stop(self):
+ rc, out, err = self._launchctl("stop")
+ # Unfortunately launchd does not wait until the process really stopped.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def restart(self):
+ # TODO: check for rc, out, err
+ self.stop()
+ return self.start()
+
+ def reload(self):
+ # TODO: check for rc, out, err
+ self.unload()
+ return self.load()
+
+ def load(self):
+ return self._launchctl("load")
+
+ def unload(self):
+ return self._launchctl("unload")
+
+ def _launchctl(self, command):
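+        # 'load'/'unload' operate on the plist path, 'start'/'stop' on the
+        # service label; anything else (e.g. 'list') gets no extra argument.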
+ service_or_plist = self._plist.get_file() if command in [
+ 'load', 'unload'] else self._service if command in ['start', 'stop'] else ""
+
+ rc, out, err = self._module.run_command(
+ '%s %s %s' % (self._launch, command, service_or_plist))
+
+ if rc != 0:
+ msg = "Unable to %s '%s' (%s): '%s'" % (
+ command, self._service, self._plist.get_file(), err)
+ self._module.fail_json(msg=msg)
+
+ return (rc, out, err)
+
+
+class LaunchCtlStart(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state in (ServiceState.STOPPED, ServiceState.LOADED):
+ self.reload()
+ self.start()
+ elif state == ServiceState.STARTED:
+ # In case the service is already in started state but the
+ # job definition was changed we need to unload/load the
+ # service and start the service again.
+ if self._plist.is_changed():
+ self.reload()
+ self.start()
+ elif state == ServiceState.UNLOADED:
+ self.load()
+ self.start()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and start the service again.
+ self.reload()
+ self.start()
+
+
+class LaunchCtlStop(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStop, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.STOPPED:
+ # In case the service is stopped and we might later decide
+ # to start it, we need to reload the job definition by
+ # forcing an unload and load first.
+ # Afterwards we need to stop it as it might have been
+ # started again (KeepAlive or RunAtLoad).
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state in (ServiceState.STARTED, ServiceState.LOADED):
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and stop the service gracefully.
+ self.reload()
+ self.stop()
+
+
+class LaunchCtlReload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlReload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.UNLOADED:
+ # launchd throws an error if we do an unload on an already
+ # unloaded service.
+ self.load()
+ else:
+ self.reload()
+
+
+class LaunchCtlUnload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlUnload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+ self.unload()
+
+
+class LaunchCtlRestart(LaunchCtlReload):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlRestart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ super(LaunchCtlRestart, self).runCommand()
+ self.start()
+
+
+class LaunchCtlList(LaunchCtlTask):
+ def __init__(self, module, service):
+ super(LaunchCtlList, self).__init__(module, service, None)
+
+ def runCommand(self):
+ # Do nothing, the list functionality is done by the
+ # base class run method.
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']),
+ enabled=dict(type='bool'),
+ force_stop=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[
+ ['state', 'enabled'],
+ ],
+ )
+
+ service = module.params['name']
+ action = module.params['state']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': {},
+ }
+
+ # We will tailor the plist file in case one of the options
+ # (enabled, force_stop) was specified.
+ plist = Plist(module, service)
+ result['changed'] = plist.is_changed()
+
+ # Gather information about the service to be controlled.
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+ result['status']['previous_state'] = ServiceState.to_string(state)
+ result['status']['previous_pid'] = pid
+
+ # Map the actions to specific tasks
+ tasks = {
+ 'started': LaunchCtlStart(module, service, plist),
+ 'stopped': LaunchCtlStop(module, service, plist),
+ 'restarted': LaunchCtlRestart(module, service, plist),
+ 'reloaded': LaunchCtlReload(module, service, plist),
+ 'unloaded': LaunchCtlUnload(module, service, plist)
+ }
+
+ status_code = '0'
+ # Run the requested task
+ if not module.check_mode:
+ state, pid, status_code, err = tasks[action].run()
+
+ result['status']['current_state'] = ServiceState.to_string(state)
+ result['status']['current_pid'] = pid
+ result['status']['status_code'] = status_code
+ result['status']['error'] = err
+
+ if (result['status']['current_state'] != result['status']['previous_state'] or
+ result['status']['current_pid'] != result['status']['previous_pid']):
+ result['changed'] = True
+ if module.check_mode:
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py
new file mode 100644
index 00000000..6f850791
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019, Kaarle Ritvanen <kaarle.ritvanen@datakunkku.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: lbu
+
+short_description: Local Backup Utility for Alpine Linux
+
+version_added: '0.2.0'
+
+description:
+- Manage Local Backup Utility of Alpine Linux in run-from-RAM mode
+
+options:
+ commit:
+ description:
+ - Control whether to commit changed files.
+ type: bool
+ exclude:
+ description:
+ - List of paths to exclude.
+ type: list
+ elements: str
+ include:
+ description:
+ - List of paths to include.
+ type: list
+ elements: str
+
+author:
+- Kaarle Ritvanen (@kunkku)
+'''
+
+EXAMPLES = '''
+# Commit changed files (if any)
+- name: Commit
+ community.general.lbu:
+ commit: true
+
+# Exclude path and commit
+- name: Exclude directory
+ community.general.lbu:
+ commit: true
+ exclude:
+ - /etc/opt
+
+# Include paths without committing
+- name: Include file and directory
+ community.general.lbu:
+ include:
+ - /root/.ssh/authorized_keys
+ - /var/lib/misc
+'''
+
+RETURN = '''
+msg:
+ description: Error message
+ type: str
+ returned: on failure
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import os.path
+
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec={
+ 'commit': {'type': 'bool'},
+ 'exclude': {'type': 'list', 'elements': 'str'},
+ 'include': {'type': 'list', 'elements': 'str'}
+ },
+ supports_check_mode=True
+ )
+
+ changed = False
+
+ def run_lbu(*args):
+ code, stdout, stderr = module.run_command(
+ [module.get_bin_path('lbu', required=True)] + list(args)
+ )
+ if code:
+ module.fail_json(changed=changed, msg=stderr)
+ return stdout
+
+ update = False
+ commit = False
+
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ paths = run_lbu(param, '-l').split('\n')
+ for path in module.params[param]:
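+                # 'lbu include/exclude -l' lists tracked paths without the
+                # leading slash, so normalize the requested path the same way.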
+ if os.path.normpath('/' + path)[1:] not in paths:
+ update = True
+
+ if module.params['commit']:
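+        # 'lbu status' prints pending changes; any output means there is
+        # something to commit.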
+ commit = update or run_lbu('status') > ''
+
+ if module.check_mode:
+ module.exit_json(changed=update or commit)
+
+ if update:
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ run_lbu(param, *module.params[param])
+ changed = True
+
+ if commit:
+ run_lbu('commit')
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py
new file mode 100644
index 00000000..27ecca8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, Nathan Davison <ndavison85@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: listen_ports_facts
+author:
+ - Nathan Davison (@ndavison)
+description:
+  - Gather facts on processes listening on TCP and UDP ports using the netstat command.
+ - This module currently supports Linux only.
+requirements:
+ - netstat
+short_description: Gather facts on processes listening on TCP and UDP ports.
+'''
+
+EXAMPLES = r'''
+- name: Gather facts on listening ports
+ community.general.listen_ports_facts:
+
+- name: TCP whitelist violation
+ ansible.builtin.debug:
+ msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
+ vars:
+    tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
+ tcp_whitelist:
+ - 22
+ - 25
+ loop: "{{ tcp_listen_violations }}"
+
+- name: List TCP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
+
+- name: List UDP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
+
+- name: List all ports
+ ansible.builtin.debug:
+ msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Dictionary containing details of TCP and UDP ports with listening servers
+ returned: always
+ type: complex
+ contains:
+ tcp_listen:
+ description: A list of processes that are listening on a TCP port.
+ returned: if TCP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "mysqld"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 1223
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 3306
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "tcp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "mysql"
+ udp_listen:
+ description: A list of processes that are listening on a UDP port.
+ returned: if UDP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "rsyslogd"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 609
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 514
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "udp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "root"
+'''
+
+import re
+import platform
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def netStatParse(raw):
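+    # Parses 'netstat -plunt' output. A TCP line looks like:
+    #   tcp  0  0 0.0.0.0:3306  0.0.0.0:*  LISTEN  1223/mysqld
+    # UDP lines have no state column, so the PID/program field is one earlier.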
+ results = list()
+ for line in raw.splitlines():
+ listening_search = re.search('[^ ]+:[0-9]+', line)
+ if listening_search:
+ splitted = line.split()
+ conns = re.search('([^ ]+):([0-9]+)', splitted[3])
+ pidstr = ''
+ if 'tcp' in splitted[0]:
+ protocol = 'tcp'
+ pidstr = splitted[6]
+ elif 'udp' in splitted[0]:
+ protocol = 'udp'
+ pidstr = splitted[5]
+ pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr)
+ if conns and pids:
+ address = conns.group(1)
+ port = conns.group(2)
+                pid = pids.group(2) if pids.group(2) else 0
+                name = pids.group(3) if pids.group(3) else ''
+ result = {
+ 'pid': int(pid),
+ 'address': address,
+ 'port': int(port),
+ 'protocol': protocol,
+ 'name': name,
+ }
+ if result not in results:
+ results.append(result)
+ else:
+ raise EnvironmentError('Could not get process information for the listening ports.')
+ return results
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ if platform.system() != 'Linux':
+ module.fail_json(msg='This module requires Linux.')
+
+ def getPidSTime(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
+ stime = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+                if 'STARTED' not in line:
+ stime = line
+ return stime
+
+ def getPidUser(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
+ user = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if line != 'USER':
+ user = line
+ return user
+
+ result = {
+ 'changed': False,
+ 'ansible_facts': {
+ 'tcp_listen': [],
+ 'udp_listen': [],
+ },
+ }
+
+ try:
+ netstat_cmd = module.get_bin_path('netstat', True)
+
+ # which ports are listening for connections?
+ rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt'])
+ if rc == 0:
+ netstatOut = netStatParse(stdout)
+ for p in netstatOut:
+ p['stime'] = getPidSTime(p['pid'])
+ p['user'] = getPidUser(p['pid'])
+ if p['protocol'] == 'tcp':
+ result['ansible_facts']['tcp_listen'].append(p)
+ elif p['protocol'] == 'udp':
+ result['ansible_facts']['udp_listen'].append(p)
+ except (KeyError, EnvironmentError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py
new file mode 100644
index 00000000..9a5b84f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+author:
+- Augustus Kling (@AugustusKling)
+options:
+ name:
+ type: str
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the locale shall be present.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+ community.general.locale_gen:
+ name: de_CH.UTF-8
+ state: present
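+
+- name: Ensure a locale is absent
+  community.general.locale_gen:
+    name: de_CH.UTF-8
+    state: absent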
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale_gen module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+ checking either :
+    * if the locale is present in /etc/locale.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+    with open(__locales_available, 'r') as fd:
+        for line in fd:
+            result = re_compiled.match(line)
+            if result and result.group('locale') == name:
+                return True
+    return False
+
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ output = to_native(output)
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+    with open("/etc/locale.gen", "r") as f:
+        lines = [line.replace(existing_line, new_line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
+    with open("/etc/locale.gen", "r") as f:
+        lines = [re.sub(search_string, new_string, line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+        with open("/var/lib/locales/supported.d/local", "r") as f:
+            content = f.readlines()
+        with open("/var/lib/locales/supported.d/local", "w") as f:
+            for line in content:
+                locale, charset = line.split(' ')
+                if locale != name:
+                    f.write(line)
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locale you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state != state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if ubuntuMode is False:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError as e:
+ module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py
new file mode 100644
index 00000000..25f261ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
+ type: list
+ elements: str
+ pesize:
+ description:
+ - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ (where the sector size is the largest sector size of the PVs currently used in the VG),
+ or at least 128KiB."
+ - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ type: str
+ default: "4"
+ pv_options:
+ description:
+ - Additional options to pass to C(pvcreate) when creating the volume group.
+ type: str
+ pvresize:
+ description:
+ - If C(yes), resize the physical volume to the maximum available size.
+ type: bool
+ default: false
+ version_added: '0.2.0'
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ type: str
+ state:
+ description:
+ - Control if the volume group exists.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ force:
+ description:
+    - If C(yes), allows removing a volume group that still contains logical volumes.
+ type: bool
+ default: no
+seealso:
+- module: community.general.filesystem
+- module: community.general.lvol
+- module: community.general.parted
+notes:
+ - This module does not modify PE size for already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
+
+- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb
+ pesize: 128K
+
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+
+- name: Remove a volume group with name vg.services
+ community.general.lvg:
+ vg: vg.services
+ state: absent
+
+- name: Create a volume group on top of /dev/sda3 and resize the volume group /dev/sda3 to the maximum possible
+ community.general.lvg:
+ vg: resizableVG
+ pvs: /dev/sda3
+ pvresize: yes
+'''
+
+import itertools
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
+
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ pvs=dict(type='list', elements='str'),
+ pesize=dict(type='str', default='4'),
+ pv_options=dict(type='str', default=''),
+ pvresize=dict(type='bool', default=False),
+ vg_options=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pvresize = module.boolean(module.params['pvresize'])
+ pesize = module.params['pesize']
+ pvoptions = module.params['pv_options'].split()
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = list(module.params['pvs'])
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths not symlinks so replace symlinks with actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state == 'present':
+ # check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found." % test_dev)
+
+ # get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ if dev_list:
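+            # Restrict pvs output to the requested devices plus any PV already
+            # in this VG; other PVs do not affect the decisions below.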
+ pvs_filter_pv_name = ' || '.join(
+ 'pv_name = {0}'.format(x)
+ for x in itertools.chain(dev_list, module.params['pvs'])
+ )
+ pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
+ pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ else:
+ pvs_filter = ''
+ rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
+
+ # check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ # create VG
+ if module.check_mode:
+ changed = True
+ else:
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+                vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ # remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
+ else:
+ module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))
+
+ # resize VG
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
+
+ if current_devs:
+ if state == 'present' and pvresize:
+ for device in current_devs:
+ pvresize_cmd = module.get_bin_path('pvresize', True)
+ pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
+ pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"]
+ pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
+ rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"])
+ dev_size = int(dev_size.replace(" ", ""))
+ rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"])
+ pv_size = int(pv_size.replace(" ", ""))
+ rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"])
+ pe_start = int(pe_start.replace(" ", ""))
+ rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"])
+ vg_extent_size = int(vg_extent_size.replace(" ", ""))
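+                    # Resize only when the gap between the device size and the
+                    # space already claimed by the PV exceeds one VG extent.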
+ if (dev_size - (pe_start + pv_size)) > vg_extent_size:
+ if module.check_mode:
+ changed = True
+ else:
+ rc, _, err = module.run_command([pvresize_cmd, device])
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err)
+ else:
+ changed = True
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py
new file mode 100644
index 00000000..fa50007e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py
@@ -0,0 +1,566 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - Alexander Bulimov (@abulimov)
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+ - This module creates, removes or resizes logical volumes.
+options:
+ vg:
+ type: str
+ required: true
+ description:
+ - The volume group this logical volume is part of.
+ lv:
+ type: str
+ description:
+ - The name of the logical volume.
+ size:
+ type: str
+ description:
+    - The size of the logical volume, according to lvcreate(8) --size, by
+      default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
+      according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE].
+      Float values must begin with a digit.
+      Resizing using percentage values was not supported prior to Ansible 2.1.
+ state:
+ type: str
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ choices: [ absent, present ]
+ default: present
+ active:
+ description:
+ - Whether the volume is active and visible to the host.
+ type: bool
+ default: 'yes'
+ force:
+ description:
+    - Shrink or remove operations on volumes require this switch. Ensures that
+      filesystems never get corrupted or destroyed by mistake.
+ type: bool
+ default: 'no'
+ opts:
+ type: str
+ description:
+ - Free-form options to be passed to the lvcreate command.
+ snapshot:
+ type: str
+ description:
+    - The name of the snapshot volume.
+ pvs:
+ type: str
+ description:
+ - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
+ thinpool:
+ type: str
+ description:
+ - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
+ shrink:
+ description:
+      - Shrink if the current size is higher than the requested size.
+ type: bool
+ default: 'yes'
+ resizefs:
+ description:
+ - Resize the underlying filesystem together with the logical volume.
+ type: bool
+ default: 'no'
+notes:
+ - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
+'''
+
+EXAMPLES = '''
+- name: Create a logical volume of 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+
+- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ pvs: /dev/sda,/dev/sdb
+
+- name: Create cache pool logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: lvcache
+ size: 512m
+ opts: --type cache-pool
+
+- name: Create a logical volume of 512g.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+
+- name: Create a logical volume the size of all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%FREE
+
+- name: Create a logical volume with special options
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ opts: -r 16
+
+- name: Extend the logical volume to 1024m.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 1024
+
+- name: Extend the logical volume to consume all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: +100%FREE
+
+- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%PVS
+ resizefs: true
+
+- name: Resize the logical volume to % of VG
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 80%VG
+ force: yes
+
+- name: Reduce the logical volume to 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ force: yes
+
+- name: Set the logical volume to 512m and do not try to shrink if the requested size is lower than the current one
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ shrink: no
+
+- name: Remove the logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ state: absent
+ force: yes
+
+- name: Create a snapshot volume of the test logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ snapshot: snap1
+ size: 100m
+
+- name: Deactivate a logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ active: false
+
+- name: Create a deactivated logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ active: false
+
+- name: Create a thin pool of 512g
+ community.general.lvol:
+ vg: firefly
+ thinpool: testpool
+ size: 512g
+
+- name: Create a thin volume of 128g
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ thinpool: testpool
+ size: 128g
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LVOL_ENV_VARS = dict(
+ # make sure we use the C locale when running lvol-related commands
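+    # (their output is parsed by string matching below, so messages and
+    # number formats must not be localized)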
+ LANG='C',
+ LC_ALL='C',
+ LC_MESSAGES='C',
+ LC_CTYPE='C',
+)
+
+
+def mkversion(major, minor, patch):
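+    # e.g. mkversion(2, 2, 99) == 2 * 1000000 + 2 * 1000 + 99 == 2002099,
+    # so plain integer comparison orders LVM versions correctly (assuming
+    # minor and patch stay below 1000)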
+ return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
+
+
+def parse_lvs(data):
+ lvs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
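+        # parts[2] is the lv_attr string from lvs(8): its first character
+        # encodes the volume type ('t' = thin pool, 'V' = thin volume) and
+        # the fifth character is 'a' when the volume is active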
+ lvs.append({
+ 'name': parts[0].replace('[', '').replace(']', ''),
+ 'size': float(parts[1]),
+ 'active': (parts[2][4] == 'a'),
+ 'thinpool': (parts[2][0] == 't'),
+ 'thinvol': (parts[2][0] == 'V'),
+ })
+ return lvs
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'size': float(parts[1]),
+ 'free': float(parts[2]),
+ 'ext_size': float(parts[3])
+ })
+ return vgs
+
+
+def get_lvm_version(module):
+ ver_cmd = module.get_bin_path("lvm", required=True)
+ rc, out, err = module.run_command("%s version" % (ver_cmd))
+ if rc != 0:
+ return None
+ m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+ if not m:
+ return None
+ return mkversion(m.group(1), m.group(2), m.group(3))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str'),
+ size=dict(type='str'),
+ opts=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ shrink=dict(type='bool', default=True),
+ active=dict(type='bool', default=True),
+ snapshot=dict(type='str'),
+ pvs=dict(type='str'),
+ resizefs=dict(type='bool', default=False),
+ thinpool=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=(
+ ['lv', 'thinpool'],
+ ),
+ )
+
+ module.run_command_environ_update = LVOL_ENV_VARS
+
+ # Determine if the "--yes" option should be used
+ version_found = get_lvm_version(module)
+ if version_found is None:
+ module.fail_json(msg="Failed to get LVM version number")
+ version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
+ if version_found >= version_yesopt:
+ yesopt = "--yes"
+ else:
+ yesopt = ""
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ size = module.params['size']
+ opts = module.params['opts']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ shrink = module.boolean(module.params['shrink'])
+ active = module.boolean(module.params['active'])
+ resizefs = module.boolean(module.params['resizefs'])
+ thinpool = module.params['thinpool']
+ size_opt = 'L'
+ size_unit = 'm'
+ snapshot = module.params['snapshot']
+ pvs = module.params['pvs']
+
+ if pvs is None:
+ pvs = ""
+ else:
+ pvs = pvs.replace(",", " ")
+
+ if opts is None:
+ opts = ""
+
+ # Add --test option when running in check-mode
+ if module.check_mode:
+ test_opt = ' --test'
+ else:
+ test_opt = ''
+
+ if size:
+ # LVCREATE(8) -l --extents option with percentage
+ if '%' in size:
+ size_parts = size.split('%', 1)
+ size_percent = int(size_parts[0])
+ if size_percent > 100:
+ module.fail_json(msg="Size percentage cannot be larger than 100%")
+ size_whole = size_parts[1]
+ if size_whole == 'ORIGIN':
+ module.fail_json(msg="Snapshot Volumes are not supported")
+ elif size_whole not in ['VG', 'PVS', 'FREE']:
+ module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
+ size_opt = 'l'
+ size_unit = ''
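+            # e.g. "80%VG" yields size_percent 80 and size_whole 'VG', and
+            # switches from -L (absolute size) to -l (extents) mode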
+
+ if '%' not in size:
+ # LVCREATE(8) -L --size option unit
+ if size[-1].lower() in 'bskmgtpe':
+ size_unit = size[-1].lower()
+ size = size[0:-1]
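+                # e.g. "512g" leaves size "512" with size_unit 'g'; sizes
+                # without a suffix keep the default unit of megabytes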
+
+ try:
+ float(size)
+ if not size[0].isdigit():
+ raise ValueError()
+ except ValueError:
+ module.fail_json(msg="Bad size specification of '%s'" % size)
+
+ # when no unit, megabytes by default
+ if size_opt == 'l':
+ unit = 'm'
+ else:
+ unit = size_unit
+
+ # Get information on volume group requested
+ vgs_cmd = module.get_bin_path("vgs", required=True)
+ rc, current_vgs, err = module.run_command(
+ "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ vgs = parse_vgs(current_vgs)
+ this_vg = vgs[0]
+
+ # Get information on logical volume requested
+ lvs_cmd = module.get_bin_path("lvs", required=True)
+ rc, current_lvs, err = module.run_command(
+ "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ changed = False
+
+ lvs = parse_lvs(current_lvs)
+
+ if snapshot:
+ # Check snapshot pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == lv or test_lv['name'] == thinpool:
+ if not test_lv['thinpool'] and not thinpool:
+ break
+ else:
+ module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
+ else:
+ module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
+ check_lv = snapshot
+
+ elif thinpool:
+ if lv:
+ # Check thin volume pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == thinpool:
+ break
+ else:
+ module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
+ check_lv = lv
+ else:
+ check_lv = thinpool
+ else:
+ check_lv = lv
+
+ for test_lv in lvs:
+ if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
+ this_lv = test_lv
+ break
+ else:
+ this_lv = None
+
+ msg = ''
+ if this_lv is None:
+ if state == 'present':
+ # Require size argument except for snapshot of thin volumes
+ if (lv or thinpool) and not size:
+ for test_lv in lvs:
+ if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
+ break
+ else:
+ module.fail_json(msg="No size given.")
+
+ # create LV
+ lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ if snapshot is not None:
+ if size:
+ cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
+ else:
+ cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
+ elif thinpool and lv:
+ if size_opt == 'l':
+ module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
+ size_opt = 'V'
+ cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
+ elif thinpool and not lv:
+ cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
+ else:
+ cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ rc, _, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ if not force:
+ module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
+ lvremove_cmd = module.get_bin_path("lvremove", required=True)
+ rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
+
+ elif not size:
+ pass
+
+ elif size_opt == 'l':
+ # Resize LV based on % value
+ tool = None
+ size_free = this_vg['free']
+ if size_whole == 'VG' or size_whole == 'PVS':
+ size_requested = size_percent * this_vg['size'] / 100
+ else: # size_whole == 'FREE':
+ size_requested = size_percent * this_vg['free'] / 100
+
+ # Round down to the next lowest whole physical extent
+ size_requested -= (size_requested % this_vg['ext_size'])
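+            # e.g. with a 4m extent size a computed request of 49.9 becomes
+            # 49.9 - (49.9 % 4) = 48, i.e. twelve whole extents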
+
+ if '+' in size:
+ size_requested += this_lv['size']
+ if this_lv['size'] < size_requested:
+ if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
+ tool = module.get_bin_path("lvextend", required=True)
+ else:
+ module.fail_json(
+ msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
+ (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
+ )
+ elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
+ if size_requested == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ elif not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ else:
+ # resize LV based on absolute values
+ tool = None
+ if float(size) > this_lv['size']:
+ tool = module.get_bin_path("lvextend", required=True)
+ elif shrink and float(size) < this_lv['size']:
+ if float(size) == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ if not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ if this_lv is not None:
+ if active:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
+ else:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py
new file mode 100644
index 00000000..7314af28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: make
+short_description: Run targets in a Makefile
+requirements:
+- make
+author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+description:
+ - Run targets in a Makefile.
+options:
+ target:
+ description:
+ - The target to run.
+      - Typically this would be something like C(install), C(test) or C(all).
+ type: str
+ params:
+ description:
+ - Any extra parameters to pass to make.
+ type: dict
+ chdir:
+ description:
+ - Change to this directory before running make.
+ type: path
+ required: true
+ file:
+ description:
+ - Use a custom Makefile.
+ type: path
+ make:
+ description:
+ - Use a specific make binary.
+ type: path
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = r'''
+- name: Build the default target
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+
+- name: Run 'install' target as root
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: install
+ become: yes
+
+- name: Build 'all' target with extra arguments
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+
+- name: Build 'all' target with a custom Makefile
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ file: /some-project/Makefile
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_command(command, module, check_rc=True):
+ """
+ Run a command using the module, return
+ the result code and std{err,out} content.
+
+ :param command: list of command arguments
+ :param module: Ansible make module instance
+ :return: return code, stdout content, stderr content
+ """
+ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
+ return rc, sanitize_output(out), sanitize_output(err)
+
+
+def sanitize_output(output):
+ """
+ Sanitize the output string before we
+ pass it to module.fail_json. Defaults
+ the string to empty if it is None, else
+ strips trailing newlines.
+
+ :param output: output to sanitize
+ :return: sanitized output
+ """
+ if output is None:
+ return ''
+ else:
+ return output.rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(type='str'),
+ params=dict(type='dict'),
+ chdir=dict(type='path', required=True),
+ file=dict(type='path'),
+ make=dict(type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ make_path = module.params['make']
+ if make_path is None:
+ # Build up the invocation of `make` we are going to use
+ # For non-Linux OSes, prefer gmake (GNU make) over make
+ make_path = module.get_bin_path('gmake', required=False)
+ if not make_path:
+ # Fall back to system make
+ make_path = module.get_bin_path('make', required=True)
+ make_target = module.params['target']
+ if module.params['params'] is not None:
+ make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ else:
+ make_parameters = []
+
+ if module.params['file'] is not None:
+ base_command = [make_path, "-f", module.params['file'], make_target]
+ else:
+ base_command = [make_path, make_target]
+ base_command.extend(make_parameters)
+
+ # Check if the target is already up to date
+ rc, out, err = run_command(base_command + ['-q'], module, check_rc=False)
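+    # make -q ("question" mode) exits 0 when the target is already up to
+    # date and non-zero when it would do any work, without running recipes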
+ if module.check_mode:
+ # If we've been asked to do a dry run, we only need
+ # to report whether or not the target is up to date
+ changed = (rc != 0)
+ else:
+ if rc == 0:
+ # The target is up to date, so we don't have to
+ # do anything
+ changed = False
+ else:
+ # The target isn't up to date, so we need to run it
+ rc, out, err = run_command(base_command, module,
+ check_rc=True)
+ changed = True
+
+ # We don't report the return code, as if this module failed
+ # we would be calling fail_json from run_command, so even if
+ # we had a non-zero return code, we did not fail. However, if
+ # we report a non-zero return code here, we will be marked as
+ # failed regardless of what we signal using the failed= kwarg.
+ module.exit_json(
+ changed=changed,
+ failed=False,
+ stdout=out,
+ stderr=err,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir'],
+ file=module.params['file']
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py
new file mode 100644
index 00000000..1be917dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Kairo Araujo (@kairoaraujo)
+module: mksysb
+short_description: Generates AIX mksysb rootvg backups.
+description:
+ - This module manages a basic AIX mksysb (image) of rootvg.
+options:
+ backup_crypt_files:
+ description:
+      - Back up encrypted files.
+ type: bool
+ default: "yes"
+ backup_dmapi_fs:
+ description:
+ - Back up DMAPI filesystem files.
+ type: bool
+ default: "yes"
+ create_map_files:
+ description:
+      - Creates new MAP files.
+ type: bool
+ default: "no"
+ exclude_files:
+ description:
+ - Excludes files using C(/etc/rootvg.exclude).
+ type: bool
+ default: "no"
+ exclude_wpar_files:
+ description:
+ - Excludes WPAR files.
+ type: bool
+ default: "no"
+ extended_attrs:
+ description:
+      - Back up extended attributes.
+ type: bool
+ default: "yes"
+ name:
+ type: str
+ description:
+      - Backup name.
+ required: true
+ new_image_data:
+ description:
+      - Creates a new image.data file.
+ type: bool
+ default: "yes"
+ software_packing:
+ description:
+ - Exclude files from packing option listed in
+ C(/etc/exclude_packing.rootvg).
+ type: bool
+ default: "no"
+ storage_path:
+ type: str
+ description:
+      - Storage path where the mksysb backup will be stored.
+ required: true
+ use_snapshot:
+ description:
+ - Creates backup using snapshots.
+ type: bool
+ default: "no"
+'''
+
+EXAMPLES = '''
+- name: Running a backup image mksysb
+ community.general.mksysb:
+ name: myserver
+ storage_path: /repository/images
+ exclude_files: yes
+ exclude_wpar_files: yes
+'''
+
+RETURN = '''
+changed:
+ description: Return changed for mksysb actions as true or false.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backup_crypt_files=dict(type='bool', default=True),
+ backup_dmapi_fs=dict(type='bool', default=True),
+ create_map_files=dict(type='bool', default=False),
+ exclude_files=dict(type='bool', default=False),
+ exclude_wpar_files=dict(type='bool', default=False),
+ extended_attrs=dict(type='bool', default=True),
+ name=dict(type='str', required=True),
+ new_image_data=dict(type='bool', default=True),
+ software_packing=dict(type='bool', default=False),
+ storage_path=dict(type='str', required=True),
+ use_snapshot=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ # Command options.
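+    # Each boolean parameter maps to the presence or absence of a single
+    # mksysb flag; note that for software_packing, extended_attrs and
+    # backup_crypt_files the flag is emitted when the option is disabled.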
+ map_file_opt = {
+ True: '-m',
+ False: ''
+ }
+
+ use_snapshot_opt = {
+ True: '-T',
+ False: ''
+ }
+
+ exclude_files_opt = {
+ True: '-e',
+ False: ''
+ }
+
+ exclude_wpar_opt = {
+ True: '-G',
+ False: ''
+ }
+
+ new_image_data_opt = {
+ True: '-i',
+ False: ''
+ }
+
+ soft_packing_opt = {
+ True: '',
+ False: '-p'
+ }
+
+ extend_attr_opt = {
+ True: '',
+ False: '-a'
+ }
+
+ crypt_files_opt = {
+ True: '',
+ False: '-Z'
+ }
+
+ dmapi_fs_opt = {
+ True: '-a',
+ False: ''
+ }
+
+ backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
+ backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
+ create_map_files = map_file_opt[module.params['create_map_files']]
+ exclude_files = exclude_files_opt[module.params['exclude_files']]
+ exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
+ extended_attrs = extend_attr_opt[module.params['extended_attrs']]
+ name = module.params['name']
+ new_image_data = new_image_data_opt[module.params['new_image_data']]
+ software_packing = soft_packing_opt[module.params['software_packing']]
+ storage_path = module.params['storage_path']
+ use_snapshot = use_snapshot_opt[module.params['use_snapshot']]
+
+ # Validate if storage_path is a valid directory.
+ if os.path.isdir(storage_path):
+ if not module.check_mode:
+ # Generates the mksysb image backup.
+ mksysb_cmd = module.get_bin_path('mksysb', True)
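+            # -X is always passed so /tmp is expanded if more space is
+            # needed; the remaining flags come from the option maps above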
+ rc, mksysb_output, err = module.run_command(
+ "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
+ mksysb_cmd, create_map_files, use_snapshot, exclude_files,
+ exclude_wpar_files, software_packing, extended_attrs,
+ backup_crypt_files, backup_dmapi_fs, new_image_data,
+ storage_path, name))
+ if rc == 0:
+ module.exit_json(changed=True, msg=mksysb_output)
+ else:
+ module.fail_json(msg="mksysb failed.", rc=rc, err=err)
+
+ module.exit_json(changed=True)
+
+ else:
+ module.fail_json(msg="Storage path %s is not valid." % storage_path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py
new file mode 100644
index 00000000..0ab75235
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, David Stygstra <david.stygstra@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: modprobe
+short_description: Load or unload kernel modules
+author:
+ - David Stygstra (@stygstra)
+ - Julien Dauphant (@jdauphant)
+ - Matt Jeffery (@mattjeffery)
+description:
+ - Load or unload kernel modules.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of kernel module to manage.
+ state:
+ type: str
+ description:
+ - Whether the module should be present or absent.
+ choices: [ absent, present ]
+ default: present
+ params:
+ type: str
+ description:
+      - Module parameters.
+ default: ''
+'''
+
+EXAMPLES = '''
+- name: Add the 802.1q module
+ community.general.modprobe:
+ name: 8021q
+ state: present
+
+- name: Add the dummy module
+ community.general.modprobe:
+ name: dummy
+ state: present
+ params: 'numdummies=2'
+'''
+
+import os.path
+import shlex
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ params=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ params = module.params['params']
+ state = module.params['state']
+
+ # FIXME: Adding all parameters as result values is useless
+ result = dict(
+ changed=False,
+ name=name,
+ params=params,
+ state=state,
+ )
+
+ # Check if module is present
+ try:
+ present = False
+ with open('/proc/modules') as modules:
+ module_name = name.replace('-', '_') + ' '
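+            # /proc/modules lists names with underscores even when loaded
+            # with dashes; the trailing space avoids prefix matches
+            # (e.g. "dummy" matching "dummy_hcd")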
+ for line in modules:
+ if line.startswith(module_name):
+ present = True
+ break
+ if not present:
+ command = [module.get_bin_path('uname', True), '-r']
+ rc, uname_kernel_release, err = module.run_command(command)
+ module_file = '/' + name + '.ko'
+ builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(),
+ 'modules.builtin')
+ with open(builtin_path) as builtins:
+ for line in builtins:
+ if line.endswith(module_file):
+ present = True
+ break
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)
+
+ # Add/remove module as needed
+ if state == 'present':
+ if not present:
+ if not module.check_mode:
+ command = [module.get_bin_path('modprobe', True), name]
+ command.extend(shlex.split(params))
+ rc, out, err = module.run_command(command)
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+ elif state == 'absent':
+ if present:
+ if not module.check_mode:
+ rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py
new file mode 100644
index 00000000..0f7de471
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py
@@ -0,0 +1,537 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Thomas Caravia <taca@kadisius.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nosh
+author:
+ - "Thomas Caravia (@tacatac)"
+short_description: Manage services with nosh
+description:
+ - Control running and enabled state for system-wide or user services.
+ - BSD and Linux systems are supported.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the service to manage.
+ state:
+ type: str
+ required: false
+ choices: [ started, stopped, reset, restarted, reloaded ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ C(restarted) will always bounce the service.
+ C(reloaded) will send a SIGHUP or start the service.
+ C(reset) will start or stop the service according to whether it is
+ enabled or not.
+ enabled:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service, independently of C(*.preset) file
+ preference or running state. Mutually exclusive with I(preset). Will take
+ effect prior to I(state=reset).
+ preset:
+ required: false
+ type: bool
+ description:
+      - Enable or disable the service according to local preferences in C(*.preset) files.
+ Mutually exclusive with I(enabled). Only has an effect if set to true. Will take
+ effect prior to I(state=reset).
+ user:
+ required: false
+ default: 'no'
+ type: bool
+ description:
+ - Run system-control talking to the calling user's service manager, rather than
+ the system-wide service manager.
+requirements:
+ - A system with an active nosh service manager, see Notes for further information.
+notes:
+ - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
+'''
+
+EXAMPLES = '''
+- name: Start dnscache if not running
+ community.general.nosh: name=dnscache state=started
+
+- name: Stop mpd, if running
+ community.general.nosh: name=mpd state=stopped
+
+- name: Restart unbound or start it if not already running
+ community.general.nosh:
+ name: unbound
+ state: restarted
+
+- name: Reload fail2ban or start it if not already running
+ community.general.nosh:
+ name: fail2ban
+ state: reloaded
+
+- name: Disable nsd
+ community.general.nosh: name=nsd enabled=no
+
+- name: For package installers, set nginx running state according to local enable settings, preset and reset
+ community.general.nosh: name=nginx preset=True state=reset
+
+- name: Reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is
+ community.general.nosh: name=reboot state=started
+
+- name: Using conditionals with the module facts
+ tasks:
+ - name: Obtain information on tinydns service
+ community.general.nosh: name=tinydns
+ register: result
+
+ - name: Fail if service not loaded
+ ansible.builtin.fail: msg="The {{ result.name }} service is not loaded"
+ when: not result.status
+
+ - name: Fail if service is running
+ ansible.builtin.fail: msg="The {{ result.name }} service is running"
+ when: result.status and result.status['DaemontoolsEncoreState'] == "running"
+'''
+
+RETURN = '''
+name:
+ description: name used to find the service
+ returned: success
+ type: str
+ sample: "sshd"
+service_path:
+ description: resolved path for the service
+ returned: success
+ type: str
+ sample: "/var/sv/sshd"
+enabled:
+ description: whether the service is enabled at system bootstrap
+ returned: success
+ type: bool
+ sample: True
+preset:
+ description: whether the enabled status reflects the one set in the relevant C(*.preset) file
+ returned: success
+ type: bool
+ sample: 'False'
+state:
+ description: service process run state, C(None) if the service is not loaded and will not be started
+ returned: if state option is used
+ type: str
+ sample: "reloaded"
+status:
+ description: a dictionary with the key=value pairs returned by `system-control show-json` or C(None) if the service is not loaded
+ returned: success
+ type: complex
+ contains:
+ After:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"]
+ Before:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Conflicts:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: '[]'
+ DaemontoolsEncoreState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "running"
+ DaemontoolsState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "up"
+ Enabled:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: True
+ LogService:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "../cyclog@sshd"
+ MainPID:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 661
+ Paused:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ ReadyAfterRun:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ RemainAfterExit:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ Required-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: '[]'
+ RestartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RestartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ RunExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RunUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1
+ StartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StopExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StopUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Stopped-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Timestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ UTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Want:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "nothing"
+ Wanted-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"]
+ Wants:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"]
+user:
+ description: whether the user-level service manager is called
+ returned: success
+ type: bool
+ sample: False
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def run_sys_ctl(module, args):
+ sys_ctl = [module.get_bin_path('system-control', required=True)]
+ if module.params['user']:
+ sys_ctl = sys_ctl + ['--user']
+ return module.run_command(sys_ctl + args)
+
+
+def get_service_path(module, service):
+ (rc, out, err) = run_sys_ctl(module, ['find', service])
+ # fail if service not found
+ if rc != 0:
+ fail_if_missing(module, False, service, msg='host')
+ else:
+ return to_native(out).strip()
+
+
+def service_is_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path])
+ return rc == 0
+
+
+def service_is_preset_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path])
+ return to_native(out).strip().startswith("enable")
+
+
+def service_is_loaded(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path])
+ return rc == 0
+
+
+def get_service_status(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['show-json', service_path])
+    # will fail if the service is not loaded
+ if err is not None and err:
+ module.fail_json(msg=err)
+ else:
+ json_out = json.loads(to_native(out).strip())
+ status = json_out[service_path] # descend past service path header
+ return status
+
+
+def service_is_running(service_status):
+ return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running'])
+
+
+def handle_enabled(module, result, service_path):
+ """Enable or disable a service as needed.
+
+ - 'preset' will set the enabled state according to available preset file settings.
+ - 'enabled' will set the enabled state explicitly, independently of preset settings.
+
+ These options are set to "mutually exclusive" but the explicit 'enabled' option will
+ have priority if the check is bypassed.
+ """
+
+ # computed prior in control flow
+ preset = result['preset']
+ enabled = result['enabled']
+
+ # preset, effect only if option set to true (no reverse preset)
+ if module.params['preset']:
+ action = 'preset'
+
+ # run preset if needed
+ if preset != module.params['preset']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['preset'] = not preset
+ result['enabled'] = not enabled
+
+ # enabled/disabled state
+ if module.params['enabled'] is not None:
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['enabled'] = not enabled
+ result['preset'] = not preset
+
+
+def handle_state(module, result, service_path):
+ """Set service running state as needed.
+
+ Takes into account the fact that a service may not be loaded (no supervise directory) in
+ which case it is 'stopped' as far as the service manager is concerned. No status information
+ can be obtained and the service can only be 'started'.
+ """
+ # default to desired state, no action
+ result['state'] = module.params['state']
+ state = module.params['state']
+ action = None
+
+ # computed prior in control flow, possibly modified by handle_enabled()
+ enabled = result['enabled']
+
+ # service not loaded -> not started by manager, no status information
+ if not service_is_loaded(module, service_path):
+ if state in ['started', 'restarted', 'reloaded']:
+ action = 'start'
+ result['state'] = 'started'
+ elif state == 'reset':
+ if enabled:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ result['state'] = None
+ else:
+ result['state'] = None
+
+ # service is loaded
+ else:
+ # get status information
+ result['status'] = get_service_status(module, service_path)
+ running = service_is_running(result['status'])
+
+ if state == 'started':
+ if not running:
+ action = 'start'
+ elif state == 'stopped':
+ if running:
+ action = 'stop'
+ # reset = start/stop according to enabled status
+ elif state == 'reset':
+ if enabled is not running:
+ if running:
+ action = 'stop'
+ result['state'] = 'stopped'
+ else:
+ action = 'start'
+ result['state'] = 'started'
+ # start if not running, 'service' module constraint
+ elif state == 'restarted':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'condrestart'
+ # start if not running, 'service' module constraint
+ elif state == 'reloaded':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'hangup'
+
+ # change state as needed
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err))
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ preset=dict(type='bool'),
+ user=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['enabled', 'preset']],
+ )
+
+ service = module.params['name']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': None,
+ }
+
+ # check service can be found (or fail) and get path
+ service_path = get_service_path(module, service)
+
+ # get preliminary service facts
+ result['service_path'] = service_path
+ result['user'] = module.params['user']
+ result['enabled'] = service_is_enabled(module, service_path)
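+    # 'preset' is True when the current enabled state already matches what
+    # applying the relevant *.preset files would produce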
+ result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path)
+
+ # set enabled state, service need not be loaded
+ if module.params['enabled'] is not None or module.params['preset']:
+ handle_enabled(module, result, service_path)
+
+ # set service running state
+ if module.params['state'] is not None:
+ handle_state(module, result, service_path)
+
+ # get final service status if possible
+ if service_is_loaded(module, service_path):
+ result['status'] = get_service_status(module, service_path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py
new file mode 100644
index 00000000..64092fd1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ohai
+short_description: Returns inventory data from I(Ohai)
+description:
+ - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program
+ (U(https://docs.chef.io/ohai.html)) on the remote host and
+ returns JSON inventory data.
+ I(Ohai) data is a bit more verbose and nested than I(facter).
+options: {}
+notes: []
+requirements: [ "ohai" ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+# Retrieve (ohai) data from all Web servers and store in one-file per host
+ansible webservers -m ohai --tree=/tmp/ohaidata
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict()
+ )
+ cmd = ["/usr/bin/env", "ohai"]
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py
new file mode 100644
index 00000000..222bb82f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: open_iscsi
+author:
+- Serge van Ginderachter (@srvg)
+short_description: Manage iSCSI targets with Open-iSCSI
+description:
+  - Discover targets on a given portal, (dis)connect targets, mark targets for
+    manual or automatic startup, and return device nodes of connected targets.
+requirements:
+ - open_iscsi library and tools (iscsiadm)
+options:
+ portal:
+ description:
+ - The domain name or IP address of the iSCSI target.
+ type: str
+ aliases: [ ip ]
+ port:
+ description:
+ - The port on which the iSCSI target process listens.
+ type: str
+ default: 3260
+ target:
+ description:
+ - The iSCSI target name.
+ type: str
+ aliases: [ name, targetname ]
+ login:
+ description:
+ - Whether the target node should be connected.
+ type: bool
+ aliases: [ state ]
+ node_auth:
+ description:
+ - The value for C(discovery.sendtargets.auth.authmethod).
+ type: str
+ default: CHAP
+ node_user:
+ description:
+ - The value for C(discovery.sendtargets.auth.username).
+ type: str
+ node_pass:
+ description:
+ - The value for C(discovery.sendtargets.auth.password).
+ type: str
+ auto_node_startup:
+ description:
+ - Whether the target node should be automatically connected at startup.
+ type: bool
+ aliases: [ automatic ]
+ discover:
+ description:
+ - Whether the list of target nodes on the portal should be
+ (re)discovered and added to the persistent iSCSI database.
+ - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup)
+ to manual, hence combined with C(auto_node_startup=yes) will always return
+ a changed state.
+ type: bool
+ default: false
+ show_nodes:
+ description:
+ - Whether the list of nodes in the persistent iSCSI database should be returned by the module.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Perform a discovery on sun.com and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ portal: sun.com
+
+- name: Perform a discovery on 10.1.2.3 and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ ip: 10.1.2.3
+
+# NOTE: Only works if exactly one target is exported to the initiator
+- name: Discover targets on portal and login to the one available
+ community.general.open_iscsi:
+ portal: '{{ iscsi_target }}'
+ login: yes
+ discover: yes
+
+- name: Connect to the named target, after updating the local persistent database (cache)
+ community.general.open_iscsi:
+ login: yes
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+- name: Disconnect from the cached named target
+ community.general.open_iscsi:
+ login: no
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+'''
+
+import glob
+import os
+import socket
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+ISCSIADM = 'iscsiadm'
+
+
+def compare_nodelists(l1, l2):
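+    # note: sorts the caller's lists in place; harmless here because the
+    # node lists are not used in an order-sensitive way afterwards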
+ l1.sort()
+ l2.sort()
+ return l1 == l2
+
+
+def iscsi_get_cached_nodes(module, portal=None):
+ cmd = '%s --mode node' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ nodes = []
+ for line in lines:
+ # line format is "ip:port,target_portal_group_tag targetname"
+ parts = line.split()
+ if len(parts) > 2:
+ module.fail_json(msg='error parsing output', cmd=cmd)
+ target = parts[1]
+ parts = parts[0].split(':')
+ target_portal = parts[0]
+
+ if portal is None or portal == target_portal:
+ nodes.append(target)
+
+    # older versions of iscsiadm don't have nice return codes
+ # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
+ # err can contain [N|n]o records...
+ elif rc == 21 or (rc == 255 and "o records found" in err):
+ nodes = []
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ return nodes
+
+
+def iscsi_discover(module, portal, port):
+ cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_loggedon(module, target):
+ cmd = '%s --mode session' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ return target in out
+ elif rc == 21:
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_login(module, target, portal=None, port=None):
+ node_auth = module.params['node_auth']
+ node_user = module.params['node_user']
+ node_pass = module.params['node_pass']
+
+ if node_user:
+ params = [('node.session.auth.authmethod', node_auth),
+ ('node.session.auth.username', node_user),
+ ('node.session.auth.password', node_pass)]
+ for (name, value) in params:
+ cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
+ (rc, out, err) = module.run_command(cmd)
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)
+ if portal is not None and port is not None:
+ cmd += ' --portal %s:%s' % (portal, port)
+
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_logout(module, target):
+ cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_device_node(module, target):
+    # if anyone knows a better way to find out which device nodes get created
+    # for a given target...
+
+ devices = glob.glob('/dev/disk/by-path/*%s*' % target)
+ devdisks = []
+ for dev in devices:
+ # exclude partitions
+ if "-part" not in dev:
+ devdisk = os.path.realpath(dev)
+ # only add once (multi-path?)
+ if devdisk not in devdisks:
+ devdisks.append(devdisk)
+ return devdisks
+
+
+def target_isauto(module, target):
+ cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ for line in lines:
+ if 'node.startup' in line:
+ return 'automatic' in line
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setauto(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setmanual(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def main():
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+
+ # target
+ portal=dict(type='str', aliases=['ip']),
+ port=dict(type='str', default='3260'),
+ target=dict(type='str', aliases=['name', 'targetname']),
+ node_auth=dict(type='str', default='CHAP'),
+ node_user=dict(type='str'),
+ node_pass=dict(type='str', no_log=True),
+
+ # actions
+ login=dict(type='bool', aliases=['state']),
+ auto_node_startup=dict(type='bool', aliases=['automatic']),
+ discover=dict(type='bool', default=False),
+ show_nodes=dict(type='bool', default=False),
+ ),
+
+ required_together=[['node_user', 'node_pass']],
+ supports_check_mode=True,
+ )
+
+ global iscsiadm_cmd
+ iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
+
+ # parameters
+ portal = module.params['portal']
+ if portal:
+ try:
+ portal = socket.getaddrinfo(portal, None)[0][4][0]
+ except socket.gaierror:
+ module.fail_json(msg="Portal address is incorrect")
+
+ target = module.params['target']
+ port = module.params['port']
+ login = module.params['login']
+ automatic = module.params['auto_node_startup']
+ discover = module.params['discover']
+ show_nodes = module.params['show_nodes']
+
+ check = module.check_mode
+
+ cached = iscsi_get_cached_nodes(module, portal)
+
+ # return json dict
+ result = {}
+ result['changed'] = False
+
+ if discover:
+ if portal is None:
+ module.fail_json(msg="Need to specify at least the portal (ip) to discover")
+ elif check:
+ nodes = cached
+ else:
+ iscsi_discover(module, portal, port)
+ nodes = iscsi_get_cached_nodes(module, portal)
+ if not compare_nodelists(cached, nodes):
+ result['changed'] |= True
+ result['cache_updated'] = True
+ else:
+ nodes = cached
+
+ if login is not None or automatic is not None:
+ if target is None:
+ if len(nodes) > 1:
+ module.fail_json(msg="Need to specify a target")
+ else:
+ target = nodes[0]
+ else:
+ # check given target is in cache
+ check_target = False
+ for node in nodes:
+ if node == target:
+ check_target = True
+ break
+ if not check_target:
+ module.fail_json(msg="Specified target not found")
+
+ if show_nodes:
+ result['nodes'] = nodes
+
+ if login is not None:
+ loggedon = target_loggedon(module, target)
+ if (login and loggedon) or (not login and not loggedon):
+ result['changed'] |= False
+ if login:
+ result['devicenodes'] = target_device_node(module, target)
+ elif not check:
+ if login:
+ target_login(module, target, portal, port)
+ # give udev some time
+ time.sleep(1)
+ result['devicenodes'] = target_device_node(module, target)
+ else:
+ target_logout(module, target)
+ result['changed'] |= True
+ result['connection_changed'] = True
+ else:
+ result['changed'] |= True
+ result['connection_changed'] = True
+
+ if automatic is not None:
+ isauto = target_isauto(module, target)
+ if (automatic and isauto) or (not automatic and not isauto):
+ result['changed'] |= False
+ result['automatic_changed'] = False
+ elif not check:
+ if automatic:
+ target_setauto(module, target)
+ else:
+ target_setmanual(module, target)
+ result['changed'] |= True
+ result['automatic_changed'] = True
+ else:
+ result['changed'] |= True
+ result['automatic_changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py
new file mode 100644
index 00000000..817ed9f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Andrew Gaffney <andrew@agaffney.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: openwrt_init
+author:
+ - "Andrew Gaffney (@agaffney)"
+short_description: Manage services on OpenWrt.
+description:
+ - Controls OpenWrt services on remote hosts.
+options:
+ name:
+ type: str
+ description:
+ - Name of the service.
+ required: true
+ aliases: ['service']
+ state:
+ type: str
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ enabled:
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled are required.)
+ type: bool
+ pattern:
+ type: str
+ description:
+      - If the service does not respond to the C(running) command, name a
+        substring to look for in the output of the I(ps) command as a stand-in
+        for a 'running' result. If the string is found, the service is assumed
+        to be running.
+notes:
+ - One option other than name is required.
+requirements:
+ - An OpenWrt system (with python)
+'''
+
+EXAMPLES = '''
+- name: Start service httpd, if not running
+ community.general.openwrt_init:
+ state: started
+ name: httpd
+
+- name: Stop service cron, if running
+ community.general.openwrt_init:
+ name: cron
+ state: stopped
+
+- name: Reload service httpd, in all cases
+ community.general.openwrt_init:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd
+ community.general.openwrt_init:
+ name: httpd
+ enabled: yes
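+
+# Illustrative sketch: the service name and pattern below are assumptions for
+# a daemon whose init script lacks a 'running' command
+- name: Start service uhttpd if not running, detecting it via a ps pattern
+  community.general.openwrt_init:
+    name: uhttpd
+    state: started
+    pattern: /usr/sbin/uhttpd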
+'''
+
+RETURN = '''
+'''
+
+import os
+import glob
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+module = None
+init_script = None
+
+
+# ===============================
+# Check if service is enabled
+def is_enabled():
+    (rc, out, err) = module.run_command("%s enabled" % init_script)
+    return rc == 0
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ global module, init_script
+ # init
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ pattern=dict(type='str', required=False, default=None),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ # initialize
+ service = module.params['name']
+ init_script = '/etc/init.d/' + service
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ }
+
+ # check if service exists
+ if not os.path.exists(init_script):
+ module.fail_json(msg='service %s does not exist' % service)
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+ # do we need to enable the service?
+ enabled = is_enabled()
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ # openwrt init scripts can return a non-zero exit code on a successful 'enable'
+ # command if the init script doesn't contain a STOP value, so we ignore the exit
+ # code and explicitly check if the service is now in the desired state
+ if is_enabled() != module.params['enabled']:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ result['enabled'] = not enabled
+
+ if module.params['state'] is not None:
+ running = False
+
+ # check if service is currently running
+ if module.params['pattern']:
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+            # this should be busybox ps, so we only want/need the 'w' option
+ (rc, psout, pserr) = module.run_command('%s w' % psbin)
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ lines = psout.split("\n")
+ for line in lines:
+ if module.params['pattern'] in line and "pattern=" not in line:
+ # so as to not confuse ./hacking/test-module.py
+ running = True
+ break
+ else:
+ (rc, out, err) = module.run_command("%s running" % init_script)
+ if rc == 0:
+ running = True
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # determine action, if any
+ action = None
+ if module.params['state'] == 'started':
+ if not running:
+ action = 'start'
+ result['changed'] = True
+ elif module.params['state'] == 'stopped':
+ if running:
+ action = 'stop'
+ result['changed'] = True
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+ result['changed'] = True
+
+ if action:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py
new file mode 100644
index 00000000..a0362908
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py
@@ -0,0 +1,395 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com> (DO NOT CONTACT!)
+# Copyright: (c) 2019, Ansible project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: osx_defaults
+author:
+# DO NOT RE-ADD GITHUB HANDLE!
+- Franck Nijhof (!UNKNOWN)
+short_description: Manage macOS user defaults
+description:
+ - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
+ - macOS applications and other programs use the defaults system to record user preferences and other
+ information that must be maintained when the applications are not running (such as default font for new
+ documents, or the position of an Info panel).
+options:
+ domain:
+ description:
+ - The domain is a domain name of the form C(com.companyname.appname).
+ type: str
+ default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply.
+ - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
+ type: str
+ key:
+ description:
+ - The key of the user preference.
+ type: str
+ type:
+ description:
+ - The type of value to write.
+ type: str
+ choices: [ array, bool, boolean, date, float, int, integer, string ]
+ default: string
+ array_add:
+ description:
+ - Add new elements to the array for a key which has an array as its value.
+ type: bool
+ default: no
+ value:
+ description:
+ - The value to write.
+ - Only required when C(state=present).
+ type: raw
+ state:
+ description:
+ - The state of the user defaults.
+    - If set to C(list), the module will query the given parameter specified by C(key). Returns 'null' if nothing is found or the key is misspelled.
+ - C(list) added in version 2.8.
+ type: str
+ choices: [ absent, list, present ]
+ default: present
+ path:
+ description:
+ - The path in which to search for C(defaults).
+ type: str
+ default: /usr/bin:/usr/local/bin
+notes:
+  - macOS caches defaults. You may need to log out and back in for changes to apply.
+'''
+
+EXAMPLES = r'''
+
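+# Enable Safari's internal Debug menu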
+- community.general.osx_defaults:
+ domain: com.apple.Safari
+ key: IncludeInternalDebugMenu
+ type: bool
+ value: true
+ state: present
+
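+# Set the measurement units to centimeters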
+- community.general.osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+ state: present
+
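+# Enable automatic software update checks (system-wide preference, hence become)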
+- community.general.osx_defaults:
+ domain: /Library/Preferences/com.apple.SoftwareUpdate
+ key: AutomaticCheckEnabled
+ type: int
+ value: 1
+ become: yes
+
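+# Show the clock in the screensaver, for the current host only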
+- community.general.osx_defaults:
+ domain: com.apple.screensaver
+ host: currentHost
+ key: showClock
+ type: int
+ value: 1
+
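+# Write to the default NSGlobalDomain domain when none is given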
+- community.general.osx_defaults:
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+
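+# Set the preferred languages to English and Dutch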
+- community.general.osx_defaults:
+ key: AppleLanguages
+ type: array
+ value:
+ - en
+ - nl
+
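+# Remove a key from a domain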
+- community.general.osx_defaults:
+ domain: com.geekchimp.macable
+ key: ExampleKeyToRemove
+ state: absent
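+
+# Illustrative sketch: domain and key below are assumptions; array_add appends
+# elements instead of replacing the whole array
+- community.general.osx_defaults:
+    domain: com.example.demo
+    key: ExampleArrayKey
+    type: array
+    array_add: yes
+    value:
+      - extra-element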
+'''
+
+from datetime import datetime
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import binary_type, text_type
+
+
+# exceptions --------------------------------------------------------------- {{{
+class OSXDefaultsException(Exception):
+ def __init__(self, msg):
+ self.message = msg
+
+
+# /exceptions -------------------------------------------------------------- }}}
+
+# class OSXDefaults --------------------------------------------------------- {{{
+class OSXDefaults(object):
+    """ Class to manage macOS user defaults """
+
+ # init ---------------------------------------------------------------- {{{
+ def __init__(self, module):
+ """ Initialize this module. Finds 'defaults' executable and preps the parameters """
+ # Initial var for storing current defaults value
+ self.current_value = None
+ self.module = module
+ self.domain = module.params['domain']
+ self.host = module.params['host']
+ self.key = module.params['key']
+ self.type = module.params['type']
+ self.array_add = module.params['array_add']
+ self.value = module.params['value']
+ self.state = module.params['state']
+ self.path = module.params['path']
+
+ # Try to find the defaults executable
+ self.executable = self.module.get_bin_path(
+ 'defaults',
+ required=False,
+ opt_dirs=self.path.split(':'),
+ )
+
+ if not self.executable:
+ raise OSXDefaultsException("Unable to locate defaults executable.")
+
+ # Ensure the value is the correct type
+ if self.state != 'absent':
+ self.value = self._convert_type(self.type, self.value)
+
+ # /init --------------------------------------------------------------- }}}
+
+ # tools --------------------------------------------------------------- {{{
+ @staticmethod
+ def is_int(value):
+ as_str = str(value)
+        if as_str.startswith("-"):
+ return as_str[1:].isdigit()
+ else:
+ return as_str.isdigit()
+
+ @staticmethod
+ def _convert_type(data_type, value):
+ """ Converts value to given type """
+ if data_type == "string":
+ return str(value)
+ elif data_type in ["bool", "boolean"]:
+ if isinstance(value, (binary_type, text_type)):
+ value = value.lower()
+ if value in [True, 1, "true", "1", "yes"]:
+ return True
+ elif value in [False, 0, "false", "0", "no"]:
+ return False
+ raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
+ elif data_type == "date":
+ try:
+ return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+ except ValueError:
+ raise OSXDefaultsException(
+                "Invalid date value: {0}. Required format yyyy-mm-dd hh:mm:ss.".format(repr(value))
+ )
+ elif data_type in ["int", "integer"]:
+ if not OSXDefaults.is_int(value):
+ raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
+ return int(value)
+ elif data_type == "float":
+ try:
+ value = float(value)
+ except ValueError:
+ raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
+ return value
+ elif data_type == "array":
+ if not isinstance(value, list):
+ raise OSXDefaultsException("Invalid value. Expected value to be an array")
+ return value
+
+ raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))
+
+ def _host_args(self):
+ """ Returns a normalized list of commandline arguments based on the "host" attribute """
+ if self.host is None:
+ return []
+ elif self.host == 'currentHost':
+ return ['-currentHost']
+ else:
+ return ['-host', self.host]
+
+ def _base_command(self):
+ """ Returns a list containing the "defaults" executable and any common base arguments """
+ return [self.executable] + self._host_args()
+
+ @staticmethod
+ def _convert_defaults_str_to_list(value):
+        """ Converts array output from defaults to a list """
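+        # For example, 'defaults read' prints an array as:
+        #     (
+        #         "one",
+        #         two
+        #     )
+        # and this helper turns that output into ['one', 'two']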
+ # Split output of defaults. Every line contains a value
+ value = value.splitlines()
+
+ # Remove first and last item, those are not actual values
+ value.pop(0)
+ value.pop(-1)
+
+ # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes
+ value = [re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in value]
+
+ return value
+
+ # /tools -------------------------------------------------------------- }}}
+
+ # commands ------------------------------------------------------------ {{{
+ def read(self):
+ """ Reads value of this domain & key from defaults """
+ # First try to find out the type
+ rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
+
+ # If RC is 1, the key does not exist
+ if rc == 1:
+ return None
+
+        # If the RC is not 0, something went wrong while reading the key type
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % out)
+
+        # OK, let's parse the type from the output
+ data_type = out.strip().replace('Type is ', '')
+
+ # Now get the current value
+ rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
+
+ # Strip output
+ out = out.strip()
+
+        # A non-zero RC at this point is unexpected
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % out)
+
+ # Convert string to list when type is array
+ if data_type == "array":
+ out = self._convert_defaults_str_to_list(out)
+
+ # Store the current_value
+ self.current_value = self._convert_type(data_type, out)
+
+ def write(self):
+ """ Writes value to this domain & key to defaults """
+ # We need to convert some values so the defaults commandline understands it
+ if isinstance(self.value, bool):
+ if self.value:
+ value = "TRUE"
+ else:
+ value = "FALSE"
+ elif isinstance(self.value, (int, float)):
+ value = str(self.value)
+ elif self.array_add and self.current_value is not None:
+ value = list(set(self.value) - set(self.current_value))
+ elif isinstance(self.value, datetime):
+ value = self.value.strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ value = self.value
+
+ # When the type is array and array_add is enabled, morph the type :)
+ if self.type == "array" and self.array_add:
+ self.type = "array-add"
+
+ # All values should be a list, for easy passing it to the command
+ if not isinstance(value, list):
+ value = [value]
+
+ rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
+
+ if rc != 0:
+ raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % out)
+
+ def delete(self):
+ """ Deletes defaults key from domain """
+ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % out)
+
+ # /commands ----------------------------------------------------------- }}}
+
+ # run ----------------------------------------------------------------- {{{
+    def run(self):
+        """ Applies the requested state and reports whether anything changed """
+
+ # Get the current value from defaults
+ self.read()
+
+ if self.state == 'list':
+ self.module.exit_json(key=self.key, value=self.current_value)
+
+ # Handle absent state
+ if self.state == "absent":
+ if self.current_value is None:
+ return False
+ if self.module.check_mode:
+ return True
+ self.delete()
+ return True
+
+ # There is a type mismatch! Given type does not match the type in defaults
+ value_type = type(self.value)
+ if self.current_value is not None and not isinstance(self.current_value, value_type):
+ raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)
+
+        # Current value matches the given value. Nothing needs to be done. Arrays need extra care
+ if self.type == "array" and self.current_value is not None and not self.array_add and \
+ set(self.current_value) == set(self.value):
+ return False
+ elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
+ return False
+ elif self.current_value == self.value:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ # Change/Create/Set given key/value for domain in defaults
+ self.write()
+ return True
+
+ # /run ---------------------------------------------------------------- }}}
+
+
+# /class OSXDefaults ------------------------------------------------------- }}}
+
+
+# main -------------------------------------------------------------------- {{{
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', default='NSGlobalDomain'),
+ host=dict(type='str'),
+ key=dict(type='str'),
+ type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']),
+ array_add=dict(type='bool', default=False),
+ value=dict(type='raw'),
+ state=dict(type='str', default='present', choices=['absent', 'list', 'present']),
+ path=dict(type='str', default='/usr/bin:/usr/local/bin'),
+ ),
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['value']),
+ ),
+ )
+
+ try:
+ defaults = OSXDefaults(module=module)
+ module.exit_json(changed=defaults.run())
+ except OSXDefaultsException as e:
+ module.fail_json(msg=e.message)
+
+
+# /main ------------------------------------------------------------------- }}}
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py
new file mode 100644
index 00000000..c63493ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pam_limits
+author:
+ - "Sebastien Rohaut (@usawa)"
+short_description: Modify Linux PAM limits
+description:
+ - The C(pam_limits) module modifies PAM limits. The default file is
+ C(/etc/security/limits.conf). For the full documentation, see C(man 5
+ limits.conf).
+options:
+ domain:
+ type: str
+ description:
+ - A username, @groupname, wildcard, uid/gid range.
+ required: true
+ limit_type:
+ type: str
+ description:
+ - Limit type, see C(man 5 limits.conf) for an explanation
+ required: true
+ choices: [ "hard", "soft", "-" ]
+ limit_item:
+ type: str
+ description:
+ - The limit to be set
+ required: true
+ choices:
+ - "core"
+ - "data"
+ - "fsize"
+ - "memlock"
+ - "nofile"
+ - "rss"
+ - "stack"
+ - "cpu"
+ - "nproc"
+ - "as"
+ - "maxlogins"
+ - "maxsyslogins"
+ - "priority"
+ - "locks"
+ - "sigpending"
+ - "msgqueue"
+ - "nice"
+ - "rtprio"
+ - "chroot"
+ value:
+ type: str
+ description:
+ - The value of the limit.
+ required: true
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ required: false
+ type: bool
+ default: "no"
+ use_min:
+ description:
+      - If set to C(yes), the minimal value will be used or conserved.
+        If the specified value is lower than the value in the file, the file content is
+        replaced with the new value; otherwise the content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ use_max:
+ description:
+      - If set to C(yes), the maximal value will be used or conserved.
+        If the specified value is higher than the value in the file, the file content is
+        replaced with the new value; otherwise the content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ dest:
+ type: str
+ description:
+ - Modify the limits.conf path.
+ required: false
+ default: "/etc/security/limits.conf"
+ comment:
+ type: str
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
+notes:
+  - If the C(dest) file doesn't exist, it is created.
+'''
+
+EXAMPLES = '''
+- name: Add or modify nofile soft limit for the user joe
+ community.general.pam_limits:
+ domain: joe
+ limit_type: soft
+ limit_item: nofile
+ value: 64000
+
+- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+ community.general.pam_limits:
+ domain: smith
+ limit_type: hard
+ limit_item: fsize
+ value: 1000000
+ use_max: yes
+
+- name: Add or modify memlock, both soft and hard, limit for the user james with a comment.
+ community.general.pam_limits:
+ domain: james
+ limit_type: '-'
+ limit_item: memlock
+ value: unlimited
+ comment: unlimited memory lock for james
+
+- name: Add or modify hard nofile limits for wildcard domain
+ community.general.pam_limits:
+ domain: '*'
+ limit_type: hard
+ limit_item: nofile
+ value: 39693561
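+
+# Illustrative sketch with assumed values; use_min keeps whichever of the
+# current and requested values is smaller, and backup saves a timestamped copy
+- name: Lower the soft nproc limit for the user kim, keeping the minimal value
+  community.general.pam_limits:
+    domain: kim
+    limit_type: soft
+    limit_item: nproc
+    value: 4096
+    use_min: yes
+    backup: yes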
+'''
+
+import os
+import os.path
+import tempfile
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
+ 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']
+
+ pam_types = ['soft', 'hard', '-']
+
+ limits_conf = '/etc/security/limits.conf'
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ domain=dict(required=True, type='str'),
+ limit_type=dict(required=True, type='str', choices=pam_types),
+ limit_item=dict(required=True, type='str', choices=pam_items),
+ value=dict(required=True, type='str'),
+ use_max=dict(default=False, type='bool'),
+ use_min=dict(default=False, type='bool'),
+ backup=dict(default=False, type='bool'),
+ dest=dict(default=limits_conf, type='str'),
+ comment=dict(required=False, default='', type='str')
+ )
+ )
+
+ domain = module.params['domain']
+ limit_type = module.params['limit_type']
+ limit_item = module.params['limit_item']
+ value = module.params['value']
+ use_max = module.params['use_max']
+ use_min = module.params['use_min']
+ backup = module.params['backup']
+ limits_conf = module.params['dest']
+ new_comment = module.params['comment']
+
+ changed = False
+
+ if os.path.isfile(limits_conf):
+ if not os.access(limits_conf, os.W_OK):
+ module.fail_json(msg="%s is not writable. Use sudo" % limits_conf)
+ else:
+ limits_conf_dir = os.path.dirname(limits_conf)
+ if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
+ open(limits_conf, 'a').close()
+ changed = True
+ else:
+ module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)
+
+ if use_max and use_min:
+ module.fail_json(msg="Cannot use use_min and use_max at the same time.")
+
+ if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
+        module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or a positive number. Refer to manual pages for more details.")
+
+ # Backup
+ if backup:
+ backup_file = module.backup_local(limits_conf)
+
+ space_pattern = re.compile(r'\s+')
+
+ message = ''
+ f = open(limits_conf, 'rb')
+ # Tempfile
+ nf = tempfile.NamedTemporaryFile(mode='w+')
+
+ found = False
+ new_value = value
+
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ if line.startswith('#'):
+ nf.write(line)
+ continue
+
+ newline = re.sub(space_pattern, ' ', line).strip()
+ if not newline:
+ nf.write(line)
+ continue
+
+ # Remove comment in line
+ newline = newline.split('#', 1)[0]
+ try:
+ old_comment = line.split('#', 1)[1]
+ except Exception:
+ old_comment = ''
+
+ newline = newline.rstrip()
+
+ if not new_comment:
+ new_comment = old_comment
+
+ line_fields = newline.split(' ')
+
+ if len(line_fields) != 4:
+ nf.write(line)
+ continue
+
+ line_domain = line_fields[0]
+ line_type = line_fields[1]
+ line_item = line_fields[2]
+ actual_value = line_fields[3]
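+        # e.g. the line "joe  soft  nofile  64000" parses into line_domain='joe',
+        # line_type='soft', line_item='nofile', actual_value='64000'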
+
+ if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
+ module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
+
+ # Found the line
+ if line_domain == domain and line_type == limit_type and line_item == limit_item:
+ found = True
+ if value == actual_value:
+ message = line
+ nf.write(line)
+ continue
+
+ actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
+ value_unlimited = value in ['unlimited', 'infinity', '-1']
+
+ if use_max:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(max(int(value), int(actual_value)))
+ elif actual_value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ if use_min:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(min(int(value), int(actual_value)))
+ elif value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ # Change line only if value has changed
+ if new_value != actual_value:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+ else:
+ message = line
+ nf.write(line)
+ else:
+ nf.write(line)
+
+ if not found:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+
+ f.close()
+ nf.flush()
+
+    # Move tempfile over the original file
+ module.atomic_move(nf.name, f.name)
+
+ try:
+ nf.close()
+ except Exception:
+ pass
+
+ res_args = dict(
+ changed=changed, msg=message
+ )
+
+ if backup:
+ res_args['backup_file'] = backup_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py
new file mode 100644
index 00000000..45f00826
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kenneth D. Evensen <kdevensen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: pamd
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Manage PAM Modules
+description:
+  - Edit a PAM service's type, control, module path and module arguments.
+ - In order for a PAM rule to be modified, the type, control and
+    module_path must match an existing rule. See C(man 5 pam.d) for details.
+options:
+ name:
+ description:
+ - The name generally refers to the PAM service file to
+ change, for example system-auth.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ control:
+ description:
+ - The control of the PAM rule being modified.
+ - This may be a complicated control with brackets. If this is the case, be
+ sure to put "[bracketed controls]" in quotes.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ module_path:
+ description:
+ - The module path of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ new_type:
+ description:
+ - The new type to assign to the new rule.
+ type: str
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ new_control:
+ description:
+ - The new control to assign to the new rule.
+ type: str
+ new_module_path:
+ description:
+ - The new module path to be assigned to the new rule.
+ type: str
+ module_arguments:
+ description:
+ - When state is C(updated), the module_arguments will replace existing module_arguments.
+ - When state is C(args_absent) args matching those listed in module_arguments will be removed.
+ - When state is C(args_present) any args listed in module_arguments are added if
+ missing from the existing rule.
+ - Furthermore, if the module argument takes a value denoted by C(=),
+ the value will be changed to that specified in module_arguments.
+ type: list
+ elements: str
+ state:
+ description:
+ - The default of C(updated) will modify an existing rule if type,
+ control and module_path all match an existing rule.
+ - With C(before), the new rule will be inserted before a rule matching type,
+ control and module_path.
+    - Similarly, with C(after), the new rule will be inserted after an existing rule matching type,
+ control and module_path.
+ - With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified.
+ - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored.
+ - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4.
+ type: str
+ choices: [ absent, before, after, args_absent, args_present, updated ]
+ default: updated
+ path:
+ description:
+ - This is the path to the PAM service files.
+ type: path
+ default: /etc/pam.d
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Update pamd rule's control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_control: sufficient
+
+- name: Update pamd rule's complex control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ new_control: '[success=2 default=ignore]'
+
+- name: Insert a new rule before an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_type: auth
+ new_control: sufficient
+ new_module_path: pam_faillock.so
+ state: before
+
+- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so
+ community.general.pamd:
+ name: su
+ type: auth
+ control: sufficient
+ module_path: pam_rootok.so
+ new_type: auth
+ new_control: required
+ new_module_path: pam_wheel.so
+ module_arguments: 'use_uid'
+ state: after
+
+- name: Remove module arguments from an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: ''
+ state: updated
+
+- name: Replace all module arguments in an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'preauth
+ silent
+ deny=3
+ unlock_time=604800
+ fail_interval=900'
+ state: updated
+
+- name: Remove specific arguments from a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_absent
+
+- name: Ensure specific arguments are present in a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_present
+
+- name: Ensure specific arguments are present in a rule (alternative)
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments:
+ - crond
+ - quiet
+ state: args_present
+
+- name: Module arguments requiring commas must be listed as a Yaml list
+ community.general.pamd:
+ name: special-module
+ type: account
+ control: required
+ module_path: pam_access.so
+ module_arguments:
+ - listsep=,
+ state: args_present
+
+- name: Update specific argument value in a rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'fail_interval=300'
+ state: args_present
+
+- name: Add pam common-auth rule for duo
+ community.general.pamd:
+ name: common-auth
+ new_type: auth
+ new_control: '[success=1 default=ignore]'
+ new_module_path: '/lib64/security/pam_duo.so'
+ state: after
+ type: auth
+ module_path: pam_sss.so
+ control: 'requisite'
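+
+# Illustrative sketch: the module_path below is an assumption; state=absent
+# removes every rule matching type, control and module_path
+- name: Remove an auth rule, keeping a timestamped backup of the service file
+  community.general.pamd:
+    name: system-auth
+    type: auth
+    control: sufficient
+    module_path: pam_ldap.so
+    state: absent
+    backup: yes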
+'''
+
+RETURN = r'''
+change_count:
+ description: How many rules were changed.
+ type: int
+ sample: 1
+ returned: success
+new_rule:
+ description: The changes to the rule. This was available in Ansible 2.4 and Ansible 2.5. It was removed in Ansible 2.6.
+ type: str
+ sample: None None None sha512 shadow try_first_pass use_authtok
+ returned: success
+updated_rule_(n):
+ description: The rule(s) that was/were changed. This is only available in
+ Ansible 2.4 and was removed in Ansible 2.5.
+ type: str
+ sample:
+ - password sufficient pam_unix.so sha512 shadow try_first_pass
+ use_authtok
+ returned: success
+action:
+ description:
+    - "The action that was taken and is one of: update_rule,
+ insert_before_rule, insert_after_rule, args_present, args_absent,
+ absent. This was available in Ansible 2.4 and removed in Ansible 2.8"
+ returned: always
+ type: str
+ sample: "update_rule"
+dest:
+ description:
+ - "Path to pam.d service that was changed. This is only available in
+ Ansible 2.3 and was removed in Ansible 2.4."
+ returned: success
+ type: str
+ sample: "/etc/pam.d/system-auth"
+backupdest:
+ description:
+ - "The file name of the backup file, if created."
+ returned: success
+ type: str
+...
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+from tempfile import NamedTemporaryFile
+from datetime import datetime
+
+
+RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
+ (?P<control>\[.*\]|\S*)\s+
+ (?P<path>\S*)\s*
+ (?P<args>.*)\s*""", re.X)
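+# For example, RULE_REGEX splits the line
+#   auth       [success=1 default=ignore]      pam_succeed_if.so uid >= 1000
+# into type 'auth', control '[success=1 default=ignore]',
+# path 'pam_succeed_if.so' and args 'uid >= 1000'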
+
+RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""")
+
+VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
+
+
+class PamdLine(object):
+
+ def __init__(self, line):
+ self.line = line
+ self.prev = None
+ self.next = None
+
+ @property
+ def is_valid(self):
+ if self.line.strip() == '':
+ return True
+ return False
+
+ def validate(self):
+ if not self.is_valid:
+ return False, "Rule is not valid " + self.line
+ return True, "Rule is valid " + self.line
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ return False
+
+ def __str__(self):
+ return str(self.line)
+
+
+class PamdEmptyLine(PamdLine):
+ pass
+
+
+class PamdComment(PamdLine):
+
+ def __init__(self, line):
+ super(PamdComment, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('#'):
+ return True
+ return False
+
+
+class PamdInclude(PamdLine):
+ def __init__(self, line):
+ super(PamdInclude, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('@include'):
+ return True
+ return False
+
+
+class PamdRule(PamdLine):
+
+ valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
+ valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
+ 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
+ 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
+ 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
+ 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
+ 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
+ 'incomplete', 'default']
+ valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']
+
+ def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
+ self.prev = None
+ self.next = None
+ self._control = None
+ self._args = None
+ self.rule_type = rule_type
+ self.rule_control = rule_control
+
+ self.rule_path = rule_path
+ self.rule_args = rule_args
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ if (rule_type == self.rule_type and
+ rule_control == self.rule_control and
+ rule_path == self.rule_path):
+ return True
+ return False
+
+ @classmethod
+ def rule_from_string(cls, line):
+ rule_match = RULE_REGEX.search(line)
+ rule_args = parse_module_arguments(rule_match.group('args'))
+ return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args)
+
+ def __str__(self):
+ if self.rule_args:
+ return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args))
+ return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path)
+
+ @property
+ def rule_control(self):
+ if isinstance(self._control, list):
+ return '[' + ' '.join(self._control) + ']'
+ return self._control
+
+ @rule_control.setter
+ def rule_control(self, control):
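+        # e.g. '[success=1 default=ignore]' is stored as ['success=1', 'default=ignore']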
+ if control.startswith('['):
+ control = control.replace(' = ', '=').replace('[', '').replace(']', '')
+ self._control = control.split(' ')
+ else:
+ self._control = control
+
+ @property
+ def rule_args(self):
+ if not self._args:
+ return []
+ return self._args
+
+ @rule_args.setter
+ def rule_args(self, args):
+ self._args = parse_module_arguments(args)
+
+ @property
+ def line(self):
+ return str(self)
+
+ @classmethod
+ def is_action_unsigned_int(cls, string_num):
+ number = 0
+ try:
+ number = int(string_num)
+ except ValueError:
+ return False
+
+ if number >= 0:
+ return True
+ return False
+
+ @property
+ def is_valid(self):
+ return self.validate()[0]
+
+ def validate(self):
+ # Validate the rule type
+ if self.rule_type not in VALID_TYPES:
+ return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line
+ # Validate the rule control
+ if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls:
+ return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line
+ elif isinstance(self._control, list):
+ for control in self._control:
+ value, action = control.split("=")
+ if value not in PamdRule.valid_control_values:
+ return False, "Rule control value, " + value + ", is not valid in rule " + self.line
+ if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action):
+ return False, "Rule control action, " + action + ", is not valid in rule " + self.line
+
+ # TODO: Validate path
+
+ return True, "Rule is valid " + self.line
+
+
+# PamdService encapsulates an entire service and contains one or more rules,
+# stored as a doubly linked list of lines.
+class PamdService(object):
+
+ def __init__(self, content):
+ self._head = None
+ self._tail = None
+ for line in content.splitlines():
+ if line.lstrip().startswith('#'):
+ pamd_line = PamdComment(line)
+ elif line.lstrip().startswith('@include'):
+ pamd_line = PamdInclude(line)
+ elif line.strip() == '':
+ pamd_line = PamdEmptyLine(line)
+ else:
+ pamd_line = PamdRule.rule_from_string(line)
+
+ self.append(pamd_line)
+
+ def append(self, pamd_line):
+ if self._head is None:
+ self._head = self._tail = pamd_line
+ else:
+ pamd_line.prev = self._tail
+ pamd_line.next = None
+ self._tail.next = pamd_line
+ self._tail = pamd_line
+
+ def remove(self, rule_type, rule_control, rule_path):
+ current_line = self._head
+ changed = 0
+
+ while current_line is not None:
+ if current_line.matches(rule_type, rule_control, rule_path):
+ if current_line.prev is not None:
+ current_line.prev.next = current_line.next
+ if current_line.next is not None:
+ current_line.next.prev = current_line.prev
+                else:
+                    self._head = current_line.next
+                    if current_line.next is not None:
+                        current_line.next.prev = None
+ changed += 1
+
+ current_line = current_line.next
+ return changed
+
+ def get(self, rule_type, rule_control, rule_path):
+ lines = []
+ current_line = self._head
+ while current_line is not None:
+
+ if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path):
+ lines.append(current_line)
+
+ current_line = current_line.next
+
+ return lines
+
+ def has_rule(self, rule_type, rule_control, rule_path):
+ if self.get(rule_type, rule_control, rule_path):
+ return True
+ return False
+
+ def update_rule(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ new_args = parse_module_arguments(new_args)
+
+ changes = 0
+ for current_rule in rules_to_find:
+ rule_changed = False
+            if new_type:
+                if current_rule.rule_type != new_type:
+                    rule_changed = True
+                    current_rule.rule_type = new_type
+            if new_control:
+                if current_rule.rule_control != new_control:
+                    rule_changed = True
+                    current_rule.rule_control = new_control
+            if new_path:
+                if current_rule.rule_path != new_path:
+                    rule_changed = True
+                    current_rule.rule_path = new_path
+            if new_args:
+                if current_rule.rule_args != new_args:
+                    rule_changed = True
+                    current_rule.rule_args = new_args
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def insert_before(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist before the existing rule
+ # 2. The new rule exists
+
+ for current_rule in rules_to_find:
+ # Create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ # First we'll get the previous rule.
+ previous_rule = current_rule.prev
+
+ # Next we may have to loop backwards if the previous line is a comment. If it
+ # is, we'll get the previous "rule's" previous.
+ while previous_rule is not None and isinstance(previous_rule, (PamdComment, PamdEmptyLine)):
+ previous_rule = previous_rule.prev
+ # Next we'll see if the previous rule matches what we are trying to insert.
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path):
+ # First set the original previous rule's next to the new_rule
+ previous_rule.next = new_rule
+ # Second, set the new_rule's previous to the original previous
+ new_rule.prev = previous_rule
+ # Third, set the new rule's next to the current rule
+ new_rule.next = current_rule
+ # Fourth, set the current rule's previous to the new_rule
+ current_rule.prev = new_rule
+
+ changes += 1
+
+ # Handle the case where it is the first rule in the list.
+ elif previous_rule is None:
+ # This is the case where the current rule is not only the first rule
+ # but the first line as well. So we set the head to the new rule
+ if current_rule.prev is None:
+ self._head = new_rule
+ # This case would occur if the previous line was a comment.
+ else:
+ current_rule.prev.next = new_rule
+ new_rule.prev = current_rule.prev
+ new_rule.next = current_rule
+ current_rule.prev = new_rule
+ changes += 1
+
+ return changes
+
+ def insert_after(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist after the existing rule
+ # 2. The new rule exists
+ for current_rule in rules_to_find:
+ # First we'll get the next rule.
+ next_rule = current_rule.next
+ # Next we may have to loop forwards if the next line is a comment. If it
+ # is, we'll get the next "rule's" next.
+ while next_rule is not None and isinstance(next_rule, (PamdComment, PamdEmptyLine)):
+ next_rule = next_rule.next
+
+ # First we create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ if next_rule is not None and not next_rule.matches(new_type, new_control, new_path):
+ # If the previous rule doesn't match we'll insert our new rule.
+
+ # Second set the original next rule's previous to the new_rule
+ next_rule.prev = new_rule
+ # Third, set the new_rule's next to the original next rule
+ new_rule.next = next_rule
+ # Fourth, set the new rule's previous to the current rule
+ new_rule.prev = current_rule
+ # Fifth, set the current rule's next to the new_rule
+ current_rule.next = new_rule
+
+ changes += 1
+
+ # This is the case where the current_rule is the last in the list
+ elif next_rule is None:
+ new_rule.prev = self._tail
+ new_rule.next = None
+ self._tail.next = new_rule
+ self._tail = new_rule
+
+ current_rule.next = new_rule
+ changes += 1
+
+ return changes
+
+ def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_add = parse_module_arguments(args_to_add)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ rule_changed = False
+
+ # create some structures to evaluate the situation
+ simple_new_args = set()
+ key_value_new_args = dict()
+
+ for arg in args_to_add:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+                    key, value = arg.split("=", 1)
+ key_value_new_args[key] = value
+ else:
+ simple_new_args.add(arg)
+
+ key_value_new_args_set = set(key_value_new_args)
+
+ simple_current_args = set()
+ key_value_current_args = dict()
+
+ for arg in current_rule.rule_args:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+                    key, value = arg.split("=", 1)
+ key_value_current_args[key] = value
+ else:
+ simple_current_args.add(arg)
+
+ key_value_current_args_set = set(key_value_current_args)
+
+ new_args_to_add = list()
+
+ # Handle new simple arguments
+ if simple_new_args.difference(simple_current_args):
+ for arg in simple_new_args.difference(simple_current_args):
+ new_args_to_add.append(arg)
+
+ # Handle new key value arguments
+ if key_value_new_args_set.difference(key_value_current_args_set):
+ for key in key_value_new_args_set.difference(key_value_current_args_set):
+ new_args_to_add.append(key + '=' + key_value_new_args[key])
+
+ if new_args_to_add:
+ current_rule.rule_args += new_args_to_add
+ rule_changed = True
+
+ # Handle existing key value arguments when value is not equal
+ if key_value_new_args_set.intersection(key_value_current_args_set):
+ for key in key_value_new_args_set.intersection(key_value_current_args_set):
+ if key_value_current_args[key] != key_value_new_args[key]:
+ arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key])
+ current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key])
+ rule_changed = True
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_remove = parse_module_arguments(args_to_remove)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ if not args_to_remove:
+ args_to_remove = []
+
+ # Let's check to see if there are any args to remove by finding the intersection
+ # of the rule's current args and the args_to_remove lists
+ if not list(set(current_rule.rule_args) & set(args_to_remove)):
+ continue
+
+ # There are args to remove, so we create a list of new_args absent the args
+ # to remove.
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove]
+
+ changes += 1
+
+ return changes
+
+ def validate(self):
+ current_line = self._head
+
+ while current_line is not None:
+ if not current_line.validate()[0]:
+ return current_line.validate()
+ current_line = current_line.next
+ return True, "Module is valid"
+
+ def __str__(self):
+ lines = []
+ current_line = self._head
+
+ while current_line is not None:
+ lines.append(str(current_line))
+ current_line = current_line.next
+
+ if lines[1].startswith("# Updated by Ansible"):
+ lines.pop(1)
+
+ lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat())
+
+ return '\n'.join(lines) + '\n'
+
+
+def parse_module_arguments(module_arguments):
+ # Return empty list if we have no args to parse
+ if not module_arguments:
+ return []
+ elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
+ return []
+
+ if not isinstance(module_arguments, list):
+ module_arguments = [module_arguments]
+
+ parsed_args = list()
+
+ for arg in module_arguments:
+ for item in filter(None, RULE_ARG_REGEX.findall(arg)):
+ if not item.startswith("["):
+                item = re.sub("\\s*=\\s*", "=", item)
+ parsed_args.append(item)
+
+ return parsed_args
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=VALID_TYPES),
+ control=dict(type='str', required=True),
+ module_path=dict(type='str', required=True),
+ new_type=dict(type='str', choices=VALID_TYPES),
+ new_control=dict(type='str'),
+ new_module_path=dict(type='str'),
+ module_arguments=dict(type='list', elements='str'),
+ state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']),
+ path=dict(type='path', default='/etc/pam.d'),
+ backup=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ("state", "args_present", ["module_arguments"]),
+ ("state", "args_absent", ["module_arguments"]),
+ ("state", "before", ["new_control", "new_type", "new_module_path"]),
+ ("state", "after", ["new_control", "new_type", "new_module_path"]),
+ ],
+ )
+ content = str()
+ fname = os.path.join(module.params["path"], module.params["name"])
+
+ # Open the file and read the content or fail
+ try:
+ with open(fname, 'r') as service_file_obj:
+ content = service_file_obj.read()
+ except IOError as e:
+ # If unable to read the file, fail out
+ module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' % (fname, str(e)))
+
+ # Assuming we didn't fail, create the service
+ service = PamdService(content)
+ # Set the action
+ action = module.params['state']
+
+ changes = 0
+
+ # Take action
+ if action == 'updated':
+ changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'before':
+ changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'after':
+ changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_absent':
+ changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_present':
+ if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]:
+ module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.")
+
+ changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'absent':
+ changes = service.remove(module.params['type'], module.params['control'], module.params['module_path'])
+
+ valid, msg = service.validate()
+
+ # If the module is not valid (meaning one of the rules is invalid), we will fail
+ if not valid:
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=(changes > 0),
+ change_count=changes,
+ backupdest='',
+ )
+
+ # If not check mode and something changed, backup the original if necessary then write out the file or fail
+ if not module.check_mode and result['changed']:
+ # First, create a backup if desired.
+ if module.params['backup']:
+ result['backupdest'] = module.backup_local(fname)
+ try:
+ temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False)
+ with open(temp_file.name, 'w') as fd:
+ fd.write(str(service))
+
+ except IOError:
+            module.fail_json(msg='Unable to create temporary file %s' % temp_file)
+
+ module.atomic_move(temp_file.name, os.path.realpath(fname))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py
new file mode 100644
index 00000000..daf68c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py
@@ -0,0 +1,797 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Fabrizio Colonna <colofabrix@tin.it>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Fabrizio Colonna (@ColOfAbRiX)
+module: parted
+short_description: Configure block device partitions
+description:
+  - This module allows configuring block device partitions using the C(parted)
+ command line tool. For a full description of the fields and the options
+ check the GNU parted manual.
+requirements:
+ - This module requires parted version 1.8.3 and above
+ - align option (except 'undefined') requires parted 2.1 and above
+ - If the version of parted is below 3.1, it requires a Linux version running
+ the sysfs file system C(/sys/).
+options:
+ device:
+ description: The block device (disk) where to operate.
+ type: str
+ required: True
+ align:
+    description: Set alignment for newly created partitions. Use 'undefined' for parted default alignment.
+ type: str
+ choices: [ cylinder, minimal, none, optimal, undefined ]
+ default: optimal
+ number:
+ description:
+ - The number of the partition to work with or the number of the partition
+ that will be created.
+ - Required when performing any action on the disk, except fetching information.
+ type: int
+ unit:
+ description:
+ - Selects the current default unit that Parted will use to display
+ locations and capacities on the disk and to interpret those given by the
+        user if they are not suffixed by a unit.
+ - When fetching information about a disk, it is always recommended to specify a unit.
+ type: str
+ choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
+ default: KiB
+ label:
+ description:
+ - Disk label type to use.
+      - If C(device) already contains a different label, it will be changed to C(label) and any previous partitions will be lost.
+ type: str
+ choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
+ default: msdos
+ part_type:
+ description:
+ - May be specified only with 'msdos' or 'dvh' partition tables.
+ - A C(name) must be specified for a 'gpt' partition table.
+ - Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
+ type: str
+ choices: [ extended, logical, primary ]
+ default: primary
+ part_start:
+ description:
+ - Where the partition will start as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+ (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+ - Using negative values may require setting of C(fs_type) (see notes).
+ type: str
+ default: 0%
+ part_end:
+ description:
+ - Where the partition will end as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+ (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+ type: str
+ default: 100%
+ name:
+ description:
+ - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
+ type: str
+ flags:
+    description: A list of the flags that have to be set on the partition.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to create or delete a partition.
+ - If set to C(info) the module will only return the device information.
+ type: str
+ choices: [ absent, present, info ]
+ default: info
+ fs_type:
+ description:
+      - If specified and the partition does not exist, the given filesystem type will be set on the partition.
+      - Parameter is optional, but see the notes below about negative C(part_start) values.
+ type: str
+ version_added: '0.2.0'
+ resize:
+ description:
+ - Call C(resizepart) on existing partitions to match the size specified by I(part_end).
+ type: bool
+ default: false
+ version_added: '1.3.0'
+
+notes:
+ - When fetching information about a new disk and when the version of parted
+ installed on the system is before version 3.1, the module queries the kernel
+ through C(/sys/) to obtain disk information. In this case the units CHS and
+ CYL are not supported.
+  - Negative C(part_start) values were rejected if C(fs_type) was not given.
+    This bug was fixed in parted 3.2.153. If you want to use negative C(part_start),
+    specify C(fs_type) as well or make sure your system has a newer version of parted.
+'''
+
+RETURN = r'''
+partition_info:
+ description: Current partition information
+ returned: success
+ type: complex
+ contains:
+ disk:
+ description: Generic device information.
+ type: dict
+ partitions:
+ description: List of device partitions.
+ type: list
+ script:
+    description: The parted script executed by the module.
+ type: str
+ sample: {
+ "disk": {
+ "dev": "/dev/sdb",
+ "logical_block": 512,
+ "model": "VMware Virtual disk",
+ "physical_block": 512,
+ "size": 5.0,
+ "table": "msdos",
+ "unit": "gib"
+ },
+ "partitions": [{
+ "begin": 0.0,
+ "end": 1.0,
+ "flags": ["boot", "lvm"],
+ "fstype": "",
+ "name": "",
+ "num": 1,
+ "size": 1.0
+ }, {
+ "begin": 1.0,
+ "end": 5.0,
+ "flags": [],
+ "fstype": "",
+ "name": "",
+ "num": 2,
+ "size": 4.0
+ }],
+ "script": "unit KiB print "
+ }
+'''
+
+EXAMPLES = r'''
+- name: Create a new ext4 primary partition
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ fs_type: ext4
+
+- name: Remove partition number 1
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: absent
+
+- name: Create a new primary partition with a size of 1GiB
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ part_end: 1GiB
+
+- name: Create a new primary partition for LVM
+ community.general.parted:
+ device: /dev/sdb
+ number: 2
+ flags: [ lvm ]
+ state: present
+ part_start: 1GiB
+
+- name: Create a new primary partition with a size of 1GiB at disk's end
+ community.general.parted:
+ device: /dev/sdb
+ number: 3
+ state: present
+ fs_type: ext3
+ part_start: -1GiB
+
+# Example of how to read info and reuse it in a subsequent task
+- name: Read device information (always use unit when probing)
+  community.general.parted:
+    device: /dev/sdb
+    unit: MiB
+ register: sdb_info
+
+- name: Remove all partitions from disk
+ community.general.parted:
+ device: /dev/sdb
+ number: '{{ item.num }}'
+ state: absent
+ loop: '{{ sdb_info.partitions }}'
+
+- name: Extend an existing partition to fill all available space
+ community.general.parted:
+ device: /dev/sdb
+ number: "{{ sdb_info.partitions | length }}"
+ part_end: "100%"
+ resize: true
+ state: present
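+
+# A minimal sketch: a 'gpt' label requires a partition name (see the part_type
+# and name options above); /dev/sdb and the name "data" are illustrative values.
+- name: Create a named partition on a GPT-labelled disk
+  community.general.parted:
+    device: /dev/sdb
+    label: gpt
+    number: 1
+    name: data
+    state: present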
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import math
+import re
+import os
+
+
+# Reference prefixes (International System of Units and IEC)
+units_si = ['B', 'KB', 'MB', 'GB', 'TB']
+units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
+parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
+
+
+def parse_unit(size_str, unit=''):
+ """
+ Parses a string containing a size or boundary information
+ """
+ matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str)
+ if matches is None:
+ # "<cylinder>,<head>,<sector>" format
+ matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
+ if matches is None:
+ module.fail_json(
+ msg="Error interpreting parted size output: '%s'" % size_str
+ )
+
+ size = {
+ 'cylinder': int(matches.group(1)),
+ 'head': int(matches.group(2)),
+ 'sector': int(matches.group(3))
+ }
+ unit = 'chs'
+
+ else:
+ # Normal format: "<number>[<unit>]"
+ if matches.group(2) is not None:
+ unit = matches.group(2)
+
+ size = float(matches.group(1))
+
+ return size, unit
+
+
+def parse_partition_info(parted_output, unit):
+ """
+ Parses the output of parted and transforms the data into
+ a dictionary.
+
+ Parted Machine Parseable Output:
+    See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/000573.html
+ - All lines end with a semicolon (;)
+ - The first line indicates the units in which the output is expressed.
+ CHS, CYL and BYT stands for CHS, Cylinder and Bytes respectively.
+ - The second line is made of disk information in the following format:
+ "path":"size":"transport-type":"logical-sector-size":"physical-sector-siz
+ e":"partition-table-type":"model-name";
+ - If the first line was either CYL or CHS, the next line will contain
+ information on no. of cylinders, heads, sectors and cylinder size.
+ - Partition information begins from the next line. This is of the format:
+ (for BYT)
+ "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s
+ et";
+ (for CHS/CYL)
+ "number":"begin":"end":"filesystem-type":"partition-name":"flags-set";
+ """
+ lines = [x for x in parted_output.split('\n') if x.strip() != '']
+
+ # Generic device info
+ generic_params = lines[1].rstrip(';').split(':')
+
+ # The unit is read once, because parted always returns the same unit
+ size, unit = parse_unit(generic_params[1], unit)
+
+ generic = {
+ 'dev': generic_params[0],
+ 'size': size,
+ 'unit': unit.lower(),
+ 'table': generic_params[5],
+ 'model': generic_params[6],
+ 'logical_block': int(generic_params[3]),
+ 'physical_block': int(generic_params[4])
+ }
+
+ # CYL and CHS have an additional line in the output
+ if unit in ['cyl', 'chs']:
+ chs_info = lines[2].rstrip(';').split(':')
+ cyl_size, cyl_unit = parse_unit(chs_info[3])
+ generic['chs_info'] = {
+ 'cylinders': int(chs_info[0]),
+ 'heads': int(chs_info[1]),
+ 'sectors': int(chs_info[2]),
+ 'cyl_size': cyl_size,
+ 'cyl_size_unit': cyl_unit.lower()
+ }
+ lines = lines[1:]
+
+ parts = []
+ for line in lines[2:]:
+ part_params = line.rstrip(';').split(':')
+
+        # CHS uses a different format than BYT but, contrary to what is stated by
+ # the author, CYL is the same as BYT. I've tested this undocumented
+ # behaviour down to parted version 1.8.3, which is the first version
+ # that supports the machine parseable output.
+ if unit != 'chs':
+ size = parse_unit(part_params[3])[0]
+ fstype = part_params[4]
+ name = part_params[5]
+ flags = part_params[6]
+
+ else:
+ size = ""
+ fstype = part_params[3]
+ name = part_params[4]
+ flags = part_params[5]
+
+ parts.append({
+ 'num': int(part_params[0]),
+ 'begin': parse_unit(part_params[1])[0],
+ 'end': parse_unit(part_params[2])[0],
+ 'size': size,
+ 'fstype': fstype,
+ 'name': name,
+ 'flags': [f.strip() for f in flags.split(', ') if f != ''],
+ 'unit': unit.lower(),
+ })
+
+ return {'generic': generic, 'partitions': parts}
+
+
+def format_disk_size(size_bytes, unit):
+ """
+ Formats a size in bytes into a different unit, like parted does. It doesn't
+ manage CYL and CHS formats, though.
+    This function has been adapted from
+    https://github.com/Distrotech/parted/blob/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c
+ """
+ global units_si, units_iec
+
+ unit = unit.lower()
+
+ # Shortcut
+ if size_bytes == 0:
+ return 0.0, 'b'
+
+ # Cases where we default to 'compact'
+ if unit in ['', 'compact', 'cyl', 'chs']:
+ index = max(0, int(
+ (math.log10(size_bytes) - 1.0) / 3.0
+ ))
+ unit = 'b'
+ if index < len(units_si):
+ unit = units_si[index]
+
+ # Find the appropriate multiplier
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** units_iec.index(unit)
+
+ output = size_bytes // multiplier * (1 + 1E-16)
+
+ # Corrections to round up as per IEEE754 standard
+ if output < 10:
+ w = output + 0.005
+ elif output < 100:
+ w = output + 0.05
+ else:
+ w = output + 0.5
+
+ if w < 10:
+ precision = 2
+ elif w < 100:
+ precision = 1
+ else:
+ precision = 0
+
+ # Round and return
+ return round(output, precision), unit
+
+
+def convert_to_bytes(size_str, unit):
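+    # For example, convert_to_bytes('1', 'KiB') returns 1024 and
+    # convert_to_bytes('5', 'GB') returns 5000000000.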
+ size = float(size_str)
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** (units_iec.index(unit) + 1)
+ elif unit in ['', 'compact', 'cyl', 'chs']:
+ # As per format_disk_size, default to compact, which defaults to megabytes
+ multiplier = 1000.0 ** units_si.index("MB")
+
+ output = size * multiplier
+ return int(output)
+
+
+def get_unlabeled_device_info(device, unit):
+ """
+    Fetches device information directly from the kernel; it is used when parted
+    cannot work because of a missing label. It always returns an 'unknown'
+    label.
+ """
+ device_name = os.path.basename(device)
+ base = "/sys/block/%s" % device_name
+
+ vendor = read_record(base + "/device/vendor", "Unknown")
+ model = read_record(base + "/device/model", "model")
+ logic_block = int(read_record(base + "/queue/logical_block_size", 0))
+ phys_block = int(read_record(base + "/queue/physical_block_size", 0))
+ size_bytes = int(read_record(base + "/size", 0)) * logic_block
+
+ size, unit = format_disk_size(size_bytes, unit)
+
+ return {
+ 'generic': {
+ 'dev': device,
+ 'table': "unknown",
+ 'size': size,
+ 'unit': unit,
+ 'logical_block': logic_block,
+ 'physical_block': phys_block,
+ 'model': "%s %s" % (vendor, model),
+ },
+ 'partitions': []
+ }
+
+
+def get_device_info(device, unit):
+ """
+    Fetches information about a disk and its partitions and returns a
+ dictionary.
+ """
+ global module, parted_exec
+
+ # If parted complains about missing labels, it means there are no partitions.
+ # In this case only, use a custom function to fetch information and emulate
+ # parted formats for the unit.
+ label_needed = check_parted_label(device)
+ if label_needed:
+ return get_unlabeled_device_info(device, unit)
+
+ command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
+ rc, out, err = module.run_command(command)
+ if rc != 0 and 'unrecognised disk label' not in err:
+ module.fail_json(msg=(
+ "Error while getting device information with parted "
+ "script: '%s'" % command),
+ rc=rc, out=out, err=err
+ )
+
+ return parse_partition_info(out, unit)
+
+
+def check_parted_label(device):
+ """
+ Determines if parted needs a label to complete its duties. Versions prior
+ to 3.1 don't return data when there is no label. For more information see:
+ http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html
+ """
+ global parted_exec
+
+ # Check the version
+ parted_major, parted_minor, _ = parted_version()
+ if (parted_major == 3 and parted_minor >= 1) or parted_major > 3:
+ return False
+
+ # Older parted versions return a message in the stdout and RC > 0.
+ rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
+ if rc != 0 and 'unrecognised disk label' in out.lower():
+ return True
+
+ return False
+
+
+def parse_parted_version(out):
+ """
+ Returns version tuple from the output of "parted --version" command
+ """
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ return None, None, None
+
+ # Sample parted versions (see as well test unit):
+ # parted (GNU parted) 3.3
+ # parted (GNU parted) 3.4.5
+ # parted (GNU parted) 3.3.14-dfc61
+ matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip())
+
+ if matches is None:
+ return None, None, None
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+
+def parted_version():
+ """
+ Returns the major and minor version of parted installed on the system.
+ """
+ global module, parted_exec
+
+ rc, out, err = module.run_command("%s --version" % parted_exec)
+ if rc != 0:
+ module.fail_json(
+ msg="Failed to get parted version.", rc=rc, out=out, err=err
+ )
+
+ (major, minor, rev) = parse_parted_version(out)
+ if major is None:
+ module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
+
+ return major, minor, rev
+
+
+def parted(script, device, align):
+ """
+ Runs a parted script.
+ """
+ global module, parted_exec
+
+ align_option = '-a %s' % align
+ if align == 'undefined':
+ align_option = ''
+
+ if script and not module.check_mode:
+ command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script)
+ rc, out, err = module.run_command(command)
+
+ if rc != 0:
+ module.fail_json(
+ msg="Error while running parted script: %s" % command.strip(),
+ rc=rc, out=out, err=err
+ )
+
+
+def read_record(file_path, default=None):
+ """
+ Reads the first line of a file and returns it.
+ """
+ try:
+ f = open(file_path, 'r')
+ try:
+ return f.readline().strip()
+ finally:
+ f.close()
+ except IOError:
+ return default
+
+
+def part_exists(partitions, attribute, number):
+ """
+ Looks if a partition that has a specific value for a specific attribute
+ actually exists.
+ """
+ return any(
+ part[attribute] and
+ part[attribute] == number for part in partitions
+ )
+
+
+def check_size_format(size_str):
+ """
+ Checks if the input string is an allowed size
+ """
+ size, unit = parse_unit(size_str)
+ return unit in parted_units
+
+
+def main():
+ global module, units_si, units_iec, parted_exec
+
+ changed = False
+ output_script = ""
+ script = ""
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(type='str', required=True),
+ align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']),
+ number=dict(type='int'),
+
+ # unit <unit> command
+ unit=dict(type='str', default='KiB', choices=parted_units),
+
+ # mklabel <label-type> command
+ label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
+
+ # mkpart <part-type> [<fs-type>] <start> <end> command
+ part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
+ part_start=dict(type='str', default='0%'),
+ part_end=dict(type='str', default='100%'),
+ fs_type=dict(type='str'),
+
+ # name <partition> <name> command
+ name=dict(type='str'),
+
+ # set <partition> <flag> <state> command
+ flags=dict(type='list', elements='str'),
+
+ # rm/mkpart command
+ state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
+
+ # resize part
+ resize=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ['state', 'present', ['number']],
+ ['state', 'absent', ['number']],
+ ],
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
+
+ # Data extraction
+ device = module.params['device']
+ align = module.params['align']
+ number = module.params['number']
+ unit = module.params['unit']
+ label = module.params['label']
+ part_type = module.params['part_type']
+ part_start = module.params['part_start']
+ part_end = module.params['part_end']
+ name = module.params['name']
+ state = module.params['state']
+ flags = module.params['flags']
+ fs_type = module.params['fs_type']
+ resize = module.params['resize']
+
+ # Parted executable
+ parted_exec = module.get_bin_path('parted', True)
+
+ # Conditioning
+ if number is not None and number < 1:
+ module.fail_json(msg="The partition number must be greater then 0.")
+ if not check_size_format(part_start):
+ module.fail_json(
+ msg="The argument 'part_start' doesn't respect required format."
+ "The size unit is case sensitive.",
+ err=parse_unit(part_start)
+ )
+ if not check_size_format(part_end):
+ module.fail_json(
+ msg="The argument 'part_end' doesn't respect required format."
+ "The size unit is case sensitive.",
+ err=parse_unit(part_end)
+ )
+
+ # Read the current disk information
+ current_device = get_device_info(device, unit)
+ current_parts = current_device['partitions']
+
+ if state == 'present':
+
+ # Assign label if required
+ mklabel_needed = current_device['generic'].get('table', None) != label
+ if mklabel_needed:
+ script += "mklabel %s " % label
+
+ # Create partition if required
+ if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)):
+ script += "mkpart %s %s%s %s " % (
+ part_type,
+ '%s ' % fs_type if fs_type is not None else '',
+ part_start,
+ part_end
+ )
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # If partition exists, try to resize
+ if resize and part_exists(current_parts, 'num', number):
+ # Ensure new end is different to current
+ partition = [p for p in current_parts if p['num'] == number][0]
+ current_part_end = convert_to_bytes(partition['end'], unit)
+
+ size, parsed_unit = parse_unit(part_end, unit)
+ if parsed_unit == "%":
+ size = int((int(current_device['generic']['size']) * size) / 100)
+ parsed_unit = unit
+
+ desired_part_end = convert_to_bytes(size, parsed_unit)
+
+ if current_part_end != desired_part_end:
+ script += "resizepart %s %s " % (
+ number,
+ part_end
+ )
+
+ # Execute the script and update the data structure.
+ # This will create the partition for the next steps
+ if script:
+ output_script += script
+ parted(script, device, align)
+ changed = True
+ script = ""
+
+ if not module.check_mode:
+ current_parts = get_device_info(device, unit)['partitions']
+
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ if changed and module.check_mode:
+ partition = {'flags': []} # Empty structure for the check-mode
+ else:
+ partition = [p for p in current_parts if p['num'] == number][0]
+
+ # Assign name to the partition
+ if name is not None and partition.get('name', None) != name:
+ # Wrap double quotes in single quotes so the shell doesn't strip
+ # the double quotes as those need to be included in the arg
+ # passed to parted
+ script += 'name %s \'"%s"\' ' % (number, name)
+
+ # Manage flags
+ if flags:
+ # Parted infers boot with esp, if you assign esp, boot is set
+ # and if boot is unset, esp is also unset.
+ if 'esp' in flags and 'boot' not in flags:
+ flags.append('boot')
+
+ # Compute only the changes in flags status
+ flags_off = list(set(partition['flags']) - set(flags))
+ flags_on = list(set(flags) - set(partition['flags']))
+
+ for f in flags_on:
+ script += "set %s %s on " % (number, f)
+
+ for f in flags_off:
+ script += "set %s %s off " % (number, f)
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # Execute the script
+ if script:
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'absent':
+ # Remove the partition
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ script = "rm %s " % number
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'info':
+ output_script = "unit '%s' print " % unit
+
+ # Final status of the device
+ final_device_status = get_device_info(device, unit)
+ module.exit_json(
+ changed=changed,
+ disk=final_device_status['generic'],
+ partitions=final_device_status['partitions'],
+ script=output_script.strip()
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py
new file mode 100644
index 00000000..1bee180b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pids
+description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists."
+short_description: "Retrieves process IDs list if the process is running otherwise return empty list"
+author:
+ - Saranya Sridharan (@saranyasridharan)
+requirements:
+ - psutil (Python module)
+options:
+ name:
+    description: The name of the process for which to fetch PIDs.
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+# Pass the process name
+- name: Getting process IDs of the process
+ community.general.pids:
+ name: python
+ register: pids_of_python
+
+- name: Printing the process IDs obtained
+ ansible.builtin.debug:
+ msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
+'''
+
+RETURN = '''
+pids:
+ description: Process IDs of the given process
+ returned: list of none, one, or more process IDs
+ type: list
+ sample: [100,200]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ import psutil
+ HAS_PSUTIL = True
+except ImportError:
+ HAS_PSUTIL = False
+
+
+def compare_lower(a, b):
+ if a is None or b is None:
+ # this could just be "return False" but would lead to surprising behavior if both a and b are None
+ return a == b
+
+ return a.lower() == b.lower()
+
+
+def get_pid(name):
+ pids = []
+
+ for proc in psutil.process_iter(attrs=['name', 'cmdline']):
+ if compare_lower(proc.info['name'], name) or \
+ proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
+ pids.append(proc.pid)
+
+ return pids
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type="str"),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAS_PSUTIL:
+ module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
+ name = module.params["name"]
+ response = dict(pids=get_pid(name))
+ module.exit_json(**response)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py
new file mode 100644
index 00000000..db8c0ec8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: puppet
+short_description: Runs puppet
+description:
+ - Runs I(puppet) agent or apply in a reliable manner.
+options:
+ timeout:
+ description:
+ - How long to wait for I(puppet) to finish.
+ type: str
+ default: 30m
+ puppetmaster:
+ description:
+ - The hostname of the puppetmaster to contact.
+ type: str
+ modulepath:
+ description:
+ - Path to an alternate location for puppet modules.
+ type: str
+ manifest:
+ description:
+ - Path to the manifest file to run puppet apply on.
+ type: str
+ noop:
+ description:
+ - Override puppet.conf noop mode.
+ - When C(yes), run Puppet agent with C(--noop) switch set.
+ - When C(no), run Puppet agent with C(--no-noop) switch set.
+ - When unset (default), use default or puppet.conf value if defined.
+ type: bool
+ facts:
+ description:
+ - A dict of values to pass in as persistent external facter facts.
+ type: dict
+ facter_basename:
+ description:
+ - Basename of the facter output file.
+ type: str
+ default: ansible
+ environment:
+ description:
+ - Puppet environment to be used.
+ type: str
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used.
+ - C(all) will go to both C(stdout) and C(syslog).
+ type: str
+ choices: [ all, stdout, syslog ]
+ default: stdout
+ certname:
+ description:
+ - The name to use when handling certificates.
+ type: str
+ tags:
+ description:
+ - A list of puppet tags to be used.
+ type: list
+ elements: str
+ execute:
+ description:
+ - Execute a specific piece of Puppet code.
+ - It has no effect with a puppetmaster.
+ type: str
+ use_srv_records:
+ description:
+      - Toggles the use_srv_records flag.
+ type: bool
+ summarize:
+ description:
+ - Whether to print a transaction summary.
+ type: bool
+ default: false
+ verbose:
+ description:
+ - Print extra information.
+ type: bool
+ default: false
+ debug:
+ description:
+ - Enable full debugging.
+ type: bool
+ default: false
+requirements:
+- puppet
+author:
+- Monty Taylor (@emonty)
+'''
+
+EXAMPLES = r'''
+- name: Run puppet agent and fail if anything goes wrong
+ community.general.puppet:
+
+- name: Run puppet and timeout in 5 minutes
+ community.general.puppet:
+ timeout: 5m
+
+- name: Run puppet using a different environment
+ community.general.puppet:
+ environment: testing
+
+- name: Run puppet using a specific certname
+ community.general.puppet:
+ certname: agent01.example.com
+
+- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster
+ community.general.puppet:
+ execute: include ::mymodule
+
+- name: Run puppet using specific tags
+ community.general.puppet:
+ tags:
+ - update
+ - nginx
+
+- name: Run puppet agent in noop mode
+ community.general.puppet:
+ noop: yes
+
+- name: Run a manifest with debug, log to both syslog and stdout, specify module path
+ community.general.puppet:
+ modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+ logdest: all
+ manifest: /var/lib/example/puppet_step_config.pp
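+
+# A minimal sketch: rely on DNS SRV records to locate the master, using the
+# use_srv_records option documented above.
+- name: Run puppet agent using SRV records
+  community.general.puppet:
+    use_srv_records: true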
+'''
+
+import json
+import os
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def _get_facter_dir():
+ if os.getuid() == 0:
+ return '/etc/facter/facts.d'
+ else:
+ return os.path.expanduser('~/.facter/facts.d')
+
+
+def _write_structured_data(basedir, basename, data):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ file_path = os.path.join(basedir, "{0}.json".format(basename))
+ # This is more complex than you might normally expect because we want to
+ # open the file with only u+rw set. Also, we use the stat constants
+ # because ansible still supports python 2.4 and the octal syntax changed
+ out_file = os.fdopen(
+ os.open(
+ file_path, os.O_CREAT | os.O_WRONLY,
+ stat.S_IRUSR | stat.S_IWUSR), 'w')
+    # json.dumps() returns text; writing encoded bytes to a text-mode file
+    # would raise a TypeError on Python 3.
+    out_file.write(json.dumps(data))
+ out_file.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ timeout=dict(type='str', default='30m'),
+ puppetmaster=dict(type='str'),
+ modulepath=dict(type='str'),
+ manifest=dict(type='str'),
+ noop=dict(required=False, type='bool'),
+ logdest=dict(type='str', default='stdout', choices=['all',
+ 'stdout',
+ 'syslog']),
+ # internal code to work with --diff, do not use
+ show_diff=dict(type='bool', default=False, aliases=['show-diff']),
+ facts=dict(type='dict'),
+ facter_basename=dict(type='str', default='ansible'),
+ environment=dict(type='str'),
+ certname=dict(type='str'),
+ tags=dict(type='list', elements='str'),
+ execute=dict(type='str'),
+ summarize=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ use_srv_records=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('puppetmaster', 'manifest'),
+ ('puppetmaster', 'manifest', 'execute'),
+ ('puppetmaster', 'modulepath'),
+ ],
+ )
+ p = module.params
+
+ global PUPPET_CMD
+ PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
+
+ if not PUPPET_CMD:
+ module.fail_json(
+ msg="Could not find puppet. Please ensure it is installed.")
+
+ global TIMEOUT_CMD
+ TIMEOUT_CMD = module.get_bin_path("timeout", False)
+
+ if p['manifest']:
+ if not os.path.exists(p['manifest']):
+ module.fail_json(
+ msg="Manifest file %(manifest)s not found." % dict(
+ manifest=p['manifest']))
+
+ # Check if puppet is disabled here
+ if not p['manifest']:
+ rc, stdout, stderr = module.run_command(
+ PUPPET_CMD + " config print agent_disabled_lockfile")
+ if os.path.exists(stdout.strip()):
+ module.fail_json(
+ msg="Puppet agent is administratively disabled.",
+ disabled=True)
+ elif rc != 0:
+ module.fail_json(
+ msg="Puppet agent state could not be determined.")
+
+ if module.params['facts'] and not module.check_mode:
+ _write_structured_data(
+ _get_facter_dir(),
+ module.params['facter_basename'],
+ module.params['facts'])
+
+ if TIMEOUT_CMD:
+ base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
+ timeout_cmd=TIMEOUT_CMD,
+ timeout=shlex_quote(p['timeout']),
+ puppet_cmd=PUPPET_CMD)
+ else:
+ base_cmd = PUPPET_CMD
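+    # At this point base_cmd looks like "timeout -s 9 30m /usr/bin/puppet" when
+    # the timeout utility exists, or just the puppet path otherwise (paths
+    # illustrative).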
+
+ if not p['manifest'] and not p['execute']:
+ cmd = ("%(base_cmd)s agent --onetime"
+ " --no-daemonize --no-usecacheonfailure --no-splay"
+ " --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd)
+ if p['puppetmaster']:
+ cmd += " --server %s" % shlex_quote(p['puppetmaster'])
+ if p['show_diff']:
+ cmd += " --show_diff"
+ if p['environment']:
+ cmd += " --environment '%s'" % p['environment']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if module.check_mode:
+ cmd += " --noop"
+        elif p['noop'] is not None:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['use_srv_records'] is not None:
+ if not p['use_srv_records']:
+ cmd += " --no-use_srv_records"
+ else:
+ cmd += " --use_srv_records"
+ else:
+ cmd = "%s apply --detailed-exitcodes " % base_cmd
+ if p['logdest'] == 'syslog':
+ cmd += "--logdest syslog "
+ if p['logdest'] == 'all':
+ cmd += " --logdest syslog --logdest stdout"
+ if p['modulepath']:
+ cmd += "--modulepath='%s'" % p['modulepath']
+ if p['environment']:
+ cmd += "--environment '%s' " % p['environment']
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if module.check_mode:
+ cmd += "--noop "
+ elif 'noop' in p:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['execute']:
+ cmd += " --execute '%s'" % p['execute']
+ else:
+ cmd += " %s" % shlex_quote(p['manifest'])
+ if p['summarize']:
+ cmd += " --summarize"
+ if p['debug']:
+ cmd += " --debug"
+ if p['verbose']:
+ cmd += " --verbose"
+ rc, stdout, stderr = module.run_command(cmd)
+
+ if rc == 0:
+ # success
+ module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
+ elif rc == 1:
+ # rc==1 could be because it's disabled
+ # rc==1 could also mean there was a compilation failure
+ disabled = "administratively disabled" in stdout
+ if disabled:
+ msg = "puppet is disabled"
+ else:
+ msg = "puppet did not run"
+ module.exit_json(
+ rc=rc, disabled=disabled, msg=msg,
+ error=True, stdout=stdout, stderr=stderr)
+ elif rc == 2:
+ # success with changes
+ module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
+ elif rc == 124:
+ # timeout
+ module.exit_json(
+ rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ # failure
+ module.fail_json(
+ rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
+ stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+            results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
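+
+# A minimal sketch: register the result and act on it; the key names follow
+# the RETURN section below and the failure condition is illustrative.
+- name: Check boto3 and botocore and register the outcome
+  community.general.python_requirements_info:
+    dependencies:
+      - boto3>1.6
+      - botocore<2
+  register: py_reqs
+
+- name: Fail when any requirement is missing or mismatched
+  ansible.builtin.fail:
+    msg: "mismatched: {{ py_reqs.mismatched }}, not found: {{ py_reqs.not_found }}"
+  when: py_reqs.mismatched | length > 0 or py_reqs.not_found | length > 0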
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
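+    # For example, 'boto3==1.6.1' yields groups ('boto3', '==', '1.6.1') and a
+    # bare 'ansible' yields ('ansible', None, None).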
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+            results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py
new file mode 100644
index 00000000..b80ed8cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: runit
+author:
+- James Sumners (@jsumners)
+short_description: Manage runit services
+description:
+ - Controls runit services on remote hosts using the sv utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: yes
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ service (sv restart) and C(killed) will always bounce the service (sv force-stop).
+ C(reloaded) will send a HUP (sv reload).
+ C(once) will run a normally downed sv once (sv once), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+      - Whether the service is enabled or not; if disabled, it also implies stopped.
+ type: bool
+ service_dir:
+ description:
+      - Directory that runsv watches for services.
+ type: str
+ default: /var/service
+ service_src:
+ description:
+      - Directory where services are defined, the source of symlinks to service_dir.
+ type: str
+ default: /etc/sv
+'''
+
+EXAMPLES = r'''
+- name: Start sv dnscache, if not running
+ community.general.runit:
+ name: dnscache
+ state: started
+
+- name: Stop sv dnscache, if running
+ community.general.runit:
+ name: dnscache
+ state: stopped
+
+- name: Kill sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: killed
+
+- name: Restart sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: restarted
+
+- name: Reload sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+
+- name: Use alternative sv directory location
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+ service_dir: /run/service
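+
+# A minimal sketch: manage only the enablement symlink via the enabled option
+# documented above; it can be combined with state as in the other examples.
+- name: Enable sv dnscache
+  community.general.runit:
+    name: dnscache
+    enabled: yes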
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs): # @FIXME remove unused function?
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Sv(object):
+ """
+ Main class that handles daemontools, can be subclassed and overridden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = []
+ self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
+ self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.get_status()
+ else:
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+ # full_state *may* contain information about the logger:
+ # "down: /etc/service/service-without-logger: 1s, normally up\n"
+ # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
+ full_state_no_logger = self.full_state.split("; ")[0]
+
+ m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r' (\d+)s', full_state_no_logger)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(r'^run:', full_state_no_logger):
+ self.state = 'started'
+ elif re.search(r'^down:', full_state_no_logger):
+ self.state = 'stopped'
+ else:
+ self.state = 'unknown'
+ return
+
+ def started(self):
+ return self.start()
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, 'start', self.svc_full])
+
+ def stopped(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, 'once', self.svc_full])
+
+ def reloaded(self):
+ return self.reload()
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
+
+ def restarted(self):
+ return self.restart()
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
+
+ def killed(self):
+ return self.kill()
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e))
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ dist=dict(type='str', default='runit'), # @FIXME unused param?
+ service_dir=dict(type='str', default='/var/service'),
+ service_src=dict(type='str', default='/etc/sv'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+
+ sv = Sv(module)
+ changed = False
+ orig_state = sv.report()
+
+ if enabled is not None and enabled != sv.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ sv.enable()
+ else:
+ sv.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != sv.state:
+ changed = True
+ if not module.check_mode:
+ getattr(sv, state)()
+
+ module.exit_json(changed=changed, sv=sv.report())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py
new file mode 100644
index 00000000..457e2e23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+- Manages SELinux file context mapping definitions.
+- Similar to the C(semanage fcontext) command.
+options:
+ target:
+ description:
+ - Target path (expression).
+ type: str
+ required: yes
+ aliases: [ path ]
+ ftype:
+ description:
+ - The file type that should have SELinux contexts applied.
+ - "The following file type options are available:"
+ - C(a) for all files,
+ - C(b) for block devices,
+ - C(c) for character devices,
+ - C(d) for directories,
+ - C(f) for regular files,
+ - C(l) for symbolic links,
+ - C(p) for named pipes,
+ - C(s) for socket files.
+ type: str
+ choices: [ a, b, c, d, f, l, p, s ]
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified target.
+ type: str
+ required: yes
+ seuser:
+ description:
+ - SELinux user for the specified target.
+ type: str
+ selevel:
+ description:
+ - SELinux range for the specified target.
+ type: str
+ aliases: [ serange ]
+ state:
+ description:
+ - Whether the SELinux file context must be C(absent) or C(present).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ - Note that this does not apply SELinux file contexts to existing files.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+      - Useful for scenarios (for example a chrooted environment) where you can't get the real SELinux state.
+ type: bool
+ default: no
+notes:
+- The changes are persistent across reboots.
+- The M(community.general.sefcontext) module does not modify existing files to the new
+ SELinux context(s), so it is advisable to first create the SELinux
+ file contexts before creating files, or run C(restorecon) manually
+ for the existing files that require the new SELinux file contexts.
+- Not applying SELinux fcontexts to existing files is a deliberate
+  decision as it would be unclear what the reported changes would entail,
+  and there's no guarantee that applying the SELinux fcontext does
+ not pick up other unrelated prior changes.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Allow apache to modify files in /srv/git_repos
+ community.general.sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_git_rw_content_t
+ state: present
+
+- name: Apply new SELinux file context to filesystem
+ ansible.builtin.command: restorecon -irv /srv/git_repos
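+
+# A minimal sketch: restrict a mapping to regular files only, using the ftype
+# option documented above (target and setype reuse the example values).
+- name: Map only regular files under /srv/git_repos
+  community.general.sefcontext:
+    target: '/srv/git_repos(/.*)?'
+    ftype: f
+    setype: httpd_git_rw_content_t
+    state: present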
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+# Add missing entries (backward compatible)
+if HAVE_SEOBJECT:
+ seobject.file_types.update(
+ a=seobject.SEMANAGE_FCONTEXT_ALL,
+ b=seobject.SEMANAGE_FCONTEXT_BLOCK,
+ c=seobject.SEMANAGE_FCONTEXT_CHAR,
+ d=seobject.SEMANAGE_FCONTEXT_DIR,
+ f=seobject.SEMANAGE_FCONTEXT_REG,
+ l=seobject.SEMANAGE_FCONTEXT_LINK,
+ p=seobject.SEMANAGE_FCONTEXT_PIPE,
+ s=seobject.SEMANAGE_FCONTEXT_SOCK,
+ )
+
+# Make backward compatible
+option_to_file_type_str = dict(
+ a='all files',
+ b='block device',
+ c='character device',
+ d='directory',
+ f='regular file',
+ l='symbolic link',
+ p='named pipe',
+ s='socket',
+)
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+
+    # Beware that records include a string representation of the file_type
+ record = (target, option_to_file_type_str[ftype])
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
+
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify SELinux file context mapping definition to the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+
+def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ target=dict(type='str', required=True, aliases=['path']),
+ ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())),
+ setype=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, state=state)
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main()
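A minimal sketch of the seobject calls this module wraps, for manual testing on an SELinux host (root required; the target path and httpd_sys_content_t type below are illustrative, not taken from the module):

    import seobject

    # mirror the module's backward-compat alias so the single-letter ftype works
    seobject.file_types.update(a=seobject.SEMANAGE_FCONTEXT_ALL)

    sefcontext = seobject.fcontextRecords('')    # '' selects the default policy store
    sefcontext.set_reload(False)                 # skip the policy reload while experimenting

    target = '/srv/web(/.*)?'
    # get_all() is keyed on (target, file-type string): ftype 'a' maps to 'all files'
    if (target, 'all files') not in sefcontext.get_all():
        sefcontext.add(target, 'httpd_sys_content_t', 'a', 's0', 'system_u')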
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py
new file mode 100644
index 00000000..0d1f9f59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Michael Scherer <misc@zarb.org>
+# inspired by code of github.com/dandiker/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: selinux_permissive
+short_description: Change permissive domain in SELinux policy
+description:
+ - Add and remove a domain from the list of permissive domains.
+options:
+ domain:
+ description:
+ - The domain that will be added or removed from the list of permissive domains.
+ type: str
+ required: true
+ aliases: [ name ]
+ permissive:
+ description:
+ - Indicate if the domain should or should not be set as permissive.
+ type: bool
+ required: true
+ no_reload:
+ description:
+ - Disable reloading of the SELinux policy after making a change to a domain's permissive setting.
+ - The default is C(no), which causes the policy to be reloaded when a domain changes state.
+ - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.
+ type: bool
+ default: no
+ store:
+ description:
+ - Name of the SELinux policy store to use.
+ type: str
+notes:
+ - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer).
+requirements: [ policycoreutils-python ]
+author:
+- Michael Scherer (@mscherer) <misc@zarb.org>
+'''
+
+EXAMPLES = r'''
+- name: Change the httpd_t domain to permissive
+ community.general.selinux_permissive:
+ name: httpd_t
+ permissive: true
+'''
+
+import traceback
+
+HAVE_SEOBJECT = False
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True, aliases=['name']),
+ store=dict(type='str', default=''),
+ permissive=dict(type='bool', required=True),
+ no_reload=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ # global vars
+ changed = False
+ store = module.params['store']
+ permissive = module.params['permissive']
+ domain = module.params['domain']
+ no_reload = module.params['no_reload']
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"),
+ exception=SEOBJECT_IMP_ERR)
+
+ try:
+ permissive_domains = seobject.permissiveRecords(store)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ # not supported on EL 6
+ if 'set_reload' in dir(permissive_domains):
+ permissive_domains.set_reload(not no_reload)
+
+ try:
+ all_domains = permissive_domains.get_all()
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ if permissive:
+ if domain not in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.add(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+ else:
+ if domain in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.delete(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+
+ module.exit_json(changed=changed, store=store,
+ permissive=permissive, domain=domain)
+
+
+if __name__ == '__main__':
+ main()
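The module body above reduces to a handful of permissiveRecords calls. A rough standalone equivalent of C(permissive: true) for an illustrative httpd_t domain (root and policycoreutils-python assumed):

    import seobject

    permissive_domains = seobject.permissiveRecords('')   # '' = default store
    if 'set_reload' in dir(permissive_domains):           # not available on EL 6
        permissive_domains.set_reload(True)

    if 'httpd_t' not in permissive_domains.get_all():
        permissive_domains.add('httpd_t')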
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py
new file mode 100644
index 00000000..7036dad9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
+# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: selogin
+short_description: Manages Linux user to SELinux user mapping
+description:
+ - Manages Linux user to SELinux user mapping.
+options:
+ login:
+ type: str
+ description:
+ - A Linux user.
+ required: true
+ seuser:
+ type: str
+ description:
+ - SELinux user name.
+ selevel:
+ type: str
+ aliases: [ serange ]
+ description:
+ - MLS/MCS Security Range (MLS/MCS systems only). SELinux Range for SELinux login mapping; defaults to the SELinux user record range.
+ default: s0
+ state:
+ type: str
+ description:
+ - Desired state of the mapping.
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+ - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+notes:
+ - The changes are persistent across reboots.
+ - Not tested on any Debian-based system.
+requirements: [ 'libselinux', 'policycoreutils' ]
+author:
+- Dan Keder (@dankeder)
+- Petr Lautrbach (@bachradsusi)
+- James Cassell (@jamescassell)
+'''
+
+EXAMPLES = '''
+- name: Modify the default user on the system to the guest_u user
+ community.general.selogin:
+ login: __default__
+ seuser: guest_u
+ state: present
+
+- name: Assign the gijoe user a range and map it to the staff_u user on an MLS machine
+ community.general.selogin:
+ login: gijoe
+ seuser: staff_u
+ serange: SystemLow-Secret
+ state: present
+
+- name: Assign all users in the engineering group to the staff_u user
+ community.general.selogin:
+ login: '%engineering'
+ seuser: staff_u
+ state: present
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
+ """ Add linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ if login not in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.add(login, seuser, serange)
+ else:
+ if all_logins[login][0] != seuser or all_logins[login][1] != serange:
+ change = True
+ if not module.check_mode:
+ selogin.modify(login, seuser, serange)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_login_del(module, login, seuser, do_reload, sestore=''):
+ """ Delete linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ if login in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.delete(login)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ login=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange'], default='s0'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ["state", "present", ["seuser"]]
+ ],
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ login = module.params['login']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'login': login,
+ 'seuser': seuser,
+ 'serange': serange,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
+ elif state == 'absent':
+ result['changed'] = semanage_login_del(module, login, seuser, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
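A condensed sketch of the add-or-modify decision in semanage_login_add(), runnable on an SELinux host with root; the 'gijoe'/'staff_u' values are illustrative, taken from the examples above:

    import seobject

    selogin = seobject.loginRecords('')
    selogin.set_reload(True)
    all_logins = selogin.get_all()        # maps login -> (seuser, serange, ...)

    login, seuser, serange = 'gijoe', 'staff_u', 's0'
    if login not in all_logins:
        selogin.add(login, seuser, serange)
    elif all_logins[login][0] != seuser or all_logins[login][1] != serange:
        selogin.modify(login, seuser, serange)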
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py
new file mode 100644
index 00000000..71df8d6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Dan Keder <dan.keder@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: seport
+short_description: Manages SELinux network port type definitions
+description:
+ - Manages SELinux network port type definitions.
+options:
+ ports:
+ description:
+ - Ports or port ranges.
+ - Can be a list (since 2.6) or a comma-separated string.
+ type: list
+ elements: str
+ required: true
+ proto:
+ description:
+ - Protocol for the specified port.
+ type: str
+ required: true
+ choices: [ tcp, udp ]
+ setype:
+ description:
+ - SELinux type for the specified port.
+ type: str
+ required: true
+ state:
+ description:
+ - Desired state of the port type definition.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+ - Run independently of the SELinux runtime state.
+ type: bool
+ default: no
+notes:
+ - The changes are persistent across reboots.
+ - Not tested on any Debian-based system.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dan Keder (@dankeder)
+'''
+
+EXAMPLES = r'''
+- name: Allow Apache to listen on tcp port 8888
+ community.general.seport:
+ ports: 8888
+ proto: tcp
+ setype: http_port_t
+ state: present
+
+- name: Allow sshd to listen on tcp port 8991
+ community.general.seport:
+ ports: 8991
+ proto: tcp
+ setype: ssh_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports: 10000-10100,10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports:
+ - 10000-10100
+ - 10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+'''
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_port_get_ports(seport, setype, proto):
+ """ Get the list of ports that have the specified type definition.
+
+ :param seport: Instance of seobject.portRecords
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: list
+ :return: List of ports that have the specified SELinux type.
+ """
+ records = seport.get_all_by_type()
+ if (setype, proto) in records:
+ return records[(setype, proto)]
+ else:
+ return []
+
+
+def semanage_port_get_type(seport, port, proto):
+ """ Get the SELinux type of the specified port.
+
+ :param seport: Instance of seobject.portRecords
+
+ :type port: str
+ :param port: Port or port range (example: "8080", "8080-9090")
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: tuple
+ :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
+ """
+ if isinstance(port, str):
+ ports = port.split('-', 1)
+ if len(ports) == 1:
+ ports.extend(ports)
+ else:
+ ports = (port, port)
+
+ key = (int(ports[0]), int(ports[1]), proto)
+
+ records = seport.get_all()
+ if key in records:
+ return records[key]
+ else:
+ return None
+
+
+def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
+ """ Add SELinux port type definition to the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port not in ports_by_type:
+ change = True
+ port_type = semanage_port_get_type(seport, port, proto)
+ if port_type is None and not module.check_mode:
+ seport.add(port, proto, serange, setype)
+ elif port_type is not None and not module.check_mode:
+ seport.modify(port, proto, serange, setype)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
+ """ Delete SELinux port type definition from the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port in ports_by_type:
+ change = True
+ if not module.check_mode:
+ seport.delete(port, proto)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ ports=dict(type='list', elements='str', required=True),
+ proto=dict(type='str', required=True, choices=['tcp', 'udp']),
+ setype=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ ports = module.params['ports']
+ proto = module.params['proto']
+ setype = module.params['setype']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'ports': ports,
+ 'proto': proto,
+ 'setype': setype,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
+ elif state == 'absent':
+ result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
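The two lookup helpers above differ in how portRecords is keyed, which drives the add-versus-modify decision. A sketch of the same logic for a single illustrative port (root assumed):

    import seobject

    seport = seobject.portRecords('')
    seport.set_reload(True)

    port, proto, setype = '8888', 'tcp', 'http_port_t'
    # get_all_by_type() is keyed on (setype, proto); get_all() on (low, high, proto)
    if port not in seport.get_all_by_type().get((setype, proto), []):
        if (8888, 8888, proto) in seport.get_all():
            seport.modify(port, proto, 's0', setype)
        else:
            seport.add(port, proto, 's0', setype)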
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py
new file mode 100644
index 00000000..ccb02a2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: shutdown
+short_description: Shut down a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use I(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+ - Shuts down a machine.
+version_added: "1.1.0"
+options:
+ delay:
+ description:
+ - Seconds to wait before shutdown. Passed as a parameter to the shutdown command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ msg:
+ description:
+ - Message to display to users before shutdown.
+ type: str
+ default: Shut down initiated by Ansible
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored on the remote node when searching for the C(shutdown) command.
+ type: list
+ elements: path
+ default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
+
+seealso:
+- module: ansible.builtin.reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+ - Amin Vakil (@aminvakil)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally shut down the machine with all defaults
+ community.general.shutdown:
+
+- name: Delay shutting down the remote node
+ community.general.shutdown:
+ delay: 60
+
+- name: Shut down a machine with shutdown command in unusual place
+ community.general.shutdown:
+ search_paths:
+ - '/lib/molly-guard'
+'''
+
+RETURN = r'''
+shutdown:
+ description: C(true) if the machine has been shut down.
+ returned: always
+ type: bool
+ sample: true
+'''
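To make the delay semantics concrete: on the platforms that take minutes, the documented value is floor-divided, so anything under 60 seconds becomes an immediate shutdown. A hypothetical helper illustrating this (the real conversion lives in the shutdown action plugin, not in this docs-only file):

    def delay_to_minutes(delay_seconds):
        # Linux/macOS/OpenBSD take minutes; round down, never negative
        return max(0, delay_seconds // 60)

    assert delay_to_minutes(59) == 0    # under a minute -> immediate
    assert delay_to_minutes(120) == 2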
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py
new file mode 100644
index 00000000..8ecdeb8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Paul Markham <pmarkham@netrefinery.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+ - Create, start, stop and delete Solaris zones.
+ - This module does not currently allow changing of options for a zone that has already been created.
+author:
+- Paul Markham (@pmarkham)
+requirements:
+ - Solaris 10 or 11
+options:
+ state:
+ description:
+ - C(present), configure and install the zone.
+ - C(installed), synonym for C(present).
+ - C(running), if the zone already exists, boot it; otherwise, configure and install
+ the zone first, then boot it.
+ - C(started), synonym for C(running).
+ - C(stopped), shut down a zone.
+ - C(absent), destroy the zone.
+ - C(configured), configure the zone so that it is ready to be attached.
+ - C(attached), attach a zone, but do not boot it.
+ - C(detached), shut down and detach a zone.
+ type: str
+ choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
+ default: present
+ name:
+ description:
+ - Zone name.
+ - A zone name must be a unique name.
+ - A zone name must begin with an alphanumeric character.
+ - The name can contain alphanumeric characters, underscores I(_), hyphens I(-), and periods I(.).
+ - The name cannot be longer than 64 characters.
+ type: str
+ required: true
+ path:
+ description:
+ - The path where the zone will be created. This is required when the zone is created, but not
+ used otherwise.
+ type: str
+ sparse:
+ description:
+ - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+ type: bool
+ default: no
+ root_password:
+ description:
+ - The password hash for the root account. If not specified, the zone's root account
+ will not have a password.
+ type: str
+ config:
+ description:
+ - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
+ and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
+ "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
+ type: str
+ default: ''
+ create_options:
+ description:
+ - 'Extra options to the zonecfg(1M) create command.'
+ type: str
+ default: ''
+ install_options:
+ description:
+ - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
+ use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
+ type: str
+ default: ''
+ attach_options:
+ description:
+ - 'Extra options to the zoneadm attach command. For example, this can be used to specify
+ whether a minimum or full update of packages is required and if any packages need to
+ be deleted. For valid values, see zoneadm(1M)'
+ type: str
+ default: ''
+ timeout:
+ description:
+ - Timeout, in seconds, for zone to boot.
+ type: int
+ default: 600
+'''
+
+EXAMPLES = '''
+- name: Create and install a zone, but don't boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: present
+ path: /zones/zone1
+ sparse: true
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Create and install a zone and boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Boot an already installed zone
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+
+- name: Stop a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: stopped
+
+- name: Destroy a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: absent
+
+- name: Detach a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: detached
+
+- name: Configure a zone, ready to be attached
+ community.general.solaris_zone:
+ name: zone1
+ state: configured
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Attach zone1
+ community.general.solaris_zone:
+ name: zone1
+ state: attached
+ attach_options: -u
+'''
+
+import os
+import platform
+import re
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zone(object):
+ def __init__(self, module):
+ self.changed = False
+ self.msg = []
+
+ self.module = module
+ self.path = self.module.params['path']
+ self.name = self.module.params['name']
+ self.sparse = self.module.params['sparse']
+ self.root_password = self.module.params['root_password']
+ self.timeout = self.module.params['timeout']
+ self.config = self.module.params['config']
+ self.create_options = self.module.params['create_options']
+ self.install_options = self.module.params['install_options']
+ self.attach_options = self.module.params['attach_options']
+
+ self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
+ self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
+ self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
+
+ if self.module.check_mode:
+ self.msg.append('Running in check mode')
+
+ if platform.system() != 'SunOS':
+ self.module.fail_json(msg='This module requires Solaris')
+
+ (self.os_major, self.os_minor) = platform.release().split('.')
+ if int(self.os_minor) < 10:
+ self.module.fail_json(msg='This module requires Solaris 10 or later')
+
+ match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
+ if not match:
+ self.module.fail_json(msg="Provided zone name is not a valid zone name. "
+ "Please refer documentation for correct zone name specifications.")
+
+ def configure(self):
+ if not self.path:
+ self.module.fail_json(msg='Missing required argument: path')
+
+ if not self.module.check_mode:
+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
+
+ if self.sparse:
+ t.write('create %s\n' % self.create_options)
+ self.msg.append('creating sparse-root zone')
+ else:
+ t.write('create -b %s\n' % self.create_options)
+ self.msg.append('creating whole-root zone')
+
+ t.write('set zonepath=%s\n' % self.path)
+ t.write('%s\n' % self.config)
+ t.close()
+
+ cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
+ os.unlink(t.name)
+
+ self.changed = True
+ self.msg.append('zone configured')
+
+ def install(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
+ if int(self.os_minor) == 10:
+ self.configure_sysid()
+ self.configure_password()
+ self.configure_ssh_keys()
+ self.changed = True
+ self.msg.append('zone installed')
+
+ def uninstall(self):
+ if self.is_installed():
+ if not self.module.check_mode:
+ cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone uninstalled')
+
+ def configure_sysid(self):
+ if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
+ os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
+
+ open('%s/root/noautoshutdown' % self.path, 'w').close()
+
+ node = open('%s/root/etc/nodename' % self.path, 'w')
+ node.write(self.name)
+ node.close()
+
+ id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
+ id.write('1 # System previously configured?\n')
+ id.write('1 # Bootparams succeeded?\n')
+ id.write('1 # System is on a network?\n')
+ id.write('1 # Extended network information gathered?\n')
+ id.write('0 # Autobinder succeeded?\n')
+ id.write('1 # Network has subnets?\n')
+ id.write('1 # root password prompted for?\n')
+ id.write('1 # locale and term prompted for?\n')
+ id.write('1 # security policy in place\n')
+ id.write('1 # NFSv4 domain configured\n')
+ id.write('0 # Auto Registration Configured\n')
+ id.write('vt100')
+ id.close()
+
+ def configure_ssh_keys(self):
+ rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
+ dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
+
+ if not os.path.isfile(rsa_key_file):
+ cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
+
+ if not os.path.isfile(dsa_key_file):
+ cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
+
+ def configure_password(self):
+ shadow = '%s/root/etc/shadow' % self.path
+ if self.root_password:
+ f = open(shadow, 'r')
+ lines = f.readlines()
+ f.close()
+
+ for i in range(0, len(lines)):
+ fields = lines[i].split(':')
+ if fields[0] == 'root':
+ fields[1] = self.root_password
+ lines[i] = ':'.join(fields)
+
+ f = open(shadow, 'w')
+ for line in lines:
+ f.write(line)
+ f.close()
+
+ def boot(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
+
+ """
+ The boot command can return before the zone has fully booted. This is especially
+ true on the first boot when the zone initializes the SMF services. Unless the zone
+ has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
+ Wait until the zone's console login is running; once that's running, consider the zone booted.
+ """
+
+ elapsed = 0
+ while True:
+ if elapsed > self.timeout:
+ self.module.fail_json(msg='timed out waiting for zone to boot')
+ rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
+ if rc == 0:
+ break
+ time.sleep(10)
+ elapsed += 10
+ self.changed = True
+ self.msg.append('zone booted')
+
+ def destroy(self):
+ if self.is_running():
+ self.stop()
+ if self.is_installed():
+ self.uninstall()
+ if not self.module.check_mode:
+ cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone deleted')
+
+ def stop(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone stopped')
+
+ def detach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone detached')
+
+ def attach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone attached')
+
+ def exists(self):
+ cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def is_running(self):
+ return self.status() == 'running'
+
+ def is_installed(self):
+ return self.status() == 'installed'
+
+ def is_configured(self):
+ return self.status() == 'configured'
+
+ def status(self):
+ cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return out.split(':')[2]
+ else:
+ return 'undefined'
+
+ def state_present(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+ self.install()
+
+ def state_running(self):
+ self.state_present()
+ if self.is_running():
+ self.msg.append('zone already running')
+ else:
+ self.boot()
+
+ def state_stopped(self):
+ if self.exists():
+ self.stop()
+ else:
+ self.module.fail_json(msg='zone does not exist')
+
+ def state_absent(self):
+ if self.exists():
+ if self.is_running():
+ self.stop()
+ self.destroy()
+ else:
+ self.msg.append('zone does not exist')
+
+ def state_configured(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+
+ def state_detached(self):
+ if not self.exists():
+ self.module.fail_json(msg='zone does not exist')
+ if self.is_configured():
+ self.msg.append('zone already detached')
+ else:
+ self.stop()
+ self.detach()
+
+ def state_attached(self):
+ if not self.exists():
+ self.msg.append('zone does not exist')
+ if self.is_configured():
+ self.attach()
+ else:
+ self.msg.append('zone already attached')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present',
+ choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),
+ path=dict(type='str'),
+ sparse=dict(type='bool', default=False),
+ root_password=dict(type='str', no_log=True),
+ timeout=dict(type='int', default=600),
+ config=dict(type='str', default=''),
+ create_options=dict(type='str', default=''),
+ install_options=dict(type='str', default=''),
+ attach_options=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ zone = Zone(module)
+
+ state = module.params['state']
+
+ if state == 'running' or state == 'started':
+ zone.state_running()
+ elif state == 'present' or state == 'installed':
+ zone.state_present()
+ elif state == 'stopped':
+ zone.state_stopped()
+ elif state == 'absent':
+ zone.state_absent()
+ elif state == 'configured':
+ zone.state_configured()
+ elif state == 'detached':
+ zone.state_detached()
+ elif state == 'attached':
+ zone.state_attached()
+ else:
+ module.fail_json(msg='Invalid state: %s' % state)
+
+ module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+
+if __name__ == '__main__':
+ main()
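The boot() wait loop is worth seeing in isolation: it polls for the zone's console ttymon rather than trusting the return of the boot command. A simplified sketch under the same assumptions (SunOS ps with -z support):

    import subprocess
    import time

    def wait_for_console(zone_name, timeout=600, interval=10):
        """Poll until the zone's console login is running, or time out."""
        elapsed = 0
        while elapsed <= timeout:
            cmd = 'ps -z %s -o args | grep -q "ttymon.*-d /dev/console"' % zone_name
            if subprocess.call(cmd, shell=True) == 0:
                return True
            time.sleep(interval)
            elapsed += interval
        return False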
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py
new file mode 100644
index 00000000..e9215670
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: svc
+author:
+- Brian Coca (@bcoca)
+short_description: Manage daemontools services
+description:
+ - Controls daemontools services on remote hosts using the svc utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ svc (svc -t) and C(killed) will always bounce the svc (svc -k).
+ C(reloaded) will send a SIGUSR1 (svc -1).
+ C(once) will run a normally downed svc once (svc -o), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ downed:
+ description:
+ - Should a 'down' file exist or not; if it exists it disables auto startup.
+ Defaults to no. Downed does not imply stopped.
+ type: bool
+ enabled:
+ description:
+ - Whether the service is enabled or not; if disabled it also implies stopped.
+ Take note that a service can be enabled and downed (no auto restart).
+ type: bool
+ service_dir:
+ description:
+ - Directory svscan watches for services.
+ type: str
+ default: /service
+ service_src:
+ description:
+ - Directory where services are defined, the source of symlinks to service_dir.
+ type: str
+ default: /etc/service
+'''
+
+EXAMPLES = '''
+- name: Start svc dnscache, if not running
+ community.general.svc:
+ name: dnscache
+ state: started
+
+- name: Stop svc dnscache, if running
+ community.general.svc:
+ name: dnscache
+ state: stopped
+
+- name: Kill svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: killed
+
+- name: Restart svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: restarted
+
+- name: Reload svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+
+- name: Using alternative svc directory location
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+ service_dir: /var/service
+'''
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs):
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Svc(object):
+ """
+ Main class that handles daemontools, can be subclassed and overridden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = ['/command', '/usr/local/bin']
+ self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.downed = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
+ self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.downed = os.path.lexists('%s/down' % self.svc_full)
+ self.get_status()
+ else:
+ self.downed = os.path.lexists('%s/down' % self.src_full)
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+ self.execute_command([self.svc_cmd, '-dx', self.src_full])
+
+ src_log = '%s/log' % self.src_full
+ if os.path.exists(src_log):
+ self.execute_command([self.svc_cmd, '-dx', src_log])
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+
+ m = re.search(r'\(pid (\d+)\)', out)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r'(\d+) seconds', out)
+ if m:
+ self.duration = m.group(1)
+
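+ # state is built in two steps: 'start'/'stopp' here, then 'ing'/'ed' appended below ('stopp' + 'ed' -> 'stopped')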
+ if re.search(' up ', out):
+ self.state = 'start'
+ elif re.search(' down ', out):
+ self.state = 'stopp'
+ else:
+ self.state = 'unknown'
+ return
+
+ if re.search(' want ', out):
+ self.state += 'ing'
+ else:
+ self.state += 'ed'
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+ def stopp(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ downed=dict(type='bool'),
+ service_dir=dict(type='str', default='/service'),
+ service_src=dict(type='str', default='/etc/service'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+ downed = module.params['downed']
+
+ svc = Svc(module)
+ changed = False
+ orig_state = svc.report()
+
+ if enabled is not None and enabled != svc.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ svc.enable()
+ else:
+ svc.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != svc.state:
+ changed = True
+ if not module.check_mode:
+ # note: 'once'[:-2] would be 'on', which is not a method; map it explicitly
+ getattr(svc, 'once' if state == 'once' else state[:-2])()
+
+ if downed is not None and downed != svc.downed:
+ changed = True
+ if not module.check_mode:
+ d_file = "%s/down" % svc.svc_full
+ try:
+ if downed:
+ open(d_file, "a").close()
+ else:
+ os.unlink(d_file)
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))
+
+ module.exit_json(changed=changed, svc=svc.report())
+
+
+if __name__ == '__main__':
+ main()
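The svstat parsing in Svc.get_status() can be sanity-checked without daemontools installed. A self-contained sketch; the sample lines are shaped like typical svstat output, not captured from a real system:

    import re

    def parse_svstat(out):
        """Mimics Svc.get_status() state derivation from one svstat line."""
        if re.search(' up ', out):
            state = 'start'
        elif re.search(' down ', out):
            state = 'stopp'   # 'stopp' + 'ed' -> 'stopped'
        else:
            return 'unknown'
        return state + ('ing' if re.search(' want ', out) else 'ed')

    assert parse_svstat('/service/dnscache: up (pid 123) 45 seconds') == 'started'
    assert parse_svstat('/service/dnscache: down 9 seconds, want up') == 'stopping'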
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py
new file mode 100644
index 00000000..2483fb36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019-2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syspatch
+
+short_description: Manage OpenBSD system patches
+
+
+description:
+ - "Manage OpenBSD system patches using syspatch."
+
+options:
+ apply:
+ type: bool
+ description:
+ - Apply all available system patches.
+ - By default, apply all patches.
+ - Deprecated. Will be removed in community.general 3.0.0.
+ default: yes
+ revert:
+ description:
+ - Revert system patches.
+ type: str
+ choices: [ all, one ]
+
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = '''
+- name: Apply all available system patches
+ community.general.syspatch:
+ apply: true
+
+- name: Revert last patch
+ community.general.syspatch:
+ revert: one
+
+- name: Revert all patches
+ community.general.syspatch:
+ revert: all
+
+# NOTE: You can reboot automatically if a patch requires it:
+- name: Apply all patches and store result
+ community.general.syspatch:
+ apply: true
+ register: syspatch
+
+- name: Reboot if patch requires it
+ ansible.builtin.reboot:
+ when: syspatch.reboot_needed
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+stdout:
+ description: syspatch standard output.
+ returned: always
+ type: str
+ sample: "001_rip6cksum"
+stderr:
+ description: syspatch standard error.
+ returned: always
+ type: str
+ sample: "syspatch: need root privileges"
+reboot_needed:
+ description: Whether or not a reboot is required after an update.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ apply=dict(type='bool', default=True, removed_in_version='3.0.0', removed_from_collection='community.general'),
+ revert=dict(type='str', choices=['all', 'one'])
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_one_of=[['apply', 'revert']]
+ )
+
+ result = syspatch_run(module)
+
+ module.exit_json(**result)
+
+
+def syspatch_run(module):
+ cmd = module.get_bin_path('syspatch', True)
+ changed = False
+ reboot_needed = False
+ warnings = []
+
+ # Set safe defaults for run_flag and check_flag
+ run_flag = ['-c']
+ check_flag = ['-c']
+ if module.params['revert']:
+ check_flag = ['-l']
+
+ if module.params['revert'] == 'all':
+ run_flag = ['-R']
+ else:
+ run_flag = ['-r']
+ elif module.params['apply']:
+ check_flag = ['-c']
+ run_flag = []
+
+ # Run check command
+ rc, out, err = module.run_command([cmd] + check_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+
+ if len(out) > 0:
+ # Changes pending
+ change_pending = True
+ else:
+ # No changes pending
+ change_pending = False
+
+ if module.check_mode:
+ changed = change_pending
+ elif change_pending:
+ rc, out, err = module.run_command([cmd] + run_flag)
+
+ # Workaround syspatch ln bug:
+ # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
+ if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('create unique kernel') >= 0:
+ # Kernel update applied
+ reboot_needed = True
+ elif out.lower().find('syspatch updated itself') >= 0:
+ warnings.append('Syspatch was updated. Please run syspatch again.')
+
+ # If no stdout, then warn user
+ if len(out) == 0:
+ warnings.append('syspatch had suggested changes, but stdout was empty.')
+
+ changed = True
+ else:
+ changed = False
+
+ return dict(
+ changed=changed,
+ reboot_needed=reboot_needed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
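The flag selection is the heart of syspatch_run(); a condensed, pure-function restatement of it for clarity (assumption: behaviour exactly as written above, with revert taking precedence over apply; returns (check_flag, run_flag)):

    def select_flags(revert=None, apply=True):
        if revert:
            return (['-l'], ['-R'] if revert == 'all' else ['-r'])
        if apply:
            return (['-c'], [])
        return (['-c'], ['-c'])   # the module's safe defaults

    assert select_flags(revert='all') == (['-l'], ['-R'])
    assert select_flags(revert='one') == (['-l'], ['-r'])
    assert select_flags() == (['-c'], [])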
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py
new file mode 100644
index 00000000..a1956129
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sysupgrade
+short_description: Manage OpenBSD system upgrades
+version_added: 1.1.0
+description:
+ - Manage OpenBSD system upgrades using sysupgrade.
+options:
+ snapshot:
+ description:
+ - Apply the latest snapshot.
+ - Otherwise release will be applied.
+ default: no
+ type: bool
+ force:
+ description:
+ - Force upgrade (for snapshots only).
+ default: no
+ type: bool
+ keep_files:
+ description:
+ - Keep the files under /home/_sysupgrade.
+ - By default, the files will be deleted after the upgrade.
+ default: no
+ type: bool
+ fetch_only:
+ description:
+ - Fetch and verify files and create /bsd.upgrade but do not reboot.
+ - Set to C(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples.
+ default: yes
+ type: bool
+ installurl:
+ description:
+ - OpenBSD mirror top-level URL for fetching an upgrade.
+ - By default, the mirror URL is pulled from /etc/installurl.
+ type: str
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = r'''
+- name: Upgrade to latest release
+ community.general.sysupgrade:
+ register: sysupgrade
+
+- name: Upgrade to latest snapshot
+ community.general.sysupgrade:
+ snapshot: yes
+ installurl: https://cloudflare.cdn.openbsd.org/pub/OpenBSD
+ register: sysupgrade
+
+- name: Reboot to apply upgrade if needed
+ ansible.builtin.reboot:
+ when: sysupgrade.changed
+
+# Note: Ansible will error when running this way due to how
+# the reboot is forcefully handled by sysupgrade:
+
+- name: Have sysupgrade automatically reboot
+ community.general.sysupgrade:
+ fetch_only: no
+ ignore_errors: yes
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+stdout:
+ description: Sysupgrade standard output.
+ returned: always
+ type: str
+stderr:
+ description: Sysupgrade standard error.
+ returned: always
+ type: str
+ sample: "sysupgrade: need root privileges"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sysupgrade_run(module):
+ sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True)
+ cmd = [sysupgrade_bin]
+ changed = False
+ warnings = []
+
+ # Setup command flags
+ if module.params['snapshot']:
+ run_flag = ['-s']
+ if module.params['force']:
+ # Force only applies to snapshots
+ run_flag.append('-f')
+ else:
+ # release flag
+ run_flag = ['-r']
+
+ if module.params['keep_files']:
+ run_flag.append('-k')
+
+ if module.params['fetch_only']:
+ run_flag.append('-n')
+
+ # installurl must be the last argument
+ if module.params['installurl']:
+ run_flag.append(module.params['installurl'])
+
+ rc, out, err = module.run_command(cmd + run_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('already on latest snapshot') >= 0:
+ changed = False
+ elif out.lower().find('upgrade on next reboot') >= 0:
+ changed = True
+
+ return dict(
+ changed=changed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ snapshot=dict(type='bool', default=False),
+ fetch_only=dict(type='bool', default=True),
+ force=dict(type='bool', default=False),
+ keep_files=dict(type='bool', default=False),
+ installurl=dict(type='str'),
+ ),
+ supports_check_mode=False,
+ )
+ return_dict = sysupgrade_run(module)
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
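A pure-function sketch of the argument assembly in sysupgrade_run(), mainly to make the ordering rule visible (installurl must come last); defaults mirror the argument_spec above:

    def build_args(snapshot=False, force=False, keep_files=False,
                   fetch_only=True, installurl=None):
        args = ['-s'] if snapshot else ['-r']
        if snapshot and force:          # force only applies to snapshots
            args.append('-f')
        if keep_files:
            args.append('-k')
        if fetch_only:
            args.append('-n')
        if installurl:
            args.append(installurl)     # must stay the last argument
        return args

    assert build_args(snapshot=True, force=True) == ['-s', '-f', '-n']
    assert build_args(fetch_only=False) == ['-r']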
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py
new file mode 100644
index 00000000..d10dd9bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py
@@ -0,0 +1,905 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock and of the hardware clock.
+ If you want to set up NTP, use the M(ansible.builtin.service) module.
+ - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
+ - Several different tools are used depending on the OS/Distribution involved.
+ For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
+ On SmartOS C(sm-set-timezone) is used, on macOS C(systemsetup), and on BSD C(/etc/localtime) is modified.
+ On AIX, C(chtz) is used.
+ - As of Ansible 2.3 support was added for SmartOS and BSDs.
+ - As of Ansible 2.4 support was added for macOS.
+ - As of Ansible 2.9 support was added for AIX 6.1+.
+ - Windows and HPUX are not supported; please let us know if you find any other OS/distro in which this fails.
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ - Default is to keep current setting.
+ - B(At least one of name and hwclock are required.)
+ type: str
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ - Default is to keep current setting.
+ - Note that changing this option is not recommended, and it may fail
+ to apply, especially on virtual environments such as AWS.
+ - B(At least one of name and hwclock are required.)
+ - I(Only used on Linux.)
+ type: str
+ aliases: [ rtc ]
+ choices: [ local, UTC ]
+notes:
+ - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the timezone within a zone.
+ - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+ - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+ - Shinichi TAMURA (@tmshn)
+ - Jasper Lievisse Adriaanse (@jasperla)
+ - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+ description: The differences about the given arguments.
+ returned: success
+ type: complex
+ contains:
+ before:
+ description: The values before the change.
+ type: dict
+ after:
+ description: The values after the change.
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+ community.general.timezone:
+ name: Asia/Tokyo
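+
+# A hedged sketch (assumes a Linux host, since hwclock is only used on
+# Linux): set both the system timezone and the hardware clock mode.
+- name: Set timezone to Europe/Berlin and keep the hardware clock in UTC
+ community.general.timezone:
+ name: Europe/Berlin
+ hwclock: UTC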
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+ It does not use load_platform_subclass() because it needs to judge based
+ on whether the `timedatectl` command exists and is available.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if platform.system() == 'Linux':
+ timedatectl = module.get_bin_path('timedatectl')
+ if timedatectl is not None:
+ rc, stdout, stderr = module.run_command(timedatectl)
+ if rc == 0:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+ module.warn('timedatectl command was found but not usable: %s. Falling back to another method.' % stderr)
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ elif re.match('^joyent_.*Z', platform.version()):
+ # platform.system() returns SunOS, which is too broad. So look at the
+ # platform version instead. However we have to ensure that we're not
+ # running in the global zone where changing the timezone has no effect.
+ zonename_cmd = module.get_bin_path('zonename')
+ if zonename_cmd is not None:
+ (rc, stdout, _) = module.run_command(zonename_cmd)
+ if rc == 0 and stdout.strip() == 'global':
+ module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+ return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+ elif platform.system() == 'Darwin':
+ return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+ elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+ return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+ elif platform.system() == 'AIX':
+ AIXoslevel = int(platform.version() + platform.release())
+ if AIXoslevel >= 61:
+ return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+ else:
+ module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+ # `self.value` holds the values for each param in each phase.
+ # Initially there's only info for the "planned" phase, but the
+ # `self.check()` function will fill it out.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+ This is just the wrapper of module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+ This is just the wrapper of module.run_command().
+
+ Args:
+ *commands: The command to execute.
+ It will be concatenated with single space.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+ phase1, phase2: The names of phase to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ return tzfile
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+ For timezone setting, it edits the following file and reflect changes:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name=None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+ # It's fine if all three config files don't exist
+ allow_no_file = dict(
+ name=True,
+ hwclock=True,
+ adjtime=True
+ )
+
+ regexps = dict(
+ name=None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ dist_regexps = dict(
+ SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+ redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ )
+
+ dist_tzline_format = dict(
+ SuSE='TIMEZONE="%s"\n',
+ redhat='ZONE="%s"\n'
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ if 'name' in self.value:
+ tzfile = self._verify_timezone()
+ # `--remove-destination` is needed if /etc/localtime is a symlink so
+ # that it overwrites it instead of following it.
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ if 'name' in self.value:
+ self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+ '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS/SUSE
+ if self.module.get_bin_path('tzdata-update') is not None:
+ # tzdata-update cannot update the timezone if /etc/localtime is
+ # a symlink so we have to use cp to update the time zone which
+ # was set above.
+ if not os.path.islink('/etc/localtime'):
+ self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+ # else:
+ # self.update_timezone = 'cp --remove-destination ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ try:
+ f = open(self.conf_files['name'], 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, 'name'):
+ # If the config file doesn't exist detect the distribution and set regexps.
+ distribution = get_distribution()
+ if distribution == 'SuSE':
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+ else:
+ self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+ else:
+ # The key for timezone might be `ZONE` or `TIMEZONE`
+ # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+ # So check the content of /etc/sysconfig/clock and decide which key to use.
+ sysconfig_clock = f.read()
+ f.close()
+ if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+
+ def _allow_ioerror(self, err, key):
+ # In some cases, even if the target file does not exist,
+ # simply creating it may solve the problem.
+ # In such cases, we should continue the configuration rather than aborting.
+ if err.errno != errno.ENOENT:
+ # If the error is not ENOENT ("No such file or directory"),
+ # (e.g., permission error, etc), we should abort.
+ return False
+ return self.allow_no_file.get(key, False)
+
+ def _edit_file(self, filename, regexp, value, key):
+ """Replace the first matched line with given `value`.
+
+ If `regexp` matches more than once, all matched lines other than the first are deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+ key: For what key the file is being edited.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ lines = []
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ lines = file.readlines()
+ file.close()
+ # Find all matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def _get_value_from_config(self, key, phase):
+ filename = self.conf_files[key]
+ try:
+ file = open(filename, mode='r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ if key == 'hwclock':
+ return 'n/a'
+ elif key == 'adjtime':
+ return 'UTC'
+ elif key == 'name':
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ if key == 'hwclock':
+ # If we cannot find UTC in the config that's fine.
+ return 'n/a'
+ elif key == 'adjtime':
+ # If we cannot find UTC/LOCAL in /etc/adjtime, that means UTC
+ # will be used by default.
+ return 'UTC'
+ elif key == 'name':
+ if phase == 'before':
+ # In 'before' phase UTC/LOCAL doesn't need to be set in
+ # the timezone config file, so we ignore this error.
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+ else:
+ if key == 'hwclock':
+ # convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def get(self, key, phase):
+ planned = self.value[key]['planned']
+ if key == 'hwclock':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the value in the config file is the same as the 'planned'
+ # value, we need to check /etc/adjtime.
+ value = self._get_value_from_config('adjtime', phase)
+ elif key == 'name':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the planned value is the same as the one in the config file,
+ # we need to check if /etc/localtime is also set to the 'planned' zone.
+ if os.path.islink('/etc/localtime'):
+ # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+ # to set, we need to return the TZ which the symlink points to.
+ if os.path.exists('/etc/localtime'):
+ # We use readlink() because on some distros zone files are symlinks
+ # to other zone files, so it's hard to get which TZ is actually set
+ # if we follow the symlink.
+ path = os.readlink('/etc/localtime')
+ linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
+ if linktz:
+ valuelink = linktz.group(1)
+ if valuelink != planned:
+ value = valuelink
+ else:
+ # Set current TZ to 'n/a' if the symlink points to a path
+ # which isn't a zone file.
+ value = 'n/a'
+ else:
+ # Set current TZ to 'n/a' if the symlink to the zone file is broken.
+ value = 'n/a'
+ else:
+ # If /etc/localtime is not a symlink best we can do is compare it with
+ # the 'planned' zone info file and return 'n/a' if they are different.
+ try:
+ if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+ return 'n/a'
+ except Exception:
+ return 'n/a'
+ else:
+ self.abort('unknown parameter "%s"' % key)
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value,
+ key='name')
+ for cmd in self.update_timezone:
+ self.execute(cmd)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ utc = 'no'
+ else:
+ option = '--utc'
+ utc = 'yes'
+ if self.conf_files['hwclock'] is not None:
+ self._edit_file(filename=self.conf_files['hwclock'],
+ regexp=self.regexps['hwclock'],
+ value='UTC=%s\n' % utc,
+ key='hwclock')
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+class SmartOSTimezone(Timezone):
+ """This is a Timezone manipulation class for SmartOS instances.
+
+ It uses the C(sm-set-timezone) utility to set the timezone, and
+ inspects C(/etc/default/init) to determine the current timezone.
+
+ NB: A zone needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(SmartOSTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
+ if not self.settimezone:
+ module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/default/init`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ try:
+ f = open('/etc/default/init', 'r')
+ for line in f:
+ m = re.match('^TZ=(.*)$', line.strip())
+ if m:
+ return m.groups()[0]
+ except Exception:
+ self.module.fail_json(msg='Failed to read /etc/default/init')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through sm-set-timezone, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ cmd = 'sm-set-timezone %s' % value
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # sm-set-timezone knows no state and will always set the timezone.
+ # XXX: https://github.com/joyent/smtools/pull/2
+ m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+ if not (m and m.groups()[-1] == value):
+ self.module.fail_json(msg='Failed to set timezone')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class DarwinTimezone(Timezone):
+ """This is the timezone implementation for Darwin which, unlike other *BSD
+ implementations, uses the `systemsetup` command on Darwin to check/set
+ the timezone.
+ """
+
+ regexps = dict(
+ name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(DarwinTimezone, self).__init__(module)
+ self.systemsetup = module.get_bin_path('systemsetup', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_current_timezone(self, phase):
+ """Lookup the current timezone via `systemsetup -gettimezone`."""
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+ return self.status[phase]
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ # Lookup the list of supported timezones via `systemsetup -listtimezones`.
+ # Note: Skip the first line that contains the label 'Time Zones:'
+ out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+ tz_list = list(map(lambda x: x.strip(), out))
+ if tz not in tz_list:
+ self.abort('given timezone "%s" is not available' % tz)
+ return tz
+
+ def get(self, key, phase):
+ if key == 'name':
+ status = self._get_current_timezone(phase)
+ value = self.regexps[key].search(status).group(1)
+ return value
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.execute(self.systemsetup, '-settimezone', value, log=True)
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+ """This is the timezone implementation for *BSD which works simply through
+ updating the `/etc/localtime` symlink to point to a valid timezone name under
+ `/usr/share/zoneinfo`.
+ """
+
+ def __init__(self, module):
+ super(BSDTimezone, self).__init__(module)
+
+ def __get_timezone(self):
+ zoneinfo_dir = '/usr/share/zoneinfo/'
+ localtime_file = '/etc/localtime'
+
+ # Strategy 1:
+ # If /etc/localtime does not exist, assume the timezone is UTC.
+ if not os.path.exists(localtime_file):
+ self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ # Strategy 2:
+ # Follow symlink of /etc/localtime
+ zoneinfo_file = localtime_file
+ while not zoneinfo_file.startswith(zoneinfo_dir):
+ try:
+ zoneinfo_file = os.readlink(zoneinfo_file)
+ except OSError:
+ # OSError means "end of symlink chain" or broken link.
+ break
+ else:
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 3:
+ # (If /etc/localtime is not symlinked)
+ # Check all files in /usr/share/zoneinfo and return first non-link match.
+ for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
+ for fname in sorted(fnames):
+ zoneinfo_file = os.path.join(dname, fname)
+ if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 4:
+ # As a fall-back, return 'UTC' as default assumption.
+ self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ def get(self, key, phase):
+ """Lookup the current timezone by resolving `/etc/localtime`."""
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ # First determine if the requested timezone is valid by looking in
+ # the zoneinfo directory.
+ zonefile = '/usr/share/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+ # Now (somewhat) atomically update the symlink by creating a new
+ # symlink and move it into place. Otherwise we have to remove the
+ # original symlink and create the new symlink, however that would
+ # create a race condition in case another process tries to read
+ # /etc/localtime between removal and creation.
+ suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+ new_localtime = '/etc/localtime.' + suffix
+
+ try:
+ os.symlink(zonefile, new_localtime)
+ os.rename(new_localtime, '/etc/localtime')
+ except Exception:
+ os.remove(new_localtime)
+ self.module.fail_json(msg='Could not update /etc/localtime')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+ """This is a Timezone manipulation class for AIX instances.
+
+ It uses the C(chtz) utility to set the timezone, and
+ inspects C(/etc/environment) to determine the current timezone.
+
+ While AIX time zones can be set using two formats (POSIX and
+ Olson), the preferred method is Olson.
+ See the following article for more information:
+ https://developer.ibm.com/articles/au-aix-posix/
+
+ NB: AIX needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(AIXTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+ def __get_timezone(self):
+ """ Return the current value of TZ= in /etc/environment """
+ try:
+ f = open('/etc/environment', 'r')
+ etcenvironment = f.read()
+ f.close()
+ except Exception:
+ self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+ match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/environment`. If anything else
+ is requested, or if the TZ field is not set, we fail.
+ """
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through chtz, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+ # It will only return non-zero if the chtz command itself fails, it does not check for
+ # valid timezones. We need to perform a basic check to confirm that the timezone
+ # definition exists in /usr/share/lib/zoneinfo
+ # This does mean that we can only support Olson for now. The below commented out regex
+ # detects Olson date formats, so in the future we could detect Posix or Olson and
+ # act accordingly.
+
+ # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+ # if not regex_olson.match(value):
+ # msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+ # self.module.fail_json(msg=msg)
+
+ # First determine if the requested timezone is valid by looking in the zoneinfo
+ # directory.
+ zonefile = '/usr/share/lib/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+ # Now set the TZ using chtz
+ cmd = 'chtz %s' % value
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # The best condition check we can do is to check the value of TZ after making the
+ # change.
+ TZ = self.__get_timezone()
+ if TZ != value:
+ msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+ self.module.fail_json(msg=msg)
+
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ module = AnsibleModule(
+ argument_spec=dict(
+ hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+ name=dict(type='str'),
+ ),
+ required_one_of=[
+ ['hwclock', 'name']
+ ],
+ supports_check_mode=True,
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ (after, planned) = tz.diff('after', 'planned').values()
+ if after != planned:
+ tz.abort('still not in the desired state, though changes have been made - '
+ 'planned: %s, after: %s' % (str(planned), str(after)))
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py
new file mode 100644
index 00000000..c6df6fe6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py
@@ -0,0 +1,594 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
+# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
+# Copyright: (c) 2013, James Martin <jmartin@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+author:
+ - Aleksey Ovcharenko (@ovcharenko)
+ - Jarno Keskikangas (@pyykkis)
+ - Ahti Kitsik (@ahtik)
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ type: str
+ choices: [ disabled, enabled, reloaded, reset ]
+ default:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ type: str
+ choices: [ allow, deny, reject ]
+ aliases: [ policy ]
+ direction:
+ description:
+ - Select direction for a rule or default policy command. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ choices: [ in, incoming, out, outgoing, routed ]
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ type: str
+ choices: [ 'on', 'off', low, medium, high, full ]
+ insert:
+ description:
+ - Insert the corresponding rule as rule number NUM.
+ - Note that ufw numbers rules starting with 1.
+ type: int
+ insert_relative_to:
+ description:
+ - Allows the index in I(insert) to be interpreted relative to a position.
+ - C(zero) interprets the rule number as an absolute index (i.e. 1 is
+ the first rule).
+ - C(first-ipv4) interprets the rule number relative to the index of the
+ first IPv4 rule, or relative to the position where the first IPv4 rule
+ would be if there is currently none.
+ - C(last-ipv4) interprets the rule number relative to the index of the
+ last IPv4 rule, or relative to the position where the last IPv4 rule
+ would be if there is currently none.
+ - C(first-ipv6) interprets the rule number relative to the index of the
+ first IPv6 rule, or relative to the position where the first IPv6 rule
+ would be if there is currently none.
+ - C(last-ipv6) interprets the rule number relative to the index of the
+ last IPv6 rule, or relative to the position where the last IPv6 rule
+ would be if there is currently none.
+ type: str
+ choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
+ default: zero
+ rule:
+ description:
+ - Add firewall rule.
+ type: str
+ choices: [ allow, deny, limit, reject ]
+ log:
+ description:
+ - Log new connections matched to this rule.
+ type: bool
+ default: false
+ from_ip:
+ description:
+ - Source IP address.
+ type: str
+ default: any
+ aliases: [ from, src ]
+ from_port:
+ description:
+ - Source port.
+ type: str
+ to_ip:
+ description:
+ - Destination IP address.
+ type: str
+ default: any
+ aliases: [ dest, to ]
+ to_port:
+ description:
+ - Destination port.
+ type: str
+ aliases: [ port ]
+ proto:
+ description:
+ - TCP/IP protocol.
+ type: str
+ choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
+ aliases: [ protocol ]
+ name:
+ description:
+ - Use profile located in C(/etc/ufw/applications.d).
+ type: str
+ aliases: [ app ]
+ delete:
+ description:
+ - Delete rule.
+ type: bool
+ default: false
+ interface:
+ description:
+ - Specify interface for the rule. The direction (in or out) used
+ for the interface depends on the value of I(direction). See
+ I(interface_in) and I(interface_out) for routed rules that need
+ to supply both an input and output interface. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ aliases: [ if ]
+ interface_in:
+ description:
+ - Specify input interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_out) for routed rules.
+ type: str
+ aliases: [ if_in ]
+ version_added: '0.2.0'
+ interface_out:
+ description:
+ - Specify output interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_in) for routed rules.
+ type: str
+ aliases: [ if_out ]
+ version_added: '0.2.0'
+ route:
+ description:
+ - Apply the rule to routed/forwarded packets.
+ type: bool
+ default: false
+ comment:
+ description:
+ - Add a comment to the rule. Requires UFW version >=0.35.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Allow everything and enable UFW
+ community.general.ufw:
+ state: enabled
+ policy: allow
+
+- name: Set logging
+ community.general.ufw:
+ logging: 'on'
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+- community.general.ufw:
+ rule: reject
+ port: auth
+ log: yes
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+- community.general.ufw:
+ rule: limit
+ port: ssh
+ proto: tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=yes
+# or a separate state=reset task)
+- community.general.ufw:
+ rule: allow
+ name: OpenSSH
+
+- name: Delete OpenSSH rule
+ community.general.ufw:
+ rule: allow
+ name: OpenSSH
+ delete: yes
+
+- name: Deny all access to port 53
+ community.general.ufw:
+ rule: deny
+ port: '53'
+
+- name: Allow port range 60000-61000
+ community.general.ufw:
+ rule: allow
+ port: 60000:61000
+ proto: tcp
+
+- name: Allow all access to tcp port 80
+ community.general.ufw:
+ rule: allow
+ port: '80'
+ proto: tcp
+
+- name: Allow all access from RFC1918 networks to this host
+ community.general.ufw:
+ rule: allow
+ src: '{{ item }}'
+ loop:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+
+- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
+ community.general.ufw:
+ rule: deny
+ proto: udp
+ src: 1.2.3.4
+ port: '514'
+ comment: Block syslog
+
+- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ community.general.ufw:
+ rule: allow
+ interface: eth0
+ direction: in
+ proto: udp
+ src: 1.2.3.5
+ from_port: '5469'
+ dest: 1.2.3.4
+ to_port: '5469'
+
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ src: 2001:db8::/32
+ port: '25'
+
+- name: Deny all IPv6 traffic to tcp port 20 on this host
+ # this should be the first IPv6 rule
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+
+- name: Deny all IPv4 traffic to tcp port 20 on this host
+ # This should be the third to last IPv4 rule
+ # (insert: -1 addresses the second to last IPv4 rule;
+ # so the new rule will be inserted before the second
+ # to last IPv4 rule, and will become the third to last
+ # IPv4 rule.)
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: -1
+ insert_relative_to: last-ipv4
+
+# Can be used to further restrict a global FORWARD policy set to allow
+- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
+ community.general.ufw:
+ rule: deny
+ route: yes
+ src: 1.2.3.0/24
+ dest: 4.5.6.0/24
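+
+# A hedged sketch (the interface names are illustrative assumptions):
+# routed rules may name both the input and output interface explicitly.
+- name: Allow forwarded traffic entering on eth0 and leaving on eth1
+ community.general.ufw:
+ rule: allow
+ route: yes
+ interface_in: eth0
+ interface_out: eth1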
+'''
+
+import re
+
+from operator import itemgetter
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def compile_ipv4_regexp():
+ r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
+ r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
+ return re.compile(r)
+
+
+def compile_ipv6_regexp():
+ """
+ validation pattern provided by:
+ https://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses#answer-17871737
+ """
+ r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
+ r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
+ r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
+ r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
+ r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
+ r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
+ r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
+ r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
+ return re.compile(r)
+
+
+def main():
+ command_keys = ['state', 'default', 'rule', 'logging']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
+ direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
+ delete=dict(type='bool', default=False),
+ route=dict(type='bool', default=False),
+ insert=dict(type='int'),
+ insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
+ rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
+ interface=dict(type='str', aliases=['if']),
+ interface_in=dict(type='str', aliases=['if_in']),
+ interface_out=dict(type='str', aliases=['if_out']),
+ log=dict(type='bool', default=False),
+ from_ip=dict(type='str', default='any', aliases=['from', 'src']),
+ from_port=dict(type='str'),
+ to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
+ to_port=dict(type='str', aliases=['port']),
+ proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
+ name=dict(type='str', aliases=['app']),
+ comment=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'proto', 'logging'],
+ # Mutual exclusivity with `interface` implied by `required_by`.
+ ['direction', 'interface_in'],
+ ['direction', 'interface_out'],
+ ],
+ required_one_of=[command_keys],
+ required_by=dict(
+ interface=('direction', ),
+ ),
+ )
+
+ cmds = []
+
+ ipv4_regexp = compile_ipv4_regexp()
+ ipv6_regexp = compile_ipv6_regexp()
+
+ def filter_line_that_start_with(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
+
+ def filter_line_that_contains(pattern, content):
+ return [line for line in content.splitlines(True) if pattern in line]
+
+ def filter_line_that_not_contains(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if pattern not in line])
+
+ def filter_line_that_match_func(match_func, content):
+ return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
+
+ def filter_line_that_contains_ipv4(content):
+ return filter_line_that_match_func(ipv4_regexp.search, content)
+
+ def filter_line_that_contains_ipv6(content):
+ return filter_line_that_match_func(ipv6_regexp.search, content)
+
+ def is_starting_by_ipv4(ip):
+ return ipv4_regexp.match(ip) is not None
+
+ def is_starting_by_ipv6(ip):
+ return ipv6_regexp.match(ip) is not None
+
+ def execute(cmd, ignore_error=False):
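+ # Descriptive note: each entry of `cmd` is a list whose last element
+ # is a command fragment; its first element doubles as an include flag
+ # (for single-element entries the fragment is its own flag).
+ # filter(itemgetter(0), ...) drops entries whose flag is falsy, and
+ # map(itemgetter(-1), ...) keeps just the fragment text.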
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
+
+ if rc != 0 and not ignore_error:
+ module.fail_json(msg=err or out, commands=cmds)
+
+ return out
+
+ def get_current_rules():
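+ # Descriptive note: this builds the equivalent of
+ # grep -h '^### tuple' <every known user rules file>.
+ # Errors are ignored because not all of these paths exist on
+ # every distribution.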
+ user_rules_files = ["/lib/ufw/user.rules",
+ "/lib/ufw/user6.rules",
+ "/etc/ufw/user.rules",
+ "/etc/ufw/user6.rules",
+ "/var/lib/ufw/user.rules",
+ "/var/lib/ufw/user6.rules"]
+
+ cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
+
+ cmd.extend([[f] for f in user_rules_files])
+ return execute(cmd, ignore_error=True)
+
+ def ufw_version():
+ """
+ Returns the major and minor version of ufw installed on the system.
+ """
+ out = execute([[ufw_bin], ["--version"]])
+
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
+ if matches is None:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+ params = module.params
+
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+ grep_bin = module.get_bin_path('grep', True)
+
+ # Save the pre state and rules in order to recognize changes
+ pre_state = execute([[ufw_bin], ['status verbose']])
+ pre_rules = get_current_rules()
+
+ changed = False
+
+ # Execute filter
+ for (command, value) in commands.items():
+
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = {'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset'}
+
+ if value in ['reloaded', 'reset']:
+ changed = True
+
+ if module.check_mode:
+ # "active" would also match "inactive", hence the space
+ ufw_enabled = pre_state.find(" active") != -1
+ if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
+ changed = True
+ else:
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
+ extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
+ if extract:
+ current_level = extract.group(2)
+ current_on_off_value = extract.group(1)
+ if value != "off":
+ if current_on_off_value == "off":
+ changed = True
+ elif value != "on" and value != current_level:
+ changed = True
+ elif current_on_off_value != "off":
+ changed = True
+ else:
+ changed = True
+
+ if not module.check_mode:
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
+ module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
+ if module.check_mode:
+ regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
+ extract = re.search(regexp, pre_state)
+ if extract is not None:
+ current_default_values = {}
+ current_default_values["incoming"] = extract.group(1)
+ current_default_values["outgoing"] = extract.group(2)
+ current_default_values["routed"] = extract.group(3)
+ v = current_default_values[params['direction'] or 'incoming']
+ if v not in (value, 'disabled'):
+ changed = True
+ else:
+ changed = True
+ else:
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ if params['direction'] not in ['in', 'out', None]:
+ module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
+ if not params['route'] and params['interface_in'] and params['interface_out']:
+ module.fail_json(msg='Only route rules can combine '
+ 'interface_in and interface_out')
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application] [comment COMMENT]
+ cmd.append([module.boolean(params['route']), 'route'])
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ if params['insert'] is not None:
+ relative_to_cmd = params['insert_relative_to']
+ if relative_to_cmd == 'zero':
+ insert_to = params['insert']
+ else:
+ (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
+ numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
+ lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
+ lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
+ last_number = max([no for (no, ipv6) in lines]) if lines else 0
+ has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
+ has_ipv6 = any([ipv6 for (no, ipv6) in lines])
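+ # Worked example (numbers assumed for illustration): with four
+ # IPv4 rules numbered 1-4 and two IPv6 rules numbered 5-6,
+ # insert=0 with insert_relative_to=first-ipv6 gives relative_to=5,
+ # so insert_to=5 and the new rule lands just before the first
+ # IPv6 rule.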
+ if relative_to_cmd == 'first-ipv4':
+ relative_to = 1
+ elif relative_to_cmd == 'last-ipv4':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
+ elif relative_to_cmd == 'first-ipv6':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
+ elif relative_to_cmd == 'last-ipv6':
+ relative_to = last_number if has_ipv6 else last_number + 1
+ insert_to = params['insert'] + relative_to
+ if insert_to > last_number:
+ # ufw does not like it when the insert number is larger than the
+ # maximal rule number for IPv4/IPv6.
+ insert_to = None
+ cmd.append([insert_to is not None, "insert %s" % insert_to])
+ cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
+ cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
+ cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
+ ('to_ip', "to %s"), ('to_port', "port %s"),
+ ('proto', "proto %s"), ('name', "app '%s'")]:
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ ufw_major, ufw_minor, dummy = ufw_version()
+ # comment is supported only in ufw versions 0.35 and later
+ if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
+ cmd.append([params['comment'], "comment '%s'" % params['comment']])
+
+ rules_dry = execute(cmd)
+
+ if module.check_mode:
+
+ nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
+
+ if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
+
+ rules_dry = filter_line_that_start_with("### tuple", rules_dry)
+ # ufw dry-run doesn't emit all rules, so compare only the IPv4 or IPv6 rules
+ if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
+ if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
+ changed = True
+ elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
+ if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
+ changed = True
+ elif pre_rules != rules_dry:
+ changed = True
+
+ # Get the new state
+ if module.check_mode:
+ return module.exit_json(changed=changed, commands=cmds)
+ else:
+ post_state = execute([[ufw_bin], ['status'], ['verbose']])
+ if not changed:
+ post_rules = get_current_rules()
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py
new file mode 100644
index 00000000..15fd9c62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Bryan Gurney (@bgurney-rh)
+
+module: vdo
+
+short_description: Module to control VDO
+
+
+description:
+ - This module controls the VDO dedupe and compression device.
+ - VDO, or Virtual Data Optimizer, is a device-mapper target that
+ provides inline block-level deduplication, compression, and
+ thin provisioning capabilities to primary storage.
+
+options:
+ name:
+ description:
+ - The name of the VDO volume.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether this VDO volume should be "present" or "absent".
+ If a "present" VDO volume does not exist, it will be
+ created. If a "present" VDO volume already exists, it
+ will be modified, by updating the configuration, which
+ will take effect when the VDO volume is restarted.
+ Not all parameters of an existing VDO volume can be
+ modified; the "statusparamkeys" list contains the
+ parameters that can be modified after creation. If an
+ "absent" VDO volume does not exist, it will not be
+ removed.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ activated:
+ description:
+ - The "activate" status for a VDO volume. If this is set
+ to "no", the VDO volume cannot be started, and it will
+ not start on system startup. However, on initial
+ creation, a VDO volume with "activated" set to "no"
+ will be running until stopped. This is the default
+ behavior of the "vdo create" command; it provides the
+ user an opportunity to write a base amount of metadata
+ (filesystem, LVM headers, etc.) to the VDO volume prior
+ to stopping the volume, and leaving it deactivated
+ until ready to use.
+ type: bool
+ running:
+ description:
+ - Whether this VDO volume is running.
+ - A VDO volume must be activated in order to be started.
+ type: bool
+ device:
+ description:
+ - The full path of the device to use for VDO storage.
+ - This is required if "state" is "present".
+ type: str
+ logicalsize:
+ description:
+ - The logical size of the VDO volume (in megabytes, or
+ LVM suffix format). If not specified for a new volume,
+ this defaults to the same size as the underlying storage
+ device, which is specified in the 'device' parameter.
+ Existing volumes will maintain their size if the
+ logicalsize parameter is not specified, or is smaller
+ than or identical to the current size. If the specified
+ size is larger than the current size, a growlogical
+ operation will be performed.
+ type: str
+ deduplication:
+ description:
+ - Configures whether deduplication is enabled. The
+ default for a created volume is 'enabled'. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ compression:
+ description:
+ - Configures whether compression is enabled. The default
+ for a created volume is 'enabled'. Existing volumes
+ will maintain their previously configured setting unless
+ a different value is specified in the playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ blockmapcachesize:
+ description:
+ - The amount of memory allocated for caching block map
+ pages, in megabytes (or may be issued with an LVM-style
+ suffix of K, M, G, or T). The default (and minimum)
+ value is 128M. The value specifies the size of the
+ cache; there is a 15% memory usage overhead. Each 1.25G
+ of block map covers 1T of logical blocks, therefore a
+ small amount of block map cache memory can cache a
+ significantly large amount of block map data. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ readcache:
+ description:
+ - Enables or disables the read cache. The default is
+ 'disabled'. Choosing 'enabled' enables a read cache
+ which may improve performance for workloads of high
+ deduplication, read workloads with a high level of
+ compression, or on hard disk storage. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ choices: [ disabled, enabled ]
+ readcachesize:
+ description:
+ - Specifies the extra VDO device read cache size in
+ megabytes. This is in addition to a system-defined
+ minimum. Using a value with a suffix of K, M, G, or T
+ is optional. The default value is 0. 1.125 MB of
+ memory per bio thread will be used per 1 MB of read
+ cache specified (for example, a VDO volume configured
+ with 4 bio threads will have a read cache memory usage
+ overhead of 4.5 MB per 1 MB of read cache specified).
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ emulate512:
+ description:
+ - Enables 512-byte emulation mode, allowing drivers or
+ filesystems to access the VDO volume at 512-byte
+ granularity, instead of the default 4096-byte granularity.
+ Default is 'disabled'; only recommended when a driver
+ or filesystem requires 512-byte sector level access to
+ a device. This option is only available when creating
+ a new volume, and cannot be changed for an existing
+ volume.
+ type: bool
+ default: false
+ growphysical:
+ description:
+ - Specifies whether to attempt to execute a growphysical
+ operation, if there is enough unused space on the
+ device. A growphysical operation will be executed if
+ there is at least 64 GB of free space, relative to the
+ previous physical size of the affected VDO volume.
+ type: bool
+ default: false
+ slabsize:
+ description:
+ - The size of the increment by which the physical size of
+ a VDO volume is grown, in megabytes (or may be issued
+ with an LVM-style suffix of K, M, G, or T). Must be a
+ power of two between 128M and 32G. The default is 2G,
+ which supports volumes having a physical size up to 16T.
+ The maximum, 32G, supports a physical size of up to 256T.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ writepolicy:
+ description:
+ - Specifies the write policy of the VDO volume. The
+ 'sync' mode acknowledges writes only after data is on
+ stable storage. The 'async' mode acknowledges writes
+ when data has been cached for writing to stable
+ storage. The default (and highly recommended) 'auto'
+ mode checks the storage device to determine whether it
+ supports flushes. Devices that support flushes will
+ result in a VDO volume in 'async' mode, while devices
+        that do not support flushes will run in 'sync' mode.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is
+ specified in the playbook.
+ type: str
+ choices: [ async, auto, sync ]
+ indexmem:
+ description:
+ - Specifies the amount of index memory in gigabytes. The
+ default is 0.25. The special decimal values 0.25, 0.5,
+ and 0.75 can be used, as can any positive integer.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ indexmode:
+ description:
+ - Specifies the index mode of the Albireo index. The
+ default is 'dense', which has a deduplication window of
+ 1 GB of index memory per 1 TB of incoming data,
+ requiring 10 GB of index data on persistent storage.
+ The 'sparse' mode has a deduplication window of 1 GB of
+ index memory per 10 TB of incoming data, but requires
+ 100 GB of index data on persistent storage. This option
+ is only available when creating a new volume, and cannot
+ be changed for an existing volume.
+ type: str
+ choices: [ dense, sparse ]
+ ackthreads:
+ description:
+ - Specifies the number of threads to use for
+ acknowledging completion of requested VDO I/O operations.
+ Valid values are integer values from 1 to 100 (lower
+ numbers are preferable due to overhead). The default is
+ 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ biothreads:
+ description:
+ - Specifies the number of threads to use for submitting I/O
+ operations to the storage device. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 4.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ cputhreads:
+ description:
+ - Specifies the number of threads to use for CPU-intensive
+ work such as hashing or compression. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 2.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ logicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on logical
+ block addresses. Valid values are integer values from
+ 1 to 100 (lower numbers are preferable due to overhead).
+ The default is 1. Existing volumes will maintain their
+ previously configured setting unless a different value
+ is specified in the playbook.
+ type: str
+ physicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on physical
+ block addresses. Valid values are integer values from
+ 1 to 16 (lower numbers are preferable due to overhead).
+ The physical space used by the VDO volume must be
+ larger than (slabsize * physicalthreads). The default
+ is 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+notes:
+ - In general, the default thread configuration should be used.
+requirements:
+ - PyYAML
+ - kmod-kvdo
+ - vdo
+'''
+
+EXAMPLES = r'''
+- name: Create 2 TB VDO volume vdo1 on device /dev/md0
+ community.general.vdo:
+ name: vdo1
+ state: present
+ device: /dev/md0
+ logicalsize: 2T
+
+- name: Remove VDO volume vdo1
+ community.general.vdo:
+ name: vdo1
+ state: absent
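+
+# A sketch of adjusting tunables on an existing volume; the option names
+# come from the documentation above, but the values are only illustrative.
+- name: Switch VDO volume vdo1 to sync writes with compression disabled
+  community.general.vdo:
+    name: vdo1
+    state: present
+    writepolicy: sync
+    compression: disabled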
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import re
+import traceback
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ YAML_IMP_ERR = traceback.format_exc()
+ HAS_YAML = False
+
+
+# Generate a list of VDO volumes, whether they are running or stopped.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return vdolist A list of currently created VDO volumes.
+def inventory_vdos(module, vdocmd):
+ rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
+
+
+ vdolist = []
+
+ if (rc == 2 and
+ re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
+ # If there is no /etc/vdoconf.yml file, assume there are no
+ # VDO volumes. Return an empty list of VDO volumes.
+ return vdolist
+
+ if rc != 0:
+ module.fail_json(msg="Inventorying VDOs failed: %s"
+ % vdostatusout, rc=rc, err=err)
+
+    # safe_load avoids executing arbitrary tags from the YAML input.
+    vdostatusyaml = yaml.safe_load(vdostatusout)
+ if vdostatusyaml is None:
+ return vdolist
+
+ vdoyamls = vdostatusyaml['VDOs']
+
+ if vdoyamls is not None:
+ vdolist = vdoyamls.keys()
+
+ return vdolist
+
+
+def list_running_vdos(module, vdocmd):
+    rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
+    # filter() is an iterator on Python 3; materialize it so membership
+    # tests can be run against the result more than once.
+    runningvdolist = list(filter(None, vdolistout.split('\n')))
+    return runningvdolist
+
+
+# Start a VDO volume.
+#
+# @param module The AnsibleModule object.
+# @param vdoname The name of the VDO volume to start.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return rc The return code of the 'vdo start' command.
+def start_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("started VDO volume %s" % vdoname)
+
+ return rc
+
+
+def stop_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("stopped VDO volume %s" % vdoname)
+
+ return rc
+
+
+def activate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s activate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("activated VDO volume %s" % vdoname)
+
+ return rc
+
+
+def deactivate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s deactivate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("deactivated VDO volume %s" % vdoname)
+
+ return rc
+
+
+# Generate a string containing options to pass to the 'vdo' command.
+# Note that a 'create' operation will pass more options than a
+# 'modify' operation.
+#
+# @param params A dictionary of parameters, and their values
+# (values of 'None' and/or nonexistent values are ignored).
+#
+# @return vdocmdoptions A string to be used in a 'vdo <action>' command.
+def add_vdooptions(params):
+    options = []
+
+ if ('logicalsize' in params) and (params['logicalsize'] is not None):
+ options.append("--vdoLogicalSize=" + params['logicalsize'])
+
+ if (('blockmapcachesize' in params) and
+ (params['blockmapcachesize'] is not None)):
+ options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
+
+ if ('readcache' in params) and (params['readcache'] == 'enabled'):
+ options.append("--readCache=enabled")
+
+ if ('readcachesize' in params) and (params['readcachesize'] is not None):
+ options.append("--readCacheSize=" + params['readcachesize'])
+
+ if ('slabsize' in params) and (params['slabsize'] is not None):
+ options.append("--vdoSlabSize=" + params['slabsize'])
+
+ if ('emulate512' in params) and (params['emulate512']):
+ options.append("--emulate512=enabled")
+
+ if ('indexmem' in params) and (params['indexmem'] is not None):
+ options.append("--indexMem=" + params['indexmem'])
+
+ if ('indexmode' in params) and (params['indexmode'] == 'sparse'):
+ options.append("--sparseIndex=enabled")
+
+ # Entering an invalid thread config results in a cryptic
+ # 'Could not set up device mapper for %s' error from the 'vdo'
+ # command execution. The dmsetup module on the system will
+ # output a more helpful message, but one would have to log
+ # onto that system to read the error. For now, heed the thread
+ # limit warnings in the DOCUMENTATION section above.
+ if ('ackthreads' in params) and (params['ackthreads'] is not None):
+ options.append("--vdoAckThreads=" + params['ackthreads'])
+
+ if ('biothreads' in params) and (params['biothreads'] is not None):
+ options.append("--vdoBioThreads=" + params['biothreads'])
+
+ if ('cputhreads' in params) and (params['cputhreads'] is not None):
+ options.append("--vdoCpuThreads=" + params['cputhreads'])
+
+ if ('logicalthreads' in params) and (params['logicalthreads'] is not None):
+ options.append("--vdoLogicalThreads=" + params['logicalthreads'])
+
+ if (('physicalthreads' in params) and
+ (params['physicalthreads'] is not None)):
+ options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
+
+ vdocmdoptions = ' '.join(options)
+ return vdocmdoptions
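+
+
+# For illustration (hypothetical input): add_vdooptions({'logicalsize': '2T',
+# 'slabsize': '2G'}) would return "--vdoLogicalSize=2T --vdoSlabSize=2G".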
+
+
+def run_module():
+
+ # Define the available arguments/parameters that a user can pass to
+ # the module.
+ # Defaults for VDO parameters are None, in order to facilitate
+ # the detection of parameters passed from the playbook.
+ # Creation param defaults are determined by the creation section.
+
+ module_args = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ activated=dict(type='bool'),
+ running=dict(type='bool'),
+ growphysical=dict(type='bool', default=False),
+ device=dict(type='str'),
+ logicalsize=dict(type='str'),
+ deduplication=dict(type='str', choices=['disabled', 'enabled']),
+ compression=dict(type='str', choices=['disabled', 'enabled']),
+ blockmapcachesize=dict(type='str'),
+ readcache=dict(type='str', choices=['disabled', 'enabled']),
+ readcachesize=dict(type='str'),
+ emulate512=dict(type='bool', default=False),
+ slabsize=dict(type='str'),
+ writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
+ indexmem=dict(type='str'),
+ indexmode=dict(type='str', choices=['dense', 'sparse']),
+ ackthreads=dict(type='str'),
+ biothreads=dict(type='str'),
+ cputhreads=dict(type='str'),
+ logicalthreads=dict(type='str'),
+ physicalthreads=dict(type='str')
+ )
+
+ # Seed the result dictionary in the object. There will be an
+ # 'invocation' dictionary added with 'module_args' (arguments
+ # given).
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
+
+ vdocmd = module.get_bin_path("vdo", required=True)
+ if not vdocmd:
+ module.fail_json(msg='VDO is not installed.', **result)
+
+ # Print a pre-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+
+ runningvdolist = list_running_vdos(module, vdocmd)
+
+ # Collect the name of the desired VDO volume, and its state. These will
+ # determine what to do.
+ desiredvdo = module.params['name']
+ state = module.params['state']
+
+ # Create a desired VDO volume that doesn't exist yet.
+ if (desiredvdo not in vdolist) and (state == 'present'):
+ device = module.params['device']
+ if device is None:
+ module.fail_json(msg="Creating a VDO volume requires specifying "
+ "a 'device' in the playbook.")
+
+ # Create a dictionary of the options from the AnsibleModule
+ # parameters, compile the vdo command options, and run "vdo create"
+ # with those options.
+ # Since this is a creation of a new VDO volume, it will contain all
+        # of the parameters given by the playbook; the rest will
+ # assume default values.
+ options = module.params
+ vdocmdoptions = add_vdooptions(options)
+ rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
+ % (vdocmd, desiredvdo, device,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Creating VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if (module.params['compression'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableCompression --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if ((module.params['deduplication'] is not None) and
+ module.params['deduplication'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+        # 'activated' and 'running' are boolean parameters; compare with
+        # 'is False' so that an unset (None) value leaves the state alone.
+        if module.params['activated'] is False:
+            deactivate_vdo(module, desiredvdo, vdocmd)
+
+        if module.params['running'] is False:
+            stop_vdo(module, desiredvdo, vdocmd)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("created VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # Modify the current parameters of a VDO that exists.
+ if (desiredvdo in vdolist) and (state == 'present'):
+ rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
+        vdostatusyaml = yaml.safe_load(vdostatusoutput)
+
+ # An empty dictionary to contain dictionaries of VDO statistics
+ processedvdos = {}
+
+ vdoyamls = vdostatusyaml['VDOs']
+ if vdoyamls is not None:
+ processedvdos = vdoyamls
+
+ # The 'vdo status' keys that are currently modifiable.
+ statusparamkeys = ['Acknowledgement threads',
+ 'Bio submission threads',
+ 'Block map cache size',
+ 'CPU-work threads',
+ 'Logical threads',
+ 'Physical threads',
+ 'Read cache',
+ 'Read cache size',
+ 'Configured write policy',
+ 'Compression',
+ 'Deduplication']
+
+ # A key translation table from 'vdo status' output to Ansible
+ # module parameters. This covers all of the 'vdo status'
+ # parameter keys that could be modified with the 'vdo'
+ # command.
+ vdokeytrans = {
+ 'Logical size': 'logicalsize',
+ 'Compression': 'compression',
+ 'Deduplication': 'deduplication',
+ 'Block map cache size': 'blockmapcachesize',
+ 'Read cache': 'readcache',
+ 'Read cache size': 'readcachesize',
+ 'Configured write policy': 'writepolicy',
+ 'Acknowledgement threads': 'ackthreads',
+ 'Bio submission threads': 'biothreads',
+ 'CPU-work threads': 'cputhreads',
+ 'Logical threads': 'logicalthreads',
+ 'Physical threads': 'physicalthreads'
+ }
+
+ # Build a dictionary of the current VDO status parameters, with
+ # the keys used by VDO. (These keys will be converted later.)
+ currentvdoparams = {}
+
+ # Build a "lookup table" dictionary containing a translation table
+ # of the parameters that can be modified
+ modtrans = {}
+
+ for statfield in statusparamkeys:
+ if statfield in processedvdos[desiredvdo]:
+ currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
+
+ modtrans[statfield] = vdokeytrans[statfield]
+
+ # Build a dictionary of current parameters formatted with the
+ # same keys as the AnsibleModule parameters.
+ currentparams = {}
+ for paramkey in modtrans.keys():
+            currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
+
+ diffparams = {}
+
+ # Check for differences between the playbook parameters and the
+ # current parameters. This will need a comparison function;
+ # since AnsibleModule params are all strings, compare them as
+ # strings (but if it's None; skip).
+ for key in currentparams.keys():
+ if module.params[key] is not None:
+ if str(currentparams[key]) != module.params[key]:
+ diffparams[key] = module.params[key]
+
+ if diffparams:
+ vdocmdoptions = add_vdooptions(diffparams)
+ if vdocmdoptions:
+ rc, out, err = module.run_command("%s modify --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Modifying VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'deduplication' in diffparams.keys():
+ dedupemod = diffparams['deduplication']
+ if dedupemod == 'disabled':
+ rc, out, err = module.run_command("%s "
+ "disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if dedupemod == 'enabled':
+ rc, out, err = module.run_command("%s "
+ "enableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'compression' in diffparams.keys():
+ compressmod = diffparams['compression']
+ if compressmod == 'disabled':
+ rc, out, err = module.run_command("%s disableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if compressmod == 'enabled':
+ rc, out, err = module.run_command("%s enableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+            if 'writepolicy' in diffparams.keys():
+                writepolmod = diffparams['writepolicy']
+                # 'auto', 'sync' and 'async' are all applied with the same
+                # 'changeWritePolicy' command, so one branch covers them.
+                rc, out, err = module.run_command("%s "
+                                                  "changeWritePolicy "
+                                                  "--name=%s "
+                                                  "--writePolicy=%s"
+                                                  % (vdocmd,
+                                                     desiredvdo,
+                                                     writepolmod))
+
+                if rc == 0:
+                    result['changed'] = True
+                else:
+                    module.fail_json(msg="Changing write policy on "
+                                         "VDO volume %s failed."
+                                     % desiredvdo, rc=rc, err=err)
+
+        # Process the size parameters, to determine whether a growPhysical
+        # or growLogical operation needs to occur.
+ sizeparamkeys = ['Logical size', ]
+
+ currentsizeparams = {}
+ sizetrans = {}
+ for statfield in sizeparamkeys:
+ currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
+ sizetrans[statfield] = vdokeytrans[statfield]
+
+ sizeparams = {}
+ for paramkey in currentsizeparams.keys():
+ sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
+
+ diffsizeparams = {}
+ for key in sizeparams.keys():
+ if module.params[key] is not None:
+ if str(sizeparams[key]) != module.params[key]:
+ diffsizeparams[key] = module.params[key]
+
+ if module.params['growphysical']:
+ physdevice = module.params['device']
+ rc, devsectors, err = module.run_command("blockdev --getsz %s"
+ % (physdevice))
+            # 512-byte sectors -> 4 KB blocks; integer division keeps the
+            # count an int on Python 3.
+            devblocks = int(devsectors) // 8
+ dmvdoname = ('/dev/mapper/' + desiredvdo)
+ currentvdostats = (processedvdos[desiredvdo]
+ ['VDO statistics']
+ [dmvdoname])
+ currentphysblocks = currentvdostats['physical blocks']
+
+ # Set a growPhysical threshold to grow only when there is
+ # guaranteed to be more than 2 slabs worth of unallocated
+ # space on the device to use. For now, set to device
+ # size + 64 GB, since 32 GB is the largest possible
+ # slab size.
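+            # (64 GB expressed in 4 KB blocks: 64 * 1024 ** 3 // 4096 == 16777216.)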
+ growthresh = devblocks + 16777216
+
+ if currentphysblocks > growthresh:
+ result['changed'] = True
+ rc, out, err = module.run_command("%s growPhysical --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if 'logicalsize' in diffsizeparams.keys():
+ result['changed'] = True
+ vdocmdoptions = ("--vdoLogicalSize=" +
+ diffsizeparams['logicalsize'])
+ rc, out, err = module.run_command("%s growLogical --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+
+ vdoactivatestatus = processedvdos[desiredvdo]['Activate']
+
+        if ((module.params['activated'] is False) and
+                (vdoactivatestatus == 'enabled')):
+            deactivate_vdo(module, desiredvdo, vdocmd)
+            result['changed'] = True
+
+        if ((module.params['activated'] is True) and
+                (vdoactivatestatus == 'disabled')):
+            activate_vdo(module, desiredvdo, vdocmd)
+            result['changed'] = True
+
+        if ((module.params['running'] is False) and
+                (desiredvdo in runningvdolist)):
+            stop_vdo(module, desiredvdo, vdocmd)
+            result['changed'] = True
+
+ # Note that a disabled VDO volume cannot be started by the
+ # 'vdo start' command, by design. To accurately track changed
+ # status, don't try to start a disabled VDO volume.
+ # If the playbook contains 'activated: yes', assume that
+ # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
+ # will have the activated status prior to the activate_vdo()
+ # call.
+        if (((vdoactivatestatus == 'enabled') or
+                (module.params['activated'] is True)) and
+                (module.params['running'] is True) and
+                (desiredvdo not in runningvdolist)):
+            start_vdo(module, desiredvdo, vdocmd)
+            result['changed'] = True
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ if diffparams:
+ module.log("modified parameters of VDO volume %s" % desiredvdo)
+
+ module.exit_json(**result)
+
+ # Remove a desired VDO that currently exists.
+ if (desiredvdo in vdolist) and (state == 'absent'):
+ rc, out, err = module.run_command("%s remove --name=%s"
+ % (vdocmd, desiredvdo))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Removing VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("removed VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # fall through
+ # The state for the desired VDO volume was absent, and it does
+ # not exist. Print a post-run list of VDO volumes in the result
+ # object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("received request to remove non-existent VDO volume %s"
+ % desiredvdo)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py
new file mode 100644
index 00000000..8d0700ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Joseph Benden <joe@benden.us>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: xfconf
+author:
+ - "Joseph Benden (@jbenden)"
+ - "Alexei Znamensky (@russoz)"
+short_description: Edit XFCE4 Configurations
+description:
+ - This module allows for the manipulation of Xfce 4 Configuration via
+ xfconf-query. Please see the xfconf-query(1) man pages for more details.
+options:
+ channel:
+ description:
+      - An Xfconf preference channel is a top-level tree key inside the
+        Xfconf repository that corresponds to the location where all
+        application properties/keys are stored. See man xfconf-query(1).
+ required: yes
+ type: str
+ property:
+ description:
+      - An Xfce preference key is an element in the Xfconf repository
+        that corresponds to an application preference. See man xfconf-query(1).
+ required: yes
+ type: str
+ value:
+ description:
+ - Preference properties typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". For array mode, use a list of values. See man xfconf-query(1)
+ type: list
+ elements: raw
+ value_type:
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ For array mode, use a list of types.
+ type: list
+ elements: str
+ choices: [ int, uint, bool, float, double, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the property/value.
+ choices: [ get, present, absent ]
+ default: "present"
+ force_array:
+ description:
+      - Force array even if only one element.
+ type: bool
+ default: 'no'
+ aliases: ['array']
+ version_added: 1.0.0
+'''
+
+EXAMPLES = """
+- name: Change the DPI to "192"
+ xfconf:
+ channel: "xsettings"
+ property: "/Xft/DPI"
+ value_type: "int"
+ value: "192"
+
+- name: Set workspace names (4)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main', 'Work1', 'Work2', 'Tmp']
+
+- name: Set workspace names (1)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main']
+ force_array: yes
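+
+# A minimal read-back sketch: with state "get" the current value is returned
+# in the module's "value" result field; the register name is arbitrary.
+- name: Read the current DPI
+  xfconf:
+    channel: xsettings
+    property: /Xft/DPI
+    state: get
+  register: xfconf_dpi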
+"""
+
+RETURN = '''
+ channel:
+ description: The channel specified in the module parameters
+ returned: success
+ type: str
+ sample: "xsettings"
+ property:
+ description: The property specified in the module parameters
+ returned: success
+ type: str
+ sample: "/Xft/DPI"
+ value_type:
+ description:
+ - The type of the value that was changed (C(none) for C(get) and C(reset)
+ state). Either a single string value or a list of strings for array
+ types.
+ returned: success
+ type: string or list of strings
+ sample: '"int" or ["str", "str", "str"]'
+ value:
+ description:
+ - The value of the preference key after executing the module. Either a
+ single string value or a list of strings for array types.
+ returned: success
+ type: string or list of strings
+ sample: '"192" or ["orange", "yellow", "violet"]'
+ previous_value:
+ description:
+ - The value of the preference key before executing the module (C(none) for
+ C(get) state). Either a single string value or a list of strings for array
+ types.
+ returned: success
+ type: string or list of strings
+ sample: '"96" or ["red", "blue", "green"]'
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+ ModuleHelper, CmdMixin, StateMixin, ArgFormat
+)
+
+
+def fix_bool(value):
+ vl = value.lower()
+ return vl if vl in ("true", "false") else value
+
+
+@ArgFormat.stars_deco(1)
+def values_fmt(values, value_types):
+ result = []
+ for value, value_type in zip(values, value_types):
+ if value_type == 'bool':
+ value = fix_bool(value)
+ result.append('--type')
+ result.append('{0}'.format(value_type))
+ result.append('--set')
+ result.append('{0}'.format(value))
+ return result
+
+
+class XFConfException(Exception):
+ pass
+
+
+class XFConfProperty(CmdMixin, StateMixin, ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(default="present",
+ choices=("present", "get", "absent"),
+ type='str'),
+ channel=dict(required=True, type='str'),
+ property=dict(required=True, type='str'),
+ value_type=dict(required=False, type='list',
+ elements='str', choices=('int', 'uint', 'bool', 'float', 'double', 'string')),
+ value=dict(required=False, type='list', elements='raw'),
+ force_array=dict(default=False, type='bool', aliases=['array']),
+ ),
+ required_if=[('state', 'present', ['value', 'value_type'])],
+ required_together=[('value', 'value_type')],
+ supports_check_mode=True,
+ )
+
+ facts_name = "xfconf"
+ default_state = 'present'
+ command = 'xfconf-query'
+ command_args_formats = dict(
+ channel=dict(fmt=('--channel', '{0}'),),
+ property=dict(fmt=('--property', '{0}'),),
+ is_array=dict(fmt="--force-array", style=ArgFormat.BOOLEAN),
+ reset=dict(fmt="--reset", style=ArgFormat.BOOLEAN),
+ create=dict(fmt="--create", style=ArgFormat.BOOLEAN),
+ values_and_types=dict(fmt=values_fmt)
+ )
+
+ def update_xfconf_output(self, **kwargs):
+ self.update_output(**kwargs)
+ self.update_facts(**kwargs)
+
+ def __init_module__(self):
+ self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.module.params['property'],
+ self.module.params['channel'])
+ self.vars.previous_value = self._get()
+ self.update_xfconf_output(property=self.module.params['property'],
+ channel=self.module.params['channel'],
+ previous_value=None)
+
+ def process_command_output(self, rc, out, err):
+ if err.rstrip() == self.does_not:
+ return None
+ if rc or len(err):
+ raise XFConfException('xfconf-query failed with error (rc={0}): {1}'.format(rc, err))
+
+ result = out.rstrip()
+ if "Value is an array with" in result:
+ result = result.split("\n")
+ result.pop(0)
+ result.pop(0)
+
+ return result
+
+ @property
+ def changed(self):
+ if self.vars.previous_value is None:
+ return self.vars.value is not None
+ elif self.vars.value is None:
+ return self.vars.previous_value is not None
+ else:
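+            # Compare as sets, so ordering and duplicate entries are ignored.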
+ return set(self.vars.previous_value) != set(self.vars.value)
+
+ def _get(self):
+ return self.run_command(params=('channel', 'property'))
+
+ def state_get(self):
+ self.vars.value = self.vars.previous_value
+ self.update_xfconf_output(value=self.vars.value)
+
+ def state_absent(self):
+ self.vars.value = None
+ self.run_command(params=('channel', 'property', 'reset'), extra_params={"reset": True})
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=None)
+
+ def state_present(self):
+ # stringify all values - in the CLI they will all be happy strings anyway
+ # and by doing this here the rest of the code can be agnostic to it
+ self.vars.value = [str(v) for v in self.module.params['value']]
+ value_type = self.module.params['value_type']
+
+ values_len = len(self.vars.value)
+ types_len = len(value_type)
+
+ if types_len == 1:
+ # use one single type for the entire list
+ value_type = value_type * values_len
+ elif types_len != values_len:
+ # or complain if lists' lengths are different
+ raise XFConfException('Number of elements in "value" and "value_type" must be the same')
+
+ # fix boolean values
+ self.vars.value = [fix_bool(v[0]) if v[1] == 'bool' else v[0] for v in zip(self.vars.value, value_type)]
+
+        # calculate whether the value should be treated as an array
+ self.vars.is_array = \
+ bool(self.module.params['force_array']) or \
+ isinstance(self.vars.previous_value, list) or \
+ values_len > 1
+
+ params = ['channel', 'property', 'create']
+ if self.vars.is_array:
+ params.append('is_array')
+ params.append('values_and_types')
+
+ extra_params = dict(values_and_types=(self.vars.value, value_type))
+ extra_params['create'] = True
+ extra_params['is_array'] = self.vars.is_array
+
+ if not self.module.check_mode:
+ self.run_command(params=params, extra_params=extra_params)
+
+ if not self.vars.is_array:
+ self.vars.value = self.vars.value[0]
+ value_type = value_type[0]
+
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=self.vars.value,
+ type=value_type)
+
+
+def main():
+ xfconf = XFConfProperty()
+ xfconf.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py
new file mode 100644
index 00000000..907f1bae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Emmanouil Kampitakis <info@kampitakis.de>
+# Copyright: (c) 2018, William Leemans <willie@elaba.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xfs_quota
+short_description: Manage quotas on XFS filesystems
+description:
+ - Configure quotas on XFS filesystems.
+  - Before using this module for project quotas, /etc/projects and /etc/projid need to be configured.
+author:
+- William Leemans (@bushvin)
+options:
+ type:
+ description:
+ - The XFS quota type.
+ type: str
+ required: true
+ choices:
+ - user
+ - group
+ - project
+ name:
+ description:
+ - The name of the user, group or project to apply the quota to, if other than default.
+ type: str
+ mountpoint:
+ description:
+ - The mount point on which to apply the quotas.
+ type: str
+ required: true
+ bhard:
+ description:
+ - Hard blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ bsoft:
+ description:
+ - Soft blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ ihard:
+ description:
+ - Hard inodes quota limit.
+ type: int
+ isoft:
+ description:
+ - Soft inodes quota limit.
+ type: int
+ rtbhard:
+ description:
+ - Hard realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ rtbsoft:
+ description:
+ - Soft realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ state:
+ description:
+ - Whether to apply the limits or remove them.
+      - When removing limits, they are set to 0 rather than being fully removed.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+
+requirements:
+ - xfsprogs
+'''
+
+EXAMPLES = r'''
+- name: Set default project soft and hard limit on /opt of 1g
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ bsoft: 1g
+ bhard: 1g
+ state: present
+
+- name: Remove the default limits on /opt
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ state: absent
+
+- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
+ community.general.xfs_quota:
+ type: user
+ mountpoint: /home
+ isoft: 1024
+ ihard: 2048
+
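+# A hypothetical group quota example; the group, mount point and sizes are
+# placeholders.
+- name: Set a soft and hard block limit for group developers on /srv
+  community.general.xfs_quota:
+    type: group
+    name: developers
+    mountpoint: /srv
+    bsoft: 512m
+    bhard: 1g
+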
+'''
+
+RETURN = r'''
+bhard:
+ description: the current bhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+bsoft:
+ description: the current bsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+ihard:
+ description: the current ihard setting in bytes
+ returned: always
+ type: int
+ sample: 100
+isoft:
+ description: the current isoft setting in bytes
+ returned: always
+ type: int
+ sample: 100
+rtbhard:
+ description: the current rtbhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+rtbsoft:
+ description: the current rtbsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+'''
+
+import grp
+import os
+import pwd
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bhard=dict(type='str'),
+ bsoft=dict(type='str'),
+ ihard=dict(type='int'),
+ isoft=dict(type='int'),
+ mountpoint=dict(type='str', required=True),
+ name=dict(type='str'),
+ rtbhard=dict(type='str'),
+ rtbsoft=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ type=dict(type='str', required=True, choices=['group', 'project', 'user'])
+ ),
+ supports_check_mode=True,
+ )
+
+ quota_type = module.params['type']
+ name = module.params['name']
+ mountpoint = module.params['mountpoint']
+ bhard = module.params['bhard']
+ bsoft = module.params['bsoft']
+ ihard = module.params['ihard']
+ isoft = module.params['isoft']
+ rtbhard = module.params['rtbhard']
+ rtbsoft = module.params['rtbsoft']
+ state = module.params['state']
+
+ if bhard is not None:
+ bhard = human_to_bytes(bhard)
+
+ if bsoft is not None:
+ bsoft = human_to_bytes(bsoft)
+
+ if rtbhard is not None:
+ rtbhard = human_to_bytes(rtbhard)
+
+ if rtbsoft is not None:
+ rtbsoft = human_to_bytes(rtbsoft)
+
+ result = dict(
+ changed=False,
+ )
+
+ if not os.path.ismount(mountpoint):
+ module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
+
+ mp = get_fs_by_mountpoint(mountpoint)
+ if mp is None:
+ module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result)
+
+ if quota_type == 'user':
+ type_arg = '-u'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+        if not any(opt in mp['mntopts'] for opt in ('uquota', 'usrquota', 'quota', 'uqnoenforce', 'qnoenforce')):
+ module.fail_json(
+ msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result
+ )
+ try:
+ pwd.getpwnam(name)
+        except KeyError:
+ module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == 'group':
+ type_arg = '-g'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+ if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
+ module.fail_json(
+ msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result
+ )
+ try:
+ grp.getgrnam(name)
+        except KeyError:
+            module.fail_json(msg="Group '%s' does not exist." % name, **result)
+
+ elif quota_type == 'project':
+ type_arg = '-p'
+ quota_default = '#0'
+ if name is None:
+ name = quota_default
+
+ if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
+ module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projects'):
+ module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projid'):
+ module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
+
+ if name != quota_default and name is not None and get_project_id(name) is None:
+ module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result)
+
+ prj_set = True
+ if name != quota_default:
+ cmd = 'project %s' % name
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not get project state.', **result)
+ else:
+ for line in stdout.split('\n'):
+ if "Project Id '%s' - is not set." in line:
+ prj_set = False
+ break
+
+ if not prj_set and not module.check_mode:
+ cmd = 'project -s'
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+            module.fail_json(msg='Could not set project.', **result)
+
+ result['changed'] = True
+
+ elif not prj_set and module.check_mode:
+ result['changed'] = True
+
+ # Set limits
+ if state == 'absent':
+ bhard = 0
+ bsoft = 0
+ ihard = 0
+ isoft = 0
+ rtbhard = 0
+ rtbsoft = 0
+
+ current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
+ current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
+ current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
+
+ result['xfs_quota'] = dict(
+ bsoft=current_bsoft,
+ bhard=current_bhard,
+ isoft=current_isoft,
+ ihard=current_ihard,
+ rtbsoft=current_rtbsoft,
+ rtbhard=current_rtbhard
+ )
+
+ limit = []
+ if bsoft is not None and int(bsoft) != current_bsoft:
+ limit.append('bsoft=%s' % bsoft)
+ result['bsoft'] = int(bsoft)
+
+ if bhard is not None and int(bhard) != current_bhard:
+ limit.append('bhard=%s' % bhard)
+ result['bhard'] = int(bhard)
+
+ if isoft is not None and isoft != current_isoft:
+ limit.append('isoft=%s' % isoft)
+ result['isoft'] = isoft
+
+ if ihard is not None and ihard != current_ihard:
+ limit.append('ihard=%s' % ihard)
+ result['ihard'] = ihard
+
+ if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
+ limit.append('rtbsoft=%s' % rtbsoft)
+ result['rtbsoft'] = int(rtbsoft)
+
+ if rtbhard is not None and int(rtbhard) != current_rtbhard:
+ limit.append('rtbhard=%s' % rtbhard)
+ result['rtbhard'] = int(rtbhard)
+
+ if len(limit) > 0 and not module.check_mode:
+ if name == quota_default:
+ cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
+ else:
+ cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
+
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not set limits.', **result)
+
+ result['changed'] = True
+
+ elif len(limit) > 0 and module.check_mode:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def quota_report(module, mountpoint, name, quota_type, used_type):
+ soft = None
+ hard = None
+
+ if quota_type == 'project':
+ type_arg = '-p'
+ elif quota_type == 'user':
+ type_arg = '-u'
+ elif quota_type == 'group':
+ type_arg = '-g'
+
+ if used_type == 'b':
+ used_arg = '-b'
+ used_name = 'blocks'
+ factor = 1024
+ elif used_type == 'i':
+ used_arg = '-i'
+ used_name = 'inodes'
+ factor = 1
+ elif used_type == 'rtb':
+ used_arg = '-r'
+ used_name = 'realtime blocks'
+ factor = 1024
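+    # 'factor' converts the 1 KiB block counts reported by 'xfs_quota report'
+    # into bytes; inode counts are left as plain numbers.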
+
+ rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint)
+
+ if rc != 0:
+ result = dict(
+ changed=False,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ )
+ module.fail_json(msg='Could not get quota report for %s.' % used_name, **result)
+
+ for line in stdout.split('\n'):
+ line = line.strip().split()
+ if len(line) > 3 and line[0] == name:
+ soft = int(line[2]) * factor
+ hard = int(line[3]) * factor
+ break
+
+ return soft, hard
+
+
+def exec_quota(module, cmd, mountpoint):
+ cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint]
+ (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True)
+ if "XFS_GETQUOTA: Operation not permitted" in stderr.split('\n') or \
+ rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in stderr.split('\n'):
+ module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation')
+
+ return rc, stdout, stderr
+
+
+def get_fs_by_mountpoint(mountpoint):
+ mpr = None
+ with open('/proc/mounts', 'r') as s:
+ for line in s.readlines():
+ mp = line.strip().split()
+ if len(mp) == 6 and mp[1] == mountpoint and mp[2] == 'xfs':
+ mpr = dict(zip(['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno'], mp))
+ mpr['mntopts'] = mpr['mntopts'].split(',')
+ break
+ return mpr
+
+
+def get_project_id(name):
+ prjid = None
+ with open('/etc/projid', 'r') as s:
+ for line in s.readlines():
+ line = line.strip().partition(':')
+ if line[0] == name:
+ prjid = line[2]
+ break
+
+ return prjid
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py
new file mode 100644
index 00000000..a1956129
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sysupgrade
+short_description: Manage OpenBSD system upgrades
+version_added: 1.1.0
+description:
+ - Manage OpenBSD system upgrades using sysupgrade.
+options:
+ snapshot:
+ description:
+ - Apply the latest snapshot.
+      - Otherwise the release will be applied.
+ default: no
+ type: bool
+ force:
+ description:
+ - Force upgrade (for snapshots only).
+ default: no
+ type: bool
+ keep_files:
+ description:
+ - Keep the files under /home/_sysupgrade.
+ - By default, the files will be deleted after the upgrade.
+ default: no
+ type: bool
+ fetch_only:
+ description:
+ - Fetch and verify files and create /bsd.upgrade but do not reboot.
+ - Set to C(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples.
+ default: yes
+ type: bool
+ installurl:
+ description:
+ - OpenBSD mirror top-level URL for fetching an upgrade.
+ - By default, the mirror URL is pulled from /etc/installurl.
+ type: str
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = r'''
+- name: Upgrade to latest release
+ community.general.sysupgrade:
+ register: sysupgrade
+
+- name: Upgrade to latest snapshot
+ community.general.sysupgrade:
+ snapshot: yes
+ installurl: https://cloudflare.cdn.openbsd.org/pub/OpenBSD
+ register: sysupgrade
+
+- name: Reboot to apply upgrade if needed
+ ansible.builtin.reboot:
+ when: sysupgrade.changed
+
+# Note: Ansible will error when running this way due to how
+# the reboot is forcefully handled by sysupgrade:
+
+- name: Have sysupgrade automatically reboot
+ community.general.sysupgrade:
+ fetch_only: no
+ ignore_errors: yes
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+stdout:
+ description: Sysupgrade standard output.
+ returned: always
+ type: str
+stderr:
+ description: Sysupgrade standard error.
+ returned: always
+ type: str
+ sample: "sysupgrade: need root privileges"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sysupgrade_run(module):
+ sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True)
+ cmd = [sysupgrade_bin]
+ changed = False
+ warnings = []
+
+ # Setup command flags
+ if module.params['snapshot']:
+ run_flag = ['-s']
+ if module.params['force']:
+ # Force only applies to snapshots
+ run_flag.append('-f')
+ else:
+ # release flag
+ run_flag = ['-r']
+
+ if module.params['keep_files']:
+ run_flag.append('-k')
+
+ if module.params['fetch_only']:
+ run_flag.append('-n')
+
+ # installurl must be the last argument
+ if module.params['installurl']:
+ run_flag.append(module.params['installurl'])
+
+ rc, out, err = module.run_command(cmd + run_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('already on latest snapshot') >= 0:
+ changed = False
+ elif out.lower().find('upgrade on next reboot') >= 0:
+ changed = True
+
+ return dict(
+ changed=changed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ snapshot=dict(type='bool', default=False),
+ fetch_only=dict(type='bool', default=True),
+ force=dict(type='bool', default=False),
+ keep_files=dict(type='bool', default=False),
+ installurl=dict(type='str'),
+ ),
+ supports_check_mode=False,
+ )
+ return_dict = sysupgrade_run(module)
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py
new file mode 100644
index 00000000..ae8f31c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+ - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+ - An issue is identified by the combination of project, issue subject and issue type.
+ - This module implements the creation or deletion of issues (not the update).
+options:
+ taiga_host:
+ type: str
+ description:
+ - The hostname of the Taiga instance.
+ default: https://api.taiga.io
+ project:
+ type: str
+ description:
+ - Name of the project containing the issue. Must exist previously.
+ required: True
+ subject:
+ type: str
+ description:
+ - The issue subject.
+ required: True
+ issue_type:
+ type: str
+ description:
+ - The issue type. Must exist previously.
+ required: True
+ priority:
+ type: str
+ description:
+ - The issue priority. Must exist previously.
+ default: Normal
+ status:
+ type: str
+ description:
+ - The issue status. Must exist previously.
+ default: New
+ severity:
+ type: str
+ description:
+ - The issue severity. Must exist previously.
+ default: Normal
+ description:
+ type: str
+ description:
+ - The issue description.
+ default: ""
+ attachment:
+ type: path
+ description:
+ - Path to a file to be attached to the issue.
+ attachment_description:
+ type: str
+ description:
+ - A string describing the file to be attached to the issue.
+ default: ""
+ tags:
+ type: list
+ elements: str
+ description:
+      - A list of tags to be assigned to the issue.
+ default: []
+ state:
+ type: str
+ description:
+ - Whether the issue should be present or not.
+ choices: ["present", "absent"]
+ default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD
+'''
+
+EXAMPLES = '''
+- name: Create an issue in my hosted Taiga environment and attach an error log
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ priority: High
+ status: New
+ severity: Important
+ description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log
+ attachment_description: Error log file
+ tags:
+ - Error
+ - Needs manual check
+ state: present
+
+- name: Deletes the previously created issue
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ state: absent
+'''
+
+RETURN = '''# '''
+import traceback
+
+from os import getenv
+from os.path import isfile
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+TAIGA_IMP_ERR = None
+try:
+ from taiga import TaigaAPI
+ from taiga.exceptions import TaigaException
+ TAIGA_MODULE_IMPORTED = True
+except ImportError:
+ TAIGA_IMP_ERR = traceback.format_exc()
+ TAIGA_MODULE_IMPORTED = False
+
+
+def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
+ issue_status, issue_type, issue_severity, issue_description,
+ issue_attachment, issue_attachment_description,
+ issue_tags, state, check_mode=False):
+ """
+ Method that creates/deletes issues depending whether they exist and the state desired
+
+ The credentials should be passed via environment variables:
+ - TAIGA_TOKEN
+ - TAIGA_USERNAME and TAIGA_PASSWORD
+
+ Returns a tuple with these elements:
+ - A boolean representing the success of the operation
+ - A descriptive message
+ - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+ """
+
+ changed = False
+
+ try:
+ token = getenv('TAIGA_TOKEN')
+ if token:
+ api = TaigaAPI(host=taiga_host, token=token)
+ else:
+ api = TaigaAPI(host=taiga_host)
+ username = getenv('TAIGA_USERNAME')
+ password = getenv('TAIGA_PASSWORD')
+            # Both TAIGA_USERNAME and TAIGA_PASSWORD are required here.
+            if not all([username, password]):
+ return (False, changed, "Missing credentials", {})
+ api.auth(username=username, password=password)
+
+ user_id = api.me().id
+        # Wrap filter() in list() so len() and indexing work on Python 3.
+        project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id)))
+ if len(project_list) != 1:
+ return (False, changed, "Unable to find project %s" % project_name, {})
+ project = project_list[0]
+ project_id = project.id
+
+        priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)))
+ if len(priority_list) != 1:
+ return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
+ priority_id = priority_list[0].id
+
+        status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)))
+ if len(status_list) != 1:
+ return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
+ status_id = status_list[0].id
+
+        type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types()))
+ if len(type_list) != 1:
+ return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
+ type_id = type_list[0].id
+
+        severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities()))
+ if len(severity_list) != 1:
+ return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
+ severity_id = severity_list[0].id
+
+ issue = {
+ "project": project_name,
+ "subject": issue_subject,
+ "priority": issue_priority,
+ "status": issue_status,
+ "type": issue_type,
+ "severity": issue_severity,
+ "description": issue_description,
+ "tags": issue_tags,
+ }
+
+ # An issue is identified by the project_name, the issue_subject and the issue_type
+        matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()))
+ matching_issue_list_len = len(matching_issue_list)
+
+ if matching_issue_list_len == 0:
+ # The issue does not exist in the project
+ if state == "present":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Create the issue
+ new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
+ if issue_attachment:
+ new_issue.attach(issue_attachment, description=issue_attachment_description)
+ issue["attachment"] = issue_attachment
+ issue["attachment_description"] = issue_attachment_description
+ return (True, changed, "Issue created", issue)
+
+ else:
+ # If does not exist, do nothing
+ return (True, changed, "Issue does not exist", {})
+
+ elif matching_issue_list_len == 1:
+ # The issue exists in the project
+ if state == "absent":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Delete the issue
+ matching_issue_list[0].delete()
+ return (True, changed, "Issue deleted", {})
+
+ else:
+ # Do nothing
+ return (True, changed, "Issue already exists", {})
+
+ else:
+ # More than 1 matching issue
+ return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
+
+ except TaigaException as exc:
+ msg = "An exception happened: %s" % to_native(exc)
+ return (False, changed, msg, {})
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ taiga_host=dict(type='str', required=False, default="https://api.taiga.io"),
+ project=dict(type='str', required=True),
+ subject=dict(type='str', required=True),
+ issue_type=dict(type='str', required=True),
+ priority=dict(type='str', required=False, default="Normal"),
+ status=dict(type='str', required=False, default="New"),
+ severity=dict(type='str', required=False, default="Normal"),
+ description=dict(type='str', required=False, default=""),
+ attachment=dict(type='path', required=False, default=None),
+ attachment_description=dict(type='str', required=False, default=""),
+ tags=dict(required=False, default=[], type='list', elements='str'),
+ state=dict(type='str', required=False, choices=['present', 'absent'],
+ default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not TAIGA_MODULE_IMPORTED:
+ module.fail_json(msg=missing_required_lib("python-taiga"),
+ exception=TAIGA_IMP_ERR)
+
+ taiga_host = module.params['taiga_host']
+ project_name = module.params['project']
+ issue_subject = module.params['subject']
+ issue_priority = module.params['priority']
+ issue_status = module.params['status']
+ issue_type = module.params['issue_type']
+ issue_severity = module.params['severity']
+ issue_description = module.params['description']
+ issue_attachment = module.params['attachment']
+ issue_attachment_description = module.params['attachment_description']
+ if issue_attachment:
+ if not isfile(issue_attachment):
+ msg = "%s is not a file" % issue_attachment
+ module.fail_json(msg=msg)
+ issue_tags = module.params['tags']
+ state = module.params['state']
+
+ return_status, changed, msg, issue_attr_dict = manage_issue(
+ module,
+ taiga_host,
+ project_name,
+ issue_subject,
+ issue_priority,
+ issue_status,
+ issue_type,
+ issue_severity,
+ issue_description,
+ issue_attachment,
+ issue_attachment_description,
+ issue_tags,
+ state,
+ check_mode=module.check_mode
+ )
+ if return_status:
+ if len(issue_attr_dict) > 0:
+ module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py
new file mode 100644
index 00000000..c1ef841c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Artem Feofanov <artem.feofanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: telegram
+author: "Artem Feofanov (@tyouxa)"
+
+short_description: Module for sending notifications via Telegram
+
+description:
+  - Send notifications via a Telegram bot to a verified group or user.
+notes:
+  - You will require a Telegram account and need to create a Telegram bot to use this module.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ msg_format:
+ type: str
+ description:
+      - Message format. The formatting options `markdown` and `html` are described in the
+        Telegram API docs (https://core.telegram.org/bots/api#formatting-options).
+        If the option `plain` is set, the message will not be formatted.
+ default: plain
+ choices: [ "plain", "markdown", "html" ]
+ token:
+ type: str
+ description:
+ - Token identifying your telegram bot.
+ required: true
+ chat_id:
+ type: str
+ description:
+      - Telegram group or user chat_id.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+- name: Send a message to chat in playbook
+ community.general.telegram:
+ token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+ chat_id: 000000
+ msg: Ansible task finished
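+
+# Illustrative sketch only: msg_format (documented above) accepts markdown and
+# html; the token and chat_id values are the same placeholders as above.
+- name: Send a markdown-formatted message to chat in playbook
+  community.general.telegram:
+    token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+    chat_id: 000000
+    msg_format: markdown
+    msg: '*Ansible* task finished'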
+"""
+
+RETURN = """
+
+msg:
+ description: The message you attempted to send
+ returned: success
+ type: str
+ sample: "Ansible task finished"
+telegram_error:
+  description: Error message received from the Telegram API
+ returned: failure
+ type: str
+ sample: "Bad Request: message text is empty"
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ chat_id=dict(type='str', required=True, no_log=True),
+ msg_format=dict(type='str', required=False, default='plain',
+ choices=['plain', 'markdown', 'html']),
+ msg=dict(type='str', required=True)),
+ supports_check_mode=True
+ )
+
+ token = quote(module.params.get('token'))
+ chat_id = quote(module.params.get('chat_id'))
+ msg_format = quote(module.params.get('msg_format'))
+ msg = quote(module.params.get('msg'))
+
+ url = 'https://api.telegram.org/bot' + token + \
+ '/sendMessage?text=' + msg + '&chat_id=' + chat_id
+ if msg_format in ('markdown', 'html'):
+ url += '&parse_mode=' + msg_format
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ body = json.loads(info['body'])
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']),
+ telegram_error=body['description'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py
new file mode 100644
index 00000000..680bab9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: terraform
+short_description: Manages a Terraform deployment (and plans)
+description:
+ - Provides support for deploying resources with Terraform and pulling
+ resource information back into Ansible.
+options:
+ state:
+ choices: ['planned', 'present', 'absent']
+ description:
+ - Goal state of given stage/project
+ type: str
+ default: present
+  binary_path:
+    description:
+      - The path of a terraform binary to use, relative to the 'project_path'
+        unless you supply an absolute path.
+    type: path
+ project_path:
+ description:
+ - The path to the root of the Terraform directory with the
+ vars.tf/main.tf/etc to use.
+ type: path
+ required: true
+ workspace:
+ description:
+ - The terraform workspace to work with.
+ type: str
+ default: default
+ purge_workspace:
+ description:
+      - Only works with I(state=absent).
+ - If true, the workspace will be deleted after the "terraform destroy" action.
+ - The 'default' workspace will not be deleted.
+ default: false
+ type: bool
+ plan_file:
+ description:
+ - The path to an existing Terraform plan file to apply. If this is not
+ specified, Ansible will build a new TF plan and execute it.
+ Note that this option is required if 'state' has the 'planned' value.
+ type: path
+  state_file:
+    description:
+      - The path to an existing Terraform state file to use when building the plan.
+        If this is not specified, the default `terraform.tfstate` will be used.
+      - This option is ignored when I(plan_file) is specified.
+    type: path
+ variables_files:
+ description:
+ - The path to a variables file for Terraform to fill into the TF
+ configurations. This can accept a list of paths to multiple variables files.
+ - Up until Ansible 2.9, this option was usable as I(variables_file).
+ type: list
+ elements: path
+ aliases: [ 'variables_file' ]
+ variables:
+ description:
+ - A group of key-values to override template variables or those in
+ variables files.
+ type: dict
+ targets:
+ description:
+ - A list of specific resources to target in this plan/application. The
+ resources selected here will also auto-include any dependencies.
+ type: list
+ elements: str
+ lock:
+ description:
+ - Enable statefile locking, if you use a service that accepts locks (such
+ as S3+DynamoDB) to store your statefile.
+ type: bool
+ default: true
+ lock_timeout:
+ description:
+ - How long to maintain the lock on the statefile, if you use a service
+ that accepts locks (such as S3+DynamoDB).
+ type: int
+ force_init:
+ description:
+ - To avoid duplicating infra, if a state file can't be found this will
+ force a `terraform init`. Generally, this should be turned off unless
+ you intend to provision an entirely new Terraform deployment.
+ default: false
+ type: bool
+ backend_config:
+ description:
+ - A group of key-values to provide at init stage to the -backend-config parameter.
+ type: dict
+ backend_config_files:
+ description:
+ - The path to a configuration file to provide at init state to the -backend-config parameter.
+ This can accept a list of paths to multiple configuration files.
+ type: list
+ elements: path
+ version_added: '0.2.0'
+ init_reconfigure:
+ description:
+ - Forces backend reconfiguration during init.
+ default: false
+ type: bool
+ version_added: '1.3.0'
+notes:
+ - To just run a `terraform plan`, use check mode.
+requirements: [ "terraform" ]
+author: "Ryan Scott Brown (@ryansb)"
+'''
+
+EXAMPLES = """
+- name: Basic deploy of a service
+ community.general.terraform:
+ project_path: '{{ project_dir }}'
+ state: present
+
+- name: Define the backend configuration at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config:
+ region: "eu-west-1"
+ bucket: "some-bucket"
+ key: "random.tfstate"
+
+- name: Define the backend configuration with one or more files at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config_files:
+ - /path/to/backend_config_file_1
+ - /path/to/backend_config_file_2
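+
+# Illustrative sketches only; they use options documented above, but the
+# variable name, file paths and output name below are assumptions.
+- name: Deploy with inline variables and a variables file
+  community.general.terraform:
+    project_path: 'project/'
+    state: present
+    variables:
+      instance_count: 3
+    variables_files:
+      - vars/prod.tfvars
+  register: tf_result
+
+- name: Read one Terraform output (see the outputs return value)
+  ansible.builtin.debug:
+    msg: "{{ tf_result.outputs.bukkit_arn.value }}"
+
+- name: Only build and check a plan (state=planned requires plan_file)
+  community.general.terraform:
+    project_path: 'project/'
+    state: planned
+    plan_file: /tmp/ansible.tfplan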
+"""
+
+RETURN = """
+outputs:
+ type: complex
+ description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value.
+ returned: on success
+ sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
+ contains:
+ sensitive:
+ type: bool
+ returned: always
+ description: Whether Terraform has marked this value as sensitive
+ type:
+ type: str
+ returned: always
+ description: The type of the value (string, int, etc)
+ value:
+ type: str
+ returned: always
+ description: The value of the output as interpolated by Terraform
+stdout:
+ type: str
+ description: Full `terraform` command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: terraform apply ...
+"""
+
+import os
+import json
+import tempfile
+from ansible.module_utils.six.moves import shlex_quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+DESTROY_ARGS = ('destroy', '-no-color', '-force')
+APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
+module = None
+
+
+def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
+    if project_path in [None, ''] or '/' not in project_path:
+        module.fail_json(msg="Path for Terraform project can not be None or '', and must contain a '/' (use './' for the current directory).")
+ if not os.path.exists(bin_path):
+ module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
+ if not os.path.isdir(project_path):
+ module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
+
+ rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path, use_unsafe_shell=True)
+
+
+def _state_args(state_file):
+ if state_file and os.path.exists(state_file):
+ return ['-state', state_file]
+ if state_file and not os.path.exists(state_file):
+ module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
+ return []
+
+
+def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure):
+ command = [bin_path, 'init', '-input=false']
+ if backend_config:
+ for key, val in backend_config.items():
+ command.extend([
+ '-backend-config',
+ shlex_quote('{0}={1}'.format(key, val))
+ ])
+ if backend_config_files:
+ for f in backend_config_files:
+ command.extend(['-backend-config', f])
+ if init_reconfigure:
+ command.extend(['-reconfigure'])
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+
+
+def get_workspace_context(bin_path, project_path):
+ workspace_ctx = {"current": "default", "all": []}
+ command = [bin_path, 'workspace', 'list', '-no-color']
+ rc, out, err = module.run_command(command, cwd=project_path)
+ if rc != 0:
+ module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
+ for item in out.split('\n'):
+ stripped_item = item.strip()
+ if not stripped_item:
+ continue
+ elif stripped_item.startswith('* '):
+ workspace_ctx["current"] = stripped_item.replace('* ', '')
+ else:
+ workspace_ctx["all"].append(stripped_item)
+ return workspace_ctx
+
+
+def _workspace_cmd(bin_path, project_path, action, workspace):
+ command = [bin_path, 'workspace', action, workspace, '-no-color']
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ return rc, out, err
+
+
+def create_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'new', workspace)
+
+
+def select_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'select', workspace)
+
+
+def remove_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'delete', workspace)
+
+
+def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
+ if plan_path is None:
+ f, plan_path = tempfile.mkstemp(suffix='.tfplan')
+
+ plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]
+
+ for t in (module.params.get('targets') or []):
+ plan_command.extend(['-target', t])
+
+ plan_command.extend(_state_args(state_file))
+
+ rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)
+
+ if rc == 0:
+ # no changes
+        return plan_path, False, out, err, (plan_command if state == 'planned' else command)
+ elif rc == 1:
+ # failure to plan
+ module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
+ elif rc == 2:
+ # changes, but successful
+        return plan_path, True, out, err, (plan_command if state == 'planned' else command)
+
+ module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_path=dict(required=True, type='path'),
+ binary_path=dict(type='path'),
+ workspace=dict(required=False, type='str', default='default'),
+ purge_workspace=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'planned']),
+ variables=dict(type='dict'),
+ variables_files=dict(aliases=['variables_file'], type='list', elements='path', default=None),
+ plan_file=dict(type='path'),
+ state_file=dict(type='path'),
+ targets=dict(type='list', elements='str', default=[]),
+ lock=dict(type='bool', default=True),
+ lock_timeout=dict(type='int',),
+ force_init=dict(type='bool', default=False),
+ backend_config=dict(type='dict', default=None),
+ backend_config_files=dict(type='list', elements='path', default=None),
+ init_reconfigure=dict(required=False, type='bool', default=False),
+ ),
+ required_if=[('state', 'planned', ['plan_file'])],
+ supports_check_mode=True,
+ )
+
+ project_path = module.params.get('project_path')
+ bin_path = module.params.get('binary_path')
+ workspace = module.params.get('workspace')
+ purge_workspace = module.params.get('purge_workspace')
+ state = module.params.get('state')
+ variables = module.params.get('variables') or {}
+ variables_files = module.params.get('variables_files')
+ plan_file = module.params.get('plan_file')
+ state_file = module.params.get('state_file')
+ force_init = module.params.get('force_init')
+ backend_config = module.params.get('backend_config')
+ backend_config_files = module.params.get('backend_config_files')
+ init_reconfigure = module.params.get('init_reconfigure')
+
+ if bin_path is not None:
+ command = [bin_path]
+ else:
+ command = [module.get_bin_path('terraform', required=True)]
+
+ if force_init:
+ init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure)
+
+ workspace_ctx = get_workspace_context(command[0], project_path)
+ if workspace_ctx["current"] != workspace:
+ if workspace not in workspace_ctx["all"]:
+ create_workspace(command[0], project_path, workspace)
+ else:
+ select_workspace(command[0], project_path, workspace)
+
+ if state == 'present':
+ command.extend(APPLY_ARGS)
+ elif state == 'absent':
+ command.extend(DESTROY_ARGS)
+
+ variables_args = []
+ for k, v in variables.items():
+ variables_args.extend([
+ '-var',
+ '{0}={1}'.format(k, v)
+ ])
+ if variables_files:
+ for f in variables_files:
+ variables_args.extend(['-var-file', f])
+
+ preflight_validation(command[0], project_path, variables_args)
+
+ if module.params.get('lock') is not None:
+ if module.params.get('lock'):
+ command.append('-lock=true')
+ else:
+ command.append('-lock=false')
+ if module.params.get('lock_timeout') is not None:
+ command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
+
+ for t in (module.params.get('targets') or []):
+ command.extend(['-target', t])
+
+ # we aren't sure if this plan will result in changes, so assume yes
+ needs_application, changed = True, False
+
+ out, err = '', ''
+
+ if state == 'absent':
+ command.extend(variables_args)
+ elif state == 'present' and plan_file:
+ if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
+ command.append(plan_file)
+ else:
+ module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
+ else:
+ plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
+ module.params.get('targets'), state, plan_file)
+ command.append(plan_file)
+
+ if needs_application and not module.check_mode and not state == 'planned':
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+        # Check the command output to decide whether changes were made during execution.
+        if (' 0 added, 0 changed' not in out and state != "absent") or ' 0 destroyed' not in out:
+            changed = True
+
+ outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
+ rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
+ if rc == 1:
+ module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
+ outputs = {}
+ elif rc != 0:
+ module.fail_json(
+ msg="Failure when getting Terraform outputs. "
+ "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
+ command=' '.join(outputs_command))
+ else:
+ outputs = json.loads(outputs_text)
+
+ # Restore the Terraform workspace found when running the module
+ if workspace_ctx["current"] != workspace:
+ select_workspace(command[0], project_path, workspace_ctx["current"])
+ if state == 'absent' and workspace != 'default' and purge_workspace is True:
+ remove_workspace(command[0], project_path, workspace)
+
+ module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py
new file mode 100644
index 00000000..d10dd9bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py
@@ -0,0 +1,905 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock and of the hardware clock.
+ If you want to set up the NTP, use M(ansible.builtin.service) module.
+ - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
+ - Several different tools are used depending on the OS/Distribution involved.
+ For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
+    On SmartOS C(sm-set-timezone) is used, on macOS C(systemsetup), and on BSD C(/etc/localtime) is modified directly.
+ On AIX, C(chtz) is used.
+ - As of Ansible 2.3 support was added for SmartOS and BSDs.
+ - As of Ansible 2.4 support was added for macOS.
+ - As of Ansible 2.9 support was added for AIX 6.1+
+ - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails.
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ - Default is to keep current setting.
+ - B(At least one of name and hwclock are required.)
+ type: str
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ - Default is to keep current setting.
+      - Note that changing this option is not recommended and may fail to
+        apply, especially on virtual environments such as AWS.
+ - B(At least one of name and hwclock are required.)
+ - I(Only used on Linux.)
+ type: str
+ aliases: [ rtc ]
+ choices: [ local, UTC ]
+notes:
+  - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the timezone of a zone.
+  - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+ - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+ - Shinichi TAMURA (@tmshn)
+ - Jasper Lievisse Adriaanse (@jasperla)
+ - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+  description: The before and after differences for the given arguments.
+ returned: success
+ type: complex
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+ community.general.timezone:
+ name: Asia/Tokyo
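+
+# Illustrative sketch only: hwclock (documented above) accepts 'local' or
+# 'UTC' and is only used on Linux.
+- name: Set timezone to Asia/Tokyo and keep the hardware clock in UTC
+  community.general.timezone:
+    name: Asia/Tokyo
+    hwclock: UTC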
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+ It does not use load_platform_subclass() because it needs to judge based
+ on whether the `timedatectl` command exists and is available.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if platform.system() == 'Linux':
+ timedatectl = module.get_bin_path('timedatectl')
+ if timedatectl is not None:
+ rc, stdout, stderr = module.run_command(timedatectl)
+ if rc == 0:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+                    module.warn('timedatectl command was found but not usable: %s. Falling back to other methods.' % stderr)
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ elif re.match('^joyent_.*Z', platform.version()):
+ # platform.system() returns SunOS, which is too broad. So look at the
+ # platform version instead. However we have to ensure that we're not
+ # running in the global zone where changing the timezone has no effect.
+ zonename_cmd = module.get_bin_path('zonename')
+ if zonename_cmd is not None:
+ (rc, stdout, _) = module.run_command(zonename_cmd)
+ if rc == 0 and stdout.strip() == 'global':
+ module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+ return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+ elif platform.system() == 'Darwin':
+ return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+ elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+ return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+ elif platform.system() == 'AIX':
+ AIXoslevel = int(platform.version() + platform.release())
+ if AIXoslevel >= 61:
+ return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+ else:
+ module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+        # `self.value` holds the value for each param in each phase.
+        # Initially it only contains the "planned" phase; the
+        # `self.check()` function will fill in the other phases.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+    def abort(self, msg):
+        """Abort the process with an error message.
+
+        This is just a wrapper around module.fail_json().
+
+        Args:
+            msg: The error message.
+        """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+    def execute(self, *commands, **kwargs):
+        """Execute the shell command.
+
+        This is just a wrapper around module.run_command().
+
+        Args:
+            *commands: The command to execute.
+                       It will be concatenated with a single space.
+            **kwargs: Only the 'log' key is checked.
+                      If kwargs['log'] is true, the command is recorded to self.msg.
+
+        Returns:
+            stdout: Standard output of the command.
+        """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+    def diff(self, phase1='before', phase2='after'):
+        """Calculate the difference between two given phases.
+
+        Args:
+            phase1, phase2: The names of the phases to compare.
+
+        Returns:
+            diff: The difference in value between phase1 and phase2.
+                  This is in the format which can be used with the
+                  `--diff` option of ansible-playbook.
+        """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ return tzfile
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+    For timezone setting, it edits the following files and reflects the changes:
+        - /etc/sysconfig/clock ... RHEL/CentOS
+        - /etc/timezone        ... Debian/Ubuntu
+    For hwclock setting, it executes the `hwclock --systohc` command with the
+    '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name=None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+    # It's fine if all three config files don't exist
+ allow_no_file = dict(
+ name=True,
+ hwclock=True,
+ adjtime=True
+ )
+
+ regexps = dict(
+ name=None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ dist_regexps = dict(
+ SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+ redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ )
+
+ dist_tzline_format = dict(
+ SuSE='TIMEZONE="%s"\n',
+ redhat='ZONE="%s"\n'
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ if 'name' in self.value:
+ tzfile = self._verify_timezone()
+ # `--remove-destination` is needed if /etc/localtime is a symlink so
+ # that it overwrites it instead of following it.
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ if 'name' in self.value:
+ self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+ '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS/SUSE
+ if self.module.get_bin_path('tzdata-update') is not None:
+ # tzdata-update cannot update the timezone if /etc/localtime is
+ # a symlink so we have to use cp to update the time zone which
+ # was set above.
+ if not os.path.islink('/etc/localtime'):
+ self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+ # else:
+ # self.update_timezone = 'cp --remove-destination ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ try:
+ f = open(self.conf_files['name'], 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, 'name'):
+ # If the config file doesn't exist detect the distribution and set regexps.
+ distribution = get_distribution()
+ if distribution == 'SuSE':
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+ else:
+ self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+ else:
+ # The key for timezone might be `ZONE` or `TIMEZONE`
+ # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+ # So check the content of /etc/sysconfig/clock and decide which key to use.
+ sysconfig_clock = f.read()
+ f.close()
+ if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+
+ def _allow_ioerror(self, err, key):
+ # In some cases, even if the target file does not exist,
+ # simply creating it may solve the problem.
+ # In such cases, we should continue the configuration rather than aborting.
+ if err.errno != errno.ENOENT:
+ # If the error is not ENOENT ("No such file or directory"),
+ # (e.g., permission error, etc), we should abort.
+ return False
+ return self.allow_no_file.get(key, False)
+
+ def _edit_file(self, filename, regexp, value, key):
+ """Replace the first matched line with given `value`.
+
+        If `regexp` matches more than once, every matched line except the first is deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+            key: For what key the file is being edited.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ lines = []
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ lines = file.readlines()
+ file.close()
+        # Find all matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def _get_value_from_config(self, key, phase):
+ filename = self.conf_files[key]
+ try:
+ file = open(filename, mode='r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ if key == 'hwclock':
+ return 'n/a'
+ elif key == 'adjtime':
+ return 'UTC'
+ elif key == 'name':
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ if key == 'hwclock':
+ # If we cannot find UTC in the config that's fine.
+ return 'n/a'
+ elif key == 'adjtime':
+                # If we cannot find UTC/LOCAL in /etc/adjtime, that means UTC
+                # will be used by default.
+ return 'UTC'
+ elif key == 'name':
+ if phase == 'before':
+ # In 'before' phase UTC/LOCAL doesn't need to be set in
+ # the timezone config file, so we ignore this error.
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+ else:
+ if key == 'hwclock':
+ # convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def get(self, key, phase):
+ planned = self.value[key]['planned']
+ if key == 'hwclock':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the value in the config file is the same as the 'planned'
+ # value, we need to check /etc/adjtime.
+ value = self._get_value_from_config('adjtime', phase)
+ elif key == 'name':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+                # If the planned value is the same as the one in the config file
+                # we need to check whether /etc/localtime is also set to the 'planned' zone.
+ if os.path.islink('/etc/localtime'):
+ # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+ # to set, we need to return the TZ which the symlink points to.
+ if os.path.exists('/etc/localtime'):
+ # We use readlink() because on some distros zone files are symlinks
+ # to other zone files, so it's hard to get which TZ is actually set
+ # if we follow the symlink.
+ path = os.readlink('/etc/localtime')
+ linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
+ if linktz:
+ valuelink = linktz.group(1)
+ if valuelink != planned:
+ value = valuelink
+ else:
+ # Set current TZ to 'n/a' if the symlink points to a path
+ # which isn't a zone file.
+ value = 'n/a'
+ else:
+ # Set current TZ to 'n/a' if the symlink to the zone file is broken.
+ value = 'n/a'
+ else:
+ # If /etc/localtime is not a symlink best we can do is compare it with
+ # the 'planned' zone info file and return 'n/a' if they are different.
+ try:
+ if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+ return 'n/a'
+ except Exception:
+ return 'n/a'
+ else:
+ self.abort('unknown parameter "%s"' % key)
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value,
+ key='name')
+ for cmd in self.update_timezone:
+ self.execute(cmd)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ utc = 'no'
+ else:
+ option = '--utc'
+ utc = 'yes'
+ if self.conf_files['hwclock'] is not None:
+ self._edit_file(filename=self.conf_files['hwclock'],
+ regexp=self.regexps['hwclock'],
+ value='UTC=%s\n' % utc,
+ key='hwclock')
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+class SmartOSTimezone(Timezone):
+ """This is a Timezone manipulation class for SmartOS instances.
+
+ It uses the C(sm-set-timezone) utility to set the timezone, and
+ inspects C(/etc/default/init) to determine the current timezone.
+
+ NB: A zone needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(SmartOSTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
+ if not self.settimezone:
+ module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/default/init`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ try:
+ f = open('/etc/default/init', 'r')
+ for line in f:
+ m = re.match('^TZ=(.*)$', line.strip())
+ if m:
+ return m.groups()[0]
+ except Exception:
+ self.module.fail_json(msg='Failed to read /etc/default/init')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through sm-set-timezone, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ cmd = 'sm-set-timezone %s' % value
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # sm-set-timezone knows no state and will always set the timezone.
+ # XXX: https://github.com/joyent/smtools/pull/2
+ m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+ if not (m and m.groups()[-1] == value):
+ self.module.fail_json(msg='Failed to set timezone')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class DarwinTimezone(Timezone):
+ """This is the timezone implementation for Darwin which, unlike other *BSD
+ implementations, uses the `systemsetup` command on Darwin to check/set
+ the timezone.
+ """
+
+ regexps = dict(
+ name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(DarwinTimezone, self).__init__(module)
+ self.systemsetup = module.get_bin_path('systemsetup', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_current_timezone(self, phase):
+ """Lookup the current timezone via `systemsetup -gettimezone`."""
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+ return self.status[phase]
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ # Lookup the list of supported timezones via `systemsetup -listtimezones`.
+ # Note: Skip the first line that contains the label 'Time Zones:'
+ out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+ tz_list = list(map(lambda x: x.strip(), out))
+ if tz not in tz_list:
+ self.abort('given timezone "%s" is not available' % tz)
+ return tz
+
+ def get(self, key, phase):
+ if key == 'name':
+ status = self._get_current_timezone(phase)
+ value = self.regexps[key].search(status).group(1)
+ return value
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.execute(self.systemsetup, '-settimezone', value, log=True)
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+ """This is the timezone implementation for *BSD which works simply through
+ updating the `/etc/localtime` symlink to point to a valid timezone name under
+ `/usr/share/zoneinfo`.
+ """
+
+ def __init__(self, module):
+ super(BSDTimezone, self).__init__(module)
+
+ def __get_timezone(self):
+ zoneinfo_dir = '/usr/share/zoneinfo/'
+ localtime_file = '/etc/localtime'
+
+ # Strategy 1:
+        # If /etc/localtime does not exist, assume the timezone is UTC.
+ if not os.path.exists(localtime_file):
+ self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ # Strategy 2:
+ # Follow symlink of /etc/localtime
+ zoneinfo_file = localtime_file
+ while not zoneinfo_file.startswith(zoneinfo_dir):
+ try:
+                # Follow the current link, not the original file, so that
+                # multi-hop symlink chains terminate instead of looping forever.
+                zoneinfo_file = os.readlink(zoneinfo_file)
+ except OSError:
+ # OSError means "end of symlink chain" or broken link.
+ break
+ else:
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 3:
+ # (If /etc/localtime is not symlinked)
+ # Check all files in /usr/share/zoneinfo and return first non-link match.
+ for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
+ for fname in sorted(fnames):
+ zoneinfo_file = os.path.join(dname, fname)
+ if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 4:
+ # As a fall-back, return 'UTC' as default assumption.
+ self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ def get(self, key, phase):
+ """Lookup the current timezone by resolving `/etc/localtime`."""
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ # First determine if the requested timezone is valid by looking in
+ # the zoneinfo directory.
+ zonefile = '/usr/share/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+            # Now (somewhat) atomically update the symlink by creating a new
+            # symlink and moving it into place. Otherwise we would have to remove
+            # the original symlink and create the new one, but that would
+            # create a race condition in case another process tries to read
+            # /etc/localtime between the removal and the creation.
+ suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+ new_localtime = '/etc/localtime.' + suffix
+
+ try:
+ os.symlink(zonefile, new_localtime)
+ os.rename(new_localtime, '/etc/localtime')
+ except Exception:
+ os.remove(new_localtime)
+ self.module.fail_json(msg='Could not update /etc/localtime')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+ """This is a Timezone manipulation class for AIX instances.
+
+ It uses the C(chtz) utility to set the timezone, and
+ inspects C(/etc/environment) to determine the current timezone.
+
+    While AIX time zones can be set using two formats (POSIX and
+    Olson), the preferred method is Olson.
+    See the following article for more information:
+    https://developer.ibm.com/articles/au-aix-posix/
+
+ NB: AIX needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(AIXTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+ def __get_timezone(self):
+ """ Return the current value of TZ= in /etc/environment """
+ try:
+ f = open('/etc/environment', 'r')
+ etcenvironment = f.read()
+ f.close()
+ except Exception:
+ self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+ match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/environment`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through chtz, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+ # It will only return non-zero if the chtz command itself fails, it does not check for
+ # valid timezones. We need to perform a basic check to confirm that the timezone
+ # definition exists in /usr/share/lib/zoneinfo
+            # This does mean that we can only support Olson for now. The
+            # commented-out regex below detects Olson-style strings, so in the
+            # future we could detect POSIX or Olson and act accordingly.
+
+ # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+ # if not regex_olson.match(value):
+            #     msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+ # self.module.fail_json(msg=msg)
+
+ # First determine if the requested timezone is valid by looking in the zoneinfo
+ # directory.
+ zonefile = '/usr/share/lib/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+ # Now set the TZ using chtz
+ cmd = 'chtz %s' % value
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # The best condition check we can do is to check the value of TZ after making the
+ # change.
+ TZ = self.__get_timezone()
+ if TZ != value:
+ msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+ self.module.fail_json(msg=msg)
+
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ module = AnsibleModule(
+ argument_spec=dict(
+ hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+ name=dict(type='str'),
+ ),
+ required_one_of=[
+ ['hwclock', 'name']
+ ],
+ supports_check_mode=True,
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ (after, planned) = tz.diff('after', 'planned').values()
+ if after != planned:
+            tz.abort('still not in the desired state, though changes have been made - '
+                     'planned: %s, after: %s' % (str(planned), str(after)))
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py
new file mode 100644
index 00000000..5ec995f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio.
+description:
+ - Sends a text message to a phone number through the Twilio messaging API.
+notes:
+  - This module is non-idempotent because it sends a text message through an
+    external API. It is idempotent only in the case that the module fails.
+ - Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need a Twilio account with
+ a purchased or verified phone number to send the text message.
+options:
+ account_sid:
+ type: str
+ description:
+      user's Twilio account SID, found on the account page
+ required: true
+ auth_token:
+ type: str
+ description: user's Twilio authentication token
+ required: true
+ msg:
+ type: str
+ description:
+ the body of the text message
+ required: true
+ to_numbers:
+ type: list
+ description:
+ one or more phone numbers to send the text message to,
+ format +15551112222
+ required: true
+ aliases: [ to_number ]
+ from_number:
+ type: str
+ description:
+ the Twilio number to send the text message from, format +15551112222
+ required: true
+ media_url:
+ type: str
+ description:
+ a URL with a picture, video or sound clip to send with an MMS
+ (multimedia message) instead of a plain SMS
+ required: false
+
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = '''
+# send an SMS about the build status to (555) 303 5681
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: All servers with webserver role are now configured.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ delegate_to: localhost
+
+# send an SMS to multiple phone numbers about the deployment
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: This server configuration is now complete.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15553258899
+ to_numbers:
+ - +15551113232
+ - +12025551235
+ - +19735559010
+ delegate_to: localhost
+
+# send an MMS to a single recipient with an update on the deployment
+# and an image of the results
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: Deployment complete!
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ media_url: https://demo.twilio.com/logo.png
+ delegate_to: localhost
+'''
+
+# =======================================
+# twilio module support methods
+#
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+def post_twilio_api(module, account_sid, auth_token, msg, from_number,
+ to_number, media_url=None):
+ URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
+ % (account_sid,)
+ AGENT = "Ansible"
+
+ data = {'From': from_number, 'To': to_number, 'Body': msg}
+ if media_url:
+ data['MediaUrl'] = media_url
+ encoded_data = urlencode(data)
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json',
+ }
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = account_sid.replace('\n', '')
+ module.params['url_password'] = auth_token.replace('\n', '')
+
+ return fetch_url(module, URI, data=encoded_data, headers=headers)
+
+
+# =======================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_sid=dict(required=True),
+ auth_token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ from_number=dict(required=True),
+ to_numbers=dict(required=True, aliases=['to_number'], type='list'),
+ media_url=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ account_sid = module.params['account_sid']
+ auth_token = module.params['auth_token']
+ msg = module.params['msg']
+ from_number = module.params['from_number']
+ to_numbers = module.params['to_numbers']
+ media_url = module.params['media_url']
+
+ for number in to_numbers:
+ r, info = post_twilio_api(module, account_sid, auth_token, msg,
+ from_number, number, media_url)
+ if info['status'] not in [200, 201]:
+ body_message = "unknown error"
+ if 'body' in info:
+ body = module.from_json(info['body'])
+ body_message = body['message']
+ module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
+
+ module.exit_json(msg=msg, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py
new file mode 100644
index 00000000..6f8e4e8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: typetalk
+short_description: Send a message to Typetalk
+description:
+  - Send a message to Typetalk using the Typetalk API.
+options:
+ client_id:
+ type: str
+ description:
+ - OAuth2 client ID
+ required: true
+ client_secret:
+ type: str
+ description:
+ - OAuth2 client secret
+ required: true
+ topic:
+ type: int
+ description:
+      - topic ID to post the message to
+ required: true
+ msg:
+ type: str
+ description:
+ - message body
+ required: true
+requirements: [ json ]
+author: "Takashi Someda (@tksmd)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to typetalk
+ community.general.typetalk:
+ client_id: 12345
+ client_secret: 12345
+ topic: 1
+ msg: install completed
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, ConnectionError
+
+
+def do_request(module, url, params, headers=None):
+ data = urlencode(params)
+ if headers is None:
+ headers = dict()
+ headers = dict(headers, **{
+ 'User-Agent': 'Ansible/typetalk module',
+ })
+ r, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ exc = ConnectionError(info['msg'])
+ exc.code = info['status']
+ raise exc
+ return r
+
+
+def get_access_token(module, client_id, client_secret):
+ params = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'grant_type': 'client_credentials',
+ 'scope': 'topic.post'
+ }
+ res = do_request(module, 'https://typetalk.com/oauth2/access_token', params)
+ return json.load(res)['access_token']
+
+
+def send_message(module, client_id, client_secret, topic, msg):
+ """
+ send message to typetalk
+ """
+ try:
+ access_token = get_access_token(module, client_id, client_secret)
+ url = 'https://typetalk.com/api/v1/topics/%d' % topic
+ headers = {
+ 'Authorization': 'Bearer %s' % access_token,
+ }
+ do_request(module, url, {'message': msg}, headers)
+ return True, {'access_token': access_token}
+ except ConnectionError as e:
+ return False, e
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ client_id=dict(required=True),
+ client_secret=dict(required=True, no_log=True),
+ topic=dict(required=True, type='int'),
+ msg=dict(required=True),
+ ),
+ supports_check_mode=False
+ )
+
+ if not json:
+ module.fail_json(msg="json module is required")
+
+ client_id = module.params["client_id"]
+ client_secret = module.params["client_secret"]
+ topic = module.params["topic"]
+ msg = module.params["msg"]
+
+ res, error = send_message(module, client_id, client_secret, topic, msg)
+ if not res:
+        module.fail_json(msg='failed to send message with response code %s' % error.code)
+
+ module.exit_json(changed=True, topic=topic, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py
new file mode 100644
index 00000000..db89bd46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage dns entries on a univention corporate server
+description:
+ - "This module allows to manage dns records on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+ - Univention
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns record is present or not.
+ name:
+ required: true
+ description:
+ - "Name of the record, this is also the DNS record. E.g. www for
+ www.example.com."
+ zone:
+ required: true
+ description:
+ - Corresponding DNS zone for this record, e.g. example.com.
+ type:
+ required: true
+ description:
+ - "Define the record type. C(host_record) is a A or AAAA record,
+ C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+ is a SRV record and C(txt_record) is a TXT record."
+ - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)."
+ data:
+ required: false
+    default: {}
+ description:
+ - "Additional data for this record, e.g. ['a': '192.0.2.1'].
+ Required if C(state=present)."
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS record on a UCS
+ community.general.udm_dns_record:
+ name: www
+ zone: example.com
+ type: host_record
+ data:
+ a:
+ - 192.0.2.1
+ - 2001:0db8::42
+'''
+
+
+RETURN = '''#'''
+
+HAVE_UNIVENTION = False
+try:
+ from univention.admin.handlers.dns import (
+ forward_zone,
+ reverse_zone,
+ )
+ HAVE_UNIVENTION = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+ config,
+ uldap,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ type='str'),
+ name=dict(required=True,
+ type='str'),
+            data=dict(default={},
+ type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['data'])
+ ])
+ )
+
+ if not HAVE_UNIVENTION:
+ module.fail_json(msg="This module requires univention python bindings")
+
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
+ dn = 'relativeDomainName={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
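+                # a new record must be created beneath its zone object, so
+                # look the zone up as a forward zone first, then as a
+                # reverse zone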
+ so = forward_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ ) or reverse_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ )
+ obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['name'] = name
+ for k, v in data.items():
+ obj[k] = v
+ diff = obj.diff()
+            changed = diff != []
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ else:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
new file mode 100644
index 00000000..2428650e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
@@ -0,0 +1,231 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage dns zones on a univention corporate server
+description:
+ - "This module allows to manage dns zones on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns zone is present or not.
+ type:
+ required: true
+ description:
+ - Define if the zone is a forward or reverse DNS zone.
+ - "The available choices are: C(forward_zone), C(reverse_zone)."
+ zone:
+ required: true
+ description:
+ - DNS zone name, e.g. C(example.com).
+ nameserver:
+ required: false
+ description:
+ - List of appropriate name servers. Required if C(state=present).
+ interfaces:
+ required: false
+ description:
+      - List of interface IP addresses on which the server should
+        respond for this zone. Required if C(state=present).
+
+ refresh:
+ required: false
+ default: 3600
+ description:
+ - Interval before the zone should be refreshed.
+ retry:
+ required: false
+ default: 1800
+ description:
+ - Interval that should elapse before a failed refresh should be retried.
+ expire:
+ required: false
+ default: 604800
+ description:
+ - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+ ttl:
+ required: false
+ default: 600
+ description:
+ - Minimum TTL field that should be exported with any RR from this zone.
+
+ contact:
+ required: false
+ default: ''
+ description:
+ - Contact person in the SOA record.
+ mx:
+ required: false
+ default: []
+ description:
+      - List of MX servers. (Must be declared as A or AAAA records.)
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS zone on a UCS
+ community.general.udm_dns_zone:
+ zone: example.com
+ type: forward_zone
+ nameserver:
+ - ucs.example.com
+ interfaces:
+ - 192.0.2.1
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def convert_time(time):
+ """Convert a time in seconds into the biggest unit"""
+ units = [
+ (24 * 60 * 60, 'days'),
+ (60 * 60, 'hours'),
+ (60, 'minutes'),
+ (1, 'seconds'),
+ ]
+
+ if time == 0:
+ return ('0', 'seconds')
+ for unit in units:
+ if time >= unit[0]:
+ return ('{0}'.format(time // unit[0]), unit[1])
+
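+# Illustrative results (derived from the unit table above; integer division
+# truncates):
+#   convert_time(0)      -> ('0', 'seconds')
+#   convert_time(90)     -> ('1', 'minutes')
+#   convert_time(3600)   -> ('1', 'hours')
+#   convert_time(604800) -> ('7', 'days')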
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver=dict(default=[],
+ type='list'),
+ interfaces=dict(default=[],
+ type='list'),
+ refresh=dict(default=3600,
+ type='int'),
+ retry=dict(default=1800,
+ type='int'),
+ expire=dict(default=604800,
+ type='int'),
+ ttl=dict(default=600,
+ type='int'),
+ contact=dict(default='',
+ type='str'),
+ mx=dict(default=[],
+ type='list'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['nameserver', 'interfaces'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=dns,{0}'.format(base_dn())
+ dn = 'zoneName={0},{1}'.format(zone, container)
+ if contact == '':
+ contact = 'root@{0}.'.format(zone)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('dns/{0}'.format(type), container)
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ diff=diff,
+ zone=zone
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py
new file mode 100644
index 00000000..d2cf2aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_group
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage posix groups on a univention corporate server
+description:
+ - "This module allows to manage user groups on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the group is present or not.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the posix group.
+ type: str
+ description:
+ required: false
+ description:
+ - Group description.
+ type: str
+ position:
+ required: false
+ description:
+      - Define the whole LDAP position of the group, e.g.
+ C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+ type: str
+ ou:
+ required: false
+ description:
+ - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+ type: str
+ subpath:
+ required: false
+ description:
+ - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+ type: str
+ default: "cn=groups"
+'''
+
+
+EXAMPLES = '''
+- name: Create a POSIX group
+ community.general.udm_group:
+ name: g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ subpath: 'cn=classes,cn=students,cn=groups'
+ ou: school
+
+# or
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ description=dict(default=None,
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=groups',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ description = module.params['description']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ groups = list(ldap_search(
+ '(&(objectClass=posixGroup)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
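+    # build the group's LDAP container DN: an explicit position wins,
+    # otherwise compose '<subpath>,ou=<ou>,<base dn>' from the parts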
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ group_dn = 'cn={0},{1}'.format(name, container)
+
+ exists = bool(len(groups))
+
+ if state == 'present':
+ try:
+ if not exists:
+ grp = umc_module_for_add('groups/group', container)
+ else:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ grp['name'] = name
+ grp['description'] = description
+ diff = grp.diff()
+            changed = diff != []
+ if not module.check_mode:
+ if not exists:
+ grp.create()
+ else:
+ grp.modify()
+        except Exception as err:
+            module.fail_json(
+                msg="Creating/editing group {0} in {1} failed: {2}".format(name, container, err)
+            )
+
+ if state == 'absent' and exists:
+ try:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ if not module.check_mode:
+ grp.remove()
+ changed = True
+        except Exception as err:
+            module.fail_json(
+                msg="Removing group {0} failed: {1}".format(name, err)
+            )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py
new file mode 100644
index 00000000..3e8fb207
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_share
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage samba shares on a univention corporate server
+description:
+ - "This module allows to manage samba shares on a univention corporate
+ server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the share is present or not.
+ type: str
+ name:
+ required: true
+ description:
+      - Name of the share.
+ type: str
+ host:
+ required: false
+ description:
+ - Host FQDN (server which provides the share), e.g. C({{
+ ansible_fqdn }}). Required if C(state=present).
+ type: str
+ path:
+ required: false
+ description:
+ - Directory on the providing server, e.g. C(/home). Required if C(state=present).
+ type: path
+ sambaName:
+ required: false
+ description:
+ - Windows name. Required if C(state=present).
+ type: str
+ aliases: [ samba_name ]
+ ou:
+ required: true
+ description:
+ - Organisational unit, inside the LDAP Base DN.
+ type: str
+ owner:
+ default: '0'
+ description:
+ - Directory owner of the share's root directory.
+ type: str
+ group:
+ default: '0'
+ description:
+ - Directory owner group of the share's root directory.
+ type: str
+ directorymode:
+ default: '00755'
+ description:
+ - Permissions for the share's root directory.
+ type: str
+ root_squash:
+ default: true
+ description:
+ - Modify user ID for root user (root squashing).
+ type: bool
+ subtree_checking:
+ default: true
+ description:
+ - Subtree checking.
+ type: bool
+ sync:
+ default: 'sync'
+ description:
+ - NFS synchronisation.
+ type: str
+ writeable:
+ default: true
+ description:
+ - NFS write access.
+ type: bool
+ sambaBlockSize:
+ description:
+      - Block size.
+ type: str
+ aliases: [ samba_block_size ]
+ sambaBlockingLocks:
+ default: true
+ description:
+ - Blocking locks.
+ type: bool
+ aliases: [ samba_blocking_locks ]
+ sambaBrowseable:
+ description:
+ - Show in Windows network environment.
+ type: bool
+ default: True
+ aliases: [ samba_browsable ]
+ sambaCreateMode:
+ default: '0744'
+ description:
+ - File mode.
+ type: str
+ aliases: [ samba_create_mode ]
+ sambaCscPolicy:
+ default: 'manual'
+ description:
+ - Client-side caching policy.
+ type: str
+ aliases: [ samba_csc_policy ]
+ sambaCustomSettings:
+ default: []
+ description:
+ - Option name in smb.conf and its value.
+ type: list
+ aliases: [ samba_custom_settings ]
+ sambaDirectoryMode:
+ default: '0755'
+ description:
+ - Directory mode.
+ type: str
+ aliases: [ samba_directory_mode ]
+ sambaDirectorySecurityMode:
+ default: '0777'
+ description:
+ - Directory security mode.
+ type: str
+ aliases: [ samba_directory_security_mode ]
+ sambaDosFilemode:
+ default: false
+ description:
+ - Users with write access may modify permissions.
+ type: bool
+ aliases: [ samba_dos_filemode ]
+ sambaFakeOplocks:
+ default: false
+ description:
+ - Fake oplocks.
+ type: bool
+ aliases: [ samba_fake_oplocks ]
+ sambaForceCreateMode:
+ default: false
+ description:
+ - Force file mode.
+ type: bool
+ aliases: [ samba_force_create_mode ]
+ sambaForceDirectoryMode:
+ default: false
+ description:
+ - Force directory mode.
+ type: bool
+ aliases: [ samba_force_directory_mode ]
+ sambaForceDirectorySecurityMode:
+ default: false
+ description:
+ - Force directory security mode.
+ type: bool
+ aliases: [ samba_force_directory_security_mode ]
+ sambaForceGroup:
+ description:
+ - Force group.
+ type: str
+ aliases: [ samba_force_group ]
+ sambaForceSecurityMode:
+ default: false
+ description:
+ - Force security mode.
+ type: bool
+ aliases: [ samba_force_security_mode ]
+ sambaForceUser:
+ description:
+ - Force user.
+ type: str
+ aliases: [ samba_force_user ]
+ sambaHideFiles:
+ description:
+ - Hide files.
+ type: str
+ aliases: [ samba_hide_files ]
+ sambaHideUnreadable:
+ default: false
+ description:
+ - Hide unreadable files/directories.
+ type: bool
+ aliases: [ samba_hide_unreadable ]
+ sambaHostsAllow:
+ default: []
+ description:
+ - Allowed host/network.
+ type: list
+ aliases: [ samba_hosts_allow ]
+ sambaHostsDeny:
+ default: []
+ description:
+ - Denied host/network.
+ type: list
+ aliases: [ samba_hosts_deny ]
+ sambaInheritAcls:
+ default: true
+ description:
+ - Inherit ACLs.
+ type: bool
+ aliases: [ samba_inherit_acls ]
+ sambaInheritOwner:
+ default: false
+ description:
+ - Create files/directories with the owner of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_owner ]
+ sambaInheritPermissions:
+ default: false
+ description:
+ - Create files/directories with permissions of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_permissions ]
+ sambaInvalidUsers:
+ description:
+ - Invalid users or groups.
+ type: str
+ aliases: [ samba_invalid_users ]
+ sambaLevel2Oplocks:
+ default: true
+ description:
+ - Level 2 oplocks.
+ type: bool
+ aliases: [ samba_level_2_oplocks ]
+ sambaLocking:
+ default: true
+ description:
+ - Locking.
+ type: bool
+ aliases: [ samba_locking ]
+ sambaMSDFSRoot:
+ default: false
+ description:
+ - MSDFS root.
+ type: bool
+ aliases: [ samba_msdfs_root ]
+ sambaNtAclSupport:
+ default: true
+ description:
+ - NT ACL support.
+ type: bool
+ aliases: [ samba_nt_acl_support ]
+ sambaOplocks:
+ default: true
+ description:
+ - Oplocks.
+ type: bool
+ aliases: [ samba_oplocks ]
+ sambaPostexec:
+ description:
+ - Postexec script.
+ type: str
+ aliases: [ samba_postexec ]
+ sambaPreexec:
+ description:
+ - Preexec script.
+ type: str
+ aliases: [ samba_preexec ]
+ sambaPublic:
+ default: false
+ description:
+ - Allow anonymous read-only access with a guest user.
+ type: bool
+ aliases: [ samba_public ]
+ sambaSecurityMode:
+ default: '0777'
+ description:
+ - Security mode.
+ type: str
+ aliases: [ samba_security_mode ]
+ sambaStrictLocking:
+ default: 'Auto'
+ description:
+ - Strict locking.
+ type: str
+ aliases: [ samba_strict_locking ]
+ sambaVFSObjects:
+ description:
+ - VFS objects.
+ type: str
+ aliases: [ samba_vfs_objects ]
+ sambaValidUsers:
+ description:
+ - Valid users or groups.
+ type: str
+ aliases: [ samba_valid_users ]
+ sambaWriteList:
+ description:
+ - Restrict write access to these users/groups.
+ type: str
+ aliases: [ samba_write_list ]
+ sambaWriteable:
+ default: true
+ description:
+ - Samba write access.
+ type: bool
+ aliases: [ samba_writeable ]
+ nfs_hosts:
+ default: []
+ description:
+      - Only allow access for these hosts, IP addresses, or networks.
+ type: list
+ nfsCustomSettings:
+ default: []
+ description:
+ - Option name in exports file.
+ type: list
+ aliases: [ nfs_custom_settings ]
+'''
+
+
+EXAMPLES = '''
+- name: Create a share named home on the server ucs.example.com with the path /home
+ community.general.udm_share:
+ name: home
+ path: /home
+ host: ucs.example.com
+ sambaName: Home
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ ou=dict(required=True,
+ type='str'),
+ owner=dict(type='str',
+ default='0'),
+ group=dict(type='str',
+ default='0'),
+ path=dict(type='path',
+ default=None),
+ directorymode=dict(type='str',
+ default='00755'),
+ host=dict(type='str',
+ default=None),
+ root_squash=dict(type='bool',
+ default=True),
+ subtree_checking=dict(type='bool',
+ default=True),
+ sync=dict(type='str',
+ default='sync'),
+ writeable=dict(type='bool',
+ default=True),
+ sambaBlockSize=dict(type='str',
+ aliases=['samba_block_size'],
+ default=None),
+ sambaBlockingLocks=dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable=dict(type='bool',
+ aliases=['samba_browsable'],
+ default=True),
+ sambaCreateMode=dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy=dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings=dict(type='list',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode=dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode=dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode=dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks=dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode=dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode=dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode=dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup=dict(type='str',
+ aliases=['samba_force_group'],
+ default=None),
+ sambaForceSecurityMode=dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser=dict(type='str',
+ aliases=['samba_force_user'],
+ default=None),
+ sambaHideFiles=dict(type='str',
+ aliases=['samba_hide_files'],
+ default=None),
+ sambaHideUnreadable=dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow=dict(type='list',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny=dict(type='list',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls=dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner=dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions=dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers=dict(type='str',
+ aliases=['samba_invalid_users'],
+ default=None),
+ sambaLevel2Oplocks=dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking=dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot=dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName=dict(type='str',
+ aliases=['samba_name'],
+ default=None),
+ sambaNtAclSupport=dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks=dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec=dict(type='str',
+ aliases=['samba_postexec'],
+ default=None),
+ sambaPreexec=dict(type='str',
+ aliases=['samba_preexec'],
+ default=None),
+ sambaPublic=dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode=dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking=dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects=dict(type='str',
+ aliases=['samba_vfs_objects'],
+ default=None),
+ sambaValidUsers=dict(type='str',
+ aliases=['samba_valid_users'],
+ default=None),
+ sambaWriteList=dict(type='str',
+ aliases=['samba_write_list'],
+ default=None),
+ sambaWriteable=dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts=dict(type='list',
+ default=[]),
+ nfsCustomSettings=dict(type='list',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['path', 'host', 'sambaName'])
+ ])
+ )
+ name = module.params['name']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=univentionShare)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn())
+ dn = 'cn={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('shares/share', container)
+ else:
+ obj = umc_module_for_edit('shares/share', dn)
+
+ module.params['printablename'] = '{0} ({1})'.format(name, module.params['host'])
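+            # UDM stores boolean flags as '1'/'0' strings, so coerce bool
+            # parameters before copying them onto the share object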
+ for k in obj.keys():
+ if module.params[k] is True:
+ module.params[k] = '1'
+ elif module.params[k] is False:
+ module.params[k] = '0'
+ obj[k] = module.params[k]
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as err:
+ module.fail_json(
+ msg='Creating/editing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('shares/share', dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as err:
+ module.fail_json(
+ msg='Removing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py
new file mode 100644
index 00000000..efbd95f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py
@@ -0,0 +1,542 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: udm_user
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage posix users on a univention corporate server
+description:
+ - "This module allows to manage posix users on a univention corporate
+ server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the user is present or not.
+ type: str
+ username:
+ required: true
+ description:
+ - User name
+ aliases: ['name']
+ type: str
+ firstname:
+ description:
+ - First name. Required if C(state=present).
+ type: str
+ lastname:
+ description:
+ - Last name. Required if C(state=present).
+ type: str
+ password:
+ description:
+ - Password. Required if C(state=present).
+ type: str
+ birthday:
+ description:
+ - Birthday
+ type: str
+ city:
+ description:
+      - City of the user's business address.
+ type: str
+ country:
+ description:
+      - Country of the user's business address.
+ type: str
+ department_number:
+ description:
+      - Department number of the user's business address.
+ aliases: [ departmentNumber ]
+ type: str
+ description:
+ description:
+ - Description (not gecos)
+ type: str
+ display_name:
+ description:
+ - Display name (not gecos)
+ aliases: [ displayName ]
+ type: str
+ email:
+ default: ['']
+ description:
+ - A list of e-mail addresses.
+ type: list
+ employee_number:
+ description:
+ - Employee number
+ aliases: [ employeeNumber ]
+ type: str
+ employee_type:
+ description:
+ - Employee type
+ aliases: [ employeeType ]
+ type: str
+ gecos:
+ description:
+ - GECOS
+ type: str
+ groups:
+ default: []
+ description:
+ - "POSIX groups, the LDAP DNs of the groups will be found with the
+ LDAP filter for each group as $GROUP:
+ C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ type: list
+ home_share:
+ description:
+ - "Home NFS share. Must be a LDAP DN, e.g.
+ C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ aliases: [ homeShare ]
+ type: str
+ home_share_path:
+ description:
+ - Path to home NFS share, inside the homeShare.
+ aliases: [ homeSharePath ]
+ type: str
+ home_telephone_number:
+ default: []
+ description:
+ - List of private telephone numbers.
+ aliases: [ homeTelephoneNumber ]
+ type: list
+ homedrive:
+ description:
+ - Windows home drive, e.g. C("H:").
+ type: str
+ mail_alternative_address:
+ default: []
+ description:
+ - List of alternative e-mail addresses.
+ aliases: [ mailAlternativeAddress ]
+ type: list
+ mail_home_server:
+ description:
+ - FQDN of mail server
+ aliases: [ mailHomeServer ]
+ type: str
+ mail_primary_address:
+ description:
+ - Primary e-mail address
+ aliases: [ mailPrimaryAddress ]
+ type: str
+ mobile_telephone_number:
+ default: []
+ description:
+      - List of mobile telephone numbers.
+ aliases: [ mobileTelephoneNumber ]
+ type: list
+ organisation:
+ description:
+ - Organisation
+ aliases: [ organization ]
+ type: str
+ overridePWHistory:
+ type: bool
+ default: 'no'
+ description:
+ - Override password history
+ aliases: [ override_pw_history ]
+ overridePWLength:
+ type: bool
+ default: 'no'
+ description:
+ - Override password check
+ aliases: [ override_pw_length ]
+ pager_telephonenumber:
+ default: []
+ description:
+ - List of pager telephone numbers.
+ aliases: [ pagerTelephonenumber ]
+ type: list
+ phone:
+ description:
+ - List of telephone numbers.
+ type: list
+ postcode:
+ description:
+      - Postal code of the user's business address.
+ type: str
+ primary_group:
+ description:
+ - Primary group. This must be the group LDAP DN.
+ - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
+ aliases: [ primaryGroup ]
+ type: str
+ profilepath:
+ description:
+ - Windows profile directory
+ type: str
+ pwd_change_next_login:
+ choices: [ '0', '1' ]
+ description:
+ - Change password on next login.
+ aliases: [ pwdChangeNextLogin ]
+ type: str
+ room_number:
+ description:
+      - Room number of the user's business address.
+ aliases: [ roomNumber ]
+ type: str
+ samba_privileges:
+ description:
+ - "Samba privilege, like allow printer administration, do domain
+ join."
+ aliases: [ sambaPrivileges ]
+ type: list
+ samba_user_workstations:
+ description:
+ - Allow the authentication only on this Microsoft Windows host.
+ aliases: [ sambaUserWorkstations ]
+ type: list
+ sambahome:
+ description:
+ - Windows home path, e.g. C('\\$FQDN\$USERNAME').
+ type: str
+ scriptpath:
+ description:
+ - Windows logon script.
+ type: str
+ secretary:
+ default: []
+ description:
+ - A list of superiors as LDAP DNs.
+ type: list
+ serviceprovider:
+ default: ['']
+ description:
+ - Enable user for the following service providers.
+ type: list
+ shell:
+ default: '/bin/bash'
+ description:
+ - Login shell
+ type: str
+ street:
+ description:
+      - Street of the user's business address.
+ type: str
+ title:
+ description:
+ - Title, e.g. C(Prof.).
+ type: str
+ unixhome:
+ description:
+ - Unix home directory
+ - If not specified, it defaults to C(/home/$USERNAME).
+ type: str
+ userexpiry:
+ description:
+ - Account expiry date, e.g. C(1999-12-31).
+ - If not specified, it defaults to the current day plus one year.
+ type: str
+ position:
+ default: ''
+ description:
+ - "Define the whole position of users object inside the LDAP tree,
+ e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ type: str
+ update_password:
+ default: always
+ choices: [ always, on_create ]
+ description:
+ - "C(always) will update passwords if they differ.
+ C(on_create) will only set the password for newly created users."
+ type: str
+ ou:
+ default: ''
+ description:
+ - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ LDAP OU C(ou=school,dc=example,dc=com)."
+ type: str
+ subpath:
+ default: 'cn=users'
+ description:
+ - "LDAP subpath inside the organizational unit, e.g.
+ C(cn=teachers,cn=users) for LDAP container
+ C(cn=teachers,cn=users,dc=example,dc=com)."
+ type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create a user on a UCS
+ community.general.udm_user:
+ name: FooBar
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ ou: school
+ subpath: 'cn=teachers,cn=users'
+
+# or define the position
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+import crypt
+from datetime import date, timedelta
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
+ module = AnsibleModule(
+ argument_spec=dict(
+ birthday=dict(type='str'),
+ city=dict(type='str'),
+ country=dict(type='str'),
+ department_number=dict(type='str',
+ aliases=['departmentNumber']),
+ description=dict(type='str'),
+ display_name=dict(type='str',
+ aliases=['displayName']),
+ email=dict(default=[''],
+ type='list'),
+ employee_number=dict(type='str',
+ aliases=['employeeNumber']),
+ employee_type=dict(type='str',
+ aliases=['employeeType']),
+ firstname=dict(type='str'),
+ gecos=dict(type='str'),
+ groups=dict(default=[],
+ type='list'),
+ home_share=dict(type='str',
+ aliases=['homeShare']),
+ home_share_path=dict(type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number=dict(default=[],
+ type='list',
+ aliases=['homeTelephoneNumber']),
+ homedrive=dict(type='str'),
+ lastname=dict(type='str'),
+ mail_alternative_address=dict(default=[],
+ type='list',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server=dict(type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address=dict(type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number=dict(default=[],
+ type='list',
+ aliases=['mobileTelephoneNumber']),
+ organisation=dict(type='str',
+ aliases=['organization']),
+ overridePWHistory=dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength=dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber=dict(default=[],
+ type='list',
+ aliases=['pagerTelephonenumber']),
+ password=dict(type='str',
+ no_log=True),
+ phone=dict(default=[],
+ type='list'),
+ postcode=dict(type='str'),
+ primary_group=dict(type='str',
+ aliases=['primaryGroup']),
+ profilepath=dict(type='str'),
+ pwd_change_next_login=dict(type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number=dict(type='str',
+ aliases=['roomNumber']),
+ samba_privileges=dict(default=[],
+ type='list',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations=dict(default=[],
+ type='list',
+ aliases=['sambaUserWorkstations']),
+ sambahome=dict(type='str'),
+ scriptpath=dict(type='str'),
+ secretary=dict(default=[],
+ type='list'),
+ serviceprovider=dict(default=[''],
+ type='list'),
+ shell=dict(default='/bin/bash',
+ type='str'),
+ street=dict(type='str'),
+ title=dict(type='str'),
+ unixhome=dict(type='str'),
+ userexpiry=dict(type='str'),
+ username=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ update_password=dict(default='always',
+ choices=['always', 'on_create'],
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=users',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['firstname', 'lastname', 'password'])
+ ])
+ )
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ users = list(ldap_search(
+ '(&(objectClass=posixAccount)(uid={0}))'.format(username),
+ attr=['uid']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ user_dn = 'uid={0},{1}'.format(username, container)
+
+ exists = bool(len(users))
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('users/user', container)
+ else:
+ obj = umc_module_for_edit('users/user', user_dn)
+
+ if module.params['displayName'] is None:
+ module.params['displayName'] = '{0} {1}'.format(
+ module.params['firstname'],
+ module.params['lastname']
+ )
+ if module.params['unixhome'] is None:
+ module.params['unixhome'] = '/home/{0}'.format(
+ module.params['username']
+ )
+ for k in obj.keys():
+ if (k != 'password' and
+ k != 'groups' and
+ k != 'overridePWHistory' and
+ k in module.params and
+ module.params[k] is not None):
+ obj[k] = module.params[k]
+ # handle some special values
+ obj['e-mail'] = module.params['email']
+ if 'userexpiry' in obj and obj.get('userexpiry') is None:
+ obj['userexpiry'] = expiry
+ password = module.params['password']
+ if obj['password'] is None:
+ obj['password'] = password
+ if module.params['update_password'] == 'always':
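+                # stored hashes look like '{crypt}$6$salt$hash'; strip the
+                # '{crypt}' prefix and re-hash the supplied password with the
+                # old hash as salt to detect a real password change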
+ old_password = obj['password'].split('}', 2)[1]
+ if crypt.crypt(password, old_password) != old_password:
+ obj['overridePWHistory'] = module.params['overridePWHistory']
+ obj['overridePWLength'] = module.params['overridePWLength']
+ obj['password'] = password
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+        except Exception as err:
+            module.fail_json(
+                msg="Creating/editing user {0} in {1} failed: {2}".format(
+                    username,
+                    container,
+                    err
+                )
+            )
+ try:
+ groups = module.params['groups']
+ if groups:
+            ldap_filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
+                ')(cn='.join(groups)
+            )
+            group_dns = list(ldap_search(ldap_filter, attr=['dn']))
+ for dn in group_dns:
+ grp = umc_module_for_edit('groups/group', dn[0])
+ if user_dn not in grp['users']:
+ grp['users'].append(user_dn)
+ if not module.check_mode:
+ grp.modify()
+ changed = True
+    except Exception as err:
+        module.fail_json(
+            msg="Adding groups to user {0} failed: {1}".format(username, err)
+        )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('users/user', user_dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+        except Exception as err:
+            module.fail_json(
+                msg="Removing user {0} failed: {1}".format(username, err)
+            )
+
+ module.exit_json(
+ changed=changed,
+ username=username,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py
new file mode 100644
index 00000000..c6df6fe6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py
@@ -0,0 +1,594 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
+# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
+# Copyright: (c) 2013, James Martin <jmartin@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+author:
+ - Aleksey Ovcharenko (@ovcharenko)
+ - Jarno Keskikangas (@pyykkis)
+ - Ahti Kitsik (@ahtik)
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ type: str
+ choices: [ disabled, enabled, reloaded, reset ]
+ default:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ type: str
+ choices: [ allow, deny, reject ]
+ aliases: [ policy ]
+ direction:
+ description:
+ - Select direction for a rule or default policy command. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ choices: [ in, incoming, out, outgoing, routed ]
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ type: str
+ choices: [ 'on', 'off', low, medium, high, full ]
+ insert:
+ description:
+ - Insert the corresponding rule as rule number NUM.
+ - Note that ufw numbers rules starting with 1.
+ type: int
+ insert_relative_to:
+ description:
+ - Allows to interpret the index in I(insert) relative to a position.
+ - C(zero) interprets the rule number as an absolute index (i.e. 1 is
+ the first rule).
+ - C(first-ipv4) interprets the rule number relative to the index of the
+ first IPv4 rule, or relative to the position where the first IPv4 rule
+ would be if there is currently none.
+ - C(last-ipv4) interprets the rule number relative to the index of the
+ last IPv4 rule, or relative to the position where the last IPv4 rule
+ would be if there is currently none.
+ - C(first-ipv6) interprets the rule number relative to the index of the
+ first IPv6 rule, or relative to the position where the first IPv6 rule
+ would be if there is currently none.
+ - C(last-ipv6) interprets the rule number relative to the index of the
+ last IPv6 rule, or relative to the position where the last IPv6 rule
+ would be if there is currently none.
+ type: str
+ choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
+ default: zero
+ rule:
+ description:
+      - Add a firewall rule.
+ type: str
+ choices: [ allow, deny, limit, reject ]
+ log:
+ description:
+      - Log new connections matched to this rule.
+ type: bool
+ default: false
+ from_ip:
+ description:
+ - Source IP address.
+ type: str
+ default: any
+ aliases: [ from, src ]
+ from_port:
+ description:
+ - Source port.
+ type: str
+ to_ip:
+ description:
+ - Destination IP address.
+ type: str
+ default: any
+ aliases: [ dest, to]
+ to_port:
+ description:
+ - Destination port.
+ type: str
+ aliases: [ port ]
+ proto:
+ description:
+ - TCP/IP protocol.
+ type: str
+ choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
+ aliases: [ protocol ]
+ name:
+ description:
+ - Use profile located in C(/etc/ufw/applications.d).
+ type: str
+ aliases: [ app ]
+ delete:
+ description:
+ - Delete rule.
+ type: bool
+ default: false
+ interface:
+ description:
+ - Specify interface for the rule. The direction (in or out) used
+ for the interface depends on the value of I(direction). See
+        I(interface_in) and I(interface_out) for routed rules that need
+ to supply both an input and output interface. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ aliases: [ if ]
+ interface_in:
+ description:
+ - Specify input interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_out) for routed rules.
+ type: str
+ aliases: [ if_in ]
+ version_added: '0.2.0'
+ interface_out:
+ description:
+ - Specify output interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_in) for routed rules.
+ type: str
+ aliases: [ if_out ]
+ version_added: '0.2.0'
+ route:
+ description:
+ - Apply the rule to routed/forwarded packets.
+ type: bool
+ default: false
+ comment:
+ description:
+ - Add a comment to the rule. Requires UFW version >=0.35.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Allow everything and enable UFW
+ community.general.ufw:
+ state: enabled
+ policy: allow
+
+- name: Set logging
+ community.general.ufw:
+ logging: 'on'
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+- community.general.ufw:
+ rule: reject
+ port: auth
+ log: yes
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+- community.general.ufw:
+ rule: limit
+ port: ssh
+ proto: tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=yes
+# or a separate state=reset task)
+- community.general.ufw:
+ rule: allow
+ name: OpenSSH
+
+- name: Delete OpenSSH rule
+ community.general.ufw:
+ rule: allow
+ name: OpenSSH
+ delete: yes
+
+- name: Deny all access to port 53
+ community.general.ufw:
+ rule: deny
+ port: '53'
+
+- name: Allow port range 60000-61000
+ community.general.ufw:
+ rule: allow
+ port: 60000:61000
+ proto: tcp
+
+- name: Allow all access to tcp port 80
+ community.general.ufw:
+ rule: allow
+ port: '80'
+ proto: tcp
+
+- name: Allow all access from RFC1918 networks to this host
+ community.general.ufw:
+ rule: allow
+ src: '{{ item }}'
+ loop:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+
+- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
+ community.general.ufw:
+ rule: deny
+ proto: udp
+ src: 1.2.3.4
+ port: '514'
+ comment: Block syslog
+
+- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ community.general.ufw:
+ rule: allow
+ interface: eth0
+ direction: in
+ proto: udp
+ src: 1.2.3.5
+ from_port: '5469'
+ dest: 1.2.3.4
+ to_port: '5469'
+
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ src: 2001:db8::/32
+ port: '25'
+
+- name: Deny all IPv6 traffic to tcp port 20 on this host
+ # this should be the first IPv6 rule
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+
+- name: Deny all IPv4 traffic to tcp port 20 on this host
+ # This should be the third to last IPv4 rule
+ # (insert: -1 addresses the second to last IPv4 rule;
+ # so the new rule will be inserted before the second
+  # to last IPv4 rule, and will become the third to last
+ # IPv4 rule.)
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: -1
+ insert_relative_to: last-ipv4
+
+# Can be used to further restrict a global FORWARD policy set to allow
+- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
+ community.general.ufw:
+ rule: deny
+ route: yes
+ src: 1.2.3.0/24
+ dest: 4.5.6.0/24
+'''
+
+import re
+
+from operator import itemgetter
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def compile_ipv4_regexp():
+ r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
+ r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
+ return re.compile(r)
+
+
+def compile_ipv6_regexp():
+ """
+ validation pattern provided by :
+ https://stackoverflow.com/questions/53497/regular-expression-that-matches-
+ valid-ipv6-addresses#answer-17871737
+ """
+ r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
+ r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
+ r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
+ r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
+ r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
+ r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
+ r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
+ r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
+ return re.compile(r)
+
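+# Informal sanity checks for the two patterns above (illustrative only):
+#   compile_ipv4_regexp().match('192.0.2.1')   -> match
+#   compile_ipv6_regexp().match('2001:db8::1') -> match
+#   compile_ipv4_regexp().match('2001:db8::1') -> None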
+
+def main():
+ command_keys = ['state', 'default', 'rule', 'logging']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
+ direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
+ delete=dict(type='bool', default=False),
+ route=dict(type='bool', default=False),
+ insert=dict(type='int'),
+ insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
+ rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
+ interface=dict(type='str', aliases=['if']),
+ interface_in=dict(type='str', aliases=['if_in']),
+ interface_out=dict(type='str', aliases=['if_out']),
+ log=dict(type='bool', default=False),
+ from_ip=dict(type='str', default='any', aliases=['from', 'src']),
+ from_port=dict(type='str'),
+ to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
+ to_port=dict(type='str', aliases=['port']),
+ proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
+ name=dict(type='str', aliases=['app']),
+ comment=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'proto', 'logging'],
+ # Mutual exclusivity with `interface` implied by `required_by`.
+ ['direction', 'interface_in'],
+ ['direction', 'interface_out'],
+ ],
+ required_one_of=([command_keys]),
+ required_by=dict(
+ interface=('direction', ),
+ ),
+ )
+
+ cmds = []
+
+ ipv4_regexp = compile_ipv4_regexp()
+ ipv6_regexp = compile_ipv6_regexp()
+
+ def filter_line_that_not_start_with(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
+
+ def filter_line_that_contains(pattern, content):
+ return [line for line in content.splitlines(True) if pattern in line]
+
+ def filter_line_that_not_contains(pattern, content):
+        return ''.join([line for line in content.splitlines(True) if pattern not in line])
+
+ def filter_line_that_match_func(match_func, content):
+ return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
+
+ def filter_line_that_contains_ipv4(content):
+ return filter_line_that_match_func(ipv4_regexp.search, content)
+
+ def filter_line_that_contains_ipv6(content):
+ return filter_line_that_match_func(ipv6_regexp.search, content)
+
+ def is_starting_by_ipv4(ip):
+ return ipv4_regexp.match(ip) is not None
+
+ def is_starting_by_ipv6(ip):
+ return ipv6_regexp.match(ip) is not None
+
+ def execute(cmd, ignore_error=False):
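+        # cmd is a list of lists: each entry's last element is a command
+        # fragment, and its first element decides whether the fragment is
+        # kept (one-element entries are kept when the fragment is truthy)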
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
+
+ if rc != 0 and not ignore_error:
+ module.fail_json(msg=err or out, commands=cmds)
+
+ return out
+
+ def get_current_rules():
+ user_rules_files = ["/lib/ufw/user.rules",
+ "/lib/ufw/user6.rules",
+ "/etc/ufw/user.rules",
+ "/etc/ufw/user6.rules",
+ "/var/lib/ufw/user.rules",
+ "/var/lib/ufw/user6.rules"]
+
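+        # ufw records every managed rule as a '### tuple ...' comment line in
+        # its user rules files; grep those lines to snapshot the rule set so
+        # changes can be detected by comparing pre/post state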
+ cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
+
+ cmd.extend([[f] for f in user_rules_files])
+ return execute(cmd, ignore_error=True)
+
+ def ufw_version():
+ """
+ Returns the major and minor version of ufw installed on the system.
+ """
+ out = execute([[ufw_bin], ["--version"]])
+
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
+ if matches is None:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+ params = module.params
+
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+ grep_bin = module.get_bin_path('grep', True)
+
+ # Save the pre state and rules in order to recognize changes
+ pre_state = execute([[ufw_bin], ['status verbose']])
+ pre_rules = get_current_rules()
+
+ changed = False
+
+ # Execute filter
+ for (command, value) in commands.items():
+
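+ # Every command starts with the ufw binary; in check mode the truthy guard
+ # adds --dry-run so nothing is actually changed on the host.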
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = {'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset'}
+
+ if value in ['reloaded', 'reset']:
+ changed = True
+
+ if module.check_mode:
+ # "active" would also match "inactive", hence the space
+ ufw_enabled = pre_state.find(" active") != -1
+ if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
+ changed = True
+ else:
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
+ extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
+ if extract:
+ current_level = extract.group(2)
+ current_on_off_value = extract.group(1)
+ if value != "off":
+ if current_on_off_value == "off":
+ changed = True
+ elif value != "on" and value != current_level:
+ changed = True
+ elif current_on_off_value != "off":
+ changed = True
+ else:
+ changed = True
+
+ if not module.check_mode:
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
+ module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
+ if module.check_mode:
+ regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
+ extract = re.search(regexp, pre_state)
+ if extract is not None:
+ current_default_values = {}
+ current_default_values["incoming"] = extract.group(1)
+ current_default_values["outgoing"] = extract.group(2)
+ current_default_values["routed"] = extract.group(3)
+ v = current_default_values[params['direction'] or 'incoming']
+ if v not in (value, 'disabled'):
+ changed = True
+ else:
+ changed = True
+ else:
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ if params['direction'] not in ['in', 'out', None]:
+ module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
+ if not params['route'] and params['interface_in'] and params['interface_out']:
+ module.fail_json(msg='Only route rules can combine '
+ 'interface_in and interface_out')
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application] [comment COMMENT]
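+ # For example, a fully rendered rule command might look like:
+ # ufw insert 1 allow in on eth0 log from 192.0.2.1 to any port 22 proto tcp comment 'ssh'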
+ cmd.append([module.boolean(params['route']), 'route'])
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ if params['insert'] is not None:
+ relative_to_cmd = params['insert_relative_to']
+ if relative_to_cmd == 'zero':
+ insert_to = params['insert']
+ else:
+ (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
+ numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
+ lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
+ lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
+ last_number = max([no for (no, ipv6) in lines]) if lines else 0
+ has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
+ has_ipv6 = any([ipv6 for (no, ipv6) in lines])
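+ # 'ufw status numbered' lists IPv4 rules before IPv6 rules, so the relative
+ # anchors below are derived from where the IPv4 block ends.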
+ if relative_to_cmd == 'first-ipv4':
+ relative_to = 1
+ elif relative_to_cmd == 'last-ipv4':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
+ elif relative_to_cmd == 'first-ipv6':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
+ elif relative_to_cmd == 'last-ipv6':
+ relative_to = last_number if has_ipv6 else last_number + 1
+ insert_to = params['insert'] + relative_to
+ if insert_to > last_number:
+ # ufw does not like it when the insert number is larger than the
+ # maximal rule number for IPv4/IPv6.
+ insert_to = None
+ cmd.append([insert_to is not None, "insert %s" % insert_to])
+ cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
+ cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
+ cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
+ ('to_ip', "to %s"), ('to_port', "port %s"),
+ ('proto', "proto %s"), ('name', "app '%s'")]:
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ ufw_major, ufw_minor, dummy = ufw_version()
+ # comment is supported only in ufw version after 0.35
+ if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
+ cmd.append([params['comment'], "comment '%s'" % params['comment']])
+
+ rules_dry = execute(cmd)
+
+ if module.check_mode:
+
+ nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
+
+ if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
+
+ rules_dry = filter_line_that_start_with("### tuple", rules_dry)
+ # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
+ if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
+ if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
+ changed = True
+ elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
+ if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
+ changed = True
+ elif pre_rules != rules_dry:
+ changed = True
+
+ # Get the new state
+ if module.check_mode:
+ return module.exit_json(changed=changed, commands=cmds)
+ else:
+ post_state = execute([[ufw_bin], ['status'], ['verbose']])
+ if not changed:
+ post_rules = get_current_rules()
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py
new file mode 100644
index 00000000..bb4e60fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: uptimerobot
+short_description: Pause and start Uptime Robot monitoring
+description:
+ - This module lets you start and pause Uptime Robot monitoring.
+author: "Nate Kingsley (@nate-kingsley)"
+requirements:
+ - Valid Uptime Robot API Key
+options:
+ state:
+ type: str
+ description:
+ - Define whether the monitor should be running or paused.
+ required: true
+ choices: [ "started", "paused" ]
+ monitorid:
+ type: str
+ description:
+ - ID of the monitor to check.
+ required: true
+ apikey:
+ type: str
+ description:
+ - Uptime Robot API key.
+ required: true
+notes:
+ - Support for adding and removing monitors and alert contacts has not yet been implemented.
+'''
+
+EXAMPLES = '''
+- name: Pause the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: paused
+
+- name: Start the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: started
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+API_BASE = "https://api.uptimerobot.com/"
+
+API_ACTIONS = dict(
+ status='getMonitors?',
+ editMonitor='editMonitor?'
+)
+
+API_FORMAT = 'json'
+API_NOJSONCALLBACK = 1
+CHANGED_STATE = False
+SUPPORTS_CHECK_MODE = False
+
+
+def checkID(module, params):
+
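+ # Validate the API key and monitor ID via getMonitors; the v1 API signals
+ # success with stat == "ok" in the JSON response.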
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['status'] + data
+ req, info = fetch_url(module, full_uri)
+ if req is None:
+ module.fail_json(msg="Request to the Uptime Robot API failed: %s" % info.get('msg', 'unknown error'))
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult
+
+
+def startMonitor(module, params):
+
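+ # editMonitor encodes the desired state in monitorStatus: 1 resumes the
+ # monitor here, 0 pauses it in pauseMonitor below.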
+ params['monitorStatus'] = 1
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ if req is None:
+ module.fail_json(msg="Request to the Uptime Robot API failed: %s" % info.get('msg', 'unknown error'))
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def pauseMonitor(module, params):
+
+ params['monitorStatus'] = 0
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ if req is None:
+ module.fail_json(msg="Request to the Uptime Robot API failed: %s" % info.get('msg', 'unknown error'))
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['started', 'paused']),
+ apikey=dict(required=True, no_log=True),
+ monitorid=dict(required=True)
+ ),
+ supports_check_mode=SUPPORTS_CHECK_MODE
+ )
+
+ params = dict(
+ apiKey=module.params['apikey'],
+ monitors=module.params['monitorid'],
+ monitorID=module.params['monitorid'],
+ format=API_FORMAT,
+ noJsonCallback=API_NOJSONCALLBACK
+ )
+
+ check_result = checkID(module, params)
+
+ if check_result['stat'] != "ok":
+ module.fail_json(
+ msg="failed",
+ result=check_result['message']
+ )
+
+ if module.params['state'] == 'started':
+ monitor_result = startMonitor(module, params)
+ else:
+ monitor_result = pauseMonitor(module, params)
+
+ module.exit_json(
+ msg="success",
+ result=monitor_result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py
new file mode 100644
index 00000000..9d54fbcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Philippe Makowski
+# Written by Philippe Makowski <philippem@mageia.org>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: urpmi
+short_description: Urpmi manager
+description:
+ - Manages packages with I(urpmi) (such as for Mageia or Mandriva)
+options:
+ name:
+ description:
+ - A list of package names to install, upgrade or remove.
+ required: yes
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update the package database first C(urpmi.update -a).
+ type: bool
+ default: no
+ aliases: ['update-cache']
+ no_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(urpmi).
+ type: bool
+ default: yes
+ aliases: ['no-recommends']
+ force:
+ description:
+ - Assume "yes" is the answer to any question urpmi has to ask.
+ Corresponds to the C(--force) option for I(urpmi).
+ type: bool
+ default: yes
+ root:
+ description:
+ - Specifies an alternative install root, relative to which all packages will be installed.
+ Corresponds to the C(--root) option for I(urpmi).
+ aliases: [ installroot ]
+ type: str
+author:
+- Philippe Makowski (@pmakowski)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.urpmi:
+ pkg: foo
+ state: present
+
+- name: Remove package foo
+ community.general.urpmi:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.urpmi:
+ pkg: foo,bar
+ state: absent
+
+- name: Update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists)
+ community.general.urpmi:
+ name: bar
+ state: present
+ update_cache: yes
+'''
+
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc == 0
+
+
+def query_package_provides(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc == 0
+
+
+def update_package_db(module):
+
+ urpmiupdate_path = module.get_bin_path("urpmi.update", True)
+ cmd = "%s -a -q" % (urpmiupdate_path,)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def remove_packages(module, packages, root):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package, root):
+ continue
+
+ urpme_path = module.get_bin_path("urpme", True)
+ cmd = "%s --auto %s %s" % (urpme_path, root_option(root), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec, root, force=True, no_recommends=True):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+ if no_recommends:
+ no_recommends_yes = '--no-recommends'
+ else:
+ no_recommends_yes = ''
+
+ if force:
+ force_yes = '--force'
+ else:
+ force_yes = ''
+
+ urpmi_path = module.get_bin_path("urpmi", True)
+ cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes,
+ no_recommends_yes,
+ root_option(root),
+ packages))
+
+ rc, out, err = module.run_command(cmd)
+
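+ # urpmi exits 0 even on failure when --force is used, so re-query each
+ # requested package to confirm it is now provided.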
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ module.fail_json(msg="'urpmi %s' failed: %s" % (package, err))
+
+ # urpmi always has exit code 0 when --force is used
+ if rc:
+ module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="%s present(s)" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def root_option(root):
+ if (root):
+ return "--root=%s" % (root)
+ else:
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ force=dict(type='bool', default=True),
+ no_recommends=dict(type='bool', default=True, aliases=['no-recommends']),
+ name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']),
+ root=dict(type='str', aliases=['installroot']),
+ ),
+ )
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, p['name'], p['root'], p['force'], p['no_recommends'])
+
+ elif p['state'] in ['removed', 'absent']:
+ remove_packages(module, p['name'], p['root'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py
new file mode 100644
index 00000000..b4aca155
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy an aaa group object in Sophos UTM.
+
+description:
+ - Create, update or destroy an aaa group object in Sophos UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ type: str
+ required: true
+ adirectory_groups:
+ description:
+ - List of adirectory group strings.
+ type: list
+ elements: str
+ adirectory_groups_sids:
+ description:
+ - Dictionary of group sids.
+ type: dict
+ backend_match:
+ description:
+ - The backend for the group.
+ type: str
+ choices:
+ - none
+ - adirectory
+ - edirectory
+ - radius
+ - tacacs
+ - ldap
+ default: none
+ comment:
+ description:
+ - Comment that describes the AAA group.
+ type: str
+ default: ''
+ dynamic:
+ description:
+ - Group type. Is static if none is selected.
+ type: str
+ default: none
+ choices:
+ - none
+ - ipsec_dn
+ - directory_groups
+ edirectory_groups:
+ description:
+ - List of edirectory group strings.
+ type: list
+ elements: str
+ ipsec_dn:
+ description:
+ - The ipsec dn string.
+ type: str
+ ldap_attribute:
+ description:
+ - The ldap attribute to check against.
+ type: str
+ ldap_attribute_value:
+ description:
+ - The ldap attribute value to check against.
+ type: str
+ members:
+ description:
+ - A list of user ref names (aaa/user).
+ type: list
+ elements: str
+ default: []
+ network:
+ description:
+ - The network reference name. The objects contains the known ip addresses for the authentication object (network/aaa).
+ type: str
+ default: ""
+ radius_groups:
+ description:
+ - A list of radius group strings.
+ type: list
+ elements: str
+ default: []
+ tacacs_groups:
+ description:
+ - A list of tacacs group strings.
+ type: list
+ elements: str
+ default: []
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ backend_match: ldap
+ dynamic: directory_groups
+ ldap_attribute: memberof
+ ldap_attribute_value: "cn=groupname,ou=Groups,dc=mydomain,dc=com"
+ network: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created.
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object.
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked.
+ type: bool
+ _type:
+ description: The type of the object.
+ type: str
+ name:
+ description: The name of the object.
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups.
+ type: list
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS.
+ type: list
+ backend_match:
+ description: The backend to use.
+ type: str
+ comment:
+ description: The comment string.
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group.
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups.
+ type: list
+ ipsec_dn:
+ description: ipsec_dn identifier to match.
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against.
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against.
+ type: str
+ members:
+ description: List of member identifiers of the group.
+ type: list
+ network:
+ description: The identifier of the network (network/aaa).
+ type: str
+ radius_group:
+ description: The radius group identifier.
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier.
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic",
+ "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members",
+ "network", "radius_groups", "tacacs_groups"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ adirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ adirectory_groups_sids=dict(type='dict', required=False, default={}),
+ backend_match=dict(type='str', required=False, default="none",
+ choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]),
+ comment=dict(type='str', required=False, default=""),
+ dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]),
+ edirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ ipsec_dn=dict(type='str', required=False, default=""),
+ ldap_attribute=dict(type='str', required=False, default=""),
+ ldap_attribute_value=dict(type='str', required=False, default=""),
+ members=dict(type='list', elements='str', required=False, default=[]),
+ network=dict(type='str', required=False, default=""),
+ radius_groups=dict(type='list', elements='str', required=False, default=[]),
+ tacacs_groups=dict(type='list', elements='str', required=False, default=[]),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py
new file mode 100644
index 00000000..6d230c1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for an aaa group entry in Sophos UTM
+
+description:
+ - Get info for an aaa group entry in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM aaa_group
+ community.general.utm_aaa_group_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups
+ type: list
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS
+ type: list
+ backend_match:
+ description: The backend to use
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups
+ type: list
+ ipsec_dn:
+ description: ipsec_dn identifier to match
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against
+ type: str
+ members:
+ description: List of member identifiers of the group
+ type: list
+ network:
+ description: The identifier of the network (network/aaa)
+ type: str
+ radius_group:
+ description: The radius group identifier
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py
new file mode 100644
index 00000000..e940f416
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy ca host_key_cert entry in Sophos UTM
+
+description:
+ - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ type: str
+ ca:
+ description:
+ - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ required: true
+ type: str
+ meta:
+ description:
+ - A reference to an existing utm_ca_meta_x509 object.
+ required: true
+ type: str
+ certificate:
+ description:
+ - The certificate in PEM format.
+ required: true
+ type: str
+ comment:
+ description:
+ - Optional comment string.
+ type: str
+ encrypted:
+ description:
+ - Optionally enable encryption.
+ default: False
+ type: bool
+ key:
+ description:
+ - Optional private key in PEM format.
+ type: str
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ ca: REF_ca/signing_ca_OBJECT_STRING
+ meta: REF_ca/meta_x509_OBJECT_STRING
+ certificate: |
+ -----BEGIN CERTIFICATE-----
+ . . .
+ . . .
+ . . .
+ -----END CERTIFICATE-----
+ state: present
+
+- name: Remove a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: absent
+
+- name: Read a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ ca=dict(type='str', required=True),
+ meta=dict(type='str', required=True),
+ certificate=dict(type='str', required=True),
+ comment=dict(type='str', required=False),
+ encrypted=dict(type='bool', required=False, default=False),
+ key=dict(type='str', required=False, no_log=True),
+ )
+ )
+ try:
+ # This is needed because the bool value only accepts int values in the backend
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py
new file mode 100644
index 00000000..ad315df9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert_info
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: Get info for a ca host_key_cert entry in Sophos UTM
+
+description:
+ - Get info for a ca host_key_cert entry in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for a ca host_key_cert entry
+ community.general.utm_ca_host_key_cert_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py
new file mode 100644
index 00000000..1f080abf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_dns_host
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy dns entry in Sophos UTM
+
+description:
+ - Create, update or destroy a dns entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The IPV4 Address of the entry. Can be left empty for automatic resolving.
+ default: 0.0.0.0
+ address6:
+ type: str
+ description:
+ - The IPV6 Address of the entry. Can be left empty for automatic resolving.
+ default: "::"
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the dns host object
+ hostname:
+ type: str
+ description:
+ - The hostname for the dns host object
+ interface:
+ type: str
+ description:
+ - The reference name of the interface to use. If not provided the default interface will be used
+ resolved:
+ description:
+ - whether the hostname's ipv4 address is already resolved or not
+ default: False
+ type: bool
+ resolved6:
+ description:
+ - whether the hostname's ipv6 address is already resolved or not
+ default: False
+ type: bool
+ timeout:
+ type: int
+ description:
+ - the timeout for the utm to resolve the ip address for the hostname again
+ default: 0
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ hostname: testentry.some.tld
+ state: present
+
+- name: Remove UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ipv4 address of the object
+ type: str
+ address6:
+ description: The ipv6 address of the object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ hostname:
+ description: The hostname of the object
+ type: str
+ interface:
+ description: The reference name of the interface the object is associated with
+ type: str
+ resolved:
+ description: Whether the ipv4 address is resolved or not
+ type: bool
+ resolved6:
+ description: Whether the ipv6 address is resolved or not
+ type: bool
+ timeout:
+ description: The timeout until a new resolving will be attempted
+ type: int
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/dns_host"
+ key_to_check_for_changes = ["comment", "hostname", "interface"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=False, default='0.0.0.0'),
+ address6=dict(type='str', required=False, default='::'),
+ comment=dict(type='str', required=False, default=""),
+ hostname=dict(type='str', required=False),
+ interface=dict(type='str', required=False, default=""),
+ resolved=dict(type='bool', required=False, default=False),
+ resolved6=dict(type='bool', required=False, default=False),
+ timeout=dict(type='int', required=False, default=0),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py
new file mode 100644
index 00000000..ecf08871
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Create, update or destroy network/interface_address object
+
+description:
+ - Create, update or destroy a network/interface_address object in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The ip4 address of the network/interface_address object.
+ required: true
+ address6:
+ type: str
+ description:
+ - The ip6 address of the network/interface_address object.
+ required: false
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ resolved:
+ type: bool
+ description:
+ - Whether or not the ip4 address is resolved
+ resolved6:
+ type: bool
+ description:
+ - Whether or not the ip6 address is resolved
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a network interface address
+ community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: present
+
+- name: Remove a network interface address
+ community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ip4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The ip6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the ip4 address is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the ip6 address is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = ["comment", "address"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=True),
+ comment=dict(type='str', required=False, default=""),
+ address6=dict(type='str', required=False),
+ resolved=dict(type='bool', required=False),
+ resolved6=dict(type='bool', required=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py
new file mode 100644
index 00000000..c1d0f7d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address_info
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Get info for a network/interface_address object
+
+description:
+ - Get info for a network/interface_address object in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get network interface address info
+ community.general.utm_network_interface_address_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ip4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The ip6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the ip4 address is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the ip6 address is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py
new file mode 100644
index 00000000..caa0085c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_auth_profile
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy reverse_proxy auth_profile entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ aaa:
+ type: list
+ elements: str
+ description:
+ - List of references to utm_aaa objects (allowed users or groups)
+ required: true
+ basic_prompt:
+ type: str
+ description:
+ - The message in the basic authentication prompt
+ required: true
+ backend_mode:
+ type: str
+ description:
+ - Specifies if the backend server needs authentication ([Basic|None])
+ default: None
+ choices:
+ - Basic
+ - None
+ backend_strip_basic_auth:
+ description:
+ - Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ default: True
+ backend_user_prefix:
+ type: str
+ description:
+ - Prefix string to prepend to the username for backend authentication
+ default: ""
+ backend_user_suffix:
+ type: str
+ description:
+ - Suffix string to append to the username for backend authentication
+ default: ""
+ comment:
+ type: str
+ description:
+ - Optional comment string
+ default: ""
+ frontend_cookie:
+ type: str
+ description:
+ - Frontend cookie name
+ frontend_cookie_secret:
+ type: str
+ description:
+ - Frontend cookie secret
+ frontend_form:
+ type: str
+ description:
+ - Frontend authentication form name
+ frontend_form_template:
+ type: str
+ description:
+ - Frontend authentication form template
+ default: ""
+ frontend_login:
+ type: str
+ description:
+ - Frontend login name
+ frontend_logout:
+ type: str
+ description:
+ - Frontend logout name
+ frontend_mode:
+ type: str
+ description:
+ - Frontend authentication mode (Form|Basic)
+ default: Basic
+ choices:
+ - Basic
+ - Form
+ frontend_realm:
+ type: str
+ description:
+ - Frontend authentication realm
+ frontend_session_allow_persistency:
+ description:
+ - Allow session persistency
+ type: bool
+ default: False
+ frontend_session_lifetime:
+ type: int
+ description:
+ - session lifetime
+ required: true
+ frontend_session_lifetime_limited:
+ description:
+ - Specifies if limitation of session lifetime is active
+ type: bool
+ default: True
+ frontend_session_lifetime_scope:
+ type: str
+ description:
+ - scope for frontend_session_lifetime (days|hours|minutes)
+ default: hours
+ choices:
+ - days
+ - hours
+ - minutes
+ frontend_session_timeout:
+ type: int
+ description:
+ - session timeout
+ required: true
+ frontend_session_timeout_enabled:
+ description:
+ - Specifies if session timeout is active
+ type: bool
+ default: True
+ frontend_session_timeout_scope:
+ type: str
+ description:
+ - scope for frontend_session_timeout (days|hours|minutes)
+ default: minutes
+ choices:
+ - days
+ - hours
+ - minutes
+ logout_delegation_urls:
+ type: list
+ elements: str
+ description:
+ - List of logout URLs that logouts are delegated to
+ default: []
+ logout_mode:
+ type: str
+ description:
+ - Mode of logout (None|Delegation)
+ default: None
+ choices:
+ - None
+ - Delegation
+ redirect_to_requested_url:
+ description:
+ - Should a redirect to the requested URL be made
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING]
+ basic_prompt: "Authentication required: Please login"
+ frontend_session_lifetime: 1
+ frontend_session_timeout: 1
+ state: present
+
+- name: Remove UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: absent
+
+- name: Read UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ aaa:
+ description: List of references to utm_aaa objects (allowed users or groups)
+ type: list
+ basic_prompt:
+ description: The message in the basic authentication prompt
+ type: str
+ backend_mode:
+ description: Specifies if the backend server needs authentication ([Basic|None])
+ type: str
+ backend_strip_basic_auth:
+ description: Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ backend_user_prefix:
+ description: Prefix string to prepend to the username for backend authentication
+ type: str
+ backend_user_suffix:
+ description: Suffix string to append to the username for backend authentication
+ type: str
+ comment:
+ description: Optional comment string
+ type: str
+ frontend_cookie:
+ description: Frontend cookie name
+ type: str
+ frontend_form:
+ description: Frontend authentication form name
+ type: str
+ frontend_form_template:
+ description: Frontend authentication form template
+ type: str
+ frontend_login:
+ description: Frontend login name
+ type: str
+ frontend_logout:
+ description: Frontend logout name
+ type: str
+ frontend_mode:
+ description: Frontend authentication mode (Form|Basic)
+ type: str
+ frontend_realm:
+ description: Frontend authentication realm
+ type: str
+ frontend_session_allow_persistency:
+ description: Allow session persistency
+ type: bool
+ frontend_session_lifetime:
+ description: session lifetime
+ type: int
+ frontend_session_lifetime_limited:
+ description: Specifies if limitation of session lifetime is active
+ type: bool
+ frontend_session_lifetime_scope:
+ description: scope for frontend_session_lifetime (days|hours|minutes)
+ type: str
+ frontend_session_timeout:
+ description: session timeout
+ type: int
+ frontend_session_timeout_enabled:
+ description: Specifies if session timeout is active
+ type: bool
+ frontend_session_timeout_scope:
+ description: scope for frontend_session_timeout (days|hours|minutes)
+ type: str
+ logout_delegation_urls:
+ description: List of logout URLs that logouts are delegated to
+ type: list
+ logout_mode:
+ description: Mode of logout (None|Delegation)
+ type: str
+ redirect_to_requested_url:
+ description: Should a redirect to the requested URL be made
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/auth_profile"
+ key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", "backend_strip_basic_auth",
+ "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie",
+ "frontend_cookie_secret", "frontend_form", "frontend_form_template",
+ "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm",
+ "frontend_session_allow_persistency", "frontend_session_lifetime",
+ "frontend_session_lifetime_limited", "frontend_session_lifetime_scope",
+ "frontend_session_timeout", "frontend_session_timeout_enabled",
+ "frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode",
+ "redirect_to_requested_url"]
+
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ aaa=dict(type='list', elements='str', required=True),
+ basic_prompt=dict(type='str', required=True),
+ backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']),
+ backend_strip_basic_auth=dict(type='bool', required=False, default=True),
+ backend_user_prefix=dict(type='str', required=False, default=""),
+ backend_user_suffix=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ frontend_cookie=dict(type='str', required=False),
+ frontend_cookie_secret=dict(type='str', required=False, no_log=True),
+ frontend_form=dict(type='str', required=False),
+ frontend_form_template=dict(type='str', required=False, default=""),
+ frontend_login=dict(type='str', required=False),
+ frontend_logout=dict(type='str', required=False),
+ frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']),
+ frontend_realm=dict(type='str', required=False),
+ frontend_session_allow_persistency=dict(type='bool', required=False, default=False),
+ frontend_session_lifetime=dict(type='int', required=True),
+ frontend_session_lifetime_limited=dict(type='bool', required=False, default=True),
+ frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']),
+ frontend_session_timeout=dict(type='int', required=True),
+ frontend_session_timeout_enabled=dict(type='bool', required=False, default=True),
+ frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']),
+ logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]),
+ logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']),
+ redirect_to_requested_url=dict(type='bool', required=False, default=False)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py
new file mode 100644
index 00000000..ed241af1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Sebastian Schenzel <sebastian.schenzel@mailbox.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_exception
+
+author:
+ - Sebastian Schenzel (@RickS-C137)
+
+short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: True
+ type: str
+ op:
+ description:
+ - The operand to be used with the entries of the path parameter
+ default: 'AND'
+ choices:
+ - 'AND'
+ - 'OR'
+ required: False
+ type: str
+ path:
+ description:
+ - The paths the exception in the reverse proxy is defined for
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_custom_threats_filters:
+ description:
+ - A list of threats to be skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_threats_filter_categories:
+ description:
+ - Define which categories of threats are skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skipav:
+ description:
+ - Skip the Antivirus Scanning
+ default: False
+ type: bool
+ required: False
+ skipbadclients:
+ description:
+          - Skip blocking of clients with a bad reputation
+ default: False
+ type: bool
+ required: False
+ skipcookie:
+ description:
+ - Skip the Cookie Signing check
+ default: False
+ type: bool
+ required: False
+ skipform:
+ description:
+          - Skip the form hardening
+ default: False
+ type: bool
+ required: False
+ skipform_missingtoken:
+ description:
+          - Skip the form hardening for requests with missing tokens
+ default: False
+ type: bool
+ required: False
+ skiphtmlrewrite:
+ description:
+          - Skip the HTML rewriting
+ default: False
+ type: bool
+ required: False
+ skiptft:
+ description:
+          - Skip the true file type control
+ default: False
+ type: bool
+ required: False
+ skipurl:
+ description:
+          - Skip the static URL hardening
+ default: False
+ type: bool
+ required: False
+ source:
+ description:
+          - The reference names of the source networks this exception applies to
+ type: list
+ elements: str
+ default: []
+ required: False
+ status:
+ description:
+ - Status of the exception rule set
+ default: True
+ type: bool
+ required: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ state: absent
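+
+# A hedged sketch (not part of the original module docs): an exception that
+# skips antivirus scanning and cookie signing for two illustrative paths.
+- name: Create UTM proxy_exception with skip flags
+  community.general.utm_proxy_exception:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestExceptionEntry
+    op: OR
+    path:
+      - /api/
+      - /static/
+    skipav: true
+    skipcookie: true
+    state: present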
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ comment:
+ description: The optional comment string
+ type: str
+ op:
+ description: The operand to be used with the entries of the path parameter
+ type: str
+ path:
+ description: The paths the exception in the reverse proxy is defined for
+ type: list
+ skip_custom_threats_filters:
+ description: A list of threats to be skipped
+ type: list
+ skip_threats_filter_categories:
+ description: Define which categories of threats are skipped
+ type: list
+ skipav:
+ description: Skip the Antivirus Scanning
+ type: bool
+ skipbadclients:
+            description: Skip blocking of clients with a bad reputation
+ type: bool
+ skipcookie:
+ description: Skip the Cookie Signing check
+ type: bool
+ skipform:
+            description: Skip the form hardening
+ type: bool
+ skipform_missingtoken:
+            description: Skip the form hardening for requests with missing tokens
+ type: bool
+ skiphtmlrewrite:
+            description: Skip the HTML rewriting
+ type: bool
+ skiptft:
+            description: Skip the true file type control
+ type: bool
+ skipurl:
+            description: Skip the static URL hardening
+ type: bool
+ source:
+            description: The reference names of the source networks this exception applies to
+ type: list
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/exception"
+ key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav",
+ "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken",
+ "skiphtmlrewrite", "skiptft", "skipurl", "source"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
+ path=dict(type='list', elements='str', required=False, default=[]),
+ skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]),
+ skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]),
+ skipav=dict(type='bool', required=False, default=False),
+ skipbadclients=dict(type='bool', required=False, default=False),
+ skipcookie=dict(type='bool', required=False, default=False),
+ skipform=dict(type='bool', required=False, default=False),
+ skipform_missingtoken=dict(type='bool', required=False, default=False),
+ skiphtmlrewrite=dict(type='bool', required=False, default=False),
+ skiptft=dict(type='bool', required=False, default=False),
+ skipurl=dict(type='bool', required=False, default=False),
+ source=dict(type='list', elements='str', required=False, default=[]),
+ status=dict(type='bool', required=False, default=True),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py
new file mode 100644
index 00000000..8dba3640
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy reverse_proxy frontend entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM.
+    - The REST API of the UTM must be activated for this module to work.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+    add_content_type_header:
+ description:
+ - Whether to add the content type header or not
+ type: bool
+ default: False
+ address:
+ type: str
+ description:
+ - The reference name of the network/interface_address object.
+ default: REF_DefaultInternalAddress
+ allowed_networks:
+ type: list
+ elements: str
+ description:
+ - A list of reference names for the allowed networks.
+ default: ['REF_NetworkAny']
+ certificate:
+ type: str
+ description:
+ - The reference name of the ca/host_key_cert object.
+ default: ""
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ default: ""
+ disable_compression:
+ description:
+          - Whether to disable the compression
+ type: bool
+ default: False
+ domain:
+ type: list
+ elements: str
+ description:
+ - A list of domain names for the frontend object
+ exceptions:
+ type: list
+ elements: str
+ description:
+ - A list of exception ref names (reverse_proxy/exception)
+ default: []
+ htmlrewrite:
+ description:
+ - Whether to enable html rewrite or not
+ type: bool
+ default: False
+ htmlrewrite_cookies:
+ description:
+ - Whether to enable html rewrite cookie or not
+ type: bool
+ default: False
+ implicitredirect:
+ description:
+ - Whether to enable implicit redirection or not
+ type: bool
+ default: False
+ lbmethod:
+ type: str
+ description:
+ - Which loadbalancer method should be used
+ choices:
+ - ""
+ - bybusyness
+ - bytraffic
+ - byrequests
+ default: bybusyness
+ locations:
+ type: list
+ elements: str
+ description:
+ - A list of location ref names (reverse_proxy/location)
+ default: []
+ port:
+ type: int
+ description:
+ - The frontend http port
+ default: 80
+ preservehost:
+ description:
+ - Whether to preserve host header
+ type: bool
+ default: False
+ profile:
+ type: str
+ description:
+ - The reference string of the reverse_proxy/profile
+ default: ""
+ status:
+ description:
+ - Whether to activate the frontend entry or not
+ type: bool
+ default: True
+ type:
+ type: str
+ description:
+ - Which protocol should be used
+ choices:
+ - http
+ - https
+ default: http
+ xheaders:
+ description:
+          - Whether to pass X-Forwarded headers or not
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ state: present
+
+- name: Remove utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ state: absent
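+
+# An illustrative sketch (reference names are assumed placeholders): an HTTPS
+# frontend on port 443 with a certificate reference and a restricted network.
+- name: Create utm proxy_frontend for HTTPS
+  community.general.utm_proxy_frontend:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestHttpsFrontendEntry
+    type: https
+    port: 443
+    certificate: REF_CaHostKeyCertExample
+    allowed_networks:
+      - REF_NetworkInternal
+    state: present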
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate",
+ "comment", "disable_compression", "domain", "exceptions", "htmlrewrite",
+ "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations",
+ "port", "preservehost", "profile", "status", "type", "xheaders"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ add_content_type_header=dict(type='bool', required=False, default=False),
+ address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
+ allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
+ certificate=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ disable_compression=dict(type='bool', required=False, default=False),
+ domain=dict(type='list', elements='str', required=False),
+ exceptions=dict(type='list', elements='str', required=False, default=[]),
+ htmlrewrite=dict(type='bool', required=False, default=False),
+ htmlrewrite_cookies=dict(type='bool', required=False, default=False),
+ implicitredirect=dict(type='bool', required=False, default=False),
+ lbmethod=dict(type='str', required=False, default="bybusyness",
+ choices=['bybusyness', 'bytraffic', 'byrequests', '']),
+ locations=dict(type='list', elements='str', required=False, default=[]),
+ port=dict(type='int', required=False, default=80),
+ preservehost=dict(type='bool', required=False, default=False),
+ profile=dict(type='str', required=False, default=""),
+ status=dict(type='bool', required=False, default=True),
+ type=dict(type='str', required=False, default="http", choices=['http', 'https']),
+ xheaders=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py
new file mode 100644
index 00000000..450bd161
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Gather information about a reverse_proxy frontend entry in Sophos UTM
+
+description:
+    - Gather information about a reverse_proxy frontend entry in Sophos UTM.
+    - The REST API of the UTM must be activated for this module to work.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get utm proxy_frontend
+ community.general.utm_proxy_frontend_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
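+
+# A usage sketch (variable name assumed): register the result and inspect the
+# returned object in a follow-up task.
+- name: Get utm proxy_frontend and register the result
+  community.general.utm_proxy_frontend_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+  register: frontend_info
+
+- name: Print the frontend object
+  ansible.builtin.debug:
+    var: frontend_info.result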
+"""
+
+RETURN = """
+result:
+    description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+            description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+            description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
new file mode 100644
index 00000000..7c4bc8b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy reverse_proxy location entry in Sophos UTM
+
+description:
+    - Create, update or destroy a reverse_proxy location entry in Sophos UTM.
+    - The REST API of the UTM must be activated for this module to work.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ access_control:
+ description:
+          - Whether to activate the access control for the location
+ type: str
+ default: '0'
+ choices:
+ - '0'
+ - '1'
+ allowed_networks:
+ description:
+ - A list of allowed networks
+ type: list
+ elements: str
+        default: ['REF_NetworkAny']
+ auth_profile:
+ type: str
+ description:
+ - The reference name of the auth profile
+ backend:
+ type: list
+ elements: str
+ description:
+ - A list of backends that are connected with this location declaration
+ default: []
+ be_path:
+ type: str
+ description:
+ - The path of the backend
+ comment:
+ type: str
+ description:
+ - The optional comment string
+ denied_networks:
+ type: list
+ elements: str
+ description:
+ - A list of denied network references
+ default: []
+ hot_standby:
+ description:
+ - Activate hot standby mode
+ type: bool
+ default: False
+ path:
+ type: str
+ description:
+ - The path of the location
+ default: "/"
+ status:
+ description:
+ - Whether the location is active or not
+ type: bool
+ default: True
+ stickysession_id:
+ type: str
+ description:
+ - The stickysession id
+ default: ROUTEID
+ stickysession_status:
+ description:
+ - Enable the stickysession
+ type: bool
+ default: False
+ websocket_passthrough:
+ description:
+ - Enable the websocket passthrough
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_location
+  community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_location
+  community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ state: absent
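+
+# A hypothetical sketch (reference names are placeholders): a location that
+# maps /app to a backend reference with sticky sessions enabled.
+- name: Create UTM proxy_location with a backend and sticky sessions
+  community.general.utm_proxy_location:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+    path: /app
+    backend:
+      - REF_ReverseProxyBackendExample
+    stickysession_status: true
+    state: present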
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+            description: The access control state of the location ('0' or '1')
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+            description: The list of backend reference names
+            type: list
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+            description: Whether hot standby mode is used
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
+ "denied_networks", "hot_standby", "path", "status", "stickysession_id",
+ "stickysession_status", "websocket_passthrough"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
+ allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
+ auth_profile=dict(type='str', required=False, default=""),
+ backend=dict(type='list', elements='str', required=False, default=[]),
+ be_path=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ denied_networks=dict(type='list', elements='str', required=False, default=[]),
+ hot_standby=dict(type='bool', required=False, default=False),
+ path=dict(type='str', required=False, default="/"),
+ status=dict(type='bool', required=False, default=True),
+ stickysession_id=dict(type='str', required=False, default='ROUTEID'),
+ stickysession_status=dict(type='bool', required=False, default=False),
+ websocket_passthrough=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
new file mode 100644
index 00000000..1125c4fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Gather information about a reverse_proxy location entry in Sophos UTM
+
+description:
+    - Gather information about a reverse_proxy location entry in Sophos UTM.
+    - The REST API of the UTM must be activated for this module to work.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM proxy_location
+ community.general.utm_proxy_location_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
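+
+# A usage sketch (variable name assumed): capture the object for later tasks.
+- name: Get UTM proxy_location and register the result
+  community.general.utm_proxy_location_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+  register: location_info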
+"""
+
+RETURN = """
+result:
+    description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+            description: The access control state of the location ('0' or '1')
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The backend reference name
+ type: str
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+            description: Whether hot standby mode is used
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py
new file mode 100644
index 00000000..15fd9c62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Bryan Gurney (@bgurney-rh)
+
+module: vdo
+
+short_description: Module to control VDO
+
+
+description:
+ - This module controls the VDO dedupe and compression device.
+ - VDO, or Virtual Data Optimizer, is a device-mapper target that
+ provides inline block-level deduplication, compression, and
+ thin provisioning capabilities to primary storage.
+
+options:
+ name:
+ description:
+ - The name of the VDO volume.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether this VDO volume should be "present" or "absent".
+ If a "present" VDO volume does not exist, it will be
+ created. If a "present" VDO volume already exists, it
+ will be modified, by updating the configuration, which
+ will take effect when the VDO volume is restarted.
+ Not all parameters of an existing VDO volume can be
+ modified; the "statusparamkeys" list contains the
+ parameters that can be modified after creation. If an
+ "absent" VDO volume does not exist, it will not be
+ removed.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ activated:
+ description:
+ - The "activate" status for a VDO volume. If this is set
+ to "no", the VDO volume cannot be started, and it will
+ not start on system startup. However, on initial
+            creation, a VDO volume with "activated" set to "no"
+ will be running, until stopped. This is the default
+ behavior of the "vdo create" command; it provides the
+ user an opportunity to write a base amount of metadata
+ (filesystem, LVM headers, etc.) to the VDO volume prior
+ to stopping the volume, and leaving it deactivated
+ until ready to use.
+ type: bool
+ running:
+ description:
+ - Whether this VDO volume is running.
+ - A VDO volume must be activated in order to be started.
+ type: bool
+ device:
+ description:
+ - The full path of the device to use for VDO storage.
+ - This is required if "state" is "present".
+ type: str
+ logicalsize:
+ description:
+ - The logical size of the VDO volume (in megabytes, or
+ LVM suffix format). If not specified for a new volume,
+ this defaults to the same size as the underlying storage
+ device, which is specified in the 'device' parameter.
+ Existing volumes will maintain their size if the
+ logicalsize parameter is not specified, or is smaller
+ than or identical to the current size. If the specified
+ size is larger than the current size, a growlogical
+ operation will be performed.
+ type: str
+ deduplication:
+ description:
+ - Configures whether deduplication is enabled. The
+ default for a created volume is 'enabled'. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ compression:
+ description:
+ - Configures whether compression is enabled. The default
+ for a created volume is 'enabled'. Existing volumes
+ will maintain their previously configured setting unless
+ a different value is specified in the playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ blockmapcachesize:
+ description:
+ - The amount of memory allocated for caching block map
+ pages, in megabytes (or may be issued with an LVM-style
+ suffix of K, M, G, or T). The default (and minimum)
+ value is 128M. The value specifies the size of the
+ cache; there is a 15% memory usage overhead. Each 1.25G
+ of block map covers 1T of logical blocks, therefore a
+ small amount of block map cache memory can cache a
+ significantly large amount of block map data. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ readcache:
+ description:
+ - Enables or disables the read cache. The default is
+ 'disabled'. Choosing 'enabled' enables a read cache
+ which may improve performance for workloads of high
+ deduplication, read workloads with a high level of
+ compression, or on hard disk storage. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ choices: [ disabled, enabled ]
+ readcachesize:
+ description:
+ - Specifies the extra VDO device read cache size in
+ megabytes. This is in addition to a system-defined
+ minimum. Using a value with a suffix of K, M, G, or T
+ is optional. The default value is 0. 1.125 MB of
+ memory per bio thread will be used per 1 MB of read
+ cache specified (for example, a VDO volume configured
+ with 4 bio threads will have a read cache memory usage
+ overhead of 4.5 MB per 1 MB of read cache specified).
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ emulate512:
+ description:
+ - Enables 512-byte emulation mode, allowing drivers or
+ filesystems to access the VDO volume at 512-byte
+ granularity, instead of the default 4096-byte granularity.
+ Default is 'disabled'; only recommended when a driver
+ or filesystem requires 512-byte sector level access to
+ a device. This option is only available when creating
+ a new volume, and cannot be changed for an existing
+ volume.
+ type: bool
+ default: false
+ growphysical:
+ description:
+ - Specifies whether to attempt to execute a growphysical
+ operation, if there is enough unused space on the
+ device. A growphysical operation will be executed if
+ there is at least 64 GB of free space, relative to the
+ previous physical size of the affected VDO volume.
+ type: bool
+ default: false
+ slabsize:
+ description:
+ - The size of the increment by which the physical size of
+ a VDO volume is grown, in megabytes (or may be issued
+ with an LVM-style suffix of K, M, G, or T). Must be a
+ power of two between 128M and 32G. The default is 2G,
+ which supports volumes having a physical size up to 16T.
+ The maximum, 32G, supports a physical size of up to 256T.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ writepolicy:
+ description:
+ - Specifies the write policy of the VDO volume. The
+ 'sync' mode acknowledges writes only after data is on
+ stable storage. The 'async' mode acknowledges writes
+ when data has been cached for writing to stable
+ storage. The default (and highly recommended) 'auto'
+ mode checks the storage device to determine whether it
+ supports flushes. Devices that support flushes will
+ result in a VDO volume in 'async' mode, while devices
+ that do not support flushes will run in sync mode.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is
+ specified in the playbook.
+ type: str
+ choices: [ async, auto, sync ]
+ indexmem:
+ description:
+ - Specifies the amount of index memory in gigabytes. The
+ default is 0.25. The special decimal values 0.25, 0.5,
+ and 0.75 can be used, as can any positive integer.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ indexmode:
+ description:
+ - Specifies the index mode of the Albireo index. The
+ default is 'dense', which has a deduplication window of
+ 1 GB of index memory per 1 TB of incoming data,
+ requiring 10 GB of index data on persistent storage.
+ The 'sparse' mode has a deduplication window of 1 GB of
+ index memory per 10 TB of incoming data, but requires
+ 100 GB of index data on persistent storage. This option
+ is only available when creating a new volume, and cannot
+ be changed for an existing volume.
+ type: str
+ choices: [ dense, sparse ]
+ ackthreads:
+ description:
+ - Specifies the number of threads to use for
+ acknowledging completion of requested VDO I/O operations.
+ Valid values are integer values from 1 to 100 (lower
+ numbers are preferable due to overhead). The default is
+ 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ biothreads:
+ description:
+ - Specifies the number of threads to use for submitting I/O
+ operations to the storage device. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 4.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ cputhreads:
+ description:
+ - Specifies the number of threads to use for CPU-intensive
+ work such as hashing or compression. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 2.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ logicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on logical
+ block addresses. Valid values are integer values from
+ 1 to 100 (lower numbers are preferable due to overhead).
+ The default is 1. Existing volumes will maintain their
+ previously configured setting unless a different value
+ is specified in the playbook.
+ type: str
+ physicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on physical
+ block addresses. Valid values are integer values from
+ 1 to 16 (lower numbers are preferable due to overhead).
+ The physical space used by the VDO volume must be
+ larger than (slabsize * physicalthreads). The default
+ is 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+notes:
+ - In general, the default thread configuration should be used.
+requirements:
+ - PyYAML
+ - kmod-kvdo
+ - vdo
+'''
+
+EXAMPLES = r'''
+- name: Create 2 TB VDO volume vdo1 on device /dev/md0
+ community.general.vdo:
+ name: vdo1
+ state: present
+ device: /dev/md0
+ logicalsize: 2T
+
+- name: Remove VDO volume vdo1
+ community.general.vdo:
+ name: vdo1
+ state: absent
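+
+# An illustrative sketch (values assumed): grow the logical size of an
+# existing volume and change its write policy in one task.
+- name: Grow logical size of VDO volume vdo1 and change its write policy
+  community.general.vdo:
+    name: vdo1
+    state: present
+    device: /dev/md0
+    logicalsize: 3T
+    writepolicy: async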
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import re
+import traceback
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ YAML_IMP_ERR = traceback.format_exc()
+ HAS_YAML = False
+
+
+# Generate a list of VDO volumes, whether they are running or stopped.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return vdolist A list of currently created VDO volumes.
+def inventory_vdos(module, vdocmd):
+ rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
+
+ vdolist = []
+
+ if (rc == 2 and
+ re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
+ # If there is no /etc/vdoconf.yml file, assume there are no
+ # VDO volumes. Return an empty list of VDO volumes.
+ return vdolist
+
+ if rc != 0:
+ module.fail_json(msg="Inventorying VDOs failed: %s"
+ % vdostatusout, rc=rc, err=err)
+
+    vdostatusyaml = yaml.safe_load(vdostatusout)
+ if vdostatusyaml is None:
+ return vdolist
+
+ vdoyamls = vdostatusyaml['VDOs']
+
+ if vdoyamls is not None:
+ vdolist = vdoyamls.keys()
+
+ return vdolist
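+
+# For reference, the 'vdo status' output parsed above is YAML shaped roughly
+# like the following (illustrative, based on the keys this module accesses):
+#
+#   VDOs:
+#     vdo1:
+#       Activate: enabled
+#       Logical size: 2T
+#       ...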
+
+
+def list_running_vdos(module, vdocmd):
+ rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
+    # Wrap in list() so the result survives repeated membership tests on
+    # Python 3, where filter() returns a one-shot iterator.
+    runningvdolist = list(filter(None, vdolistout.split('\n')))
+ return runningvdolist
+
+
+# Start a VDO volume via the 'vdo start' command.
+#
+# @param module The AnsibleModule object.
+# @param vdoname The name of the VDO volume to start.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return rc The return code of the 'vdo start' command.
+def start_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("started VDO volume %s" % vdoname)
+
+ return rc
+
+
+def stop_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("stopped VDO volume %s" % vdoname)
+
+ return rc
+
+
+def activate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s activate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("activated VDO volume %s" % vdoname)
+
+ return rc
+
+
+def deactivate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s deactivate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("deactivated VDO volume %s" % vdoname)
+
+ return rc
+
+
+# Generate a string containing options to pass to the 'vdo' command.
+# Note that a 'create' operation will pass more options than a
+# 'modify' operation.
+#
+# @param params A dictionary of parameters, and their values
+# (values of 'None' and/or nonexistent values are ignored).
+#
+# @return vdocmdoptions A string to be used in a 'vdo <action>' command.
+def add_vdooptions(params):
+ vdocmdoptions = ""
+ options = []
+
+ if ('logicalsize' in params) and (params['logicalsize'] is not None):
+ options.append("--vdoLogicalSize=" + params['logicalsize'])
+
+ if (('blockmapcachesize' in params) and
+ (params['blockmapcachesize'] is not None)):
+ options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
+
+ if ('readcache' in params) and (params['readcache'] == 'enabled'):
+ options.append("--readCache=enabled")
+
+ if ('readcachesize' in params) and (params['readcachesize'] is not None):
+ options.append("--readCacheSize=" + params['readcachesize'])
+
+ if ('slabsize' in params) and (params['slabsize'] is not None):
+ options.append("--vdoSlabSize=" + params['slabsize'])
+
+ if ('emulate512' in params) and (params['emulate512']):
+ options.append("--emulate512=enabled")
+
+ if ('indexmem' in params) and (params['indexmem'] is not None):
+ options.append("--indexMem=" + params['indexmem'])
+
+ if ('indexmode' in params) and (params['indexmode'] == 'sparse'):
+ options.append("--sparseIndex=enabled")
+
+ # Entering an invalid thread config results in a cryptic
+ # 'Could not set up device mapper for %s' error from the 'vdo'
+ # command execution. The dmsetup module on the system will
+ # output a more helpful message, but one would have to log
+ # onto that system to read the error. For now, heed the thread
+ # limit warnings in the DOCUMENTATION section above.
+ if ('ackthreads' in params) and (params['ackthreads'] is not None):
+ options.append("--vdoAckThreads=" + params['ackthreads'])
+
+ if ('biothreads' in params) and (params['biothreads'] is not None):
+ options.append("--vdoBioThreads=" + params['biothreads'])
+
+ if ('cputhreads' in params) and (params['cputhreads'] is not None):
+ options.append("--vdoCpuThreads=" + params['cputhreads'])
+
+ if ('logicalthreads' in params) and (params['logicalthreads'] is not None):
+ options.append("--vdoLogicalThreads=" + params['logicalthreads'])
+
+ if (('physicalthreads' in params) and
+ (params['physicalthreads'] is not None)):
+ options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
+
+ vdocmdoptions = ' '.join(options)
+ return vdocmdoptions
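+
+# Illustrative example (not executed): calling
+# add_vdooptions({'logicalsize': '2T', 'emulate512': True}) would return
+# "--vdoLogicalSize=2T --emulate512=enabled".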
+
+
+def run_module():
+
+ # Define the available arguments/parameters that a user can pass to
+ # the module.
+ # Defaults for VDO parameters are None, in order to facilitate
+ # the detection of parameters passed from the playbook.
+ # Creation param defaults are determined by the creation section.
+
+ module_args = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ activated=dict(type='bool'),
+ running=dict(type='bool'),
+ growphysical=dict(type='bool', default=False),
+ device=dict(type='str'),
+ logicalsize=dict(type='str'),
+ deduplication=dict(type='str', choices=['disabled', 'enabled']),
+ compression=dict(type='str', choices=['disabled', 'enabled']),
+ blockmapcachesize=dict(type='str'),
+ readcache=dict(type='str', choices=['disabled', 'enabled']),
+ readcachesize=dict(type='str'),
+ emulate512=dict(type='bool', default=False),
+ slabsize=dict(type='str'),
+ writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
+ indexmem=dict(type='str'),
+ indexmode=dict(type='str', choices=['dense', 'sparse']),
+ ackthreads=dict(type='str'),
+ biothreads=dict(type='str'),
+ cputhreads=dict(type='str'),
+ logicalthreads=dict(type='str'),
+ physicalthreads=dict(type='str')
+ )
+
+ # Seed the result dictionary in the object. There will be an
+ # 'invocation' dictionary added with 'module_args' (arguments
+ # given).
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
+
+ vdocmd = module.get_bin_path("vdo", required=True)
+ if not vdocmd:
+ module.fail_json(msg='VDO is not installed.', **result)
+
+ # Print a pre-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+
+ runningvdolist = list_running_vdos(module, vdocmd)
+
+ # Collect the name of the desired VDO volume, and its state. These will
+ # determine what to do.
+ desiredvdo = module.params['name']
+ state = module.params['state']
+
+ # Create a desired VDO volume that doesn't exist yet.
+ if (desiredvdo not in vdolist) and (state == 'present'):
+ device = module.params['device']
+ if device is None:
+ module.fail_json(msg="Creating a VDO volume requires specifying "
+ "a 'device' in the playbook.")
+
+ # Create a dictionary of the options from the AnsibleModule
+ # parameters, compile the vdo command options, and run "vdo create"
+ # with those options.
+ # Since this is a creation of a new VDO volume, it will contain all
+ # all of the parameters given by the playbook; the rest will
+ # assume default values.
+ options = module.params
+ vdocmdoptions = add_vdooptions(options)
+ rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
+ % (vdocmd, desiredvdo, device,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Creating VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if (module.params['compression'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableCompression --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if ((module.params['deduplication'] is not None) and
+ module.params['deduplication'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+        if module.params['activated'] is False:
+ deactivate_vdo(module, desiredvdo, vdocmd)
+
+        if module.params['running'] is False:
+ stop_vdo(module, desiredvdo, vdocmd)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("created VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # Modify the current parameters of a VDO that exists.
+ if (desiredvdo in vdolist) and (state == 'present'):
+ rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
+        vdostatusyaml = yaml.safe_load(vdostatusoutput)
+
+ # An empty dictionary to contain dictionaries of VDO statistics
+ processedvdos = {}
+
+ vdoyamls = vdostatusyaml['VDOs']
+ if vdoyamls is not None:
+ processedvdos = vdoyamls
+
+ # The 'vdo status' keys that are currently modifiable.
+ statusparamkeys = ['Acknowledgement threads',
+ 'Bio submission threads',
+ 'Block map cache size',
+ 'CPU-work threads',
+ 'Logical threads',
+ 'Physical threads',
+ 'Read cache',
+ 'Read cache size',
+ 'Configured write policy',
+ 'Compression',
+ 'Deduplication']
+
+ # A key translation table from 'vdo status' output to Ansible
+ # module parameters. This covers all of the 'vdo status'
+ # parameter keys that could be modified with the 'vdo'
+ # command.
+ vdokeytrans = {
+ 'Logical size': 'logicalsize',
+ 'Compression': 'compression',
+ 'Deduplication': 'deduplication',
+ 'Block map cache size': 'blockmapcachesize',
+ 'Read cache': 'readcache',
+ 'Read cache size': 'readcachesize',
+ 'Configured write policy': 'writepolicy',
+ 'Acknowledgement threads': 'ackthreads',
+ 'Bio submission threads': 'biothreads',
+ 'CPU-work threads': 'cputhreads',
+ 'Logical threads': 'logicalthreads',
+ 'Physical threads': 'physicalthreads'
+ }
+
+ # Build a dictionary of the current VDO status parameters, with
+ # the keys used by VDO. (These keys will be converted later.)
+ currentvdoparams = {}
+
+ # Build a "lookup table" dictionary containing a translation table
+ # of the parameters that can be modified
+ modtrans = {}
+
+ for statfield in statusparamkeys:
+ if statfield in processedvdos[desiredvdo]:
+ currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
+
+ modtrans[statfield] = vdokeytrans[statfield]
+
+ # Build a dictionary of current parameters formatted with the
+ # same keys as the AnsibleModule parameters.
+ currentparams = {}
+ for paramkey in modtrans.keys():
+            currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
+
+ diffparams = {}
+
+ # Check for differences between the playbook parameters and the
+ # current parameters. This will need a comparison function;
+ # since AnsibleModule params are all strings, compare them as
+ # strings (but if it's None; skip).
+ for key in currentparams.keys():
+ if module.params[key] is not None:
+ if str(currentparams[key]) != module.params[key]:
+ diffparams[key] = module.params[key]
+
+ if diffparams:
+ vdocmdoptions = add_vdooptions(diffparams)
+ if vdocmdoptions:
+ rc, out, err = module.run_command("%s modify --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Modifying VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'deduplication' in diffparams.keys():
+ dedupemod = diffparams['deduplication']
+ if dedupemod == 'disabled':
+ rc, out, err = module.run_command("%s "
+ "disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if dedupemod == 'enabled':
+ rc, out, err = module.run_command("%s "
+ "enableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'compression' in diffparams.keys():
+ compressmod = diffparams['compression']
+ if compressmod == 'disabled':
+ rc, out, err = module.run_command("%s disableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if compressmod == 'enabled':
+ rc, out, err = module.run_command("%s enableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+            if 'writepolicy' in diffparams.keys():
+                # The former 'auto', 'sync', and 'async' branches were
+                # identical; the argument spec already restricts
+                # 'writepolicy' to these three values.
+                writepolmod = diffparams['writepolicy']
+                rc, out, err = module.run_command("%s "
+                                                  "changeWritePolicy "
+                                                  "--name=%s "
+                                                  "--writePolicy=%s"
+                                                  % (vdocmd,
+                                                     desiredvdo,
+                                                     writepolmod))
+
+                if rc == 0:
+                    result['changed'] = True
+                else:
+                    module.fail_json(msg="Changing write policy on "
+                                         "VDO volume %s failed."
+                                     % desiredvdo, rc=rc, err=err)
+
+ # Process the size parameters, to determine of a growPhysical or
+ # growLogical operation needs to occur.
+ sizeparamkeys = ['Logical size', ]
+
+ currentsizeparams = {}
+ sizetrans = {}
+ for statfield in sizeparamkeys:
+ currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
+ sizetrans[statfield] = vdokeytrans[statfield]
+
+ sizeparams = {}
+ for paramkey in currentsizeparams.keys():
+ sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
+
+ diffsizeparams = {}
+ for key in sizeparams.keys():
+ if module.params[key] is not None:
+ if str(sizeparams[key]) != module.params[key]:
+ diffsizeparams[key] = module.params[key]
+
+ if module.params['growphysical']:
+ physdevice = module.params['device']
+ rc, devsectors, err = module.run_command("blockdev --getsz %s"
+ % (physdevice))
+            devblocks = (int(devsectors) // 8)
+ dmvdoname = ('/dev/mapper/' + desiredvdo)
+ currentvdostats = (processedvdos[desiredvdo]
+ ['VDO statistics']
+ [dmvdoname])
+ currentphysblocks = currentvdostats['physical blocks']
+
+ # Set a growPhysical threshold to grow only when there is
+ # guaranteed to be more than 2 slabs worth of unallocated
+ # space on the device to use. For now, set to device
+ # size + 64 GB, since 32 GB is the largest possible
+ # slab size.
+ growthresh = devblocks + 16777216
+
+ if currentphysblocks > growthresh:
+ result['changed'] = True
+ rc, out, err = module.run_command("%s growPhysical --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if 'logicalsize' in diffsizeparams.keys():
+ result['changed'] = True
+ vdocmdoptions = ("--vdoLogicalSize=" +
+ diffsizeparams['logicalsize'])
+ rc, out, err = module.run_command("%s growLogical --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+
+ vdoactivatestatus = processedvdos[desiredvdo]['Activate']
+
+        if ((module.params['activated'] is False) and
+                (vdoactivatestatus == 'enabled')):
+ deactivate_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+        if ((module.params['activated'] is True) and
+                (vdoactivatestatus == 'disabled')):
+ activate_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+        if ((module.params['running'] is False) and
+                (desiredvdo in runningvdolist)):
+ stop_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ # Note that a disabled VDO volume cannot be started by the
+ # 'vdo start' command, by design. To accurately track changed
+ # status, don't try to start a disabled VDO volume.
+ # If the playbook contains 'activated: yes', assume that
+ # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
+ # will have the activated status prior to the activate_vdo()
+ # call.
+        if (((vdoactivatestatus == 'enabled') or
+                (module.params['activated'] is True)) and
+                (module.params['running'] is True) and
+                (desiredvdo not in runningvdolist)):
+ start_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ if diffparams:
+ module.log("modified parameters of VDO volume %s" % desiredvdo)
+
+ module.exit_json(**result)
+
+ # Remove a desired VDO that currently exists.
+ if (desiredvdo in vdolist) and (state == 'absent'):
+ rc, out, err = module.run_command("%s remove --name=%s"
+ % (vdocmd, desiredvdo))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Removing VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("removed VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # fall through
+ # The state for the desired VDO volume was absent, and it does
+ # not exist. Print a post-run list of VDO volumes in the result
+ # object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("received request to remove non-existent VDO volume %s"
+ % desiredvdo)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py
new file mode 100644
index 00000000..3d0788e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_configuration
+short_description: Updates Vertica configuration parameters
+description:
+ - Updates Vertica configuration parameters.
+options:
+ name:
+ description:
+ - Name of the parameter to update.
+ required: true
+ aliases: [parameter]
+ type: str
+ value:
+ description:
+ - Value of the parameter to be set.
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+      that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Updating failovertostandbyafter
+ community.general.vertica_configuration: name=failovertostandbyafter value='8 hours'
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_configuration_facts(cursor, parameter_name=''):
+ facts = {}
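+    # An empty parameter_name makes the first bind ('' = '') true, so every
+    # parameter is returned; otherwise rows are matched case-insensitively.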
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter_name, parameter_name)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def check(configuration_facts, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ return False
+ return True
+
+
+def present(configuration_facts, cursor, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ changed = False
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+ changed = True
+ if changed:
+ configuration_facts.update(get_configuration_facts(cursor, parameter_name))
+ return changed
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ parameter=dict(required=True, aliases=['name']),
+ value=dict(default=None),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ parameter_name = module.params['parameter']
+ current_value = module.params['value']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
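+        # Assemble an ODBC DSN for the Vertica driver configured per the
+        # notes above; ConnectionLoadBalance=true asks the server to spread
+        # sessions across cluster nodes.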
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
+ exception=traceback.format_exc())
+
+ try:
+ configuration_facts = get_configuration_facts(cursor)
+ if module.check_mode:
+ changed = not check(configuration_facts, parameter_name, current_value)
+ else:
+ try:
+ changed = present(configuration_facts, cursor, parameter_name, current_value)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py
new file mode 100644
index 00000000..bba411d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_role
+short_description: Adds or removes Vertica database roles and assigns roles to them.
+description:
+  - Adds or removes a Vertica database role and, optionally, assigns other roles to it.
+options:
+ name:
+ description:
+ - Name of the role to add or remove.
+ required: true
+ assigned_roles:
+ description:
+ - Comma separated list of roles to assign to the role.
+ aliases: ['assigned_role']
+ state:
+ description:
+      - Whether to create C(present) or drop C(absent) a role.
+ choices: ['present', 'absent']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica role
+ community.general.vertica_role: name=role_name db=db_name state=present
+
+- name: Creating a new vertica role with other role assigned
+ community.general.vertica_role: name=role_name assigned_role=other_role_name state=present
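+
+# A hypothetical teardown task; role_name and db_name are placeholders.
+- name: Removing a vertica role
+  community.general.vertica_role: name=role_name db=db_name state=absent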
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(role_facts, cursor, role,
+ existing, required):
+ for assigned_role in set(existing) - set(required):
+ cursor.execute("revoke {0} from {1}".format(assigned_role, role))
+ for assigned_role in set(required) - set(existing):
+ cursor.execute("grant {0} to {1}".format(assigned_role, role))
+
+
+def check(role_facts, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ return False
+ if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
+ return False
+ return True
+
+
+def present(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ cursor.execute("create role {0}".format(role))
+ update_roles(role_facts, cursor, role, [], assigned_roles)
+ role_facts.update(get_role_facts(cursor, role))
+ return True
+ else:
+ changed = False
+ if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], assigned_roles)
+ changed = True
+ if changed:
+ role_facts.update(get_role_facts(cursor, role))
+ return changed
+
+
+def absent(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key in role_facts:
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], [])
+ cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
+ del role_facts[role_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ role=dict(required=True, aliases=['name']),
+ assigned_roles=dict(default=None, aliases=['assigned_role']),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ role = module.params['role']
+ assigned_roles = []
+ if module.params['assigned_roles']:
+ assigned_roles = module.params['assigned_roles'].split(',')
+        # list() so the value survives repeated iteration on Python 3
+        assigned_roles = list(filter(None, assigned_roles))
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ role_facts = get_role_facts(cursor)
+ if module.check_mode:
+ changed = not check(role_facts, role, assigned_roles)
+ elif state == 'absent':
+ try:
+ changed = absent(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py
new file mode 100644
index 00000000..424de564
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_schema
+short_description: Adds or removes Vertica database schema and roles.
+description:
+ - Adds or removes Vertica database schema and, optionally, roles
+ with schema access privileges.
+ - A schema will not be removed until all the objects have been dropped.
+  - In that situation, attempting to remove the schema fails, and the module
+    only removes roles created for the schema, provided they have no
+    dependencies.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ usage_roles:
+ description:
+ - Comma separated list of roles to create and grant usage access to the schema.
+ aliases: ['usage_role']
+ create_roles:
+ description:
+ - Comma separated list of roles to create and grant usage and create access to the schema.
+ aliases: ['create_role']
+ owner:
+ description:
+ - Name of the user to set as owner of the schema.
+ state:
+ description:
+ - Whether to create C(present), or drop C(absent) a schema.
+ default: present
+ choices: ['present', 'absent']
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica schema
+ community.general.vertica_schema: name=schema_name db=db_name state=present
+
+- name: Creating a new schema with specific schema owner
+ community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present
+
+- name: Creating a new schema with roles
+ community.general.vertica_schema:
+ name=schema_name
+ create_roles=schema_name_all
+ usage_roles=schema_name_ro,schema_name_rw
+ db=db_name
+ state=present
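+
+# A hypothetical removal task; per the description above, the drop fails
+# while the schema still contains objects.
+- name: Dropping a vertica schema
+  community.general.vertica_schema: name=schema_name db=db_name state=absent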
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public', 'TxtIndex')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee_id = r.role_id and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def update_roles(schema_facts, cursor, schema,
+ existing, required,
+ create_existing, create_required):
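+    # Reconcile in four passes: drop roles no longer wanted at all, revoke
+    # CREATE from roles demoted to usage-only, create missing roles and grant
+    # them USAGE, then layer CREATE on top where required.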
+ for role in set(existing + create_existing) - set(required + create_required):
+ cursor.execute("drop role {0} cascade".format(role))
+ for role in set(create_existing) - set(create_required):
+ cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+ for role in set(required + create_required) - set(existing + create_existing):
+ cursor.execute("create role {0}".format(role))
+ cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+ for role in set(create_required) - set(create_existing):
+ cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+
+
+def check(schema_facts, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ return False
+ if owner and owner.lower() == schema_facts[schema_key]['owner'].lower():
+ return False
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
+ return False
+ if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+ return False
+ return True
+
+
+def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ query_fragments = ["create schema {0}".format(schema)]
+ if owner:
+ query_fragments.append("authorization {0}".format(owner))
+ cursor.execute(' '.join(query_fragments))
+ update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return True
+ else:
+ changed = False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ raise NotSupportedError((
+ "Changing schema owner is not supported. "
+ "Current owner: {0}."
+ ).format(schema_facts[schema_key]['owner']))
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
+ sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], usage_roles,
+ schema_facts[schema_key]['create_roles'], create_roles)
+ changed = True
+ if changed:
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return changed
+
+
+def absent(schema_facts, cursor, schema, usage_roles, create_roles):
+ schema_key = schema.lower()
+ if schema_key in schema_facts:
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+ try:
+ cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping schema failed due to dependencies.")
+ del schema_facts[schema_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ schema=dict(required=True, aliases=['name']),
+ usage_roles=dict(default=None, aliases=['usage_role']),
+ create_roles=dict(default=None, aliases=['create_role']),
+ owner=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ schema = module.params['schema']
+ usage_roles = []
+ if module.params['usage_roles']:
+ usage_roles = module.params['usage_roles'].split(',')
+        # list() so the value survives repeated iteration on Python 3
+        usage_roles = list(filter(None, usage_roles))
+ create_roles = []
+ if module.params['create_roles']:
+ create_roles = module.params['create_roles'].split(',')
+        create_roles = list(filter(None, create_roles))
+ owner = module.params['owner']
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ if module.check_mode:
+ changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
+ elif state == 'absent':
+ try:
+ changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py
new file mode 100644
index 00000000..f550f190
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_user
+short_description: Adds or removes Vertica database users and assigns roles.
+description:
+  - Adds or removes a Vertica database user and, optionally, assigns roles.
+  - A user will not be removed until all the dependencies have been dropped.
+  - In that situation, attempting to remove the user fails, and the module
+    only removes roles granted to the user.
+options:
+ name:
+ description:
+ - Name of the user to add or remove.
+ required: true
+ profile:
+ description:
+ - Sets the user's profile.
+ resource_pool:
+ description:
+ - Sets the user's resource pool.
+ password:
+ description:
+ - The user's password encrypted by the MD5 algorithm.
+ - The password must be generated with the format C("md5" + md5[password + username]),
+ resulting in a total of 35 characters. An easy way to do this is by querying
+        the Vertica database with C(select 'md5'||md5('<user_password><user_name>')).
+ expired:
+ description:
+ - Sets the user's password expiration.
+ type: bool
+ ldap:
+ description:
+ - Set to true if users are authenticated via LDAP.
+ - The user will be created with password expired and set to I($ldap$).
+ type: bool
+ roles:
+ description:
+ - Comma separated list of roles to assign to the user.
+ aliases: ['role']
+ state:
+ description:
+ - Whether to create C(present), drop C(absent) or lock C(locked) a user.
+ choices: ['present', 'absent', 'locked']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica user with password
+ community.general.vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
+
+- name: Creating a new vertica user authenticated via ldap with roles assigned
+ community.general.vertica_user:
+ name=user_name
+ ldap=true
+ db=db_name
+ roles=schema_name_ro
+ state=present
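+
+# A hypothetical lock-out task; user_name and db_name are placeholders.
+- name: Locking an existing vertica user
+  community.general.vertica_user: name=user_name db=db_name state=locked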
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(user_facts, cursor, user,
+ existing_all, existing_default, required):
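+    # Revoke roles the user should no longer hold, grant newly required ones,
+    # then reset the default role list to exactly the required set.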
+ del_roles = list(set(existing_all) - set(required))
+ if del_roles:
+ cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
+ new_roles = list(set(required) - set(existing_all))
+ if new_roles:
+ cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
+ if required:
+ cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+
+
+def check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ return False
+ if profile and profile != user_facts[user_key]['profile']:
+ return False
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ return False
+ if locked != (user_facts[user_key]['locked'] == 'True'):
+ return False
+ if password and password != user_facts[user_key]['password']:
+ return False
+ if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
+ ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
+ return False
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ return False
+ return True
+
+
+def present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ query_fragments = ["create user {0}".format(user)]
+ if locked:
+ query_fragments.append("account lock")
+ if password or ldap:
+ if password:
+ query_fragments.append("identified by '{0}'".format(password))
+ else:
+ query_fragments.append("identified by '$ldap$'")
+ if expired or ldap:
+ query_fragments.append("password expire")
+ if profile:
+ query_fragments.append("profile {0}".format(profile))
+ if resource_pool:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ cursor.execute(' '.join(query_fragments))
+ if resource_pool and resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ update_roles(user_facts, cursor, user, [], [], roles)
+ user_facts.update(get_user_facts(cursor, user))
+ return True
+ else:
+ changed = False
+ query_fragments = ["alter user {0}".format(user)]
+ if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
+ if locked:
+ state = 'lock'
+ else:
+ state = 'unlock'
+ query_fragments.append("account {0}".format(state))
+ changed = True
+ if password and password != user_facts[user_key]['password']:
+ query_fragments.append("identified by '{0}'".format(password))
+ changed = True
+ if ldap:
+ if ldap != (user_facts[user_key]['expired'] == 'True'):
+ query_fragments.append("password expire")
+ changed = True
+ elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
+ if expired:
+ query_fragments.append("password expire")
+ changed = True
+ else:
+ raise NotSupportedError("Unexpiring user password is not supported.")
+ if profile and profile != user_facts[user_key]['profile']:
+ query_fragments.append("profile {0}".format(profile))
+ changed = True
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ if user_facts[user_key]['resource_pool'] != 'general':
+ cursor.execute("revoke usage on resource pool {0} from {1}".format(
+ user_facts[user_key]['resource_pool'], user))
+ if resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ changed = True
+ if changed:
+ cursor.execute(' '.join(query_fragments))
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
+ changed = True
+ if changed:
+ user_facts.update(get_user_facts(cursor, user))
+ return changed
+
+
+def absent(user_facts, cursor, user, roles):
+ user_key = user.lower()
+ if user_key in user_facts:
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
+ try:
+ cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping user failed due to dependencies.")
+ del user_facts[user_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True, aliases=['name']),
+ profile=dict(default=None),
+ resource_pool=dict(default=None),
+ password=dict(default=None, no_log=True),
+ expired=dict(type='bool', default=None),
+ ldap=dict(type='bool', default=None),
+ roles=dict(default=None, aliases=['role']),
+ state=dict(default='present', choices=['absent', 'present', 'locked']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ user = module.params['user']
+ profile = module.params['profile']
+ if profile:
+ profile = profile.lower()
+ resource_pool = module.params['resource_pool']
+ if resource_pool:
+ resource_pool = resource_pool.lower()
+ password = module.params['password']
+ expired = module.params['expired']
+ ldap = module.params['ldap']
+ roles = []
+ if module.params['roles']:
+ roles = module.params['roles'].split(',')
+        # list() so the value survives repeated iteration on Python 3
+        roles = list(filter(None, roles))
+ state = module.params['state']
+ if state == 'locked':
+ locked = True
+ else:
+ locked = False
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ user_facts = get_user_facts(cursor)
+ if module.check_mode:
+ changed = not check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ elif state == 'absent':
+ try:
+ changed = absent(user_facts, cursor, user, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state in ['present', 'locked']:
+ try:
+ changed = present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py
new file mode 100644
index 00000000..54bb8c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_eg
+short_description: Manage export groups on Vexata VX100 storage arrays
+description:
+ - Create or delete export groups on a Vexata VX100 array.
+ - An export group is a tuple of a volume group, initiator group and port
+ group that allows a set of volumes to be exposed to one or more hosts
+ through specific array ports.
+author:
+ - Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Export group name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates export group when present or delete when absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ vg:
+ description:
+ - Volume group name.
+ type: str
+ ig:
+ description:
+ - Initiator group name.
+ type: str
+ pg:
+ description:
+ - Port group name.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create export group named db_export.
+ community.general.vexata_eg:
+ name: db_export
+ vg: dbvols
+ ig: dbhosts
+ pg: pg1
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete export group named db_export
+ community.general.vexata_eg:
+ name: db_export
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together)
+
+
+def get_eg(module, array):
+ """Retrieve a named vg if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ egs = array.list_egs()
+        # filter() is a one-shot iterator on Python 3; materialize a list so
+        # len() and indexing work.
+        eg = [x for x in egs if x['name'] == name]
+        if len(eg) == 1:
+            return eg[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve export groups.')
+
+
+def get_vg_id(module, array):
+ """Retrieve a named vg's id if it exists, error if absent."""
+ name = module.params['vg']
+ try:
+ vgs = array.list_vgs()
+        vg = [x for x in vgs if x['name'] == name]
+ if len(vg) == 1:
+ return vg[0]['id']
+ else:
+ module.fail_json(msg='Volume group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volume groups.')
+
+
+def get_ig_id(module, array):
+ """Retrieve a named ig's id if it exists, error if absent."""
+ name = module.params['ig']
+ try:
+ igs = array.list_igs()
+        ig = [x for x in igs if x['name'] == name]
+ if len(ig) == 1:
+ return ig[0]['id']
+ else:
+ module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve initiator groups.')
+
+
+def get_pg_id(module, array):
+ """Retrieve a named pg's id if it exists, error if absent."""
+ name = module.params['pg']
+ try:
+ pgs = array.list_pgs()
+        pg = [x for x in pgs if x['name'] == name]
+ if len(pg) == 1:
+ return pg[0]['id']
+ else:
+ module.fail_json(msg='Port group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve port groups.')
+
+
+def create_eg(module, array):
+ """"Create a new export group."""
+ changed = False
+ eg_name = module.params['name']
+ vg_id = get_vg_id(module, array)
+ ig_id = get_ig_id(module, array)
+ pg_id = get_pg_id(module, array)
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ eg = array.create_eg(
+ eg_name,
+ 'Ansible export group',
+ (vg_id, ig_id, pg_id))
+ if eg:
+ module.log(msg='Created export group {0}'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def delete_eg(module, array, eg):
+ changed = False
+ eg_name = eg['name']
+    if module.check_mode:
+        # The export group would be deleted, so report a change.
+        module.exit_json(changed=True)
+
+ try:
+ ok = array.delete_eg(
+ eg['id'])
+ if ok:
+ module.log(msg='Export group {0} deleted.'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ vg=dict(type='str'),
+ ig=dict(type='str'),
+ pg=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ eg = get_eg(module, array)
+
+ if state == 'present' and not eg:
+ create_eg(module, array)
+ elif state == 'absent' and eg:
+ delete_eg(module, array, eg)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py
new file mode 100644
index 00000000..1cf4cd7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_volume
+short_description: Manage volumes on Vexata VX100 storage arrays
+description:
+  - Create, delete or extend volumes on a Vexata VX100 array.
+author:
+- Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Volume name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates/Modifies volume when present or removes when absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ size:
+ description:
+ - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create new 2 TiB volume named foo
+ community.general.vexata_volume:
+ name: foo
+ size: 2T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Expand volume named foo to 4 TiB
+ community.general.vexata_volume:
+ name: foo
+ size: 4T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete volume named foo
+ community.general.vexata_volume:
+ name: foo
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together, size_to_MiB)
+
+
+def get_volume(module, array):
+ """Retrieve a named volume if it exists, None if absent."""
+ name = module.params['name']
+ try:
+        vols = array.list_volumes()
+        # filter() returns an iterator on Python 3, so build a list instead.
+        match = [vol for vol in vols if vol['name'] == name]
+        if len(match) == 1:
+            return match[0]
+        else:
+            return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volumes.')
+
+
+def validate_size(module, err_msg):
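+    # size_to_MiB (from the vexata module_utils) parses '<integer>[MGT]'
+    # strings such as '4T' into MiB; judging from the check below it is
+    # assumed to return a non-positive value on parse errors.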
+ size = module.params.get('size', False)
+ if not size:
+ module.fail_json(msg=err_msg)
+ size = size_to_MiB(size)
+ if size <= 0:
+ module.fail_json(msg='Invalid volume size, must be <integer>[MGT].')
+ return size
+
+
+def create_volume(module, array):
+    """Create a new volume."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to create volume.')
+    if module.check_mode:
+        # The volume would be created, so report a change.
+        module.exit_json(changed=True)
+
+ try:
+ vol = array.create_volume(
+ module.params['name'],
+ 'Ansible volume',
+ size)
+ if vol:
+ module.log(msg='Created volume {0}'.format(vol['id']))
+ changed = True
+        else:
+            raise Exception
+    except Exception:
+        module.fail_json(msg='Volume {0} create failed.'.format(module.params['name']))
+ module.exit_json(changed=changed)
+
+
+def update_volume(module, array, volume):
+ """Expand the volume size."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to update volume')
+ prev_size = volume['volSize']
+    if size <= prev_size:
+        module.log(msg='Volume expanded size needs to be larger '
+                       'than current size.')
+        # Volumes can only be grown, so there is nothing to do.
+        module.exit_json(changed=changed)
+    if module.check_mode:
+        # The volume would be expanded, so report a change.
+        module.exit_json(changed=True)
+
+ try:
+ vol = array.grow_volume(
+ volume['name'],
+ volume['description'],
+ volume['id'],
+ size)
+ if vol:
+ changed = True
+    except Exception:
+        module.fail_json(msg='Volume {0} resize failed.'.format(volume['name']))
+
+ module.exit_json(changed=changed)
+
+
+def delete_volume(module, array, volume):
+ changed = False
+ vol_name = volume['name']
+    if module.check_mode:
+        # The volume would be deleted, so report a change.
+        module.exit_json(changed=True)
+
+ try:
+ ok = array.delete_volume(
+ volume['id'])
+ if ok:
+ module.log(msg='Volume {0} deleted.'.format(vol_name))
+ changed = True
+ else:
+ raise Exception
+    except Exception:
+        module.fail_json(msg='Volume {0} delete failed.'.format(vol_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+ size=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ volume = get_volume(module, array)
+
+ if state == 'present':
+ if not volume:
+ create_volume(module, array)
+ else:
+ update_volume(module, array, volume)
+ elif state == 'absent' and volume:
+ delete_volume(module, array, volume)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py
new file mode 100644
index 00000000..553e6efc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py
@@ -0,0 +1,796 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vmadm
+short_description: Manage SmartOS virtual machines and zones.
+description:
+ - Manage SmartOS virtual machines through vmadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ archive_on_delete:
+ required: false
+ description:
+ - When enabled, the zone dataset will be mounted on C(/zones/archive)
+ upon removal.
+ type: bool
+ autoboot:
+ required: false
+ description:
+ - Whether or not a VM is booted when the system is rebooted.
+ type: bool
+ brand:
+ choices: [ joyent, joyent-minimal, lx, kvm, bhyve ]
+ default: joyent
+ description:
+ - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0.
+ type: str
+ boot:
+ required: false
+ description:
+ - Set the boot order for KVM VMs.
+ type: str
+ cpu_cap:
+ required: false
+ description:
+ - Sets a limit on the amount of CPU time that can be used by a VM.
+ Use C(0) for no cap.
+ type: int
+ cpu_shares:
+ required: false
+ description:
+ - Sets a limit on the number of fair share scheduler (FSS) CPU shares for
+ a VM. This limit is relative to all other VMs on the system.
+ type: int
+ cpu_type:
+ required: false
+ choices: [ qemu64, host ]
+ default: qemu64
+ description:
+ - Control the type of virtual CPU exposed to KVM VMs.
+ type: str
+ customer_metadata:
+ required: false
+ description:
+      - Metadata to be set and associated with this VM, this contains
+        customer-modifiable keys.
+ type: dict
+ delegate_dataset:
+ required: false
+ description:
+ - Whether to delegate a ZFS dataset to an OS VM.
+ type: bool
+ disk_driver:
+ required: false
+ description:
+ - Default value for a virtual disk model for KVM guests.
+ type: str
+ disks:
+ required: false
+ description:
+      - A list of disks to add; valid properties are documented in vmadm(1M).
+ type: list
+ dns_domain:
+ required: false
+ description:
+ - Domain value for C(/etc/hosts).
+ type: str
+ docker:
+ required: false
+ description:
+ - Docker images need this flag enabled along with the I(brand) set to C(lx).
+ type: bool
+ filesystems:
+ required: false
+ description:
+ - Mount additional filesystems into an OS VM.
+ type: list
+ firewall_enabled:
+ required: false
+ description:
+ - Enables the firewall, allowing fwadm(1M) rules to be applied.
+ type: bool
+ force:
+ required: false
+ description:
+ - Force a particular action (i.e. stop or delete a VM).
+ type: bool
+ fs_allowed:
+ required: false
+ description:
+ - Comma separated list of filesystem types this zone is allowed to mount.
+ type: str
+ hostname:
+ required: false
+ description:
+ - Zone/VM hostname.
+ type: str
+ image_uuid:
+ required: false
+ description:
+ - Image UUID.
+ type: str
+ indestructible_delegated:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to delegated datasets.
+ type: bool
+ indestructible_zoneroot:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to zoneroot.
+ type: bool
+ internal_metadata:
+ required: false
+ description:
+      - Metadata to be set and associated with this VM, this contains
+        operator-generated keys.
+ type: dict
+ internal_metadata_namespace:
+ required: false
+ description:
+ - List of namespaces to be set as I(internal_metadata-only); these namespaces
+ will come from I(internal_metadata) rather than I(customer_metadata).
+ type: str
+ kernel_version:
+ required: false
+ description:
+ - Kernel version to emulate for LX VMs.
+ type: str
+ limit_priv:
+ required: false
+ description:
+ - Set (comma separated) list of privileges the zone is allowed to use.
+ type: str
+ maintain_resolvers:
+ required: false
+ description:
+ - Resolvers in C(/etc/resolv.conf) will be updated when updating
+ the I(resolvers) property.
+ type: bool
+ max_locked_memory:
+ required: false
+ description:
+ - Total amount of memory (in MiBs) on the host that can be locked by this VM.
+ type: int
+ max_lwps:
+ required: false
+ description:
+ - Maximum number of lightweight processes this VM is allowed to have running.
+ type: int
+ max_physical_memory:
+ required: false
+ description:
+ - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
+ type: int
+ max_swap:
+ required: false
+ description:
+ - Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
+ type: int
+ mdata_exec_timeout:
+ required: false
+ description:
+ - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
+ that runs user-scripts in the zone.
+ type: int
+ name:
+ required: false
+ aliases: [ alias ]
+ description:
+ - Name of the VM. vmadm(1M) uses this as an optional name.
+ type: str
+ nic_driver:
+ required: false
+ description:
+ - Default value for a virtual NIC model for KVM guests.
+ type: str
+ nics:
+ required: false
+ description:
+      - A list of nics to add; valid properties are documented in vmadm(1M).
+ type: list
+ nowait:
+ required: false
+ description:
+ - Consider the provisioning complete when the VM first starts, rather than
+ when the VM has rebooted.
+ type: bool
+ qemu_opts:
+ required: false
+ description:
+ - Additional qemu arguments for KVM guests. This overwrites the default arguments
+ provided by vmadm(1M) and should only be used for debugging.
+ type: str
+ qemu_extra_opts:
+ required: false
+ description:
+ - Additional qemu cmdline arguments for KVM guests.
+ type: str
+ quota:
+ required: false
+ description:
+ - Quota on zone filesystems (in MiBs).
+ type: int
+ ram:
+ required: false
+ description:
+ - Amount of virtual RAM for a KVM guest (in MiBs).
+ type: int
+ resolvers:
+ required: false
+ description:
+ - List of resolvers to be put into C(/etc/resolv.conf).
+ type: list
+ routes:
+ required: false
+ description:
+ - Dictionary that maps destinations to gateways, these will be set as static
+ routes in the VM.
+ type: dict
+ spice_opts:
+ required: false
+ description:
+      - Additional options for SPICE-enabled KVM VMs.
+ type: str
+ spice_password:
+ required: false
+ description:
+ - Password required to connect to SPICE. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ state:
+ choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
+ default: running
+ description:
+ - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
+ operate on a VM that is currently provisioned. C(present) means that the VM will be
+ created if it was absent, and that it will be in a running state. C(absent) will
+      shut down the zone before removing it.
+ C(stopped) means the zone will be created if it doesn't exist already, before shutting
+ it down.
+ type: str
+ tmpfs:
+ required: false
+ description:
+ - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
+ type: int
+ uuid:
+ required: false
+ description:
+ - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
+ type: str
+ vcpus:
+ required: false
+ description:
+ - Number of virtual CPUs for a KVM guest.
+ type: int
+ vga:
+ required: false
+ description:
+ - Specify VGA emulation used by KVM VMs.
+ type: str
+ virtio_txburst:
+ required: false
+ description:
+ - Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
+ type: int
+ virtio_txtimer:
+ required: false
+ description:
+ - Timeout (in nanoseconds) for the TX timer of virtio NICs.
+ type: int
+ vnc_password:
+ required: false
+ description:
+ - Password required to connect to VNC. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ vnc_port:
+ required: false
+ description:
+      - TCP port for the VNC server to listen on. Set C(0) for a random
+        port, or C(-1) to disable VNC.
+ type: int
+ zfs_data_compression:
+ required: false
+ description:
+      - Specifies compression algorithm used for this VM's data dataset. This option
+ only has effect on delegated datasets.
+ type: str
+ zfs_data_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the delegated dataset's filesystem.
+ type: int
+ zfs_filesystem_limit:
+ required: false
+ description:
+ - Maximum number of filesystems the VM can have.
+ type: int
+ zfs_io_priority:
+ required: false
+ description:
+ - IO throttle priority value relative to other VMs.
+ type: int
+ zfs_root_compression:
+ required: false
+ description:
+      - Specifies compression algorithm used for this VM's root dataset. This option
+ only has effect on the zoneroot dataset.
+ type: str
+ zfs_root_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
+ type: int
+ zfs_snapshot_limit:
+ required: false
+ description:
+ - Number of snapshots the VM can have.
+ type: int
+ zpool:
+ required: false
+ description:
+ - ZFS pool the VM's zone dataset will be created in.
+ type: str
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Create SmartOS zone
+ community.general.vmadm:
+ brand: joyent
+ state: present
+ alias: fw_zone
+ image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
+ firewall_enabled: yes
+ indestructible_zoneroot: yes
+ nics:
+ - nic_tag: admin
+ ip: dhcp
+ primary: true
+ internal_metadata:
+ root_pw: 'secret'
+ quota: 1
+
+- name: Delete a zone
+ community.general.vmadm:
+ alias: test_zone
+ state: deleted
+
+- name: Stop all zones
+ community.general.vmadm:
+ uuid: '*'
+ state: stopped
+'''
+
+RETURN = '''
+uuid:
+ description: UUID of the managed VM.
+ returned: always
+ type: str
+ sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
+alias:
+ description: Alias of the managed VM.
+ returned: When addressing a VM by alias.
+ type: str
+ sample: 'dns-zone'
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'running'
+'''
+
+import json
+import os
+import re
+import tempfile
+import traceback
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+# While vmadm(1M) supports a -E option to return any errors in JSON, the
+# generated JSON does not play well with the JSON parsers of Python.
+# The returned message contains '\n' as part of the stacktrace,
+# which breaks the parsers.
+
+
+def get_vm_prop(module, uuid, prop):
+ # Lookup a property for the given VM.
+ # Returns the property, or None if not found.
+ cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and prop in stdout_json[0]:
+ return stdout_json[0][prop]
+ else:
+ return None
+
+
+def get_vm_uuid(module, alias):
+ # Lookup the uuid that goes with the given alias.
+    # Returns the uuid or None if not found.
+ cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
+
+ # If no VM was found matching the given alias, we get back an empty array.
+    # That is not an error condition as we might be explicitly checking its
+    # absence.
+ if stdout.strip() == '[]':
+ return None
+ else:
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
+ return stdout_json[0]['uuid']
+
+
+def get_all_vm_uuids(module):
+ # Retrieve the UUIDs for all VMs.
+ cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg='Failed to get VMs list', exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ return [v['uuid'] for v in stdout_json]
+ except Exception as e:
+ module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e),
+ exception=traceback.format_exc())
+
+
+def new_vm(module, uuid, vm_state):
+ payload_file = create_payload(module, uuid)
+
+ (rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
+
+ if rc != 0:
+ changed = False
+ module.fail_json(msg='Could not create VM', exception=stderr)
+ else:
+ changed = True
+ # 'vmadm create' returns all output to stderr...
+ match = re.match('Successfully created VM (.*)', stderr)
+ if match:
+ vm_uuid = match.groups()[0]
+ if not is_valid_uuid(vm_uuid):
+ module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
+ else:
+ module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
+
+ # Now that the VM is created, ensure it is in the desired state (if not 'running')
+ if vm_state != 'running':
+ ret = set_vm_state(module, vm_uuid, vm_state)
+ if not ret:
+ module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+
+ try:
+ os.unlink(payload_file)
+ except Exception as e:
+ # Since the payload may contain sensitive information, fail hard
+ # if we cannot remove the file so the operator knows about it.
+ module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, vm_uuid
+
+
+def vmadm_create_vm(module, payload_file):
+ # Create a new VM using the provided payload.
+ cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
+
+ return module.run_command(cmd)
+
+
+def set_vm_state(module, vm_uuid, vm_state):
+ p = module.params
+
+ # Check if the VM is already in the desired state.
+ state = get_vm_prop(module, vm_uuid, 'state')
+ if state and (state == vm_state):
+ return None
+
+ # Lookup table for the state to be in, and which command to use for that.
+    # vm_state: [vmadm command, forceable?]
+ cmds = {
+ 'stopped': ['stop', True],
+ 'running': ['start', False],
+ 'deleted': ['delete', True],
+ 'rebooted': ['reboot', False]
+ }
+
+ if p['force'] and cmds[vm_state][1]:
+ force = '-F'
+ else:
+ force = ''
+
+    # Use the vmadm binary located earlier rather than relying on PATH.
+    cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ match = re.match('^Successfully.*', stderr)
+ if match:
+ return True
+ else:
+ return False
+
+
+def create_payload(module, uuid):
+ # Create the JSON payload (vmdef) and return the filename.
+
+ # Filter out the few options that are not valid VM properties.
+ module_options = ['debug', 'force', 'state']
+ # @TODO make this a simple {} comprehension as soon as py2 is ditched
+ # @TODO {k: v for k, v in p.items() if k not in module_options}
+ vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v])
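+    # What remains is the vmdef payload documented in vmadm(1M), e.g.
+    # {"brand": "joyent", "quota": 1, ...} for the first task in EXAMPLES.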
+
+ try:
+ vmdef_json = json.dumps(vmdef)
+ except Exception as e:
+ module.fail_json(
+ msg='Could not create valid JSON payload', exception=traceback.format_exc())
+
+    # Create the temporary file that contains our payload, and set tight
+    # permissions on it, as it may contain sensitive information.
+ try:
+ # XXX: When there's a way to get the current ansible temporary directory
+ # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
+ # the payload (thus removing the `save_payload` option).
+ fname = tempfile.mkstemp()[1]
+ os.chmod(fname, 0o400)
+ with open(fname, 'w') as fh:
+ fh.write(vmdef_json)
+ except Exception as e:
+ module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+ return fname
+
+
+def vm_state_transition(module, uuid, vm_state):
+ ret = set_vm_state(module, uuid, vm_state)
+
+ # Whether the VM changed state.
+ if ret is None:
+ return False
+ elif ret:
+ return True
+ else:
+ module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
+
+
+def is_valid_uuid(uuid):
+ if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
+ return True
+ else:
+ return False
+
+
+def validate_uuids(module):
+ # Perform basic UUID validation.
+ failed = []
+
+ for u in [['uuid', module.params['uuid']],
+ ['image_uuid', module.params['image_uuid']]]:
+ if u[1] and u[1] != '*':
+ if not is_valid_uuid(u[1]):
+ failed.append(u[0])
+
+ if len(failed) > 0:
+ module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
+
+
+def manage_all_vms(module, vm_state):
+ # Handle operations for all VMs, which can by definition only
+ # be state transitions.
+ state = module.params['state']
+
+ if state == 'created':
+ module.fail_json(msg='State "created" is only valid for tasks with a single VM')
+
+ # If any of the VMs has a change, the task as a whole has a change.
+ any_changed = False
+
+ # First get all VM uuids and for each check their state, and adjust it if needed.
+ for uuid in get_all_vm_uuids(module):
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+ if not current_vm_state and vm_state == 'deleted':
+ any_changed = False
+ else:
+ if module.check_mode:
+                # Compare against the translated vm_state, not the raw state option.
+                if (not current_vm_state) or (current_vm_state != vm_state):
+ any_changed = True
+ else:
+ any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
+
+ return any_changed
+
+
+def main():
+ # In order to reduce the clutter and boilerplate for trivial options,
+ # abstract the vmadm properties and build the dict of arguments later.
+ # Dict of all options that are simple to define based on their type.
+ # They're not required and have a default of None.
+ properties = {
+ 'str': [
+ 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
+ 'image_uuid', 'internal_metadata_namespace', 'kernel_version',
+ 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
+ 'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
+ 'zfs_root_compression', 'zpool'
+ ],
+ 'bool': [
+ 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
+ 'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
+ 'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
+ ],
+ 'int': [
+ 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
+ 'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
+ 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
+ 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
+ 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
+ 'zfs_snapshot_limit'
+ ],
+ 'dict': ['customer_metadata', 'internal_metadata', 'routes'],
+ 'list': ['disks', 'nics', 'resolvers', 'filesystems']
+ }
+
+ # Start with the options that are not as trivial as those above.
+ options = dict(
+ state=dict(
+ default='running',
+ type='str',
+ choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
+ ),
+ name=dict(
+ default=None, type='str',
+ aliases=['alias']
+ ),
+ brand=dict(
+ default='joyent',
+ type='str',
+ choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve']
+ ),
+ cpu_type=dict(
+ default='qemu64',
+ type='str',
+ choices=['host', 'qemu64']
+ ),
+ # Regular strings, however these require additional options.
+ spice_password=dict(type='str', no_log=True),
+ vnc_password=dict(type='str', no_log=True),
+ )
+
+ # Add our 'simple' options to options dict.
+    for prop_type in properties:
+        for p in properties[prop_type]:
+            options[p] = dict(default=None, type=prop_type)
+
+ module = AnsibleModule(
+ argument_spec=options,
+ supports_check_mode=True,
+ required_one_of=[['name', 'uuid']]
+ )
+
+ module.vmadm = module.get_bin_path('vmadm', required=True)
+
+ p = module.params
+ uuid = p['uuid']
+ state = p['state']
+
+ # Translate the state parameter into something we can use later on.
+ if state in ['present', 'running']:
+ vm_state = 'running'
+ elif state in ['stopped', 'created']:
+ vm_state = 'stopped'
+ elif state in ['absent', 'deleted']:
+ vm_state = 'deleted'
+ elif state in ['restarted', 'rebooted']:
+ vm_state = 'rebooted'
+
+ result = {'state': state}
+
+    # While it's possible to refer to a given VM by its `alias`, it's easier
+ # to operate on VMs by their UUID. So if we're not given a `uuid`, look
+ # it up.
+ if not uuid:
+ uuid = get_vm_uuid(module, p['name'])
+ # Bit of a chicken and egg problem here for VMs with state == deleted.
+ # If they're going to be removed in this play, we have to lookup the
+ # uuid. If they're already deleted there's nothing to lookup.
+    # So if state == deleted and get_vm_uuid() returned None, the VM is already
+ # deleted and there's nothing else to do.
+ if uuid is None and vm_state == 'deleted':
+ result['name'] = p['name']
+ module.exit_json(**result)
+
+ validate_uuids(module)
+
+ if p['name']:
+ result['name'] = p['name']
+ result['uuid'] = uuid
+
+ if uuid == '*':
+ result['changed'] = manage_all_vms(module, vm_state)
+ module.exit_json(**result)
+
+ # The general flow is as follows:
+    # - First the current state of the VM is obtained by its UUID.
+ # - If the state was not found and the desired state is 'deleted', return.
+ # - If the state was not found, it means the VM has to be created.
+ # Subsequently the VM will be set to the desired state (i.e. stopped)
+    # - Otherwise, it means the VM exists already and we operate on its
+    #   state (i.e. reboot it).
+ #
+ # In the future it should be possible to query the VM for a particular
+ # property as a valid state (i.e. queried) so the result can be
+ # registered.
+ # Also, VMs should be able to get their properties updated.
+ # Managing VM snapshots should be part of a standalone module.
+
+ # First obtain the VM state to determine what needs to be done with it.
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+
+ # First handle the case where the VM should be deleted and is not present.
+ if not current_vm_state and vm_state == 'deleted':
+ result['changed'] = False
+ elif module.check_mode:
+ # Shortcut for check mode, if there is no VM yet, it will need to be created.
+ # Or, if the VM is not in the desired state yet, it needs to transition.
+        # Compare against the translated vm_state, not the raw state option.
+        if (not current_vm_state) or (current_vm_state != vm_state):
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+ # No VM was found that matched the given ID (alias or uuid), so we create it.
+ elif not current_vm_state:
+ result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
+ else:
+ # VM was found, operate on its state directly.
+ result['changed'] = vm_state_transition(module, uuid, vm_state)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py
new file mode 100644
index 00000000..2f097fcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wakeonlan
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+options:
+ mac:
+ description:
+ - MAC address to send Wake-on-LAN broadcast packet for.
+ required: true
+ type: str
+ broadcast:
+ description:
+ - Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
+ default: 255.255.255.255
+ type: str
+ port:
+ description:
+ - UDP port to use for magic Wake-on-LAN packet.
+ default: 7
+ type: int
+todo:
+ - Add arping support to check whether the system is up (before and after)
+ - Enable check-mode support (when we have arping support)
+ - Does not have SecureOn password support
+notes:
+ - This module sends a magic packet, without knowing whether it worked
+ - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
+ - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
+seealso:
+- module: community.windows.win_wakeonlan
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+ community.general.wakeonlan:
+ mac: '00:00:5E:00:53:66'
+ broadcast: 192.0.2.23
+ delegate_to: localhost
+
+- community.general.wakeonlan:
+ mac: 00:00:5E:00:53:66
+ port: 9
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import socket
+import struct
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def wakeonlan(module, mac, broadcast, port):
+ """ Send a magic Wake-on-LAN packet. """
+
+ mac_orig = mac
+
+ # Remove possible separator from MAC address
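+    # (a separator-formatted address such as 00:00:5E:00:53:66 or
+    # 00-00-5E-00-53-66 is 12 + 5 characters long)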
+ if len(mac) == 12 + 5:
+ mac = mac.replace(mac[2], '')
+
+ # If we don't end up with 12 hexadecimal characters, fail
+ if len(mac) != 12:
+ module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+
+ # Test if it converts to an integer, otherwise fail
+ try:
+ int(mac, 16)
+ except ValueError:
+ module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+
+ # Create payload for magic packet
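+    # A magic packet is six 0xFF bytes followed by copies of the target MAC;
+    # the spec asks for 16 repetitions, 20 are sent here, which is harmless.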
+ data = b''
+ padding = ''.join(['FFFFFFFFFFFF', mac * 20])
+ for i in range(0, len(padding), 2):
+ data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
+
+ # Broadcast payload to network
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
+ if not module.check_mode:
+
+ try:
+ sock.sendto(data, (broadcast, port))
+ except socket.error as e:
+ sock.close()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ sock.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mac=dict(type='str', required=True),
+ broadcast=dict(type='str', default='255.255.255.255'),
+ port=dict(type='int', default=7),
+ ),
+ supports_check_mode=True,
+ )
+
+ mac = module.params['mac']
+ broadcast = module.params['broadcast']
+ port = module.params['port']
+
+ wakeonlan(module, mac, broadcast, port)
+
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py
new file mode 100644
index 00000000..dcf1656f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_mod_proxy
+author: Olivier Boukili (@oboukili)
+short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
+description:
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
+ pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
+ status page has to be enabled and accessible, as this module relies on parsing
+    this page. This module supports Ansible check mode, and requires the
+    BeautifulSoup Python module.
+options:
+ balancer_url_suffix:
+ type: str
+ description:
+ - Suffix of the balancer pool url required to access the balancer pool
+ status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ default: /balancer-manager/
+ balancer_vhost:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ required: true
+ member_host:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+ Port number is autodetected and should not be specified here.
+ If undefined, apache2_mod_proxy module will return a members list of
+ dictionaries of all the current balancer pool members' attributes.
+ state:
+ type: str
+ description:
+      - Desired state of the member host.
+        Several states can be applied simultaneously by separating them with a
+        comma (e.g. C(state=drained,ignore_errors)); C(present) and C(enabled)
+        cannot be combined with other states.
+      - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
+ tls:
+ description:
+ - Use https to access balancer management page.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - Validate ssl/tls certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Get all current balancer pool members attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: 10.0.0.2
+
+- name: Get a specific member attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: myws.mydomain.org
+    balancer_url_suffix: /lb/
+ member_host: node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- name: Get attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ register: result
+
+- name: Enable all balancer pool members
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ member_host: '{{ item.host }}'
+ state: present
+ with_items: '{{ result.members }}'
+
+# Gracefully disable a member from a loadbalancer node:
+- name: Step 1
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 2
+ ansible.builtin.wait_for:
+ host: '{{ member.host }}'
+ port: '{{ member.port }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 3
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: absent
+ delegate_to: myloadbalancernode
+'''
+
+RETURN = '''
+member:
+ description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+ type: dict
+ returned: success
+ sample:
+ {"attributes":
+ {"Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ }
+members:
+ description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success
+ type: list
+ sample:
+ [{"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ },
+ {"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false}
+ }
+ ]
+'''
+
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+BEAUTIFUL_SOUP_IMP_ERR = None
+try:
+ from BeautifulSoup import BeautifulSoup
+except ImportError:
+ BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
+ HAS_BEAUTIFULSOUP = False
+else:
+ HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
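+# e.g. it captures the balancer name, worker scheme, host, port and path from
+# a management_url query string such as
+# 'b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=...' (see RETURN sample).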
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
+
+
+def regexp_extraction(string, _regexp, groups=1):
+ """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+ regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ if regexp_search:
+ if regexp_search.group(groups) != '':
+ return str(regexp_search.group(groups))
+ return None
+
+
+class BalancerMember(object):
+ """ Apache 2.4 mod_proxy LB balancer member.
+ attributes:
+ read-only:
+ host -> member host (string),
+ management_url -> member management url (string),
+ protocol -> member protocol (string)
+ port -> member port (string),
+ path -> member location (string),
+ balancer_url -> url of this member's parent balancer (string),
+ attributes -> whole member attributes (dictionary)
+ module -> ansible module instance (AnsibleModule object).
+ writable:
+ status -> status of the member (dictionary)
+ """
+
+ def __init__(self, management_url, balancer_url, module):
+ self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+ self.management_url = str(management_url)
+ self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+ self.port = regexp_extraction(management_url, EXPRESSION, 5)
+ self.path = regexp_extraction(management_url, EXPRESSION, 6)
+ self.balancer_url = str(balancer_url)
+ self.module = module
+
+ def get_member_attributes(self):
+ """ Returns a dictionary of a balancer member's attributes."""
+
+ balancer_member_page = fetch_url(self.module, self.management_url)
+
+ if balancer_member_page[1]['status'] != 200:
+            self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1]))
+ else:
+ try:
+ soup = BeautifulSoup(balancer_member_page[0])
+ except TypeError:
+                self.module.fail_json(msg="Cannot parse balancer_member_page HTML!")
+ else:
+ subsoup = soup.findAll('table')[1].findAll('tr')
+ keys = subsoup[0].findAll('th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = valuesset.findAll('td')
+ return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+ def get_member_status(self):
+ """ Returns a dictionary of a balancer member's status attributes."""
+ status_mapping = {'disabled': 'Dis',
+ 'drained': 'Drn',
+ 'hot_standby': 'Stby',
+ 'ignore_errors': 'Ign'}
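+        # e.g. an attributes 'Status' of 'Init Dis ' yields
+        # {'disabled': True, 'drained': False, 'hot_standby': False, 'ignore_errors': False}.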
+ status = {}
+ actual_status = str(self.attributes['Status'])
+ for mode in status_mapping.keys():
+ if re.search(pattern=status_mapping[mode], string=actual_status):
+ status[mode] = True
+ else:
+ status[mode] = False
+ return status
+
+ def set_member_status(self, values):
+ """ Sets a balancer member's status attributes amongst pre-mapped values."""
+ values_mapping = {'disabled': '&w_status_D',
+ 'drained': '&w_status_N',
+ 'hot_standby': '&w_status_H',
+ 'ignore_errors': '&w_status_I'}
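+        # The POST body extends the member query string with one
+        # '&w_status_X=0|1' pair per mode, e.g. '...&w_status_D=0&w_status_N=1'.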
+
+ request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+ for k in values_mapping.keys():
+ if values[str(k)]:
+ request_body = request_body + str(values_mapping[k]) + '=1'
+ else:
+ request_body = request_body + str(values_mapping[k]) + '=0'
+
+ response = fetch_url(self.module, self.management_url, data=str(request_body))
+ if response[1]['status'] != 200:
+            self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status']))
+
+ attributes = property(get_member_attributes)
+ status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+ """ Apache httpd 2.4 mod_proxy balancer object"""
+
+ def __init__(self, host, suffix, module, members=None, tls=False):
+        if tls:
+            self.base_url = 'https://' + str(host)
+            self.url = 'https://' + str(host) + str(suffix)
+        else:
+            self.base_url = 'http://' + str(host)
+            self.url = 'http://' + str(host) + str(suffix)
+ self.module = module
+ self.page = self.fetch_balancer_page()
+ if members is None:
+ self._members = []
+
+ def fetch_balancer_page(self):
+ """ Returns the balancer management html page as a string for later parsing."""
+ page = fetch_url(self.module, str(self.url))
+ if page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+ else:
+ content = page[0].read()
+ apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
+ if apache_version:
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+ return content
+ else:
+ self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")
+
+ def get_balancer_members(self):
+ """ Returns members of the balancer as a generator object for later iteration."""
+ try:
+ soup = BeautifulSoup(self.page)
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+ else:
+ for element in soup.findAll('a')[1::1]:
+ balancer_member_suffix = str(element.get('href'))
+ if not balancer_member_suffix:
+                    self.module.fail_json(msg="Could not extract the balancer member URL suffix from the balancer page!")
+ else:
+ yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+ members = property(get_balancer_members)
+
+
+def main():
+ """ Initiates module."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ balancer_vhost=dict(required=True, type='str'),
+ balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+ member_host=dict(type='str'),
+ state=dict(type='str'),
+ tls=dict(default=False, type='bool'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+    if not HAS_BEAUTIFULSOUP:
+ module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)
+
+ if module.params['state'] is not None:
+ states = module.params['state'].split(',')
+ if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+ module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ else:
+ for _state in states:
+ if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+ module.fail_json(
+ msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
+ )
+ else:
+ states = ['None']
+
+ mybalancer = Balancer(module.params['balancer_vhost'],
+ module.params['balancer_url_suffix'],
+ module=module,
+ tls=module.params['tls'])
+
+ if module.params['member_host'] is None:
+ json_output_list = []
+ for member in mybalancer.members:
+ json_output_list.append({
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ })
+ module.exit_json(
+ changed=False,
+ members=json_output_list
+ )
+ else:
+ changed = False
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
+ for mode in member_status.keys():
+ for state in states:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
+
+ for member in mybalancer.members:
+ if str(member.host) == str(module.params['member_host']):
+ member_exists = True
+ if module.params['state'] is not None:
+ member_status_before = member.status
+ if not module.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ if member_status_before != member_status_after:
+ changed = True
+ json_output = {
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ }
+ if member_exists:
+ module.exit_json(
+ changed=changed,
+ member=json_output
+ )
+ else:
+ module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py
new file mode 100644
index 00000000..4cc0ef8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_module
+author:
+ - Christian Berendt (@berendt)
+ - Ralf Hertel (@n0trax)
+ - Robin Roth (@robinro)
+short_description: Enables/disables a module of the Apache2 webserver.
+description:
+ - Enables or disables a specified module of the Apache2 webserver.
+options:
+ name:
+ type: str
+ description:
+ - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
+ required: true
+ identifier:
+ type: str
+ description:
+ - Identifier of the module as listed by C(apache2ctl -M).
+ This is optional and usually determined automatically by the common convention of
+        appending C(_module) to I(name) as well as custom exceptions for popular modules.
+ required: False
+ force:
+ description:
+ - Force disabling of default modules and override Debian warnings.
+ required: false
+ type: bool
+ default: False
+ state:
+ type: str
+ description:
+ - Desired state of the module.
+ choices: ['present', 'absent']
+ default: present
+ ignore_configcheck:
+ description:
+ - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
+ type: bool
+ default: False
+requirements: ["a2enmod","a2dismod"]
+'''
+
+EXAMPLES = '''
+- name: Enable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: present
+ name: wsgi
+
+- name: Disable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: absent
+ name: wsgi
+
+- name: Disable default modules for Debian
+ community.general.apache2_module:
+ state: absent
+ name: autoindex
+ force: True
+
+- name: Disable mpm_worker and ignore warnings about missing mpm module
+ community.general.apache2_module:
+ state: absent
+ name: mpm_worker
+ ignore_configcheck: True
+
+- name: Enable dump_io module, which is identified as dumpio_module inside apache2
+ community.general.apache2_module:
+ state: present
+ name: dump_io
+ identifier: dumpio_module
+'''
+
+RETURN = '''
+result:
+ description: message about action taken
+ returned: always
+ type: str
+warnings:
+ description: list of warning messages
+ returned: when needed
+ type: list
+rc:
+ description: return code of underlying command
+ returned: failed
+ type: int
+stdout:
+ description: stdout of underlying command
+ returned: failed
+ type: str
+stderr:
+ description: stderr of underlying command
+ returned: failed
+ type: str
+'''
+
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _run_threaded(module):
+ control_binary = _get_ctl_binary(module)
+
+ result, stdout, stderr = module.run_command("%s -V" % control_binary)
+
+ return bool(re.search(r'threaded:[ ]*yes', stdout))
+
+
+def _get_ctl_binary(module):
+ for command in ['apache2ctl', 'apachectl']:
+ ctl_binary = module.get_bin_path(command)
+ if ctl_binary is not None:
+ return ctl_binary
+
+ module.fail_json(
+        msg="Neither apache2ctl nor apachectl found."
+ " At least one apache control binary is necessary."
+ )
+
+
+def _module_is_enabled(module):
+ control_binary = _get_ctl_binary(module)
+ result, stdout, stderr = module.run_command("%s -M" % control_binary)
+
+ if result != 0:
+ error_msg = "Error executing %s: %s" % (control_binary, stderr)
+ if module.params['ignore_configcheck']:
+ if 'AH00534' in stderr and 'mpm_' in module.params['name']:
+ module.warnings.append(
+ "No MPM module loaded! apache2 reload AND other module actions"
+ " will fail if no MPM module is loaded immediately."
+ )
+ else:
+ module.warnings.append(error_msg)
+ return False
+ else:
+ module.fail_json(msg=error_msg)
+
+ searchstring = ' ' + module.params['identifier']
+ return searchstring in stdout
+
+
+def create_apache_identifier(name):
+ """
+ By convention if a module is loaded via name, it appears in apache2ctl -M as
+ name_module.
+
+ Some modules don't follow this convention and we use replacements for those."""
+
+ # a2enmod name replacement to apache2ctl -M names
+ text_workarounds = [
+ ('shib', 'mod_shib'),
+ ('shib2', 'mod_shib'),
+ ('evasive', 'evasive20_module'),
+ ]
+
+ # re expressions to extract subparts of names
+ re_workarounds = [
+ ('php', r'^(php\d)\.'),
+ ]
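+    # e.g. 'ssl' -> 'ssl_module', 'shib2' -> 'mod_shib', 'php7.4' -> 'php7_module'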
+
+ for a2enmod_spelling, module_name in text_workarounds:
+ if a2enmod_spelling in name:
+ return module_name
+
+ for search, reexpr in re_workarounds:
+ if search in name:
+ try:
+ rematch = re.search(reexpr, name)
+ return rematch.group(1) + '_module'
+ except AttributeError:
+ pass
+
+ return name + '_module'
+
+
+def _set_state(module, state):
+ name = module.params['name']
+ force = module.params['force']
+
+ want_enabled = state == 'present'
+ state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
+ a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
+ success_msg = "Module %s %s" % (name, state_string)
+
+ if _module_is_enabled(module) != want_enabled:
+ if module.check_mode:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+
+ a2mod_binary = module.get_bin_path(a2mod_binary)
+ if a2mod_binary is None:
+ module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))
+
+ if not want_enabled and force:
+ # force exists only for a2dismod on debian
+ a2mod_binary += ' -f'
+
+ result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name))
+
+ if _module_is_enabled(module) == want_enabled:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+ else:
+ msg = (
+ 'Failed to set module {name} to {state}:\n'
+ '{stdout}\n'
+            'Maybe the module identifier ({identifier}) was guessed incorrectly.\n'
+ 'Consider setting the "identifier" option.'
+ ).format(
+ name=name,
+ state=state_string,
+ stdout=stdout,
+ identifier=module.params['identifier']
+ )
+ module.fail_json(msg=msg,
+ rc=result,
+ stdout=stdout,
+ stderr=stderr)
+ else:
+ module.exit_json(changed=False,
+ result=success_msg,
+ warnings=module.warnings)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ identifier=dict(required=False, type='str'),
+ force=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ignore_configcheck=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.warnings = []
+
+ name = module.params['name']
+ if name == 'cgi' and _run_threaded(module):
+ module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name)
+
+ if not module.params['identifier']:
+ module.params['identifier'] = create_apache_identifier(module.params['name'])
+
+ if module.params['state'] in ['present', 'absent']:
+ _set_state(module, module.params['state'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py
new file mode 100644
index 00000000..641cc1d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+  - "Running it with C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ type: path
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ type: str
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+      C(absent) will remove the project folder (synonymous with the M(ansible.builtin.file) module with C(state=absent))
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+
+ release:
+ type: str
+ description:
+      - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (e.g. '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ type: str
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+ default: releases
+
+ shared_path:
+ type: path
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+ default: shared
+
+ current_path:
+ type: path
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+ default: current
+
+ unfinished_filename:
+ type: str
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+ default: DEPLOY_UNFINISHED
+
+ clean:
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+ type: bool
+ default: 'yes'
+
+ keep_releases:
+ type: int
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+ default: 5
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+# root:
+# releases:
+# - 20140415234508
+# - 20140415235146
+# - 20140416082818
+#
+# shared:
+# - sessions
+# - uploads
+#
+# current: releases/20140416082818
+
+
+# The 'releases' folder holds all the available releases. A release is a complete build of the application being
+# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+# git tags or commit hashes.
+#
+# During a deploy, a new folder should be created in the releases folder and any build steps required should be
+# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+# with a link to this build.
+#
+# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+#
+# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+# release is reduced to the time it takes to switch the link.
+#
+# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+# procedure to remove it during cleanup.
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+ community.general.deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ ansible.builtin.git:
+    repo: git://foosball.example.org/path/to/repo.git
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ community.general.deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# all paths can be absolute or relative (to the 'path' parameter)
+- community.general.deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: v1.1.1
+ state: present
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- community.general.deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: False
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- community.general.deploy_helper:
+ path: /path/to/root
+- ansible.builtin.debug:
+ var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
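+        # When no release name was supplied, query/present generate one from
+        # the current timestamp (see the idempotency note in DOCUMENTATION).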
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ if self.release:
+ new_release_path = os.path.join(releases_path, self.release)
+ else:
+ new_release_path = None
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception as e:
+ self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+ if not self.module.check_mode and os.path.exists(tmp_link_name):
+ changed = True
+ os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
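+            # Order the remaining releases newest-first by ctime and delete
+            # everything past keep_releases.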
+ if not self.module.check_mode:
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(aliases=['dest'], required=True, type='path'),
+ release=dict(required=False, type='str', default=None),
+ releases_path=dict(required=False, type='str', default='releases'),
+ shared_path=dict(required=False, type='path', default='shared'),
+ current_path=dict(required=False, type='path', default='current'),
+ keep_releases=dict(required=False, type='int', default=5),
+ clean=dict(required=False, type='bool', default=True),
+ unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = {'deploy_helper': []}
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py
new file mode 100644
index 00000000..10161c04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: django_manage
+short_description: Manages a Django application.
+description:
+ - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
+    C(virtualenv) parameter, all management commands will be executed within the given C(virtualenv) installation.
+options:
+ command:
+ description:
+ - The name of the Django management command to run. Built in commands are C(cleanup), C(collectstatic),
+ C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may
+ prompt for user input should be run with the C(--noinput) flag.
+ - The module will perform some basic parameter validation (when applicable) to the commands C(cleanup),
+ C(collectstatic), C(createcachetable), C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ type: str
+ required: true
+ project_path:
+ description:
+ - The path to the root of the Django application where B(manage.py) lives.
+ type: path
+ required: true
+ aliases: [app_path, chdir]
+ settings:
+ description:
+ - The Python path to the application's settings module, such as C(myapp.settings).
+ type: path
+ required: false
+ pythonpath:
+ description:
+ - A directory to add to the Python path. Typically used to include the settings module if it is located
+ external to the application directory.
+ type: path
+ required: false
+ aliases: [python_path]
+ virtualenv:
+ description:
+ - An optional path to a I(virtualenv) installation to use while running the manage application.
+ type: path
+ aliases: [virtual_env]
+ apps:
+ description:
+      - A space-delimited list of apps to target. Used by the C(test) command.
+ type: str
+ required: false
+ cache_table:
+ description:
+ - The name of the table used for database-backed caching. Used by the C(createcachetable) command.
+ type: str
+ required: false
+ clear:
+ description:
+ - Clear the existing files before trying to copy or link the original file.
+ - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
+ required: false
+ default: no
+ type: bool
+ database:
+ description:
+ - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb),
+ and C(migrate) commands.
+ type: str
+ required: false
+ failfast:
+ description:
+ - Fail the command immediately if a test fails. Used by the C(test) command.
+ required: false
+ default: false
+ type: bool
+ aliases: [fail_fast]
+ fixtures:
+ description:
+ - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command.
+ type: str
+ required: false
+ skip:
+ description:
+      - Will skip over out-of-order missing migrations. This parameter can only be used with the C(migrate) command.
+ required: false
+ type: bool
+ merge:
+ description:
+      - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter can only
+        be used with the C(migrate) command.
+ required: false
+ type: bool
+ link:
+ description:
+      - Will create links to the files instead of copying them. This parameter can only be used with the
+        C(collectstatic) command.
+ required: false
+ type: bool
+ liveserver:
+ description:
+      - This parameter was implemented a long time ago in a galaxy far, far away. It probably relates to the
+ django-liveserver package, which is no longer updated.
+ - Hence, it will be considered DEPRECATED and should be removed in a future release.
+ type: str
+ required: false
+ aliases: [live_server]
+ testrunner:
+ description:
+ - "From the Django docs: Controls the test runner class that is used to execute tests."
+ - This parameter is passed as-is to C(manage.py).
+ type: str
+ required: false
+ aliases: [test_runner]
+notes:
+ - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter
+ is specified.
+ - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already
+ exist at the given location.
+ - This module assumes English error messages for the C(createcachetable) command to detect table existence,
+ unfortunately.
+  - To be able to use the C(migrate) command with Django versions < 1.7, you must have C(south) installed and added
+ as an app in your settings.
+ - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings.
+ - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang,
+ i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
+requirements: [ "virtualenv", "django" ]
+author: "Scott Anderson (@tastychutney)"
+'''
+
+EXAMPLES = """
+- name: Run cleanup on the application installed in django_dir
+ community.general.django_manage:
+ command: cleanup
+ project_path: "{{ django_dir }}"
+
+- name: Load the initial_data fixture into the application
+ community.general.django_manage:
+ command: loaddata
+ project_path: "{{ django_dir }}"
+ fixtures: "{{ initial_data }}"
+
+- name: Run syncdb on the application
+ community.general.django_manage:
+ command: syncdb
+ project_path: "{{ django_dir }}"
+ settings: "{{ settings_app_name }}"
+ pythonpath: "{{ settings_dir }}"
+ virtualenv: "{{ virtualenv_dir }}"
+
+- name: Run the SmokeTest test case from the main app. Useful for testing deploys
+ community.general.django_manage:
+ command: test
+ project_path: "{{ django_dir }}"
+ apps: main.SmokeTest
+
+- name: Create an initial superuser
+ community.general.django_manage:
+ command: "createsuperuser --noinput --username=admin --email=admin@example.com"
+ project_path: "{{ django_dir }}"
+"""
+
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _fail(module, cmd, out, err, **kwargs):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+ msg += "\n:stderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg, **kwargs)
+
+
+def _ensure_virtualenv(module):
+
+ venv_param = module.params['virtualenv']
+ if venv_param is None:
+ return
+
+ vbin = os.path.join(venv_param, 'bin')
+ activate = os.path.join(vbin, 'activate')
+
+ if not os.path.exists(activate):
+ virtualenv = module.get_bin_path('virtualenv', True)
+ vcmd = [virtualenv, venv_param]
+ rc, out_venv, err_venv = module.run_command(vcmd)
+ if rc != 0:
+ _fail(module, vcmd, out_venv, err_venv)
+
+ os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
+ os.environ["VIRTUAL_ENV"] = venv_param
+
+
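+# The helpers below are looked up dynamically in main() by naming convention:
+# '<command>_check_changed' overrides the changed status from the full output,
+# while '<command>_filter_output' marks the run as changed when any output
+# line passes the filter.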
+def createcachetable_check_changed(output):
+ return "already exists" not in output
+
+
+def flush_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def loaddata_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def syncdb_filter_output(line):
+ return ("Creating table " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line)
+
+
+def migrate_filter_output(line):
+ return ("Migrating forwards " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line) \
+ or ("Applying" in line)
+
+
+def collectstatic_filter_output(line):
+ return line and "0 static files" not in line
+
+
+def main():
+ command_allowed_param_map = dict(
+ cleanup=(),
+ createcachetable=('cache_table', 'database', ),
+ flush=('database', ),
+ loaddata=('database', 'fixtures', ),
+ syncdb=('database', ),
+ test=('failfast', 'testrunner', 'liveserver', 'apps', ),
+ validate=(),
+ migrate=('apps', 'skip', 'merge', 'database',),
+ collectstatic=('clear', 'link', ),
+ )
+
+ command_required_param_map = dict(
+ loaddata=('fixtures', ),
+ )
+
+ # forces --noinput on every command that needs it
+ noinput_commands = (
+ 'flush',
+ 'syncdb',
+ 'migrate',
+ 'test',
+ 'collectstatic',
+ )
+
+ # These params are allowed for certain commands only
+ specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
+
+ # These params are automatically added to the command if present
+ general_params = ('settings', 'pythonpath', 'database',)
+ specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
+ end_of_command_params = ('apps', 'cache_table', 'fixtures')
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True, type='str'),
+ project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']),
+ settings=dict(default=None, required=False, type='path'),
+ pythonpath=dict(default=None, required=False, type='path', aliases=['python_path']),
+ virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
+
+ apps=dict(default=None, required=False),
+ cache_table=dict(default=None, required=False, type='str'),
+ clear=dict(default=False, required=False, type='bool'),
+ database=dict(default=None, required=False, type='str'),
+ failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']),
+ fixtures=dict(default=None, required=False, type='str'),
+ liveserver=dict(default=None, required=False, type='str', aliases=['live_server'],
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ testrunner=dict(default=None, required=False, type='str', aliases=['test_runner']),
+ skip=dict(default=None, required=False, type='bool'),
+ merge=dict(default=None, required=False, type='bool'),
+ link=dict(default=None, required=False, type='bool'),
+ ),
+ )
+
+ command = module.params['command']
+ project_path = module.params['project_path']
+ virtualenv = module.params['virtualenv']
+
+ for param in specific_params:
+ value = module.params[param]
+ if param in specific_boolean_params:
+ value = module.boolean(value)
+ if value and param not in command_allowed_param_map[command]:
+ module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
+
+ for param in command_required_param_map.get(command, ()):
+ if not module.params[param]:
+ module.fail_json(msg='%s param is required for command=%s' % (param, command))
+
+ _ensure_virtualenv(module)
+
+ cmd = "./manage.py %s" % (command, )
+
+ if command in noinput_commands:
+ cmd = '%s --noinput' % cmd
+
+ for param in general_params:
+ if module.params[param]:
+ cmd = '%s --%s=%s' % (cmd, param, module.params[param])
+
+ for param in specific_boolean_params:
+ if module.boolean(module.params[param]):
+ cmd = '%s --%s' % (cmd, param)
+
+ # these params always get tacked on the end of the command
+ for param in end_of_command_params:
+ if module.params[param]:
+ cmd = '%s %s' % (cmd, module.params[param])
+
+ rc, out, err = module.run_command(cmd, cwd=project_path)
+ if rc != 0:
+ if command == 'createcachetable' and 'table' in err and 'already exists' in err:
+ out = 'already exists.'
+ else:
+ if "Unknown command:" in err:
+ _fail(module, cmd, err, "Unknown django command: %s" % command)
+ _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
+
+ changed = False
+
+ lines = out.split('\n')
+ filt = globals().get(command + "_filter_output", None)
+ if filt:
+ filtered_output = list(filter(filt, lines))
+ if len(filtered_output):
+ changed = True
+ check_changed = globals().get("{0}_check_changed".format(command), None)
+ if check_changed:
+ changed = check_changed(out)
+
+ module.exit_json(changed=changed, out=out, cmd=cmd, app_path=project_path, project_path=project_path,
+ virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py
new file mode 100644
index 00000000..be63c920
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ejabberd_user
+author: "Peter Sprygada (@privateip)"
+short_description: Manages users for ejabberd servers
+requirements:
+ - ejabberd with mod_admin_extra
+description:
+ - This module provides user management for ejabberd servers
+options:
+ username:
+ type: str
+ description:
+ - the name of the user to manage
+ required: true
+ host:
+ type: str
+ description:
+ - the ejabberd host associated with this username
+ required: true
+ password:
+ type: str
+ description:
+ - the password to assign to the username
+ required: false
+ logging:
+ description:
+ - enables or disables the local syslog facility for this module
+ required: false
+ default: false
+ type: bool
+ state:
+ type: str
+ description:
+ - describe the desired state of the user to be managed
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+notes:
+  - Password parameter is required for C(state=present) only
+ - Passwords must be stored in clear text for this release
+ - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+# Example playbook entries using the ejabberd_user module to manage users state.
+
+- name: Create a user if it does not exist
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ password: password
+
+- name: Delete a user if it exists
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ state: absent
+'''
+
+import syslog
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class EjabberdUserException(Exception):
+ """ Base exception for EjabberdUser class object """
+ pass
+
+
+class EjabberdUser(object):
+ """ This object represents a user resource for an ejabberd server. The
+ object manages user creation and deletion using ejabberdctl. The following
+ commands are currently supported:
+ * ejabberdctl register
+    * ejabberdctl unregister
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.logging = module.params.get('logging')
+ self.state = module.params.get('state')
+ self.host = module.params.get('host')
+ self.user = module.params.get('username')
+ self.pwd = module.params.get('password')
+
+ @property
+ def changed(self):
+ """ This method will check the current user and see if the password has
+        changed. It returns a non-zero value if the stored password does not
+        match the supplied credentials and zero if it does.
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('check_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
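+        # A non-zero rc from 'check_password' means the stored credentials
+        # differ from the supplied ones.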
+ return rc
+
+ @property
+ def exists(self):
+ """ This method will check to see if the supplied username exists for
+ host specified. If the user exists True is returned, otherwise False
+ is returned
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('check_account', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return not bool(int(rc))
+
+ def log(self, entry):
+ """ This method will log information to the local syslog facility """
+ if self.logging:
+ syslog.openlog('ansible-%s' % self.module._name)
+ syslog.syslog(syslog.LOG_NOTICE, entry)
+
+ def run_command(self, cmd, options):
+ """ This method will run the any command specified and return the
+ returns using the Ansible common module
+ """
+ if not all(options):
+ raise EjabberdUserException
+
+ cmd = 'ejabberdctl %s ' % cmd
+ cmd += " ".join(options)
+ self.log('command: %s' % cmd)
+ return self.module.run_command(cmd.split())
+
+ def update(self):
+ """ The update method will update the credentials for the user provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('change_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def create(self):
+ """ The create method will create a new user on the host with the
+ password provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('register', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def delete(self):
+ """ The delete method will delete the user from the host
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('unregister', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ username=dict(required=True, type='str'),
+ password=dict(default=None, type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ logging=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ obj = EjabberdUser(module)
+
+ rc = None
+ result = dict(changed=False)
+
+ if obj.state == 'absent':
+ if obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.delete()
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ elif obj.state == 'present':
+ if not obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.create()
+ elif obj.changed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.update()
+ if rc is not None and rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py
new file mode 100644
index 00000000..57030556
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Alejandro Gomez <alexgomez2202@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gunicorn
+short_description: Run gunicorn with various settings.
+description:
+ - Starts gunicorn with the parameters specified. Common settings for gunicorn
+ configuration are supported. For additional configuration use a config file
+ See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more
+ options. It's recommended to always use the chdir option to avoid problems
+ with the location of the app.
+requirements: [gunicorn]
+author:
+ - "Alejandro Gomez (@agmezr)"
+options:
+ app:
+ type: str
+ required: true
+ aliases: ['name']
+ description:
+ - The app module. A name refers to a WSGI callable that should be found in the specified module.
+ venv:
+ type: path
+ aliases: ['virtualenv']
+ description:
+ - 'Path to the virtualenv directory.'
+ config:
+ type: path
+ description:
+ - 'Path to the gunicorn configuration file.'
+ aliases: ['conf']
+ chdir:
+ type: path
+ description:
+ - 'Chdir to specified directory before apps loading.'
+ pid:
+ type: path
+ description:
+ - 'A filename to use for the PID file. If not set and not found on the configuration file a tmp
+ pid file will be created to check a successful run of gunicorn.'
+ worker:
+ type: str
+    choices: ['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ description:
+ - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.'
+ user:
+ type: str
+ description:
+ - 'Switch worker processes to run as this user.'
+notes:
+  - If no error log is specified in the config file, a temporary one will be created
+    in the module's temporary directory. Make sure you have write access there. It is
+    not required, but it will help you to identify any problem with the configuration.
+'''
+
+EXAMPLES = '''
+- name: Simple gunicorn run example
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+
+- name: Run gunicorn on a virtualenv
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ venv: '/workspace/example/venv'
+
+- name: Run gunicorn with a config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+
+- name: Run gunicorn as ansible user with specified pid and config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+ venv: '/workspace/example/venv'
+ pid: '/workspace/example/gunicorn.pid'
+ user: 'ansible'
+'''
+
+RETURN = '''
+gunicorn:
+  description: Process ID of gunicorn.
+ returned: changed
+ type: str
+ sample: "1234"
+'''
+
+import os
+import time
+
+# import ansible utils
+from ansible.module_utils.basic import AnsibleModule
+
+
+def search_existing_config(config, option):
+ ''' search in config file for specified option '''
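+    # Plain substring matching: a commented-out 'errorlog' or 'pid' line in
+    # the config file will also count as a match.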
+ if config and os.path.isfile(config):
+ with open(config, 'r') as f:
+ for line in f:
+ if option in line:
+ return line
+ return None
+
+
+def remove_tmp_file(file_path):
+ ''' remove temporary files '''
+ if os.path.isfile(file_path):
+ os.remove(file_path)
+
+
+def main():
+
+ # available gunicorn options on module
+ gunicorn_options = {
+ 'config': '-c',
+ 'chdir': '--chdir',
+ 'worker': '-k',
+ 'user': '-u',
+ }
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ app=dict(required=True, type='str', aliases=['name']),
+ venv=dict(required=False, type='path', default=None, aliases=['virtualenv']),
+ config=dict(required=False, default=None, type='path', aliases=['conf']),
+ chdir=dict(required=False, type='path', default=None),
+ pid=dict(required=False, type='path', default=None),
+ user=dict(required=False, type='str'),
+ worker=dict(required=False,
+ type='str',
+                    choices=['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ ),
+ )
+ )
+
+ # temporary files in case no option provided
+ tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log')
+ tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid')
+
+ # remove temp file if exists
+ remove_tmp_file(tmp_pid_file)
+ remove_tmp_file(tmp_error_log)
+
+ # obtain app name and venv
+ params = module.params
+ app = params['app']
+ venv = params['venv']
+ pid = params['pid']
+
+ # use venv path if exists
+ if venv:
+ gunicorn_command = "/".join((venv, 'bin', 'gunicorn'))
+ else:
+ gunicorn_command = 'gunicorn'
+
+ # to daemonize the process
+ options = ["-D"]
+
+ # fill options
+ for option in gunicorn_options:
+ param = params[option]
+ if param:
+ options.append(gunicorn_options[option])
+ options.append(param)
+
+ error_log = search_existing_config(params['config'], 'errorlog')
+ if not error_log:
+ # place error log somewhere in case of fail
+ options.append("--error-logfile")
+ options.append(tmp_error_log)
+
+ pid_file = search_existing_config(params['config'], 'pid')
+ if not params['pid'] and not pid_file:
+ pid = tmp_pid_file
+
+ # add option for pid file if not found on config file
+ if not pid_file:
+ options.append('--pid')
+ options.append(pid)
+
+ # put args together
+ args = [gunicorn_command] + options + [app]
+ rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None)
+
+ if not err:
+ # wait for gunicorn to dump to log
+ time.sleep(0.5)
+ if os.path.isfile(pid):
+ with open(pid, 'r') as f:
+ result = f.readline().strip()
+
+ if not params['pid']:
+ os.remove(pid)
+
+ module.exit_json(changed=True, pid=result, debug=" ".join(args))
+ else:
+ # if user defined own error log, check that
+ if error_log:
+ error = 'Please check your {0}'.format(error_log.strip())
+ else:
+ if os.path.isfile(tmp_error_log):
+ with open(tmp_error_log, 'r') as f:
+ error = f.read()
+ # delete tmp log
+ os.remove(tmp_error_log)
+ else:
+ error = "Log not found"
+
+ module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err)
+
+ else:
+        module.fail_json(msg='Failed to start gunicorn: {0}'.format(err), error=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py
new file mode 100644
index 00000000..6ff04131
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Nimbis Services, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: htpasswd
+short_description: manage user files for basic authentication
+description:
+ - Add and remove username/password entries in a password file using htpasswd.
+ - This is used by web servers such as Apache and Nginx for basic authentication.
+options:
+ path:
+ type: path
+ required: true
+ aliases: [ dest, destfile ]
+ description:
+ - Path to the file that contains the usernames and passwords
+ name:
+ type: str
+ required: true
+ aliases: [ username ]
+ description:
+ - User name to add or remove
+ password:
+ type: str
+ required: false
+ description:
+ - Password associated with user.
+ - Must be specified if user does not exist yet.
+ crypt_scheme:
+ type: str
+ required: false
+ default: "apr_md5_crypt"
+ description:
+ - Encryption scheme to be used. As well as the four choices listed
+ here, you can also use any other hash supported by passlib, such as
+        md5_crypt and sha256_crypt, which are Linux passwd hashes. If you
+        do so, the password file will not be compatible with Apache or Nginx.
+ - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext)'
+ state:
+ type: str
+ required: false
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the user entry should be present or not
+ create:
+ required: false
+ type: bool
+ default: "yes"
+ description:
+ - Used with C(state=present). If specified, the file will be created
+ if it does not already exist. If set to "no", will fail if the
+        file does not exist.
+notes:
+ - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
+ - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
+ - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
+requirements: [ passlib>=1.6 ]
+author: "Ansible Core Team"
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = """
+- name: Add a user to a password file and ensure permissions are set
+ community.general.htpasswd:
+ path: /etc/nginx/passwdfile
+ name: janedoe
+ password: '9s36?;fyNp'
+ owner: root
+ group: www-data
+ mode: 0640
+
+- name: Remove a user from a password file
+ community.general.htpasswd:
+ path: /etc/apache2/passwdfile
+ name: foobar
+ state: absent
+
+- name: Add a user to a password file suitable for use by libpam-pwdfile
+ community.general.htpasswd:
+ path: /etc/mail/passwords
+ name: alex
+ password: oedu2eGh
+ crypt_scheme: md5_crypt
+"""
+
+
+import os
+import tempfile
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+PASSLIB_IMP_ERR = None
+try:
+ from passlib.apache import HtpasswdFile, htpasswd_context
+ from passlib.context import CryptContext
+ import passlib
+except ImportError:
+ PASSLIB_IMP_ERR = traceback.format_exc()
+ passlib_installed = False
+else:
+ passlib_installed = True
+
+apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
+
+
+def create_missing_directories(dest):
+ destpath = os.path.dirname(dest)
+ if not os.path.exists(destpath):
+ os.makedirs(destpath)
+
+
+def present(dest, username, password, crypt_scheme, create, check_mode):
+ """ Ensures user is present
+
+ Returns (msg, changed) """
+ if crypt_scheme in apache_hashes:
+ context = htpasswd_context
+ else:
+ context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
+ if not os.path.exists(dest):
+ if not create:
+ raise ValueError('Destination %s does not exist' % dest)
+ if check_mode:
+ return ("Create %s" % dest, True)
+ create_missing_directories(dest)
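+        # passlib 1.6 renamed the HtpasswdFile keywords ('autoload'/'default'
+        # became 'new'/'default_scheme'), hence the version checks below.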
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Created %s and added %s" % (dest, username), True)
+ else:
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
+
+ found = None
+ if getattr(ht, 'check_password', None):
+ found = ht.check_password(username, password)
+ else:
+ found = ht.verify(username, password)
+
+ if found:
+ return ("%s already present" % username, False)
+ else:
+ if not check_mode:
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Add/update %s" % username, True)
+
+
+def absent(dest, username, check_mode):
+ """ Ensures user is absent
+
+ Returns (msg, changed) """
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False)
+ else:
+ ht = HtpasswdFile(dest)
+
+ if username not in ht.users():
+ return ("%s not present" % username, False)
+ else:
+ if not check_mode:
+ ht.delete(username)
+ ht.save()
+ return ("Remove %s" % username, True)
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ arg_spec = dict(
+ path=dict(type='path', required=True, aliases=["dest", "destfile"]),
+ name=dict(type='str', required=True, aliases=["username"]),
+ password=dict(type='str', required=False, default=None, no_log=True),
+ crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"),
+ state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
+ create=dict(type='bool', default=True),
+
+ )
+ module = AnsibleModule(argument_spec=arg_spec,
+ add_file_common_args=True,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ username = module.params['name']
+ password = module.params['password']
+ crypt_scheme = module.params['crypt_scheme']
+ state = module.params['state']
+ create = module.params['create']
+ check_mode = module.check_mode
+
+ if not passlib_installed:
+ module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
+
+    # Check the file for blank lines in an effort to avoid "need more than 1 value to unpack" errors.
+ try:
+ f = open(path, "r")
+ except IOError:
+ # No preexisting file to remove blank lines from
+ f = None
+ else:
+ try:
+ lines = f.readlines()
+ finally:
+ f.close()
+
+        # Rewriting the file counts as a change, so only rewrite it when blank lines are actually present.
+ strip = False
+ for line in lines:
+ if not line.strip():
+ strip = True
+ break
+
+ if strip:
+ # If check mode, create a temporary file
+ if check_mode:
+ temp = tempfile.NamedTemporaryFile()
+ path = temp.name
+ f = open(path, "w")
+ try:
+                for line in lines:
+                    if line.strip():
+                        f.write(line)
+ finally:
+ f.close()
+
+ try:
+ if state == 'present':
+ (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+ elif state == 'absent':
+ if not os.path.exists(path):
+ module.exit_json(msg="%s not present" % username,
+ warnings="%s does not exist" % path, changed=False)
+ (msg, changed) = absent(path, username, check_mode)
+ else:
+ module.fail_json(msg="Invalid state: %s" % state)
+
+ check_file_attrs(module, changed, msg)
+ module.exit_json(msg=msg, changed=changed)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py
new file mode 100644
index 00000000..4c077a1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: jboss
+short_description: Deploy applications to JBoss
+description:
+ - Deploy applications to JBoss standalone using the filesystem.
+options:
+ deployment:
+ required: true
+ description:
+ - The name of the deployment.
+ type: str
+ src:
+ description:
+ - The remote path of the application ear or war to deploy.
+ - Required when I(state=present).
+ - Ignored when I(state=absent).
+ type: path
+ deploy_path:
+ default: /var/lib/jbossas/standalone/deployments
+ description:
+ - The location in the filesystem where the deployment scanner listens.
+ type: path
+ state:
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the application should be deployed or undeployed.
+ type: str
+notes:
+  - The JBoss standalone deployment-scanner has to be enabled in standalone.xml.
+  - The module can wait until the I(deployment) file is deployed/undeployed by the deployment-scanner.
+    The duration of the waiting time depends on the scan-interval parameter from standalone.xml.
+  - Ensure no identically named application is deployed through the JBoss CLI.
+seealso:
+- name: WildFly reference
+ description: Complete reference of the WildFly documentation.
+ link: https://docs.wildfly.org
+author:
+ - Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r"""
+- name: Deploy a hello world application to the default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.0-SNAPSHOT.war
+ deployment: hello.war
+ state: present
+
+- name: Update the hello world application to the non-default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.1-SNAPSHOT.war
+ deploy_path: /opt/wildfly/deployment
+ deployment: hello.war
+ state: present
+
+- name: Undeploy the hello world application from the default deploy_path
+ community.general.jboss:
+ deployment: hello.war
+ state: absent
+"""
+
+RETURN = r""" # """
+
+import os
+import shutil
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+
+DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments'
+
+
+def is_deployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
+
+
+def is_undeployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
+
+
+def is_failed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
+
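+# The deployment scanner communicates through marker files placed next to the
+# deployment: '<name>.deployed', '<name>.undeployed' and '<name>.failed'. The
+# helpers above simply test for those markers.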
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path'),
+ deployment=dict(type='str', required=True),
+ deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH),
+ state=dict(type='str', choices=['absent', 'present'], default='present'),
+ ),
+ required_if=[('state', 'present', ('src',))],
+ supports_check_mode=True
+ )
+
+ result = dict(changed=False)
+
+ src = module.params['src']
+ deployment = module.params['deployment']
+ deploy_path = module.params['deploy_path']
+ state = module.params['state']
+
+ if not os.path.exists(deploy_path):
+ module.fail_json(msg="deploy_path does not exist.")
+
+ if state == 'absent' and src:
+ module.warn('Parameter src is ignored when state=absent')
+ elif state == 'present' and not os.path.exists(src):
+ module.fail_json(msg='Source file %s does not exist.' % src)
+
+ deployed = is_deployed(deploy_path, deployment)
+
+ # === when check_mode ===
+ if module.check_mode:
+ if state == 'present':
+ if not deployed:
+ result['changed'] = True
+
+ elif deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ result['changed'] = True
+
+ elif state == 'absent' and deployed:
+ result['changed'] = True
+
+ module.exit_json(**result)
+ # =======================
+
+ if state == 'present' and not deployed:
+ if is_failed(deploy_path, deployment):
+ # Clean up old failed deployment
+ os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
+
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
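+        # Poll once per second until the scanner drops a .deployed or .failed
+        # marker for this deployment.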
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'present' and deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
+ deployed = False
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'absent' and deployed:
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ while deployed:
+ deployed = not is_undeployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Undeploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py
new file mode 100644
index 00000000..0e06b5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_job
+short_description: Manage Jenkins jobs
+description:
+ - Manage Jenkins jobs by using the Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author: "Sergio Millan Rodriguez (@sermilrod)"
+options:
+ config:
+ type: str
+ description:
+ - Config in XML format.
+ - Required if the job does not yet exist.
+ - Mutually exclusive with C(enabled).
+ - Considered if C(state=present).
+ required: false
+ enabled:
+ description:
+ - Whether the job should be enabled or disabled.
+ - Mutually exclusive with C(config).
+ - Considered if C(state=present).
+ type: bool
+ required: false
+ name:
+ type: str
+ description:
+ - Name of the Jenkins job.
+ required: true
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ required: false
+ state:
+ type: str
+ description:
+ - Whether the job should be created or deleted.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server, as an alternative to I(password).
+ required: false
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ required: false
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a Jenkins job using basic authentication
+ community.general.jenkins_job:
+ config: "{{ lookup('file', 'templates/test.xml') }}"
+ name: test
+ password: admin
+ url: http://localhost:8080
+ user: admin
+
+- name: Create a Jenkins job using the token
+ community.general.jenkins_job:
+ config: "{{ lookup('template', 'templates/test.xml.j2') }}"
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a Jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a Jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a Jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ enabled: False
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a Jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: False
+ url: http://localhost:8080
+ user: admin
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the Jenkins job.
+ returned: success
+ type: str
+ sample: test-job
+state:
+ description: State of the Jenkins job.
+ returned: success
+ type: str
+ sample: present
+enabled:
+ description: Whether the Jenkins job is enabled.
+ returned: success
+ type: bool
+ sample: true
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: admin
+url:
+ description: URL used to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+'''
+
+import traceback
+import xml.etree.ElementTree as ET
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ python_jenkins_installed = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class JenkinsJob:
+
+ def __init__(self, module):
+ self.module = module
+
+ self.config = module.params.get('config')
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.state = module.params.get('state')
+ self.enabled = module.params.get('enabled')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ 'diff': {
+ 'before': "",
+ 'after': ""
+ }
+ }
+
+ self.EXCL_STATE = "excluded state"
+
+ def get_jenkins_connection(self):
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc())
+
+ def get_job_status(self):
+ try:
+ response = self.server.get_job_info(self.name)
+ if "color" not in response:
+ return self.EXCL_STATE
+ else:
+ return to_native(response['color'])
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc())
+
+ def job_exists(self):
+ try:
+ return bool(self.server.job_exists(self.name))
+ except Exception as e:
+ self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_config(self):
+ return job_config_to_string(self.config)
+
+ def get_current_config(self):
+ return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
+
+ def has_config_changed(self):
+ # config is optional, if not provided we keep the current config as is
+ if self.config is None:
+ return False
+
+ config_file = self.get_config()
+ machine_file = self.get_current_config()
+
+ self.result['diff']['after'] = config_file
+ self.result['diff']['before'] = machine_file
+
+ if machine_file != config_file:
+ return True
+ return False
+
+ def present_job(self):
+ if self.config is None and self.enabled is None:
+ self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
+
+ if not self.job_exists():
+ self.create_job()
+ else:
+ self.update_job()
+
+ def has_state_changed(self, status):
+ # Keep the current state if the enabled parameter is not given
+ if self.enabled is None:
+ return False
+
+ if ((self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")):
+ return True
+ return False
+
+ def switch_state(self):
+ if self.enabled is False:
+ self.server.disable_job(self.name)
+ else:
+ self.server.enable_job(self.name)
+
+ def update_job(self):
+ try:
+ status = self.get_job_status()
+
+ # Handle job config
+ if self.has_config_changed():
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.server.reconfig_job(self.name, self.get_config())
+
+ # Handle job disable/enable
+ elif (status != self.EXCL_STATE and self.has_state_changed(status)):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.switch_state()
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def create_job(self):
+ if self.config is None:
+ self.module.fail_json(msg='missing required param: config')
+
+ self.result['changed'] = True
+ try:
+ config_file = self.get_config()
+ self.result['diff']['after'] = config_file
+ if not self.module.check_mode:
+ self.server.create_job(self.name, config_file)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def absent_job(self):
+ if self.job_exists():
+ self.result['changed'] = True
+ self.result['diff']['before'] = self.get_current_config()
+ if not self.module.check_mode:
+ try:
+ self.server.delete_job(self.name)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_result(self):
+ result = self.result
+ if self.job_exists():
+ result['enabled'] = self.get_job_status() != "disabled"
+ else:
+ result['enabled'] = None
+ return result
+
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
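+# Round-tripping the XML through ElementTree normalizes serialization details
+# (for example attribute quoting and self-closing tags), so the desired and
+# current configs are compared structurally rather than as raw text.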
+def job_config_to_string(xml_str):
+ return ET.tostring(ET.fromstring(xml_str)).decode('ascii')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ config=dict(type='str', required=False),
+ name=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
+ enabled=dict(required=False, type='bool'),
+ token=dict(type='str', required=False, no_log=True),
+ url=dict(type='str', required=False, default="http://localhost:8080"),
+ user=dict(type='str', required=False)
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['config', 'enabled'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_job = JenkinsJob(module)
+
+ if module.params.get('state') == "present":
+ jenkins_job.present_job()
+ else:
+ jenkins_job.absent_job()
+
+ result = jenkins_job.get_result()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+ - This module can be used to query information about Jenkins jobs that already exist.
+ - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+ - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
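+ # Disabling validation works by monkey-patching the process-wide default
+ # HTTPS context, so it affects every TLS connection made afterwards, not
+ # just the python-jenkins one.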
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+ - This module can be used to query information about Jenkins jobs that already exist.
+ - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+ - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
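+ # Disabling validation works by monkey-patching the process-wide default
+ # HTTPS context, so it affects every TLS connection made afterwards, not
+ # just the python-jenkins one.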
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py
new file mode 100644
index 00000000..e2adf7a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_plugin
+author: Jiri Tyr (@jtyr)
+short_description: Add or remove Jenkins plugin
+description:
+ - Ansible module which helps to manage Jenkins plugins.
+
+options:
+ group:
+ type: str
+ description:
+ - Name of the Jenkins group on the OS.
+ default: jenkins
+ jenkins_home:
+ type: path
+ description:
+ - Home directory of the Jenkins user.
+ default: /var/lib/jenkins
+ mode:
+ type: raw
+ description:
+ - File mode applied on versioned plugins.
+ default: '0644'
+ name:
+ type: str
+ description:
+ - Plugin name.
+ required: yes
+ owner:
+ type: str
+ description:
+ - Name of the Jenkins user on the OS.
+ default: jenkins
+ state:
+ type: str
+ description:
+ - Desired plugin state.
+ - If C(latest) is set, a check for a newer version is performed on
+ every run. This is suitable for keeping the plugin up to date.
+ choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+ default: present
+ timeout:
+ type: int
+ description:
+ - Server connection timeout in seconds.
+ default: 30
+ updates_expiration:
+ type: int
+ description:
+ - Number of seconds after which a new copy of the I(update-center.json)
+ file is downloaded. This is used to avoid the need to download the
+ plugin to calculate its checksum when C(latest) is specified.
+ - Set it to C(0) if no cache file should be used. In that case, the
+ plugin file will always be downloaded to calculate its checksum when
+ C(latest) is specified.
+ default: 86400
+ updates_url:
+ type: str
+ description:
+ - URL of the Update Centre.
+ - Used as the base URL to download the plugins and the
+ I(update-center.json) JSON file.
+ default: https://updates.jenkins.io
+ url:
+ type: str
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ version:
+ type: str
+ description:
+ - Plugin version number.
+ - If this option is specified, all plugin dependencies must be installed
+ manually.
+ - It might take longer to verify that the correct version is installed.
+ This is especially true if a specific version number is specified.
+ - Quote the version to prevent it from being interpreted as a float. For
+ example, the unquoted C(1.20) would become C(1.2).
+ with_dependencies:
+ description:
+ - Defines whether to install plugin dependencies.
+ - This option takes effect only if the I(version) is not defined.
+ type: bool
+ default: yes
+
+notes:
+ - Plugin installation should be run as root or as the user that owns the
+ plugin files on disk. Only when the plugin is not installed yet and no
+ version is specified is the installation performed through the API, which
+ requires only the web UI credentials.
+ - It is necessary to notify the handler or call the I(service) module to
+ restart the Jenkins service after a new plugin has been installed.
+ - Pinning works only if the plugin is installed and the Jenkins service was
+ successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the I(url)
+ parameter to point to the Jenkins server. The module must be used on the
+ host where Jenkins runs as it needs direct access to the plugin files.
+extends_documentation_fragment:
+ - url
+ - files
+'''
+
+EXAMPLES = '''
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+
+- name: Install plugin without its dependencies
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ with_dependencies: no
+
+- name: Make sure the plugin is always up-to-date
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: latest
+
+- name: Install specific version of the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ version: "1.15"
+
+- name: Pin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: pinned
+
+- name: Unpin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: unpinned
+
+- name: Enable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: enabled
+
+- name: Disable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: disabled
+
+- name: Uninstall plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ state: absent
+
+#
+# Example of how to authenticate
+#
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ url_username: admin
+ url_password: p4ssw0rd
+ url: http://localhost:8888
+
+#
+# Example of a Play which handles Jenkins restarts during the state changes
+#
+- name: Jenkins Master play
+ hosts: jenkins-master
+ vars:
+ my_jenkins_plugins:
+ token-macro:
+ enabled: yes
+ build-pipeline-plugin:
+ version: "1.4.9"
+ pinned: no
+ enabled: yes
+ tasks:
+ - name: Install plugins without a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ register: my_jenkins_plugin_unversioned
+ when: >
+ 'version' not in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Install plugins with a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ version: "{{ item.value['version'] }}"
+ register: my_jenkins_plugin_versioned
+ when: >
+ 'version' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Initiate the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+
+ - name: Check if restart is required by any of the versioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_versioned.results }}"
+
+ - name: Check if restart is required by any of the unversioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_unversioned.results }}"
+
+ - name: Restart Jenkins if required
+ ansible.builtin.service:
+ name: jenkins
+ state: restarted
+ when: jenkins_restart_required
+
+ - name: Wait for Jenkins to start up
+ ansible.builtin.uri:
+ url: http://localhost:8080
+ status_code: 200
+ timeout: 5
+ register: jenkins_service_status
+ # Keep trying for 5 mins in 5 sec intervals
+ retries: 60
+ delay: 5
+ until: >
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
+ when: jenkins_restart_required
+
+ - name: Reset the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+ when: jenkins_restart_required
+
+ - name: Plugin pinning
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
+ when: >
+ 'pinned' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Plugin enabling
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
+ when: >
+ 'enabled' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+'''
+
+RETURN = '''
+plugin:
+ description: plugin name
+ returned: success
+ type: str
+ sample: build-pipeline-plugin
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+from ansible.module_utils.basic import AnsibleModule, to_bytes
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils._text import to_native
+import base64
+import hashlib
+import io
+import json
+import os
+import tempfile
+import time
+
+
+class JenkinsPlugin(object):
+ def __init__(self, module):
+ # To be able to call fail_json
+ self.module = module
+
+ # Shortcuts for the params
+ self.params = self.module.params
+ self.url = self.params['url']
+ self.timeout = self.params['timeout']
+
+ # Crumb
+ self.crumb = {}
+ # Cookie jar for crumb session
+ self.cookies = None
+
+ if self._csrf_enabled():
+ self.cookies = cookiejar.LWPCookieJar()
+ self.crumb = self._get_crumb()
+
+ # Get list of installed plugins
+ self._get_installed_plugins()
+
+ def _csrf_enabled(self):
+ csrf_data = self._get_json_data(
+ "%s/%s" % (self.url, "api/json"), 'CSRF')
+
+ if 'useCrumbs' not in csrf_data:
+ self.module.fail_json(
+ msg="Required fields not found in the Crumbs response.",
+ details=csrf_data)
+
+ return csrf_data['useCrumbs']
+
+ def _get_json_data(self, url, what, **kwargs):
+ # Get the JSON data
+ r = self._get_url_data(url, what, **kwargs)
+
+ # Parse the JSON data
+ try:
+ json_data = json.loads(to_native(r.read()))
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot parse %s JSON data." % what,
+ details=to_native(e))
+
+ return json_data
+
+ def _get_url_data(
+ self, url, what=None, msg_status=None, msg_exception=None,
+ **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ # Get the URL data
+ try:
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, cookies=self.cookies,
+ headers=self.crumb, **kwargs)
+
+ if info['status'] != 200:
+ self.module.fail_json(msg=msg_status, details=info['msg'])
+ except Exception as e:
+ self.module.fail_json(msg=msg_exception, details=to_native(e))
+
+ return response
+
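+ # Jenkins CSRF protection ("crumbs"): a token issued by the crumbIssuer
+ # endpoint must be replayed as a request header, together with the session
+ # cookie it was issued for, on every state-changing request.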
+ def _get_crumb(self):
+ crumb_data = self._get_json_data(
+ "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+ if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+ ret = {
+ crumb_data['crumbRequestField']: crumb_data['crumb']
+ }
+ else:
+ self.module.fail_json(
+ msg="Required fields not found in the Crum response.",
+ details=crumb_data)
+
+ return ret
+
+ def _get_installed_plugins(self):
+ plugins_data = self._get_json_data(
+ "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+ 'list of plugins')
+
+ # Check if we got valid data
+ if 'plugins' not in plugins_data:
+ self.module.fail_json(msg="No valid plugin data found.")
+
+ # Record whether the plugin is installed, pinned and enabled
+ self.is_installed = False
+ self.is_pinned = False
+ self.is_enabled = False
+
+ for p in plugins_data['plugins']:
+ if p['shortName'] == self.params['name']:
+ self.is_installed = True
+
+ if p['pinned']:
+ self.is_pinned = True
+
+ if p['enabled']:
+ self.is_enabled = True
+
+ break
+
+ def install(self):
+ changed = False
+ plugin_file = (
+ '%s/plugins/%s.jpi' % (
+ self.params['jenkins_home'],
+ self.params['name']))
+
+ if not self.is_installed and self.params['version'] in [None, 'latest']:
+ if not self.module.check_mode:
+ # Install the plugin (with dependencies)
+ install_script = (
+ 'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.deploy(); d.get();' % self.params['name'])
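+ # deploy() returns a Future inside the Groovy runtime; calling
+ # get() blocks until the installation finishes, so the scriptText
+ # request below only returns once the plugin is in place.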
+
+ if self.params['with_dependencies']:
+ install_script = (
+ 'Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.getNeededDependencies().each{it.deploy()}; %s' % (
+ self.params['name'], install_script))
+
+ script_data = {
+ 'script': install_script
+ }
+ data = urlencode(script_data)
+
+ # Send the installation request
+ r = self._get_url_data(
+ "%s/scriptText" % self.url,
+ msg_status="Cannot install plugin.",
+ msg_exception="Plugin installation has failed.",
+ data=data)
+
+ hpi_file = '%s/plugins/%s.hpi' % (
+ self.params['jenkins_home'],
+ self.params['name'])
+
+ if os.path.isfile(hpi_file):
+ os.remove(hpi_file)
+
+ changed = True
+ else:
+ # Check if the plugin directory exists
+ if not os.path.isdir(self.params['jenkins_home']):
+ self.module.fail_json(
+ msg="Jenkins home directory doesn't exist.")
+
+ checksum_old = None
+ if os.path.isfile(plugin_file):
+ # Compute the checksum of the currently installed plugin
+ with open(plugin_file, 'rb') as plugin_fh:
+ plugin_content = plugin_fh.read()
+ checksum_old = hashlib.sha1(plugin_content).hexdigest()
+
+ if self.params['version'] in [None, 'latest']:
+ # Take latest version
+ plugin_url = (
+ "%s/latest/%s.hpi" % (
+ self.params['updates_url'],
+ self.params['name']))
+ else:
+ # Take specific version
+ plugin_url = (
+ "{0}/download/plugins/"
+ "{1}/{2}/{1}.hpi".format(
+ self.params['updates_url'],
+ self.params['name'],
+ self.params['version']))
+
+ if (
+ self.params['updates_expiration'] == 0 or
+ self.params['version'] not in [None, 'latest'] or
+ checksum_old is None):
+
+ # Download the plugin file directly
+ r = self._download_plugin(plugin_url)
+
+ # Write downloaded plugin into file if checksums don't match
+ if checksum_old is None:
+ # No previously installed plugin
+ if not self.module.check_mode:
+ self._write_file(plugin_file, r)
+
+ changed = True
+ else:
+ # Read the downloaded data to compute its checksum
+ data = r.read()
+
+ # Compute the new checksum
+ checksum_new = hashlib.sha1(data).hexdigest()
+
+ # If the checksum is different from the currently installed
+ # plugin, store the new plugin
+ if checksum_old != checksum_new:
+ if not self.module.check_mode:
+ self._write_file(plugin_file, data)
+
+ changed = True
+ elif self.params['version'] == 'latest':
+ # Check for update from the updates JSON file
+ plugin_data = self._download_updates()
+
+ # If the latest version changed, download it
+ if checksum_old != to_bytes(plugin_data['sha1']):
+ if not self.module.check_mode:
+ r = self._download_plugin(plugin_url)
+ self._write_file(plugin_file, r)
+
+ changed = True
+
+ # Change file attributes if needed
+ if os.path.isfile(plugin_file):
+ params = {
+ 'dest': plugin_file
+ }
+ params.update(self.params)
+ file_args = self.module.load_file_common_arguments(params)
+
+ if not self.module.check_mode:
+ # Not sure how to run this in the check mode
+ changed = self.module.set_fs_attributes_if_different(
+ file_args, changed)
+ else:
+ # See the comment above
+ changed = True
+
+ return changed
+
+ def _download_updates(self):
+ updates_filename = 'jenkins-plugin-cache.json'
+ updates_dir = os.path.expanduser('~/.ansible/tmp')
+ updates_file = "%s/%s" % (updates_dir, updates_filename)
+ download_updates = True
+
+ # Check if we need to download new updates file
+ if os.path.isfile(updates_file):
+ # Get timestamp when the file was changed last time
+ ts_file = os.stat(updates_file).st_mtime
+ ts_now = time.time()
+
+ if ts_now - ts_file < self.params['updates_expiration']:
+ download_updates = False
+
+ updates_file_orig = updates_file
+
+ # Download the updates file if needed
+ if download_updates:
+ url = "%s/update-center.json" % self.params['updates_url']
+
+ # Get the data
+ r = self._get_url_data(
+ url,
+ msg_status="Remote updates not found.",
+ msg_exception="Updates download failed.")
+
+ # Write the updates file
+ update_fd, updates_file = tempfile.mkstemp()
+ os.write(update_fd, r.read())
+
+ try:
+ os.close(update_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot close the tmp updates file %s." % updates_file,
+ details=to_native(e))
+
+ # Open the updates file
+ try:
+ f = io.open(updates_file, encoding='utf-8')
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot open temporal updates file.",
+ details=to_native(e))
+
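+ # update-center.json is JSONP-wrapped ("updateCenter.post(" on the first
+ # line, the payload on the second), which is why only the second line is
+ # parsed as JSON below.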
+ i = 0
+ for line in f:
+ # Read only the second line
+ if i == 1:
+ try:
+ data = json.loads(line)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot load JSON data from the tmp updates file.",
+ details=to_native(e))
+
+ break
+
+ i += 1
+
+ # Move the updates file to the right place if we could read it
+ if download_updates:
+ # Make sure the destination directory exists
+ if not os.path.isdir(updates_dir):
+ try:
+ os.makedirs(updates_dir, int('0700', 8))
+ except OSError as e:
+ self.module.fail_json(
+ msg="Cannot create temporal directory.",
+ details=to_native(e))
+
+ self.module.atomic_move(updates_file, updates_file_orig)
+
+ # Check if we have the plugin data available
+ if 'plugins' not in data or self.params['name'] not in data['plugins']:
+ self.module.fail_json(
+ msg="Cannot find plugin data in the updates file.")
+
+ return data['plugins'][self.params['name']]
+
+ def _download_plugin(self, plugin_url):
+ # Download the plugin
+ r = self._get_url_data(
+ plugin_url,
+ msg_status="Plugin not found.",
+ msg_exception="Plugin download failed.")
+
+ return r
+
+ def _write_file(self, f, data):
+ # Store the plugin into a temp file and then move it
+ tmp_f_fd, tmp_f = tempfile.mkstemp()
+
+ if isinstance(data, (text_type, binary_type)):
+ os.write(tmp_f_fd, data)
+ else:
+ os.write(tmp_f_fd, data.read())
+
+ try:
+ os.close(tmp_f_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg='Cannot close the temporary plugin file %s.' % tmp_f,
+ details=to_native(e))
+
+ # Move the file onto the right place
+ self.module.atomic_move(tmp_f, f)
+
+ def uninstall(self):
+ changed = False
+
+ # Perform the action
+ if self.is_installed:
+ if not self.module.check_mode:
+ self._pm_query('doUninstall', 'Uninstallation')
+
+ changed = True
+
+ return changed
+
+ def pin(self):
+ return self._pinning('pin')
+
+ def unpin(self):
+ return self._pinning('unpin')
+
+ def _pinning(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'pin' and not self.is_pinned or
+ action == 'unpin' and self.is_pinned):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(action, "%sning" % action.capitalize())
+
+ changed = True
+
+ return changed
+
+ def enable(self):
+ return self._enabling('enable')
+
+ def disable(self):
+ return self._enabling('disable')
+
+ def _enabling(self, action):
+ changed = False
+
+ # Check if the plugin is enabled/disabled
+ if (
+ action == 'enable' and not self.is_enabled or
+ action == 'disable' and self.is_enabled):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(
+ "make%sd" % action.capitalize(),
+ "%sing" % action[:-1].capitalize())
+
+ changed = True
+
+ return changed
+
+ def _pm_query(self, action, msg):
+ url = "%s/pluginManager/plugin/%s/%s" % (
+ self.params['url'], self.params['name'], action)
+
+ # Send the request
+ self._get_url_data(
+ url,
+ msg_status="Plugin not found. %s" % url,
+ msg_exception="%s has failed." % msg)
+
+
+def main():
+ # Module arguments
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ group=dict(type='str', default='jenkins'),
+ jenkins_home=dict(type='path', default='/var/lib/jenkins'),
+ mode=dict(default='0644', type='raw'),
+ name=dict(type='str', required=True),
+ owner=dict(type='str', default='jenkins'),
+ state=dict(
+ choices=[
+ 'present',
+ 'absent',
+ 'pinned',
+ 'unpinned',
+ 'enabled',
+ 'disabled',
+ 'latest'],
+ default='present'),
+ timeout=dict(default=30, type="int"),
+ updates_expiration=dict(default=86400, type="int"),
+ updates_url=dict(default='https://updates.jenkins.io'),
+ url=dict(default='http://localhost:8080'),
+ url_password=dict(no_log=True),
+ version=dict(),
+ with_dependencies=dict(default=True, type='bool'),
+ )
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Force basic authentication
+ module.params['force_basic_auth'] = True
+
+ # Convert timeout to float
+ try:
+ module.params['timeout'] = float(module.params['timeout'])
+ except ValueError as e:
+ module.fail_json(
+ msg='Cannot convert %s to float.' % module.params['timeout'],
+ details=to_native(e))
+
+ # Set version to latest if state is latest
+ if module.params['state'] == 'latest':
+ module.params['state'] = 'present'
+ module.params['version'] = 'latest'
+
+ # Create some shortcuts
+ name = module.params['name']
+ state = module.params['state']
+
+ # Initial change state of the task
+ changed = False
+
+ # Instantiate the JenkinsPlugin object
+ jp = JenkinsPlugin(module)
+
+ # Perform action depending on the requested state
+ if state == 'present':
+ changed = jp.install()
+ elif state == 'absent':
+ changed = jp.uninstall()
+ elif state == 'pinned':
+ changed = jp.pin()
+ elif state == 'unpinned':
+ changed = jp.unpin()
+ elif state == 'enabled':
+ changed = jp.enable()
+ elif state == 'disabled':
+ changed = jp.disable()
+
+ # Print status of the change
+ module.exit_json(changed=changed, plugin=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py
new file mode 100644
index 00000000..68f06c27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# encoding: utf-8
+
+# (c) 2016, James Hogarth <james.hogarth@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: James Hogarth (@hogarthj)
+module: jenkins_script
+short_description: Executes a Groovy script in the Jenkins instance
+description:
+ - The C(jenkins_script) module takes a script plus a dict of values
+ to use within the script and returns the result of the script being run.
+
+options:
+ script:
+ type: str
+ description:
+ - The Groovy script to be executed.
+ It is passed as a string.Template if I(args) is defined.
+ required: true
+ url:
+ type: str
+ description:
+ - The Jenkins server to execute the script against. The default is a local
+ Jenkins instance that is not being proxied through a webserver.
+ default: http://localhost:8080
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+ This should only be set to C(no) when used on personally controlled sites
+ using self-signed certificates, as it avoids verifying the source site.
+ type: bool
+ default: 'yes'
+ user:
+ type: str
+ description:
+ - The username to connect to the Jenkins server with.
+ password:
+ type: str
+ description:
+ - The password to connect to the Jenkins server with.
+ timeout:
+ type: int
+ description:
+ - The request timeout in seconds.
+ default: 10
+ args:
+ type: dict
+ description:
+ - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
+
+notes:
+ - Since the script can do anything, this module does not report changes.
+ Because an arbitrary script is being run, it is important to set C(changed_when)
+ so the Ansible output is clear about any alterations made.
+
+'''
+
+EXAMPLES = '''
+- name: Obtaining a list of plugins
+ community.general.jenkins_script:
+ script: 'println(Jenkins.instance.pluginManager.plugins)'
+ user: admin
+ password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+ ansible.builtin.set_fact:
+ setmaster_mode: |
+ import jenkins.model.*
+ instance = Jenkins.getInstance()
+ instance.setMode(${jenkins_mode})
+ instance.save()
+
+- name: Use the variable as the script
+ community.general.jenkins_script:
+ script: "{{ setmaster_mode }}"
+ args:
+ jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+ community.general.jenkins_script:
+ script: "println(Jenkins.instance.pluginManager.plugins)"
+ user: admin
+ password: admin
+ url: https://localhost
+ validate_certs: no
+'''
+
+RETURN = '''
+output:
+ description: Result of script
+ returned: success
+ type: str
+ sample: 'Result: true'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_csrf_protection_enabled(module):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/api/json',
+ timeout=module.params['timeout'],
+ method='GET')
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content).get('useCrumbs', False)
+
+
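+# When CSRF protection is on, Jenkins expects the crumb obtained here to be
+# sent back as a request header along with the cookie jar that the crumb was
+# issued against; see the headers/cookies wiring in main() below.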
+def get_crumb(module, cookies):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/crumbIssuer/api/json',
+ method='GET',
+ timeout=module.params['timeout'],
+ cookies=cookies)
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ script=dict(required=True, type="str"),
+ url=dict(required=False, type="str", default="http://localhost:8080"),
+ validate_certs=dict(required=False, type="bool", default=True),
+ user=dict(required=False, type="str", default=None),
+ password=dict(required=False, no_log=True, type="str", default=None),
+ timeout=dict(required=False, type="int", default=10),
+ args=dict(required=False, type="dict", default=None)
+ )
+ )
+
+ if module.params['user'] is not None:
+ if module.params['password'] is None:
+ module.fail_json(msg="password required when user provided", output='')
+ module.params['url_username'] = module.params['user']
+ module.params['url_password'] = module.params['password']
+ module.params['force_basic_auth'] = True
+
+ if module.params['args'] is not None:
+ from string import Template
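+ # string.Template substitutes ${key} placeholders; substitute()
+ # raises KeyError when the script references a key that is missing
+ # from args, which is reported below.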
+ try:
+ script_contents = Template(module.params['script']).substitute(module.params['args'])
+ except KeyError as err:
+ module.fail_json(msg="Error with templating variable: %s" % err, output='')
+ else:
+ script_contents = module.params['script']
+
+ headers = {}
+ cookies = None
+ if is_csrf_protection_enabled(module):
+ cookies = cookiejar.LWPCookieJar()
+ crumb = get_crumb(module, cookies)
+ headers = {crumb['crumbRequestField']: crumb['crumb']}
+
+ resp, info = fetch_url(module,
+ module.params['url'] + "/scriptText",
+ data=urlencode({'script': script_contents}),
+ headers=headers,
+ method="POST",
+ timeout=module.params['timeout'],
+ cookies=cookies)
+
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ result = to_native(resp.read())
+
+ if 'Exception:' in result and 'at java.lang.Thread' in result:
+ module.fail_json(msg="script failed with stacktrace:\n " + result, output='')
+
+ module.exit_json(
+ output=result,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py
new file mode 100644
index 00000000..d10be9ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Steve Smith <ssmith@atlassian.com>
+# Atlassian open-source approval reference OSR-76.
+#
+# (c) 2020, Per Abildgaard Toft <per@minfejl.dk> Search and update function
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: jira
+short_description: Create and modify issues in a JIRA instance
+description:
+ - Create and modify issues in a JIRA instance.
+
+options:
+ uri:
+ type: str
+ required: true
+ description:
+ - Base URI for the JIRA instance.
+
+ operation:
+ type: str
+ required: true
+ aliases: [ command ]
+ choices: [ comment, create, edit, fetch, link, search, transition, update ]
+ description:
+ - The operation to perform.
+
+ username:
+ type: str
+ required: true
+ description:
+ - The username to log in with.
+
+ password:
+ type: str
+ required: true
+ description:
+ - The password to log in with.
+
+ project:
+ type: str
+ required: false
+ description:
+ - The project for this operation. Required for issue creation.
+
+ summary:
+ type: str
+ required: false
+ description:
+ - The issue summary, where appropriate.
+
+ description:
+ type: str
+ required: false
+ description:
+ - The issue description, where appropriate.
+
+ issuetype:
+ type: str
+ required: false
+ description:
+ - The issue type, for issue creation.
+
+ issue:
+ type: str
+ required: false
+ description:
+ - An existing issue key to operate on.
+ aliases: ['ticket']
+
+ comment:
+ type: str
+ required: false
+ description:
+ - The comment text to add.
+
+ status:
+ type: str
+ required: false
+ description:
+ - The desired status; only relevant for the transition operation.
+
+ assignee:
+ type: str
+ required: false
+ description:
+ - Sets the assignee on create or transition operations. Note that not all transitions allow this.
+
+ linktype:
+ type: str
+ required: false
+ description:
+ - Set the type of link, when I(operation=link) is selected.
+
+ inwardissue:
+ type: str
+ required: false
+ description:
+ - Set the issue from which the link will be created.
+
+ outwardissue:
+ type: str
+ required: false
+ description:
+ - Set the issue to which the link will be created.
+
+ fields:
+ type: dict
+ required: false
+ description:
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
+ (possibly after merging with other required data, as when passed to create). See examples for more information,
+ and the JIRA REST API for the structure required for various fields.
+
+ jql:
+ required: false
+ description:
+ - Query JIRA in JQL syntax, e.g. 'CMDB Hostname'='test.example.com'.
+ type: str
+ version_added: '0.2.0'
+
+ maxresults:
+ required: false
+ description:
+ - Limit the result of I(operation=search). If no value is specified, the default JIRA limit will be used.
+ - Used when I(operation=search) only, ignored otherwise.
+ type: int
+ version_added: '0.2.0'
+
+ timeout:
+ type: float
+ required: false
+ description:
+ - Set timeout, in seconds, on requests to JIRA API.
+ default: 10
+
+ validate_certs:
+ required: false
+ description:
+ - Require valid SSL certificates (set to C(false) if you would like to use self-signed certificates).
+ default: true
+ type: bool
+
+notes:
+ - "Currently this only works with basic-auth."
+
+author:
+- "Steve Smith (@tarka)"
+- "Per Abildgaard Toft (@pertoft)"
+"""
+
+EXAMPLES = r"""
+# Create a new issue and add a comment to it:
+- name: Create an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Example Issue
+ description: Created using Ansible
+ issuetype: Task
+ args:
+ fields:
+ customfield_13225: "test"
+ customfield_12931: '{"value": "Test"}'
+ register: issue
+
+- name: Comment on issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key}}'
+ operation: edit
+ assignee: ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Assigned issue
+ description: Created and assigned using Ansible
+ issuetype: Task
+ assignee: ssmith
+
+# Edit an issue
+- name: Set the labels on an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: edit
+ args:
+ fields:
+ labels:
+ - autocreated
+ - ansible
+
+# Updating a field using operations: add, set & remove
+- name: Change the value of a Select dropdown
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: update
+ args:
+ fields:
+ customfield_12931: [ {'set': {'value': 'Virtual'}} ]
+ customfield_13820: [ {'set': {'value':'Manually'}} ]
+ register: cmdb_issue
+ delegate_to: localhost
+
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: fetch
+ issue: ANS-63
+ register: issue
+
+# Search for an issue
+# You can limit the search to specific fields by adding optional args. Note: args must be a dict, hence lastViewed: null.
+- name: Search for an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: search
+ maxresults: 10
+ jql: project=cmdb AND cf[13225]="test"
+ args:
+ fields:
+ lastViewed: null
+ register: issue
+
+- name: Create a unix account for the reporter
+ become: true
+ user:
+ name: '{{ issue.meta.fields.creator.name }}'
+ comment: '{{ issue.meta.fields.creator.displayName }}'
+
+# You can get list of valid linktypes at /rest/api/2/issueLinkType
+# url of your jira installation.
+- name: Create link from HSP-1 to MKY-1
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ operation: link
+ linktype: Relates
+ inwardissue: HSP-1
+ outwardissue: MKY-1
+
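+# A sketch of listing the valid link types with ansible.builtin.uri,
+# assuming basic-auth is permitted for this endpoint:
+- name: List available issue link types
+ ansible.builtin.uri:
+ url: '{{ server }}/rest/api/2/issueLinkType'
+ user: '{{ user }}'
+ password: '{{ pass }}'
+ force_basic_auth: true
+ return_content: true
+ register: linktypes
+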
+# Transition an issue by target status
+- name: Close the issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: transition
+ status: Done
+ args:
+ fields:
+ customfield_14321: [ {'set': {'value': 'Value of Select' }} ]
+ comment: [ { 'add': { 'body' : 'Test' } }]
+
+"""
+
+import base64
+import json
+import sys
+import traceback
+
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+
+from ansible.module_utils._text import to_text, to_bytes, to_native
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(url, user, passwd, timeout, data=None, method=None):
+ if data:
+ data = json.dumps(data)
+
+ # NOTE: fetch_url uses a password manager, which follows the
+ # standard request-then-challenge basic-auth semantics. However as
+ # JIRA allows some unauthorised operations it doesn't necessarily
+ # send the challenge, so the request occurs as the anonymous user,
+ # resulting in unexpected results. To work around this we manually
+ # inject the basic-auth header up-front to ensure that JIRA treats
+ # the requests as authorized for this user.
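+ # For example (illustrative credentials), user 'admin' with password
+ # 'secret' yields 'Authorization: Basic YWRtaW46c2VjcmV0', i.e. the
+ # base64 encoding of 'admin:secret'.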
+ auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
+ response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
+ headers={'Content-Type': 'application/json',
+ 'Authorization': "Basic %s" % auth})
+
+ if info['status'] not in (200, 201, 204):
+ error = None
+ try:
+ error = json.loads(info['body'])
+ except Exception:
+ module.fail_json(msg=to_native(info['body']), exception=traceback.format_exc())
+ if error:
+ msg = []
+ for key in ('errorMessages', 'errors'):
+ if error.get(key):
+ msg.append(to_native(error[key]))
+ if msg:
+ module.fail_json(msg=', '.join(msg))
+ module.fail_json(msg=to_native(error))
+ # Fallback: print the body if it can't be decoded
+ module.fail_json(msg=to_native(info['body']))
+
+ body = response.read()
+
+ if body:
+ return json.loads(to_text(body, errors='surrogate_or_strict'))
+ return {}
+
+
+def post(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='POST')
+
+
+def put(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='PUT')
+
+
+def get(url, user, passwd, timeout):
+ return request(url, user, passwd, timeout)
+
+
+def create(restbase, user, passwd, params):
+ createfields = {
+ 'project': {'key': params['project']},
+ 'summary': params['summary'],
+ 'issuetype': {'name': params['issuetype']}}
+
+ if params['description']:
+ createfields['description'] = params['description']
+
+ # Merge in any additional or overridden fields
+ if params['fields']:
+ createfields.update(params['fields'])
+
+ data = {'fields': createfields}
+
+ url = restbase + '/issue/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def comment(restbase, user, passwd, params):
+ data = {
+ 'body': params['comment']
+ }
+ url = restbase + '/issue/' + params['issue'] + '/comment'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def edit(restbase, user, passwd, params):
+ data = {
+ 'fields': params['fields']
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def update(restbase, user, passwd, params):
+ data = {
+ "update": params['fields'],
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def fetch(restbase, user, passwd, params):
+ url = restbase + '/issue/' + params['issue']
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def search(restbase, user, passwd, params):
+ url = restbase + '/search?jql=' + pathname2url(params['jql'])
+ if params['fields']:
+ fields = params['fields'].keys()
+ url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields])
+ if params['maxresults']:
+ url = url + '&maxResults=' + str(params['maxresults'])
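+ # For illustration, jql='project=ANS' with fields={'lastViewed': None}
+ # and maxresults=10 would yield (values are examples only):
+ # <restbase>/search?jql=project%3DANS&fields=lastViewed&maxResults=10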
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def transition(restbase, user, passwd, params):
+ # Find the transition id
+ turl = restbase + '/issue/' + params['issue'] + "/transitions"
+ tmeta = get(turl, user, passwd, params['timeout'])
+
+ target = params['status']
+ tid = None
+ for t in tmeta['transitions']:
+ if t['name'] == target:
+ tid = t['id']
+ break
+
+ if not tid:
+ raise ValueError("Failed find valid transition for '%s'" % target)
+
+ # Perform it
+ url = restbase + '/issue/' + params['issue'] + "/transitions"
+ data = {'transition': {"id": tid},
+ 'update': params['fields']}
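+ # For illustration, with a transition named 'Done' resolved to id '31'
+ # and a comment update (values are examples only), the body sent is:
+ # {"transition": {"id": "31"},
+ # "update": {"comment": [{"add": {"body": "Test"}}]}}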
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def link(restbase, user, passwd, params):
+ data = {
+ 'type': {'name': params['linktype']},
+ 'inwardIssue': {'key': params['inwardissue']},
+ 'outwardIssue': {'key': params['outwardissue']},
+ }
+
+ url = restbase + '/issueLink/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def main():
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ uri=dict(type='str', required=True),
+ operation=dict(type='str', choices=['create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'],
+ aliases=['command'], required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ project=dict(type='str'),
+ summary=dict(type='str'),
+ description=dict(type='str'),
+ issuetype=dict(type='str'),
+ issue=dict(type='str', aliases=['ticket']),
+ comment=dict(type='str'),
+ status=dict(type='str'),
+ assignee=dict(type='str'),
+ fields=dict(default={}, type='dict'),
+ linktype=dict(type='str'),
+ inwardissue=dict(type='str'),
+ outwardissue=dict(type='str'),
+ jql=dict(type='str'),
+ maxresults=dict(type='int'),
+ timeout=dict(type='float', default=10),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_if=(
+ ('operation', 'create', ['project', 'issuetype', 'summary']),
+ ('operation', 'comment', ['issue', 'comment']),
+ ('operation', 'fetch', ['issue']),
+ ('operation', 'transition', ['issue', 'status']),
+ ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
+ ('operation', 'search', ['jql']),
+ ),
+ supports_check_mode=False
+ )
+
+ op = module.params['operation']
+
+ # Handle rest of parameters
+ uri = module.params['uri']
+ user = module.params['username']
+ passwd = module.params['password']
+ if module.params['assignee']:
+ module.params['fields']['assignee'] = {'name': module.params['assignee']}
+
+ if not uri.endswith('/'):
+ uri = uri + '/'
+ restbase = uri + 'rest/api/2'
+
+ # Dispatch
+ try:
+
+ # Lookup the corresponding method for this operation. This is
+ # safe as AnsibleModule rejects any operation not listed in choices.
+ thismod = sys.modules[__name__]
+ method = getattr(thismod, op)
+
+ changed, ret = method(restbase, user, passwd, module.params)
+
+ except Exception as e:
+ return module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, meta=ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py
new file mode 100644
index 00000000..3a68f8da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nginx_status_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.nginx_status_info) instead.
+short_description: Retrieve nginx status facts.
+description:
+ - Gathers facts from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = '''
+# Gather status facts from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+
+# Gather status facts from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+ timeout: 20
+'''
+
+RETURN = '''
+---
+nginx_status_facts.active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+nginx_status_facts.accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+nginx_status_facts.reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+nginx_status_facts.writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+nginx_status_facts.waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+nginx_status_facts.data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusFacts(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'nginx_status_facts': {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['nginx_status_facts']['data'] = data
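+ # The stub_status body being parsed has the form:
+ # Active connections: 2340
+ # server accepts handled requests
+ # 81769947 81769947 144332345
+ # Reading: 0 Writing: 241 Waiting: 2092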
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['nginx_status_facts']['active_connections'] = int(match.group(1))
+ result['nginx_status_facts']['accepts'] = int(match.group(2))
+ result['nginx_status_facts']['handled'] = int(match.group(3))
+ result['nginx_status_facts']['requests'] = int(match.group(4))
+ result['nginx_status_facts']['reading'] = int(match.group(5))
+ result['nginx_status_facts']['writing'] = int(match.group(6))
+ result['nginx_status_facts']['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_facts = NginxStatusFacts().run()
+ result = dict(changed=False, ansible_facts=nginx_status_facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py
new file mode 100644
index 00000000..a13a57a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: nginx_status_info
+short_description: Retrieve information on nginx status.
+description:
+ - Gathers information from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = r'''
+# Gather status info from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ register: result
+
+# Gather status info from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ timeout: 20
+ register: result
+'''
+
+RETURN = r'''
+---
+active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusInfo(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['data'] = data
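+ # The stub_status body has the same layout as shown in the RETURN
+ # sample above; the regex below captures its seven counters.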
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['active_connections'] = int(match.group(1))
+ result['accepts'] = int(match.group(2))
+ result['handled'] = int(match.group(3))
+ result['requests'] = int(match.group(4))
+ result['reading'] = int(match.group(5))
+ result['writing'] = int(match.group(6))
+ result['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_info = NginxStatusInfo().run()
+ module.exit_json(changed=False, **nginx_status_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py
new file mode 100644
index 00000000..1caa159b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies.
+description:
+ - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+ - Create or remove the ACL policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Sets the ACL policy name.
+ required: True
+ url:
+ type: str
+ description:
+ - Sets the rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+ - Sets the API version used by module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ project:
+ type: str
+ description:
+ - Sets the project which receives the ACL policy.
+ - If unset, it's a system ACL policy.
+ policy:
+ type: str
+ description:
+ - Sets the ACL policy content.
+ - ACL policy content is a YAML object as described in U(http://rundeck.org/docs/man5/aclpolicy.html).
+ - It can be a YAML string or a pure Ansible inventory YAML object.
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create or update a rundeck ACL policy in project Ansible
+ community.general.rundeck_acl_policy:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+ project: "Ansible"
+ policy:
+ description: "my policy"
+ context:
+ application: rundeck
+ for:
+ project:
+ - allow: read
+ by:
+ group: "build"
+
+- name: Remove a rundeck system policy
+ community.general.rundeck_acl_policy:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+ description: Dictionary containing ACL policy information before modification.
+ returned: success
+ type: dict
+after:
+ description: Dictionary containing ACL policy information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils._text import to_text
+import json
+import re
+
+
+class RundeckACLManager:
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
+ if resp is not None:
+ resp = resp.read()
+ if resp != b"":
+ try:
+ json_resp = json.loads(to_text(resp, errors='surrogate_or_strict'))
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (str(e), resp))
+ return resp, info
+
+ def get_acl(self):
+ resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
+ return resp
+
+ def create_or_update_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+ # If in check mode don't create project, simulate a fake project creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="POST",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 409:
+ self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_acl())
+ else:
+ if facts["contents"] == self.module.params["policy"]:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before=facts, after=facts)
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="PUT",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 200:
+ self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 404:
+ self.module.fail_json(msg="ACL %s doesn't exists. Cannot update." % self.module.params["name"])
+
+ def remove_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ policy=dict(type='str'),
+ project=dict(type='str'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['state', 'present', ['policy']],
+ ],
+ supports_check_mode=True
+ )
+
+ if not bool(re.match("[a-zA-Z0-9,.+_-]+", module.params["name"])):
+ module.fail_json(msg="Name contains forbidden characters. The policy can contain the characters: a-zA-Z0-9,.+_-")
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckACLManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_acl()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_acl()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py
new file mode 100644
index 00000000..5c846482
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Ansible module to manage rundeck projects
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_project
+
+short_description: Manage Rundeck projects.
+description:
+ - Create and remove Rundeck projects through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+ - Create or remove Rundeck project.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Sets the project name.
+ required: True
+ url:
+ type: str
+ description:
+ - Sets the rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+ - Sets the API version used by module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create a rundeck project
+ community.general.rundeck_project:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+
+- name: Remove a rundeck project
+ community.general.rundeck_project:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+ description: Dictionary containing project information before modification.
+ returned: success
+ type: dict
+after:
+ description: Dictionary containing project information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+import json
+
+
+class RundeckProjectManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
+ if resp is not None:
+ resp = resp.read()
+ if resp != "":
+ try:
+ json_resp = json.loads(resp)
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (to_native(e), resp))
+ return resp, info
+
+ def get_project_facts(self):
+ resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
+ return resp
+
+ def create_or_update_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ # If in check mode don't create project, simulate a fake project creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
+
+ resp, info = self.request_rundeck_api("projects", method="POST", data={
+ "name": self.module.params["name"],
+ "config": {}
+ })
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_project_facts())
+ else:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ def remove_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckProjectManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_project()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_project()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py
new file mode 100644
index 00000000..b4aca155
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy an aaa group object in Sophos UTM.
+
+description:
+ - Create, update or destroy an aaa group object in Sophos UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ type: str
+ required: true
+ adirectory_groups:
+ description:
+ - List of adirectory group strings.
+ type: list
+ elements: str
+ adirectory_groups_sids:
+ description:
+ - Dictionary of group sids.
+ type: dict
+ backend_match:
+ description:
+ - The backend for the group.
+ type: str
+ choices:
+ - none
+ - adirectory
+ - edirectory
+ - radius
+ - tacacs
+ - ldap
+ default: none
+ comment:
+ description:
+ - Comment that describes the AAA group.
+ type: str
+ default: ''
+ dynamic:
+ description:
+ - Group type. The group is static if C(none) is selected.
+ type: str
+ default: none
+ choices:
+ - none
+ - ipsec_dn
+ - directory_groups
+ edirectory_groups:
+ description:
+ - List of edirectory group strings.
+ type: list
+ elements: str
+ ipsec_dn:
+ description:
+ - The ipsec dn string.
+ type: str
+ ldap_attribute:
+ description:
+ - The ldap attribute to check against.
+ type: str
+ ldap_attribute_value:
+ description:
+ - The ldap attribute value to check against.
+ type: str
+ members:
+ description:
+ - A list of user ref names (aaa/user).
+ type: list
+ elements: str
+ default: []
+ network:
+ description:
+ - The network reference name. The object contains the known IP addresses for the authentication object (network/aaa).
+ type: str
+ default: ""
+ radius_groups:
+ description:
+ - A list of radius group strings.
+ type: list
+ elements: str
+ default: []
+ tacacs_groups:
+ description:
+ - A list of tacacs group strings.
+ type: list
+ elements: str
+ default: []
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ backend_match: ldap
+ dynamic: directory_groups
+ ldap_attribute: memberof
+ ldap_attribute_value: "cn=groupname,ou=Groups,dc=mydomain,dc=com"
+ network: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created.
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object.
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked.
+ type: bool
+ _type:
+ description: The type of the object.
+ type: str
+ name:
+ description: The name of the object.
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups.
+ type: list
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS.
+ type: list
+ backend_match:
+ description: The backend to use.
+ type: str
+ comment:
+ description: The comment string.
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group.
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups.
+ type: list
+ ipsec_dn:
+ description: ipsec_dn identifier to match.
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against.
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against.
+ type: str
+ members:
+ description: List of member identifiers of the group.
+ type: list
+ network:
+ description: The identifier of the network (network/aaa).
+ type: str
+ radius_group:
+ description: The radius group identifier.
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier.
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic",
+ "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members",
+ "network", "radius_groups", "tacacs_groups"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ adirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ adirectory_groups_sids=dict(type='dict', required=False, default={}),
+ backend_match=dict(type='str', required=False, default="none",
+ choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]),
+ comment=dict(type='str', required=False, default=""),
+ dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]),
+ edirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ ipsec_dn=dict(type='str', required=False, default=""),
+ ldap_attribute=dict(type='str', required=False, default=""),
+ ldap_attribute_value=dict(type='str', required=False, default=""),
+ members=dict(type='list', elements='str', required=False, default=[]),
+ network=dict(type='str', required=False, default=""),
+ radius_groups=dict(type='list', elements='str', required=False, default=[]),
+ tacacs_groups=dict(type='list', elements='str', required=False, default=[]),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py
new file mode 100644
index 00000000..6d230c1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for an aaa group entry in Sophos UTM
+
+description:
+ - Get info for an aaa group entry in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM aaa_group
+ community.general.utm_aaa_group_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups
+ type: list
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS
+ type: list
+ backend_match:
+ description: The backend to use
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups
+ type: list
+ ipsec_dn:
+ description: ipsec_dn identifier to match
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against
+ type: str
+ members:
+ description: List of member identifiers of the group
+ type: list
+ network:
+ description: The identifier of the network (network/aaa)
+ type: str
+ radius_group:
+ description: The radius group identifier
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py
new file mode 100644
index 00000000..e940f416
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy ca host_key_cert entry in Sophos UTM
+
+description:
+ - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ type: str
+ ca:
+ description:
+ - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ required: true
+ type: str
+ meta:
+ description:
+ - A reference to an existing utm_ca_meta_x509 object.
+ required: true
+ type: str
+ certificate:
+ description:
+ - The certificate in PEM format.
+ required: true
+ type: str
+ comment:
+ description:
+ - Optional comment string.
+ type: str
+ encrypted:
+ description:
+ - Optionally enable encryption.
+ default: False
+ type: bool
+ key:
+ description:
+ - Optional private key in PEM format.
+ type: str
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ ca: REF_ca/signing_ca_OBJECT_STRING
+ meta: REF_ca/meta_x509_OBJECT_STRING
+ certificate: |
+ --- BEGIN CERTIFICATE ---
+ . . .
+ . . .
+ . . .
+ --- END CERTIFICATE ---
+ state: present
+
+- name: Remove a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: absent
+
+- name: Read a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ ca=dict(type='str', required=True),
+ meta=dict(type='str', required=True),
+ certificate=dict(type='str', required=True),
+ comment=dict(type='str', required=False),
+ encrypted=dict(type='bool', required=False, default=False),
+ key=dict(type='str', required=False, no_log=True),
+ )
+ )
+ try:
+ # This is needed because the bool value only accepts int values in the backend
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py
new file mode 100644
index 00000000..ad315df9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert_info
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: Get info for a ca host_key_cert entry in Sophos UTM
+
+description:
+ - Get info for a ca host_key_cert entry in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for a ca host_key_cert entry
+ community.general.utm_ca_host_key_cert_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py
new file mode 100644
index 00000000..1f080abf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_dns_host
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy dns entry in Sophos UTM
+
+description:
+ - Create, update or destroy a dns entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The IPv4 address of the entry. Can be left empty for automatic resolving.
+ default: 0.0.0.0
+ address6:
+ type: str
+ description:
+ - The IPv6 address of the entry. Can be left empty for automatic resolving.
+ default: "::"
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the dns host object.
+ hostname:
+ type: str
+ description:
+ - The hostname for the dns host object.
+ interface:
+ type: str
+ description:
+ - The reference name of the interface to use. If not provided, the default interface will be used.
+ resolved:
+ description:
+ - Whether the hostname's IPv4 address is already resolved or not.
+ default: False
+ type: bool
+ resolved6:
+ description:
+ - Whether the hostname's IPv6 address is already resolved or not.
+ default: False
+ type: bool
+ timeout:
+ type: int
+ description:
+ - The timeout for the UTM to resolve the IP address for the hostname again.
+ default: 0
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ hostname: testentry.some.tld
+ state: present
+
+- name: Remove UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ipv4 address of the object
+ type: str
+ address6:
+ description: The ipv6 address of the object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ hostname:
+ description: The hostname of the object
+ type: str
+ interface:
+ description: The reference name of the interface the object is associated with
+ type: str
+ resolved:
+ description: Whether the ipv4 address is resolved or not
+ type: bool
+ resolved6:
+ description: Whether the ipv6 address is resolved or not
+ type: bool
+ timeout:
+ description: The timeout until a new resolving will be attempted
+ type: int
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/dns_host"
+ key_to_check_for_changes = ["comment", "hostname", "interface"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=False, default='0.0.0.0'),
+ address6=dict(type='str', required=False, default='::'),
+ comment=dict(type='str', required=False, default=""),
+ hostname=dict(type='str', required=False),
+ interface=dict(type='str', required=False, default=""),
+ resolved=dict(type='bool', required=False, default=False),
+ resolved6=dict(type='bool', required=False, default=False),
+ timeout=dict(type='int', required=False, default=0),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py
new file mode 100644
index 00000000..ecf08871
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Create, update or destroy network/interface_address object
+
+description:
+ - Create, update or destroy a network/interface_address object in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The ip4 address of the network/interface_address object.
+ required: true
+ address6:
+ type: str
+ description:
+ - The ip6 address of the network/interface_address object.
+ required: false
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ resolved:
+ type: bool
+ description:
+ - Whether or not the IPv4 address is resolved
+ resolved6:
+ type: bool
+ description:
+ - Whether or not the IPv6 address is resolved
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a network interface address
+ community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: present
+
+- name: Remove a network interface address
+ community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The IPv4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The IPv6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the IPv4 address is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the IPv6 address is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = ["comment", "address"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=True),
+ comment=dict(type='str', required=False, default=""),
+ address6=dict(type='str', required=False),
+ resolved=dict(type='bool', required=False),
+ resolved6=dict(type='bool', required=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py
new file mode 100644
index 00000000..c1d0f7d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address_info
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Get info for a network/interface_address object
+
+description:
+ - Get info for a network/interface_address object in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get network interface address info
+ community.general.utm_network_interface_address_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+"""
+
+RETURN = """
+result:
+ description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The IPv4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The IPv6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the IPv4 address is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the IPv6 address is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py
new file mode 100644
index 00000000..caa0085c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_auth_profile
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy reverse_proxy auth_profile entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ aaa:
+ type: list
+ elements: str
+ description:
+ - List of references to utm_aaa objects (allowed users or groups)
+ required: true
+ basic_prompt:
+ type: str
+ description:
+ - The message in the basic authentication prompt
+ required: true
+ backend_mode:
+ type: str
+ description:
+ - Specifies if the backend server needs authentication ([Basic|None])
+ default: None
+ choices:
+ - Basic
+ - None
+ backend_strip_basic_auth:
+ description:
+ - Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ default: True
+ backend_user_prefix:
+ type: str
+ description:
+ - Prefix string to prepend to the username for backend authentication
+ default: ""
+ backend_user_suffix:
+ type: str
+ description:
+ - Suffix string to append to the username for backend authentication
+ default: ""
+ comment:
+ type: str
+ description:
+ - Optional comment string
+ default: ""
+ frontend_cookie:
+ type: str
+ description:
+ - Frontend cookie name
+ frontend_cookie_secret:
+ type: str
+ description:
+ - Frontend cookie secret
+ frontend_form:
+ type: str
+ description:
+ - Frontend authentication form name
+ frontend_form_template:
+ type: str
+ description:
+ - Frontend authentication form template
+ default: ""
+ frontend_login:
+ type: str
+ description:
+ - Frontend login name
+ frontend_logout:
+ type: str
+ description:
+ - Frontend logout name
+ frontend_mode:
+ type: str
+ description:
+ - Frontend authentication mode (Form|Basic)
+ default: Basic
+ choices:
+ - Basic
+ - Form
+ frontend_realm:
+ type: str
+ description:
+ - Frontend authentication realm
+ frontend_session_allow_persistency:
+ description:
+ - Allow session persistency
+ type: bool
+ default: False
+ frontend_session_lifetime:
+ type: int
+ description:
+ - The session lifetime
+ required: true
+ frontend_session_lifetime_limited:
+ description:
+ - Specifies if limitation of session lifetime is active
+ type: bool
+ default: True
+ frontend_session_lifetime_scope:
+ type: str
+ description:
+ - Scope for frontend_session_lifetime (days|hours|minutes)
+ default: hours
+ choices:
+ - days
+ - hours
+ - minutes
+ frontend_session_timeout:
+ type: int
+ description:
+ - The session timeout
+ required: true
+ frontend_session_timeout_enabled:
+ description:
+ - Specifies if session timeout is active
+ type: bool
+ default: True
+ frontend_session_timeout_scope:
+ type: str
+ description:
+ - Scope for frontend_session_timeout (days|hours|minutes)
+ default: minutes
+ choices:
+ - days
+ - hours
+ - minutes
+ logout_delegation_urls:
+ type: list
+ elements: str
+ description:
+ - List of logout URLs that logouts are delegated to
+ default: []
+ logout_mode:
+ type: str
+ description:
+ - Mode of logout (None|Delegation)
+ default: None
+ choices:
+ - None
+ - Delegation
+ redirect_to_requested_url:
+ description:
+ - Should a redirect to the requested URL be made
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING]
+ basic_prompt: "Authentication required: Please login"
+ frontend_session_lifetime: 1
+ frontend_session_timeout: 1
+ state: present
+
+- name: Remove UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: absent
+
+- name: Read UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ aaa:
+ description: List of references to utm_aaa objects (allowed users or groups)
+ type: list
+ basic_prompt:
+ description: The message in the basic authentication prompt
+ type: str
+ backend_mode:
+ description: Specifies if the backend server needs authentication ([Basic|None])
+ type: str
+ backend_strip_basic_auth:
+ description: Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ backend_user_prefix:
+ description: Prefix string to prepend to the username for backend authentication
+ type: str
+ backend_user_suffix:
+ description: Suffix string to append to the username for backend authentication
+ type: str
+ comment:
+ description: Optional comment string
+ type: str
+ frontend_cookie:
+ description: Frontend cookie name
+ type: str
+ frontend_form:
+ description: Frontend authentication form name
+ type: str
+ frontend_form_template:
+ description: Frontend authentication form template
+ type: str
+ frontend_login:
+ description: Frontend login name
+ type: str
+ frontend_logout:
+ description: Frontend logout name
+ type: str
+ frontend_mode:
+ description: Frontend authentication mode (Form|Basic)
+ type: str
+ frontend_realm:
+ description: Frontend authentication realm
+ type: str
+ frontend_session_allow_persistency:
+ description: Allow session persistency
+ type: bool
+ frontend_session_lifetime:
+ description: The session lifetime
+ type: int
+ frontend_session_lifetime_limited:
+ description: Specifies if limitation of session lifetime is active
+ type: bool
+ frontend_session_lifetime_scope:
+ description: Scope for frontend_session_lifetime (days|hours|minutes)
+ type: str
+ frontend_session_timeout:
+ description: The session timeout
+ type: int
+ frontend_session_timeout_enabled:
+ description: Specifies if session timeout is active
+ type: bool
+ frontend_session_timeout_scope:
+ description: Scope for frontend_session_timeout (days|hours|minutes)
+ type: str
+ logout_delegation_urls:
+ description: List of logout URLs that logouts are delegated to
+ type: list
+ logout_mode:
+ description: Mode of logout (None|Delegation)
+ type: str
+ redirect_to_requested_url:
+ description: Should a redirect to the requested URL be made
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/auth_profile"
+ key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", "backend_strip_basic_auth",
+ "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie",
+ "frontend_cookie_secret", "frontend_form", "frontend_form_template",
+ "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm",
+ "frontend_session_allow_persistency", "frontend_session_lifetime",
+ "frontend_session_lifetime_limited", "frontend_session_lifetime_scope",
+ "frontend_session_timeout", "frontend_session_timeout_enabled",
+ "frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode",
+ "redirect_to_requested_url"]
+
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ aaa=dict(type='list', elements='str', required=True),
+ basic_prompt=dict(type='str', required=True),
+ backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']),
+ backend_strip_basic_auth=dict(type='bool', required=False, default=True),
+ backend_user_prefix=dict(type='str', required=False, default=""),
+ backend_user_suffix=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ frontend_cookie=dict(type='str', required=False),
+ frontend_cookie_secret=dict(type='str', required=False, no_log=True),
+ frontend_form=dict(type='str', required=False),
+ frontend_form_template=dict(type='str', required=False, default=""),
+ frontend_login=dict(type='str', required=False),
+ frontend_logout=dict(type='str', required=False),
+ frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']),
+ frontend_realm=dict(type='str', required=False),
+ frontend_session_allow_persistency=dict(type='bool', required=False, default=False),
+ frontend_session_lifetime=dict(type='int', required=True),
+ frontend_session_lifetime_limited=dict(type='bool', required=False, default=True),
+ frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']),
+ frontend_session_timeout=dict(type='int', required=True),
+ frontend_session_timeout_enabled=dict(type='bool', required=False, default=True),
+ frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']),
+ logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]),
+ logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']),
+ redirect_to_requested_url=dict(type='bool', required=False, default=False)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
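
The key_to_check_for_changes list above repeats almost every key of the argument_spec by hand. A sketch of how the two could be kept in sync automatically (a refactoring idea only, not what the shipped module does; 'name' is excluded because it identifies the entry rather than describing it):

def mutable_keys(argument_spec, identifying_keys=('name',)):
    # Every spec key except the identifying ones is change-relevant.
    return sorted(k for k in argument_spec if k not in identifying_keys)

spec = dict(
    name=dict(type='str', required=True),
    basic_prompt=dict(type='str', required=True),
    comment=dict(type='str', default=''),
)
print(mutable_keys(spec))  # ['basic_prompt', 'comment']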
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py
new file mode 100644
index 00000000..ed241af1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Sebastian Schenzel <sebastian.schenzel@mailbox.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_exception
+
+author:
+ - Sebastian Schenzel (@RickS-C137)
+
+short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: True
+ type: str
+ op:
+ description:
+ - The operand to be used with the entries of the path parameter
+ default: 'AND'
+ choices:
+ - 'AND'
+ - 'OR'
+ required: False
+ type: str
+ path:
+ description:
+ - The paths the exception in the reverse proxy is defined for
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_custom_threats_filters:
+ description:
+ - A list of threats to be skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_threats_filter_categories:
+ description:
+ - Define which categories of threats are skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skipav:
+ description:
+ - Skip the Antivirus Scanning
+ default: False
+ type: bool
+ required: False
+ skipbadclients:
+ description:
+ - Skip blocking of clients with bad reputation
+ default: False
+ type: bool
+ required: False
+ skipcookie:
+ description:
+ - Skip the Cookie Signing check
+ default: False
+ type: bool
+ required: False
+ skipform:
+ description:
+ - Skip form hardening
+ default: False
+ type: bool
+ required: False
+ skipform_missingtoken:
+ description:
+ - Skip form hardening for requests with missing tokens
+ default: False
+ type: bool
+ required: False
+ skiphtmlrewrite:
+ description:
+ - Skip HTML rewriting
+ default: False
+ type: bool
+ required: False
+ skiptft:
+ description:
+ - Skip true file type control
+ default: False
+ type: bool
+ required: False
+ skipurl:
+ description:
+ - Skip static URL hardening
+ default: False
+ type: bool
+ required: False
+ source:
+ description:
+ - A list of source network references the exception applies to
+ type: list
+ elements: str
+ default: []
+ required: False
+ status:
+ description:
+ - Status of the exception rule set
+ default: True
+ type: bool
+ required: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ state: present
+
+- name: Remove UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ comment:
+ description: The optional comment string
+ type: str
+ op:
+ description: The operand to be used with the entries of the path parameter
+ type: str
+ path:
+ description: The paths the exception in the reverse proxy is defined for
+ type: list
+ skip_custom_threats_filters:
+ description: A list of threats to be skipped
+ type: list
+ skip_threats_filter_categories:
+ description: Define which categories of threats are skipped
+ type: list
+ skipav:
+ description: Skip the Antivirus Scanning
+ type: bool
+ skipbadclients:
+ description: Skip blocking of clients with bad reputation
+ type: bool
+ skipcookie:
+ description: Skip the Cookie Signing check
+ type: bool
+ skipform:
+ description: Skip form hardening
+ type: bool
+ skipform_missingtoken:
+ description: Skip form hardening for requests with missing tokens
+ type: bool
+ skiphtmlrewrite:
+ description: Skip HTML rewriting
+ type: bool
+ skiptft:
+ description: Skip true file type control
+ type: bool
+ skipurl:
+ description: Skip static URL hardening
+ type: bool
+ source:
+ description: A list of source network references the exception applies to
+ type: list
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/exception"
+ key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav",
+ "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken",
+ "skiphtmlrewrite", "skiptft", "skipurl", "source"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
+ path=dict(type='list', elements='str', required=False, default=[]),
+ skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]),
+ skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]),
+ skipav=dict(type='bool', required=False, default=False),
+ skipbadclients=dict(type='bool', required=False, default=False),
+ skipcookie=dict(type='bool', required=False, default=False),
+ skipform=dict(type='bool', required=False, default=False),
+ skipform_missingtoken=dict(type='bool', required=False, default=False),
+ skiphtmlrewrite=dict(type='bool', required=False, default=False),
+ skiptft=dict(type='bool', required=False, default=False),
+ skipurl=dict(type='bool', required=False, default=False),
+ source=dict(type='list', elements='str', required=False, default=[]),
+ status=dict(type='bool', required=False, default=True),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
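
The op option combines the entries of path with AND or OR. Under the reading that a request is excepted when all (AND) or any (OR) of the path patterns match, which is an assumption about how the UTM evaluates the rule rather than anything this module computes itself, the difference looks like this:

def exception_applies(request_path, paths, op='AND'):
    # Toy evaluator: 'AND' requires every pattern to match, 'OR' any of them.
    matches = [request_path.startswith(pattern) for pattern in paths]
    return all(matches) if op == 'AND' else any(matches)

print(exception_applies('/admin/login', ['/admin', '/login'], op='OR'))   # True
print(exception_applies('/admin/login', ['/admin', '/login'], op='AND'))  # False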
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py
new file mode 100644
index 00000000..8dba3640
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy frontend entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ add_content_type_header :
+ description:
+ - Whether to add the content type header or not
+ type: bool
+ default: False
+ address:
+ type: str
+ description:
+ - The reference name of the network/interface_address object.
+ default: REF_DefaultInternalAddress
+ allowed_networks:
+ type: list
+ elements: str
+ description:
+ - A list of reference names for the allowed networks.
+ default: ['REF_NetworkAny']
+ certificate:
+ type: str
+ description:
+ - The reference name of the ca/host_key_cert object.
+ default: ""
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ default: ""
+ disable_compression:
+ description:
+ - Whether to disable compression
+ type: bool
+ default: False
+ domain:
+ type: list
+ elements: str
+ description:
+ - A list of domain names for the frontend object
+ exceptions:
+ type: list
+ elements: str
+ description:
+ - A list of exception ref names (reverse_proxy/exception)
+ default: []
+ htmlrewrite:
+ description:
+ - Whether to enable html rewrite or not
+ type: bool
+ default: False
+ htmlrewrite_cookies:
+ description:
+ - Whether to enable html rewrite cookie or not
+ type: bool
+ default: False
+ implicitredirect:
+ description:
+ - Whether to enable implicit redirection or not
+ type: bool
+ default: False
+ lbmethod:
+ type: str
+ description:
+ - Which loadbalancer method should be used
+ choices:
+ - ""
+ - bybusyness
+ - bytraffic
+ - byrequests
+ default: bybusyness
+ locations:
+ type: list
+ elements: str
+ description:
+ - A list of location ref names (reverse_proxy/location)
+ default: []
+ port:
+ type: int
+ description:
+ - The frontend http port
+ default: 80
+ preservehost:
+ description:
+ - Whether to preserve host header
+ type: bool
+ default: False
+ profile:
+ type: str
+ description:
+ - The reference string of the reverse_proxy/profile
+ default: ""
+ status:
+ description:
+ - Whether to activate the frontend entry or not
+ type: bool
+ default: True
+ type:
+ type: str
+ description:
+ - Which protocol should be used
+ choices:
+ - http
+ - https
+ default: http
+ xheaders:
+ description:
+ - Whether to pass X-headers or not
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ state: present
+
+- name: Remove utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate",
+ "comment", "disable_compression", "domain", "exceptions", "htmlrewrite",
+ "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations",
+ "port", "preservehost", "profile", "status", "type", "xheaders"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ add_content_type_header=dict(type='bool', required=False, default=False),
+ address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
+ allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
+ certificate=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ disable_compression=dict(type='bool', required=False, default=False),
+ domain=dict(type='list', elements='str', required=False),
+ exceptions=dict(type='list', elements='str', required=False, default=[]),
+ htmlrewrite=dict(type='bool', required=False, default=False),
+ htmlrewrite_cookies=dict(type='bool', required=False, default=False),
+ implicitredirect=dict(type='bool', required=False, default=False),
+ lbmethod=dict(type='str', required=False, default="bybusyness",
+ choices=['bybusyness', 'bytraffic', 'byrequests', '']),
+ locations=dict(type='list', elements='str', required=False, default=[]),
+ port=dict(type='int', required=False, default=80),
+ preservehost=dict(type='bool', required=False, default=False),
+ profile=dict(type='str', required=False, default=""),
+ status=dict(type='bool', required=False, default=True),
+ type=dict(type='str', required=False, default="http", choices=['http', 'https']),
+ xheaders=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py
new file mode 100644
index 00000000..450bd161
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for a reverse_proxy frontend entry in Sophos UTM
+
+description:
+ - Get info for a reverse_proxy frontend entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get utm proxy_frontend
+ community.general.utm_proxy_frontend_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
new file mode 100644
index 00000000..7c4bc8b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy location entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ access_control:
+ description:
+ - Whether to activate the access control for the location
+ type: str
+ default: '0'
+ choices:
+ - '0'
+ - '1'
+ allowed_networks:
+ description:
+ - A list of allowed networks
+ type: list
+ elements: str
+ default: ['REF_NetworkAny']
+ auth_profile:
+ type: str
+ description:
+ - The reference name of the auth profile
+ backend:
+ type: list
+ elements: str
+ description:
+ - A list of backends that are connected with this location declaration
+ default: []
+ be_path:
+ type: str
+ description:
+ - The path of the backend
+ comment:
+ type: str
+ description:
+ - The optional comment string
+ denied_networks:
+ type: list
+ elements: str
+ description:
+ - A list of denied network references
+ default: []
+ hot_standby:
+ description:
+ - Activate hot standby mode
+ type: bool
+ default: False
+ path:
+ type: str
+ description:
+ - The path of the location
+ default: "/"
+ status:
+ description:
+ - Whether the location is active or not
+ type: bool
+ default: True
+ stickysession_id:
+ type: str
+ description:
+ - The stickysession id
+ default: ROUTEID
+ stickysession_status:
+ description:
+ - Enable the stickysession
+ type: bool
+ default: False
+ websocket_passthrough:
+ description:
+ - Enable the websocket passthrough
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_location
+ community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_location
+ community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+ description: The access control state
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The list of backend reference names
+ type: list
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+ description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
+ "denied_networks", "hot_standby", "path", "status", "stickysession_id",
+ "stickysession_status", "websocket_passthrough"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
+ allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
+ auth_profile=dict(type='str', required=False, default=""),
+ backend=dict(type='list', elements='str', required=False, default=[]),
+ be_path=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ denied_networks=dict(type='list', elements='str', required=False, default=[]),
+ hot_standby=dict(type='bool', required=False, default=False),
+ path=dict(type='str', required=False, default="/"),
+ status=dict(type='bool', required=False, default=True),
+ stickysession_id=dict(type='str', required=False, default='ROUTEID'),
+ stickysession_status=dict(type='bool', required=False, default=False),
+ websocket_passthrough=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py
new file mode 100644
index 00000000..1125c4fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for a reverse_proxy location entry in Sophos UTM
+
+description:
+ - Get info for a reverse_proxy location entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get UTM proxy_location info
+ community.general.utm_proxy_location_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+ description: The access control state
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The list of backend reference names
+ type: list
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+ description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py
new file mode 100644
index 00000000..5524beea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: supervisorctl
+short_description: Manage the state of a program or group of programs running via supervisord
+description:
+ - Manage the state of a program or group of programs running via supervisord.
+options:
+ name:
+ type: str
+ description:
+ - The name of the supervisord program or group to manage.
+ - The name will be taken as a group name when it ends with a colon I(:).
+ - Group support is only available in Ansible version 1.6 or later.
+ required: true
+ config:
+ type: path
+ description:
+ - The supervisor configuration file path
+ server_url:
+ type: str
+ description:
+ - URL on which supervisord server is listening
+ username:
+ type: str
+ description:
+ - username to use for authentication
+ password:
+ type: str
+ description:
+ - password to use for authentication
+ state:
+ type: str
+ description:
+ - The desired state of program/group.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+ signal:
+ type: str
+ description:
+ - The signal to send to the program/group, when combined with the I(signalled) state. Required when I(state=signalled).
+ supervisorctl_path:
+ type: path
+ description:
+ - path to supervisorctl executable
+notes:
+ - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+ - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
+ - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+requirements: [ "supervisorctl" ]
+author:
+ - "Matt Wright (@mattupstate)"
+ - "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
+'''
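
The notes above pin each state to a fixed supervisorctl command sequence. A standalone sketch of those sequences (the binary name and error handling are simplified assumptions; the module itself resolves the executable path and passes config, server URL and credentials):

import subprocess

# state -> (preparation command, action command), as described in the notes
SEQUENCES = {
    'present': (['reread'], ['add']),
    'restarted': (['update'], ['restart']),
    'absent': (['reread'], ['remove']),
}

def run_sequence(state, name, supervisorctl='supervisorctl'):
    prepare, action = SEQUENCES[state]
    subprocess.check_call([supervisorctl] + prepare)
    # Only the action command takes the program/group name.
    subprocess.check_call([supervisorctl] + action + [name])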
+
+EXAMPLES = '''
+- name: Manage the state of program to be in started state
+ community.general.supervisorctl:
+ name: my_app
+ state: started
+
+- name: Manage the state of program group to be in started state
+ community.general.supervisorctl:
+ name: 'my_apps:'
+ state: started
+
+- name: Restart my_app, reading supervisorctl configuration from a specified file
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ config: /var/opt/my_project/supervisord.conf
+
+- name: Restart my_app, connecting to supervisord with credentials and server URL
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ username: test
+ password: testpass
+ server_url: http://localhost:9001
+
+- name: Send a signal to my_app via supervisorctl
+ community.general.supervisorctl:
+ name: my_app
+ state: signalled
+ signal: USR1
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule, is_executable
+
+
+def main():
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ config=dict(required=False, type='path'),
+ server_url=dict(type='str', required=False),
+ username=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ supervisorctl_path=dict(required=False, type='path'),
+ state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
+ signal=dict(type='str', required=False)
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ is_group = False
+ if name.endswith(':'):
+ is_group = True
+ name = name.rstrip(':')
+ state = module.params['state']
+ config = module.params.get('config')
+ server_url = module.params.get('server_url')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ supervisorctl_path = module.params.get('supervisorctl_path')
+ signal = module.params.get('signal')
+
+ # we check error message for a pattern, so we need to make sure that's in C locale
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if supervisorctl_path:
+ if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
+ supervisorctl_args = [supervisorctl_path]
+ else:
+ module.fail_json(
+ msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
+ else:
+ supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
+
+ if config:
+ supervisorctl_args.extend(['-c', config])
+ if server_url:
+ supervisorctl_args.extend(['-s', server_url])
+ if username:
+ supervisorctl_args.extend(['-u', username])
+ if password:
+ supervisorctl_args.extend(['-p', password])
+
+ if state == 'signalled' and not signal:
+ module.fail_json(msg="State 'signalled' requires a 'signal' value")
+
+ def run_supervisorctl(cmd, name=None, **kwargs):
+ args = list(supervisorctl_args) # copy the master args
+ args.append(cmd)
+ if name:
+ args.append(name)
+ return module.run_command(args, **kwargs)
+
+ def get_matched_processes():
+ matched = []
+ rc, out, err = run_supervisorctl('status')
+ for line in out.splitlines():
+ # One status line may look like one of these two:
+ # process not in group:
+ # echo_date_lonely RUNNING pid 7680, uptime 13:22:18
+ # process in group:
+ # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
+ fields = [field for field in line.split(' ') if field != '']
+ process_name = fields[0]
+ status = fields[1]
+
+ if is_group:
+ # If there is ':', this process must be in a group.
+ if ':' in process_name:
+ group = process_name.split(':')[0]
+ if group != name:
+ continue
+ else:
+ continue
+ else:
+ if process_name != name:
+ continue
+
+ matched.append((process_name, status))
+ return matched
+
+ def take_action_on_processes(processes, status_filter, action, expected_result):
+ to_take_action_on = []
+ for process_name, status in processes:
+ if status_filter(status):
+ to_take_action_on.append(process_name)
+
+ if len(to_take_action_on) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ for process_name in to_take_action_on:
+ rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
+ if '%s: %s' % (process_name, expected_result) not in out:
+ module.fail_json(msg=out)
+
+ module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
+
+ if state == 'restarted':
+ rc, out, err = run_supervisorctl('update', check_rc=True)
+ processes = get_matched_processes()
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+
+ take_action_on_processes(processes, lambda s: True, 'restart', 'started')
+
+ processes = get_matched_processes()
+
+ if state == 'absent':
+ if len(processes) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('remove', name)
+ if '%s: removed process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'present':
+ if len(processes) > 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('add', name)
+ if '%s: added process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'started':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')
+
+ if state == 'stopped':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
+
+ if state == 'signalled':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled')
+
+
+if __name__ == '__main__':
+ main()
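
get_matched_processes() above splits each status line on whitespace and treats a colon in the first field as the group separator. The same parsing as a standalone function, fed the sample lines from the code comments:

def parse_status_line(line):
    fields = [field for field in line.split(' ') if field != '']
    process_name, status = fields[0], fields[1]
    # 'group:process' marks a process that belongs to a group.
    group = process_name.split(':')[0] if ':' in process_name else None
    return process_name, status, group

print(parse_status_line('echo_date_lonely RUNNING pid 7680, uptime 13:22:18'))
# ('echo_date_lonely', 'RUNNING', None)
print(parse_status_line('echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18'))
# ('echo_date_group:echo_date_00', 'RUNNING', 'echo_date_group')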
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py
new file mode 100644
index 00000000..ae8f31c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+ - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+ - An issue is identified by the combination of project, issue subject and issue type.
+ - This module implements the creation or deletion of issues (not the update).
+options:
+ taiga_host:
+ type: str
+ description:
+ - The hostname of the Taiga instance.
+ default: https://api.taiga.io
+ project:
+ type: str
+ description:
+ - Name of the project containing the issue. Must exist previously.
+ required: True
+ subject:
+ type: str
+ description:
+ - The issue subject.
+ required: True
+ issue_type:
+ type: str
+ description:
+ - The issue type. Must exist previously.
+ required: True
+ priority:
+ type: str
+ description:
+ - The issue priority. Must exist previously.
+ default: Normal
+ status:
+ type: str
+ description:
+ - The issue status. Must exist previously.
+ default: New
+ severity:
+ type: str
+ description:
+ - The issue severity. Must exist previously.
+ default: Normal
+ description:
+ type: str
+ description:
+ - The issue description.
+ default: ""
+ attachment:
+ type: path
+ description:
+ - Path to a file to be attached to the issue.
+ attachment_description:
+ type: str
+ description:
+ - A string describing the file to be attached to the issue.
+ default: ""
+ tags:
+ type: list
+ elements: str
+ description:
+ - A list of tags to be assigned to the issue.
+ default: []
+ state:
+ type: str
+ description:
+ - Whether the issue should be present or not.
+ choices: ["present", "absent"]
+ default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD.
+'''
+
+EXAMPLES = '''
+- name: Create an issue in the my hosted Taiga environment and attach an error log
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ priority: High
+ status: New
+ severity: Important
+ description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log
+ attachment_description: Error log file
+ tags:
+ - Error
+ - Needs manual check
+ state: present
+
+- name: Deletes the previously created issue
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ state: absent
+'''
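
manage_issue() below takes len() of several name lookups, so each filter() result is materialised with list() first; on Python 3, filter() returns a lazy iterator that has no length. A minimal illustration:

projects = [{'name': 'myproject'}, {'name': 'other'}]
matches = filter(lambda p: p['name'] == 'myproject', projects)
# len(matches) would raise TypeError on Python 3: filter() is an iterator.
matches = list(matches)
print(len(matches))  # 1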
+
+RETURN = '''# '''
+import traceback
+
+from os import getenv
+from os.path import isfile
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+TAIGA_IMP_ERR = None
+try:
+ from taiga import TaigaAPI
+ from taiga.exceptions import TaigaException
+ TAIGA_MODULE_IMPORTED = True
+except ImportError:
+ TAIGA_IMP_ERR = traceback.format_exc()
+ TAIGA_MODULE_IMPORTED = False
+
+
+def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
+ issue_status, issue_type, issue_severity, issue_description,
+ issue_attachment, issue_attachment_description,
+ issue_tags, state, check_mode=False):
+ """
+ Method that creates/deletes issues depending on whether they exist and on the desired state
+
+ The credentials should be passed via environment variables:
+ - TAIGA_TOKEN
+ - TAIGA_USERNAME and TAIGA_PASSWORD
+
+ Returns a tuple with these elements:
+ - A boolean representing the success of the operation
+ - A descriptive message
+ - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+ """
+
+ changed = False
+
+ try:
+ token = getenv('TAIGA_TOKEN')
+ if token:
+ api = TaigaAPI(host=taiga_host, token=token)
+ else:
+ api = TaigaAPI(host=taiga_host)
+ username = getenv('TAIGA_USERNAME')
+ password = getenv('TAIGA_PASSWORD')
+            if not all([username, password]):
+ return (False, changed, "Missing credentials", {})
+ api.auth(username=username, password=password)
+
+ user_id = api.me().id
+        project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id)))
+ if len(project_list) != 1:
+ return (False, changed, "Unable to find project %s" % project_name, {})
+ project = project_list[0]
+ project_id = project.id
+
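+        # Each of the following lookups resolves a human-readable name
+        # (priority, status, type, severity) to its internal Taiga id;
+        # every name must match exactly one object in the project.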
+        priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)))
+ if len(priority_list) != 1:
+ return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
+ priority_id = priority_list[0].id
+
+        status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)))
+ if len(status_list) != 1:
+ return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
+ status_id = status_list[0].id
+
+        type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types()))
+ if len(type_list) != 1:
+ return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
+ type_id = type_list[0].id
+
+        severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities()))
+ if len(severity_list) != 1:
+ return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
+ severity_id = severity_list[0].id
+
+ issue = {
+ "project": project_name,
+ "subject": issue_subject,
+ "priority": issue_priority,
+ "status": issue_status,
+ "type": issue_type,
+ "severity": issue_severity,
+ "description": issue_description,
+ "tags": issue_tags,
+ }
+
+ # An issue is identified by the project_name, the issue_subject and the issue_type
+        matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()))
+ matching_issue_list_len = len(matching_issue_list)
+
+ if matching_issue_list_len == 0:
+ # The issue does not exist in the project
+ if state == "present":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Create the issue
+ new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
+ if issue_attachment:
+ new_issue.attach(issue_attachment, description=issue_attachment_description)
+ issue["attachment"] = issue_attachment
+ issue["attachment_description"] = issue_attachment_description
+ return (True, changed, "Issue created", issue)
+
+ else:
+ # If does not exist, do nothing
+ return (True, changed, "Issue does not exist", {})
+
+ elif matching_issue_list_len == 1:
+ # The issue exists in the project
+ if state == "absent":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Delete the issue
+ matching_issue_list[0].delete()
+ return (True, changed, "Issue deleted", {})
+
+ else:
+ # Do nothing
+ return (True, changed, "Issue already exists", {})
+
+ else:
+ # More than 1 matching issue
+ return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
+
+ except TaigaException as exc:
+ msg = "An exception happened: %s" % to_native(exc)
+ return (False, changed, msg, {})
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ taiga_host=dict(type='str', required=False, default="https://api.taiga.io"),
+ project=dict(type='str', required=True),
+ subject=dict(type='str', required=True),
+ issue_type=dict(type='str', required=True),
+ priority=dict(type='str', required=False, default="Normal"),
+ status=dict(type='str', required=False, default="New"),
+ severity=dict(type='str', required=False, default="Normal"),
+ description=dict(type='str', required=False, default=""),
+ attachment=dict(type='path', required=False, default=None),
+ attachment_description=dict(type='str', required=False, default=""),
+ tags=dict(required=False, default=[], type='list', elements='str'),
+ state=dict(type='str', required=False, choices=['present', 'absent'],
+ default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not TAIGA_MODULE_IMPORTED:
+ module.fail_json(msg=missing_required_lib("python-taiga"),
+ exception=TAIGA_IMP_ERR)
+
+ taiga_host = module.params['taiga_host']
+ project_name = module.params['project']
+ issue_subject = module.params['subject']
+ issue_priority = module.params['priority']
+ issue_status = module.params['status']
+ issue_type = module.params['issue_type']
+ issue_severity = module.params['severity']
+ issue_description = module.params['description']
+ issue_attachment = module.params['attachment']
+ issue_attachment_description = module.params['attachment_description']
+ if issue_attachment:
+ if not isfile(issue_attachment):
+ msg = "%s is not a file" % issue_attachment
+ module.fail_json(msg=msg)
+ issue_tags = module.params['tags']
+ state = module.params['state']
+
+ return_status, changed, msg, issue_attr_dict = manage_issue(
+ module,
+ taiga_host,
+ project_name,
+ issue_subject,
+ issue_priority,
+ issue_status,
+ issue_type,
+ issue_severity,
+ issue_description,
+ issue_attachment,
+ issue_attachment_description,
+ issue_tags,
+ state,
+ check_mode=module.check_mode
+ )
+ if return_status:
+ if len(issue_attr_dict) > 0:
+ module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py
new file mode 100644
index 00000000..9a69ce54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a Webfaction application using Ansible and the Webfaction API
+#
+# Valid application types can be found by looking here:
+# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_app
+short_description: Add or remove applications on a Webfaction host
+description:
+ - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+ name:
+ description:
+ - The name of the application
+ required: true
+
+ state:
+ description:
+ - Whether the application should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
+ required: true
+
+ autostart:
+ description:
+ - Whether the app should restart with an C(autostart.cgi) script
+ type: bool
+ default: 'no'
+
+ extra_info:
+ description:
+ - Any extra parameters required by the app
+ default: ''
+
+ port_open:
+ description:
+            - If the port should be opened
+ type: bool
+ default: 'no'
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a test app
+ community.general.webfaction_app:
+ name: "my_wsgi_app1"
+ state: present
+ type: mod_wsgi35-python27
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
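+
+  # A sketch, not from the original examples: per the note above about
+  # avoiding simultaneous runs against the remote Webfaction API, the play
+  # carrying these tasks can be constrained with 'serial: 1', e.g.
+  #
+  # - hosts: all
+  #   serial: 1
+  #   tasks:
+  #     - name: Create a test app
+  #       community.general.webfaction_app:
+  #         name: "my_wsgi_app1"
+  #         state: present
+  #         type: mod_wsgi35-python27
+  #         login_name: "{{webfaction_user}}"
+  #         login_password: "{{webfaction_passwd}}"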
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
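+# All calls below go through this XML-RPC proxy; webfaction.login() returns a
+# session id that must be passed as the first argument of every later call.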
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=True),
+ autostart=dict(required=False, type='bool', default=False),
+ extra_info=dict(required=False, default=""),
+ port_open=dict(required=False, type='bool', default=False),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ app_name = module.params['name']
+ app_type = module.params['type']
+ app_state = module.params['state']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ app_list = webfaction.list_apps(session_id)
+ app_map = dict([(i['name'], i) for i in app_list])
+ existing_app = app_map.get(app_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if app_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_app:
+ if existing_app['type'] != app_type:
+ module.fail_json(msg="App already exists with different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change it
+ # Should check other parameters.
+ module.exit_json(
+ changed=False,
+ result=existing_app,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ result.update(
+ webfaction.create_app(
+ session_id, app_name, app_type,
+ module.boolean(module.params['autostart']),
+ module.params['extra_info'],
+ module.boolean(module.params['port_open'])
+ )
+ )
+
+ elif app_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_app:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_app(session_id, app_name)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(app_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py
new file mode 100644
index 00000000..19bc6ea2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a webfaction database using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+    - Add or remove a database on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ name:
+ description:
+ - The name of the database
+ required: true
+
+ state:
+ description:
+ - Whether the database should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of database to create.
+ required: true
+ choices: ['mysql', 'postgresql']
+
+ password:
+ description:
+ - The password for the new database user.
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+'''
+
+EXAMPLES = '''
+ # This will also create a default DB user with the same
+ # name as the database, and the specified password.
+
+ - name: Create a database
+ community.general.webfaction_db:
+ name: "{{webfaction_user}}_db1"
+ password: mytestsql
+ type: mysql
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+
+ # Note that, for symmetry's sake, deleting a database using
+ # 'state: absent' will also delete the matching user.
+
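+  # A sketch (values assumed to match the create example above): deleting
+  # the database, which also removes the matching default user.
+  - name: Delete the database
+    community.general.webfaction_db:
+      name: "{{webfaction_user}}_db1"
+      type: mysql
+      state: absent
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"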
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+            state=dict(required=False, choices=['present', 'absent'], default='present'),
+            type=dict(required=True, choices=['mysql', 'postgresql']),
+ password=dict(required=False, default=None, no_log=True),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ db_name = module.params['name']
+ db_state = module.params['state']
+ db_type = module.params['type']
+ db_passwd = module.params['password']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ db_list = webfaction.list_dbs(session_id)
+ db_map = dict([(i['name'], i) for i in db_list])
+ existing_db = db_map.get(db_name)
+
+ user_list = webfaction.list_db_users(session_id)
+ user_map = dict([(i['username'], i) for i in user_list])
+ existing_user = user_map.get(db_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if db_state == 'present':
+
+ # Does a database with this name already exist?
+ if existing_db:
+ # Yes, but of a different type - fail
+ if existing_db['db_type'] != db_type:
+ module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the db
+ # and default user.
+ result.update(
+ webfaction.create_db(
+ session_id, db_name, db_type, db_passwd
+ )
+ )
+
+ elif db_state == 'absent':
+
+        # If neither the database nor its default user exists, nothing
+        # needs to change, whether or not this is a dry run.
+        if not (existing_db or existing_user):
+            module.exit_json(changed=False)
+
+        # If this isn't a dry run...
+        if not module.check_mode:
+
+ if existing_db:
+ # Delete the db if it exists
+ result.update(
+ webfaction.delete_db(session_id, db_name, db_type)
+ )
+
+ if existing_user:
+ # Delete the default db user if it exists
+ result.update(
+ webfaction.delete_db_user(session_id, db_name, db_type)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(db_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py
new file mode 100644
index 00000000..a348ef51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+    - Add or remove domains or subdomains on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+ If you don't specify subdomains, the domain will be deleted.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the domain
+ required: true
+
+ state:
+ description:
+ - Whether the domain should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ subdomains:
+ description:
+ - Any subdomains to create.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a test domain
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: present
+ subdomains:
+ - www
+ - blog
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+ - name: Delete test domain and any subdomains
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: absent
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ subdomains=dict(required=False, default=[], type='list'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ domain_name = module.params['name']
+ domain_state = module.params['state']
+ domain_subdomains = module.params['subdomains']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ domain_list = webfaction.list_domains(session_id)
+ domain_map = dict([(i['domain'], i) for i in domain_list])
+ existing_domain = domain_map.get(domain_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if domain_state == 'present':
+
+    # Does a domain with this name already exist?
+ if existing_domain:
+
+ if set(existing_domain['subdomains']) >= set(domain_subdomains):
+ # If it exists with the right subdomains, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+            # If this isn't a dry run, create the domain
+ result.update(
+ webfaction.create_domain(
+ *positional_args
+ )
+ )
+
+ elif domain_state == 'absent':
+
+        # If the domain's already not there, nothing changed.
+ if not existing_domain:
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+            # If this isn't a dry run, delete the domain or the given subdomains
+ result.update(
+ webfaction.delete_domain(*positional_args)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
new file mode 100644
index 00000000..144fad29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create webfaction mailbox using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+    - Add or remove mailboxes on a Webfaction account. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ mailbox_name:
+ description:
+ - The name of the mailbox
+ required: true
+
+ mailbox_password:
+ description:
+ - The password for the mailbox
+ required: true
+
+ state:
+ description:
+ - Whether the mailbox should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+  - name: Create a mailbox
+    community.general.webfaction_mailbox:
+      mailbox_name: mybox
+      mailbox_password: myboxpw
+      state: present
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ mailbox_name=dict(required=True),
+ mailbox_password=dict(required=True, no_log=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ mailbox_name = module.params['mailbox_name']
+    mailbox_state = module.params['state']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
+ existing_mailbox = mailbox_name in mailbox_list
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+    if mailbox_state == 'present':
+
+ # Does a mailbox with this name already exist?
+ if existing_mailbox:
+ module.exit_json(changed=False,)
+
+ positional_args = [session_id, mailbox_name]
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the mailbox
+ result.update(webfaction.create_mailbox(*positional_args))
+
+    elif mailbox_state == 'absent':
+
+ # If the mailbox is already not there, nothing changed.
+ if not existing_mailbox:
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the mailbox
+ result.update(webfaction.delete_mailbox(session_id, mailbox_name))
+
+ else:
+        module.fail_json(msg="Unknown state specified: {0}".format(mailbox_state))
+
+ module.exit_json(changed=True, result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py
new file mode 100644
index 00000000..8ae98280
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction website using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+    - Add or remove a website on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+ address. You can use a DNS name.
+ - If a site of the same name exists in the account but on a different host, the operation will exit.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the website
+ required: true
+
+ state:
+ description:
+ - Whether the website should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ host:
+ description:
+ - The webfaction host on which the site should be created.
+ required: true
+
+ https:
+ description:
+ - Whether or not to use HTTPS
+ type: bool
+ default: 'no'
+
+ site_apps:
+ description:
+            - A list of two-element lists mapping an application name to a URL path, for example C([['testapp1', '/']]).
+ default: []
+
+ subdomains:
+ description:
+ - A list of subdomains associated with this site.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create website
+ community.general.webfaction_site:
+ name: testsite1
+ state: present
+ host: myhost.webfaction.com
+ subdomains:
+ - 'testsite1.my_domain.org'
+ site_apps:
+ - ['testapp1', '/']
+ https: no
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
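+
+  # A sketch (values assumed to match the example above): removing the site.
+  - name: Remove website
+    community.general.webfaction_site:
+      name: testsite1
+      state: absent
+      host: myhost.webfaction.com
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"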
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ # You can specify an IP address or hostname.
+ host=dict(required=True),
+ https=dict(required=False, type='bool', default=False),
+ subdomains=dict(required=False, type='list', default=[]),
+ site_apps=dict(required=False, type='list', default=[]),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ site_name = module.params['name']
+ site_state = module.params['state']
+ site_host = module.params['host']
+ site_ip = socket.gethostbyname(site_host)
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ site_list = webfaction.list_websites(session_id)
+ site_map = dict([(i['name'], i) for i in site_list])
+ existing_site = site_map.get(site_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a site with this name already exist?
+ if existing_site:
+
+ # If yes, but it's on a different IP address, then fail.
+ # If we wanted to allow relocation, we could add a 'relocate=true' option
+ # which would get the existing IP address, delete the site there, and create it
+ # at the new address. A bit dangerous, perhaps, so for now we'll require manual
+ # deletion if it's on another host.
+
+ if existing_site['ip'] != site_ip:
+ module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
+
+ # If it's on this host and the key parameters are the same, nothing needs to be done.
+
+ if (existing_site['https'] == module.boolean(module.params['https'])) and \
+ (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
+ (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
+ module.exit_json(
+ changed=False
+ )
+
+ positional_args = [
+ session_id, site_name, site_ip,
+ module.boolean(module.params['https']),
+ module.params['subdomains'],
+ ]
+ for a in module.params['site_apps']:
+ positional_args.append((a[0], a[1]))
+
+ if not module.check_mode:
+ # If this isn't a dry run, create or modify the site
+ result.update(
+ webfaction.create_website(
+ *positional_args
+ ) if not existing_site else webfaction.update_website(
+ *positional_args
+ )
+ )
+
+ elif site_state == 'absent':
+
+ # If the site's already not there, nothing changed.
+ if not existing_site:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the site
+ result.update(
+ webfaction.delete_website(session_id, site_name, site_ip)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py
new file mode 100644
index 00000000..8b1449be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xattr
+short_description: Manage user defined extended attributes
+description:
+ - Manages filesystem user defined extended attributes.
+ - Requires that extended attributes are enabled on the target filesystem
+ and that the setfattr/getfattr utilities are present.
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of.
+ - Before 2.3 this option was only usable as I(name).
+ type: path
+ required: true
+ aliases: [ name ]
+ namespace:
+ description:
+ - Namespace of the named name/key.
+ type: str
+ default: user
+ key:
+ description:
+ - The name of a specific Extended attribute key to set/retrieve.
+ type: str
+ value:
+ description:
+    - The value to set the named name/key to. Specifying it automatically implies C(state=present).
+ type: str
+ state:
+ description:
+    - Defines which action you want to perform.
+    - C(read) retrieves the current value for a C(key) (default).
+    - C(present) sets C(name) to C(value), the default if a value is given.
+    - C(all) dumps all data.
+    - C(keys) retrieves all keys.
+    - C(absent) deletes the key.
+ type: str
+ choices: [ absent, all, keys, present, read ]
+ default: read
+ follow:
+ description:
+ - If C(yes), dereferences symlinks and sets/gets attributes on symlink target,
+ otherwise acts on symlink itself.
+ type: bool
+ default: yes
+notes:
+  - As of Ansible 2.3, the I(name) option has been renamed to I(path), but I(name) still works as an alias.
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Obtain the extended attributes of /etc/foo.conf
+ community.general.xattr:
+ path: /etc/foo.conf
+
+- name: Set the key 'user.foo' to value 'bar'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ value: bar
+
+- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ value: "0x817b94343f164f199e5b573b4ea1f914"
+
+- name: Remove the key 'user.foo'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ state: absent
+
+- name: Remove the key 'trusted.glusterfs.volume-id'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ state: absent
+'''
+
+import os
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_xattr_keys(module, path, follow):
+ cmd = [module.get_bin_path('getfattr', True)]
+    # --absolute-names suppresses getfattr's warning about stripping the
+    # leading '/' from absolute paths
+    cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def get_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('getfattr', True)]
+    # --absolute-names suppresses getfattr's warning about stripping the
+    # leading '/' from absolute paths
+    cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ if key is None:
+ cmd.append('-d')
+ else:
+        cmd.extend(['-n', key])
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def set_xattr(module, path, key, value, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+    cmd.extend(['-n', key])
+    cmd.extend(['-v', value])
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def rm_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+    cmd.extend(['-x', key])
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def _run_xattr(module, cmd, check_rc=True):
+
+ try:
+        (rc, out, err) = module.run_command(cmd, check_rc=check_rc)
+ except Exception as e:
+ module.fail_json(msg="%s!" % to_native(e))
+
+ # result = {'raw': out}
+ result = {}
+ for line in out.splitlines():
+ if line.startswith('#') or line == '':
+ pass
+ elif '=' in line:
+            (key, val) = line.split('=', 1)
+ result[key] = val.strip('"')
+ else:
+ result[line] = ''
+ return result
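+
+# For reference, an assumed illustration (not captured output) of the
+# 'getfattr -d' format that _run_xattr() parses into the result dict:
+#
+#   # file: etc/foo.conf
+#   user.foo="bar"
+#   user.baz="qux"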
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['name']),
+ namespace=dict(type='str', default='user'),
+ key=dict(type='str'),
+ value=dict(type='str'),
+ state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
+ follow=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ path = module.params.get('path')
+ namespace = module.params.get('namespace')
+ key = module.params.get('key')
+ value = module.params.get('value')
+ state = module.params.get('state')
+ follow = module.params.get('follow')
+
+ if not os.path.exists(path):
+ module.fail_json(msg="path not found or not accessible!")
+
+ changed = False
+ msg = ""
+ res = {}
+
+ if key is None and state in ['absent', 'present']:
+ module.fail_json(msg="%s needs a key parameter" % state)
+
+ # Prepend the key with the namespace if defined
+ if (
+ key is not None and
+ namespace is not None and
+ len(namespace) > 0 and
+ not (namespace == 'user' and key.startswith('user.'))):
+ key = '%s.%s' % (namespace, key)
+
+ if (state == 'present' or value is not None):
+ current = get_xattr(module, path, key, follow)
+ if current is None or key not in current or value != current[key]:
+ if not module.check_mode:
+ res = set_xattr(module, path, key, value, follow)
+ changed = True
+ res = current
+ msg = "%s set to %s" % (key, value)
+ elif state == 'absent':
+ current = get_xattr(module, path, key, follow)
+ if current is not None and key in current:
+ if not module.check_mode:
+ res = rm_xattr(module, path, key, follow)
+ changed = True
+ res = current
+ msg = "%s removed" % (key)
+ elif state == 'keys':
+ res = get_xattr_keys(module, path, follow)
+ msg = "returning all keys"
+ elif state == 'all':
+ res = get_xattr(module, path, None, follow)
+ msg = "dumping all"
+ else:
+ res = get_xattr(module, path, key, follow)
+ msg = "returning %s" % key
+
+ module.exit_json(changed=changed, msg=msg, xattr=res)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py
new file mode 100644
index 00000000..6f2f5dfa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2016 Dino Occhialini <dino.occhialini@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xbps
+short_description: Manage packages with XBPS
+description:
+ - Manage packages with the XBPS package manager.
+author:
+ - "Dino Occhialini (@dinoocch)"
+ - "Michael Aldridge (@the-maldridge)"
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ aliases: [pkg,package]
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "absent", "latest", "installed", "removed"]
+ type: str
+ recurse:
+ description:
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
+ type: bool
+ default: no
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
+ aliases: ['update-cache']
+ type: bool
+ default: yes
+ upgrade:
+ description:
+            - Whether or not to upgrade the whole system.
+ type: bool
+ default: no
+ upgrade_xbps:
+ description:
+ - Whether or not to upgrade the xbps package when necessary.
+ Before installing new packages,
+ xbps requires the user to update the xbps package itself.
+ Thus when this option is set to C(no),
+ upgrades and installations will fail when xbps is not up to date.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ force:
+ description:
+            - This option doesn't have any effect and is deprecated; it will be
+              removed in 3.0.0.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo (automatically updating the xbps package if needed)
+ community.general.xbps: name=foo state=present
+
+- name: Upgrade package foo
+ community.general.xbps: name=foo state=latest update_cache=yes
+
+- name: Remove packages foo and bar
+ community.general.xbps: name=foo,bar state=absent
+
+- name: Recursively remove package foo
+ community.general.xbps: name=foo state=absent recurse=yes
+
+- name: Update package cache
+ community.general.xbps: update_cache=yes
+
+- name: Upgrade packages
+ community.general.xbps: upgrade=yes
+
+- name: Install a package, failing if the xbps package is out of date
+ community.general.xbps:
+ name: foo
+ state: present
+ upgrade_xbps: no
+'''
+
+RETURN = '''
+msg:
+ description: Message about results
+ returned: success
+ type: str
+ sample: "System Upgraded"
+packages:
+ description: Packages that are affected/would be affected
+ type: list
+ sample: ["ansible"]
+ returned: success
+'''
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def is_installed(xbps_output):
+ """Returns package install state"""
+    return bool(xbps_output)
+
+
+def query_package(module, xbps_path, name, state="present"):
+ """Returns Package info"""
+ if state == "present":
+ lcmd = "%s %s" % (xbps_path['query'], name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if not is_installed(lstdout):
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s -Sun" % (xbps_path['install'])
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
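+    # rc 17 is assumed to be xbps' EEXIST-style code for "package already
+    # installed/up to date", so it is treated like success below.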
+ if rrc == 0 or rrc == 17:
+        # Return True to indicate that the package is installed locally,
+        # plus the result of the version number comparison, which tells
+        # whether the package is up-to-date
+ return True, name not in rstdout
+
+ return False, False
+
+
+def update_package_db(module, xbps_path):
+ """Returns True if update_package_db changed"""
+ cmd = "%s -S" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="Could not update package db")
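+    # Heuristic (an assumption, not documented xbps behaviour): the transfer
+    # statistics line containing "avg rate" only appears when new repodata
+    # was actually downloaded, so its presence is used as the change signal.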
+    return "avg rate" in stdout
+
+
+def upgrade_xbps(module, xbps_path):
+ cmdupgradexbps = "%s -uy xbps" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg='Could not upgrade xbps itself')
+
+
+def upgrade(module, xbps_path):
+    """Returns true if the full upgrade succeeds"""
+ cmdupgrade = "%s -uy" % (xbps_path['install'])
+ cmdneedupgrade = "%s -un" % (xbps_path['install'])
+
+ rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
+ if rc == 0:
+        if len(stdout.splitlines()) == 0:
+ module.exit_json(changed=False, msg='Nothing to upgrade')
+ elif module.check_mode:
+ module.exit_json(changed=True, msg='Would have performed upgrade')
+ else:
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded')
+ elif rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-upgrade again
+ module.params['upgrade_xbps'] = False
+ upgrade(module, xbps_path)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.fail_json(msg="Could not upgrade")
+
+
+def remove_packages(module, xbps_path, packages):
+ """Returns true if package removal succeeds"""
+ changed_packages = []
+    # Use a for loop so that, on error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, xbps_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s -y %s" % (xbps_path['remove'], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ changed_packages.append(package)
+
+ if len(changed_packages) > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" %
+ len(changed_packages), packages=changed_packages)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, xbps_path, state, packages):
+ """Returns true if package install succeeds."""
+ toInstall = []
+    for package in packages:
+        # If the package is installed and state == present, or state == latest
+        # and the package is up-to-date, then skip it
+ installed, updated = query_package(module, xbps_path, package)
+ if installed and (state == 'present' or
+ (state == 'latest' and updated)):
+ continue
+
+ toInstall.append(package)
+
+ if len(toInstall) == 0:
+ module.exit_json(changed=False, msg="Nothing to Install")
+
+ cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
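+    # rc 16 is assumed to be xbps' EBUSY-style signal that xbps itself must
+    # be upgraded before anything else can be installed (see upgrade_xbps).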
+ if rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-update again
+ module.params['upgrade_xbps'] = False
+ install_packages(module, xbps_path, state, packages)
+    elif rc != 0 and not (state == 'latest' and rc == 17):
+        module.fail_json(msg="failed to install %s" % (" ".join(toInstall)))
+
+ module.exit_json(changed=True, msg="installed %s package(s)"
+ % (len(toInstall)),
+ packages=toInstall)
+
+
+def check_packages(module, xbps_path, packages, state):
+ """Returns change status of command"""
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, xbps_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state),
+ packages=would_be_changed)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state,
+ packages=[])
+
+
+def update_cache(module, xbps_path, upgrade_planned):
+ """Update package cache"""
+ if module.check_mode:
+ if upgrade_planned:
+ return
+ module.exit_json(
+ changed=True, msg='Would have updated the package cache'
+ )
+ changed = update_package_db(module, xbps_path)
+ if not upgrade_planned:
+ module.exit_json(changed=changed, msg=(
+ 'Updated the package master lists' if changed
+ else 'Package list already up to date'
+ ))
+
+
+def main():
+    """Dispatch to the appropriate xbps operation based on the parameters"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'installed',
+ 'latest', 'absent',
+ 'removed']),
+ recurse=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'),
+ upgrade=dict(default=False, type='bool'),
+ update_cache=dict(default=True, aliases=['update-cache'],
+ type='bool'),
+ upgrade_xbps=dict(default=True, type='bool')
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ supports_check_mode=True)
+
+ xbps_path = dict()
+ xbps_path['install'] = module.get_bin_path('xbps-install', True)
+ xbps_path['query'] = module.get_bin_path('xbps-query', True)
+ xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
+
+    if not os.path.exists(xbps_path['install']):
+        module.fail_json(msg="cannot find xbps-install at path %s"
+                         % (xbps_path['install']))
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_cache(module, xbps_path, (p['name'] or p['upgrade']))
+
+ if p['upgrade']:
+ upgrade(module, xbps_path)
+
+ if p['name']:
+ pkgs = p['name']
+
+ if module.check_mode:
+ check_packages(module, xbps_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, xbps_path, p['state'], pkgs)
+ elif p['state'] == 'absent':
+ remove_packages(module, xbps_path, pkgs)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py
new file mode 100644
index 00000000..25923cb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xenserver_facts
+short_description: Get facts reported on xenserver
+description:
+ - Reads data out of XenAPI, can be used instead of multiple xe commands.
+author:
+ - Andy Hill (@andyhky)
+ - Tim Rupp (@caphrim007)
+ - Robin Lee (@cheese)
+options: {}
+'''
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+ community.general.xenserver_facts:
+
+- name: Print running VMs
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_items: "{{ xs_vms.keys() }}"
+ when: xs_vms[item]['power_state'] == "Running"
+
+# Which will print:
+#
+# TASK: [Print running VMs] ***********************************************************
+# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+# "item": "Control domain on host: 10.0.13.22",
+# "msg": "Control domain on host: 10.0.13.22"
+# }
+'''
+
+
+HAVE_XENAPI = False
+try:
+ import XenAPI
+ HAVE_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+
+
+class XenServerFacts:
+ def __init__(self):
+ self.codes = {
+ '5.5.0': 'george',
+ '5.6.100': 'oxford',
+ '6.0.0': 'boston',
+ '6.1.0': 'tampa',
+ '6.2.0': 'clearwater'
+ }
+
+ @property
+ def version(self):
+ result = distro.linux_distribution()[1]
+ return result
+
+ @property
+ def codename(self):
+ if self.version in self.codes:
+ result = self.codes[self.version]
+ else:
+ result = None
+
+ return result
+
+
+def get_xenapi_session():
+ session = XenAPI.xapi_local()
+ session.xenapi.login_with_password('', '')
+ return session
+
+
+def get_networks(session):
+ recs = session.xenapi.network.get_all_records()
+ networks = change_keys(recs, key='name_label')
+ return networks
+
+
+def get_pifs(session):
+ recs = session.xenapi.PIF.get_all_records()
+ pifs = change_keys(recs, key='uuid')
+ xs_pifs = {}
+ devicenums = range(0, 7)
+ for pif in pifs.values():
+ for eth in devicenums:
+ interface_name = "eth%s" % (eth)
+ bond_name = interface_name.replace('eth', 'bond')
+ if pif['device'] == interface_name:
+ xs_pifs[interface_name] = pif
+ elif pif['device'] == bond_name:
+ xs_pifs[bond_name] = pif
+ return xs_pifs
+
+
+def get_vlans(session):
+ recs = session.xenapi.VLAN.get_all_records()
+ return change_keys(recs, key='tag')
+
+
+def change_keys(recs, key='uuid', filter_func=None):
+ """
+ Take a xapi dict, and make the keys the value of recs[ref][key].
+
+ Preserves the ref in rec['ref']
+
+ """
+ new_recs = {}
+
+ for ref, rec in recs.items():
+ if filter_func is not None and not filter_func(rec):
+ continue
+
+ for param_name, param_value in rec.items():
+            # param_value may be of type xmlrpc.client.DateTime,
+            # which is not simply convertible to str.
+            # Use the 'value' attr to get the str value,
+            # following an example in the xmlrpc.client.DateTime documentation
+ if hasattr(param_value, "value"):
+ rec[param_name] = param_value.value
+ new_recs[rec[key]] = rec
+ new_recs[rec[key]]['ref'] = ref
+
+ return new_recs
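+
+# Illustrative sketch with assumed data (not real XenAPI records): change_keys
+# on {'OpaqueRef:1': {'uuid': 'abc', 'device': 'eth0'}} returns
+# {'abc': {'uuid': 'abc', 'device': 'eth0', 'ref': 'OpaqueRef:1'}}.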
+
+
+def get_host(session):
+ """Get the host"""
+ host_recs = session.xenapi.host.get_all()
+ # We only have one host, so just return its entry
+ return session.xenapi.host.get_record(host_recs[0])
+
+
+def get_vms(session):
+ recs = session.xenapi.VM.get_all_records()
+ if not recs:
+ return None
+ vms = change_keys(recs, key='name_label')
+ return vms
+
+
+def get_srs(session):
+ recs = session.xenapi.SR.get_all_records()
+ if not recs:
+ return None
+ srs = change_keys(recs, key='name_label')
+ return srs
+
+
+def main():
+ module = AnsibleModule({})
+
+ if not HAVE_XENAPI:
+        module.fail_json(changed=False, msg="the XenAPI Python library is required for this module")
+
+ obj = XenServerFacts()
+ try:
+ session = get_xenapi_session()
+ except XenAPI.Failure as e:
+ module.fail_json(msg='%s' % e)
+
+ data = {
+ 'xenserver_version': obj.version,
+ 'xenserver_codename': obj.codename
+ }
+
+ xs_networks = get_networks(session)
+ xs_pifs = get_pifs(session)
+ xs_vlans = get_vlans(session)
+ xs_vms = get_vms(session)
+ xs_srs = get_srs(session)
+
+ if xs_vlans:
+ data['xs_vlans'] = xs_vlans
+ if xs_pifs:
+ data['xs_pifs'] = xs_pifs
+ if xs_networks:
+ data['xs_networks'] = xs_networks
+
+ if xs_vms:
+ data['xs_vms'] = xs_vms
+
+ if xs_srs:
+ data['xs_srs'] = xs_srs
+
+ module.exit_json(ansible_facts=data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py
new file mode 100644
index 00000000..a9a5fb4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py
@@ -0,0 +1,1933 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest
+short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ modify various virtual machine components like network and disk, rename a virtual machine and
+ remove a virtual machine with associated components.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+  Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+  U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no)
+  which requires the XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. parameters, is supported on
+  XenServer 7.0 or newer for Windows guests by using the official XenServer Guest agent support for network configuration. The module will try to
+  detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since the XenServer Guest
+  agent only supports None and Static types of network configuration, where None means a DHCP configured interface, C(networks.type) and C(networks.type6)
+  values C(none) and C(dhcp) have the same effect. More info here:
+  U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
+- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to the xenstore
+  C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using the C(xenstore ls) and C(xenstore read) tools on \*nix guests or through
+  the WMI interface on Windows guests. They can also be found in the VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
+  to implement boot time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters.
+  Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any
+  parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
+  useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
+  U(https://support.citrix.com/article/CTX226713)'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+ - If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
+ - If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
+ - If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
+ - If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
+ type: str
+ default: present
+ choices: [ present, absent, poweredon ]
+ name:
+ description:
+ - Name of the VM to work with.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ required: yes
+ aliases: [ name_label ]
+ name_desc:
+ description:
+ - VM description.
+ type: str
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
+ type: str
+ template:
+ description:
+ - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
+    - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with the same name are found.
+    - In case of multiple templates/VMs/snapshots with the same name, use C(template_uuid) to uniquely specify the source template.
+ - If VM already exists, this setting will be ignored.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ template_src ]
+ template_uuid:
+ description:
+ - UUID of a template, an existing VM or a snapshot that should be used to create VM.
+ - It is required if template name is not unique.
+ type: str
+ is_template:
+ description:
+ - Convert VM to template.
+ type: bool
+ default: no
+ folder:
+ description:
+ - Destination folder for VM.
+ - This parameter is case sensitive.
+ - 'Example:'
+ - ' folder: /folder1/folder2'
+ type: str
+ hardware:
+ description:
+ - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
+ - 'Valid parameters are:'
+ - ' - C(num_cpus) (integer): Number of CPUs.'
+ - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).'
+ - ' - C(memory_mb) (integer): Amount of memory in MB.'
+ type: dict
+ disks:
+ description:
+ - A list of disks to add to VM.
+ - All parameters are case sensitive.
+ - Removing or detaching existing disks of VM is not supported.
+ - 'Required parameters per entry:'
+ - ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.'
+ - 'Optional parameters per entry:'
+ - ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.'
+ - ' - C(name_desc) (string): Disk description.'
+ - ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.'
+ - ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.'
+ type: list
+ elements: dict
+ aliases: [ disk ]
+ cdrom:
+ description:
+ - A CD-ROM configuration for the VM.
+ - All parameters are case sensitive.
+ - 'Valid parameters are:'
+ - ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.'
+ - ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)).
+ Required if C(type) is set to C(iso).'
+ type: dict
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - All parameters are case sensitive.
+ - 'Required parameters per entry:'
+ - ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.'
+ - 'Optional parameters per entry (used for VM hardware):'
+ - ' - C(mac) (string): Customize MAC address of the interface.'
+ - 'Optional parameters per entry (used for OS customization):'
+ - ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format <IPv4 address>/<prefix> instead of using C(netmask).'
+ - ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.'
+ - ' - C(gateway) (string): Static IPv4 gateway.'
+ - ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format <IPv6 address>/<prefix>.'
+ - ' - C(gateway6) (string): Static IPv6 gateway.'
+ type: list
+ elements: dict
+ aliases: [ network ]
+ home_server:
+ description:
+ - Name of a XenServer host that will be a Home Server for the VM.
+ - This parameter is case sensitive.
+ type: str
+ custom_params:
+ description:
+ - Define a list of custom VM params to set on VM.
+    - Useful for advanced users familiar with managing VM params through the xe CLI.
+ - A custom value object takes two fields C(key) and C(value) (see example below).
+ type: list
+ elements: dict
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+    - 'By default, the module will wait indefinitely for the VM to acquire an IP address if C(wait_for_ip_address: yes).'
+    - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+    - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+ linked_clone:
+ description:
+    - Whether to create a Linked Clone from the template, existing VM or snapshot. If C(no), a full copy will be created.
+ - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter.
+ type: bool
+ default: no
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+ - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a VM from a template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: CentOS 7
+ disks:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ memory_mb: 512
+ cdrom:
+ type: iso
+ iso_name: guest-tools.iso
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: yes
+ delegate_to: localhost
+ register: deploy
+
+- name: Create a VM template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_6
+ is_template: yes
+ disk:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ memory_mb: 512
+ num_cpus: 1
+ delegate_to: localhost
+ register: deploy
+
+- name: Rename a VM (requires the VM's UUID)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a VM by UUID
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ delegate_to: localhost
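+
+# Hypothetical example (the VM name is illustrative): a VM that is still
+# running can only be removed when force is set, as described in the force
+# option above.
+- name: Forcibly remove a running VM
+  community.general.xenserver_guest:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    name: testvm_9
+    state: absent
+    force: yes
+  delegate_to: localhost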
+
+- name: Modify custom params (boot order)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_8
+ state: present
+ custom_params:
+ - key: HVM_boot_params
+ value: { "order": "ndc" }
+ delegate_to: localhost
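+
+# Hypothetical example (the VM name is illustrative): setting cdrom.type to
+# none ejects any inserted ISO, leaving the CD-ROM device present but empty,
+# as described in the cdrom option above.
+- name: Eject ISO from VM CD-ROM drive
+  community.general.xenserver_guest:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    name: testvm_8
+    cdrom:
+      type: none
+  delegate_to: localhost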
+
+- name: Customize network parameters
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_10
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100/24
+ gateway: 192.168.1.1
+ - type: dhcp
+ delegate_to: localhost
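+
+# Hypothetical example (the VM name is illustrative): combining
+# wait_for_ip_address with state_change_timeout bounds the otherwise
+# indefinite wait for an IP address, as described in the options above.
+- name: Power on a VM and wait up to 10 minutes for an IP address
+  community.general.xenserver_guest:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    name: testvm_2
+    state: poweredon
+    wait_for_ip_address: yes
+    state_change_timeout: 600
+  delegate_to: localhost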
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+changes:
+ description: Detected or made changes to VM
+ returned: always
+ type: list
+ sample: [
+ {
+ "hardware": [
+ "num_cpus"
+ ]
+ },
+ {
+ "disks_changed": [
+ [],
+ [
+ "size"
+ ]
+ ]
+ },
+ {
+ "disks_new": [
+ {
+ "name": "new-disk",
+ "name_desc": "",
+ "position": 2,
+ "size_gb": "4",
+ "vbd_userdevice": "2"
+ }
+ ]
+ },
+ {
+ "cdrom": [
+ "type",
+ "iso_name"
+ ]
+ },
+ {
+ "networks_changed": [
+ [
+ "mac"
+ ],
+ ]
+ },
+ {
+ "networks_new": [
+ {
+ "name": "Pool-wide network associated with eth2",
+ "position": 1,
+ "vif_device": "1"
+ }
+ ]
+ },
+ "need_poweredoff"
+ ]
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
+ is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
+ is_valid_ip6_addr, is_valid_ip6_prefix)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def exists(self):
+ """Returns True if VM exists, else False."""
+        return self.vm_ref is not None
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+ def deploy(self):
+ """Deploys new VM from template."""
+ # Safety check.
+ if self.exists():
+ self.module.fail_json(msg="Called deploy on existing VM!")
+
+ try:
+ templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
+ msg_prefix="VM deploy: ")
+
+ # Is this an existing running VM?
+ if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
+ self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")
+
+            # Find an SR we can use for VM.copy(). We use the SR of the first
+            # disk if specified, or the default SR otherwise.
+ disk_params_list = self.module.params['disks']
+
+ sr_ref = None
+
+ if disk_params_list:
+ disk_params = disk_params_list[0]
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+ sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM deploy disks[0]: ")
+
+ if not sr_ref:
+ if self.default_sr_ref != "OpaqueRef:NULL":
+ sr_ref = self.default_sr_ref
+ else:
+ self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")
+
+ # VM name could be an empty string which is bad.
+ if self.module.params['name'] is not None and not self.module.params['name']:
+ self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Now we can instantiate VM. We use VM.clone for linked_clone and
+ # VM.copy for non linked_clone.
+ if self.module.params['linked_clone']:
+ self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
+ else:
+ self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)
+
+ # Description is copied over from template so we reset it.
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")
+
+ # If template is one of built-in XenServer templates, we have to
+ # do some additional steps.
+ # Note: VM.get_is_default_template() is supported from XenServer 7.2
+ # onward so we use an alternative way.
+ templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)
+
+ if "default_template" in templ_other_config and templ_other_config['default_template']:
+ # other_config of built-in XenServer templates have a key called
+ # 'disks' with the following content:
+ # disks: <provision><disk bootable="true" device="0" size="10737418240" sr="" type="system"/></provision>
+            # This value of other_config is copied to the cloned or copied VM
+            # and it prevents provisioning of the VM because sr is not specified and
+ # XAPI returns an error. To get around this, we remove the
+ # 'disks' key and add disks to VM later ourselves.
+ vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)
+
+ if "disks" in vm_other_config:
+ del vm_other_config['disks']
+
+ self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)
+
+ # At this point we have VM ready for provisioning.
+ self.xapi_session.xenapi.VM.provision(self.vm_ref)
+
+ # After provisioning we can prepare vm_params for reconfigure().
+ self.gather_params()
+
+ # VM is almost ready. We just need to reconfigure it...
+ self.reconfigure()
+
+ # Power on VM if needed.
+ if self.module.params['state'] == "poweredon":
+ self.set_power_state("poweredon")
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def reconfigure(self):
+ """Reconfigures an existing VM.
+
+ Returns:
+ list: parameters that were reconfigured.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called reconfigure on non existing VM!")
+
+ config_changes = self.get_changes()
+
+ vm_power_state_save = self.vm_params['power_state'].lower()
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return config_changes
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
+ self.set_power_state("shutdownguest")
+
+ try:
+ for change in config_changes:
+ if isinstance(change, six.string_types):
+ if change == "name":
+ self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
+ elif change == "name_desc":
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
+ elif change == "folder":
+ self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
+
+ if self.module.params['folder']:
+ self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
+ elif change == "home_server":
+ if self.module.params['home_server']:
+ host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
+ else:
+ host_ref = "OpaqueRef:NULL"
+
+ self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
+ elif isinstance(change, dict):
+ if change.get('hardware'):
+ for hardware_change in change['hardware']:
+ if hardware_change == "num_cpus":
+ num_cpus = int(self.module.params['hardware']['num_cpus'])
+
+ if num_cpus < int(self.vm_params['VCPUs_at_startup']):
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ else:
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ elif hardware_change == "num_cpu_cores_per_socket":
+ self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
+ num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
+
+ if num_cpu_cores_per_socket > 1:
+ self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
+ elif hardware_change == "memory_mb":
+ memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
+ vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
+
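+                                # VM.set_memory_limits expects (static_min, static_max,
+                                # dynamic_min, dynamic_max), all in bytes (hence the
+                                # MB * 1048576 conversion above).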
+ self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
+ elif change.get('disks_changed'):
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+ position = 0
+
+ for disk_change_list in change['disks_changed']:
+ for disk_change in disk_change_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
+
+ if disk_change == "name":
+ self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
+ elif disk_change == "name_desc":
+ self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
+ elif disk_change == "size":
+ self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
+ "VM reconfigure disks[%s]: " % position)))
+
+ position += 1
+ elif change.get('disks_new'):
+ for position, disk_userdevice in change['disks_new']:
+ disk_params = self.module.params['disks'][position]
+
+ disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
+ disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
+
+ if disk_params.get('sr_uuid'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
+ elif disk_params.get('sr'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
+ else:
+ sr_ref = self.default_sr_ref
+
+ disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
+
+ new_disk_vdi = {
+ "name_label": disk_name,
+ "name_description": disk_name_desc,
+ "SR": sr_ref,
+ "virtual_size": disk_size,
+ "type": "user",
+ "sharable": False,
+ "read_only": False,
+ "other_config": {},
+ }
+
+ new_disk_vbd = {
+ "VM": self.vm_ref,
+ "VDI": None,
+ "userdevice": disk_userdevice,
+ "bootable": False,
+ "mode": "RW",
+ "type": "Disk",
+ "empty": False,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
+ vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)
+
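+                            # Hotplug the new disk if the VM is currently running.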
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VBD.plug(vbd_ref_new)
+
+ elif change.get('cdrom'):
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If there is no CD present, we have to create one.
+ if not vm_cdrom_params_list:
+                        # We will try to place the cdrom at userdevice position
+                        # 3 (which is the default) if it is not already occupied,
+                        # else we will place it at the first allowed position.
+ cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if "3" in cdrom_userdevices_allowed:
+ cdrom_userdevice = "3"
+ else:
+ cdrom_userdevice = cdrom_userdevices_allowed[0]
+
+ cdrom_vbd = {
+ "VM": self.vm_ref,
+ "VDI": "OpaqueRef:NULL",
+ "userdevice": cdrom_userdevice,
+ "bootable": False,
+ "mode": "RO",
+ "type": "CD",
+ "empty": True,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
+ else:
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])
+
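+                        # An empty CD-ROM VBD is one with no ISO currently inserted.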
+ cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)
+
+ for cdrom_change in change['cdrom']:
+ if cdrom_change == "type":
+ cdrom_type = self.module.params['cdrom']['type']
+
+ if cdrom_type == "none" and not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+ elif cdrom_type == "host":
+ # Unimplemented!
+ pass
+
+ elif cdrom_change == "iso_name":
+ if not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+
+ cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
+ self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
+ elif change.get('networks_changed'):
+ position = 0
+
+ for network_change_list in change['networks_changed']:
+ if network_change_list:
+ vm_vif_params = self.vm_params['VIFs'][position]
+ network_params = self.module.params['networks'][position]
+
+ vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
+ network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])
+
+ vif_recreated = False
+
+ if "name" in network_change_list or "mac" in network_change_list:
+ # To change network or MAC, we destroy old
+ # VIF and then create a new one with changed
+ # parameters. That's how XenCenter does it.
+
+ # Copy all old parameters to new VIF record.
+ vif = {
+ "device": vm_vif_params['device'],
+ "network": network_ref,
+ "VM": vm_vif_params['VM'],
+ "MAC": vm_vif_params['MAC'],
+ "MTU": vm_vif_params['MTU'],
+ "other_config": vm_vif_params['other_config'],
+ "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
+ "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
+ "locking_mode": vm_vif_params['locking_mode'],
+ "ipv4_allowed": vm_vif_params['ipv4_allowed'],
+ "ipv6_allowed": vm_vif_params['ipv6_allowed'],
+ }
+
+ if "name" in network_change_list:
+ network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+ vif['network'] = network_ref_new
+ vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
+
+ if "mac" in network_change_list:
+ vif['MAC'] = network_params['mac'].lower()
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.unplug(vif_ref)
+
+ self.xapi_session.xenapi.VIF.destroy(vif_ref)
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ vif_ref = vif_ref_new
+ vif_recreated = True
+
+ if self.vm_params['customization_agent'] == "native":
+ vif_reconfigure_needed = False
+
+ if "type" in network_change_list:
+ network_type = network_params['type'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type = vm_vif_params['ipv4_configuration_mode']
+
+ if "ip" in network_change_list:
+ network_ip = network_params['ip']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses']:
+ network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
+ else:
+ network_ip = ""
+
+ if "prefix" in network_change_list:
+ network_prefix = "/%s" % network_params['prefix']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ else:
+ network_prefix = ""
+
+ if "gateway" in network_change_list:
+ network_gateway = network_params['gateway']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway = vm_vif_params['ipv4_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
+ "%s%s" % (network_ip, network_prefix), network_gateway)
+
+ vif_reconfigure_needed = False
+
+ if "type6" in network_change_list:
+ network_type6 = network_params['type6'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type6 = vm_vif_params['ipv6_configuration_mode']
+
+ if "ip6" in network_change_list:
+ network_ip6 = network_params['ip6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses']:
+ network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
+ else:
+ network_ip6 = ""
+
+ if "prefix6" in network_change_list:
+ network_prefix6 = "/%s" % network_params['prefix6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
+ else:
+ network_prefix6 = ""
+
+ if "gateway6" in network_change_list:
+ network_gateway6 = network_params['gateway6']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway6 = vm_vif_params['ipv6_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
+ "%s%s" % (network_ip6, network_prefix6), network_gateway6)
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vif_device = vm_vif_params['device']
+
+                            # A user could have manually changed the network
+                            # or mac, e.g. through XenCenter, and then also
+                            # made those changes in the playbook manually.
+                            # In that case, the module will not detect any
+                            # changes and the info in xenstore_data will
+                            # become stale. For that reason we always
+                            # update name and mac in xenstore_data.
+
+ # Since we handle name and mac differently,
+ # we have to remove them from
+ # network_change_list.
+ network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
+
+ for network_change in network_change_list_tmp + ['name', 'mac']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change))
+
+ if network_params.get('name'):
+ network_name = network_params['name']
+ else:
+ network_name = vm_vif_params['network']['name_label']
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
+
+ if network_params.get('mac'):
+ network_mac = network_params['mac'].lower()
+ else:
+ network_mac = vm_vif_params['MAC'].lower()
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
+
+ for network_change in network_change_list_tmp:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change),
+ network_params[network_change])
+
+ position += 1
+ elif change.get('networks_new'):
+ for position, vif_device in change['networks_new']:
+ network_params = self.module.params['networks'][position]
+
+ network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+
+ network_name = network_params['name']
+ network_mac = network_params['mac'] if network_params.get('mac') else ""
+ network_type = network_params.get('type')
+ network_ip = network_params['ip'] if network_params.get('ip') else ""
+ network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
+ network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
+ network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
+ network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
+ network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
+
+ vif = {
+ "device": vif_device,
+ "network": network_ref,
+ "VM": self.vm_ref,
+ "MAC": network_mac,
+ "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
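+                            # Hotplug the new interface if the VM is currently running.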
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
+ "%s/%s" % (network_ip, network_prefix), network_gateway)
+
+ if network_type6 and network_type6 == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
+ "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
+ elif self.vm_params['customization_agent'] == "custom":
+ # We first have to remove any existing data
+ # from xenstore_data because there could be
+ # some old leftover data from some interface
+                            # that once occupied the same device location as
+ # our new interface.
+ for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
+
+ # We get MAC from VIF itself instead of
+ # networks.mac because it could be
+ # autogenerated.
+ vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
+
+ if network_type:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
+
+ if network_type == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip" % vif_device, network_ip)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix" % vif_device, network_prefix)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/netmask" % vif_device, network_netmask)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway" % vif_device, network_gateway)
+
+ if network_type6:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
+
+ if network_type6 == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip6" % vif_device, network_ip6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
+
+ elif change.get('custom_params'):
+ for position in change['custom_params']:
+ custom_param_key = self.module.params['custom_params'][position]['key']
+ custom_param_value = self.module.params['custom_params'][position]['value']
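+                        # E.g. key 'HVM_boot_params' results in a VM.set_HVM_boot_params XAPI call.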
+ self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
+
+ if self.module.params['is_template']:
+ self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
+ elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
+ self.set_power_state("poweredon")
+
+ # Gather new params after reconfiguration.
+ self.gather_params()
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return config_changes
+
+ def destroy(self):
+ """Removes an existing VM with associated disks"""
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called destroy on non existing VM!")
+
+ if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Make sure that VM is poweredoff before we can destroy it.
+ self.set_power_state("poweredoff")
+
+ try:
+ # Destroy VM!
+ self.xapi_session.xenapi.VM.destroy(self.vm_ref)
+
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # Destroy all VDIs associated with VM!
+ for vm_disk_params in vm_disk_params_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
+
+ self.xapi_session.xenapi.VDI.destroy(vdi_ref)
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_changes(self):
+ """Finds VM parameters that differ from specified ones.
+
+        This method builds a list of VM parameters (a mix of strings and
+        dictionaries) that differ from those specified in module parameters.
+
+ Returns:
+ list: VM parameters that differ from those specified in
+ module parameters.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called get_changes on non existing VM!")
+
+ need_poweredoff = False
+
+ if self.module.params['is_template']:
+ need_poweredoff = True
+
+ try:
+ # This VM could be a template or a snapshot. In that case we fail
+ # because we can't reconfigure them or it would just be too
+ # dangerous.
+ if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
+
+ if self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
+
+ # Let's build a list of parameters that changed.
+ config_changes = []
+
+ # Name could only differ if we found an existing VM by uuid.
+ if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
+ if self.module.params['name']:
+ config_changes.append('name')
+ else:
+ self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
+
+ if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
+ config_changes.append('name_desc')
+
+ # Folder parameter is found in other_config.
+ vm_other_config = self.vm_params['other_config']
+ vm_folder = vm_other_config.get('folder', '')
+
+ if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
+ config_changes.append('folder')
+
+ if self.module.params['home_server'] is not None:
+ if (self.module.params['home_server'] and
+ (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
+
+                    # Check existence only. Ignore return value.
+ get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
+ msg_prefix="VM check home_server: ")
+
+ config_changes.append('home_server')
+ elif not self.module.params['home_server'] and self.vm_params['affinity']:
+ config_changes.append('home_server')
+
+ config_changes_hardware = []
+
+ if self.module.params['hardware']:
+ num_cpus = self.module.params['hardware'].get('num_cpus')
+
+ if num_cpus is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpus = int(num_cpus)
+                    except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
+
+ if num_cpus < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
+
+ # We can use VCPUs_at_startup or VCPUs_max parameter. I'd
+ # say the former is the way to go but this needs
+ # confirmation and testing.
+ if num_cpus != int(self.vm_params['VCPUs_at_startup']):
+ config_changes_hardware.append('num_cpus')
+                    # For now, we don't support hotplugging so the VM has to be
+                    # in the powered off state to reconfigure.
+ need_poweredoff = True
+
+ num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
+
+ if num_cpu_cores_per_socket is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
+                    except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
+
+ if num_cpu_cores_per_socket < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
+
+ if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
+
+ vm_platform = self.vm_params['platform']
+ vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
+
+ if num_cpu_cores_per_socket != vm_cores_per_socket:
+ config_changes_hardware.append('num_cpu_cores_per_socket')
+                        # For now, we don't support hotplugging so the VM has to
+                        # be in the powered off state to reconfigure.
+ need_poweredoff = True
+
+ memory_mb = self.module.params['hardware'].get('memory_mb')
+
+ if memory_mb is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ memory_mb = int(memory_mb)
+                    except ValueError:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
+
+ if memory_mb < 1:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
+
+ # There are multiple memory parameters:
+ # - memory_dynamic_max
+ # - memory_dynamic_min
+ # - memory_static_max
+ # - memory_static_min
+ # - memory_target
+ #
+ # memory_target seems like a good candidate but it returns 0 for
+ # halted VMs so we can't use it.
+ #
+ # I decided to use memory_dynamic_max and memory_static_max
+ # and use whichever is larger. This strategy needs validation
+ # and testing.
+ #
+ # XenServer stores memory size in bytes so we need to divide
+ # it by 1024*1024 = 1048576.
+ if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
+ config_changes_hardware.append('memory_mb')
+                    # For now, we don't support hotplugging so the VM has to be
+                    # in the powered off state to reconfigure.
+ need_poweredoff = True
+
+ if config_changes_hardware:
+ config_changes.append({"hardware": config_changes_hardware})
+
+ config_changes_disks = []
+ config_new_disks = []
+
+ # Find allowed userdevices.
+ vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if self.module.params['disks']:
+ # Get the list of all disk. Filter out any CDs found.
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+                # The number of disks defined in module params has to be the same
+                # as or higher than the number of existing disks attached to the VM.
+ # We don't support removal or detachment of disks.
+ if len(self.module.params['disks']) < len(vm_disk_params_list):
+                    self.module.fail_json(msg="VM check disks: provided disks configuration has fewer disks than the target VM (%d < %d)!" %
+ (len(self.module.params['disks']), len(vm_disk_params_list)))
+
+ # Find the highest disk occupied userdevice.
+ if not vm_disk_params_list:
+ vm_disk_userdevice_highest = "-1"
+ else:
+ vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
+
+ for position in range(len(self.module.params['disks'])):
+ if position < len(vm_disk_params_list):
+ vm_disk_params = vm_disk_params_list[position]
+ else:
+ vm_disk_params = None
+
+ disk_params = self.module.params['disks'][position]
+
+ disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
+
+ disk_name = disk_params.get('name')
+
+ if disk_name is not None and not disk_name:
+ self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
+
+ # If this is an existing disk.
+ if vm_disk_params and vm_disk_params['VDI']:
+ disk_changes = []
+
+ if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
+ disk_changes.append('name')
+
+ disk_name_desc = disk_params.get('name_desc')
+
+ if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
+ disk_changes.append('name_desc')
+
+ if disk_size:
+ if disk_size > int(vm_disk_params['VDI']['virtual_size']):
+ disk_changes.append('size')
+ need_poweredoff = True
+ elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
+ self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
+ "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
+
+ config_changes_disks.append(disk_changes)
+ # If this is a new disk.
+ else:
+ if not disk_size:
+ self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+                            # Check existence only. Ignore return value.
+ get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM check disks[%s]: " % position)
+ elif self.default_sr_ref == 'OpaqueRef:NULL':
+ self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
+
+ if not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
+
+ disk_userdevice = None
+
+                        # We need to place a new disk right above the highest
+                        # placed existing disk to keep relative disk positions
+                        # paired with the disk specifications in module params.
+                        # That place must not be occupied by some other device,
+                        # such as a CD-ROM.
+ for userdevice in vbd_userdevices_allowed:
+ if int(userdevice) > int(vm_disk_userdevice_highest):
+ disk_userdevice = userdevice
+ vbd_userdevices_allowed.remove(userdevice)
+ vm_disk_userdevice_highest = userdevice
+ break
+
+ # If no place was found.
+ if disk_userdevice is None:
+                            # The highest occupied place could be a CD-ROM device,
+                            # so we have to include all devices regardless of
+                            # type when calculating the out-of-bounds position.
+ disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
+ self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
+
+ # For new disks we only track their position.
+ config_new_disks.append((position, disk_userdevice))
+
+ # We should append config_changes_disks to config_changes only
+ # if there is at least one changed disk, else skip.
+ for disk_change in config_changes_disks:
+ if disk_change:
+ config_changes.append({"disks_changed": config_changes_disks})
+ break
+
+ if config_new_disks:
+ config_changes.append({"disks_new": config_new_disks})
+
+ config_changes_cdrom = []
+
+ if self.module.params['cdrom']:
+ # Get the list of all CD-ROMs. Filter out any regular disks
+ # found. If we found no existing CD-ROM, we will create it
+ # later else take the first one found.
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If no existing CD-ROM is found, we will need to add one.
+ # We need to check if there is any userdevice allowed.
+ if not vm_cdrom_params_list and not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
+
+ cdrom_type = self.module.params['cdrom'].get('type')
+ cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
+
+ # If cdrom.iso_name is specified but cdrom.type is not,
+ # then set cdrom.type to 'iso', unless cdrom.iso_name is
+ # an empty string, in that case set cdrom.type to 'none'.
+ if not cdrom_type:
+ if cdrom_iso_name:
+ cdrom_type = "iso"
+ elif cdrom_iso_name is not None:
+ cdrom_type = "none"
+
+ self.module.params['cdrom']['type'] = cdrom_type
+
+ # If type changed.
+ if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
+ config_changes_cdrom.append('type')
+
+ if cdrom_type == "iso":
+ # Check if ISO exists.
+                    # Check existence only. Ignore return value.
+ get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
+ msg_prefix="VM check cdrom.iso_name: ")
+
+ # Is ISO image changed?
+ if (cdrom_iso_name and
+ (not vm_cdrom_params_list or
+ not vm_cdrom_params_list[0]['VDI'] or
+ cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
+ config_changes_cdrom.append('iso_name')
+
+ if config_changes_cdrom:
+ config_changes.append({"cdrom": config_changes_cdrom})
+
+ config_changes_networks = []
+ config_new_networks = []
+
+ # Find allowed devices.
+ vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
+
+ if self.module.params['networks']:
+                # The number of VIFs defined in module params has to be the same
+                # as or higher than the number of existing VIFs attached to the VM.
+ # We don't support removal of VIFs.
+ if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
+                self.module.fail_json(msg="VM check networks: provided networks configuration has fewer interfaces than the target VM (%d < %d)!" %
+ (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
+
+ # Find the highest occupied device.
+ if not self.vm_params['VIFs']:
+ vif_device_highest = "-1"
+ else:
+ vif_device_highest = self.vm_params['VIFs'][-1]['device']
+
+ for position in range(len(self.module.params['networks'])):
+ if position < len(self.vm_params['VIFs']):
+ vm_vif_params = self.vm_params['VIFs'][position]
+ else:
+ vm_vif_params = None
+
+ network_params = self.module.params['networks'][position]
+
+ network_name = network_params.get('name')
+
+ if network_name is not None and not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
+
+ if network_name:
+                        # Check existence only. Ignore return value.
+ get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
+ msg_prefix="VM check networks[%s]: " % position)
+
+ network_mac = network_params.get('mac')
+
+ if network_mac is not None:
+ network_mac = network_mac.lower()
+
+ if not is_mac(network_mac):
+ self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
+
+ # IPv4 reconfiguration.
+ network_type = network_params.get('type')
+ network_ip = network_params.get('ip')
+ network_netmask = network_params.get('netmask')
+ network_prefix = None
+
+ # If networks.ip is specified and networks.type is not,
+ # then set networks.type to 'static'.
+ if not network_type and network_ip:
+ network_type = "static"
+
+ # XenServer natively supports only 'none' and 'static'
+ # type with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
+ network_type = "none"
+
+ if network_type and network_type == "static":
+ if network_ip is not None:
+ network_ip_split = network_ip.split('/')
+ network_ip = network_ip_split[0]
+
+ if network_ip and not is_valid_ip_addr(network_ip):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
+
+ if len(network_ip_split) > 1:
+ network_prefix = network_ip_split[1]
+
+ if not is_valid_ip_prefix(network_prefix):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
+
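+                            # Derive the prefix from the netmask (or vice versa) so both
+                            # are available later; skip_check=True because the source
+                            # value is validated before each conversion.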
+ if network_netmask is not None:
+ if not is_valid_ip_netmask(network_netmask):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
+
+ network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
+ elif network_prefix is not None:
+ network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
+
+ # If any parameter is overridden at this point, update it.
+ if network_type:
+ network_params['type'] = network_type
+
+ if network_ip:
+ network_params['ip'] = network_ip
+
+ if network_netmask:
+ network_params['netmask'] = network_netmask
+
+ if network_prefix:
+ network_params['prefix'] = network_prefix
+
+ network_gateway = network_params.get('gateway')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway and not is_valid_ip_addr(network_gateway):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+
+ # IPv6 reconfiguration.
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params.get('ip6')
+ network_prefix6 = None
+
+ # If networks.ip6 is specified and networks.type6 is not,
+ # then set networks.type6 to 'static'.
+ if not network_type6 and network_ip6:
+ network_type6 = "static"
+
+ # XenServer natively supports only 'none' and 'static'
+ # type with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
+ network_type6 = "none"
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 is not None:
+ network_ip6_split = network_ip6.split('/')
+ network_ip6 = network_ip6_split[0]
+
+ if network_ip6 and not is_valid_ip6_addr(network_ip6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
+
+ if len(network_ip6_split) > 1:
+ network_prefix6 = network_ip6_split[1]
+
+ if not is_valid_ip6_prefix(network_prefix6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
+
+ # If any parameter is overridden at this point, update it.
+ if network_type6:
+ network_params['type6'] = network_type6
+
+ if network_ip6:
+ network_params['ip6'] = network_ip6
+
+ if network_prefix6:
+ network_params['prefix6'] = network_prefix6
+
+ network_gateway6 = network_params.get('gateway6')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
+
+ # If this is an existing VIF.
+ if vm_vif_params and vm_vif_params['network']:
+ network_changes = []
+
+ if network_name and network_name != vm_vif_params['network']['name_label']:
+ network_changes.append('name')
+
+ if network_mac and network_mac != vm_vif_params['MAC'].lower():
+ network_changes.append('mac')
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
+ network_changes.append('type')
+
+ if network_type and network_type == "static":
+ if network_ip and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
+ network_changes.append('ip')
+
+ if network_prefix and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+
+ if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
+ network_changes.append('gateway')
+
+ if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
+ network_changes.append('type6')
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
+ network_changes.append('ip6')
+
+ if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
+ network_changes.append('prefix6')
+
+ if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
+ network_changes.append('gateway6')
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = self.vm_params['xenstore_data']
+
+ if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
+ network_changes.append('type')
+ need_poweredoff = True
+
+ if network_type and network_type == "static":
+ if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
+ network_changes.append('ip')
+ need_poweredoff = True
+
+ if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+ need_poweredoff = True
+
+ if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
+ network_changes.append('type6')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
+ network_changes.append('ip6')
+ need_poweredoff = True
+
+ if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
+ network_changes.append('prefix6')
+ need_poweredoff = True
+
+ if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway6')
+ need_poweredoff = True
+
+ config_changes_networks.append(network_changes)
+ # If this is a new VIF.
+ else:
+ if not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
+
+ if network_type and network_type == "static" and network_ip and not network_netmask:
+ self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
+
+ if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
+ self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
+
+                # Restart is needed if we are adding a new network
+                # interface with IP/gateway parameters specified
+                # and the custom agent is used.
+ if self.vm_params['customization_agent'] == "custom":
+ for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ if network_params.get(parameter):
+ need_poweredoff = True
+ break
+
+ if not vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
+
+                # We need to place a new network interface right above the
+                # highest placed existing interface so that relative positions
+                # stay aligned with the network interface specifications in
+                # module params.
+ vif_device = str(int(vif_device_highest) + 1)
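+                # For example, if the highest existing VIF device is "2",
+                # the new interface is assigned device "3".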
+
+ if vif_device not in vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
+
+ vif_devices_allowed.remove(vif_device)
+ vif_device_highest = vif_device
+
+ # For new VIFs we only track their position.
+ config_new_networks.append((position, vif_device))
+
+ # We should append config_changes_networks to config_changes only
+ # if there is at least one changed network, else skip.
+ for network_change in config_changes_networks:
+ if network_change:
+ config_changes.append({"networks_changed": config_changes_networks})
+ break
+
+ if config_new_networks:
+ config_changes.append({"networks_new": config_new_networks})
+
+ config_changes_custom_params = []
+
+ if self.module.params['custom_params']:
+ for position in range(len(self.module.params['custom_params'])):
+ custom_param = self.module.params['custom_params'][position]
+
+ custom_param_key = custom_param['key']
+ custom_param_value = custom_param['value']
+
+ if custom_param_key not in self.vm_params:
+ self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
+
+ if custom_param_value != self.vm_params[custom_param_key]:
+ # We only need to track custom param position.
+ config_changes_custom_params.append(position)
+
+ if config_changes_custom_params:
+ config_changes.append({"custom_params": config_changes_custom_params})
+
+ if need_poweredoff:
+ config_changes.append('need_poweredoff')
+
+ return config_changes
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_normalized_disk_size(self, disk_params, msg_prefix=""):
+ """Parses disk size parameters and returns disk size in bytes.
+
+ This method tries to parse disk size module parameters. It fails
+ with an error message if size cannot be parsed.
+
+ Args:
+            disk_params (dict): A dictionary with disk parameters.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ int: disk size in bytes if disk size is successfully parsed or
+ None if no disk size parameters were found.
+ """
+        # There should be only a single size spec, but we make a list of all
+        # size specs just in case. Priority is given to 'size'; if it is not
+        # found, we check for 'size_tb', 'size_gb', 'size_mb' etc. and use the
+        # first one found.
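+        # Illustrative conversions performed below (values are examples):
+        #   {'size': '10gb'}   -> 10 * 1024**3 bytes
+        #   {'size_mb': '512'} -> 512 * 1024**2 bytes
+        #   {'size': '1.5 tb'} -> int(1.5 * 1024**4) bytes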
+ disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
+
+ if disk_size_spec:
+ try:
+ # size
+ if "size" in disk_size_spec:
+ size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
+ disk_size_m = size_regex.match(disk_params['size'])
+
+ if disk_size_m:
+ size = disk_size_m.group(1)
+ unit = disk_size_m.group(2)
+ else:
+ raise ValueError
+ # size_tb, size_gb, size_mb, size_kb, size_b
+ else:
+ size = disk_params[disk_size_spec[0]]
+ unit = disk_size_spec[0].split('_')[-1]
+
+ if not unit:
+ unit = "b"
+ else:
+ unit = unit.lower()
+
+ if re.match(r'\d+\.\d+', size):
+ # We found float value in string, let's typecast it.
+ if unit == "b":
+ # If we found float but unit is bytes, we get the integer part only.
+ size = int(float(size))
+ else:
+ size = float(size)
+ else:
+ # We found int value in string, let's typecast it.
+ size = int(size)
+
+ if not size or size < 0:
+ raise ValueError
+
+ except (TypeError, ValueError, NameError):
+ # Common failure
+ self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
+
+ disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
+
+ if unit in disk_units:
+ return int(size * (1024 ** disk_units[unit]))
+ else:
+ self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
+ (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
+ else:
+ return None
+
+ @staticmethod
+ def get_cdrom_type(vm_cdrom_params):
+ """Returns VM CD-ROM type."""
+        # TODO: implement support for detecting the 'host' CD-ROM type. No
+        # server to test this on at the moment.
+ if vm_cdrom_params['empty']:
+ return "none"
+ else:
+ return "iso"
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'poweredon']),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ uuid=dict(type='str'),
+ template=dict(type='str', aliases=['template_src']),
+ template_uuid=dict(type='str'),
+ is_template=dict(type='bool', default=False),
+ folder=dict(type='str'),
+ hardware=dict(
+ type='dict',
+ options=dict(
+ num_cpus=dict(type='int'),
+ num_cpu_cores_per_socket=dict(type='int'),
+ memory_mb=dict(type='int'),
+ ),
+ ),
+ disks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ size=dict(type='str'),
+ size_tb=dict(type='str'),
+ size_gb=dict(type='str'),
+ size_mb=dict(type='str'),
+ size_kb=dict(type='str'),
+ size_b=dict(type='str'),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ sr=dict(type='str'),
+ sr_uuid=dict(type='str'),
+ ),
+ aliases=['disk'],
+ mutually_exclusive=[
+ ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
+ ['sr', 'sr_uuid'],
+ ],
+ ),
+ cdrom=dict(
+ type='dict',
+ options=dict(
+ type=dict(type='str', choices=['none', 'iso']),
+ iso_name=dict(type='str'),
+ ),
+ required_if=[
+ ['type', 'iso', ['iso_name']],
+ ],
+ ),
+ networks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', aliases=['name_label']),
+ mac=dict(type='str'),
+ type=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip=dict(type='str'),
+ netmask=dict(type='str'),
+ gateway=dict(type='str'),
+ type6=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip6=dict(type='str'),
+ gateway6=dict(type='str'),
+ ),
+ aliases=['network'],
+ required_if=[
+ ['type', 'static', ['ip']],
+ ['type6', 'static', ['ip6']],
+ ],
+ ),
+ home_server=dict(type='str'),
+ custom_params=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ key=dict(type='str', required=True),
+ value=dict(type='raw', required=True),
+ ),
+ ),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ linked_clone=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ mutually_exclusive=[
+ ['template', 'template_uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ vm = XenServerVM(module)
+
+ # Find existing VM
+ if vm.exists():
+ if module.params['state'] == "absent":
+ vm.destroy()
+ result['changed'] = True
+ elif module.params['state'] == "present":
+ config_changes = vm.reconfigure()
+
+ if config_changes:
+ result['changed'] = True
+
+ # Make new disk and network changes more user friendly
+ # and informative.
+ for change in config_changes:
+ if isinstance(change, dict):
+ if change.get('disks_new'):
+ disks_new = []
+
+ for position, userdevice in change['disks_new']:
+ disk_new_params = {"position": position, "vbd_userdevice": userdevice}
+ disk_params = module.params['disks'][position]
+
+ for k in disk_params.keys():
+ if disk_params[k] is not None:
+ disk_new_params[k] = disk_params[k]
+
+ disks_new.append(disk_new_params)
+
+ if disks_new:
+ change['disks_new'] = disks_new
+
+ elif change.get('networks_new'):
+ networks_new = []
+
+ for position, device in change['networks_new']:
+ network_new_params = {"position": position, "vif_device": device}
+ network_params = module.params['networks'][position]
+
+ for k in network_params.keys():
+ if network_params[k] is not None:
+ network_new_params[k] = network_params[k]
+
+ networks_new.append(network_new_params)
+
+ if networks_new:
+ change['networks_new'] = networks_new
+
+ result['changes'] = config_changes
+
+ elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
+ result['changed'] = vm.set_power_state(module.params['state'])
+ elif module.params['state'] != "absent":
+ vm.deploy()
+ result['changed'] = True
+
+ if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+  U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+    - UUID of the VM to gather facts of. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
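+
+# An additional illustrative example (reusing the sample UUID shown below in
+# RETURN; substitute the UUID of your own VM):
+- name: Gather facts by UUID
+  community.general.xenserver_guest_info:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    uuid: e3c0b2d5-5f05-424e-479c-d3df8b3e7cda
+  delegate_to: localhost
+  register: facts_by_uuid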
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+  U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+    - UUID of the VM to gather facts of. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
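+
+# An additional illustrative example (reusing the sample UUID shown below in
+# RETURN; substitute the UUID of your own VM):
+- name: Gather facts by UUID
+  community.general.xenserver_guest_info:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    uuid: e3c0b2d5-5f05-424e-479c-d3df8b3e7cda
+  delegate_to: localhost
+  register: facts_by_uuid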
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
new file mode 100644
index 00000000..4a195ff5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_powerstate
+short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+  This module can be used to power on, power off, restart or suspend a virtual machine and gracefully reboot or shut down the guest OS of a virtual machine.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+  U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+    - Specify the state the VM should be in.
+    - If C(state) is set to a value other than C(present), the VM is transitioned into the required state and facts are returned.
+    - If C(state) is set to C(present), the VM is just checked for existence and facts are returned.
+ type: str
+ default: present
+ choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
+ name:
+ description:
+ - Name of the VM to manage.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+    - 'By default, the module will wait indefinitely for the VM to change state or acquire an IP address if C(wait_for_ip_address: yes).'
+    - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+    - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Power on VM
+ community.general.xenserver_guest_powerstate:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ state: powered-on
+ delegate_to: localhost
+ register: facts
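+
+# An additional illustrative example: gracefully shut down the guest OS,
+# waiting up to 120 seconds for the state change.
+- name: Shut down guest OS
+  community.general.xenserver_guest_powerstate:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    name: testvm_11
+    state: shutdown-guest
+    state_change_timeout: 120
+  delegate_to: localhost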
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "windows-template-testing-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "windows-template-testing-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "windows-template-testing",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']),
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Set VM power state.
+ if module.params['state'] != "present":
+ result['changed'] = vm.set_power_state(module.params['state'])
+
+ if module.params['wait_for_ip_address']:
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py
new file mode 100644
index 00000000..8d0700ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Joseph Benden <joe@benden.us>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: xfconf
+author:
+ - "Joseph Benden (@jbenden)"
+ - "Alexei Znamensky (@russoz)"
+short_description: Edit XFCE4 Configurations
+description:
+ - This module allows for the manipulation of Xfce 4 Configuration via
+ xfconf-query. Please see the xfconf-query(1) man pages for more details.
+options:
+ channel:
+ description:
+    - An Xfconf preference channel is a top-level tree key inside the
+      Xfconf repository that corresponds to the location where all
+      application properties/keys are stored. See man xfconf-query(1)
+ required: yes
+ type: str
+ property:
+ description:
+    - An Xfce preference key is an element in the Xfconf repository
+ that corresponds to an application preference. See man xfconf-query(1)
+ required: yes
+ type: str
+ value:
+ description:
+ - Preference properties typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". For array mode, use a list of values. See man xfconf-query(1)
+ type: list
+ elements: raw
+ value_type:
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ For array mode, use a list of types.
+ type: list
+ elements: str
+ choices: [ int, uint, bool, float, double, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the property/value.
+ choices: [ get, present, absent ]
+ default: "present"
+ force_array:
+ description:
+      - Force array mode even if there is only one element
+ type: bool
+ default: 'no'
+ aliases: ['array']
+ version_added: 1.0.0
+'''
+
+EXAMPLES = """
+- name: Change the DPI to "192"
+ xfconf:
+ channel: "xsettings"
+ property: "/Xft/DPI"
+ value_type: "int"
+ value: "192"
+
+- name: Set workspace names (4)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main', 'Work1', 'Work2', 'Tmp']
+
+- name: Set workspace names (1)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main']
+ force_array: yes
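+
+# An additional illustrative example: reset a property to its default value
+# (state=absent maps to xfconf-query --reset).
+- name: Reset workspace names to defaults
+  xfconf:
+    channel: xfwm4
+    property: /general/workspace_names
+    state: absent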
+"""
+
+RETURN = '''
+ channel:
+ description: The channel specified in the module parameters
+ returned: success
+ type: str
+ sample: "xsettings"
+ property:
+ description: The property specified in the module parameters
+ returned: success
+ type: str
+ sample: "/Xft/DPI"
+ value_type:
+ description:
+      - The type of the value that was changed (C(none) for the C(get) and
+        C(absent) states). Either a single string value or a list of strings
+        for array types.
+ returned: success
+ type: string or list of strings
+ sample: '"int" or ["str", "str", "str"]'
+ value:
+ description:
+ - The value of the preference key after executing the module. Either a
+ single string value or a list of strings for array types.
+ returned: success
+ type: string or list of strings
+ sample: '"192" or ["orange", "yellow", "violet"]'
+ previous_value:
+ description:
+ - The value of the preference key before executing the module (C(none) for
+ C(get) state). Either a single string value or a list of strings for array
+ types.
+ returned: success
+ type: string or list of strings
+ sample: '"96" or ["red", "blue", "green"]'
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+ ModuleHelper, CmdMixin, StateMixin, ArgFormat
+)
+
+
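+# Normalizes boolean-ish strings for xfconf-query: "True" -> "true",
+# "FALSE" -> "false"; anything else is returned unchanged.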
+def fix_bool(value):
+ vl = value.lower()
+ return vl if vl in ("true", "false") else value
+
+
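+# Illustrative behaviour of the formatter below, assuming the
+# ArgFormat.stars_deco decorator unpacks the (values, value_types) tuple:
+# values ('192',) with types ('int',) yield
+# ['--type', 'int', '--set', '192'].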
+@ArgFormat.stars_deco(1)
+def values_fmt(values, value_types):
+ result = []
+ for value, value_type in zip(values, value_types):
+ if value_type == 'bool':
+ value = fix_bool(value)
+ result.append('--type')
+ result.append('{0}'.format(value_type))
+ result.append('--set')
+ result.append('{0}'.format(value))
+ return result
+
+
+class XFConfException(Exception):
+ pass
+
+
+class XFConfProperty(CmdMixin, StateMixin, ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(default="present",
+ choices=("present", "get", "absent"),
+ type='str'),
+ channel=dict(required=True, type='str'),
+ property=dict(required=True, type='str'),
+ value_type=dict(required=False, type='list',
+ elements='str', choices=('int', 'uint', 'bool', 'float', 'double', 'string')),
+ value=dict(required=False, type='list', elements='raw'),
+ force_array=dict(default=False, type='bool', aliases=['array']),
+ ),
+ required_if=[('state', 'present', ['value', 'value_type'])],
+ required_together=[('value', 'value_type')],
+ supports_check_mode=True,
+ )
+
+ facts_name = "xfconf"
+ default_state = 'present'
+ command = 'xfconf-query'
+ command_args_formats = dict(
+ channel=dict(fmt=('--channel', '{0}'),),
+ property=dict(fmt=('--property', '{0}'),),
+ is_array=dict(fmt="--force-array", style=ArgFormat.BOOLEAN),
+ reset=dict(fmt="--reset", style=ArgFormat.BOOLEAN),
+ create=dict(fmt="--create", style=ArgFormat.BOOLEAN),
+ values_and_types=dict(fmt=values_fmt)
+ )
+
+ def update_xfconf_output(self, **kwargs):
+ self.update_output(**kwargs)
+ self.update_facts(**kwargs)
+
+ def __init_module__(self):
+ self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.module.params['property'],
+ self.module.params['channel'])
+ self.vars.previous_value = self._get()
+ self.update_xfconf_output(property=self.module.params['property'],
+ channel=self.module.params['channel'],
+ previous_value=None)
+
+ def process_command_output(self, rc, out, err):
+ if err.rstrip() == self.does_not:
+ return None
+ if rc or len(err):
+ raise XFConfException('xfconf-query failed with error (rc={0}): {1}'.format(rc, err))
+
+ result = out.rstrip()
+ if "Value is an array with" in result:
+ result = result.split("\n")
+ result.pop(0)
+ result.pop(0)
+
+ return result
+
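+    # Note: the comparison below is set-based, so a reordered list value (or,
+    # for plain strings, an anagram) is not reported as a change.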
+ @property
+ def changed(self):
+ if self.vars.previous_value is None:
+ return self.vars.value is not None
+ elif self.vars.value is None:
+ return self.vars.previous_value is not None
+ else:
+ return set(self.vars.previous_value) != set(self.vars.value)
+
+ def _get(self):
+ return self.run_command(params=('channel', 'property'))
+
+ def state_get(self):
+ self.vars.value = self.vars.previous_value
+ self.update_xfconf_output(value=self.vars.value)
+
+ def state_absent(self):
+ self.vars.value = None
+ self.run_command(params=('channel', 'property', 'reset'), extra_params={"reset": True})
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=None)
+
+ def state_present(self):
+ # stringify all values - in the CLI they will all be happy strings anyway
+ # and by doing this here the rest of the code can be agnostic to it
+ self.vars.value = [str(v) for v in self.module.params['value']]
+ value_type = self.module.params['value_type']
+
+ values_len = len(self.vars.value)
+ types_len = len(value_type)
+
+ if types_len == 1:
+ # use one single type for the entire list
+ value_type = value_type * values_len
+ elif types_len != values_len:
+ # or complain if lists' lengths are different
+ raise XFConfException('Number of elements in "value" and "value_type" must be the same')
+
+ # fix boolean values
+ self.vars.value = [fix_bool(v[0]) if v[1] == 'bool' else v[0] for v in zip(self.vars.value, value_type)]
+
+ # calculates if it is an array
+ self.vars.is_array = \
+ bool(self.module.params['force_array']) or \
+ isinstance(self.vars.previous_value, list) or \
+ values_len > 1
+
+ params = ['channel', 'property', 'create']
+ if self.vars.is_array:
+ params.append('is_array')
+ params.append('values_and_types')
+
+ extra_params = dict(values_and_types=(self.vars.value, value_type))
+ extra_params['create'] = True
+ extra_params['is_array'] = self.vars.is_array
+
+ if not self.module.check_mode:
+ self.run_command(params=params, extra_params=extra_params)
+
+ if not self.vars.is_array:
+ self.vars.value = self.vars.value[0]
+ value_type = value_type[0]
+
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=self.vars.value,
+ type=value_type)
+
+
+def main():
+ xfconf = XFConfProperty()
+ xfconf.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py
new file mode 100644
index 00000000..907f1bae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Emmanouil Kampitakis <info@kampitakis.de>
+# Copyright: (c) 2018, William Leemans <willie@elaba.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xfs_quota
+short_description: Manage quotas on XFS filesystems
+description:
+ - Configure quotas on XFS filesystems.
+  - Before using this module, /etc/projects and /etc/projid need to be configured.
+author:
+- William Leemans (@bushvin)
+options:
+ type:
+ description:
+ - The XFS quota type.
+ type: str
+ required: true
+ choices:
+ - user
+ - group
+ - project
+ name:
+ description:
+ - The name of the user, group or project to apply the quota to, if other than default.
+ type: str
+ mountpoint:
+ description:
+ - The mount point on which to apply the quotas.
+ type: str
+ required: true
+ bhard:
+ description:
+ - Hard blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ bsoft:
+ description:
+ - Soft blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ ihard:
+ description:
+ - Hard inodes quota limit.
+ type: int
+ isoft:
+ description:
+ - Soft inodes quota limit.
+ type: int
+ rtbhard:
+ description:
+ - Hard realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ rtbsoft:
+ description:
+ - Soft realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ state:
+ description:
+ - Whether to apply the limits or remove them.
+      - When removing limits, they are set to 0 rather than actually removed.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+
+requirements:
+ - xfsprogs
+'''
+
+EXAMPLES = r'''
+- name: Set default project soft and hard limit on /opt of 1g
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ bsoft: 1g
+ bhard: 1g
+ state: present
+
+- name: Remove the default limits on /opt
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ state: absent
+
+- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
+ community.general.xfs_quota:
+ type: user
+ mountpoint: /home
+ isoft: 1024
+ ihard: 2048
+
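+- name: Set a 10g hard block limit for user jdoe on /home (illustrative; jdoe is a placeholder)
+  community.general.xfs_quota:
+    type: user
+    name: jdoe
+    mountpoint: /home
+    bhard: 10g
+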
+'''
+
+RETURN = r'''
+bhard:
+ description: the current bhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+bsoft:
+ description: the current bsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+ihard:
+ description: the current ihard setting in bytes
+  description: the current ihard setting in inodes
+ type: int
+ sample: 100
+isoft:
+ description: the current isoft setting in bytes
+  description: the current isoft setting in inodes
+ type: int
+ sample: 100
+rtbhard:
+ description: the current rtbhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+rtbsoft:
+ description: the current rtbsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+'''
+
+import grp
+import os
+import pwd
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bhard=dict(type='str'),
+ bsoft=dict(type='str'),
+ ihard=dict(type='int'),
+ isoft=dict(type='int'),
+ mountpoint=dict(type='str', required=True),
+ name=dict(type='str'),
+ rtbhard=dict(type='str'),
+ rtbsoft=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ type=dict(type='str', required=True, choices=['group', 'project', 'user'])
+ ),
+ supports_check_mode=True,
+ )
+
+ quota_type = module.params['type']
+ name = module.params['name']
+ mountpoint = module.params['mountpoint']
+ bhard = module.params['bhard']
+ bsoft = module.params['bsoft']
+ ihard = module.params['ihard']
+ isoft = module.params['isoft']
+ rtbhard = module.params['rtbhard']
+ rtbsoft = module.params['rtbsoft']
+ state = module.params['state']
+
+ if bhard is not None:
+ bhard = human_to_bytes(bhard)
+
+ if bsoft is not None:
+ bsoft = human_to_bytes(bsoft)
+
+ if rtbhard is not None:
+ rtbhard = human_to_bytes(rtbhard)
+
+ if rtbsoft is not None:
+ rtbsoft = human_to_bytes(rtbsoft)
+
+ result = dict(
+ changed=False,
+ )
+
+ if not os.path.ismount(mountpoint):
+ module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
+
+ mp = get_fs_by_mountpoint(mountpoint)
+ if mp is None:
+ module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result)
+
+ if quota_type == 'user':
+ type_arg = '-u'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+ if 'uquota' not in mp['mntopts'] and 'usrquota' not in mp['mntopts'] and 'quota' not in mp['mntopts'] and 'uqnoenforce' not in mp['mntopts'] and \
+ 'qnoenforce' not in mp['mntopts']:
+ module.fail_json(
+ msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result
+ )
+ try:
+ pwd.getpwnam(name)
+        except KeyError:
+            module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == 'group':
+ type_arg = '-g'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+ if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
+ module.fail_json(
+ msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result
+ )
+ try:
+ grp.getgrnam(name)
+        except KeyError:
+            module.fail_json(msg="Group '%s' does not exist." % name, **result)
+
+ elif quota_type == 'project':
+ type_arg = '-p'
+ quota_default = '#0'
+ if name is None:
+ name = quota_default
+
+ if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
+ module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projects'):
+ module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projid'):
+ module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
+
+ if name != quota_default and name is not None and get_project_id(name) is None:
+ module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result)
+
+ prj_set = True
+ if name != quota_default:
+ cmd = 'project %s' % name
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not get project state.', **result)
+ else:
+ for line in stdout.split('\n'):
+                if "Project Id '%s' - is not set." % name in line:
+ prj_set = False
+ break
+
+ if not prj_set and not module.check_mode:
+ cmd = 'project -s'
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+                module.fail_json(msg='Could not set project.', **result)
+
+ result['changed'] = True
+
+ elif not prj_set and module.check_mode:
+ result['changed'] = True
+
+ # Set limits
+ if state == 'absent':
+ bhard = 0
+ bsoft = 0
+ ihard = 0
+ isoft = 0
+ rtbhard = 0
+ rtbsoft = 0
+
+ current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
+ current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
+ current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
+
+ result['xfs_quota'] = dict(
+ bsoft=current_bsoft,
+ bhard=current_bhard,
+ isoft=current_isoft,
+ ihard=current_ihard,
+ rtbsoft=current_rtbsoft,
+ rtbhard=current_rtbhard
+ )
+
+ limit = []
+ if bsoft is not None and int(bsoft) != current_bsoft:
+ limit.append('bsoft=%s' % bsoft)
+ result['bsoft'] = int(bsoft)
+
+ if bhard is not None and int(bhard) != current_bhard:
+ limit.append('bhard=%s' % bhard)
+ result['bhard'] = int(bhard)
+
+ if isoft is not None and isoft != current_isoft:
+ limit.append('isoft=%s' % isoft)
+ result['isoft'] = isoft
+
+ if ihard is not None and ihard != current_ihard:
+ limit.append('ihard=%s' % ihard)
+ result['ihard'] = ihard
+
+ if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
+ limit.append('rtbsoft=%s' % rtbsoft)
+ result['rtbsoft'] = int(rtbsoft)
+
+ if rtbhard is not None and int(rtbhard) != current_rtbhard:
+ limit.append('rtbhard=%s' % rtbhard)
+ result['rtbhard'] = int(rtbhard)
+
+ if len(limit) > 0 and not module.check_mode:
+ if name == quota_default:
+ cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
+ else:
+ cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
+
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not set limits.', **result)
+
+ result['changed'] = True
+
+ elif len(limit) > 0 and module.check_mode:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def quota_report(module, mountpoint, name, quota_type, used_type):
+ soft = None
+ hard = None
+
+ if quota_type == 'project':
+ type_arg = '-p'
+ elif quota_type == 'user':
+ type_arg = '-u'
+ elif quota_type == 'group':
+ type_arg = '-g'
+
+ if used_type == 'b':
+ used_arg = '-b'
+ used_name = 'blocks'
+ factor = 1024
+ elif used_type == 'i':
+ used_arg = '-i'
+ used_name = 'inodes'
+ factor = 1
+ elif used_type == 'rtb':
+ used_arg = '-r'
+ used_name = 'realtime blocks'
+ factor = 1024
+
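+    # A typical data line in the report output looks like (illustrative):
+    #   jdoe  1024  0  2048  00 [--------]
+    # i.e. name, used, soft and hard; block values are reported in KiB, hence
+    # factor = 1024 to convert them to bytes.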
+ rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint)
+
+ if rc != 0:
+ result = dict(
+ changed=False,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ )
+ module.fail_json(msg='Could not get quota report for %s.' % used_name, **result)
+
+ for line in stdout.split('\n'):
+ line = line.strip().split()
+ if len(line) > 3 and line[0] == name:
+ soft = int(line[2]) * factor
+ hard = int(line[3]) * factor
+ break
+
+ return soft, hard
+
+
+def exec_quota(module, cmd, mountpoint):
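+    # Runs an xfs_quota expert-mode command, e.g. (illustrative):
+    #   xfs_quota -x -c 'report -p -b' /opt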
+ cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint]
+ (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True)
+ if "XFS_GETQUOTA: Operation not permitted" in stderr.split('\n') or \
+ rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in stderr.split('\n'):
+ module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation')
+
+ return rc, stdout, stderr
+
+
+def get_fs_by_mountpoint(mountpoint):
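+    # Scans /proc/mounts for an xfs entry matching the mount point, e.g.
+    # (illustrative): "/dev/sdb1 /opt xfs rw,relatime,prjquota 0 0", and
+    # returns its fields as a dict with mntopts split into a list.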
+ mpr = None
+ with open('/proc/mounts', 'r') as s:
+ for line in s.readlines():
+ mp = line.strip().split()
+ if len(mp) == 6 and mp[1] == mountpoint and mp[2] == 'xfs':
+ mpr = dict(zip(['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno'], mp))
+ mpr['mntopts'] = mpr['mntopts'].split(',')
+ break
+ return mpr
+
+
+def get_project_id(name):
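+    # /etc/projid maps project names to numeric ids, one "name:id" entry per
+    # line, e.g. (illustrative): "myproject:42".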
+ prjid = None
+ with open('/etc/projid', 'r') as s:
+ for line in s.readlines():
+ line = line.strip().partition(':')
+ if line[0] == name:
+ prjid = line[2]
+ break
+
+ return prjid
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py
new file mode 100644
index 00000000..1733e657
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py
@@ -0,0 +1,958 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Red Hat, Inc.
+# Copyright: (c) 2014, Tim Bielawa <tbielawa@redhat.com>
+# Copyright: (c) 2014, Magnus Hedemark <mhedemar@redhat.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xml
+short_description: Manage bits and pieces of XML files or strings
+description:
+- A CRUD-like interface to managing bits of XML files.
+options:
+ path:
+ description:
+ - Path to the file to operate on.
+ - This file must exist ahead of time.
+ - This parameter is required, unless C(xmlstring) is given.
+ type: path
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless C(path) is given.
+ type: str
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter C(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or, to unset a value, the Python C(None) keyword (YAML equivalent: C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: list
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given C(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in C(add_children).
+ - This parameter requires C(xpath) to be set.
+ type: list
+ count:
+ description:
+ - Search for a given C(xpath) and provide the count of any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ print_match:
+ description:
+ - Search for a given C(xpath) and print out any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: no
+ content:
+ description:
+ - Search for a given C(xpath) and get content.
+ - This parameter requires C(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for C(add_children) and C(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: no
+ insertbefore:
+ description:
+ - Add additional child-element(s) before the first selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ insertafter:
+ description:
+ - Add additional child-element(s) after the last selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so it may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that if your XML elements are namespaced, you need to use the C(namespaces) parameter; see the examples.
+- A namespace prefix should be used for all children of an element whose namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+# <name>Tasty Beverage Co.</name>
+# <beers>
+# <beer>Rochefort 10</beer>
+# <beer>St. Bernardus Abbot 12</beer>
+# <beer>Schlitz</beer>
+# </beers>
+# <rating subjective="true">10</rating>
+# <website>
+# <mobilefriendly/>
+# <address>http://tastybeverageco.com</address>
+# </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating/@subjective
+ state: absent
+
+- name: Set the rating to '11'
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating
+ value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: hits
+
+- ansible.builtin.debug:
+ var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: '/business/beers/beer[text()="Rochefort 10"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ attribute: validatedon
+ value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ content: attribute
+ register: xmlresp
+
+- name: Show an attribute value
+ ansible.builtin.debug:
+ var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/*
+ state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website
+ set_children: []
+
+# In case of namespaces, like in below XML, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+# <bar>
+# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+# </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+ community.general.xml:
+ path: foo.xml
+ xpath: /x:foo/x:bar/y:baz
+ namespaces:
+ x: http://x.test
+ y: http://y.test
+ z: http://z.test
+ attribute: z:my_namespaced_attribute
+ value: 'false'
+'''
+
+RETURN = r'''
+actions:
+ description: A dictionary with the original xpath, namespaces and state.
+ type: dict
+ returned: success
+ sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+ description: The name of the backup file that was created.
+ type: str
+ returned: when backup=yes
+ sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+ description: The count of xpath matches.
+ type: int
+ returned: when parameter 'count' is set
+ sample: 2
+matches:
+ description: The xpath matches found.
+ type: list
+ returned: when parameter 'print_match' is set
+msg:
+ description: A message related to the performed action(s).
+ type: str
+ returned: always
+xmlstring:
+ description: An XML string of the resulting output.
+ type: str
+ returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+from io import BytesIO
+
+LXML_IMP_ERR = None
+try:
+ from lxml import etree, objectify
+ HAS_LXML = True
+except ImportError:
+ LXML_IMP_ERR = traceback.format_exc()
+ HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the XPath trick of concatenating strings wrapped
+# in the other delimiter when both ' and " are needed in one string, especially not as simple XPath.
+_XPSTR = "('(?:.*)'|\"(?:.*)\")"
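+# _XPSTR matches a quoted XPath string literal, e.g. 'foo' or "foo".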
+
+_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
+_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
+_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
+
+
+def has_changed(doc):
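+ # Compares against the module-level 'orig_doc' deep copy taken in main().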
+ orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
+ obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
+ return (orig_obj != obj)
+
+
+def do_print_match(module, tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ match_xpaths = []
+ for m in match:
+ match_xpaths.append(tree.getpath(m))
+ match_str = json.dumps(match_xpaths)
+ msg = "selector '%s' match: %s" % (xpath, match_str)
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg)
+
+
+def count_nodes(module, tree, xpath, namespaces):
+ """ Return the count of nodes matching the xpath """
+ hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
+ msg = "found %d nodes" % hits
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
+
+
+def is_node(tree, xpath, namespaces):
+ """ Test if a given xpath matches anything and if that match is a node.
+
+ For now we just assume you're only searching for one specific thing."""
+ if xpath_matches(tree, xpath, namespaces):
+ # OK, it found something
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._Element):
+ return True
+
+ return False
+
+
+def is_attribute(tree, xpath, namespaces):
+ """ Test if a given xpath matches and that match is an attribute
+
+ An xpath attribute search will only match one item"""
+ if xpath_matches(tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._ElementStringResult):
+ return True
+ elif isinstance(match[0], etree._ElementUnicodeResult):
+ return True
+ return False
+
+
+def xpath_matches(tree, xpath, namespaces):
+ """ Test if a node exists """
+ if tree.xpath(xpath, namespaces=namespaces):
+ return True
+ return False
+
+
+def delete_xpath_target(module, tree, xpath, namespaces):
+ """ Delete an attribute or element from a tree """
+ changed = False
+ try:
+ for result in tree.xpath(xpath, namespaces=namespaces):
+ changed = True
+ # Get the xpath for this result
+ if is_attribute(tree, xpath, namespaces):
+ # Delete an attribute
+ parent = result.getparent()
+ # Pop this attribute match out of the parent
+ # node's 'attrib' dict by using this match's
+ # 'attrname' attribute for the key
+ parent.attrib.pop(result.attrname)
+ elif is_node(tree, xpath, namespaces):
+ # Delete an element
+ result.getparent().remove(result)
+ else:
+ raise Exception("Impossible error")
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+ else:
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def replace_children_of(children, match):
+ for element in list(match):
+ match.remove(element)
+ match.extend(children)
+
+
+def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
+ matches = tree.xpath(xpath, namespaces=namespaces)
+
+ # Create a list of our new children
+ children = children_to_nodes(module, children, in_type)
+ children_as_string = [etree.tostring(c) for c in children]
+
+ changed = False
+
+ # xpaths always return matches as a list, so....
+ for match in matches:
+ # Check if elements differ
+ if len(list(match)) == len(children):
+ for idx, element in enumerate(list(match)):
+ if etree.tostring(element) != children_as_string[idx]:
+ replace_children_of(children, match)
+ changed = True
+ break
+ else:
+ replace_children_of(children, match)
+ changed = True
+
+ return changed
+
+
+def set_target_children(module, tree, xpath, namespaces, children, in_type):
+ changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
+ # Write it out
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
+ if is_node(tree, xpath, namespaces):
+ new_kids = children_to_nodes(module, children, in_type)
+ if insertbefore or insertafter:
+ insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
+ else:
+ for node in tree.xpath(xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ finish(module, tree, xpath, namespaces, changed=True)
+ else:
+ finish(module, tree, xpath, namespaces)
+
+
+def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
+ """
+ Insert the given children before or after the given xpath. If insertbefore is True, they are inserted before the
+ first xpath hit; with insertafter, they are inserted after the last xpath hit.
+ """
+ insert_target = tree.xpath(xpath, namespaces=namespaces)
+ loc_index = 0 if insertbefore else -1
+ index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
+ parent = insert_target[0].getparent()
+ if insertafter:
+ index_in_parent += 1
+ for child in children:
+ parent.insert(index_in_parent, child)
+ index_in_parent += 1
+
+
+def _extract_xpstr(g):
+ return g[1:-1]
+
+
+def split_xpath_last(xpath):
+ """split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
+ xpath = xpath.strip()
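+ # Illustrative return values, derived from the regexes above:
+ # '/a/b' -> ('/a', [('b', None)])
+ # "/a/b/text()='x'" -> ('/a', [('b', 'x')])
+ # '/a/@attr' -> ('/a', [('@attr', None)])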
+ m = _RE_SPLITSIMPLELAST.match(xpath)
+ if m:
+ # requesting an element to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
+ if m:
+ # requesting an element to exist with an inner text
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
+ if m:
+ # requesting an attribute to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
+ if m:
+ # requesting an attribute to exist with a value
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSUBLAST.match(xpath)
+ if m:
+ content = [x.strip() for x in m.group(3).split(" and ")]
+ return (m.group(1), [('/' + m.group(2), content)])
+
+ m = _RE_SPLITONLYEQVALUE.match(xpath)
+ if m:
+ # requesting a change of inner text
+ return (m.group(1), [("", _extract_xpstr(m.group(2)))])
+ return (xpath, [])
+
+
+def nsnameToClark(name, namespaces):
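+ # Converts a prefixed name to Clark notation, e.g.
+ # nsnameToClark('x:bar', {'x': 'http://x.test'}) -> '{http://x.test}bar'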
+ if ":" in name:
+ (nsname, rawname) = name.split(":")
+ # return "{{%s}}%s" % (namespaces[nsname], rawname)
+ return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+
+ # no namespace name here
+ return name
+
+
+def check_or_make_target(module, tree, xpath, namespaces):
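+ # Recursively ensures the inner part of the xpath exists, then applies the final
+ # element/attribute/text change described by split_xpath_last().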
+ (inner_xpath, changes) = split_xpath_last(xpath)
+ if (inner_xpath == xpath) or (changes is None):
+ module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+ return False
+
+ changed = False
+
+ if not is_node(tree, inner_xpath, namespaces):
+ changed = check_or_make_target(module, tree, inner_xpath, namespaces)
+
+ # we test again after calling check_or_make_target
+ if is_node(tree, inner_xpath, namespaces) and changes:
+ for (eoa, eoa_value) in changes:
+ if eoa and eoa[0] != '@' and eoa[0] != '/':
+ # implicitly creating an element
+ new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
+ if eoa_value:
+ for nk in new_kids:
+ nk.text = eoa_value
+
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ changed = True
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa and eoa[0] == '/':
+ element = eoa[1:]
+ new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ for nk in new_kids:
+ for subexpr in eoa_value:
+ # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
+ # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
+ check_or_make_target(module, nk, "./" + subexpr, namespaces)
+ changed = True
+
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa == "":
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ if (node.text != eoa_value):
+ node.text = eoa_value
+ changed = True
+
+ elif eoa and eoa[0] == '@':
+ attribute = nsnameToClark(eoa[1:], namespaces)
+
+ for element in tree.xpath(inner_xpath, namespaces=namespaces):
+ changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
+
+ if changing:
+ changed = changed or changing
+ if eoa_value is None:
+ value = ""
+ else:
+ value = eoa_value
+ element.attrib[attribute] = value
+
+ # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
+ # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
+
+ else:
+ module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+
+ return changed
+
+
+def ensure_xpath_exists(module, tree, xpath, namespaces):
+ changed = False
+
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def set_target_inner(module, tree, xpath, namespaces, attribute, value):
+ changed = False
+
+ try:
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+ except Exception as e:
+ missing_namespace = ""
+ # NOTE: This checks only the namespaces defined in root element!
+ # TODO: Implement a more robust check to check for child namespaces' existence
+ if tree.getroot().nsmap and ":" not in xpath:
+ missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
+ module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
+ (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ if not attribute:
+ changed = changed or (element.text != value)
+ if element.text != value:
+ element.text = value
+ else:
+ changed = changed or (element.get(attribute) != value)
+ if ":" in attribute:
+ attr_ns, attr_name = attribute.split(":")
+ # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
+ attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+ if element.get(attribute) != value:
+ element.set(attribute, value)
+
+ return changed
+
+
+def set_target(module, tree, xpath, namespaces, attribute, value):
+ changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def get_element_text(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ elements.append({element.tag: element.text})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def get_element_attr(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ child = {}
+ for key in element.keys():
+ value = element.get(key)
+ child.update({key: value})
+ elements.append({element.tag: child})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def child_to_element(module, child, in_type):
+ if in_type == 'xml':
+ infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
+
+ try:
+ parser = etree.XMLParser()
+ node = etree.parse(infile, parser)
+ return node.getroot()
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing child element: %s" % e)
+ elif in_type == 'yaml':
+ if isinstance(child, string_types):
+ return etree.Element(child)
+ elif isinstance(child, MutableMapping):
+ if len(child) > 1:
+ module.fail_json(msg="Can only create children from hashes with one key")
+
+ (key, value) = next(iteritems(child))
+ if isinstance(value, MutableMapping):
+ children = value.pop('_', None)
+
+ node = etree.Element(key, value)
+
+ if children is not None:
+ if not isinstance(children, list):
+ module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+
+ subnodes = children_to_nodes(module, children)
+ node.extend(subnodes)
+ else:
+ node = etree.Element(key)
+ node.text = value
+ return node
+ else:
+ module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+ else:
+ module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+
+
+def children_to_nodes(module=None, children=None, type='yaml'):
+ """turn a str/hash/list of str&hash into a list of elements"""
+ children = [] if children is None else children
+
+ return [child_to_element(module, child, type) for child in children]
+
+
+def make_pretty(module, tree):
+ xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ result = dict(
+ changed=False,
+ )
+
+ if module.params['path']:
+ xml_file = module.params['path']
+ with open(xml_file, 'rb') as xml_content:
+ if xml_string != xml_content.read():
+ result['changed'] = True
+ if not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ elif module.params['xmlstring']:
+ result['xmlstring'] = xml_string
+ # NOTE: Modifying a string is not considered a change !
+ if xml_string != module.params['xmlstring']:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
+
+ result = dict(
+ actions=dict(
+ xpath=xpath,
+ namespaces=namespaces,
+ state=module.params['state']
+ ),
+ changed=has_changed(tree),
+ )
+
+ if module.params['count'] or hitcount:
+ result['count'] = hitcount
+
+ if module.params['print_match'] or matches:
+ result['matches'] = matches
+
+ if msg:
+ result['msg'] = msg
+
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(
+ before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ )
+
+ if module.params['path'] and not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ if module.params['xmlstring']:
+ result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', aliases=['dest', 'file']),
+ xmlstring=dict(type='str'),
+ xpath=dict(type='str'),
+ namespaces=dict(type='dict', default={}),
+ state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
+ value=dict(type='raw'),
+ attribute=dict(type='raw'),
+ add_children=dict(type='list'),
+ set_children=dict(type='list'),
+ count=dict(type='bool', default=False),
+ print_match=dict(type='bool', default=False),
+ pretty_print=dict(type='bool', default=False),
+ content=dict(type='str', choices=['attribute', 'text']),
+ input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
+ backup=dict(type='bool', default=False),
+ strip_cdata_tags=dict(type='bool', default=False),
+ insertbefore=dict(type='bool', default=False),
+ insertafter=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_by=dict(
+ add_children=['xpath'],
+ # TODO: Reinstate this in community.general 2.0.0 when we have deprecated the incorrect use below
+ # attribute=['value'],
+ content=['xpath'],
+ set_children=['xpath'],
+ value=['xpath'],
+ ),
+ required_if=[
+ ['count', True, ['xpath']],
+ ['print_match', True, ['xpath']],
+ ['insertbefore', True, ['xpath']],
+ ['insertafter', True, ['xpath']],
+ ],
+ required_one_of=[
+ ['path', 'xmlstring'],
+ ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
+ ],
+ mutually_exclusive=[
+ ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
+ ['path', 'xmlstring'],
+ ['insertbefore', 'insertafter'],
+ ],
+ )
+
+ xml_file = module.params['path']
+ xml_string = module.params['xmlstring']
+ xpath = module.params['xpath']
+ namespaces = module.params['namespaces']
+ state = module.params['state']
+ value = json_dict_bytes_to_unicode(module.params['value'])
+ attribute = module.params['attribute']
+ set_children = json_dict_bytes_to_unicode(module.params['set_children'])
+ add_children = json_dict_bytes_to_unicode(module.params['add_children'])
+ pretty_print = module.params['pretty_print']
+ content = module.params['content']
+ input_type = module.params['input_type']
+ print_match = module.params['print_match']
+ count = module.params['count']
+ backup = module.params['backup']
+ strip_cdata_tags = module.params['strip_cdata_tags']
+ insertbefore = module.params['insertbefore']
+ insertafter = module.params['insertafter']
+
+ # Check if we have lxml 2.3.0 or newer installed
+ if not HAS_LXML:
+ module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
+ module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
+ module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
+
+ # Report wrongly used attribute parameter when using content=attribute
+ # TODO: Remove this in community.general 2.0.0 (and reinstate strict parameter test above) and remove the integration test example
+ if content == 'attribute' and attribute is not None:
+ module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute'; only 'xpath' is used. Please remove the entry." % attribute,
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ # Check if the file exists
+ if xml_string:
+ infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+ elif os.path.isfile(xml_file):
+ infile = open(xml_file, 'rb')
+ else:
+ module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+ # Parse and evaluate xpath expression
+ if xpath is not None:
+ try:
+ etree.XPath(xpath)
+ except etree.XPathSyntaxError as e:
+ module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+ except etree.XPathEvalError as e:
+ module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+ # Try to parse in the target XML file
+ try:
+ parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+ doc = etree.parse(infile, parser)
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+
+ # Ensure we have the original copy to compare
+ global orig_doc
+ orig_doc = copy.deepcopy(doc)
+
+ if print_match:
+ do_print_match(module, doc, xpath, namespaces)
+
+ if count:
+ count_nodes(module, doc, xpath, namespaces)
+
+ if content == 'attribute':
+ get_element_attr(module, doc, xpath, namespaces)
+ elif content == 'text':
+ get_element_text(module, doc, xpath, namespaces)
+
+ # File exists:
+ if state == 'absent':
+ # - absent: delete xpath target
+ delete_xpath_target(module, doc, xpath, namespaces)
+
+ # - present: carry on
+
+ # children && value both set?: should have already aborted by now
+ # add_children && set_children both set?: should have already aborted by now
+
+ # set_children set?
+ if set_children:
+ set_target_children(module, doc, xpath, namespaces, set_children, input_type)
+
+ # add_children set?
+ if add_children:
+ add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
+
+ # No?: Carry on
+
+ # Is the xpath target an attribute selector?
+ if value is not None:
+ set_target(module, doc, xpath, namespaces, attribute, value)
+
+ # If an xpath was provided, we need to do something with the data
+ if xpath is not None:
+ ensure_xpath_exists(module, doc, xpath, namespaces)
+
+ # Otherwise only reformat the xml data?
+ if pretty_print:
+ make_pretty(module, doc)
+
+ module.fail_json(msg="Don't know what to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py
new file mode 100644
index 00000000..77489e24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017 David Gunter <david.gunter@tivix.com>
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yarn
+short_description: Manage node.js packages with Yarn
+description:
+ - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/)
+author:
+ - "David Gunter (@verkaufer)"
+ - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module"
+options:
+ name:
+ type: str
+ description:
+ - The name of a node.js library to install.
+ - If omitted, all packages in package.json are installed.
+ - To install a node.js library globally from a local path, prepend "file:" to the path of the library.
+ required: false
+ path:
+ type: path
+ description:
+ - The base path where Node.js libraries will be installed.
+ - This is where the node_modules folder lives.
+ required: false
+ version:
+ type: str
+ description:
+ - The version of the library to be installed.
+ - Must be in semver format. If "latest" is desired, use the "state" arg instead.
+ required: false
+ global:
+ description:
+ - Install the node.js library globally
+ required: false
+ default: no
+ type: bool
+ executable:
+ type: path
+ description:
+ - The executable location for yarn.
+ required: false
+ ignore_scripts:
+ description:
+ - Use the --ignore-scripts flag when installing.
+ required: false
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode.
+ - Yarn will ignore any dependencies under devDependencies in package.json
+ required: false
+ type: bool
+ default: no
+ registry:
+ type: str
+ description:
+ - The registry to install modules from.
+ required: false
+ state:
+ type: str
+ description:
+ - Installation state of the named node.js library.
+ - If "absent" is selected, the "name" option must be provided.
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - Yarn installed in bin path (typically /usr/local/bin)
+'''
+
+EXAMPLES = '''
+- name: Install "imagemin" node.js package.
+ community.general.yarn:
+ name: imagemin
+ path: /app/location
+
+- name: Install "imagemin" node.js package on version 5.3.1
+ community.general.yarn:
+ name: imagemin
+ version: '5.3.1'
+ path: /app/location
+
+- name: Install "imagemin" node.js package globally.
+ community.general.yarn:
+ name: imagemin
+ global: yes
+
+- name: Remove the globally-installed package "imagemin".
+ community.general.yarn:
+ name: imagemin
+ global: yes
+ state: absent
+
+- name: Install "imagemin" node.js package from custom registry.
+ community.general.yarn:
+ name: imagemin
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.yarn:
+ path: /app/location
+
+- name: Update all packages in package.json to their latest version.
+ community.general.yarn:
+ path: /app/location
+ state: latest
+'''
+
+RETURN = '''
+changed:
+ description: Whether Yarn changed any package data
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Provides an error message if Yarn syntax was incorrect
+ returned: failure
+ type: str
+ sample: "Package must be explicitly named when uninstalling."
+invocation:
+ description: Parameters and values used during execution
+ returned: success
+ type: dict
+ sample: {
+ "module_args": {
+ "executable": null,
+ "globally": false,
+ "ignore_scripts": false,
+ "name": null,
+ "path": "/some/path/folder",
+ "production": false,
+ "registry": null,
+ "state": "present",
+ "version": null
+ }
+ }
+out:
+ description: Output generated from Yarn with emojis removed.
+ returned: always
+ type: str
+ sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4]
+ Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s."
+'''
+
+import os
+import re
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Yarn(object):
+
+ DEFAULT_GLOBAL_INSTALLATION_PATH = '~/.config/yarn/global'
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.globally = kwargs['globally']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+
+ # Specify a version of package if version arg passed in
+ self.name_version = None
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('yarn', True)]
+
+ if kwargs['version'] and self.name is not None:
+ self.name_version = self.name + '@' + str(self.version)
+ elif self.name is not None:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+
+ if self.globally:
+ # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`)
+ args.insert(0, 'global')
+
+ cmd = self.executable + args
+
+ if self.production:
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # always run Yarn without emojis when called via Ansible
+ cmd.append('--no-emoji')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path and not self.globally:
+ if not os.path.exists(self.path):
+ # The module creates the directory if it does not exist.
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="Path provided %s is not a directory" % self.path)
+ cwd = self.path
+
+ if not os.path.isfile(os.path.join(self.path, 'package.json')):
+ self.module.fail_json(msg="Package.json does not exist in provided path.")
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out, err
+
+ return (None, None)
+
+ def list(self):
+ cmd = ['list', '--depth=0', '--json']
+
+ installed = list()
+ missing = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ missing.append(self.name)
+ return installed, missing
+
+ result, error = self._exec(cmd, True, False)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ data = json.loads(result)
+ try:
+ dependencies = data['data']['trees']
+ except KeyError:
+ missing.append(self.name)
+ return installed, missing
+
+ for dep in dependencies:
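+ # rsplit on the last '@' so scoped packages such as '@scope/pkg@1.0.0'
+ # keep their full name ('@scope/pkg')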
+ name, version = dep['name'].rsplit('@', 1)
+ installed.append(name)
+
+ if self.name not in installed:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ if self.name_version:
+ # Yarn has a separate command for installing packages by name...
+ return self._exec(['add', self.name_version])
+ # And one for installing all packages in package.json
+ return self._exec(['install', '--non-interactive'])
+
+ def update(self):
+ return self._exec(['upgrade', '--latest'])
+
+ def uninstall(self):
+ return self._exec(['remove', self.name])
+
+ def list_outdated(self):
+ outdated = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ return outdated
+
+ cmd_result, err = self._exec(['outdated', '--json'], True, False)
+ if err:
+ self.module.fail_json(msg=err)
+
+ if not cmd_result:
+ return outdated
+
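+ # Yarn's `outdated --json` emits one JSON document per line; the code assumes
+ # the table of outdated packages is on the second line.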
+ outdated_packages_data = cmd_result.splitlines()[1]
+
+ data = json.loads(outdated_packages_data)
+
+ try:
+ outdated_dependencies = data['data']['body']
+ except KeyError:
+ return outdated
+
+ for dep in outdated_dependencies:
+ # Outdated dependencies are returned as a list of lists, where the
+ # item at index 0 is the name of the dependency
+ outdated.append(dep[0])
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ path=dict(default=None, type='path'),
+ version=dict(default=None),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ globally = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+
+ # When installing globally, users should not be able to define a path for installation.
+ # Require a path if global is False, though!
+ if path is None and globally is False:
+ module.fail_json(msg='Path must be specified when not using global arg')
+ elif path and globally is True:
+ module.fail_json(msg='Cannot specify path if doing global installation')
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='Package must be explicitly named when uninstalling.')
+ if state == 'latest':
+ version = 'latest'
+
+ # When installing globally, use the defined path for global node_modules
+ if globally:
+ path = Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH
+
+ yarn = Yarn(module,
+ name=name,
+ path=path,
+ version=version,
+ globally=globally,
+ production=production,
+ executable=executable,
+ registry=registry,
+ ignore_scripts=ignore_scripts)
+
+ changed = False
+ out = ''
+ err = ''
+ if state == 'present':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+
+ elif state == 'latest':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ outdated = yarn.list_outdated()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+ if len(outdated):
+ changed = True
+ out, err = yarn.update()
+ else:
+ # state == absent
+ installed, missing = yarn.list()
+ if name in installed:
+ changed = True
+ out, err = yarn.uninstall()
+
+ module.exit_json(changed=changed, out=out, err=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py
new file mode 100644
index 00000000..6b2260fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: zfs
+short_description: Manage zfs
+description:
+ - Manages ZFS file systems, volumes, clones and snapshots
+options:
+ name:
+ description:
+ - File system, snapshot or volume name e.g. C(rpool/myfs).
+ required: true
+ state:
+ description:
+ - Whether to create (C(present)), or remove (C(absent)) a
+ file system, snapshot or volume. All parents/children
+ will be created/destroyed as needed to reach the desired state.
+ choices: [ absent, present ]
+ required: true
+ origin:
+ description:
+ - Snapshot from which to create a clone.
+ extra_zfs_properties:
+ description:
+ - A dictionary of zfs properties to be set.
+ - See the zfs(8) man page for more information.
+author:
+- Johan Wiren (@johanwiren)
+'''
+
+EXAMPLES = '''
+- name: Create a new file system called myfs in pool rpool with the setuid property turned off
+ community.general.zfs:
+ name: rpool/myfs
+ state: present
+ extra_zfs_properties:
+ setuid: off
+
+- name: Create a new volume called myvol in pool rpool.
+ community.general.zfs:
+ name: rpool/myvol
+ state: present
+ extra_zfs_properties:
+ volsize: 10M
+
+- name: Create a snapshot of rpool/myfs file system.
+ community.general.zfs:
+ name: rpool/myfs@mysnapshot
+ state: present
+
+- name: Create a new file system called myfs2 with snapdir enabled
+ community.general.zfs:
+ name: rpool/myfs2
+ state: present
+ extra_zfs_properties:
+ snapdir: enabled
+
+- name: Create a new file system by cloning a snapshot
+ community.general.zfs:
+ name: rpool/cloned_fs
+ state: present
+ origin: rpool/myfs@mysnapshot
+
+- name: Destroy a filesystem
+ community.general.zfs:
+ name: rpool/myfs
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zfs(object):
+
+ def __init__(self, module, name, properties):
+ self.module = module
+ self.name = name
+ self.properties = properties
+ self.changed = False
+ self.zfs_cmd = module.get_bin_path('zfs', True)
+ self.zpool_cmd = module.get_bin_path('zpool', True)
+ self.pool = name.split('/')[0].split('@')[0]
+ self.is_solaris = os.uname()[0] == 'SunOS'
+ self.is_openzfs = self.check_openzfs()
+ self.enhanced_sharing = self.check_enhanced_sharing()
+
+ def check_openzfs(self):
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
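+ # A version of '-' (feature flags in use) or 5000 indicates an OpenZFS pool.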
+ if version == '-':
+ return True
+ if int(version) == 5000:
+ return True
+ return False
+
+ def check_enhanced_sharing(self):
+ if self.is_solaris and not self.is_openzfs:
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if int(version) >= 34:
+ return True
+ return False
+
+ def exists(self):
+ cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ properties = self.properties
+ origin = self.module.params.get('origin', None)
+ cmd = [self.zfs_cmd]
+
+ if "@" in self.name:
+ action = 'snapshot'
+ elif origin:
+ action = 'clone'
+ else:
+ action = 'create'
+
+ cmd.append(action)
+
+ if action in ['create', 'clone']:
+ cmd += ['-p']
+
+ if properties:
+ for prop, value in properties.items():
+ if prop == 'volsize':
+ cmd += ['-V', value]
+ elif prop == 'volblocksize':
+ cmd += ['-b', value]
+ else:
+ cmd += ['-o', '%s="%s"' % (prop, value)]
+ if origin and action == 'clone':
+ cmd.append(origin)
+ cmd.append(self.name)
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def destroy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_property(self, prop, value):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_properties_if_changed(self):
+ current_properties = self.get_current_properties()
+ for prop, value in self.properties.items():
+ if current_properties.get(prop, None) != value:
+ self.set_property(prop, value)
+
+ def get_current_properties(self):
+ cmd = [self.zfs_cmd, 'get', '-H']
+ if self.enhanced_sharing:
+ cmd += ['-e']
+ cmd += ['all', self.name]
+ rc, out, err = self.module.run_command(" ".join(cmd))
+ properties = dict()
+ for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
+ if source == 'local':
+ properties[prop] = value
+ # Add alias for enhanced sharing properties
+ if self.enhanced_sharing:
+ properties['sharenfs'] = properties.get('share.nfs', None)
+ properties['sharesmb'] = properties.get('share.smb', None)
+ return properties
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ origin=dict(type='str', default=None),
+ extra_zfs_properties=dict(type='dict', default={}),
+ ),
+ supports_check_mode=True,
+ )
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+
+ if module.params.get('origin') and '@' in name:
+ module.fail_json(msg='cannot specify origin when operating on a snapshot')
+
+ # YAML parses bare on/off values as booleans; convert them back to the 'on'/'off' strings zfs expects
+ for prop, value in module.params['extra_zfs_properties'].items():
+ if isinstance(value, bool):
+ if value is True:
+ module.params['extra_zfs_properties'][prop] = 'on'
+ else:
+ module.params['extra_zfs_properties'][prop] = 'off'
+ else:
+ module.params['extra_zfs_properties'][prop] = value
+
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ zfs = Zfs(module, name, module.params['extra_zfs_properties'])
+
+ if state == 'present':
+ if zfs.exists():
+ zfs.set_properties_if_changed()
+ else:
+ zfs.create()
+
+ elif state == 'absent':
+ if zfs.exists():
+ zfs.destroy()
+
+ result.update(zfs.properties)
+ result['changed'] = zfs.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
new file mode 100644
index 00000000..223d7f72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: zfs_delegate_admin
+short_description: Manage ZFS delegated administration (user admin privileges)
+description:
+ - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
+ operations normally restricted to the superuser.
+ - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
+ - This module attempts to adhere to the behavior of the command line tool as much as possible.
+requirements:
+ - "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
+ versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
+options:
+ name:
+ description:
+ - File system or volume name e.g. C(rpool/myfs).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to allow (C(present)), or unallow (C(absent)) a permission.
+ - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) is required.
+ - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
+ choices: [ absent, present ]
+ default: present
+ users:
+ description:
+ - List of users to whom permission(s) should be granted.
+ type: list
+ groups:
+ description:
+ - List of groups to whom permission(s) should be granted.
+ type: list
+ everyone:
+ description:
+ - Apply permissions to everyone.
+ type: bool
+ default: no
+ permissions:
+ description:
+ - The list of permission(s) to delegate (required if C(state) is C(present)).
+ type: list
+ choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ]
+ local:
+ description:
+ - Apply permissions to C(name) locally (C(zfs allow -l)).
+ type: bool
+ descendents:
+ description:
+ - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+ type: bool
+ recursive:
+ description:
+ - Unallow permissions recursively (ignored when C(state) is C(present)).
+ type: bool
+ default: no
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: adm
+ permissions: allow,unallow
+
+- name: Grant `zfs send` to everyone, plus the group `backup`
+ community.general.zfs_delegate_admin:
+ name: rpool/myvol
+ groups: backup
+ everyone: yes
+ permissions: send
+
+- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: foo,bar
+ permissions: send,receive
+ local: yes
+
+- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ everyone: yes
+ state: absent
+'''
+
+# This module does not return anything other than the standard
+# changed/state/msg/stdout
+RETURN = '''
+'''
+
+from itertools import product
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZfsDelegateAdmin(object):
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params.get('name')
+ self.state = module.params.get('state')
+ self.users = module.params.get('users')
+ self.groups = module.params.get('groups')
+ self.everyone = module.params.get('everyone')
+ self.perms = module.params.get('permissions')
+ self.scope = None
+ self.changed = False
+ self.initial_perms = None
+ self.subcommand = 'allow'
+ self.recursive_opt = []
+ self.run_method = self.update
+
+ self.setup(module)
+
+ def setup(self, module):
+ """ Validate params and set up for run.
+ """
+ if self.state == 'absent':
+ self.subcommand = 'unallow'
+ if module.params.get('recursive'):
+ self.recursive_opt = ['-r']
+
+ local = module.params.get('local')
+ descendents = module.params.get('descendents')
+ if (local and descendents) or (not local and not descendents):
+ self.scope = 'ld'
+ elif local:
+ self.scope = 'l'
+ elif descendents:
+ self.scope = 'd'
+ else:
+ self.module.fail_json(msg='Impossible value for local and descendents')
+
+ if not (self.users or self.groups or self.everyone):
+ if self.state == 'present':
+ self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
+ elif self.state == 'absent':
+ self.run_method = self.clear
+ # ansible ensures the else cannot happen here
+
+ self.zfs_path = module.get_bin_path('zfs', True)
+
+ @property
+ def current_perms(self):
+ """ Parse the output of `zfs allow <name>` to retrieve current permissions.
+ """
+ out = self.run_zfs_raw(subcommand='allow')
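+ # Illustrative `zfs allow` output parsed below (entity lines are tab-indented):
+ # ---- Permissions on rpool/myfs ----
+ # Local+Descendent permissions:
+ # user adm allow,unallow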
+ perms = {
+ 'l': {'u': {}, 'g': {}, 'e': []},
+ 'd': {'u': {}, 'g': {}, 'e': []},
+ 'ld': {'u': {}, 'g': {}, 'e': []},
+ }
+ linemap = {
+ 'Local permissions:': 'l',
+ 'Descendent permissions:': 'd',
+ 'Local+Descendent permissions:': 'ld',
+ }
+ scope = None
+ for line in out.splitlines():
+ scope = linemap.get(line, scope)
+ if not scope:
+ continue
+ try:
+ if line.startswith('\tuser ') or line.startswith('\tgroup '):
+ ent_type, ent, cur_perms = line.split()
+ perms[scope][ent_type[0]][ent] = cur_perms.split(',')
+ elif line.startswith('\teveryone '):
+ perms[scope]['e'] = line.split()[1].split(',')
+ except ValueError:
+ self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
+ return perms
+
+ def run_zfs_raw(self, subcommand=None, args=None):
+ """ Run a raw zfs command, fail on error.
+ """
+ cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc:
+ self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
+ return out
+
+ def run_zfs(self, args):
+ """ Run zfs allow/unallow with appropriate options as per module arguments.
+ """
+ args = self.recursive_opt + ['-' + self.scope] + args
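+ # e.g. args=['-u', 'adm'] with scope 'ld' and perms ['allow', 'unallow'] yields:
+ # zfs allow -ld -u adm allow,unallow rpool/myfs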
+ if self.perms:
+ args.append(','.join(self.perms))
+ return self.run_zfs_raw(args=args)
+
+ def clear(self):
+ """ Called by run() to clear all permissions.
+ """
+ changed = False
+ stdout = ''
+ for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
+ for ent in self.initial_perms[scope][ent_type].keys():
+ stdout += self.run_zfs(['-%s' % ent_type, ent])
+ changed = True
+ for scope in ('ld', 'l', 'd'):
+ if self.initial_perms[scope]['e']:
+ stdout += self.run_zfs(['-e'])
+ changed = True
+ return (changed, stdout)
+
+ def update(self):
+ """ Update permissions as per module arguments.
+ """
+ stdout = ''
+ for ent_type, entities in (('u', self.users), ('g', self.groups)):
+ if entities:
+ stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
+ if self.everyone:
+ stdout += self.run_zfs(['-e'])
+ return (self.initial_perms != self.current_perms, stdout)
+
+ def run(self):
+ """ Run an operation, return results for Ansible.
+ """
+ exit_args = {'state': self.state}
+ self.initial_perms = self.current_perms
+ exit_args['changed'], stdout = self.run_method()
+ if exit_args['changed']:
+ exit_args['msg'] = 'ZFS delegated admin permissions updated'
+ exit_args['stdout'] = stdout
+ self.module.exit_json(**exit_args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ users=dict(type='list'),
+ groups=dict(type='list'),
+ everyone=dict(type='bool', default=False),
+ permissions=dict(type='list',
+ choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote',
+ 'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share',
+ 'snapshot', 'unallow']),
+ local=dict(type='bool'),
+ descendents=dict(type='bool'),
+ recursive=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ required_if=[('state', 'present', ['permissions'])],
+ )
+ zfs_delegate_admin = ZfsDelegateAdmin(module)
+ zfs_delegate_admin.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py
new file mode 100644
index 00000000..e7719f68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zfs_facts
+short_description: Gather facts about ZFS datasets.
+description:
+ - Gather facts from ZFS dataset properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS dataset name.
+ required: yes
+ aliases: [ "ds", "dataset" ]
+ recurse:
+ description:
+ - Specifies if properties for any children should be recursively
+ displayed.
+ type: bool
+ default: 'no'
+ parsable:
+ description:
+      - Specifies if property values should be displayed in machine-friendly
+        format.
+ type: bool
+ default: 'no'
+ properties:
+ description:
+ - Specifies which dataset properties should be queried in comma-separated format.
+        For more information about dataset properties, check the zfs(1M) man page.
+ default: all
+ type:
+ description:
+      - Specifies which dataset types to display. Multiple values have to be
+ provided in comma-separated form.
+ choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
+ default: all
+ depth:
+ description:
+ - Specifies recursion depth.
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS dataset rpool/export/home
+ community.general.zfs_facts:
+ dataset: rpool/export/home
+
+- name: Report space usage on ZFS filesystems under data/home
+ community.general.zfs_facts:
+ name: data/home
+ recurse: yes
+ type: filesystem
+
+- ansible.builtin.debug:
+ msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
+ with_items: '{{ ansible_zfs_datasets }}'
+'''
+
+RETURN = '''
+name:
+ description: ZFS dataset name
+ returned: always
+ type: str
+ sample: rpool/var/spool
+parsable:
+  description: whether property values are reported in machine-friendly (parsable) format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+recurse:
+  description: whether facts were gathered recursively over child datasets
+ returned: if 'recurse' is set to True
+ type: bool
+ sample: True
+zfs_datasets:
+ description: ZFS dataset facts
+ returned: always
+ type: str
+ sample:
+ {
+ "aclinherit": "restricted",
+ "aclmode": "discard",
+ "atime": "on",
+ "available": "43.8G",
+ "canmount": "on",
+ "casesensitivity": "sensitive",
+ "checksum": "on",
+ "compression": "off",
+ "compressratio": "1.00x",
+ "copies": "1",
+ "creation": "Thu Jun 16 11:37 2016",
+ "dedup": "off",
+ "devices": "on",
+ "exec": "on",
+ "filesystem_count": "none",
+ "filesystem_limit": "none",
+ "logbias": "latency",
+ "logicalreferenced": "18.5K",
+ "logicalused": "3.45G",
+ "mlslabel": "none",
+ "mounted": "yes",
+ "mountpoint": "/rpool",
+ "name": "rpool",
+ "nbmand": "off",
+ "normalization": "none",
+ "org.openindiana.caiman:install": "ready",
+ "primarycache": "all",
+ "quota": "none",
+ "readonly": "off",
+ "recordsize": "128K",
+ "redundant_metadata": "all",
+ "refcompressratio": "1.00x",
+ "referenced": "29.5K",
+ "refquota": "none",
+ "refreservation": "none",
+ "reservation": "none",
+ "secondarycache": "all",
+ "setuid": "on",
+ "sharenfs": "off",
+ "sharesmb": "off",
+ "snapdir": "hidden",
+ "snapshot_count": "none",
+ "snapshot_limit": "none",
+ "sync": "standard",
+ "type": "filesystem",
+ "used": "4.41G",
+ "usedbychildren": "4.41G",
+ "usedbydataset": "29.5K",
+ "usedbyrefreservation": "0",
+ "usedbysnapshots": "0",
+ "utf8only": "off",
+ "version": "5",
+ "vscan": "off",
+ "written": "29.5K",
+ "xattr": "on",
+ "zoned": "off"
+ }
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+
+SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
+
+
+class ZFSFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.recurse = module.params['recurse']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+ self.type = module.params['type']
+ self.depth = module.params['depth']
+
+ self._datasets = defaultdict(dict)
+ self.facts = []
+
+ def dataset_exists(self):
+        cmd = [self.module.get_bin_path('zfs', True)]  # fail early if the zfs utility is missing
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+        cmd = [self.module.get_bin_path('zfs', True)]  # fail early if the zfs utility is missing
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ if self.recurse:
+ cmd.append('-r')
+ if int(self.depth) != 0:
+ cmd.append('-d')
+ cmd.append('%s' % self.depth)
+ if self.type:
+ cmd.append('-t')
+ cmd.append(self.type)
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
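+            # `zfs get -H -o name,property,value` emits one tab-separated
+            # triple per line, e.g. (illustrative):
+            #   rpool/export/home<TAB>used<TAB>4.41G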
+ for line in out.splitlines():
+ dataset, property, value = line.split('\t')
+
+ self._datasets[dataset].update({property: value})
+
+ for k, v in iteritems(self._datasets):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_datasets': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
+ recurse=dict(required=False, default=False, type='bool'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
+ depth=dict(required=False, default=0, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ zfs_facts = ZFSFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zfs_facts.name
+
+ if zfs_facts.parsable:
+ result['parsable'] = zfs_facts.parsable
+
+ if zfs_facts.recurse:
+ result['recurse'] = zfs_facts.recurse
+
+ if zfs_facts.dataset_exists():
+ result['ansible_facts'] = zfs_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py
new file mode 100644
index 00000000..156a6376
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# Copyright 2015 WP Engine, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: znode
+short_description: Create, delete, retrieve, and update znodes using ZooKeeper
+description:
+ - Create, delete, retrieve, and update znodes using ZooKeeper.
+options:
+ hosts:
+ description:
+ - A list of ZooKeeper servers (format '[server]:[port]').
+ required: true
+ name:
+ description:
+ - The path of the znode.
+ required: true
+ value:
+ description:
+ - The value assigned to the znode.
+ op:
+ description:
+ - An operation to perform. Mutually exclusive with state.
+ choices: [ get, wait, list ]
+ state:
+ description:
+ - The state to enforce. Mutually exclusive with op.
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - The amount of time to wait for a node to appear.
+ default: 300
+ recursive:
+ description:
+ - Recursively delete node and all its children.
+ type: bool
+ default: 'no'
+requirements:
+ - kazoo >= 2.1
+ - python >= 2.6
+author: "Trey Perry (@treyperry)"
+'''
+
+EXAMPLES = """
+- name: Creating or updating a znode with a given value
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+
+- name: Getting the value and stat structure for a znode
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: get
+
+- name: Listing a particular znode's children
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /zookeeper
+ op: list
+
+- name: Waiting 20 seconds for a znode to appear at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: wait
+ timeout: 20
+
+- name: Deleting a znode at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ state: absent
+
+- name: Creating or updating a znode with a given value on a remote ZooKeeper
+ community.general.znode:
+ hosts: 'my-zookeeper-node:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+ delegate_to: 127.0.0.1
+"""
+
+import time
+import traceback
+
+KAZOO_IMP_ERR = None
+try:
+ from kazoo.client import KazooClient
+ from kazoo.handlers.threading import KazooTimeoutError
+ KAZOO_INSTALLED = True
+except ImportError:
+ KAZOO_IMP_ERR = traceback.format_exc()
+ KAZOO_INSTALLED = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ hosts=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
+ state=dict(choices=['present', 'absent']),
+ timeout=dict(required=False, default=300, type='int'),
+ recursive=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ if not KAZOO_INSTALLED:
+ module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR)
+
+ check = check_params(module.params)
+ if not check['success']:
+ module.fail_json(msg=check['msg'])
+
+ zoo = KazooCommandProxy(module)
+ try:
+ zoo.start()
+ except KazooTimeoutError:
+ module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
+
+ command_dict = {
+ 'op': {
+ 'get': zoo.get,
+ 'list': zoo.list,
+ 'wait': zoo.wait
+ },
+ 'state': {
+ 'present': zoo.present,
+ 'absent': zoo.absent
+ }
+ }
+
+ command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
+ method = module.params[command_type]
+ result, result_dict = command_dict[command_type][method]()
+ zoo.shutdown()
+
+ if result:
+ module.exit_json(**result_dict)
+ else:
+ module.fail_json(**result_dict)
+
+
+def check_params(params):
+ if not params['state'] and not params['op']:
+ return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
+
+ if params['state'] and params['op']:
+ return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
+
+ return {'success': True}
+
+
+class KazooCommandProxy():
+ def __init__(self, module):
+ self.module = module
+ self.zk = KazooClient(module.params['hosts'])
+
+ def absent(self):
+ return self._absent(self.module.params['name'])
+
+ def exists(self, znode):
+ return self.zk.exists(znode)
+
+ def list(self):
+ children = self.zk.get_children(self.module.params['name'])
+ return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
+ 'znode': self.module.params['name']}
+
+ def present(self):
+ return self._present(self.module.params['name'], self.module.params['value'])
+
+ def get(self):
+ return self._get(self.module.params['name'])
+
+ def shutdown(self):
+ self.zk.stop()
+ self.zk.close()
+
+ def start(self):
+ self.zk.start()
+
+ def wait(self):
+ return self._wait(self.module.params['name'], self.module.params['timeout'])
+
+ def _absent(self, znode):
+ if self.exists(znode):
+ self.zk.delete(znode, recursive=self.module.params['recursive'])
+ return True, {'changed': True, 'msg': 'The znode was deleted.'}
+ else:
+ return True, {'changed': False, 'msg': 'The znode does not exist.'}
+
+ def _get(self, path):
+ if self.exists(path):
+ value, zstat = self.zk.get(path)
+ stat_dict = {}
+ for i in dir(zstat):
+ if not i.startswith('_'):
+ attr = getattr(zstat, i)
+ if isinstance(attr, (int, str)):
+ stat_dict[i] = attr
+ result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
+ 'stat': stat_dict}
+ else:
+ result = False, {'msg': 'The requested node does not exist.'}
+
+ return result
+
+ def _present(self, path, value):
+ if self.exists(path):
+            (current_value, zstat) = self.zk.get(path)
+            # kazoo returns the stored value as bytes, so compare bytes with bytes
+            if to_bytes(value) != current_value:
+ self.zk.set(path, to_bytes(value))
+ return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
+ 'value': value}
+ else:
+ return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
+ else:
+ self.zk.create(path, to_bytes(value), makepath=True)
+ return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
+
+ def _wait(self, path, timeout, interval=5):
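+        # Poll for the znode every `interval` seconds (default 5) until the
+        # deadline derived from `timeout` passes.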
+ lim = time.time() + timeout
+
+ while time.time() < lim:
+ if self.exists(path):
+ return True, {'msg': 'The node appeared before the configured timeout.',
+ 'znode': path, 'timeout': timeout}
+ else:
+ time.sleep(interval)
+
+ return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
+ 'znode': path}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py
new file mode 100644
index 00000000..728c0779
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zpool_facts
+short_description: Gather facts about ZFS pools.
+description:
+ - Gather facts from ZFS pool properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS pool name.
+ aliases: [ "pool", "zpool" ]
+ required: false
+ parsable:
+ description:
+      - Specifies if property values should be displayed in machine-friendly
+        format.
+ type: bool
+ default: False
+ required: false
+ properties:
+ description:
+ - Specifies which dataset properties should be queried in comma-separated format.
+        For more information about dataset properties, check the zpool(1M) man page.
+ default: all
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS pool rpool
+ community.general.zpool_facts: pool=rpool
+
+- name: Gather space usage about all imported ZFS pools
+ community.general.zpool_facts: properties='free,size'
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
+ with_items: '{{ ansible_zfs_pools }}'
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary containing all the detailed information about the ZFS pool facts
+ returned: always
+ type: complex
+ contains:
+ ansible_zfs_pools:
+ description: ZFS pool facts
+ returned: always
+ type: str
+ sample:
+ {
+ "allocated": "3.46G",
+ "altroot": "-",
+ "autoexpand": "off",
+ "autoreplace": "off",
+ "bootfs": "rpool/ROOT/openindiana",
+ "cachefile": "-",
+ "capacity": "6%",
+ "comment": "-",
+ "dedupditto": "0",
+ "dedupratio": "1.00x",
+ "delegation": "on",
+ "expandsize": "-",
+ "failmode": "wait",
+ "feature@async_destroy": "enabled",
+ "feature@bookmarks": "enabled",
+ "feature@edonr": "enabled",
+ "feature@embedded_data": "active",
+ "feature@empty_bpobj": "active",
+ "feature@enabled_txg": "active",
+ "feature@extensible_dataset": "enabled",
+ "feature@filesystem_limits": "enabled",
+ "feature@hole_birth": "active",
+ "feature@large_blocks": "enabled",
+ "feature@lz4_compress": "active",
+ "feature@multi_vdev_crash_dump": "enabled",
+ "feature@sha512": "enabled",
+ "feature@skein": "enabled",
+ "feature@spacemap_histogram": "active",
+ "fragmentation": "3%",
+ "free": "46.3G",
+ "freeing": "0",
+ "guid": "15729052870819522408",
+ "health": "ONLINE",
+ "leaked": "0",
+ "listsnapshots": "off",
+ "name": "rpool",
+ "readonly": "off",
+ "size": "49.8G",
+ "version": "-"
+ }
+name:
+ description: ZFS pool name
+ returned: always
+ type: str
+ sample: rpool
+parsable:
+  description: whether property values are reported in machine-friendly (parsable) format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZPoolFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+
+ self._pools = defaultdict(dict)
+ self.facts = []
+
+ def pool_exists(self):
+        cmd = [self.module.get_bin_path('zpool', True)]  # fail early if the zpool utility is missing
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+        cmd = [self.module.get_bin_path('zpool', True)]  # fail early if the zpool utility is missing
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ if self.name:
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
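+            # `zpool get -H -o name,property,value` emits one tab-separated
+            # triple per line, e.g. (illustrative): rpool<TAB>health<TAB>ONLINE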
+ for line in out.splitlines():
+ pool, property, value = line.split('\t')
+
+ self._pools[pool].update({property: value})
+
+ for k, v in iteritems(self._pools):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_pools': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ zpool_facts = ZPoolFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zpool_facts.name
+
+ if zpool_facts.parsable:
+ result['parsable'] = zpool_facts.parsable
+
+ if zpool_facts.name is not None:
+ if zpool_facts.pool_exists():
+ result['ansible_facts'] = zpool_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+ else:
+ result['ansible_facts'] = zpool_facts.get_facts()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py
new file mode 100644
index 00000000..9ad539ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier or a list of either.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
+ - You can also pass a url or a local path to a rpm file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade, installed, removed ]
+ default: "present"
+ type: str
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ type: str
+ extra_args_precommand:
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ disable_gpg_check:
+ description:
+      - Whether to disable the GPG signature checking of the package
+        being installed. Has an effect only if state is
+ I(present) or I(latest).
+ required: false
+ default: "no"
+ type: bool
+ disable_recommends:
+ description:
+      - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) overrides zypper's default behavior by not
+        installing recommended packages; C(no) does install recommended packages.
+ required: false
+ default: "yes"
+ type: bool
+ force:
+ description:
+      - Adds C(--force) option to I(zypper). Allows downgrading packages and changing the vendor or architecture.
+ required: false
+ default: "no"
+ type: bool
+ force_resolution:
+ description:
+      - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: "no"
+ type: bool
+ version_added: '0.2.0'
+ update_cache:
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: "no"
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ description:
+      - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+ version is specified as part of the package name.
+ required: false
+ default: "no"
+ type: bool
+ extra_args:
+ required: false
+ description:
+ - Add additional options to C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ allow_vendor_change:
+ type: bool
+ required: false
+ default: false
+ description:
+      - Adds C(--allow-vendor-change) option to I(zypper) dist-upgrade command.
+ version_added: '0.2.0'
+ replacefiles:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--replacefiles) option to I(zypper) install/update command.
+ version_added: '0.2.0'
+notes:
+  - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+- name: Install nmap
+ community.general.zypper:
+ name: nmap
+ state: present
+
+- name: Install apache2 with recommended packages
+ community.general.zypper:
+ name: apache2
+ state: present
+ disable_recommends: no
+
+- name: Apply a given patch
+ community.general.zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
+
+- name: Remove the nmap package
+ community.general.zypper:
+ name: nmap
+ state: absent
+
+- name: Install the nginx rpm from a remote repo
+ community.general.zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
+
+- name: Install local rpm file
+ community.general.zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+- name: Update all packages
+ community.general.zypper:
+ name: '*'
+ state: latest
+
+- name: Apply all available patches
+ community.general.zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+- name: Perform a dist-upgrade with additional arguments
+ community.general.zypper:
+ name: '*'
+ state: dist-upgrade
+ allow_vendor_change: true
+ extra_args: '--allow-arch-change'
+
+- name: Perform an installation of nmap with the install option replacefiles
+ community.general.zypper:
+ name: 'nmap'
+ state: latest
+ replacefiles: true
+
+- name: Refresh repositories and update package openssl
+ community.general.zypper:
+ name: openssl
+ state: present
+ update_cache: yes
+
+- name: "Install specific version (possible comparisons: <, >, <=, >=, =)"
+ community.general.zypper:
+ name: 'docker>=1.10'
+ state: present
+
+- name: Wait 20 seconds to acquire the lock before failing
+ community.general.zypper:
+ name: mosh
+ state: present
+ environment:
+ ZYPP_LOCK_TIMEOUT: 20
+'''
+
+import xml
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils._text import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+ def __init__(self, name, prefix, version):
+ self.name = name
+ self.prefix = prefix
+ self.version = version
+ self.shouldinstall = (prefix == '+')
+
+ def __str__(self):
+ return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+ """splits of the package name and desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+ if prefix == '~':
+ prefix = '-'
+
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ if version is None:
+ version = ''
+ return prefix, name, version
+ except Exception:
+ return prefix, name, ''
+
+
+def get_want_state(names, remove=False):
+ packages = []
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix not in ['-', '+']:
+ if remove:
+ prefix = '-'
+ else:
+ prefix = '+'
+ packages.append(Package(pname, prefix, version))
+ return packages, urls
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend([p.name for p in packages])
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ try:
+ dom = parseXML(stdout)
+ except xml.parsers.expat.ExpatError as exc:
+ m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+        # track whether this is the first parse so a single retry on rc 103 is possible
+        if packages is None:
+            firstrun = True
+            packages = {}
+        else:
+            firstrun = False
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['replacefiles']:
+ cmd.append('--replacefiles')
+ if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']:
+ cmd.append('--allow-vendor-change')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
+ return cmd
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ packages, urls = get_want_state(name)
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if any(p.version for p in packages):
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ # if a version is given leave the package in to let zypper handle the version
+ # resolution
+ packageswithoutversion = [p for p in packages if not p.version]
+ prerun_state = get_installed_state(m, packageswithoutversion)
+ # generate lists of packages to install or remove
+ packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)]
+
+ if not packages and not urls:
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+ # pass packages to zypper
+ # allow for + or - prefixes in install/remove lists
+ # also add version specifier if given
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend([str(p) for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ elif m.params['state'] == 'dist-upgrade':
+ cmdname = 'dist-upgrade'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ packages, urls = get_want_state(name, remove=True)
+ if any(p.prefix == '+' for p in packages):
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, packages)
+ packages = [p for p in packages if p.name in prerun_state]
+
+ if not packages:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend([p.name + p.version for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list', elements='str'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(required=False, default=None),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ disable_recommends=dict(required=False, default=True, type='bool'),
+ force=dict(required=False, default=False, type='bool'),
+ force_resolution=dict(required=False, default=False, type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'),
+ oldpackage=dict(required=False, default=False, type='bool'),
+ extra_args=dict(required=False, default=None),
+ allow_vendor_change=dict(required=False, default=False, type='bool'),
+ replacefiles=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = list(filter(None, name))
+
+ # Refresh repositories
+ if update_cache and not module.check_mode:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state in ['latest', 'dist-upgrade']:
+ packages_changed, retvals = package_update_all(module)
+ elif name != ['*'] and state == 'dist-upgrade':
+ module.fail_json(msg="Can not dist-upgrade specific packages.")
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py
new file mode 100644
index 00000000..55738b58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py
@@ -0,0 +1,402 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# (c) 2014, Justin Lecher <jlec@gentoo.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper_repository
+author: "Matthias Vogelgesang (@matze)"
+short_description: Add and remove Zypper repositories
+description:
+  - Add or remove Zypper repositories on SUSE and openSUSE.
+options:
+ name:
+ description:
+ - A name for the repository. Not required when adding repofiles.
+ type: str
+ repo:
+ description:
+ - URI of the repository or .repo file. Required when state=present.
+ type: str
+ state:
+ description:
+ - A source string state.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+ description:
+ description:
+      - A description of the repository.
+ type: str
+ disable_gpg_check:
+ description:
+ - Whether to disable GPG signature checking of
+ all packages. Has an effect only if state is
+ I(present).
+ - Needs zypper version >= 1.6.2.
+ type: bool
+ default: no
+ autorefresh:
+ description:
+ - Enable autorefresh of the repository.
+ type: bool
+ default: yes
+ aliases: [ "refresh" ]
+ priority:
+ description:
+ - Set priority of repository. Packages will always be installed
+ from the repository with the smallest priority number.
+ - Needs zypper version >= 1.12.25.
+ type: int
+ overwrite_multiple:
+ description:
+ - Overwrite multiple repository entries, if repositories with both name and
+ URL already exist.
+ type: bool
+ default: no
+ auto_import_keys:
+ description:
+ - Automatically import the gpg signing key of the new or changed repository.
+ - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Implies runrefresh.
+ - Only works with C(.repo) files if `name` is given explicitly.
+ type: bool
+ default: no
+ runrefresh:
+ description:
+ - Refresh the package list of the given repository.
+ - Can be used with repo=* to refresh all repositories.
+ type: bool
+ default: no
+ enabled:
+ description:
+ - Set repository to enabled (or disabled).
+ type: bool
+ default: yes
+
+
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+'''
+
+EXAMPLES = '''
+- name: Add NVIDIA repository for graphics drivers
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: present
+
+- name: Remove NVIDIA repository
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: absent
+
+- name: Add python development repository
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+- name: Refresh all repos
+ community.general.zypper_repository:
+ repo: '*'
+ runrefresh: yes
+
+- name: Add a repo and add its gpg key
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+ auto_import_keys: yes
+
+- name: Force refresh of a repository
+ community.general.zypper_repository:
+ repo: 'http://my_internal_ci_repo/repo'
+ name: my_ci_repo
+ state: present
+ runrefresh: yes
+'''
+
+import traceback
+
+XML_IMP_ERR = None
+try:
+ from xml.dom.minidom import parseString as parseXML
+ HAS_XML = True
+except ImportError:
+ XML_IMP_ERR = traceback.format_exc()
+ HAS_XML = False
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
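+# repo attributes read from each <repo> element of `zypper --xmlout repos`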
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+
+def _get_cmd(*args):
+ """Combines the non-interactive zypper command with arguments/subcommands"""
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
+ cmd.extend(args)
+
+ return cmd
+
+
+def _parse_repos(module):
+ """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
+ cmd = _get_cmd('--xmlout', 'repos')
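+    # Abridged, illustrative `zypper --xmlout repos` output parsed below:
+    #   <repo alias="repo-oss" name="openSUSE-OSS" enabled="1" autorefresh="1" gpgcheck="1">
+    #     <url>http://download.opensuse.org/distribution/leap/15.2/repo/oss/</url>
+    #   </repo>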
+
+ if not HAS_XML:
+ module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ repos = []
+ dom = parseXML(stdout)
+ repo_list = dom.getElementsByTagName('repo')
+ for repo in repo_list:
+ opts = {}
+ for o in REPO_OPTS:
+ opts[o] = repo.getAttribute(o)
+ opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ # A repo can be uniquely identified by an alias + url
+ repos.append(opts)
+ return repos
+ # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+ elif rc == 6:
+ return []
+ else:
+ module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+
+def _repo_changes(realrepo, repocmp):
+ "Check whether the 2 given repos have different settings."
+ for k in repocmp:
+ if repocmp[k] and k not in realrepo:
+ return True
+
+ for k, v in realrepo.items():
+ if k in repocmp and repocmp[k]:
+ valold = str(repocmp[k] or "")
+ valnew = v or ""
+ if k == "url":
+ valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+ if valold != valnew:
+ return True
+ return False
+
+
+def repo_exists(module, repodata, overwrite_multiple):
+ """Check whether the repository already exists.
+
+ returns (exists, mod, old_repos)
+ exists: whether a matching (name, URL) repo exists
+ mod: whether there are changes compared to the existing repo
+ old_repos: list of matching repos
+ """
+ existing_repos = _parse_repos(module)
+
+ # look for repos that have matching alias or url to the one searched
+ repos = []
+ for kw in ['alias', 'url']:
+ name = repodata[kw]
+ for oldr in existing_repos:
+ if repodata[kw] == oldr[kw] and oldr not in repos:
+ repos.append(oldr)
+
+ if len(repos) == 0:
+ # Repo does not exist yet
+ return (False, False, None)
+ elif len(repos) == 1:
+ # Found an existing repo, look for changes
+ has_changes = _repo_changes(repos[0], repodata)
+ return (True, has_changes, repos)
+ elif len(repos) >= 2:
+ if overwrite_multiple:
+ # Found two repos and want to overwrite_multiple
+ return (True, True, repos)
+ else:
+ errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+ errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
+ module.fail_json(msg=errmsg)
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+ "Adds the repo, removes old repos before, that would conflict."
+ repo = repodata['url']
+ cmd = _get_cmd('addrepo', '--check')
+ if repodata['name']:
+ cmd.extend(['--name', repodata['name']])
+
+ # priority on addrepo available since 1.12.25
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+ if repodata['priority']:
+ if zypper_version >= LooseVersion('1.12.25'):
+ cmd.extend(['--priority', str(repodata['priority'])])
+ else:
+ warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+
+ if repodata['enabled'] == '0':
+ cmd.append('--disable')
+
+ # gpgcheck available since 1.6.2
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+ # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+ if zypper_version >= LooseVersion('1.6.2'):
+ if repodata['gpgcheck'] == '1':
+ cmd.append('--gpgcheck')
+ else:
+ cmd.append('--no-gpgcheck')
+ else:
+ warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+
+ if repodata['autorefresh'] == '1':
+ cmd.append('--refresh')
+
+ cmd.append(repo)
+
+ if not repo.endswith('.repo'):
+ cmd.append(repodata['alias'])
+
+ if old_repos is not None:
+ for oldrepo in old_repos:
+ remove_repo(module, oldrepo['url'])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc, stdout, stderr
+
+
+def remove_repo(module, repo):
+ "Removes the repo."
+ cmd = _get_cmd('removerepo', repo)
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def get_zypper_version(module):
+ rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
+ if rc != 0 or not stdout.startswith('zypper '):
+ return LooseVersion('1.0')
+ return LooseVersion(stdout.split()[1])
+
+
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+ "Forces zypper to refresh repo metadata."
+ if auto_import_keys:
+ cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
+ else:
+ cmd = _get_cmd('refresh', '--force')
+ if shortname is not None:
+ cmd.extend(['-r', shortname])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ repo=dict(required=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ runrefresh=dict(required=False, default=False, type='bool'),
+ description=dict(required=False),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority=dict(required=False, type='int'),
+ enabled=dict(required=False, default=True, type='bool'),
+ overwrite_multiple=dict(required=False, default=False, type='bool'),
+ auto_import_keys=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=False,
+ required_one_of=[['state', 'runrefresh']],
+ )
+
+ repo = module.params['repo']
+ alias = module.params['name']
+ state = module.params['state']
+ overwrite_multiple = module.params['overwrite_multiple']
+ auto_import_keys = module.params['auto_import_keys']
+ runrefresh = module.params['runrefresh']
+
+ zypper_version = get_zypper_version(module)
+ warnings = [] # collect warning messages for final output
+
+ repodata = {
+ 'url': repo,
+ 'alias': alias,
+ 'name': module.params['description'],
+ 'priority': module.params['priority'],
+ }
+ # rewrite bools in the language that zypper lr -x provides for easier comparison
+ if module.params['enabled']:
+ repodata['enabled'] = '1'
+ else:
+ repodata['enabled'] = '0'
+ if module.params['disable_gpg_check']:
+ repodata['gpgcheck'] = '0'
+ else:
+ repodata['gpgcheck'] = '1'
+ if module.params['autorefresh']:
+ repodata['autorefresh'] = '1'
+ else:
+ repodata['autorefresh'] = '0'
+
+ def exit_unchanged():
+ module.exit_json(changed=False, repodata=repodata, state=state)
+
+ # Check run-time module parameters
+ if repo == '*' or alias == '*':
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys)
+ module.exit_json(changed=False, runrefresh=True)
+ else:
+ module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
+ if state == 'present' and not repo:
+ module.fail_json(msg='Module option state=present requires repo')
+ if state == 'absent' and not repo and not alias:
+ module.fail_json(msg='Alias or repo parameter required when state=absent')
+
+ if repo and repo.endswith('.repo'):
+ if alias:
+ module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
+ else:
+ if not alias and state == "present":
+            module.fail_json(msg='Name required when adding non-.repo files.')
+
+ exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+ if repo:
+ shortname = repo
+ else:
+ shortname = alias
+
+ if state == 'present':
+ if exists and not mod:
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys, shortname)
+ exit_unchanged()
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ if rc == 0 and (runrefresh or auto_import_keys):
+ runrefreshrepo(module, auto_import_keys, shortname)
+ elif state == 'absent':
+ if not exists:
+ exit_unchanged()
+ rc, stdout, stderr = remove_repo(module, shortname)
+
+ if rc == 0:
+ module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ else:
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
+
+
+if __name__ == '__main__':
+ main()